Saturday 31 December 2016

Calculating CIredEdge from Sentinel 2 composite image and adding as Band in Google Earth Engine?


This is a follow-up question to Seeking Function to calculate red-edgeNDVI in Google Earth Engine?


I’ve added the redEdgeNDVI band to the image collection by mapping the function over the collection as suggested in the previous post. I’ve then created a composite image based on the maximum redEdgeNDVI for each pixel across all the images in the collection (S2_max_reNDVI). I’ve then split this image in two based on the value of the redEdgeNDVI creating two images, one with max redEdgeNDVI < 0.6 (S2_reNDVI_0_59) and one with max redEdgeNDVI >= 0.6 (S2_reNDVI_6). My code works fine up to this point.


What I then want to do is take the image S2_reNDVI_6 and calculate a new chlorophyll index called CIredEdge ((NIR/redEdge)-1) and add this as a band to S2_reNDVI_6, just like I did with redEdgeNDVI. However this time the object S2_reNDVI_6 is an Image and not a Collection.


I’ve tried writing this as a function:


//Function to calculate CIredEdge ((NIR / redEdge) - 1) and append it to the
//input image as a band named 'CIre'.
var add_CIredEdge = function(image){
//Create band variables
var redEdge = image.select('B5'); // Sentinel-2 red-edge band
var NIR = image.select('B8');     // Sentinel-2 near-infrared band
// Rename the result: without rename() the new band keeps the default name
// 'B8', and addBands() would dedupe it to 'B8_1' (the symptom described
// below), making it impossible to select/display as 'CIre'.
var CIredEdge = NIR.divide(redEdge).subtract(1).rename('CIre');
return image.addBands(CIredEdge);
};

//Map image to CIre
var S2_CIredEdge = S2_reNDVI_6.map(add_CIredEdge);

As an expression:



var CIre = S2_reNDVI_6.expression(
'(NIR / redEdge) -1', {
'redEdge': S2_reNDVI_6.select('B5'),
'NIR': S2_reNDVI_6.select('B8')
});

And by computing it the hard way:


var CIre = S2_reNDVI_6.select('B8').divide(S2_reNDVI_6.select('B5')).subtract(1);

var S2_CIredEdge = S2_reNDVI_6.addBands(CIre);


Map.addLayer(S2_CIredEdge, {palette: ['ffeda0', 'feb24c', 'f03b20']}, 'CIrededge pixel composite');
//Map.addLayer(S2_CIredEdge, {bands:['CIre'], palette: ['ffeda0', 'feb24c', 'f03b20']}, 'CIrededge pixel composite');

But none of these methods work. Using the function I get the error message “S2_reNDVI_6.map is not a function” when trying to map the image.


With the expression and by doing it the hard way the result returns a 1 band (float) image but the band is ‘B8’ and the layer does not display even though the layers dialogue does. When I stretch to 100% the ranges given are 0 to 0. If I add the band to S2_reNDVI_6 there are 11 bands with the new band being named ‘B8_1’.


I suspect that there is something wrong with way I input the formula for the calculation.


I am new to GEE.


The object 'aoi' is a polygon shapefile.


Here is my code that works:



/**
 * Mask clouds in a Sentinel-2 image using its QA60 quality band.
 *
 * Bit 10 of QA60 flags opaque clouds and bit 11 flags cirrus; a pixel is
 * kept only when both bits are zero (clear conditions). Surviving pixels
 * are rescaled from 0-10000 integer DNs to 0-1 reflectance.
 *
 * @param {ee.Image} image Sentinel-2 image with a QA60 band
 * @return {ee.Image} cloud-masked, rescaled Sentinel-2 image
 */
function maskS2clouds(image) {
  var qaBand = image.select('QA60');

  var opaqueCloudBit = 1 << 10;
  var cirrusCloudBit = 1 << 11;

  // Clear-sky pixels have both cloud flags unset.
  var clearMask = qaBand.bitwiseAnd(opaqueCloudBit).eq(0)
      .and(qaBand.bitwiseAnd(cirrusCloudBit).eq(0));

  // Drop cloudy pixels, then scale reflectance values into [0, 1].
  return image.updateMask(clearMask).divide(10000);
}
//-----------------------------------------------------------------------------------

//Section 2
// Load Sentinel-2 TOA reflectance data.
var S2 = ee.ImageCollection('COPERNICUS/S2')
.filterDate('2017-06-01', '2017-09-30')
// Pre-filter to get less cloudy granules.
.filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20))
//Select required bands only
.select('B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B8A', 'QA60')
//Apply cloud mask
.map(maskS2clouds);


// Create clipping function
var clipper = function(image){
return image.clip(aoi); // Add the table or shapefile to clip to here
};

//Clip images
var S2Clip = S2.map(clipper);

// Compute the red-edge NDVI, (B8 - B5) / (B8 + B5), and append it to the
// input image as a band named 'reNDVI'.
var add_reNDVI = function(image){
  var b5 = image.select('B5'); // red-edge band
  var b8 = image.select('B8'); // near-infrared band
  var numerator = b8.subtract(b5);
  var denominator = b8.add(b5);
  var reNDVI = numerator.divide(denominator).rename('reNDVI');
  return image.addBands(reNDVI);
};

//Map collection to reNDVI
var S2_reNDVI = S2Clip.map(add_reNDVI);


// Make a "max redEdgeNDVI" pixel composite taking max reNDVI from all images
var S2_max_reNDVI = S2_reNDVI.qualityMosaic('reNDVI');

// Display the result.
//Map.addLayer(S2_max_reNDVI, {bands:['reNDVI'], palette: ['ffeda0', 'feb24c', 'f03b20']}, 'Max reNDVI pixel composite');

//Split image in 2
//Set threshold for using redEdgeNDVI
var reNDVI_threshold = S2_max_reNDVI.lt(0.6);


//Set threshold for using CIredEdge
var CIre_threshold = S2_max_reNDVI.gte(0.6);

//Extract pixels with max reNDVI < 0.6 for redEdgeNDVI - LAI conversion
var S2_reNDVI_0_59 = S2_max_reNDVI.updateMask(reNDVI_threshold);

//Extract pixels with max reNDVI >= 0.6 for CIredEdge - LAI conversion
var S2_reNDVI_6 = S2_max_reNDVI.updateMask(CIre_threshold);


// Display the results < 0.6.
//Map.addLayer(S2_reNDVI_0_59, {bands:['reNDVI'], palette: ['ffeda0', 'feb24c', 'f03b20']}, 'Pixels Max reNDVI pixel composite');

// Display the results >= 0.6.
//Map.addLayer(S2_reNDVI_6, {bands:['reNDVI'], palette: ['ffeda0', 'feb24c', 'f03b20']}, 'Pixels Max reNDVI CIredEdge pixel composite');

Answer



The problem was I was setting the threshold on all bands, not just on reNDVI.


I changed:


var reNDVI_threshold = S2_max_reNDVI.lt(0.6);


to


var reNDVI_threshold = S2_max_reNDVI.select('reNDVI').lt(0.6);

and


var CIre_threshold = S2_max_reNDVI.gte(0.6);

to


var CIre_threshold = S2_max_reNDVI.select('reNDVI').gte(0.6);

This solved it.



carto - Synced CSV File Google Drive - Not Filling out Georeference field for visual


I Have a JS account on Cartodb with the sync option for data.


I have created code to upload CSV file to Google Drive of all login activity of our Google Accounts for the last day. If I select this document as a data source without adding sync option, it converts the ip_address column I have to the co-ords using Georeference via IP Addressing.


If I select the sync option instead for the table data, the data syncs correctly but I can't see where to setup the georeference column for the map view. The Georeference section in the top right drop down is greyed out.


Please tell me I haven't upgraded our account without this feature working...





sql server - Choosing database for storing spatial data?



A couple of days ago I installed the demo of spatialware 4.9 from MapInfo into my SQL Server 2005 install and loaded all the larger dataset into it. I was quite impressed with the performance vs the old file based approach but it got me thinking what other options are out there and what are the pros and cons with them.


A con I would have to say with spatialware is the fact that it is $5000 p/a and only MapInfo can read the objects from it. Which at the moment is fine because MapInfo is all we use.


I'm wondering what other people have gone with and what their experiences are.



Answer



PostGIS based on PostgreSQL is a popular database for GIS.


I haven't used it much myself, but a pro is that it's open source and that many other GIS applications use it, so it has an active GIS community.


arcgis 10.0 - Arcobjects: DisplayTransformation.FromMapPoint() transforms wrong


I want to transform map coords into page coords in ArcObjects in Python. So I do, say:



>>> pApp = NewObj(esriFrame.AppROT, esriFrame.IAppROT).Item(0)
>>> pDoc = pApp.Document
>>> pMxDoc = CType(pDoc, esriMapUI.IMxDocument)

>>> pDisplay = pMxDoc.ActiveView.ScreenDisplay # something wrong here?
>>> pDisTrans = pDisplay.DisplayTransformation

>>> pageX, pageY = pDisTrans.FromMapPoint(ptInRealCoords) # coords are ~389700,~316671m
>>> pageX
6523059 # which should be ~4.73 in cm


Any clues whats wrong?



Answer



IScreenDisplay's IDisplayTransformation.FromMapPoint transforms active view's coordinates into device coordinates (i.e. pixels). Moreover, you are accessing pMxDoc.ActiveView which will vary depending on whether you are in data view or page view. This means that the source unit space will be either map units or page units.


If you want to transform a coordinates in a certain map into page coordinates on the page layout you need:



  1. Transform the map's coordinates to the device space (i.e. cast the particular map as IActiveView and use its screen display's transformation, FromMapPoint).

  2. Transform the obtained device coordinates into page coordinates (i.e. cast the document's page layout as IActiveView and use its screen display transformation, ToMapPoint). Note that the name ToMapPoint here is slightly misleading as the "map point" here actually means page coordinates.


How do you make Amazon Cloud GIS Server accessible outside local environment?


I have services in the cloud and want to access them from applications outside the cloud. How do I set permissions to consume services outside of the cloud?



Also, what is the format or syntax used for referencing Amazon Cloud GIS Server? So, when I try to add GIS Server what would type in?




maptips - Map tip display text in QGIS 2.16: pictures not showing up anymore!


I've been using the great function of QGIS: "map tips". It allows to include html-formatted text (and pictures) in order to popup a window by hovering features. I use map tips for a very convenient visualization of georeferenced pictures, as I presented once on digital-geography blog



But sadly I cannot make it work anymore since (?) a few weeks ago, I think since I updated QGIS to 2.16.1 (and later 2.16.2)! I'm always using the same code as before:


[%CONCAT('')%]


(I know it can be written in an other way but this was working before...)


Now the frame pops up but there's just no picture: enter image description here. Is that a consequence of the map tips improvements of QGIS 2.16? Is there a new syntax I'm missing? Or might I have something going wrong on my computer?


Anyone sharing the issue? Any ... tip?


Note that including an Iframe link works, I tried the code proposed by mbernasocchi answering to the question "How to view a video using map-tip QGIS 2.16" and it works great.


I'm working under Kubuntu 14.04
kdelibs: 4.13.3
Qt: 4.8.6
Core: 4.2.0-42-generic

System: 32 bit



Answer



you need to add the file:// prefix. The most portable way to use this (assuming you have the images in the same folder as the project) currently (tested on QGIS 3.2 - ubuntu) is:



you can also add width="300" and height="300" or so if needed


please note the @project_home this is a QGIS variable that is automatically set on each project thus allowing you to maintain relative paths


Editing and saving CSV file to use in plugin for QGIS?


For using in a plugin (PS Time Series Viewer), I want to edit a CSV file in the way plugin wants. Somehow I think there's a problem with the CSV file. I would like to tell you all my steps because I cannot diagnose the problem properly.


I edited with MS Excel and saved the CSV file. I called it into QGIS (Layer/Add Layer/Add delimited text layer). I saved this point layer as a shapefile (export/save feature as). At this step, when I click a point with the PS Time Series tool, I get an error. The error message is "No time series values found for the selected point".


I attached my CSV file and the plugin's test data (It is given as shapefile, and when I try on this, the plugin works very well).


Original CSV file: https://www.dropbox.com/s/p5z529wwzzmu6cy/_MyOriginalCSV.csv?dl=0


Edited CSV file: https://www.dropbox.com/s/i67bex4kzxehmqw/_MyEditedCSV.csv?dl=0


Plugin's test data: https://www.dropbox.com/s/cs35nhwze77y86n/_PluginsTestShapeFile.rar?dl=0


Error message: https://www.dropbox.com/s/24c244qdl6565bj/Photo%2030.03.2019%2020%2007%2048.jpg?dl=0


Is there any problem with my CSV file or I missed another point?


QGIS version: 3.6 PS Time Series version: 0.3.1





Storing shapefile in PostgreSQL database as a table using PHP?


I want to create a table from a shapefile in PostgreSQL. To do that, I can run this command in cmd:


shp2pgsql -s 4326 -W LATIN1 E:\Any\THA_adm public.test123 | psql -h localhost -d DATA -U postgres -w 123;

This works, but I want to run it from PHP. How can I do that, I tried PHP functions:



  1. shell_exec()


  2. exec()

  3. system()


but did not work. Is there any other way to create a table from shape in PHP or run this command?



Answer



So the answer was "by using exec() as in an example showed in the answer to question https://stackoverflow.com/questions/18259089/execute-gdal-translate-in-php-using-exec".


How can I work with very large shapefiles (~1 GB) in QGIS?



I'm performing some basic spatial analyses on a large database of traffic injuries in California between 2003 - 2011. Because the dataset is large (nearly 1 GB of points), I'd like to first cut it down to a specific geographic region using a spatial query, but I find that QGIS consistently freezes and hangs if I try to use a spatial query or filter the layer.


What are some ways I can work with this dataset? Are there more efficient data formats I can use?



Answer



Load the data into SpatiaLite in QGIS... best way is to create a new SpatiaLite database via the right-click GUI in the browser window, then simply drag and drop your shapefile onto the SpatiaLite database you just created.


From there you have all the power of SpatiaLite at your disposal, including SQL and SQL Spatial functions, which are a way more intuitive way to work with this kind of data.


Some more documentation can be found here, and there's the SpatiaLite manager plugin.


Friday 30 December 2016

arcgis desktop - Removing features whose labels are not displayed?


Using ArcMap 10.0, I am working on a map that shows many villages. Each village has a name attribute that I want to use for a label, but in some areas there are so many villages that not all can be labeled. I am using the Maplex label engine and have not checked the "Never Remove (allow overlap)" box.


I am quite happy that not all village names are displayed as this would be way too much. And it is ok that Maplex chooses which labels to display, as I have no way to know which village is more important than another one. This also means that I can't base the labeling on a field such as population or something else, even though that would be less random.


However, I would also like the village points that are not labeled to similarly not be displayed. I think it unlikely that someone would want to see the point without knowing the name of the village. If all points are shown, it is also difficult to tell which point a name label goes with in crowded areas. So instead of choosing just labels not to be shown, I also want the computer to chose features not to be shown.


To give you a better idea on what I mean:



To give you an idea


Is there a way to make these features/points not draw if their label is not shown?




python - Special characters in QGIS script tools


I would like to write a script that includes a selection from the attribute table. But my data is in Hungarian, so it includes special characters like: öüőűáéí


So when there is non of these in the query it executes, otherwise I get this:


Is there a setting or something to solve this?



Answer




What happens when you use diacritic characters?


It might be a simple python problem. Open the python editor in QGIS, and ensure that you start with the two comment lines on the top, like this:


#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Write an accented Hungarian string into attribute 7 of every feature of
# the 'utilizador' layer, to verify that UTF-8 text round-trips in QGIS.
# NOTE(review): the original paste lost the for-loop indentation (invalid
# Python) and the string literal was mojibake; both are restored here.
layers = QgsMapLayerRegistry.instance().mapLayersByName('utilizador')
layer = layers[0]
it = layer.getFeatures()

layer.startEditing()
for feat in it:
    layer.changeAttributeValue(feat.id(), 7, u'Jó napot kívánok!')
layer.commitChanges()

Creating constant slope DEM using ArcGIS Desktop?


I wanted to create a DEM of constant slope say 1/3000.


How can I proceed in ArcGIS Desktop 9.3?


The area may be 1000m x 1000m or any other as per convenience.




Thursday 29 December 2016

Creating DEM from point data in CSV files using ArcGIS Desktop?


I have a lot of csv files which contain point data containing height, aspect and slope measurements at a resolution of 12,5 meters.


I need to create a dem from this data and have ArcGIS 10 with spatial analyst and 3d Analyst. I did this before with 9.3 but I am not sure if my methodology is correct.




  1. Create point shapefiles from the csv file

  2. interpolate to raster (here I never know whether to use spline, kriging or some other method)?


Last time I did something wrong and rounded all the values up to 1m even though the data was to cm accuracy so this time I want to do it right!



Answer



Because these are regular points, you effectively already have a DEM; it's just in a different format than ArcGIS likes. This makes two different strategies available to you:




  1. Convert the data into a format ArcGIS can handle. One way is to set up a raster extent and cellsize that (i) cover your DEM and (ii) situate each point near the middle of its cell. Import your DEM as a point layer and convert that directly, with no interpolation, into a grid format. (Each cell acquires the value of the unique point lying within it.) From now on, interpolation will be automatically performed whenever you resample the DEM, when aggregating, resizing, or reprojecting it. This gives you access to nearest-neighbor (not recommended), bilinear, and cubic convolution methods. You can create three separate grids this way: one for elevation, one for slope, one for aspect. For this to work, though, you must be using exactly the same coordinate system (including the projection) used to create the DEM in the first place, so that the points are truly evenly spaced and parallel to the coordinate axes.





  2. Pretend the points are "irregular" and interpolate only the elevations using any method you can (splines, IDW, kriging, etc). Derive new slope and aspect DEMs from the interpolated elevations. Unless you are using a Topo2DEM approach and have additional information you can supply--such as stream beds, elevation contours, peaks, or such material--then it is likely this method will merely produce meaningless artifacts that reflect the interpolation method as much as they do the original data. It will also require much more computation than the first method (kriging may even be impossible because of this).




javascript - Earth Engine convert list with coordinates and values into a feature collection for export


I need to create a feature collection from a list containing coordinates and band values. These are extracted from a landsat image as shown in this linked script.


Extract complete pixel values inside a geometry


I'm unable to find a solution in creating a feature collection. Here is the tail end code.


var valuesList = joinedImage.reduceRegion({
reducer: ee.Reducer.toList(4),
geometry: myGeometry
}).values().get(0);


var feature = ee.Feature(null, valuesList);


// Wrap the Feature in a FeatureCollection for export.
var myFeatures = ee.FeatureCollection([feature]);


// Export the image, specifying scale and region.
Export.table.toDrive(myFeatures,
"data",

"myData",
"B4-B5",
"CSV");

This doesn't work, because it is clearly the wrong way to create the feature collection. I've tried to create it similar to Rodrigo E. Principe's solution of mapping a Feature Collection but I can't seem to create the initial feature collection to start with.



Answer



// The reduceRegion output is a list of rows [lon, lat, B4, B5]; cast it so
// List methods are available, then convert each row into a point Feature.
valuesList = ee.List(valuesList) // Cast valuesList

var myFeatures = ee.FeatureCollection(valuesList.map(function(el){
el = ee.List(el) // cast every element of the list

// Entries 0 and 1 are the coordinates; build the point geometry from them.
var geom = ee.Geometry.Point([ee.Number(el.get(0)), ee.Number(el.get(1))])
// Entries 2 and 3 are the band values, stored as feature properties.
return ee.Feature(geom, {'B4':ee.Number(el.get(2)), 'B5':ee.Number(el.get(3))})
}))

Map.addLayer(myFeatures) // see the result
Map.centerObject(myFeatures)

// Export the table to Drive. Positional args after the collection are:
// description, folder, fileNamePrefix, fileFormat.
Export.table.toDrive(myFeatures,
"data",

"myData",
"B4-B5",
"CSV");

python - Intersecting MultiLinestring based geodataframes with geopandas?


I would like to perform the following operation using geopandas.


enter image description here


The segments are delimited by red points and the blue items are attribute information. My inputs are the first and second line segments and my output is the third line segment.



Initially I thought this would be an intersection operation, but I soon learned that geopandas can only intersect polygons, therefore something like:


intersection = geopandas.overlay(split_lines, original_lines, how='intersection')


returns the following error:


raise TypeError("overlay only takes GeoDataFrames with (multi)polygon "
TypeError: overlay only takes GeoDataFrames with (multi)polygon

This to me looks like a standard geoprocessing operation and I really hope I won't have to code this up from scratch. Are there any simplified ways to come up with the following result without having to code a custom function?


EDIT Unfortunately I cannot get this to work for more complicated geometries such as


ORIGINAL_LINES


                                            geometry property

0 (LINESTRING (0 0, 6.656423206909781 4.43757029... a
1 (LINESTRING (6.656423206909781 4.4375702913320... b
2 (LINESTRING (8.070636769282876 5.8517838537051... c
3 (LINESTRING (6.656423206909781 4.4375702913320... d
4 (LINESTRING (10.98655022583197 1.9375702913320... e
5 (LINESTRING (13.68293236472948 0.6224568509648... a
6 (LINESTRING (17.54663566988575 -0.412819329445... a

SPLIT_LINES


                                            geometry  susc

0 LINESTRING (0 0, 4.160264504318614 2.773481432... 1
1 LINESTRING (4.160264504318614 2.77348143208253... 2
2 LINESTRING (6.656423206909781 4.43757029133205... 3
3 LINESTRING (9.950815132334437 8.18950268263064... 4
4 LINESTRING (13.08444573742037 12.0857007308397... 5
5 LINESTRING (6.656423206909781 4.43757029133205... 4
6 LINESTRING (10.98655022583197 1.93757029133205... 3
7 LINESTRING (15.61478401730761 0.10481876075978... 2

The output seem to be a 1D line... which is incorrect for this application.




Answer



EDIT: The following answer only works for the 1-D case. To extend it to 2-D, you will need to parameterize your links by the along-road distance, and replace the x-coordinate with the parameterized length. However, I'm fairly confident this is doable much simpler with Geopandas.


It would be too hard to give hints in the comments, so here's a script that should give you what you want. It's not written for efficiency--probably you could get geopandas to do what you want with some finegaling, but here ya go. It's also not written very generally, but that could be done if you have more than one attribute.


import geopandas as gpd
from shapely.geometry import LineString
import numpy as np

# NOTE(review): the original paste lost all indentation (invalid Python);
# the structure is restored here, bare `except:` clauses are narrowed to the
# exceptions actually expected, and the unused `x_forsort` list is removed.

# Example "split" lines: segments along the x-axis carrying a letter attribute.
slgeom = [[(0, 0), (7, 0)], [(7, 0), (13, 0)], [(13, 0), (15, 0)], [(15, 0), (19, 0)]]
geoms = []
for s in slgeom:
    geoms.append(LineString(s))
properti = ['a', 'b', 'c', 'd']
split_lines = gpd.GeoDataFrame(geometry=geoms)
split_lines['property'] = properti

# Example "original" lines: segments carrying a numeric attribute.
olgeom = [[(0, 0), (5, 0)], [(5, 0), (7, 0), (10, 0)], [(10, 0), (13, 0), (15, 0)], [(15, 0), (19, 0)]]
geoms = []
for o in olgeom:
    geoms.append(LineString(o))
susc = [1, 2, 3, 4]
original_lines = gpd.GeoDataFrame(geometry=geoms)
original_lines['susc'] = susc

# Collect split-line vertex x-coordinates; attrib1[i] is the attribute of the
# segment that starts at xs1[i] (shared endpoints take the later segment's).
xs1 = []
attrib1 = []
for g, a in zip(split_lines.geometry.values, split_lines.property.values):
    x = g.coords.xy[0].tolist()
    xs1.extend(x)
    try:
        attrib1[-1] = a
    except IndexError:
        # First segment: nothing to overwrite yet.
        pass
    attrib1.extend([a for l in range(len(x))])

# Same bookkeeping for the original lines.
xs2 = []
attrib2 = []
for g, a in zip(original_lines.geometry.values, original_lines.susc.values):
    x = g.coords.xy[0].tolist()
    xs2.extend(x)
    try:
        attrib2[-1] = a
    except IndexError:
        pass
    attrib2.extend([a for l in range(len(x))])

# Union of all breakpoints, with the attribute (if any) each input carries
# at that x; None marks breakpoints that only exist in the other input.
allxs = list(set(xs1 + xs2))
a1_forsort = []
a2_forsort = []
for x in allxs:
    try:
        idx = xs1.index(x)
        a1_forsort.append(attrib1[idx])
    except ValueError:
        a1_forsort.append(None)
    try:
        idx = xs2.index(x)
        a2_forsort.append(attrib2[idx])
    except ValueError:
        a2_forsort.append(None)
forsort = np.transpose(np.array([allxs, a1_forsort, a2_forsort]))

# Now sort based on x value (1st column)
sorteds = forsort[forsort[:, 0].argsort()]

# Walk the sorted breakpoints, emitting one segment per adjacent pair and
# forward-filling each attribute across rows where that input had no vertex.
output = dict()
output['geometry'] = []
output['attrib_1'] = []
output['attrib_2'] = []
for i in range(len(sorteds) - 1):
    # Store shapely linestring
    output['geometry'].append(LineString([(sorteds[i, 0], 0), (sorteds[i + 1, 0], 0)]))

    # Store attributes
    if i == 0:
        output['attrib_1'].append(sorteds[i, 1])
        output['attrib_2'].append(sorteds[i, 2])
    else:
        if sorteds[i, 1] is None:
            output['attrib_1'].append(output['attrib_1'][-1])
        else:
            output['attrib_1'].append(sorteds[i, 1])

        if sorteds[i, 2] is None:
            output['attrib_2'].append(output['attrib_2'][-1])
        else:
            output['attrib_2'].append(sorteds[i, 2])

# Convert back to geopandas dataframe
out_gdf = gpd.GeoDataFrame(output)
out_gdf.crs = original_lines.crs

Result:



out_gdf
Out[185]:
attrib_1 attrib_2 geometry
0 a 1 LINESTRING (0 0, 5 0)
1 a 2 LINESTRING (5 0, 7 0)
2 b 2 LINESTRING (7 0, 10 0)
3 b 3 LINESTRING (10 0, 13 0)
4 c 3 LINESTRING (13 0, 15 0)
5 d 4 LINESTRING (15 0, 19 0)

postgis - ST_Buffer returns inaccurate results with radius of over 1 meter


I have a specific location: lat, lng and accuracy(meters) I want to return a geography object that will represent a polygon that surrounds this point with a given radius that will be equivalent to accuracy (I don't want to use a circle)


postgis code:


SELECT ST_AsText(CAST(ST_Buffer(ST_SetSRID(ST_Point(-74.005847, 40.716862),4326),10) As geography));

returns an object with points such as (-64.005847 40.716862) that are on a huge radius and not 10 meters as requested.



What am I doing wrong here?



Answer



You need to cast it as Geography before buffering it, not after. As you have it, it's being buffered by 10 map units (degrees) instead of meters because the value returned from ST_SetSRID is a geometry.


SELECT ST_AsText(ST_Buffer(CAST(ST_SetSRID(ST_Point(-74.005847, 40.716862),4326) AS geography),10));


openlayers 2 - Understanding web mapping tools



Before I start, this is going to be a general question but with specific cases. I've been trying to understand GIS tools and applications to achieve the following:



  1. Build my own basemap

  2. Publish the basemap online

  3. Add my own data layers to the online basemap

  4. Perform analysis (on-the-fly)

  5. Add interactivity to the map



So, In my first attempt, I built a spatially enabled database on PostGIS, queried this using PHP, overlayed it on google maps using their API and added interactivity using their api itself. I carried out analysis like area, denisty etc. on the fly using postGIS functions.


For my next attempt, I downloaded TileMill and rendered some really beautiful maps and added zoom level functions to it. I also exported my MBtile files. When I came across trying to publish this online, I hit quite a few names, but got lost and confused:



  1. TileStache

  2. TileCache

  3. TileStream

  4. Mapserver

  5. Mapnik

  6. GeoServer

  7. Mapbox



So, I understand these are mapservers. But what exactly does a mapserver do? Why is it used? Why can't I serve my tiles from a normal Apache web server? And for MBtiles, how many servers are out there which let you publish it directly?


I can also export my basemap in a png/svg format. Can these formats be used to make a basemap through a mapserver?


When researching interactivity I came across:



  1. Leaflet

  2. ModestMaps

  3. OpenLayers

  4. Mapstraction



I understand these are Javascript Mapping API's. But on what base map can I use these? Can it be used on Google maps? Can I use them on my own base maps too?


I basically want to understand the differences between these tools and which would be the best software stack to build a fully customized, interactive map with my own basemap.


Sorry if my question sounds to general. I will definitely be here to clarify my questions



Answer



As you go deeper and deeper into the world of GIS, you'll realize that a lot of work has been already been done to solve common problems.




  1. You have your geographic data in your database. How do you arrange the various layers and render them, and add various map elements to produce a Map that you can print? You need a desktop GIS application. You could use one of the FOSS options like QGIS, GRASS, uDig etc., or a commercial option like ArcGIS Desktop etc.





  2. You have geographic data in your database and want to use that to create a map on your website dynamically. Use a mapserver to read the geo-data and create an image (rendered according to your requirements) for you on the fly that you can show to your users. There are many options for you like Geoserver, Mapserver, ArcGIS Server.




  3. Now you want an interactive slippy map on your website, and using a mapserver to come up with those images in real time is just slow. that's when you use a tiling software to precompute the tiles and serve them. This tiling software can be a mapserver in itself, or something that works with your mapserver, or could be built in the mapserver itself. When you read about TileMill, or TileStream or cached Map services from ArcGIS server, they are used for this purpose




  4. When you were creating the slippy map, how do you handle the various useractions, and calls to the server? Things like zooming in on double click, or calling for new tiles on panning the map, to showing the coordinates on mouseover, as well as stacking images/map/services from different servers/service? You use a client side mapping library like OpenLayers, Leaflet, ArcGIS server's Javascript API and so on.




So to conclude, these different softwares are there to fulfill different needs and solve different problems. You usually need more than one software and usually work with a stack of them, to make something easy and simple looking slippy map on a webpage.



QGIS contours smoothing and cleaning


Trying to utilize the contour extraction QGIS tool but having trouble creating smoothed contours. The smooth, generalize, and dissolve tools have not worked for me. Also the plugin for contours appears to no longer be working? screen shot




Seeking algorithm to place maximum number of points within constrained area at minimum spacing?


I have a polygon layer that describes a constraint; I wish to add points within this area. I want to add as many points as possible, but they must have a minimum spacing between them. Is it possible to do this with GIS?


To clarify, it would be best if an ordered grid could be generated, as this would guarantee the most points. However the constraint would rarely allow this, and it may be preferable to remove points to allow an offset to better fit within the constraint.



Answer



I think this could be thought of as a "packing" problem.


If so, you might want to try a Genetic Algorithm, perhaps one similar to that in On Genetic Algorithms for the Packing of Polygons.


Wednesday 28 December 2016

arcgis 10.0 - Including variable in where clause of arcpy.Select_analysis()?


I am trying to loop through a shapefile, selecting each feature in turn and copying it to a temporary shapefile to by included in a union analysis. I'm using a cursor to find the ID name for each feature which I'm setting to a varible 'Name'. Whenever I try to use this variable as part of the where clause in arcpy.Select_analysis I get an error:


ExecuteError: ERROR 999999: Error executing function. An invalid SQL statement was used. An invalid SQL statement was used. Failed to execute (Select).


The code I'm using is:


Name = 101
where = "\'\"StudyID\" = \\'"+str(Name)+"\\'\'"

arcpy.Select_analysis("C:\\input.shp", "C:\\output.shp", where)

If I type it out without using the variables:


arcpy.Select_analysis("C:\\input.shp", "C:\\output.shp", '"StudyID" = \'101\'')

it works fine


What to I need to do to fit the variable into the sql statement?



Answer



Another, maybe simpler, way is:


where = '"StudyID" = ' + "'%s'" %Name

coordinate system - What is the current Web Mercator projection code?


The web mercator projection, popularised by Google Maps, seems to be given a new EPSG code every couple of years.


EPSG:900913 (Google in calculator text) was an unofficial code



EPSG:3785 - is the projection I currently use for my datasets


However this blog post suggests that the code is now EPSG:3857.


This projection can be found on the EPSG site but it seems to also use the code SR-ORG:6864 and claims "it is not a recognized geodetic system: see WGS 84 / World Mercator (CRS code 3395)."


So what is the official code to use?



Answer



This has been an annoying problem for a while, and hopefully will no longer be an issue.


3857 looks to be the current and correct code (I hope, that's what all my tile caches are in!).


Update 9/7/11 - as noted by Vadim below in comments, Esri did in fact revert back to 102100 from 3857 at Service Pack 1. Oddly, ArcGIS Server with SP1 applied returns a WKID of 102100 for a web mercator map service, but in the Services Directory, a web mercator map service has a spatial reference of '102100 (3857)'. EPSG has no entry for 102100. Not sure why Esri has chosen this route, but Esri's 102100 and 3857 are treated as equivalent by their products.


EPSG - (no direct link)




Code: EPSG::2008.114 Reporter: OGP Request: Revisit spherical mercator used for some web mapping applications Actions Taken: Deprecated ellipsoid 7059, datum 6055, method 9841, projection 19847, CRSs 4055 and 3785, tfm 15973. Added methods 1024 and 1026, proj 3856 and projCRS 3857. Entity Types Affected: Ellipsoid; Datum; Coordinate Operation Method; Coordinate Operation; Coordinate Reference System Codes Affected: 7059; 6055; 9841; 15973 19847; 4055 3785 Report Date (UTC): 2008-12-11 Closed Date (UTC): 2009-02-10



OpenLayers -



Today, there is an officially registered EPSG code 3857 whose projection is identical to EPSG:900913. (http://www.epsg-registry.org/export.htm?gml=urn:ogc:def:crs:EPSG::3857). So, if you need to combine overlay layers that are using either an alias or the official EPSG code with an OpenLayers SphericalMercator layer, you have to make sure that OpenLayers requests EPSG:3857 or other alias in stead of EPSG:900913.



ESRI -



@jamie - For a long time EPSG refused to assign a code to this coordinate system; therefore ESRI created the WKID codes 102113 and 102100. When EPSG did assign a code, they used 3785, but later changed it to 3857. ArcGIS 10 will follow ESRI practice of using an EPSG code when one exists, and will advertise the coordinate system of the service as 3857. ArcGIS 10 and all Web APIs are being designed to recognize EPSG 3857, ESRI WKID 102113, and ESRI WKID 102100 as equivalent.




coordinate system - Reprojecting base layers in OpenLayers


I have vector/topo/image tiled maps for some places in North America which were published as ArcGIS Server REST service. These maps (A) are based in EPSG:26912 projection. Now I want to use these as base map for my OpenLayers web app; besides, I also want to consume other base map sources (B), e.g., OpenStreetmap, Google Maps, ESRI, etc. Since most of these maps are in EPSG:900913 which is spherical Mercator projection to my knowledge, if I just add these base map layers into OpenLayers map component, the group A and group B won't overlay correctly, which means they are displayed as separate maps.


I know this is something related to projection, and might need some code with proj4js, however, as a newbie, I don't really know what to start with.


Basically, I want to put OSM/Google maps as the base map for the whole world in tier 1, then put EPSG:26912 base maps for some places in North America in tier 2, and put other WMS or WFS in the top tier.


do I need to transform EPSG:26912 to EPSG:900913 or opposite? I guess I should use EPSG:900913 as the base projection.



Answer




OpenLayers can transform vector layers (like WFS). If your vector layer is in EPSG:900913 or EPSG:4326 OpenLayers can handle the transformation itself, otherwise, it needs Proj4js included.


There are examples of how to use OpenLayers with Proj4js.


Raster Layers cannot be transformed by OpenLayers. If you need to include them in a different projection, you have to reproject them by yourself or use a reprojecting WMS proxy like GeoWebCache.


Google Earth, Google satellite, and Bing aerial accuracy


Does anybody know the accuracy of Google Earth, Google satellite, and Bing aerials, and at what scale? I think that you have different/better accuracy if you have subscribed to their professional version.




How to connect OpenLayers to PostGIS data?


I have read in OpenLayers how to load KML, GeoJSON etc. vector files in OpenLayers. But how can I connect to PostGIS data. If not, why should I upload my data to PostGIS then instead of directly putting my data using WebServices. Am I missing anything?


And I want to load this vector data so that one can see the attributes by clicking on the features. I need PostGIS because in future I want to enable queries. I think it's only possible by maintaining a database like PostGIS. But how using OpenLayers?


I have read OpenLayers Beginner's Guide, but nowhere it is mentioned. Please help!!



Answer




Unfortunately you cannot connect a web page directly to a database because of security concerns, normally you need some middleware to join the two together.


So for your example and if you want to stick with Open Source software you could easily use GeoServer as your geographic server to serve your data from your PostGIS database to your OpenLayers HTML web page.


Why this is good is that GeoServer will serve the data in a standard way, OGC Web Map Server (WMS) or Web Feature Service (WFS) and both are understood by OpenLayers and many other APIs (ESRI ones, Leaflet etc) and other Desktop GIS software (ESRI, MapInfo, QGIS, uDig etc)


So I would look at the GeoServer documentation getting started which walks you through how to connect GeoServer to PostGIS and then serve the data.


http://docs.geoserver.org/stable/en/user/gettingstarted/index.html


If you are new to these types of things there is no better place to start than the OpenGeo tutorials


http://workshops.opengeo.org/


I would definitely stick with your data being in PostGIS if


a) you have a lot of data b) you want to run queries (like you do) and c) if lots of people are going to use your app


If you are going to run queries I would look at Web Processing Services (WPS) these are still "new" but these are supported in GeoServer.



If however you have a small amount of data and can pre-run the queries and then just use OpenLayers to display then you should stick to using GeoJSON from a flat file or even look at TopoJSON which is gaining alot of popularity with D3 javascript library - see here http://bost.ocks.org/mike/map/ Mike Bostock's tutorial is great.


Hope that helps


Making QGIS toolbox tool available directly from toolbar?


At QGIS 2.18.16, I commonly use a tool that is only available (AFAIK) through the Toolbox (Processing > Toolbox > QGIS Geoalgorithms > Vector table tools > Frequency analysis).


I would like to have this tool directly available on the toolbar, as a button or dropdown, so that it is more easily and quickly accessed.


A Google search turned up nothing.


How do I accomplish this?



Answer




You could activate the 'Commander' utility of Processing: from the menu, choose Processing -> Commander to activate it.


After that you can choose the geoalgorithm from a dropdown menu (or type its beginning letters) and then press ENTER on the keyboard.


enter image description here


qgis - How to make survey lines within polygon?


I am trying to figure out how to make survey lines within a polygon.


So first I make a polygon. Now I need to fill this polygon with lines, where I can choose the spacing between the lines, and the angle of the lines. All the lines have the same angle and spacing.


I am trying to do it in QGIS, but I also have ArcGIS for Desktop.



Does anybody have an idea of how to do this?




remote sensing - Two Different Areas for the same Image in Google Earth Engine


IMPORTS:



// Region of interest (a quadrilateral in south-central Washington/Oregon)
// and the Landsat 8 TOA collection with the Fmask QA band used by both
// code samples below.
var polygon = /* color: #d63000 */ee.Geometry.Polygon(
[[[-119.33364769821117, 46.05123532178373],
[-119.3233620672313, 45.869732769408905],
[-119.04111088542663, 45.873079023065166],
[-119.0396574679861, 46.045448840018565]]]),
landsat8 = ee.ImageCollection("LANDSAT/LC8_L1T_TOA_FMASK");

FIRST CODE: In the First Code, Collection is Filtered and Mosaic is applied even though it is only one image.


var PIXEL_SCALE = 30; // Meters. Resolution of most Landsat 8 bands
var PIXEL_AREA = PIXEL_SCALE * PIXEL_SCALE; // Square meters.

// Fmask classification values
var FMASK_CLEAR_GROUND = 0;
var FMASK_WATER = 1;
var FMASK_CLOUD_SHADOW = 2;
var FMASK_SNOW = 3;
var FMASK_CLOUD = 4;

// Build one mosaic from every scene intersecting the polygon in the date
// window. NOTE(review): a mosaic carries the collection's default
// projection (WGS84), not the source scene's native UTM projection — this
// is what makes the two code samples disagree (see the accepted answer).
var mosaic = landsat8
  .filterBounds(polygon)
  .filterDate('2016-08-01', '2016-08-30')
  .mosaic();

// Update the mask on our mosaic to mask cloud and cloud shadow pixels
var fmask = mosaic.select('fmask');
var cloudMask = fmask.neq(FMASK_CLOUD).and(fmask.neq(FMASK_CLOUD_SHADOW));
var maskedMosaic = mosaic.updateMask(cloudMask);

// The palette string literal was broken across lines in the original
// paste; it must be a single string.
Map.addLayer(fmask,
  {min: 0, max: 4, palette: 'green, blue, black, cyan, white'},
  'Fmask');
Map.addLayer(maskedMosaic.select('B4'),
  {min: 0, max: 0.5, palette: 'yellow, green'},
  'Masked NIR');
Map.setCenter(-119.34, 45.97, 8);

// Calculate the number of pixels of each classification in our polygon
var regionCoverHistogram = mosaic.select('fmask')
  .reduceRegion(ee.Reducer.frequencyHistogram(), polygon, PIXEL_SCALE);
print('Fmask class pixel count within region', regionCoverHistogram);

// Convert the water-class pixel count into an area in square meters.
var waterPixelCount = ee.Dictionary(regionCoverHistogram.get('fmask'))
  .get(FMASK_WATER.toString());
var waterArea = ee.Number(waterPixelCount).multiply(PIXEL_AREA);
print('Water Area (sq meters) in region', waterArea);

SECOND CODE:


Whereas in the Second Code, I am directly taking the Image ID of the same image and running the same algorithm.


var PIXEL_SCALE = 30; // Meters. Resolution of most Landsat 8 bands
var PIXEL_AREA = PIXEL_SCALE * PIXEL_SCALE; // Square meters.

// Fmask classification values
var FMASK_CLEAR_GROUND = 0;
var FMASK_WATER = 1;
var FMASK_CLOUD_SHADOW = 2;
var FMASK_SNOW = 3;
var FMASK_CLOUD = 4;

// Load the single scene directly by asset ID. NOTE(review): unlike the
// mosaic in the first sample, this image keeps its native UTM projection,
// which is why reduceRegion gives a different pixel count here.
var mosaic = ee.Image('LANDSAT/LC8_L1T_TOA_FMASK/LC80440282016227LGN00');

// Update the mask on our mosaic to mask cloud and cloud shadow pixels
var fmask = mosaic.select('fmask');
var cloudMask = fmask.neq(FMASK_CLOUD).and(fmask.neq(FMASK_CLOUD_SHADOW));
var maskedMosaic = mosaic.updateMask(cloudMask);

// The palette string literal was broken across lines in the original
// paste; it must be a single string.
Map.addLayer(fmask,
  {min: 0, max: 4, palette: 'green, blue, black, cyan, white'},
  'Fmask');
Map.addLayer(maskedMosaic.select('B4'),
  {min: 0, max: 0.5, palette: 'yellow, green'},
  'Masked NIR');
Map.setCenter(-119.34, 45.97, 8);

//
// Calculating Region Cover Statistics
//

// Calculate the number of pixels of each classification in our polygon
var regionCoverHistogram = mosaic.select('fmask')
  .reduceRegion(ee.Reducer.frequencyHistogram(), polygon, PIXEL_SCALE);
print('Fmask class pixel count within region', regionCoverHistogram);

// Convert the water-class pixel count into an area in square meters.
var waterPixelCount = ee.Dictionary(regionCoverHistogram.get('fmask'))
  .get(FMASK_WATER.toString());
var waterArea = ee.Number(waterPixelCount).multiply(PIXEL_AREA);
print('Water Area (sq meters) in region', waterArea);


I am getting two Different Water Areas for the same image. Why?



Answer



You are getting different answers because the first code is using the default projection of the mosaic (WGS84) and the second one is using the default projection of the image (EPSG:32628). The reprojection is significantly altering the QA band.


You should always specify a crs to reduceRegion so you know in what projection the calculation is being done.


topology - Python - gdal.Polygonize produce invalid geometries


gdal.Polygonize is making invalid geometries and later in processing chain an error is reported stating that there are Ring Self-Intersections. Not one, quite a few so manual solution is not an option in my case. Is there any solution to this problem? I am using Python 2.7 and QGIS. Hot spot (ring self-intersection) is circled in red. enter image description here


Question is the same like QGIS Raster to Polygons produce invalid geometries, but I am looking for some solution using Python and its libraries.




qgis - Reprojecting between NAD27 and WGS 84?


I am trying to perform a reprojection for data with datum Nad27 to WGS84 datum. According to AndreJ in some previous questions in Stackexchange (CRS reprojection for instance) QGIS uses internally by default the ntv2 transformation from NAD27 to WGS84, that should be very exact. I tested with the custom crs calculation tool. First a lat lon point at 99w , 19n with parameters for EPSG32614 looks like this:



enter image description here


The same point (which is near Mexico City) with EPSG 26714 parameters looks like this:


enter image description here


There is a difference of 126 meters on the north coordinate but no difference in the east coordinate which is odd. I tested a custom crs I created with a towgs84 argument as follows:


enter image description here


Now I have a difference for both the east and north coordinate that should be expected for the datum change. Adding the same towgs84 to EPSG 26714 makes no difference with figure 2


enter image description here


Making the reprojection of the same coordinates in GPS Babel gives me that the point in UTM NAD27 is 500032mE, 2,100,627mN. There is almost no difference with my custom crs. So I wonder how to correctly make the reprojection of the data.



Answer



The +datum=nad27 parameter overrides the +towgs84 parameter.



GDAL can only do grid shift, or Helmert/Molodensky transformation, but not both. Since the grid is more correct in most cases, the developers decided to skip the 3-/7-parameter transformation if both options are given.


The transformation for NAD27 is stored in several grid files, which can be found in the proj/share folder and loaded into QGIS. The main source is the conus file for the Conterminous United States:


enter image description here


The extent of the file is: -131.125,19.875 : -62.875,50.125


The grid is a two-band raster file, containing the shift value for every lat/lon coordinate.


There are similar solutions for Canada (the first ntv2 grid), Alaska and Hawaii, but not for the rest of Northern America. Since your point falls outside the extent, the grid might not be used at all.


According to http://forums.esri.com/Thread.asp?c=93&f=984&t=273181 Mexico uses a different method for converting between NAD27 and WGS84/ITRF92. An online conversion tool exists, but the parameters are not known.


You might also take a look at this Grids&Datums article on Mexico.




EDIT



To visualize the applied datum shifts, I have compared the NADCON grid shift with the online tool at INEGI:


enter image description here


Red contour lines are Eastings, blue ones Northings in arc seconds (1 sec approx. 30m). The grid of the INEGI online tool distorts heavily beyond the coastline, so I have clipped it to the onshore region. I would not recommend to use it for the islands belonging to Mexico. You see that the datum shift distorts even on the mainland close to the Pacific, this might be influenced by tectonic moves.


The NADCON grid in fainted red and blue ends at 20°N, but it should better not be used outside the U.S.


For comparison, this is how the datum shift of the NIMA 3-parameters you prefer looks like:


enter image description here


Tuesday 27 December 2016

sql server - Any Good Map Rendering Engines for SqlGeometry/SqlGeography


Can anyone suggest good map rendering engines for MS SQL Server Spatial types (SqlGeometry/SqlGeography)? By rendering, I mean plot the geometries to a common image format such as png or jpeg with very simple symbology.


The 2 I have experimented with thus far are: 1. SharpMap 2. MapServer with the MSSql plugin



I just want to see if there is anything else out there that I might have missed.



Answer



MapGuide OS has the ability to serve MSSQL Server 2008 data.


postgis - Creating many origin-destination routes with pgRouting


I'm using a PostGIS / pgRouting database setup with a road network. I'm trying to create an origin-destination table with the complete route information, so not just the sum of the length, like in this post.


I'm trying to get something like this as output table:


Origin | Destination | vertex_id | cost
1 2 415 14.2
1 2 508 4.3
1 3 919 2.4
1 3 1024 6.8


The following code for calculating a single route works fine, but how do I automate this for a lot of origin-destination pairs?


-- Compute one route with pgRouting's shortest_path(): the inner SQL maps
-- the network table onto the (id, source, target, cost) columns the
-- function expects; the trailing arguments are the start vertex, end
-- vertex, and the directed / has_reverse_cost flags.
SELECT * FROM shortest_path('
SELECT gid AS id,
start_id::int4 AS source,
end_id::int4 AS target,
length::float8 AS cost
FROM network',
1, -- start_id
2, -- end_id

false, false);

I've tried something with subqueries, like this where I'm getting the start_ids from table nodes:


-- NOTE(review): kept as quoted in the question — this form is broken on
-- purpose. shortest_path() returns multiple rows and columns, so using
-- it as a scalar subquery in the select list raises
-- "ERROR: subquery must return only one column".
SELECT id,
(SELECT * FROM shortest_path('
SELECT gid AS id,
start_id::int4 AS source,
end_id::int4 AS target,
length::float8 AS cost
FROM network',

id::int4, -- start_id
450969, -- end_id
false, false))
FROM nodes;

But this doesn't work because the subquery returns multiple rows (as expected): ERROR: subquery must return only one column. Any help?



Answer



For completeness, this is the method I used:



  • Creating the routes table from origin 'id' to a single destination:



-- Build a routes table: one shortest_path() call per origin node in
-- "nodes", all routed to the single destination vertex 935560. Each
-- output row's "segment" is a composite value '(vertex_id,edge_id,cost)'.
CREATE TABLE routes AS
SELECT gid, id,
shortest_path('
SELECT gid AS id,
start_id::int4 AS source,
end_id::int4 AS target,
length::float8 As cost,
* FROM network',
id::int4, -- start_id

935560, -- end_id
false, false) AS segment
FROM nodes;

I'm using a single destination (end_id) here, because of the time needed for calculation. This query takes a couple of minutes for a few hundred start_ids. The result is a table with the route information in field 'segment', in the format (vertex_id, edge_id, cost).



  • Then I split the segment string into the parts and joined this with my links table using the edge_id:


-- Join each route segment back onto the links table. The "segment"
-- composite value '(vertex_id,edge_id,cost)' is split into its parts
-- with split_part(), the surrounding parentheses stripped with btrim(),
-- and the result joined on edge_id.
-- Fixed: the original listing was missing the commas between the three
-- select expressions, which is a SQL syntax error.
CREATE TABLE merge_routes_links AS
SELECT * FROM links
JOIN (
SELECT btrim(split_part(segment::varchar, ',', 1), '()') AS vertex_id,
       btrim(split_part(segment::varchar, ',', 2), '()') AS edge_id,
       btrim(split_part(segment::varchar, ',', 3), '()') AS cost
FROM routes) AS route
ON route.edge_id::int4 = links.gid::int4;


  • Finally I calculated the total length of the routes from origin id to destination and joined this to the merge_routes_links table.



-- Total route length per origin id: sum the per-link lengths of every
-- joined segment belonging to that route.
SELECT id,
sum(length::float8) AS length_route
FROM merge_routes_links
GROUP BY id;

Rearranging display order of attribute table fields using QGIS?


Using QGIS 2.10.1, I open an attribute table. How do I rearrange the field display order? For example, a table's default display order (from left to right) is State, County, City. But I would like to see them displayed as City, State, County.


I'm not interested in rearranging the table's internal structure, rather just the table's display.



Answer



You can't, as far as I know, at least in table view. But you can customise forms and use them in the attribute editor to make it easier to view and edit records.


UPDATE as of 2.18 this is possible, see answer from Dan.


Form-based attribute table


There's an alternative form-based view which you might find easier to use (less scrolling than the normal table view).


If you look in the bottom-right corner of the Attribute Table, there's a couple of small icons. Very small icons. Blink and you'll miss them :)



icons to switch between form and table views


Clicking on one of these will show the form-based view, the other the table based view. Here's the standard form view.


attribute table form-based view


Easier to use than your standard 'spreadsheet' view, but still not sorted.


Drag and drop form editor


You can create custom forms for use with attribute editor.


Go into layer properties, "Fields" panel. Change from "Autogenerate" to "Drag and drop designer".


Here, I've used the Natural Earth dataset, which has a lot of columns. You create 'groups' (categories) on the right hand panel using the plus (+) icon. In my case I chose categories 'Population', 'Coding' and 'Names'. You then drag the fields over and drop them on the corresponding category.


using drag-and-drop form editor in qgis


Once the fields are copied over, you can nudge them up and down with the 'up' and 'down' arrow icons. This changes the order.



Now, when you go into attribute editor in 'form' mode, the form editor has one tab per category, and the fields in whichever order you want.


example of custom form in attribute editor


It doesn't affect the default 'table' view, though. Hope that helps!


qgis - can not delete file after using runalg function


I my plugin I use the runalg function:


processing.runalg("qgis:clip",filepath + "/temp2.shp", Layer, filepath + '/' + layerName)
vlayer = QgsVectorLayer(filepath + '/' + layerName + '.shp',layerName, "ogr")
QgsMapLayerRegistry.instance().addMapLayer(vlayer)

Afterwards I want to delete temp2.shp with


os.remove(filepath + "/temp2.shp") 

but I get the error WindowsError: [Error 32] The process cannot access the file because it is being used by another process: 'C:/Users/toke.nielsen/Desktop/temp/temp2.shp'



Is there a way to unlock temp2.shp after using runalg? And is there a way to clear all variables and memory when the plugin is done?


I have tried this, but it is not working with QGIS 2.2:


Releasing PyQGIS file locks?




openlayers - Missing LayerSwitcher in OpenLayers3?


I was wondering if there is a LayerSwitcher control in OpenLayers 3?


I read that ol3 is a complete rewrite. But I can't seem to find the equivalent of LayerSwitcher from http://openlayers.org/en/latest/apidoc/ol.control.html. I don't see how one can turn on and off layers without this control. Is LayerSwitcher renamed to something else, missing or is the documentation incomplete?



If OpenLayers3 does not have LayerSwitcher, does anyone know of an example that implements a custom control like the old LayerSwitcher?



Answer



This question was asked on Twitter recently https://twitter.com/RemiBovard/status/525028570780139520


If you follow the answer, at the moment, integrating layer switcher in the core is not the priority but there is an available component at https://github.com/walkermatt/ol3-layerswitcher


You can also take a look on "The book of OpenLayers" samples (by @acanimal) for some custom legend implementations.


How to simplify streets such that separate driving tracks are reduced?


Streets in OpenStreetMap have more detail than I need. For instance I do not need separate street tracks for each driving direction; instead I just want to show the general path of the street. Example:


OSM linestrings for a street



OSM linestrings for a street


Simplified


Simplified linestring(s) (here it is just one, but in case of sidearms there would be > 1)


input:  collection of linestrings
output: collection of linestrings
simplified lines = f(osm lines)
f = ?

the output is a collection as well, since sidearms should remain available.


How do I programmatically simplify such linestring collections?



Would like to use JTS, NTS, GEOS but not PostGIS if possible.



Answer



Hard to imagine how you can reach what you want automatically. Perhaps by deleting one of the oneway tagged streets if there exist two of them with the same name and close enough?


Often used workflow starts from unioned linestrings followed by buffering and skeletonizing. This PhD thesis is good reading http://paduaresearch.cab.unipd.it/4077/1/Tesi-Savino-2011.pdf I also tried the workflow with OpenJUMP 1.7.0 and the Skeletonizer plugin http://kent.dl.sourceforge.net/project/jump-pilot/OpenJUMP_plugins/More%20Plugins/JUMP%20Skeletonizer%20Plugin/skeletonizer-1.0.zip The plugin works but the result is one long street, not a reduced number of short ones. I found it necessary to read the manual before using the plugin. It is inside the zip in the doc folder.


arcpy - Ways to Speed Up Python Scripts Running As ArcGIS Tools



This is a pretty general question. I just wondering what tips and tricks GIS programmers have used to speed up arcpy scripts that you import into the toolbox and run.



I work most everyday writing little scripts to help non-GIS users at my office process GIS data. I have found that ArcGIS 10.0 processing in general is slower than 9.3.1 and sometimes it gets even slower when running a python script.


I'm going to list a particular example of a script that takes over 24 hours to run. It's a loop that tabulates the area of a raster in a buffer for each shape in the buffer. The buffer has about 7000 shapes. I don't believe it should run this long.


# Tabulate raster area for one buffer shape at a time: select record x,
# run TabulateArea on just that selection, write one .dbf per iteration.
# NOTE(review): indentation was lost when this snippet was pasted; x, z,
# layerRecords etc. are initialised outside the quoted excerpt. The label
# counts against ELClayerRecords while the loop tests layerRecords —
# presumably the same count; confirm against the full script.
while x <= layerRecords:

arcpy.SetProgressorLabel("Tabulating Row: " + str(x) + " of " + str(ELClayerRecords))
arcpy.SelectLayerByAttribute_management(Buff,"NEW_SELECTION", "Recno = " + str(x)) # Selecting the record
TabulateArea(Buff, "Recno", MatGRID, "VALUE", ScratchWS + "/tab" + str(z) +".dbf", nMatGRIDc) # Tabulate the area of the single row

arcpy.AddMessage (" - Row: " + str(x) + " completed")
x = x + 1

z = z + 1

Before anyone says it, I have run tabulate area on the entire buffer, but it produces errors if run on more then 1 record. It's a flawed tool, but I have to use it.


Anyway, if anyone has any ideas on how to optimize or speed up this script, it would be most appreciated. Otherwise, do you have any speed-up tricks for Python when used in ArcGIS?



Answer



A couple potential suggestions to help speed up your process are:




  1. Select Layer By Attribute can be in a Python-only script, without ever launching ArcGIS Desktop. You need to convert your "buff" reference from a file-based reference to an "ArcGIS layer" reference, which ArcGIS can process selection queries against. Use arcpy.MakeFeatureLayer_management("buff","buff_lyr") above your "while" loop, and then change your references below the while loop to use "buff_lyr".





  2. Process as much of your GP operations using the in_memory workspace as possible... Use arcpy.CopyFeatures_management(shapefile, "in_memory\memFeatureClass") to move your source into memory. This only works well if you have enough RAM to read all of the feature class(es) that you need into memory. Beware, however, that there are some GP operations that cannot run using the in_memory workspace (Eg: the Project tool).




From ArcGIS 9.3 online help article "Intermediate data and the scratch workspace" (note, this language was removed from the 10.0 & 10.1 help):



NOTE: Only tables and feature classes (points, lines, polygons) can be written to the in_memory workspace. The in_memory workspace does not support extended geodatabase elements such as subtypes, domains, representations, topologies, geometric networks and network datasets. Only simple features and tables can be written.



From ArcGIS 10.1 online help article "Using in-memory workspace":




The following considerations must be made in deciding to write output to the in-memory workspace:



  • Data written to the in-memory workspace is temporary and will be deleted when the application is closed.

  • Tables, feature classes, and rasters can be written to the in-memory workspace.

  • The in-memory workspace does not support extended geodatabase elements such as subtypes, domains, representations, topologies, geometric networks, and network datasets.

  • Feature datasets or folders cannot be created in the in-memory workspace.



Monday 26 December 2016

Editing .tiff rasters in Python


I am trying to edit .tif rasters in python, but I have some issues regarding the tiff to array conversion. I read there are multiple options for opening rasters (gdal, PIL, matplotlib), but which of those is the best for allowing me to convert to array, edit (apply local filters), then convert back to .tif?



For example, I get a strange result when opening arrays from a .tif raster using PIL library.


The code looks like this:


import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as cm
import numpy as np
import pywt as pw
import tkFileDialog as tk
from PIL import Image


def fct(fisier):
    """Open a raster with PIL, convert it to a float array, and display it.

    Parameters: fisier -- an open (binary) file object or path accepted
    by PIL's Image.open.
    Returns: (arr, img) -- the NumPy array and the PIL image object.
    """
    img = Image.open(fisier)
    # Builtin float instead of np.float: the alias was deprecated in
    # NumPy 1.20 and later removed.
    arr = np.array(img, dtype=float)
    # NOTE(review): extreme nodata fill values (e.g. -2147483648, as seen
    # in the printed output) dominate the grey-level stretch, which is why
    # the plot renders all white; mask them before display if needed.
    plt.imshow(arr, cmap=cm.Greys_r)
    plt.show()
    print(arr)
    return arr, img
file = tk.askopenfile(initialdir='C:/temp', mode='rb')
fct(file)


But the array that is displayd in all white, and has this form:


[[ -2.14748365e+09  -2.14748365e+09  -2.14748365e+09 ...,   0.00000000e+00
0.00000000e+00 6.19762981e+08]

What do I need to do in order to open the tiff raster as an array, so I can edit it further?




raster - Clustering 30m pixels into 120m pixels, fishnet using QGIS?


I have a raster file representing tree cover loss made of 30m*30m pixels taking the value 0 or 1. 0 means no loss and 1 means tree cover loss. I want to cluster these pixels into bigger pixels of the size 120m*120m (i.e. 4 pixels by 4 pixels so 16 pixels in each cluster). I plan to do this for computational reasons as by doing so I reduce the number of observations by a factor of 16. My goal is to measure the share of 30m pixels with value 1 within each 120m cluster of pixels.


I am not sure how to do this.


Here is a solution I thought of:




  • On QGIS I thought of creating a fishnet of 4 by 4 pixels and then doing zonal statistics within each cell. BUT, I could not find how to create this 4 by 4 pixels grid AND I found no solution yet on how to match perfectly cell boundaries with pixel extent (in order to have exactly 16 pixels per fishnet cell, see red cells below).


enter image description here



Answer



So I found a second best solution to the question asked above.


Using R one can aggregate raster pixels by a factor, in my case a factor of 4, and then export the newly created raster. This raster can subsequently be used in QGIS.


Please note that this can take a very long time. My raster is a 80000*40000 pixels so aggregation takes about one hour per raster file.


# Aggregate 4x4 blocks of 30 m pixels into one 120 m pixel. With
# fun = mean on a 0/1 tree-loss raster, each output cell holds the share
# of loss pixels in its 16-pixel block.
library(raster)
DEM <- raster("/Users/.../input.tif")

dem_low <- aggregate(DEM, fact = 4, fun = mean)
# TFW=YES writes a world file alongside the GeoTIFF for use in QGIS.
writeRaster(dem_low,'/Users/output.tif',options=c('TFW=YES'))
rm(DEM,dem_low)

geoserver - DWithin WFS filter is not working


I'm new to GeoServer and I want to buffer an area around a point. I'm using a DWithin WFS filter but not getting any results. Please tell me the correct format to use DWithin.


My GET request:


http://localhost:8085/geoserver/wfs?request=GetFeature&version=1.0.0&service=WFS&outputformat=json&cql_filter=DWITHIN(the_geom,POINT(76.7,8.5,osm_id=16173236))&CQL_FILTER=typein(hospital)&typename=india_road_nw:points&


Result obtained:


no buffering is done.. all the points are obtained. What to do?




arcgis 10.2 - ArcPy function works in ArcMap Python window but not from standalone script?


So I think I'm running into a data/schema lock issue, but I have no idea how to tell, or how to fix it for that matter. Here's the setup:


I have this chain of gp functions:


# Geoprocessing chain: sample elevations at stations, average air
# temperature per station, join the elevations on, then keep stations
# with a non-null sampled elevation.
# NOTE(review): paths like scratchGDB + "\station_elevations" only work
# because "\s" is not a recognised escape in Python — safer to use raw
# strings or os.path.join; confirm before relying on it.
#extract elevations to stations
arcpy.gp.ExtractValuesToPoints_sa(station_locations, elevation_raster, scratchGDB + "\station_elevations", "NONE", "VALUE_ONLY")

#calculate the mean air temperature for each station over the n-hour time period
arcpy.Statistics_analysis(data_table, scratchGDB + "\station_statistics", "air_temperature MEAN", "site_key")
#join elevations to the statistics table created above
arcpy.JoinField_management(scratchGDB + "\station_statistics", "site_key", scratchGDB + "\station_elevations", "Site_Key", "RASTERVALU")
#select rows in "station_statistics" that have a positive elevation (non-null)
# NOTE(review): "stats1" has no workspace prefix here, unlike the other
# outputs — the accepted answer fixes this by prepending scratchGDB.
arcpy.TableSelect_analysis(scratchGDB + "\station_statistics", "stats1", "RASTERVALU IS NOT NULL")

scratchGDB is set from:


scratchGDB = arcpy.env.scratchGDB


When I run these four gp functions one at a time in the ArcMap python window, they run successfully. But when I run the debug in PyScripter on the stand-alone script, I get this error from the "arcpy.TableSelect_analysis" function:


table select error


Looking into "ERROR 000210" is what makes me think it's a data lock issue, but I'd like to know what's making it work in the python window and not work in the PyScripter IDE?



Answer



I don't see where you set the env so try changing your Table Select line to:


arcpy.TableSelect_analysis(scratchGDB + "\station_statistics", scratchGDB + "\stats1", "RASTERVALU IS NOT NULL")

python - Creating a custom "flat" projection definition in proj4


I want to create a simple, custom projection in proj4. I need it to be completly flat ( rectangular coordinates ). Starts in point (0,0), meters as unit, everything else - as simple as possible. What parameters my definition should have to achieve it?


Am I even able to do it? Ignoring the curvature of the Earth?




qgis - Merge single points to multi point geometry


Is it possible to transform a point shapefile in single part geometry to a shapefile with multi-point geometry based on an attribute. e.g. in the file are 50.000 records in 700 categories, now I want to create a multi-part geometry point file.


The result should be a shapefile with 700 records (=categories in the former shape).


Is this possible in QGIS, Spatialite or something else?



Answer



I believe you can do this through default top menu:


Vector - Geometry Tools - Singleparts to Multipart...


Though above the input there is written "Input line or polygon vector layer" I just tested it with point layer and it did work.


Also once you create multipoint layer to edit - add / delete parts (point from multipoint) you need to use Advanced Digitizing (right click on top bar to activate it) and use tools Add Part / Delete Part.



Sunday 25 December 2016

qgis - Associating file with data point?




Is it possible with GRASS or QGIS to link a data point on a map with a report of some kind (photo, pdf, graph etc)?


I am interested in this feature, to facilitate data entry from the field, understanding of site conditions, tracking data, presentation etc.




file geodatabase - Setting default gdb from ArcPy?


I created a new gdb by


# Create a new file geodatabase and point the arcpy environment's current
# workspace at it (this sets the session workspace, not the application's
# default geodatabase).
arcpy.CreateFileGDB_management('e:/Rstack', 'new.gdb')
arcpy.env.workspace = 'e:/Rstack/new.gdb'

But how to set this new gdb as my default gdb by arcpy, without opening ArcGIS desktop? Setting env workspace does not seem to work here.




Drawing arrow on line using SLD of GeoServer?


I have a line layer in geoserver as in the 1st image.


How can I draw an arrow on the line like in the 2nd image?


The line.rar includes a shapefile and a sld file.


Can edit a sample for me?


line.rar(download)


enter image description here enter image description here




Answer



The GeoServer documentations states how to extract start and end points with geometry transformations (just use the end- point rule). The code example provided uses square as marks, but you could always replace this with e.g. the shape symbol shape://oarrow.


EDIT: I forgot to mention how to rotate the triangle correctly (couldn't find it in the documentation, but took it from page 38 in this presentation)!




the_geom



EDIT2: Just wanted to make sure everything works as described, here is a code sample based on GeoServer's default blue line SLD style:




xsi:schemaLocation="http://www.opengis.net/sld StyledLayerDescriptor.xsd"
xmlns="http://www.opengis.net/sld"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">

Blue arrows


A blue line with end arrows


Blue_Arrow_Line


#0000FF
2






the_geom




shape://oarrow


#0000FF
0.5


#0000FF
2


30



the_geom











This is how it should look:


Blue Arrow Example image


data - Is geocomm.com permanently unavailable?



I have been trying to get to some county data from geocomm.com but the website has been down for several days.


Does anyone have information regarding their status?




Style GeoJSON layers depending on attributes in Openlayers 3


I'm trying to mimic most of the great google-vector-layers and leaflet-vector-layers from Jason Sanford (https://github.com/JasonSanford) to easily display, style and add customized popups for data from Postgis-databases. This works in combination with a modified version of the PHP-Database-GeoJSON from Bryan McBride.


So far I have got a lot of things to work (loading PostGIS layers by passing field names and a WHERE clause, creating layer-specific popups, ...) but I'm currently having problems implementing a styling function.


In the examples mentioned above they pass a symbology-object to a constructor that cares about the styling of the layer. I've tried to use and adapt this code so that it could work in Openlayers 3 as well, but I need help from experienced developers. Could someone have a look at the code and point me in the right direction?


The layer definitions, that are used to create new ol3Vectors look like this:


// Layer definition: Natura 2000 Habitats Directive polygons, drawn with a
// single style for every feature.
// NOTE(review): the original paste had HTML-mangled multi-line string
// literals (invalid JS) and mojibake ("RĂ©seau"); the attribution and the
// popup-content template are re-joined into valid strings here — confirm
// the intended markup against the original page.
var n2k_dh_l = new ol3Vector({
    map: map,
    title: "Natura 2000 Habitats Directive",
    attribution: "Réseau Natura 2000 Habitats Directive",
    geotable: "n2k_dh",
    fields: "gid as id,surfha,sitecode,sitename",
    where: "sitename ilike '%moselle%'",
    minResolution: 0.01,
    maxResolution: 50,
    // Popup template; {field} placeholders are replaced per feature.
    content: "BH {sitecode} {sitename} {surfha} ha",
    symbology: {
        type: "single",
        styleOptions: {
            fill: "rgba(100,250,0,0.1)",
            color: "green",
            width: 2
        }
    }
});

// Layer definition: communes, classified by unique values of the "stat"
// attribute (green / red / grey fallback for the empty string).
// NOTE(review): the original paste had HTML-mangled multi-line string
// literals (invalid JS); attribution and the popup-content template are
// re-joined into valid strings here — confirm against the original page.
var communes = new ol3Vector({
    map: map,
    title: "Communes",
    attribution: "Communes ACT",
    geotable: "communes",
    fields: "gid as id,surfha,commune,stat",
    where: "",
    minResolution: 0.01,
    maxResolution: 50,
    // Popup template; {field} placeholders are replaced per feature.
    content: "{commune} {stat} {surfha} ha",
    symbology: {
        type: "unique",
        property: "stat",
        values: [
            {
                value: "XYZ",
                styleOptions: {
                    fill: "rgba(100,250,0,0.1)",
                    color: "green",
                    width: 2
                }
            },
            {
                value: "UVZ",
                styleOptions: {
                    fill: "rgba(100,250,0,0.1)",
                    color: "red",
                    width: 2
                }
            },
            {
                // Empty string matches features whose "stat" is unset.
                value: "",
                styleOptions: {
                    fill: "rgba(100,250,0,0.1)",
                    color: "grey",
                    width: 2
                }
            }
        ]
    }
});

Inside the ol3Vector-constructor I try to implement a function to style features based on the symbology defined in the layer options.


When I pass the style options for a layer with a single style directly like this:


style: new ol.style.Style({
fill: new ol.style.Fill({color: options.symbology.styleOptions.fill}),
stroke: new ol.style.Stroke({

color: options.symbology.styleOptions.color,
width: options.symbology.styleOptions.width
})
}),

everything works fine, but the following styling function _getStyledFeatures inside the ol3Vector function, which I would like to use, causes me a headache.


Maybe someone has an idea how to get that work.


/**
 * Factory producing an ol.layer.Vector whose source lazily loads GeoJSON
 * from ./mapdata/get_geojson.php with a BBOX loading strategy.
 *
 * @param {Object} options - layer configuration: title, attribution,
 *     geotable, fields, where, minResolution, maxResolution, content
 *     (popup template) and symbology (consumed by _getStyledFeatures).
 * @returns {ol.layer.Vector} the configured vector layer.
 */
function ol3Vector(options) {
    options = options || {};

    // Declared with `var` — the original leaked `newLayer` as an
    // implicit global, so concurrently-created layers clobbered it.
    var newLayer = new ol.layer.Vector({
        title: options.title,
        visible: false,
        minResolution: options.minResolution,
        maxResolution: options.maxResolution,
        content: options.content,
        symbology: options.symbology,
        style: _getStyledFeatures,
        source: new ol.source.Vector({
            projection: 'EPSG:4326',
            attributions: [new ol.Attribution({ html: options.attribution })],
            strategy: ol.loadingstrategy.bbox,
            loader: function (extent, resolution, projection) {
                // Re-project the requested extent to the server's
                // lon/lat CRS (renamed: the original shadowed `extent`).
                var bbox4326 = ol.proj.transformExtent(
                    extent,
                    projection.getCode(),
                    ol.proj.get('EPSG:4326').getCode()
                );
                $.ajax({
                    type: 'GET',
                    // Encode user-supplied query parts so spaces, '%'
                    // and '&' in the WHERE clause survive the round trip.
                    url: "./mapdata/get_geojson.php?" +
                        "geotable=" + encodeURIComponent(options.geotable) +
                        "&fields=" + encodeURIComponent(options.fields) +
                        "&where=" + encodeURIComponent(options.where) +
                        "&bbox=" + bbox4326.join(','),
                    context: this
                }).done(function (data) {
                    var format = new ol.format.GeoJSON();
                    this.addFeatures(format.readFeatures(data, {
                        dataProjection: 'EPSG:4326',
                        featureProjection: 'EPSG:3857'
                    }));
                });
            }
        })
    });

    return newLayer;
}

// NOTE(review): this inherit call has no effect because ol3Vector returns
// an explicit object (factory pattern), but it is kept for compatibility.
ol.inherits(ol3Vector, ol.layer.Vector);




/**
 * OL3 style function: returns an ol.style.Style for a feature based on the
 * layer's `symbology` configuration.
 *
 * Supported symbology types:
 *   "single" - one style for all features;
 *   "unique" - style chosen by exact match on a property value;
 *   "range"  - style chosen by inclusive numeric range on a property value.
 *
 * Fixes versus the original: `else (options.symbology) {` and the three
 * `new ol.style.Style(styleOptions){...}` constructs were syntax errors,
 * and a style was only ever built when styleOptions contained a `title`
 * key — so plain fill/color/width symbology produced no style at all.
 *
 * NOTE(review): this relies on `options` from an enclosing scope; as
 * written at file level that variable is not defined here — the function
 * presumably needs to live inside ol3Vector (or read the symbology off
 * the layer/feature). TODO confirm with the caller.
 *
 * @param {ol.Feature} feature - the feature being rendered.
 * @returns {ol.style.Style} the resolved style (default style when no
 *     symbology is configured; an empty object when nothing matched,
 *     preserving the original's behavior).
 */
function _getStyledFeatures(feature) {
    var styleOptions = {};
    var featureStyles = {};

    // Feature attributes, used for matching and {placeholder} expansion.
    var atts = feature.getProperties();

    // Fallback mirrors OL3's default vector style.
    var defaultStyle = new ol.style.Style({
        fill: new ol.style.Fill({ color: 'rgba(255,255,255,0.4)' }),
        stroke: new ol.style.Stroke({
            color: '#3399CC',
            width: 1.25
        })
    });
    if (!options.symbology) {
        return defaultStyle;
    }

    // Copy the matched styleOptions entries into the accumulator.
    function applyOptions(src) {
        for (var key in src) {
            styleOptions[key] = src[key];
        }
    }

    // Replace {prop} placeholders in an optional title with attribute values.
    function interpolateTitle() {
        if (styleOptions.title) {
            for (var prop in atts) {
                var re = new RegExp("{" + prop + "}", "g");
                styleOptions.title = styleOptions.title.replace(re, atts[prop]);
            }
        }
    }

    // Build the ol.style.Style from the accumulated fill/color/width.
    function buildStyle() {
        return new ol.style.Style({
            fill: new ol.style.Fill({ color: styleOptions.fill }),
            stroke: new ol.style.Stroke({
                color: styleOptions.color,
                width: styleOptions.width
            })
        });
    }

    switch (options.symbology.type) {
        case "single":
            // One symbology for every feature.
            applyOptions(options.symbology.styleOptions);
            interpolateTitle();
            featureStyles = buildStyle();
            break;

        case "unique": {
            // Style the feature whose property value matches a class value.
            var att = options.symbology.property;
            for (var i = 0, len = options.symbology.values.length; i < len; i++) {
                var entry = options.symbology.values[i];
                if (atts[att] == entry.value) {
                    applyOptions(entry.styleOptions);
                    interpolateTitle();
                    featureStyles = buildStyle();
                }
            }
            break;
        }

        case "range": {
            // Style the feature whose property value falls inside a range.
            var rangeAtt = options.symbology.property;
            for (var j = 0, n = options.symbology.ranges.length; j < n; j++) {
                var rng = options.symbology.ranges[j];
                if (atts[rangeAtt] >= rng.range[0] && atts[rangeAtt] <= rng.range[1]) {
                    applyOptions(rng.styleOptions);
                    interpolateTitle();
                    featureStyles = buildStyle();
                }
            }
            break;
        }
    }
    return featureStyles;
}


arcpy - Changing output name when exporting data driven pages to JPG?

Is there a way to save the output JPG, changing the output file name to the page name, instead of page number? I mean changing the script fo...