Browse Source

initial import 0.7.4-dev

tags/v0.7.4
Olivier Courtin 1 month ago
parent
commit
c2e7fcb39c
57 changed files with 4767 additions and 2 deletions
  1. +5
    -0
      MANIFEST.in
  2. +161
    -0
      Makefile
  3. +192
    -2
      README.md
  4. +64
    -0
      config.toml
  5. +114
    -0
      docs/101.md
  6. +67
    -0
      docs/config.md
  7. BIN
      docs/img/101/images.png
  8. BIN
      docs/img/101/labels.png
  9. BIN
      docs/img/101/predict_compare.png
  10. BIN
      docs/img/101/predict_compare_side.png
  11. BIN
      docs/img/101/predict_masks.png
  12. BIN
      docs/img/readme/draw_me_neat_eo.png
  13. BIN
      docs/img/readme/stacks.png
  14. +20
    -0
      docs/makefile.md
  15. +374
    -0
      docs/tools.md
  16. +1
    -0
      neat_eo/__init__.py
  17. +203
    -0
      neat_eo/core.py
  18. +0
    -0
      neat_eo/da/__init__.py
  19. +43
    -0
      neat_eo/da/core.py
  20. +39
    -0
      neat_eo/da/rgb.py
  21. +82
    -0
      neat_eo/geojson.py
  22. +0
    -0
      neat_eo/loaders/__init__.py
  23. +93
    -0
      neat_eo/loaders/semseg.py
  24. +0
    -0
      neat_eo/losses/__init__.py
  25. +47
    -0
      neat_eo/losses/lovasz.py
  26. +16
    -0
      neat_eo/metrics/IoU.py
  27. +16
    -0
      neat_eo/metrics/MCC.py
  28. +23
    -0
      neat_eo/metrics/QoD.py
  29. +0
    -0
      neat_eo/metrics/__init__.py
  30. +59
    -0
      neat_eo/metrics/core.py
  31. +0
    -0
      neat_eo/nn/__init__.py
  32. +105
    -0
      neat_eo/nn/albunet.py
  33. +0
    -0
      neat_eo/osm/__init__.py
  34. +42
    -0
      neat_eo/osm/building.py
  35. +117
    -0
      neat_eo/osm/road.py
  36. +340
    -0
      neat_eo/tiles.py
  37. +0
    -0
      neat_eo/tools/__init__.py
  38. +50
    -0
      neat_eo/tools/__main__.py
  39. +196
    -0
      neat_eo/tools/_sat.py
  40. +224
    -0
      neat_eo/tools/compare.py
  41. +213
    -0
      neat_eo/tools/cover.py
  42. +72
    -0
      neat_eo/tools/dataset.py
  43. +119
    -0
      neat_eo/tools/download.py
  44. +152
    -0
      neat_eo/tools/eval.py
  45. +131
    -0
      neat_eo/tools/export.py
  46. +35
    -0
      neat_eo/tools/extract.py
  47. +129
    -0
      neat_eo/tools/info.py
  48. +143
    -0
      neat_eo/tools/predict.py
  49. +195
    -0
      neat_eo/tools/rasterize.py
  50. +83
    -0
      neat_eo/tools/subset.py
  51. +274
    -0
      neat_eo/tools/tile.py
  52. +253
    -0
      neat_eo/tools/train.py
  53. +63
    -0
      neat_eo/tools/vectorize.py
  54. +66
    -0
      neat_eo/web_ui/compare.html
  55. +73
    -0
      neat_eo/web_ui/leaflet.html
  56. +18
    -0
      requirements.txt
  57. +55
    -0
      setup.py

+ 5
- 0
MANIFEST.in View File

@@ -0,0 +1,5 @@
include LICENSE
include requirements.txt
include neat_eo/web_ui/compare.html
include neat_eo/web_ui/leaflet.html
recursive-include data *

+ 161
- 0
Makefile View File

@@ -0,0 +1,161 @@
help:
@echo "This Makefile rules are designed for Neat-EO.pink devs and power-users."
@echo "For plain user installation follow README.md instructions, instead."
@echo ""
@echo ""
@echo " make install To install, few Python dev tools and Neat-EO.pink in editable mode."
@echo " So any further Neat-EO.pink Python code modification will be usable at once,"
@echo " throught either neo tools commands or neat_eo.* modules."
@echo ""
@echo " make check Launchs code tests, and tools doc updating."
@echo " Do it, at least, before sending a Pull Request."
@echo ""
@echo " make check_tuto Launchs neo commands embeded in tutorials, to be sure everything still up to date."
@echo " Do it, at least, on each CLI modifications, and before a release."
@echo " NOTA: It takes a while."
@echo ""
@echo " make pink Python code beautifier,"
@echo " as Pink is the new Black ^^"



# Dev install
install:
pip3 install pytest black flake8 twine
pip3 install -e .


# Lauch all tests
check: ut it doc
@echo "==================================================================================="
@echo "All tests passed !"
@echo "==================================================================================="


# Python code beautifier
pink:
black -l 125 *.py neat_eo/*.py neat_eo/*/*.py tests/*py tests/*/*.py


# Perform units tests, and linter checks
ut:
@echo "==================================================================================="
black -l 125 --check *.py neat_eo/*.py neat_eo/*/*.py
@echo "==================================================================================="
flake8 --max-line-length 125 --ignore=E203,E241,E226,E272,E261,E221,W503,E722
@echo "==================================================================================="
pytest tests -W ignore::UserWarning


# Launch Integration Tests
it: it_pre it_train it_post


# Integration Tests: Data Preparation
it_pre:
@echo "==================================================================================="
rm -rf it
neo info
neo cover --zoom 18 --bbox 4.8,45.7,4.82,45.72 --out it/cover
neo download --rate 20 --type WMS --url "https://download.data.grandlyon.com/wms/grandlyon?SERVICE=WMS&REQUEST=GetMap&VERSION=1.3.0&LAYERS=Ortho2015_vue_ensemble_16cm_CC46&WIDTH=512&HEIGHT=512&CRS=EPSG:3857&BBOX={xmin},{ymin},{xmax},{ymax}&FORMAT=image/jpeg" --cover it/cover --out it/images
echo "Download Buildings GeoJSON" && wget --show-progress -q -nc -O it/lyon_roofprint.json "https://download.data.grandlyon.com/wfs/grandlyon?SERVICE=WFS&REQUEST=GetFeature&TYPENAME=ms:fpc_fond_plan_communaut.fpctoit&VERSION=1.1.0&srsName=EPSG:4326&BBOX=4.79,45.69,4.83,45.73&outputFormat=application/json; subtype=geojson" | true
echo "Download Roads GeoJSON" && wget --show-progress -q -nc -O it/lyon_road.json "https://download.data.grandlyon.com/wfs/grandlyon?SERVICE=WFS&VERSION=1.1.0&request=GetFeature&typename=pvo_patrimoine_voirie.pvochausseetrottoir&outputFormat=application/json; subtype=geojson&SRSNAME=EPSG:4326&bbox=`neo cover --dir it/images --type extent`" | true
ogr2ogr -f SQLite it/lyon_road.sqlite it/lyon_road.json -dsco SPATIALITE=YES -t_srs EPSG:3857 -nln roads -lco GEOMETRY_NAME=geom
ogr2ogr -f GeoJSON it/lyon_road_poly.json it/lyon_road.sqlite -dialect sqlite -sql "SELECT Buffer(geom, IFNULL(largeurchaussee, 5.0) / 2.0) AS geom FROM roads"
neo rasterize --type Building --geojson it/lyon_roofprint.json --config config.toml --cover it/cover --out it/labels
neo rasterize --type Road --geojson it/lyon_road_poly.json --config config.toml --cover it/cover --append --out it/labels
neo rasterize --type Building --geojson it/lyon_roofprint.json --config config.toml --cover it/cover --out it/labels_osm
neo cover --dir it/images --splits 80/20 --out it/train/cover it/eval/cover
neo subset --dir it/images --cover it/train/cover --out it/train/images
neo subset --dir it/labels --cover it/train/cover --out it/train/labels
neo subset --dir it/images --cover it/eval/cover --out it/eval/images
neo subset --dir it/labels --cover it/eval/cover --out it/eval/labels
mkdir --parents it/predict/tiff
wget -nc -O it/predict/tiff/1841_5174_08_CC46.tif "https://download.data.grandlyon.com/files/grandlyon/imagerie/ortho2018/ortho/GeoTiff_YcBcR/1km_8cm_CC46/1841_5174_08_CC46.tif"
wget -nc -O it/predict/tiff/1842_5174_08_CC46.tif "https://download.data.grandlyon.com/files/grandlyon/imagerie/ortho2018/ortho/GeoTiff_YcBcR/1km_8cm_CC46/1842_5174_08_CC46.tif"
neo tile --zoom 18 --rasters it/predict/tiff/*.tif --out it/predict/images
neo cover --zoom 18 --dir it/predict/images --out it/predict/cover
echo "Download PBF" && wget -nc -O it/predict/ra.pbf "http://download.geofabrik.de/europe/france/rhone-alpes-latest.osm.pbf" | true
osmium extract --bbox `neo cover --dir it/predict/images --type extent` -o it/predict/lyon.pbf it/predict/ra.pbf
neo extract --type Building --pbf it/predict/lyon.pbf --out it/predict/osm_building.json
neo extract --type Road --pbf it/predict/lyon.pbf --out it/predict/osm_road.json
neo rasterize --type Building --geojson it/predict/osm_building.json --config config.toml --cover it/predict/cover --out it/predict/labels
neo rasterize --type Road --geojson it/predict/osm_road.json --config config.toml --cover it/predict/cover --append --out it/predict/labels



# Integration Tests: Training
it_train:
@echo "==================================================================================="
export CUDA_VISIBLE_DEVICES=0 && neo train --config config.toml --bs 4 --lr 0.00025 --epochs 2 --dataset it/train --classes_weights `neo dataset --mode weights --dataset it/train --config config.toml` --out it/pth
export CUDA_VISIBLE_DEVICES=0,1 && neo train --config config.toml --bs 4 --lr 0.00025 --epochs 4 --resume --checkpoint it/pth/checkpoint-00002.pth --classes_weights auto --dataset it/train --out it/pth
export CUDA_VISIBLE_DEVICES=0,1 && neo train --config config.toml --bs 4 --optimizer AdamW --lr 0.00025 --epochs 6 --resume --checkpoint it/pth/checkpoint-00004.pth --classes_weights auto --dataset it/train --out it/pth
neo eval --config config.toml --bs 4 --checkpoint it/pth/checkpoint-00006.pth --classes_weights auto --dataset it/eval
neo info --checkpoint it/pth/checkpoint-00006.pth


# Integration Tests: Post Training
it_post:
@echo "==================================================================================="
neo export --checkpoint it/pth/checkpoint-00006.pth --type jit --out it/pth/export.jit
neo export --checkpoint it/pth/checkpoint-00006.pth --type onnx --out it/pth/export.onnx
neo predict --config config.toml --checkpoint it/pth/checkpoint-00006.pth --dataset it/predict --out it/predict/masks
neo predict --metatiles --config config.toml --checkpoint it/pth/checkpoint-00006.pth --dataset it/predict --out it/predict/masks_meta
neo predict --metatiles --keep_borders --config config.toml --checkpoint it/pth/checkpoint-00006.pth --dataset it/predict --out it/predict/masks_keep
neo cover --dir it/predict/masks_meta --out it/predict/cover
neo compare --cover it/predict/cover --config config.toml --images it/predict/images it/predict/labels it/predict/masks --mode stack --labels it/predict/labels --masks it/predict/masks_meta --out it/predict/compare
neo compare --cover it/predict/cover --images it/predict/images it/predict/compare --mode side --out it/predict/compare_side
neo compare --cover it/predict/cover --config config.toml --mode list --labels it/predict/labels --max Building QoD 0.50 --masks it/predict/masks_meta --geojson --out it/predict/compare/tiles.json
cp it/predict/compare/tiles.json it/predict/compare_side/tiles.json
neo vectorize --type Building --config config.toml --masks it/predict/masks_meta --out it/predict/building.json
neo vectorize --type Road --config config.toml --masks it/predict/masks_meta --out it/predict/road.json


# Documentation generation (tools and config file)
doc:
@echo "==================================================================================="
@echo "# Neat-EO.pink tools documentation" > docs/tools.md
@for tool in `ls neat_eo/tools/[^_]*py | sed -e 's#.*/##g' -e 's#.py##'`; do \
echo "Doc generation: $$tool"; \
echo "## neo $$tool" >> docs/tools.md; \
echo '```' >> docs/tools.md; \
neo $$tool -h >> docs/tools.md; \
echo '```' >> docs/tools.md; \
done
@echo "Doc generation: config.toml"
@echo "## config.toml" > docs/config.md; \
echo '```' >> docs/config.md; \
cat config.toml >> docs/config.md; \
echo '```' >> docs/config.md;
@echo "Doc generation: Makefile"
@echo "## Makefile" > docs/makefile.md; \
echo '```' >> docs/makefile.md; \
make --no-print-directory >> docs/makefile.md; \
echo '```' >> docs/makefile.md;


# Check neo commands embeded in Documentation
check_doc:
@echo "==================================================================================="
@echo "Checking README:"
@echo "==================================================================================="
@rm -rf ds && sed -n -e '/```bash/,/```/ p' README.md | sed -e '/```/d' > .CHECK && sh .CHECK
@echo "==================================================================================="


# Check neo commands embeded in Tutorials
check_tuto: check_101

check_101:
@echo "==================================================================================="
@echo "Checking 101"
@mkdir -p tuto && cd tuto && mkdir -p 101 && sed -n -e '/```bash/,/```/ p' ../docs/101.md | sed -e '/```/d' > 101/.CHECK && cd 101 && sh .CHECK && cd ..
@cd tuto/101 && tar cf 101.tar train/images train/labels predict/masks predict/compare predict/compare_side



# Send a release on PyPI
pypi:
rm -rf dist Neat-EO.pink.egg-info
python3 setup.py sdist
twine upload dist/* -r pypi

+ 192
- 2
README.md View File

@@ -1,3 +1,193 @@
# neat-EO
<a href="https://twitter.com/neat_eo"><img src="https://img.shields.io/badge/Follow-neat_eo-ff69b4.svg" /></a> <a href="https://gitter.im/Neat-EO-pink/community"><img src="https://img.shields.io/gitter/room/Neat-EO-pink/community.svg?color=ff69b4&style=popout" /></a>
<a href="https://pepy.tech/project/neat-eo/week"><img align="right" src="https://pepy.tech/badge/neat-eo/week" /></a>

Efficient AI4EO OpenSource framework
<h1 align='center'>neat-EO</h1>
<h2 align='center'>Efficient AI4EO OpenSource framework</h2>

<p align=center>
<img src="https://pbs.twimg.com/media/DpjonykWwAANpPr.jpg" alt="Neat-EO.pink buildings segmentation from Imagery" />
</p>



Purposes:
---------
- DataSet Quality Analysis
- Change Detection highlighter
- Features extraction and completion


Main Features:
--------------
- Provides several command line tools that you can combine to build your own workflow
- Follows geospatial standards to ease interoperability and performs fast data preparation
- Build-in cutting edge Computer Vision papers implementation
- Supports both RGB and multiband imagery, and allows Data Fusion
- Web-UI tools to easily display, highlight or select results (and allow you to use your own templates)
- High performances
- Extensible by design




<img alt="Draw me neat-EO" src="https://raw.githubusercontent.com/datapink/neat-eo.pink/master/docs/img/readme/draw_me_neat_eo.png" />


Documentation:
--------------
### Tutorial:
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/101.md">Learn to use neat-EO, in a couple of hours</a>

### Config file:
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/config.md">neat-EO configuration file</a>

### Tools:

- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-cover">`neo cover`</a> Generate a tiles covering, in csv format: X,Y,Z
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-download">`neo download`</a> Downloads tiles from a Web Server (XYZ or WMS)
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-extract">`neo extract`</a> Extracts GeoJSON features from OpenStreetMap .pbf
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-rasterize">`neo rasterize`</a> Rasterize vector features (GeoJSON or PostGIS), to raster tiles
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-subset">`neo subset`</a> Filter images in a slippy map dir using a csv tiles cover
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-tile">`neo tile`</a> Tile a raster coverage
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-train">`neo train`</a> Trains a model on a dataset
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-eval">`neo eval`</a> Evals a model on a dataset
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-export">`neo export`</a> Export a model to ONNX or Torch JIT
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-predict">`neo predict`</a> Predict masks, from a dataset, with an already trained model
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-compare">`neo compare`</a> Compute composite images and/or metrics to compare several slippy map dirs
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-vectorize">`neo vectorize`</a> Extract GeoJSON features from predicted masks
- <a href="https://github.com/datapink/neat-eo/blob/master/docs/tools.md#neo-info">`neo info`</a> Print neat-EO version information

### Presentations slides:
- <a href="http://www.datapink.com/presentations/2020-fosdem.pdf">@FOSDEM 2020</a>





Installs:
--------

### With PIP:
```
pip3 install neat-EO
```

### With Ubuntu 19.10, from scratch:

```
# neat-EO [mandatory]
sudo sh -c "apt update && apt install -y build-essential python3-pip"
pip3 install neat-EO && export PATH=$PATH:~/.local/bin

# NVIDIA GPU Drivers [mandatory for train and predict]
wget http://us.download.nvidia.com/XFree86/Linux-x86_64/435.21/NVIDIA-Linux-x86_64-435.21.run
sudo sh NVIDIA-Linux-x86_64-435.21.run -a -q --ui=none

# HTTP Server [for WebUI rendering]
sudo apt install -y apache2 && sudo ln -s ~ /var/www/html/neo
```


### NOTAS:
- Requires: Python 3.6 or 3.7
- GPU with VRAM >= 8Go is mandatory
- To test neat-EO install, launch in a new terminal: `neo info`
- If needed, to remove pre-existing Nouveau driver: ```sudo sh -c "echo blacklist nouveau > /etc/modprobe.d/blacklist-nvidia-nouveau.conf && update-initramfs -u && reboot"```




Architecture:
------------

neat-EO uses cherry-picked Open Source libs from the Deep Learning, Computer Vision and GIS stacks.

<img alt="Stacks" src="https://raw.githubusercontent.com/datapink/neat-EO/master/docs/img/readme/stacks.png" />



GeoSpatial OpenDataSets:
------------------------
- <a href="https://github.com/chrieke/awesome-satellite-imagery-datasets">Christoph Rieke's Awesome Satellite Imagery Datasets</a>
- <a href="https://zhangbin0917.github.io/2018/06/12/%E9%81%A5%E6%84%9F%E6%95%B0%E6%8D%AE%E9%9B%86/">Zhang Bin, Earth Observation OpenDataset blog</a>

Bibliography:
-------------

- <a href="https://arxiv.org/abs/1912.01703">PyTorch: An Imperative Style, High-Performance Deep Learning Library</a>
- <a href="https://arxiv.org/abs/1505.04597">U-Net: Convolutional Networks for Biomedical Image Segmentation</a>
- <a href="https://arxiv.org/abs/1512.03385">Deep Residual Learning for Image Recognition</a>
- <a href="https://arxiv.org/abs/1806.00844">TernausNetV2: Fully Convolutional Network for Instance Segmentation</a>
- <a href="https://arxiv.org/abs/1705.08790">The Lovász-Softmax loss: A tractable surrogate for the optimization of the IoU measure in neural networks</a>
- <a href="https://arxiv.org/abs/1809.06839">Albumentations: fast and flexible image augmentations</a>










Contributions and Services:
---------------------------

- Pull Requests are welcome ! Feel free to send code...
Don't hesitate either to initiate a prior discussion via <a href="https://gitter.im/Neat-EO-pink/community">gitter</a> or ticket on any implementation question.
And give also a look at <a href="https://github.com/datapink/neat-EO/blob/master/docs/makefile.md">Makefile rules</a>.

- If you want to collaborate through code production and maintenance on a long term basis, please get in touch, co-edition with an ad hoc governance can be considered.

- If you want a new feature, but don't want to implement it, <a href="http://datapink.com">DataPink</a> provide core-dev services.

- Expertise, assistance and training on neat-EO are also provided by <a href="http://datapink.com">DataPink</a>.

- And if you want to support the whole project, because it means for your own business, funding is also welcome.


### Requests for funding:

We've already identified several new features and research papers that could further improve neat-EO;
your funding would make a difference in implementing them in a coming release:

- Increase again accuracy :
- on low resolution imagery
- even with few labels (aka Weakly Supervised)
- Topology handling
- Instance Segmentation
- Improve again performances

- Add support for :
- Time Series Imagery
- StreetView Imagery
- MultiHosts scaling
- Vectors post-treatments
- ...






Authors:
--------
- Olivier Courtin <https://github.com/ocourtin>
- Daniel J. Hofmann <https://github.com/daniel-j-h>



Citing:
-------
```
@Manual{,
  title = {neat-EO: Efficient AI4EO OpenSource framework},
  author = {Olivier Courtin and Daniel J. Hofmann},
  organization = {DataPink},
  year = {2020},
  url = {http://neat-EO.pink},
}
```

+ 64
- 0
config.toml View File

@@ -0,0 +1,64 @@
# Neat-EO.pink Configuration


# Input channels configuration
# You can, add several channels blocks to compose your input Tensor. Order is meaningful.
#
# name: dataset subdirectory name
# bands: bands to keep from sub source. Order is meaningful

[[channels]]
name = "images"
bands = [1, 2, 3]


# Output Classes configuration
# Nota: available colors are either CSS3 color names or a #RRGGBB hexadecimal representation.
# Nota: the special color name "transparent" can be used on a single class to apply transparency.
# Nota: default weight is 1.0 for each class, or 0.0 for a transparent-colored one.

[[classes]]
title = "Background"
color = "transparent"

[[classes]]
title = "Building"
color = "deeppink"

[[classes]]
title = "Road"
color = "deepskyblue"


[model]
# Neurals Network name
nn = "Albunet"

# Encoder name
encoder = "resnet50"

# Dataset loader name
loader = "SemSeg"

# Model internal input tile size [W, H]
#ts = [512, 512]


[train]
# Pretrained Encoder
#pretrained = true

# Batch size
#bs = 4

# Data Augmentation to apply, to whole input tensor, with associated probability
da = {name="RGB", p=1.0}

# Loss function name
loss = "Lovasz"

# Eval Metrics
metrics = ["IoU", "MCC", "QoD"]

# Optimizer, cf https://pytorch.org/docs/stable/optim.html
#optimizer = {name="Adam", lr=0.0001}

+ 114
- 0
docs/101.md View File

@@ -0,0 +1,114 @@
# Neat-EO.pink 101

This tutorial allows you, in about 2 hours, to experiment with basic neat-EO usage.


Check Neat-EO.pink installation and GPU
---------------------------------------
```bash
neo info
```


Retrieve DataSet (Subset of <a href="https://www.drivendata.org/competitions/60/building-segmentation-disaster-resilience">Open Cities AI Challenge 2020</a>)
---------------------------------
```bash
wget -nc http://www.datapink.net/neo/101/ds.tar
tar xf ds.tar
```

Configuration file:
-------------------

```bash
echo '
# Inputs
[[channels]]
name = "images"
bands = [1, 2, 3]

# Outputs
[[classes]]
title = "Background"
color = "transparent"

[[classes]]
title = "Building"
color = "deeppink"

# AI stuff
[model]
nn = "Albunet"
loader = "SemSeg"
encoder = "resnet50"

[train]
bs = 4
loss = "Lovasz"
da = {name="RGB", p=1.0}
optimizer = {name="Adam", lr=0.000025}
metrics = ["QoD"]

' > 101.toml


export NEO_CONFIG=101.toml
```


Tile Imagery:
-------------
```bash
neo tile --zoom 19 --bands 1,2,3 --nodata_threshold 25 --rasters train/*/*[^-]/*tif --out train/images
```
<a href="http://www.datapink.net/neo/101/train/images/"><img src="img/101/images.png" /></a>


Retrieve and tile labels accordingly:
-------------------------------------

```bash
neo cover --dir train/images --out train/cover.csv
neo rasterize --geojson train/*/*-labels/*.geojson --type Building --cover train/cover.csv --out train/labels
```
<a href="http://www.datapink.net/neo/101/train/labels/"><img src="img/101/labels.png" /></a>



Launch training :
-----------------

```bash
neo train --dataset train --epochs 5 --out model
neo eval --checkpoint model/checkpoint-00005.pth --dataset train
```


Retrieve, prepare and predict on a new imagery:
-----------------------------------------------
```bash
neo tile --zoom 19 --bands 1,2,3 --nodata_threshold 25 --rasters predict/*/*[^-]/*tif --out predict/images
neo predict --checkpoint model/checkpoint-00005.pth --dataset predict --metatiles --out predict/masks
```
<a href="http://www.datapink.net/neo/101/predict/masks/leaflet.html"><img src="img/101/predict_masks.png" /></a>

Compare our trained model prediction against labels:
----------------------------------------------------
```bash
neo cover --dir predict/masks --out predict/cover.csv
neo rasterize --geojson predict/*/*-labels/*.geojson --type Building --cover predict/cover.csv --out predict/labels
neo compare --mode stack --images predict/images predict/labels predict/masks --cover predict/cover.csv --out predict/compare
neo compare --mode list --labels predict/labels --masks predict/masks --max Building QoD 0.80 --cover predict/cover.csv --geojson --out predict/compare/tiles.json
```
<a href="http://www.datapink.net/neo/101/predict/compare/"><img src="img/101/predict_compare.png" /></a>

```bash
neo compare --mode side --images predict/images predict/compare --cover predict/cover.csv --out predict/compare_side
```
<a href="http://www.datapink.net/neo/101/predict/compare_side/"><img src="img/101/predict_compare_side.png" /></a>

Vectorize results:
------------------
```bash
neo vectorize --masks predict/masks --type Building --out predict/building.json
```

+ 67
- 0
docs/config.md View File

@@ -0,0 +1,67 @@
## config.toml
```
# Neat-EO.pink Configuration


# Input channels configuration
# You can, add several channels blocks to compose your input Tensor. Order is meaningful.
#
# name: dataset subdirectory name
# bands: bands to keep from sub source. Order is meaningful

[[channels]]
name = "images"
bands = [1, 2, 3]


# Output Classes configuration
# Nota: available colors are either CSS3 color names or a #RRGGBB hexadecimal representation.
# Nota: the special color name "transparent" can be used on a single class to apply transparency.
# Nota: default weight is 1.0 for each class, or 0.0 for a transparent-colored one.

[[classes]]
title = "Background"
color = "transparent"

[[classes]]
title = "Building"
color = "deeppink"

[[classes]]
title = "Road"
color = "deepskyblue"


[model]
# Neurals Network name
nn = "Albunet"

# Encoder name
encoder = "resnet50"

# Dataset loader name
loader = "SemSeg"

# Model internal input tile size [W, H]
#ts = [512, 512]


[train]
# Pretrained Encoder
#pretrained = true

# Batch size
#bs = 4

# Data Augmentation to apply, to whole input tensor, with associated probability
da = {name="RGB", p=1.0}

# Loss function name
loss = "Lovasz"

# Eval Metrics
metrics = ["IoU", "MCC", "QoD"]

# Optimizer, cf https://pytorch.org/docs/stable/optim.html
#optimizer = {name="Adam", lr=0.0001}
```

BIN
docs/img/101/images.png View File

Before After
Width: 1337  |  Height: 729  |  Size: 2.3MB

BIN
docs/img/101/labels.png View File

Before After
Width: 939  |  Height: 701  |  Size: 79KB

BIN
docs/img/101/predict_compare.png View File

Before After
Width: 1129  |  Height: 735  |  Size: 1.7MB

BIN
docs/img/101/predict_compare_side.png View File

Before After
Width: 1047  |  Height: 526  |  Size: 617KB

BIN
docs/img/101/predict_masks.png View File

Before After
Width: 1028  |  Height: 682  |  Size: 178KB

BIN
docs/img/readme/draw_me_neat_eo.png View File

Before After
Width: 870  |  Height: 533  |  Size: 75KB

BIN
docs/img/readme/stacks.png View File

Before After
Width: 901  |  Height: 621  |  Size: 124KB

+ 20
- 0
docs/makefile.md View File

@@ -0,0 +1,20 @@
## Makefile
```
This Makefile rules are designed for Neat-EO.pink devs and power-users.
For plain user installation follow README.md instructions, instead.


make install To install, few Python dev tools and Neat-EO.pink in editable mode.
So any further Neat-EO.pink Python code modification will be usable at once,
throught either neo tools commands or neat_eo.* modules.

make check Launchs code tests, and tools doc updating.
Do it, at least, before sending a Pull Request.

make check_tuto Launchs neo commands embeded in tutorials, to be sure everything still up to date.
Do it, at least, on each CLI modifications, and before a release.
NOTA: It takes a while.

make pink Python code beautifier,
as Pink is the new Black ^^
```

+ 374
- 0
docs/tools.md View File

@@ -0,0 +1,374 @@
# Neat-EO.pink tools documentation
## neo compare
```
usage: neo compare [-h] [--mode {side,stack,list}] [--labels LABELS]
[--masks MASKS] [--config CONFIG]
[--images IMAGES [IMAGES ...]] [--cover COVER]
[--workers WORKERS] [--min MIN MIN MIN] [--max MAX MAX MAX]
[--vertical] [--geojson] [--format FORMAT] [--out OUT]
[--web_ui_base_url WEB_UI_BASE_URL]
[--web_ui_template WEB_UI_TEMPLATE] [--no_web_ui]

optional arguments:
-h, --help show this help message and exit

Inputs:
--mode {side,stack,list} compare mode [default: side]
--labels LABELS path to tiles labels directory [required for metrics filtering]
--masks MASKS path to tiles masks directory [required for metrics filtering]
--config CONFIG path to config file [required for metrics filtering, if no global config setting]
--images IMAGES [IMAGES ...] path to images directories [required for stack or side modes]
--cover COVER path to csv tiles cover file, to filter tiles to tile [optional]
--workers WORKERS number of workers [default: CPU]

Metrics Filtering:
--min MIN MIN MIN skip tile if class below metric value [0-1] (e.g --min Building QoD 0.7)
--max MAX MAX MAX skip tile if class above metric value [0-1] (e.g --max Building IoU 0.85)

Outputs:
--vertical output vertical image aggregate [optional for side mode]
--geojson output results as GeoJSON [optional for list mode]
--format FORMAT output images file format [default: webp]
--out OUT output path

Web UI:
--web_ui_base_url WEB_UI_BASE_URL alternate Web UI base URL
--web_ui_template WEB_UI_TEMPLATE alternate Web UI template path
--no_web_ui deactivate Web UI output
```
## neo cover
```
usage: neo cover [-h] [--dir DIR] [--bbox BBOX]
[--geojson GEOJSON [GEOJSON ...]] [--cover COVER]
[--raster RASTER [RASTER ...]] [--sql SQL] [--pg PG]
[--no_xyz] [--zoom ZOOM] [--type {cover,extent,geojson}]
[--union] [--splits SPLITS] [--out [OUT [OUT ...]]]

optional arguments:
-h, --help show this help message and exit

Input [one among the following is required]:
--dir DIR plain tiles dir path
--bbox BBOX a lat/lon bbox: xmin,ymin,xmax,ymax or a bbox: xmin,ymin,xmax,ymax,EPSG:xxxx
--geojson GEOJSON [GEOJSON ...] path to GeoJSON features files
--cover COVER a cover file path
--raster RASTER [RASTER ...] a raster file path
--sql SQL SQL to retrieve geometry features (e.g SELECT geom FROM a_table)

Spatial DataBase [required with --sql input]:
--pg PG PostgreSQL dsn using psycopg2 syntax (e.g 'dbname=db user=postgres')

Tiles:
--no_xyz if set, tiles are not expected to be XYZ based.

Outputs:
--zoom ZOOM zoom level of tiles [required, except with --dir or --cover inputs]
--type {cover,extent,geojson} Output type (default: cover)
--union if set, union adjacent tiles, imply --type geojson
--splits SPLITS if set, shuffle and split in several cover subpieces (e.g 50/15/35)
--out [OUT [OUT ...]] cover output paths [required except with --type extent]
```
## neo dataset
```
usage: neo dataset [-h] [--config CONFIG] --dataset DATASET [--cover COVER]
[--workers WORKERS] [--mode {check,weights}]

optional arguments:
-h, --help show this help message and exit
--config CONFIG path to config file [required, if no global config setting]
--dataset DATASET dataset path [required]
--cover COVER path to csv tiles cover file, to filter tiles dataset on [optional]
--workers WORKERS number of workers [default: CPU]
--mode {check,weights} dataset mode [default: check]
```
## neo download
```
usage: neo download [-h] --url URL [--type {XYZ,WMS}] [--rate RATE]
[--timeout TIMEOUT] [--workers WORKERS] --cover COVER
[--format FORMAT] --out OUT
[--web_ui_base_url WEB_UI_BASE_URL]
[--web_ui_template WEB_UI_TEMPLATE] [--no_web_ui]

optional arguments:
-h, --help show this help message and exit

Web Server:
--url URL URL server endpoint, with: {z}/{x}/{y} or {xmin},{ymin},{xmax},{ymax} [required]
--type {XYZ,WMS} service type [default: XYZ]
--rate RATE download rate limit in max requests/seconds [default: 10]
--timeout TIMEOUT download request timeout (in seconds) [default: 10]
--workers WORKERS number of workers [default: same as --rate value]

Coverage to download:
--cover COVER path to .csv tiles list [required]

Output:
--format FORMAT file format to save images in [default: webp]
--out OUT output directory path [required]

Web UI:
--web_ui_base_url WEB_UI_BASE_URL alternate Web UI base URL
--web_ui_template WEB_UI_TEMPLATE alternate Web UI template path
--no_web_ui deactivate Web UI output
```
## neo eval
```
usage: neo eval [-h] [--config CONFIG] --dataset DATASET [--cover COVER]
[--classes_weights CLASSES_WEIGHTS]
[--tiles_weights TILES_WEIGHTS] [--loader LOADER] [--bs BS]
[--metrics METRICS [METRICS ...]] --checkpoint CHECKPOINT
[--workers WORKERS]

optional arguments:
-h, --help show this help message and exit
--config CONFIG path to config file [required, if no global config setting]

Dataset:
--dataset DATASET dataset path [required]
--cover COVER path to csv tiles cover file, to filter tiles dataset on [optional]
--classes_weights CLASSES_WEIGHTS classes weights separated with comma or 'auto' [optional]
--tiles_weights TILES_WEIGHTS path to csv tiles cover file, with specials weights on [optional]
--loader LOADER dataset loader name [if set override config file value]

Eval:
--bs BS batch size
--metrics METRICS [METRICS ...] metric name (e.g QoD IoU MCC)
--checkpoint CHECKPOINT path to model checkpoint.
--workers WORKERS number of pre-processing images workers, per GPU [default: batch size]
```
## neo export
```
usage: neo export [-h] --checkpoint CHECKPOINT [--type {onnx,jit,pth}]
[--nn NN] [--loader LOADER] [--doc_string DOC_STRING]
[--shape_in SHAPE_IN] [--shape_out SHAPE_OUT]
[--encoder ENCODER] --out OUT

optional arguments:
-h, --help show this help message and exit

Inputs:
--checkpoint CHECKPOINT model checkpoint to load [required]
--type {onnx,jit,pth} output type [default: onnx]

To set or override metadata pth parameters:
--nn NN nn name
--loader LOADER nn loader
--doc_string DOC_STRING nn documentation abstract
--shape_in SHAPE_IN nn shape in (e.g 3,512,512)
--shape_out SHAPE_OUT nn shape_out (e.g 2,512,512)
--encoder ENCODER nn encoder (e.g resnet50)

Output:
--out OUT path to save export model to [required]
```
## neo extract
```
usage: neo extract [-h] --type TYPE --pbf PBF --out OUT

optional arguments:
-h, --help show this help message and exit

Inputs:
--type TYPE OSM feature type to extract (e.g Building, Road) [required]
--pbf PBF path to .osm.pbf file [required]

Output:
--out OUT GeoJSON output file path [required]
```
## neo info
```
usage: neo info [-h] [--version] [--processes] [--checkpoint CHECKPOINT]

optional arguments:
-h, --help show this help message and exit
--version if set, output Neat-EO.pink version only
--processes if set, output GPU processes list
--checkpoint CHECKPOINT if set with a .pth path, output related model metadata

Usages:
To kill GPU processes: neo info --processes | xargs sudo kill -9
```
## neo predict
```
usage: neo predict [-h] [--dataset DATASET] --checkpoint CHECKPOINT
[--config CONFIG] [--cover COVER] --out OUT [--metatiles]
[--keep_borders] [--bs BS] [--workers WORKERS]
[--web_ui_base_url WEB_UI_BASE_URL]
[--web_ui_template WEB_UI_TEMPLATE] [--no_web_ui]

optional arguments:
-h, --help show this help message and exit

Inputs:
--dataset DATASET predict dataset directory path [required]
--checkpoint CHECKPOINT path to the trained model to use [required]
--config CONFIG path to config file [required, if no global config setting]
--cover COVER path to csv tiles cover file, to filter tiles to predict [optional]

Outputs:
--out OUT output directory path [required]
--metatiles if set, use surrounding tiles to avoid margin effects
--keep_borders if set, with --metatiles, force borders tiles to be kept

Performances:
--bs BS batch size [default: CPU/GPU]
--workers WORKERS number of pre-processing images workers, per GPU [default: batch_size]

Web UI:
--web_ui_base_url WEB_UI_BASE_URL alternate Web UI base URL
--web_ui_template WEB_UI_TEMPLATE alternate Web UI template path
--no_web_ui deactivate Web UI output
```
## neo rasterize
```
usage: neo rasterize [-h] --cover COVER [--config CONFIG] --type TYPE
[--geojson GEOJSON [GEOJSON ...]] [--sql SQL] [--pg PG]
[--buffer BUFFER] --out OUT [--append] [--ts TS]
[--workers WORKERS] [--web_ui_base_url WEB_UI_BASE_URL]
[--web_ui_template WEB_UI_TEMPLATE] [--no_web_ui]

optional arguments:
-h, --help show this help message and exit

Inputs [either --sql or --geojson is required]:
--cover COVER path to csv tiles cover file [required]
--config CONFIG path to config file [required, if no global config setting]
--type TYPE type of features to rasterize (i.e class title) [required]
--geojson GEOJSON [GEOJSON ...] path to GeoJSON features files
--sql SQL SQL to retrieve geometry features [e.g SELECT geom FROM table WHERE ST_Intersects(TILE_GEOM, geom)]
--pg PG If set, override config PostgreSQL dsn.
--buffer BUFFER Add a Geometrical Buffer around each Features (distance in meter)

Outputs:
--out OUT output directory path [required]
--append Append to existing tile if any, useful for multiclass labels
--ts TS output tile size [default: 512,512]

Performances:
--workers WORKERS number of workers [default: CPU]

Web UI:
--web_ui_base_url WEB_UI_BASE_URL alternate Web UI base URL
--web_ui_template WEB_UI_TEMPLATE alternate Web UI template path
--no_web_ui deactivate Web UI output
```
## neo subset
```
usage: neo subset [-h] --dir DIR --cover COVER [--copy] [--delete] [--quiet]
[--out [OUT]] [--web_ui_base_url WEB_UI_BASE_URL]
[--web_ui_template WEB_UI_TEMPLATE] [--no_web_ui]

optional arguments:
-h, --help show this help message and exit

Inputs:
--dir DIR to XYZ tiles input dir path [required]
--cover COVER path to csv cover file to filter dir by [required]

Alternate modes, as default is to create relative symlinks:
--copy copy tiles from input to output
--delete delete tiles listed in cover

Output:
--quiet if set, suppress warning output
--out [OUT] output dir path [required for copy]

Web UI:
--web_ui_base_url WEB_UI_BASE_URL alternate Web UI base URL
--web_ui_template WEB_UI_TEMPLATE alternate Web UI template path
--no_web_ui deactivate Web UI output
```
## neo tile
```
usage: neo tile [-h] --rasters RASTERS [RASTERS ...] [--cover COVER]
[--bands BANDS] --zoom ZOOM [--ts TS] [--nodata [0-255]]
[--nodata_threshold [0-100]] [--keep_borders]
[--format FORMAT] --out OUT [--label] [--config CONFIG]
[--workers WORKERS] [--web_ui_base_url WEB_UI_BASE_URL]
[--web_ui_template WEB_UI_TEMPLATE] [--no_web_ui]

optional arguments:
-h, --help show this help message and exit

Inputs:
--rasters RASTERS [RASTERS ...] path to raster files to tile [required]
--cover COVER path to csv tiles cover file, to filter tiles to tile [optional]
--bands BANDS list of 1-n index bands to select (e.g 1,2,3) [optional]

Output:
--zoom ZOOM zoom level of tiles [required]
--ts TS tile size in pixels [default: 512,512]
--nodata [0-255] nodata pixel value, used by default to remove coverage border's tile [default: 0]
--nodata_threshold [0-100] Skip tile if nodata pixel ratio > threshold. [default: 100]
--keep_borders keep tiles even if borders are empty (nodata)
--format FORMAT file format to save images in (e.g jpeg)
--out OUT output directory path [required]

Labels:
--label if set, generate label tiles
--config CONFIG path to config file [required with --label, if no global config setting]

Performances:
--workers WORKERS number of workers [default: raster files]

Web UI:
--web_ui_base_url WEB_UI_BASE_URL alternate Web UI base URL
--web_ui_template WEB_UI_TEMPLATE alternate Web UI template path
--no_web_ui deactivate Web UI output
```
## neo train
```
usage: neo train [-h] [--config CONFIG] --dataset DATASET [--cover COVER]
[--classes_weights CLASSES_WEIGHTS]
[--tiles_weights TILES_WEIGHTS] [--loader LOADER] [--bs BS]
[--lr LR] [--ts TS] [--nn NN] [--encoder ENCODER]
[--optimizer OPTIMIZER] [--loss LOSS] [--epochs EPOCHS]
[--resume] [--checkpoint CHECKPOINT] [--workers WORKERS]
[--saving SAVING] --out OUT

optional arguments:
-h, --help show this help message and exit
--config CONFIG path to config file [required, if no global config setting]

Dataset:
--dataset DATASET train dataset path [required]
--cover COVER path to csv tiles cover file, to filter tiles dataset on [optional]
--classes_weights CLASSES_WEIGHTS classes weights separated with comma or 'auto' [optional]
--tiles_weights TILES_WEIGHTS path to csv tiles cover file, to apply weights on [optional]
--loader LOADER dataset loader name [if set override config file value]

Hyper Parameters [if set override config file value]:
--bs BS batch size
--lr LR learning rate
--ts TS tile size
--nn NN neural network name
--encoder ENCODER encoder name
--optimizer OPTIMIZER optimizer name
--loss LOSS model loss

Training:
--epochs EPOCHS number of epochs to train
--resume resume model training, if set imply to provide a checkpoint
--checkpoint CHECKPOINT path to a model checkpoint. To fine tune or resume a training
--workers WORKERS number of pre-processing images workers, per GPU [default: batch size]

Output:
--saving SAVING number of epochs between checkpoint saving [default: 1]
--out OUT output directory path to save checkpoint and logs [required]
```
## neo vectorize
```
usage: neo vectorize [-h] --masks MASKS --type TYPE [--config CONFIG] --out
OUT

optional arguments:
-h, --help show this help message and exit

Inputs:
--masks MASKS input masks directory path [required]
--type TYPE type of features to extract (i.e class title) [required]
--config CONFIG path to config file [required, if no global config setting]

Outputs:
--out OUT path to output file to store features in [required]
```

+ 1
- 0
neat_eo/__init__.py View File

@@ -0,0 +1 @@
# Package version, exposed as neat_eo.__version__ ("-dev" marks an unreleased development build).
__version__ = "0.7.4-dev"

+ 203
- 0
neat_eo/core.py View File

@@ -0,0 +1,203 @@
import os
import sys
import glob
import toml
from importlib import import_module

import re
import colorsys
import webcolors
from pathlib import Path

from neat_eo.tiles import tile_pixel_to_location, tiles_to_geojson


#
# Import module
#
def load_module(module):
    """Dynamically import and return the Python module named by the dotted path *module*."""
    loaded = import_module(module)
    assert loaded, "Unable to import module {}".format(loaded)
    return loaded


#
# Config
#
def load_config(path):
    """Loads a dictionary from configuration file.

    Lookup order: explicit *path* argument, then NEO_CONFIG env var, then
    ~/.neo_config. Fills in default values for missing [model] and [train]
    settings, and a default weight for each class.
    """

    if not path and "NEO_CONFIG" in os.environ:
        path = os.environ["NEO_CONFIG"]
    if not path and os.path.isfile(os.path.expanduser("~/.neo_config")):
        path = "~/.neo_config"
    assert path, "Either ~/.neo_config or NEO_CONFIG env var or --config parameter, is required."

    config = toml.load(os.path.expanduser(path))
    assert config, "Unable to parse config file"

    # Set default values

    if "model" not in config.keys():
        config["model"] = {}

    if "ts" not in config["model"].keys():
        config["model"]["ts"] = (512, 512)

    if "train" not in config.keys():
        config["train"] = {}

    if "pretrained" not in config["train"].keys():
        config["train"]["pretrained"] = True

    if "bs" not in config["train"].keys():
        config["train"]["bs"] = 4

    if "auth" not in config.keys():
        config["auth"] = {}

    if "da" in config["train"].keys():
        config["train"]["da"] = dict(config["train"]["da"])  # dict is serializable

    if "optimizer" in config["train"].keys():
        config["train"]["optimizer"] = dict(config["train"]["optimizer"])  # dict is serializable
    else:
        config["train"]["optimizer"] = {"name": "Adam", "lr": 0.0001}

    assert "classes" in config.keys(), "CONFIG: Classes are mandatory"
    # FIX: default class weights. The original assigned a default weight first
    # and then tested `"weight" not in ...`, which was always False by then,
    # so transparent classes could never receive their intended 0.0 default.
    for classe in config["classes"]:
        if "weight" not in classe.keys():
            classe["weight"] = 0.0 if classe["color"] == "transparent" else 1.0

    return config


def check_channels(config):
    """Check that the config declares at least one input channel. Raise AssertionError if not."""

    assert "channels" in config, "CONFIG: At least one Channel is mandatory"

    # TODO: validate each channel's name and bands


def check_classes(config):
    """Check if config file classes subpart is consistent. Exit on error if not."""

    assert "classes" in config and len(config["classes"]) >= 2, "CONFIG: At least 2 Classes are mandatory"

    for classe in config["classes"]:
        title_ok = "title" in classe and len(classe["title"])
        assert title_ok, "CONFIG: Missing or Empty classes.title value"
        color_ok = "color" in classe and check_color(classe["color"])
        assert color_ok, "CONFIG: Missing or Invalid classes.color value"


def check_model(config):
    """Check that the config declares a model section. Raise AssertionError if not."""

    assert "model" in config, "CONFIG: Missing or Invalid model"

    # TODO: validate nn name, encoder and tile size


#
# Logs
#
class Logs:
    """Minimal logger, writing messages to an optional file and an optional stream."""

    def __init__(self, path, out=sys.stderr):
        """Create a logs instance on a logs file.

        path: logs file path, opened in append mode (no file logging if falsy)
        out:  stream to echo messages to (default: stderr; None to disable)
        """

        self.fp = None
        self.out = out
        if path:
            log_dir = os.path.dirname(path)
            if not os.path.isdir(log_dir):
                os.makedirs(log_dir, exist_ok=True)
            self.fp = open(path, mode="a")

    def log(self, msg):
        """Log a new message to the opened logs file, and optionally to the output stream too."""
        if self.fp:
            self.fp.write(msg + os.linesep)
            self.fp.flush()  # flush eagerly so logs survive a crash

        if self.out:
            print(msg, file=self.out)


#
# Colors
#
def make_palette(colors, complementary=False):
    """Builds a PNG PIL color palette from Classes CSS3 color names, or hex values patterns as #RRGGBB.

    Returns (palette, transparency): palette is a flat [R,G,B, R,G,B, ...] list,
    transparency is the index of the first "transparent" class, or None.
    """

    assert 0 < len(colors) < 256

    # FIX: detect "transparent" case-insensitively, consistently with the
    # white substitution below (the original matched the exact lowercase
    # string only, so "Transparent" got no transparency index).
    transparency = None
    for key, color in enumerate(colors):
        if color.lower() == "transparent":
            transparency = key
            break

    colors = ["white" if color.lower() == "transparent" else color for color in colors]
    hex_colors = [webcolors.CSS3_NAMES_TO_HEX[color.lower()] if color[0] != "#" else color for color in colors]
    rgb_colors = [(int(h[1:3], 16), int(h[3:5], 16), int(h[5:7], 16)) for h in hex_colors]

    palette = list(sum(rgb_colors, ()))  # flatten
    palette = palette if not complementary else complementary_palette(palette)

    return palette, transparency


def complementary_palette(palette):
    """Creates a PNG PIL complementary colors palette based on an initial PNG PIL palette."""

    result = []
    for i in range(0, len(palette), 3):
        r, g, b = palette[i : i + 3]
        h, s, v = colorsys.rgb_to_hsv(r, g, b)
        # Shift the hue by half a turn to get the complementary color
        result.extend(map(int, colorsys.hsv_to_rgb((h + 0.5) % 1, s, v)))

    return result


def check_color(color):
    """Check if an input color is or not valid (i.e CSS3 color name, transparent, or #RRGGBB)."""

    normalized = "white" if color.lower() == "transparent" else color
    hex_color = normalized if normalized[0] == "#" else webcolors.CSS3_NAMES_TO_HEX[normalized.lower()]
    return bool(re.match(r"^#([0-9a-fA-F]){6}$", hex_color))


#
# Web UI
#
def web_ui(out, base_url, coverage_tiles, selected_tiles, ext, template, union_tiles=True):
    """Generate a static Web UI in the *out* directory from HTML templates.

    out:            output directory path
    base_url:       value substituted for {{base_url}} in templates
    coverage_tiles: iterable of tiles; the first one is used to center the map
    selected_tiles: tiles written to tiles.json and substituted for {{tiles}}
    ext:            tiles file extension, substituted for {{ext}}
    template:       optional extra template file path (also symlinked as index.html)
    union_tiles:    if True, union adjacent tiles in the GeoJSON output
    """

    out = os.path.expanduser(out)
    template = os.path.expanduser(template)

    # Built-in templates shipped with the package, plus the user template if it exists
    templates = glob.glob(os.path.join(Path(__file__).parent, "web_ui", "*"))
    if os.path.isfile(template):
        templates.append(template)
    if os.path.lexists(os.path.join(out, "index.html")):
        os.remove(os.path.join(out, "index.html"))  # if already existing output dir, as symlink can't be overwritten
    os.symlink(os.path.basename(template), os.path.join(out, "index.html"))

    def process_template(template):
        # Substitute {{placeholders}} and write the result into the output dir
        web_ui = open(template, "r").read()
        web_ui = re.sub("{{base_url}}", base_url, web_ui)
        web_ui = re.sub("{{ext}}", ext, web_ui)
        web_ui = re.sub("{{tiles}}", "tiles.json" if selected_tiles else "''", web_ui)

        if coverage_tiles:
            tile = list(coverage_tiles)[0]  # Could surely be improved, but for now, took the first tile to center on
            x, y, z = map(int, [tile.x, tile.y, tile.z])
            web_ui = re.sub("{{zoom}}", str(z), web_ui)
            web_ui = re.sub("{{center}}", str(list(tile_pixel_to_location(tile, 0.5, 0.5))[::-1]), web_ui)

        with open(os.path.join(out, os.path.basename(template)), "w", encoding="utf-8") as fp:
            fp.write(web_ui)

    for template in templates:
        process_template(template)

    if selected_tiles:
        with open(os.path.join(out, "tiles.json"), "w", encoding="utf-8") as fp:
            fp.write(tiles_to_geojson(selected_tiles, union_tiles))

+ 0
- 0
neat_eo/da/__init__.py View File


+ 43
- 0
neat_eo/da/core.py View File

@@ -0,0 +1,43 @@
"""PyTorch-compatible Data Augmentation."""

import sys
import cv2
import torch
import numpy as np
from importlib import import_module


def to_tensor(config, ts, image, mask=None, da=False, resize=False):
    """Convert an image (and optionally a mask) to PyTorch tensors.

    config: config dict; only config["train"]["da"]["name"] is read, when da=True
    ts:     (W, H) target size, used only when resize=True
    image:  H,W,C numpy array
    mask:   H,W numpy array label, optional
    da:     if True, apply the configured data augmentation (requires mask)
    resize: if True, resize image (bilinear) and mask (nearest) to ts

    Returns the image tensor (C,H,W float), or (image, mask) if a mask is given
    (mask as a long tensor).
    """

    assert len(ts) == 2  # W,H
    assert image is not None

    if da:
        assert mask is not None

        try:
            module = import_module("neat_eo.da.{}".format(config["train"]["da"]["name"].lower()))
        except:
            sys.exit("Unable to load data augmentation module")

        transform = module.transform(config, image, mask)

        # FIX: resize must be applied to the augmented outputs. The original
        # resized the raw inputs and then discarded the results (transform[...]
        # overwrote them on the next line), and assigned the IMAGE to the mask
        # (`... if resize else image`) by mistake.
        image = transform["image"]
        image = cv2.resize(image, ts, interpolation=cv2.INTER_LINEAR) if resize else image
        image = torch.from_numpy(np.moveaxis(image, 2, 0)).float()

        mask = transform["mask"]
        mask = cv2.resize(mask, ts, interpolation=cv2.INTER_NEAREST) if resize else mask
        mask = torch.from_numpy(mask).long()
        return image, mask

    image = cv2.resize(image, ts, interpolation=cv2.INTER_LINEAR) if resize else image
    image = torch.from_numpy(np.moveaxis(image, 2, 0)).float()

    if mask is None:
        return image

    mask = cv2.resize(mask, ts, interpolation=cv2.INTER_NEAREST) if resize else mask
    mask = torch.from_numpy(mask).long()
    return image, mask

+ 39
- 0
neat_eo/da/rgb.py View File

@@ -0,0 +1,39 @@
from albumentations import (
Compose,
IAAAdditiveGaussianNoise,
GaussNoise,
OneOf,
Flip,
Transpose,
MotionBlur,
Blur,
ShiftScaleRotate,
IAASharpen,
IAAEmboss,
RandomBrightnessContrast,
MedianBlur,
HueSaturationValue,
)


def transform(config, image, mask):
    """Apply the RGB data augmentation pipeline to an *image*/*mask* pair.

    The whole-pipeline probability is read from config["train"]["da"]["p"]
    and defaults to 1 when unset.
    """

    try:
        # FIX: the key is "da" (cf config [train]: da = {name="RGB", p=...});
        # the original read a non-existent "dap" key, so the configured
        # probability was always silently ignored (p stayed 1).
        p = config["train"]["da"]["p"]
    except (KeyError, TypeError):
        p = 1

    assert 0 <= p <= 1

    # Inspired by: https://albumentations.readthedocs.io/en/latest/examples.html
    return Compose(
        [
            Flip(),
            Transpose(),
            OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
            OneOf([MotionBlur(p=0.2), MedianBlur(blur_limit=3, p=0.1), Blur(blur_limit=3, p=0.1)], p=0.2),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            OneOf([IAASharpen(), IAAEmboss(), RandomBrightnessContrast()], p=0.3),
            HueSaturationValue(p=0.3),
        ]
    )(image=image, mask=mask, p=p)

+ 82
- 0
neat_eo/geojson.py View File

@@ -0,0 +1,82 @@
from rasterio.crs import CRS
from rasterio.warp import transform_geom
from rasterio.features import rasterize
from rasterio.transform import from_bounds

import mercantile
from supermercado import burntiles
from shapely.geometry import shape, mapping

from neat_eo.tiles import tile_bbox


def geojson_parse_feature(zoom, srid, feature_map, feature, buffer):
    """Index a GeoJSON feature into *feature_map*, keyed by the tiles its polygons cover at *zoom*.

    zoom:        tile zoom level used to burn geometries into tiles
    srid:        EPSG code of the feature coordinates
    feature_map: mapping mercantile.Tile -> list of features; must append on
                 missing keys (presumably a defaultdict(list) — TODO confirm with callers)
    feature:     GeoJSON feature (Polygon, MultiPolygon or GeometryCollection)
    buffer:      optional buffer distance, applied in EPSG:3857 (meters)

    Returns the updated feature_map.
    """

    def geojson_parse_polygon(zoom, srid, feature_map, polygon):

        # Normalize every ring point down to [x, y], dropping extra dimensions
        if isinstance(polygon["coordinates"], list):  # https://github.com/Toblerity/Shapely/issues/245
            for i, ring in enumerate(polygon["coordinates"]):  # GeoJSON coordinates could be N dimensionals
                polygon["coordinates"][i] = [[x, y] for point in ring for x, y in zip([point[0]], [point[1]])]

        # burntiles expects WGS84 coordinates
        if srid != 4326:
            try:
                polygon = transform_geom(CRS.from_epsg(srid), CRS.from_epsg(4326), polygon)
            except:  # negative buffer could lead to empty/invalid geom
                return feature_map

        # Register the polygon on every tile it covers; best-effort on invalid geometries
        try:
            for tile in burntiles.burn([{"type": "feature", "geometry": polygon}], zoom=zoom):
                feature_map[mercantile.Tile(*tile)].append({"type": "feature", "geometry": polygon})
        except:
            pass

        return feature_map

    def geojson_parse_geometry(zoom, srid, feature_map, geometry, buffer):
        if buffer:
            geometry = transform_geom(CRS.from_epsg(srid), CRS.from_epsg(3857), geometry)  # be sure to be planar
            geometry = mapping(shape(geometry).buffer(buffer))
            srid = 3857

        if geometry["type"] == "Polygon":
            feature_map = geojson_parse_polygon(zoom, srid, feature_map, geometry)

        elif geometry["type"] == "MultiPolygon":
            # Burn each sub-polygon independently
            for polygon in geometry["coordinates"]:
                feature_map = geojson_parse_polygon(zoom, srid, feature_map, {"type": "Polygon", "coordinates": polygon})

        return feature_map

    # Skip empty or geometry-less features
    if not feature or not feature["geometry"]:
        return feature_map

    if feature["geometry"]["type"] == "GeometryCollection":
        for geometry in feature["geometry"]["geometries"]:
            feature_map = geojson_parse_geometry(zoom, srid, feature_map, geometry, buffer)
    else:
        feature_map = geojson_parse_geometry(zoom, srid, feature_map, feature["geometry"], buffer)

    return feature_map


def geojson_srid(feature_collection):
    """Return the EPSG SRID declared by a GeoJSON FeatureCollection, defaulting to 4326 (WGS84).

    Understands the legacy "crs" member; "CRS84" and "900913" aliases are
    mapped to 4326 and 3857 respectively.
    """

    crs_mapping = {"CRS84": "4326", "900913": "3857"}

    try:
        srid = feature_collection["crs"]["properties"]["name"].split(":")[-1]
        srid = int(srid) if srid not in crs_mapping else int(crs_mapping[srid])
    # FIX: narrowed from a bare except, which would also have swallowed
    # KeyboardInterrupt/SystemExit; any malformed or missing crs falls back.
    except (KeyError, TypeError, AttributeError, ValueError, IndexError):
        srid = 4326

    return srid


def geojson_tile_burn(tile, features, srid, ts, burn_value=1):
    """Burn tile with GeoJSON features.

    tile:       tile to rasterize (its mercator bbox defines the transform)
    features:   iterable of GeoJSON features (each with a "geometry" member)
    srid:       EPSG code of the features coordinates
    ts:         output raster shape
    burn_value: pixel value assigned to covered cells [default: 1]

    Returns the rasterized array of shape *ts*, or None on failure
    (e.g. empty or invalid geometries) — deliberate best-effort.
    """

    crs = (CRS.from_epsg(srid), CRS.from_epsg(3857))
    # Lazily reproject each geometry to WebMercator, paired with the burn value
    shapes = ((transform_geom(*crs, feature["geometry"]), burn_value) for feature in features)

    try:
        return rasterize(shapes, out_shape=ts, transform=from_bounds(*tile_bbox(tile, mercator=True), *ts))
    except:  # best-effort: any rasterization error yields None rather than raising
        return None

+ 0
- 0
neat_eo/loaders/__init__.py View File


+ 93
- 0
neat_eo/loaders/semseg.py View File

@@ -0,0 +1,93 @@
"""PyTorch-compatible datasets. Cf: https://pytorch.org/docs/stable/data.html """

import os
import numpy as np
import torch.utils.data

from neat_eo.da.core import to_tensor
from neat_eo.tiles import tiles_from_dir, tile_image_from_file, tile_label_from_file, tile_image_buffer, tile_is_neighboured


class SemSeg(torch.utils.data.Dataset):
    """Semantic Segmentation dataset.

    Serves (image, mask, tile, weight) samples in train/eval modes and
    (image, xyz) in predict mode, reading tiles from an XYZ directory layout
    with one sub-directory per configured channel (plus "labels" for
    train/eval).
    """

    def __init__(self, config, ts, root, cover=None, tiles_weights=None, mode=None, metatiles=False, keep_borders=False):
        """
        config:        loaded config dict (reads channels, classes, train.da)
        ts:            (W, H) tile size
        root:          dataset root directory
        cover:         optional tiles cover to filter the dataset on
        tiles_weights: optional mapping tile -> weight
        mode:          one of "train", "eval", "predict"
        metatiles:     if True, images are built buffered with neighbouring tiles
        keep_borders:  with metatiles, also keep tiles lacking a full neighbourhood
        """
        super().__init__()

        self.mode = mode
        self.config = config
        self.tiles_weights = tiles_weights
        self.metatiles = metatiles
        # Data augmentation is active only when configured with probability > 0
        self.da = True if "da" in self.config["train"].keys() and self.config["train"]["da"]["p"] > 0.0 else False

        assert mode in ["train", "eval", "predict"]

        # The first channel's directory acts as the reference tile list
        path = os.path.join(root, config["channels"][0]["name"])
        self.tiles_paths = [(tile, path) for tile, path in tiles_from_dir(path, cover=cover, xyz_path=True)]
        if metatiles:
            self.metatiles_paths = self.tiles_paths
            if not keep_borders:
                # Keep only tiles whose neighbourhood is complete, to avoid margin effects
                self.tiles_paths = [
                    (tile, path) for tile, path in self.metatiles_paths if tile_is_neighboured(tile, self.metatiles_paths)
                ]
        self.cover = {tile for tile, path in self.tiles_paths}
        assert len(self.tiles_paths), "Empty Dataset"

        self.tiles = {}
        num_channels = 0
        for channel in config["channels"]:
            path = os.path.join(root, channel["name"])
            self.tiles[channel["name"]] = [
                (tile, path) for tile, path in tiles_from_dir(path, cover=self.cover, xyz_path=True)
            ]
            num_channels += len(channel["bands"])

        # Input stacks every band of every channel; output has one plane per class
        self.shape_in = (num_channels,) + tuple(ts)  # C,W,H
        self.shape_out = (len(config["classes"]),) + tuple(ts)  # C,W,H

        if self.mode in ["train", "eval"]:
            path = os.path.join(root, "labels")
            self.tiles["labels"] = [(tile, path) for tile, path in tiles_from_dir(path, cover=self.cover, xyz_path=True)]

            # Sorting aligns channel lists and labels so index i maps to the same tile everywhere
            for channel in config["channels"]:  # Order images and labels accordingly
                self.tiles[channel["name"]].sort(key=lambda tile: tile[0])
            self.tiles["labels"].sort(key=lambda tile: tile[0])

        assert len(self.tiles), "Empty Dataset"

    def __len__(self):
        # One sample per (filtered) reference tile
        return len(self.tiles_paths)

    def __getitem__(self, i):
        """Return (image, mask, tile, weight) in train/eval mode, (image, IntTensor([x, y, z])) in predict mode."""

        tile = None
        mask = None
        image = None

        for channel in self.config["channels"]:

            image_channel = None
            tile, path = self.tiles[channel["name"]][i]
            bands = None if not channel["bands"] else channel["bands"]

            if self.metatiles:
                # Read the tile together with its neighbours' content
                image_channel = tile_image_buffer(tile, self.metatiles_paths, bands)
            else:
                image_channel = tile_image_from_file(path, bands)

            assert image_channel is not None, "Dataset channel {} not retrieved: {}".format(channel["name"], path)

            # Stack channels along the band axis
            image = np.concatenate((image, image_channel), axis=2) if image is not None else image_channel

        if self.mode in ["train", "eval"]:
            # Relies on the sort performed in __init__ to match images and labels
            assert tile == self.tiles["labels"][i][0], "Dataset mask inconsistency"

            mask = tile_label_from_file(self.tiles["labels"][i][1])
            assert mask is not None, "Dataset mask not retrieved"

            weight = self.tiles_weights[tile] if self.tiles_weights is not None and tile in self.tiles_weights else 1.0

            image, mask = to_tensor(self.config, self.shape_in[1:3], image, mask=mask, da=self.da)
            return image, mask, tile, weight

        if self.mode in ["predict"]:
            image = to_tensor(self.config, self.shape_in[1:3], image, resize=False, da=False)
            return image, torch.IntTensor([tile.x, tile.y, tile.z])

+ 0
- 0
neat_eo/losses/__init__.py View File


+ 47
- 0
neat_eo/losses/lovasz.py View File

@@ -0,0 +1,47 @@
import torch
import torch.nn as nn


class Lovasz(nn.Module):
    """Lovasz Loss. Cf: https://arxiv.org/abs/1705.08790 """

    def __init__(self):
        super().__init__()

    def forward(self, inputs, targets, classes_weights, tiles_weights, config):
        """Compute the weighted Lovasz loss over a batch.

        inputs:          N,C,H,W predicted class probabilities
        targets:         N,H,W ground-truth class indices
        classes_weights: per-class weights, len == C; a 0.0 weight skips the class
        tiles_weights:   per-sample weights, iterated in lockstep with the batch
        config:          unused here; kept for loss-interface consistency
        """

        N, C, H, W = inputs.size()
        assert C >= 2, "Classification imply at least two Classes"
        assert len(classes_weights) == C, "Classes Weights mismatch Classes"

        loss = 0.0
        # NOTE(review): this counts contributing (sample, class) PAIRS, not
        # classes, and the result is further divided by N below — confirm the
        # double normalization is intended.
        non_empty_C = 0

        for c in range(C):

            # Class explicitly disabled by a zero weight
            if classes_weights[c] == 0.0:
                continue

            inputs_class = inputs[:, c]
            masks = (targets == c).float()

            for mask, input_class, tile_weight in zip(masks.view(N, -1), inputs_class.view(N, -1), tiles_weights):

                # Skip samples where the class is absent and barely predicted
                if mask.sum() == 0 and (input_class > 0.25).sum() == 0:
                    continue

                # Lovasz extension: sort prediction errors in decreasing order
                distance = (mask - input_class).abs()
                distance_sorted, indices = torch.sort(distance, 0, descending=True)
                mask_sorted = mask[indices.data]

                inter = mask_sorted.sum() - mask_sorted.cumsum(0)
                union = mask_sorted.sum() + (1.0 - mask_sorted).cumsum(0)
                iou = 1.0 - inter / union

                # Discrete gradient of the IoU curve, used to weight sorted errors
                p = len(mask_sorted)
                iou[1:p] = iou[1:p] - iou[0:-1]

                loss += torch.dot(distance_sorted, iou) * tile_weight * classes_weights[c]
                non_empty_C += 1

        # NOTE(review): raises ZeroDivisionError if every (sample, class) pair was skipped
        return loss / N / non_empty_C

+ 16
- 0
neat_eo/metrics/IoU.py View File

@@ -0,0 +1,16 @@
from neat_eo.metrics.core import confusion


def get(label, predicted, config=None):
    """Intersection over Union: tp / (tp + fp + fn).

    Returns NaN when the class is absent from both *label* and *predicted*
    (nothing to intersect nor to union). *config* is unused; kept for the
    common metrics interface.
    """

    tn, fn, fp, tp = confusion(label, predicted)
    if tp == 0 and fp == 0 and fn == 0:
        return float("NaN")

    # The guard above guarantees tp + fp + fn > 0, so the former
    # assert / ZeroDivisionError handling around this division was dead code.
    return float(tp / (fp + fn + tp))

+ 16
- 0
neat_eo/metrics/MCC.py View File

@@ -0,0 +1,16 @@
import math
from neat_eo.metrics.core import confusion


def get(label, predicted, config=None):
    """Matthews Correlation Coefficient.

    Returns NaN when the class is absent everywhere, or when the coefficient
    is undefined (a zero denominator). *config* is unused; kept for the
    common metrics interface.
    """

    tn, fn, fp, tp = confusion(label, predicted)
    if tp == 0 and fp == 0 and fn == 0:
        return float("NaN")

    numerator = tp * tn - fp * fn
    try:
        return numerator / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    except ZeroDivisionError:
        return float("NaN")

+ 23
- 0
neat_eo/metrics/QoD.py View File

@@ -0,0 +1,23 @@
import torch
import math

from neat_eo.metrics.core import confusion


def get(label, mask, config=None):
    """Quality of Detection: blends IoU distance with the detected-surface ratio, in [0, 1].

    *config* is unused; kept for the common metrics interface.
    """

    tn, fn, fp, tp = confusion(label, mask)

    try:
        iou = tp / (tp + fn + fp)
    except ZeroDivisionError:
        iou = float("NaN")

    W, H = mask.size()
    surface = torch.max(torch.sum(mask.float()), torch.sum(label.float()))
    ratio = float(100 * surface / (W * H))

    # A NaN iou means the class is absent from both tensors: distance is zero
    dist = 1.0 - iou if iou == iou else 0.0

    qod = 100 - (dist * (math.log(ratio + 1.0) + 1e-7) * (100 / math.log(100)))
    if qod < 0.0:  # Corner case prophylaxy
        qod = 0.0

    return qod / 100.0

+ 0
- 0
neat_eo/metrics/__init__.py View File


+ 59
- 0
neat_eo/metrics/core.py View File

@@ -0,0 +1,59 @@
import math
import torch
from neat_eo.core import load_module


class Metrics:
    """Accumulate per-class metric values over samples and report mean (μ) and deviation (σ)."""

    def __init__(self, metrics, classes, config=None):
        """metrics: list of metric names (modules under neat_eo.metrics); classes: list of class titles."""
        self.config = config
        self.classes = classes
        self.metrics = []
        for classe in classes:
            self.metrics.append({metric: [] for metric in metrics})
        self.modules = {metric: load_module("neat_eo.metrics." + metric) for metric in metrics}
        self.n = 0  # number of samples seen

    def add(self, label, output):
        """Compute and store every metric for one sample, per class."""
        assert self.modules
        assert self.metrics
        self.n += 1
        for metric, module in self.modules.items():
            for c, classe in enumerate(self.classes):
                mask = (output[c] > 0.5).float()  # binarize the class prediction
                self.metrics[c][metric].append(module.get(label, mask, self.config))

    def get(self):
        """Return, per class, each metric's mean μ and mean absolute deviation σ (NaN samples excluded)."""
        assert self.metrics

        results = []
        for c, classe in enumerate(self.classes):
            μ = {}
            σ = {}
            for metric, values in self.metrics[c].items():
                n = sum(1 for v in values if not math.isnan(v))

                try:
                    μ[metric] = sum(v for v in values if not math.isnan(v)) / n
                except ZeroDivisionError:
                    μ[metric] = float("NaN")

                try:
                    # FIX: this was a SET comprehension, which deduplicated equal
                    # deviations and under-counted them; a generator keeps every
                    # sample's |v - μ| in the sum.
                    σ[metric] = sum(math.sqrt((v - μ[metric]) ** 2) for v in values if not math.isnan(v)) / n
                except ZeroDivisionError:
                    σ[metric] = float("NaN")

            results.append({metric: {"μ": μ[metric], "σ": σ[metric]} for metric in self.metrics[c]})

        return results


def confusion(label, predicted):
    """Derive confusion counts from the element-wise ratio predicted / label.

    With binary {0, 1} tensors: 0/0 -> NaN, 1/0 -> +inf, 0/1 -> 0, 1/1 -> 1.

    NOTE(review): by the usual convention predicted=1/label=0 is a false
    positive, yet this count is returned as ``fn`` (and 0/1 as ``fp``). The
    known callers (IoU, MCC, QoD) are symmetric in fn↔fp, so behavior is
    preserved here — confirm intent before renaming.
    """

    ratio = predicted.view(-1).float() / label.view(-1).float()

    tn = torch.isnan(ratio).sum().item()
    fn = (ratio == float("inf")).sum().item()
    fp = (ratio == 0).sum().item()
    tp = (ratio == 1).sum().item()

    return tn, fn, fp, tp

+ 0
- 0
neat_eo/nn/__init__.py View File


+ 105
- 0
neat_eo/nn/albunet.py View File

@@ -0,0 +1,105 @@
import torch
import torch.nn as nn

from neat_eo.core import load_module


class ConvRelu(nn.Module):
    """3x3 same-padding convolution (no bias) followed by an in-place ReLU."""

    def __init__(self, num_in, num_out):
        super().__init__()
        self.block = nn.Conv2d(num_in, num_out, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        convolved = self.block(x)
        return nn.functional.relu(convolved, inplace=True)


class DecoderBlock(nn.Module):
    """Doubles spatial resolution via nearest-neighbor upsampling, then ConvRelu."""

    def __init__(self, num_in, num_out):
        super().__init__()
        self.block = ConvRelu(num_in, num_out)

    def forward(self, x):
        upsampled = nn.functional.interpolate(x, scale_factor=2, mode="nearest")
        return self.block(upsampled)


class Albunet(nn.Module):
    """U-Net style encoder-decoder with a ResNet, ResNeXt or WideResNet encoder."""

    def __init__(self, shape_in, shape_out, encoder="resnet50", train_config=None):
        """
        shape_in: input tile shape, channels first; only shape_in[0] (C) is read.
        shape_out: output mask shape, channels first; only shape_out[0] is read.
        encoder: torchvision resnet-family model name.
        train_config: optional dict; only the "pretrained" key is read here.
        """
        super().__init__()

        doc = "U-Net like encoder-decoder architecture with a ResNet, ResNext or WideResNet encoder.\n\n"
        doc += " - https://arxiv.org/abs/1505.04597 - U-Net: Convolutional Networks for Biomedical Image Segmentation\n"

        if encoder in ["resnet50", "resnet101", "resnet152"]:
            doc += " - https://arxiv.org/abs/1512.03385 - Deep Residual Learning for Image Recognition\n"
        elif encoder in ["resnext50_32x4d", "resnext101_32x8d"]:
            doc += " - https://arxiv.org/pdf/1611.05431 - Aggregated Residual Transformations for DNN\n"
        elif encoder in ["wide_resnet50_2", "wide_resnet101_2"]:
            doc += " - https://arxiv.org/abs/1605.07146 - Wide Residual Networks\n"
        else:
            encoders = "resnet50, resnet101, resnet152, resnext50_32x4d, resnext101_32x8d, wide_resnet50_2, wide_resnet101_2"
            assert False, "Albunet, expects as encoder: " + encoders

        self.version = 2
        self.doc_string = doc

        num_filters = 32
        num_channels = shape_in[0]
        num_classes = shape_out[0]

        assert num_channels, "Empty Channels"
        assert num_classes, "Empty Classes"

        # BUG FIX: this was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt. Only the expected failure modes are caught:
        # train_config is None (TypeError) or lacks the key (KeyError).
        try:
            pretrained = train_config["pretrained"]
        except (TypeError, KeyError):
            pretrained = False

        models = load_module("torchvision.models")
        self.encoder = getattr(models, encoder)(pretrained=pretrained)
        # https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py

        if num_channels != 3:
            # Rebuild the stem conv for non-RGB inputs; reuse the pretrained RGB
            # kernels for the first min(num_channels, 3) channels when available.
            weights = nn.init.kaiming_normal_(torch.zeros((64, num_channels, 7, 7)), mode="fan_out", nonlinearity="relu")
            if pretrained:
                for c in range(min(num_channels, 3)):
                    weights.data[:, c, :, :] = self.encoder.conv1.weight.data[:, c, :, :]
            self.encoder.conv1 = nn.Conv2d(num_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
            self.encoder.conv1.weight = nn.Parameter(weights)

        self.center = DecoderBlock(2048, num_filters * 8)

        # Decoder stages concatenate the matching encoder feature map (skip
        # connections) before upsampling; channel counts follow resnet50-family
        # layer outputs (2048/1024/512/256).
        self.dec0 = DecoderBlock(2048 + num_filters * 8, num_filters * 8)
        self.dec1 = DecoderBlock(1024 + num_filters * 8, num_filters * 8)
        self.dec2 = DecoderBlock(512 + num_filters * 8, num_filters * 2)
        self.dec3 = DecoderBlock(256 + num_filters * 2, num_filters * 2 * 2)
        self.dec4 = DecoderBlock(num_filters * 2 * 2, num_filters)
        self.dec5 = ConvRelu(num_filters, num_filters)

        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)

    def forward(self, x):
        """Return raw per-class logits with the same spatial size as `x`."""

        enc0 = self.encoder.conv1(x)
        enc0 = self.encoder.bn1(enc0)
        enc0 = self.encoder.relu(enc0)
        enc0 = self.encoder.maxpool(enc0)

        enc1 = self.encoder.layer1(enc0)
        enc2 = self.encoder.layer2(enc1)
        enc3 = self.encoder.layer3(enc2)
        enc4 = self.encoder.layer4(enc3)

        center = self.center(nn.functional.max_pool2d(enc4, kernel_size=2, stride=2))

        dec0 = self.dec0(torch.cat([enc4, center], dim=1))
        dec1 = self.dec1(torch.cat([enc3, dec0], dim=1))
        dec2 = self.dec2(torch.cat([enc2, dec1], dim=1))
        dec3 = self.dec3(torch.cat([enc1, dec2], dim=1))
        dec4 = self.dec4(dec3)
        dec5 = self.dec5(dec4)

        return self.final(dec5)

+ 0
- 0
neat_eo/osm/__init__.py View File


+ 42
- 0
neat_eo/osm/building.py View File

@@ -0,0 +1,42 @@
import sys
import osmium
import geojson
import shapely.geometry


class BuildingHandler(osmium.SimpleHandler):
    """Extracts building polygon features"""

    def __init__(self):
        super().__init__()
        self.features = []

    def way(self, w):
        """Collect each valid closed building way as a GeoJSON Polygon feature."""

        # A polygon needs a closed ring with at least four nodes (first == last).
        if not w.is_closed() or len(w.nodes) < 4:
            return

        tags = dict(w.tags)

        # Keep only ways tagged as building or construction.
        if {"building", "construction"}.isdisjoint(tags):
            return

        # Skip building types not useful here (returns None when untagged).
        if tags.get("building") in {"houseboat", "static_caravan", "stadium", "digester", "ruins"}:
            return

        # Skip features not visible from above.
        if tags.get("location") in {"underground", "underwater"}:
            return

        geometry = geojson.Polygon([[(n.lon, n.lat) for n in w.nodes]])
        shape = shapely.geometry.shape(geometry)

        if not shape.is_valid:
            print("Warning: invalid feature: https://www.openstreetmap.org/way/{}".format(w.id), file=sys.stderr)
            return

        self.features.append(geojson.Feature(geometry=geometry))

    def save(self, out):
        """Write the collected features to `out` as a GeoJSON FeatureCollection."""

        with open(out, "w") as fp:
            geojson.dump(geojson.FeatureCollection(self.features), fp)

+ 117
- 0
neat_eo/osm/road.py View File

@@ -0,0 +1,117 @@
import sys

import math
import osmium
import geojson
import shapely.geometry


class RoadHandler(osmium.SimpleHandler):
    """Extracts road polygon features (visible in satellite imagery) from the map.
    """

    # Per highway class: default lane count (per direction) and widths in meters.
    highway_attributes = {
        "motorway": {"lanes": 4, "lane_width": 3.75, "left_hard_shoulder_width": 0.75, "right_hard_shoulder_width": 3.0},
        "trunk": {"lanes": 3, "lane_width": 3.75, "left_hard_shoulder_width": 0.75, "right_hard_shoulder_width": 3.0},
        "primary": {"lanes": 2, "lane_width": 3.75, "left_hard_shoulder_width": 0.50, "right_hard_shoulder_width": 1.50},
        "secondary": {"lanes": 1, "lane_width": 3.50, "left_hard_shoulder_width": 0.00, "right_hard_shoulder_width": 0.75},
        "tertiary": {"lanes": 1, "lane_width": 3.50, "left_hard_shoulder_width": 0.00, "right_hard_shoulder_width": 0.75},
        "unclassified": {"lanes": 1, "lane_width": 3.50, "left_hard_shoulder_width": 0.00, "right_hard_shoulder_width": 0.00},
        "residential": {"lanes": 1, "lane_width": 3.50, "left_hard_shoulder_width": 0.00, "right_hard_shoulder_width": 0.75},
        "service": {"lanes": 1, "lane_width": 3.00, "left_hard_shoulder_width": 0.00, "right_hard_shoulder_width": 0.00},
        "motorway_link": {"lanes": 2, "lane_width": 3.75, "left_hard_shoulder_width": 0.75, "right_hard_shoulder_width": 3.00},
        "trunk_link": {"lanes": 2, "lane_width": 3.75, "left_hard_shoulder_width": 0.50, "right_hard_shoulder_width": 1.50},
        "primary_link": {"lanes": 1, "lane_width": 3.50, "left_hard_shoulder_width": 0.00, "right_hard_shoulder_width": 0.75},
        "secondary_link": {"lanes": 1, "lane_width": 3.50, "left_hard_shoulder_width": 0.00, "right_hard_shoulder_width": 0.75},
        "tertiary_link": {"lanes": 1, "lane_width": 3.50, "left_hard_shoulder_width": 0.00, "right_hard_shoulder_width": 0.00},
    }

    road_filter = set(highway_attributes.keys())

    EARTH_MEAN_RADIUS = 6371004.0

    def __init__(self):
        super().__init__()
        self.features = []

    def way(self, w):
        """Turn a drivable way into a buffered polygon feature sized by its width."""

        if "highway" not in w.tags:
            return

        highway = w.tags["highway"]
        if highway not in self.road_filter:
            return

        attributes = self.highway_attributes[highway]
        lanes = attributes["lanes"]

        # The table stores lanes per direction; two-way roads get double.
        if "oneway" not in w.tags or w.tags["oneway"] == "no":
            lanes = lanes * 2

        if "lanes" in w.tags:
            try:
                # Roads have at least one lane; guard against data issues.
                lanes = max(int(w.tags["lanes"]), 1)

                # Todo: take into account related lane tags
                # https://wiki.openstreetmap.org/wiki/Tag:busway%3Dlane
                # https://wiki.openstreetmap.org/wiki/Tag:cycleway%3Dlane
                # https://wiki.openstreetmap.org/wiki/Key:parking:lane
            except ValueError:
                print("Warning: invalid feature: https://www.openstreetmap.org/way/{}".format(w.id), file=sys.stderr)

        road_width = (
            attributes["left_hard_shoulder_width"]
            + attributes["lane_width"] * lanes
            + attributes["right_hard_shoulder_width"]
        )

        if "width" in w.tags:
            try:
                # At least one meter wide, for road classes specified above
                road_width = max(float(w.tags["width"]), 1.0)

                # Todo: handle optional units such as "2 m"
                # https://wiki.openstreetmap.org/wiki/Key:width
            except ValueError:
                print("Warning: invalid feature: https://www.openstreetmap.org/way/{}".format(w.id), file=sys.stderr)

        geometry = geojson.LineString([(n.lon, n.lat) for n in w.nodes])
        shape = shapely.geometry.shape(geometry)
        # Half-width in meters converted to an angular buffer around the centerline.
        buffered = shape.buffer(math.degrees(road_width / 2.0 / self.EARTH_MEAN_RADIUS))

        if shape.is_valid:
            self.features.append(geojson.Feature(geometry=shapely.geometry.mapping(buffered)))
        else:
            print("Warning: invalid feature: https://www.openstreetmap.org/way/{}".format(w.id), file=sys.stderr)

    def save(self, out):
        """Write the collected features to `out` as a GeoJSON FeatureCollection."""

        with open(out, "w") as fp:
            geojson.dump(geojson.FeatureCollection(self.features), fp)

+ 340
- 0
neat_eo/tiles.py View File

@@ -0,0 +1,340 @@
"""Slippy Map Tiles.
See: https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
"""

import io
import os
import re
import glob
import warnings

import numpy as np
from PIL import Image
from rasterio import open as rasterio_open
import cv2

import json
import psycopg2
import rasterio
import mercantile
import supermercado

warnings.simplefilter("ignore", UserWarning) # To prevent rasterio NotGeoreferencedWarning


def tile_pixel_to_location(tile, dx, dy):
    """Convert a fractional pixel offset (dx, dy in [0, 1]) inside `tile`
    to (lon, lat) by linear interpolation across the tile bounds."""

    assert 0 <= dx <= 1 and 0 <= dy <= 1, "x and y offsets must be in [0, 1]"

    west, south, east, north = mercantile.bounds(tile)

    lon = west + dx * (east - west)
    lat = south + dy * (north - south)

    return lon, lat


def tiles_from_csv(path, xyz=True, extra_columns=False):
    """Retrieve tiles from a line-delimited csv file.

    With xyz=True each row is x,y,z (plus optional float columns); yields
    mercantile.Tile or [Tile, *floats]. With xyz=False yields the first
    column (or [first, *floats]). Comma and tab separators both work.
    """

    assert os.path.isfile(os.path.expanduser(path)), "'{}' seems not a valid CSV file".format(path)

    with open(os.path.expanduser(path)) as fp:
        for line in fp:
            line = line.replace("\n", "")
            if not line:  # skip blank lines
                continue

            cols = re.split(",|\t", line)

            if xyz:
                assert len(cols) >= 3, "Invalid Cover"
                tile = mercantile.Tile(int(cols[0]), int(cols[1]), int(cols[2]))
                if not extra_columns or len(cols) == 3:
                    yield tile
                else:
                    yield [tile, *map(float, cols[3:])]
            else:
                assert len(cols) >= 1, "Invalid Cover"
                if extra_columns:
                    yield [cols[0], *map(float, cols[1:])]
                else:
                    yield cols[0]


def tiles_from_dir(root, cover=None, xyz=True, xyz_path=False):
    """Loads files from an on-disk dir.

    root: slippy-map directory (z/x/y.ext layout) when xyz is True,
          otherwise any directory tree.
    cover: optional container of mercantile.Tile; tiles not in it are skipped.
    xyz_path: when True (xyz mode only), yield (tile, path) pairs.

    Yields mercantile.Tile (or (tile, path)) in xyz mode, file paths otherwise.
    """
    root = os.path.expanduser(root)

    if xyz is True:
        paths = glob.glob(os.path.join(root, "[0-9]*/[0-9]*/[0-9]*.*"))

        for path in paths:
            tile_xyz = re.match(os.path.join(root, "(?P<z>[0-9]+)/(?P<x>[0-9]+)/(?P<y>[0-9]+).+"), path)
            if not tile_xyz:
                continue
            tile = mercantile.Tile(int(tile_xyz["x"]), int(tile_xyz["y"]), int(tile_xyz["z"]))

            if cover is not None and tile not in cover:
                continue

            if xyz_path is True:
                yield tile, path
            else:
                yield tile

    else:
        # BUG FIX: glob.glob takes a single pattern, not (root, pattern);
        # the previous call raised TypeError. Join the pattern onto root.
        paths = glob.glob(os.path.join(root, "**/*.*"), recursive=True)

        for path in paths:
            # BUG FIX: was `return path`, which terminated the generator
            # before yielding anything; yield each matching file instead.
            yield path


def tile_from_xyz(root, x, y, z):
"""Retrieve a single tile from a slippy map dir."""

path = glob.glob(os.path.join(os.path.expanduser(root), str(z), str(x), str(y) + ".*"))
if not path:
return None