forked from Alingsaskommun/ML-Marktäckedata
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtiny_byggnader.py
75 lines (67 loc) · 2.92 KB
/
tiny_byggnader.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
# tiny_byggnader.py (adapted from Raster Vision's tiny_spacenet.py example)
from os.path import join
from rastervision.core.rv_pipeline import *
from rastervision.core.backend import *
from rastervision.core.data import *
from rastervision.pytorch_backend import *
from rastervision.pytorch_learner import *
def get_config(runner):
    """Configure a semantic segmentation pipeline for building detection.

    Trains on a single scene and validates on another, using a ResNet-50
    backbone. Expects uint16 GeoTIFF imagery with GeoJSON building labels
    under /opt/data/data_input; pipeline output goes to /opt/data/output/.
    """
    root_uri = '/opt/data/output/'
    base_uri = '/opt/data/data_input'

    # One (image, labels) pair per split.
    train_image_uri = f'{base_uri}/train.tif'
    train_label_uri = f'{base_uri}/labels2.geojson'
    val_image_uri = f'{base_uri}/val_image2.tif'
    val_label_uri = f'{base_uri}/val_label2.geojson'

    channel_order = [0, 1, 2]
    class_config = ClassConfig(
        names=['building', 'background'], colors=['red', 'black'])

    def make_scene(scene_id, image_uri, label_uri):
        """Build a SceneConfig from one image and its GeoJSON labels.

        - StatsTransformer converts the uint16 pixel values to uint8.
        - The geoms carry no class_id property, so default_class_id=0
          marks every polygon as a building.
        - Labels arrive as vector GeoJSON; RasterizedSource burns them
          into a raster mask for semantic segmentation, filling the
          background (non-polygon) pixels with class 1 via
          background_class_id.
        """
        image_source = RasterioSourceConfig(
            uris=[image_uri],
            channel_order=channel_order,
            transformers=[StatsTransformerConfig()])
        geojson_labels = GeoJSONVectorSourceConfig(
            uri=label_uri, default_class_id=0, ignore_crs_field=True)
        mask_source = SemanticSegmentationLabelSourceConfig(
            raster_source=RasterizedSourceConfig(
                vector_source=geojson_labels,
                rasterizer_config=RasterizerConfig(background_class_id=1)))
        return SceneConfig(
            id=scene_id,
            raster_source=image_source,
            label_source=mask_source)

    dataset = DatasetConfig(
        class_config=class_config,
        train_scenes=[
            make_scene('scene_206', train_image_uri, train_label_uri)
        ],
        validation_scenes=[
            make_scene('scene_26', val_image_uri, val_label_uri)
        ])

    # PyTorch backend: randomly sampled 500x500 chips, 10 per scene.
    chip_sz = 500
    backend = PyTorchSemanticSegmentationConfig(
        model=SemanticSegmentationModelConfig(backbone=Backbone.resnet50),
        solver=SolverConfig(lr=1e-4, num_epochs=100, batch_sz=2))
    chip_options = SemanticSegmentationChipOptions(
        window_method=SemanticSegmentationWindowMethod.random_sample,
        chips_per_scene=10)

    return SemanticSegmentationConfig(
        root_uri=root_uri,
        dataset=dataset,
        backend=backend,
        train_chip_sz=chip_sz,
        predict_chip_sz=chip_sz,
        chip_options=chip_options)