diff --git a/ost/Project.py b/ost/Project.py index 747a23b..6ff3e80 100644 --- a/ost/Project.py +++ b/ost/Project.py @@ -25,10 +25,12 @@ from ost.helpers import vector as vec, raster as ras from ost.helpers import scihub, helpers as h, srtm, copdem +from ost.helpers import copernicus as cop from ost.helpers.settings import set_log_level, setup_logfile, OST_ROOT from ost.helpers.settings import check_ard_parameters -from ost.s1 import search, refine_inventory, download +from ost.s1 import search_data as search +from ost.s1 import refine_inventory, download from ost.s1 import burst_inventory, burst_batch from ost.s1 import grd_batch @@ -178,55 +180,23 @@ class Sentinel1(Generic): """ product_type = None - "TBD" - beam_mode = None - "TBD" - polarisation = None - "TBD" - inventory_file = None - "TBD" - inventory = None - "TBD" - refined_inventory_dict = None - "TBD" - coverages = None - "TBD" - burst_inventory = None - "TBD" - burst_inventory_file = None - "TBD" - - scihub_uname = None - "str: the scihub username" - - scihub_pword = None - "str: the scihub password" - + dataspace_uname = None + dataspace_pword = None asf_uname = None - "TBD" - asf_pword = None - "TBD" - peps_uname = None - "TBD" - peps_pword = None - "TBD" - onda_uname = None - "TBD" - onda_pword = None - "TBD" + def __init__( self, @@ -235,9 +205,9 @@ def __init__( start="2014-10-01", end=datetime.today().strftime(OST_DATEFORMAT), data_mount=None, - product_type="*", - beam_mode="*", - polarisation="*", + product_type=None, + beam_mode=None, + polarisation=None, log_level=logging.INFO, ): @@ -247,21 +217,21 @@ def __init__( # ------------------------------------------ # 2 Check and set product type - if product_type in ["*", "RAW", "SLC", "GRD"]: + if product_type in [None, "RAW", "SLC", "GRD"]: self.product_type = product_type else: - raise ValueError("Product type must be one out of '*', 'RAW', " "'SLC', 'GRD'") + raise ValueError("Product type must be one out of None, 'RAW', " "'SLC', 'GRD'") # ------------------------------------------ # 3 Check and set beam mode - if beam_mode in ["*", "IW", "EW", "SM"]: + if beam_mode in [None, "IW", "EW", "SM"]: self.beam_mode = beam_mode else: - raise ValueError("Beam mode must be one out of 'IW', 'EW', 'SM'") + raise ValueError("Beam mode must be one out of None, 'IW', 'EW', 'SM'") # ------------------------------------------ # 4 Check and set polarisations - possible_pols = ["*", "VV", "VH", "HV", "HH", "VV VH", "HH HV"] + possible_pols = [None, "VV", "VH", "HV", "HH", "VV VH", "HH HV", "*"] if polarisation in possible_pols: self.polarisation = polarisation else: @@ -292,8 +262,8 @@ def __init__( # ------------------------------------------ # 7 Initialize uname and pword to None - self.scihub_uname = None - self.scihub_pword = None + self.dataspace_uname = None + self.dataspace_pword = None self.asf_uname = None self.asf_pword = None @@ -310,7 +280,7 @@ def search( self, outfile=OST_INVENTORY_FILE, append=False, - base_url="https://apihub.copernicus.eu/apihub", + base_url="https://catalogue.dataspace.copernicus.eu/resto/api/", ): """High Level search function @@ -332,9 +302,17 @@ def search( # construct the final query query = urllib.parse.quote(f"Sentinel-1 AND {product_specs} AND {aoi} AND {toi}") - if not self.scihub_uname or not self.scihub_pword: + # create query + aoi = cop.create_aoi_str(self.aoi) + toi = cop.create_toi_str(self.start, self.end) + specs = cop.create_s1_product_specs( + self.product_type, self.polarisation, self.beam_mode + ) + query = aoi + toi + specs + 
'&maxRecords=100' + + if not self.dataspace_uname or not self.dataspace_pword: # ask for username and password - self.scihub_uname, self.scihub_pword = scihub.ask_credentials() + self.dataspace_uname, self.dataspace_pword = cop.ask_credentials() # do the search if outfile == OST_INVENTORY_FILE: @@ -342,12 +320,13 @@ else: self.inventory_file = outfile - search.scihub_catalogue( + base_url = base_url + 'collections/Sentinel1/search.json?' + search.dataspace_catalogue( query, self.inventory_file, append, - self.scihub_uname, - self.scihub_pword, + self.dataspace_uname, + self.dataspace_pword, base_url, ) @@ -355,7 +334,7 @@ # read inventory into the inventory attribute self.read_inventory() else: - logger.info("No images found in the AOI for this date range") + logger.info("No matching scenes found for the specified search parameters") def read_inventory(self): """Read the Sentinel-1 data inventory from a OST invetory shapefile @@ -568,7 +547,7 @@ def __init__( data_mount=None, product_type="SLC", beam_mode="IW", - polarisation="*", + polarisation="VV VH", ard_type="OST-GTC", snap_cpu_parallelism=cpu_count(), max_workers=1, diff --git a/ost/helpers/copernicus.py b/ost/helpers/copernicus.py new file mode 100644 index 0000000..fb72935 --- /dev/null +++ b/ost/helpers/copernicus.py @@ -0,0 +1,178 @@ +"""This module provides helper functions for the Copernicus Dataspace API.""" + +import getpass +import logging +from pathlib import Path +from datetime import datetime as dt + +import requests +from shapely.wkt import loads + +logger = logging.getLogger(__name__) + +def ask_credentials(): + """Interactive function to ask for Copernicus credentials.""" + print( + "If you do not have a Copernicus dataspace user account" + " go to: https://dataspace.copernicus.eu/ and register" + ) + uname = input("Your Copernicus Dataspace Username:") + pword = getpass.getpass("Your Copernicus Dataspace Password:") + + return uname, pword + + +def get_access_token(username, password=None): + + if not password: + logger.info(' Please provide your Copernicus Dataspace password:') + password = getpass.getpass() + + data = { + "client_id": "cdse-public", + "username": username, + "password": password, + "grant_type": "password", + } + try: + r = requests.post( + "https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token", + data=data, + ) + r.raise_for_status() + except Exception as e: + raise Exception( + f"Access token creation failed. Response from the server was: {r.json()}" + ) + return r.json()["access_token"] + + +def refresh_access_token(refresh_token: str) -> str: + data = { + "client_id": "cdse-public", + "refresh_token": refresh_token, + "grant_type": "refresh_token", + } + + try: + r = requests.post( + "https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token", + data=data, + ) + r.raise_for_status() + except Exception as e: + raise Exception( + f"Access token refresh failed. 
Response from the server was: {r.json()}" + ) + + return r.json()["access_token"] + + +def create_aoi_str(aoi): + """Convert WKT formatted AOI to dataspace's geometry attribute.""" + # load to shapely geometry to easily test for geometry type + geom = loads(aoi) + + # dependent on the type construct the query string + if geom.geom_type == "Point": + return f'&lon={geom.x}&lat={geom.y}' + + else: + # simplify geometry, as we might otherwise bump into too long string issue + aoi_convex = geom.convex_hull + + # create a dataspace-conform aoi string + return f'&geometry={aoi_convex}' + +def create_toi_str(start="2014-10-01", end=dt.now().strftime("%Y-%m-%d")): + """Convert start and end date to the dataspace search URL's time period attributes.""" + # bring start and end date to query format + return f"&startDate={start}T00:00:00Z&completionDate={end}T23:59:59Z" + +def create_s1_product_specs(product_type=None, polarisation=None, beam=None): + """Convert Sentinel-1 product metadata to dataspace query attributes.""" + # transform product type, polarisation and beam to query format + product_type_query = f'&productType={product_type}' if product_type else '' + polarisation_query = f'&polarisation={polarisation.replace(" ", "%26")}' if polarisation else '' + sensor_mode_query = f'&sensorMode={beam}' if beam else '' + + return product_type_query + polarisation_query + sensor_mode_query + + +def extract_basic_metadata(properties): + + # those are the things we want out of the standard json + wanted = ['title', 'orbitDirection', 'platform', 'polarisation', 'swath', 'thumbnail', 'published'] + + # loop through all properties + _dict = {} + for k, v in properties.items(): + # consider if in the list of wanted properties + if k in wanted: + if k == 'polarisation': + # remove & sign + _dict[k] = v.replace('&', ' ') + elif k == 'title': + # remove .SAFE extension + _dict[k] = v[:-5] + elif k == 'thumbnail': + _dict[k] = '/'.join(v.split('/')[:-2]) + '/manifest.safe' + else: + _dict[k] = v + + sorted_dict = dict(sorted(_dict.items(), key=lambda item: wanted.index(item[0]))) + return sorted_dict.values() + + +def get_entry(line): + + return line.split('>')[1].split('<')[0] + + +def get_advanced_metadata(metafile, access_token): + + with requests.Session() as session: + headers = {'Authorization': f'Bearer {access_token}'} + request = session.request("get", metafile) + response = session.get(request.url, headers=headers, stream=True) + + for line in response.iter_lines(): + + line = line.decode('utf-8') + if 's1sarl1:sliceNumber' in line: + slicenumber = get_entry(line) + if 's1sarl1:totalSlices' in line: + total_slices = get_entry(line) + if 'relativeOrbitNumber type="start"' in line: + relativeorbit = get_entry(line) + if 'relativeOrbitNumber type="stop"' in line: + lastrelativeorbit = get_entry(line) + if 'safe:nssdcIdentifier' in line: + platformidentifier = get_entry(line) + if 's1sarl1:missionDataTakeID' in line: + missiondatatakeid = get_entry(line) + if 's1sarl1:mode' in line: + sensoroperationalmode = get_entry(line) + if 'orbitNumber type="start"' in line: + orbitnumber = get_entry(line) + if 'orbitNumber type="stop"' in line: + lastorbitnumber = get_entry(line) + if 'safe:startTime' in line: + beginposition = get_entry(line) + if 'safe:stopTime' in line: + endposition = get_entry(line) + if '1sarl1:productType' in line: + product_type = get_entry(line) + + # add acquisitiondate + acqdate = dt.strftime(dt.strptime(beginposition, '%Y-%m-%dT%H:%M:%S.%f'), format='%Y%m%d') + + return ( + slicenumber, 
total_slices, + relativeorbit, lastrelativeorbit, + platformidentifier, missiondatatakeid, + sensoroperationalmode, product_type, + orbitnumber, lastorbitnumber, + beginposition, endposition, acqdate, + 0 # placeholder for size + ) diff --git a/ost/helpers/raster.py b/ost/helpers/raster.py index d9a9a32..ef155d3 100644 --- a/ost/helpers/raster.py +++ b/ost/helpers/raster.py @@ -42,7 +42,7 @@ def polygonize_ls(infile, outfile, driver="GeoJSON"): outfile, "w", driver=driver, - crs=pyproj.Proj(src.crs).srs, + crs=src.crs, schema={"properties": [("raster_val", "int")], "geometry": "Polygon"}, ) as dst: dst.writerecords(results) @@ -105,7 +105,7 @@ def polygonize_bounds(infile, outfile, mask_value=1, driver="GeoJSON"): outfile, "w", driver=driver, - crs=pyproj.Proj(src.crs).srs, + crs=src.crs, schema={"properties": [("raster_val", "int")], "geometry": "MultiPolygon"}, ) as dst: dst.writerecords(results) diff --git a/ost/s1/burst_batch.py b/ost/s1/burst_batch.py index 7dd22d9..aeeba66 100644 --- a/ost/s1/burst_batch.py +++ b/ost/s1/burst_batch.py @@ -66,6 +66,7 @@ def bursts_to_ards(burst_gdf, config_file): logger.info("Preparing the processing pipeline. This may take a moment.") proc_inventory = prepare_burst_inventory(burst_gdf, config_file) + #print(proc_inventory) with open(config_file, "r") as file: config_dict = json.load(file) diff --git a/ost/s1/burst_inventory.py b/ost/s1/burst_inventory.py index d83b6f4..4be1c0c 100644 --- a/ost/s1/burst_inventory.py +++ b/ost/s1/burst_inventory.py @@ -43,9 +43,9 @@ def burst_extract(scene_id, track, acq_date, et_root): # pol = root.find('adsHeader').find('polarisation').text swath = et_root.find("adsHeader").find("swath").text - burst_lines = np.int(et_root.find("swathTiming").find("linesPerBurst").text) + burst_lines = int(et_root.find("swathTiming").find("linesPerBurst").text) - burst_samples = np.int(et_root.find("swathTiming").find("samplesPerBurst").text) + burst_samples = int(et_root.find("swathTiming").find("samplesPerBurst").text) list_of_bursts = et_root.find("swathTiming").find("burstList") geolocation_grid = et_root.find("geolocationGrid")[0] @@ -252,9 +252,11 @@ def refine_burst_inventory(aoi, burst_gdf, outfile, coverages=None): warnings.filterwarnings("ignore", "Geometry is in a geographic CRS", UserWarning) # turn aoi into a geodataframe - aoi_gdf = gpd.GeoDataFrame(vec.wkt_to_gdf(aoi).buffer(0.05)) - aoi_gdf.columns = ["geometry"] - aoi_gdf.crs = "epsg:4326" + aoi_gdf = gpd.GeoDataFrame( + vec.wkt_to_gdf(aoi).buffer(0.05), + columns=['geometry'], + crs='epsg:4326' + ) # get columns of input dataframe for later return function cols = burst_gdf.columns diff --git a/ost/s1/search_data.py b/ost/s1/search_data.py new file mode 100644 index 0000000..968c8bb --- /dev/null +++ b/ost/s1/search_data.py @@ -0,0 +1,364 @@ +#! 
/usr/bin/env python +# -*- coding: utf-8 -*- + +# import stdlib modules +import os +import requests +import logging + +from pathlib import Path + +# import external modules +import pandas as pd +import geopandas as gpd +from shapely.wkt import dumps +from shapely.geometry import Polygon, shape +from tqdm import tqdm + +# internal OST libs +from ost.helpers.db import pgHandler +from ost.helpers import copernicus as cop + +# set up logger +logger = logging.getLogger(__name__) + + + +def _to_shapefile(gdf, outfile, append=False): + + # check if file is there + if os.path.isfile(outfile): + + # in case we want to append, we load the old one and add the new one + if append: + columns = [ + "id", + "identifier", + "polarisationmode", + "orbitdirection", + "acquisitiondate", + "relativeorbit", + "orbitnumber", + "product_type", + "slicenumber", + "size", + "beginposition", + "endposition", + "lastrelativeorbitnumber", + "lastorbitnumber", + "uuid", + "platformidentifier", + "missiondatatakeid", + "swathidentifier", + "ingestiondate", + "sensoroperationalmode", + "geometry", + ] + + # get existing geodataframe from file + old_df = gpd.read_file(outfile) + old_df.columns = columns + # drop id + old_df.drop("id", axis=1, inplace=True) + # append new results + gdf.columns = columns[1:] + gdf = pd.concat([old_df, gdf]) + + # remove duplicate entries + gdf.drop_duplicates(subset="identifier", inplace=True) + + # remove old file + os.remove(outfile) + os.remove("{}.cpg".format(outfile[:-4])) + os.remove("{}.prj".format(outfile[:-4])) + os.remove("{}.shx".format(outfile[:-4])) + os.remove("{}.dbf".format(outfile[:-4])) + + # calculate new index + gdf.insert(loc=0, column="id", value=range(1, 1 + len(gdf))) + + # write to new file + if len(gdf.index) >= 1: + gdf.to_file(outfile) + else: + logger.info("No scenes found in this AOI during this time") + + +def _to_geopackage(gdf, outfile, append=False): + + # check if file is there + if Path(outfile).exists(): + + # in case we want to append, we load the old one and add the new one + if append: + columns = [ + "id", + "identifier", + "polarisationmode", + "orbitdirection", + "acquisitiondate", + "relativeorbit", + "orbitnumber", + "product_type", + "slicenumber", + "size", + "beginposition", + "endposition", + "lastrelativeorbitnumber", + "lastorbitnumber", + "uuid", + "platformidentifier", + "missiondatatakeid", + "swathidentifier", + "ingestiondate", + "sensoroperationalmode", + "geometry", + ] + + # get existing geodataframe from file + old_df = gpd.read_file(outfile) + old_df.columns = columns + # drop id + old_df.drop("id", axis=1, inplace=True) + # append new results + gdf.columns = columns[1:] + gdf = pd.concat([old_df, gdf]) + + # remove duplicate entries + gdf.drop_duplicates(subset="identifier", inplace=True) + + # remove old file + Path(outfile).unlink() + + # calculate new index + gdf.insert(loc=0, column="id", value=range(1, 1 + len(gdf))) + + # write to new file + if len(gdf.index) > 0: + gdf.to_file(outfile, driver="GPKG") + else: + logger.info("No scenes found in this AOI during this time") + + +def _to_postgis(gdf, db_connect, outtable): + + # check if tablename already exists + db_connect.cursor.execute( + "SELECT EXISTS (SELECT * FROM " + "information_schema.tables WHERE " + "LOWER(table_name) = " + "LOWER('{}'))".format(outtable) + ) + result = db_connect.cursor.fetchall() + if result[0][0] is False: + logger.info(f"Table {outtable} does not exist in the database. 
Creating it...") + db_connect.pgCreateS1("{}".format(outtable)) + maxid = 1 + else: + try: + maxid = db_connect.pgSQL(f"SELECT max(id) FROM {outtable}") + maxid = maxid[0][0] + if maxid is None: + maxid = 0 + + logger.info( + f"Table {outtable} already exists with {maxid} entries. " + f"Will add all non-existent results to this table." + ) + maxid = maxid + 1 + except Exception: + raise RuntimeError( + f"Existent table {outtable} does not seem to be compatible " f"with Sentinel-1 data." + ) + + # add an index as first column + gdf.insert(loc=0, column="id", value=range(maxid, maxid + len(gdf))) + db_connect.pgSQLnoResp(f"SELECT UpdateGeometrySRID('{outtable.lower()}', 'geometry', 0);") + + # construct the SQL INSERT line + for _index, row in gdf.iterrows(): + + row["geometry"] = dumps(row["footprint"]) + row.drop("footprint", inplace=True) + identifier = row.identifier + uuid = row.uuid + line = tuple(row.tolist()) + + # first check if scene is already in the table + result = db_connect.pgSQL("SELECT uuid FROM {} WHERE " "uuid = '{}'".format(outtable, uuid)) + try: + result[0][0] + except IndexError: + logger.info(f"Inserting scene {identifier} to {outtable}") + db_connect.pgInsert(outtable, line) + # apply the dateline correction routine + db_connect.pgDateline(outtable, uuid) + maxid += 1 + else: + logger.info(f"Scene {identifier} already exists within table {outtable}.") + + logger.info(f"Inserted {len(gdf)} entries into {outtable}.") + logger.info(f"Table {outtable} now contains {maxid - 1} entries.") + logger.info("Optimising database table.") + + # drop index if existent + try: + db_connect.pgSQLnoResp("DROP INDEX {}_gix;".format(outtable.lower())) + except Exception: + pass + + # create geometry index and vacuum analyze + db_connect.pgSQLnoResp("SELECT UpdateGeometrySRID('{}', " "'geometry', 4326);".format(outtable.lower())) + db_connect.pgSQLnoResp( + "CREATE INDEX {}_gix ON {} USING GIST " "(geometry);".format(outtable, outtable.lower()) + ) + db_connect.pgSQLnoResp("VACUUM ANALYZE {};".format(outtable.lower())) + + +def check_availability(inventory_gdf, download_dir, data_mount): + """This function checks if the data is already downloaded or + available through a mount point on DIAS cloud + + :param inventory_gdf: + :param download_dir: + :param data_mount: + :return: + """ + + from ost import Sentinel1Scene + + # add download path, or set to None if not found + inventory_gdf["download_path"] = inventory_gdf.identifier.apply( + lambda row: str(Sentinel1Scene(row).get_path(download_dir, data_mount)) + ) + + return inventory_gdf + +def transform_geometry(geometry): + + try: + geom = Polygon(geometry['coordinates'][0]) + except: + geom = Polygon(geometry['coordinates'][0][0]) + + return geom + + +def query_dataspace(query, access_token): + + _next = query + + logger.info('Querying the Copernicus Dataspace Server for the search request') + dfs, i = [], 1 + while _next: + # get request + json = requests.get(_next).json() + #print(json) + # append json outout to list of dataframes + dfs.append(pd.DataFrame.from_dict(json['features'])) + try: + _next = next( + link['href'] for link in json['properties']['links'] if link['rel'] == 'next' + ) + #_next = [link['href'] for link in json['properties']['links'] if link['rel'] == 'next'][0] + except: + _next = None + df = pd.concat(dfs) + + if df.empty: + raise ValueError('No products found for the given search parameters.') + + logger.info('Extracting basic metadata for the scenes') + # extract basic metadata from retrieved json + 
tqdm.pandas() + df[[ + 'identifier', + 'orbitdirection', + 'platformidentifier', + 'polarisationmode', + 'swathidentifier', + 'metafile', + 'ingestiondate' + ]] = df.progress_apply( + lambda x: cop.extract_basic_metadata(x['properties']), axis=1, result_type='expand' + ) + + + # Rename the id column to 'uuid' + df.rename(columns={'id': 'uuid'}, inplace=True) + + # turn geometry into shapely objects + df['geometry'] = df['geometry'].apply(lambda x: transform_geometry(x)) + gdf = gpd.GeoDataFrame(df, geometry='geometry', crs='epsg:4326') + + logger.info('Extracting advanced metadata directly from the Copernicus dataspace server.') + gdf[[ + 'slicenumber', 'totalslicenumbers', + 'relativeorbitnumber', 'lastrelativeorbitnumber', + 'platformidentifier', 'missiondatatakeid', + 'sensoroperationalmode', 'producttype', + 'orbitnumber', 'lastorbitnumber', + 'beginposition', 'endposition', 'acquisitiondate', + 'size' + ]] = gdf.progress_apply( + lambda x: cop.get_advanced_metadata( + x['metafile'], access_token + ), axis=1, result_type='expand' + ) + + # add a unique id + #gdf['id'] = [i + 1 for i in range(len(gdf))] + + # a list of columns to keep + scihub_legacy_columns = [ + 'identifier', 'polarisationmode', 'orbitdirection', + 'acquisitiondate', 'relativeorbitnumber', 'orbitnumber', 'producttype', + 'slicenumber', 'size', 'beginposition', 'endposition', + 'lastrelativeorbitnumber', 'lastorbitnumber', 'uuid', + 'platformidentifier', 'missiondatatakeid', 'swathidentifier', + 'ingestiondate', 'sensoroperationalmode', 'geometry' + ] + + gdf = gdf[scihub_legacy_columns] + return gdf + + +def dataspace_catalogue( + query_string, + output, + append=False, + uname=None, + pword=None, + base_url="https://catalogue.dataspace.copernicus.eu/resto/api/collections/Sentinel1/search.json?", +): + """This is the main search function on the Copernicus Dataspace catalogue + + :param query_string: + :param output: + :param append: + :param uname: + :param pword: + :param base_url: + :return: + """ + + # retranslate Path object to string + output = str(output) + + # get an access token for the Copernicus Dataspace API + access_token = cop.get_access_token(uname, pword) + query = base_url + query_string + + # query the catalogue and get the results as a GeoDataFrame + gdf = query_dataspace(query, access_token) + + if output[-4:] == ".shp": + logger.info(f"Writing inventory data to shape file: {output}") + _to_shapefile(gdf, output, append) + elif output[-5:] == ".gpkg": + logger.info(f"Writing inventory data to geopackage file: {output}") + _to_geopackage(gdf, output, append) + else: + logger.info(f"Writing inventory data to PostGIS table: {output}") + db_connect = pgHandler() + _to_postgis(gdf, db_connect, output) diff --git a/pyproject.toml b/pyproject.toml index 8b24fe3..95d059e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,2 +1,114 @@ -[tool.black] -line-length = 110 \ No newline at end of file +[build-system] +requires = ["setuptools>=61.2", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "opensartoolkit" +version = "0.13.1" +description = "High-level functionality for the inventory, download and pre-processing of Sentinel-1 data" +keywords = ["Sentinel-1", "ESA", "SAR", "Radar", "Earth Observation", "Remote Sensing", "Synthetic Aperture Radar"] +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", +] +requires-python = ">=3.6.9" + +dependencies = [ + "descartes", + "fiona", + 
"gdal==3.6.4", + "godale", + "pyproj", + "geopandas", + "jupyterlab", + "matplotlib", + "numpy", + "pandas", + "psycopg2-binary", + "rasterio", + "requests", + "scipy", + "shapely", + "tqdm", + "imageio", + "rtree", + "retrying", + "pytest", + "pytest-cov", + "pytest-runner" +] + +[[project.authors]] +name = "Andreas Vollrath" +email = "andreas.vollrath@fao.org" + +[project.license] +text = "MIT" + +[project.readme] +file = "README.rst" +content-type = "text/x-rst" + +[project.urls] +Homepage = "https://github.com/ESA-PhiLab/OpenSarToolkit/" + +[project.optional-dependencies] +dev = ["pre-commit", "commitizen", "nox", "mypy"] +test = ["pytest", "pytest-sugar", "pytest-cov", "pytest-deadfixtures"] +doc = ["sphinx", "pydata-sphinx-theme", "sphinx-copybutton", "sphinx-design", "sphinx-icon", "sphinx-btn"] + +[tool.setuptools] +include-package-data = true +license-files = ["LICENSE"] + +[tool.setuptools.packages.find] +include = ["ost*"] +exclude = ["docs*", "tests*"] + +[tool.commitizen] +tag_format = "v$major.$minor.$patch$prerelease" +update_changelog_on_bump = false +version = "0.0.0" +version_files = [ + "pyproject.toml:version", + "sampling_handler/__init__.py:__version__", + "docs/conf.py:release" +] + +[tool.pytest.ini_options] +testpaths = "tests" + +[tool.ruff] +ignore-init-module-imports = true +fix = true +select = ["E", "F", "W", "I", "D", "RUF"] +ignore = ["E501"] # line too long | Black take care of it + +[tool.ruff.flake8-quotes] +docstring-quotes = "double" + +[tool.ruff.pydocstyle] +convention = "google" + +[tool.coverage.run] +source = ["ost"] + +[tool.doc8] +ignore = ["D001"] # we follow a 1 line = 1 paragraph style + +[tool.mypy] +scripts_are_modules = true +ignore_missing_imports = true +install_types = true +non_interactive = true +warn_redundant_casts = true + +[tool.licensecheck] +using = "PEP631:test;dev;doc" diff --git a/setup.py b/setup.py index 415424f..6d214ad 100755 --- a/setup.py +++ b/setup.py @@ -1,104 +1,5 @@ -from pathlib import Path -from setuptools import setup, find_packages -from setuptools.command.develop import develop -from subprocess import check_call +"""Install the package.""" -# check that gdal has already been installed prior to ost -try: - from osgeo import gdal -except ModuleNotFoundError: - raise ImportError( - "please install a GDAL distribution>=1.7 prior to install OpenSarTollkit, we recommand using the pygdal packages" - ) +import setuptools -# to make flake8 happy -gdal.__version__ - -# the version number -version = "0.12.15" - -# The directory containing this file -HERE = Path(__file__).parent - -DESCRIPTION = "High-level functionality for the inventory, download and pre-processing of Sentinel-1 data" -LONG_DESCRIPTION = (HERE / "README.rst").read_text() - - -class DevelopCmd(develop): - """overwrite normal develop pip command to install the pre-commit""" - - def run(self): - check_call( - [ - "pre-commit", - "install", - "--install-hooks", - "-t", - "pre-commit", - "-t", - "commit-msg", - ] - ) - super(DevelopCmd, self).run() - - -setup( - name="opensartoolkit", - version=version, - license="MIT License", - description=DESCRIPTION, - long_description=LONG_DESCRIPTION, - long_description_content_type="text/x-rst", - author="Andreas Vollrath", - author_email="opensarkit@gmail.com", - url="https://github.com/ESA-PhiLab/OpenSarToolkit", - download_url=f"https://github.com/ESA-PhiLab/OpenSarToolkit/archive/{version}.tar.gz", - keywords=[ - "Sentinel-1", - "ESA", - "SAR", - "Radar", - "Earth Observation", - "Remote Sensing", - "Synthetic 
Aperture Radar", - ], - packages=find_packages(), - include_package_data=True, - install_requires=[ - "pyproj", - "descartes", - "godale", - "geopandas>=0.8", - "jupyterlab", - "psycopg2-binary", - "rasterio", - "requests", - "scipy", - "tqdm", - "imageio", - "rtree", - "retrying", - ], - extras_require={ - "dev": [ - "pre-commit", - "pytest", - "coverage", - "nbsphinx", - "pydata-sphinx-theme", - "sphinx-copybutton", - "Commitizen", - ] - }, - cmdclass={"develop": DevelopCmd}, - zip_safe=False, - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: MIT License", - "Natural Language :: English", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Topic :: Scientific/Engineering :: GIS", - ], -) +setuptools.setup()