| signature (string, lengths 8 to 3.44k) | body (string, lengths 0 to 1.41M) | docstring (string, lengths 1 to 122k) | id (string, lengths 5 to 17) |
|---|---|---|---|
def read_geojson(geojson_file):
|
with open(geojson_file) as f:
    return geojson.load(f)
|
Read a GeoJSON file into a GeoJSON object.
|
f9466:m0
|
def geojson_to_wkt(geojson_obj, feature_number=<NUM_LIT:0>, decimals=<NUM_LIT:4>):
|
if '<STR_LIT>' in geojson_obj:
    geometry = geojson_obj
elif '<STR_LIT>' in geojson_obj:
    geometry = geojson_obj['<STR_LIT>']
else:
    geometry = geojson_obj['<STR_LIT>'][feature_number]['<STR_LIT>']

def ensure_2d(geometry):
    if isinstance(geometry[<NUM_LIT:0>], (list, tuple)):
        return list(map(ensure_2d, geometry))
    else:
        return geometry[:<NUM_LIT:2>]

def check_bounds(geometry):
    if isinstance(geometry[<NUM_LIT:0>], (list, tuple)):
        return list(map(check_bounds, geometry))
    else:
        if geometry[<NUM_LIT:0>] > <NUM_LIT> or geometry[<NUM_LIT:0>] < -<NUM_LIT>:
            raise ValueError('<STR_LIT>')
        if geometry[<NUM_LIT:1>] > <NUM_LIT> or geometry[<NUM_LIT:1>] < -<NUM_LIT>:
            raise ValueError('<STR_LIT>')

geometry['<STR_LIT>'] = ensure_2d(geometry['<STR_LIT>'])
check_bounds(geometry['<STR_LIT>'])
wkt = geomet.wkt.dumps(geometry, decimals=decimals)
wkt = re.sub(r'<STR_LIT>', '<STR_LIT>', wkt)
return wkt
|
Convert a GeoJSON object to Well-Known Text. Intended for use with OpenSearch queries.
In case of FeatureCollection, only one of the features is used (the first by default).
3D points are converted to 2D.
Parameters
----------
geojson_obj : dict
a GeoJSON object
feature_number : int, optional
Feature to extract polygon from (in case of MultiPolygon
FeatureCollection), defaults to first Feature
decimals : int, optional
Number of decimal figures after the decimal point to round coordinates to. Defaults to 4
(about 10 meters).
Returns
-------
polygon coordinates
string of comma-separated coordinate tuples (lon, lat) to be used by SentinelAPI
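Example (an illustrative sketch; the bare-geometry input and the exact WKT spacing are assumptions):
geom = {'type': 'Point', 'coordinates': [125.63521, 10.12344, 50.0]}  # hypothetical 3D point
wkt = geojson_to_wkt(geom)  # z coordinate dropped, values rounded to 4 decimals
# -> roughly 'POINT (125.6352 10.1234)'; exact spacing depends on geomet's WKT formatting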
|
f9466:m1
|
def format_query_date(in_date):
|
if in_date is None:
    return '<STR_LIT:*>'
if isinstance(in_date, (datetime, date)):
    return in_date.strftime('<STR_LIT>')
elif not isinstance(in_date, string_types):
    raise ValueError('<STR_LIT>'.format(in_date))
in_date = in_date.strip()
if in_date == '<STR_LIT:*>':
    return in_date
valid_date_pattern = r'<STR_LIT>'
units = r'<STR_LIT>'
valid_date_pattern += r'<STR_LIT>'.format(units)
valid_date_pattern += r'<STR_LIT>'.format(units)
in_date = in_date.strip()
if re.match(valid_date_pattern, in_date):
    return in_date
try:
    return datetime.strptime(in_date, '<STR_LIT>').strftime('<STR_LIT>')
except ValueError:
    raise ValueError('<STR_LIT>'.format(in_date))
|
Format a date, datetime or a YYYYMMDD string input as YYYY-MM-DDThh:mm:ssZ
or validate a date string as suitable for the full text search interface and return it.
`None` will be converted to '*', meaning an unlimited date bound in date ranges.
Parameters
----------
in_date : str or datetime or date or None
Date to be formatted
Returns
-------
str
Formatted string
Raises
------
ValueError
If the input date type is incorrect or the passed date string is invalid
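Example (a small sketch of the behaviours described above; outputs follow the YYYY-MM-DDThh:mm:ssZ format stated in the summary):
from datetime import date
format_query_date(date(2015, 1, 1))  # -> '2015-01-01T00:00:00Z'
format_query_date('20150101')        # -> '2015-01-01T00:00:00Z'
format_query_date(None)              # -> '*' (unlimited date bound)
format_query_date('NOW-1DAY')        # returned as-is after validation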
|
f9466:m2
|
def _parse_odata_timestamp(in_date):
|
timestamp = int(in_date.replace('<STR_LIT>', '<STR_LIT>').replace('<STR_LIT>', '<STR_LIT>'))
seconds = timestamp // <NUM_LIT:1000>
ms = timestamp % <NUM_LIT:1000>
return datetime.utcfromtimestamp(seconds) + timedelta(milliseconds=ms)
|
Convert the timestamp received from OData JSON API to a datetime object.
|
f9466:m7
|
def _parse_opensearch_response(products):
|
converters = {'<STR_LIT:date>': _parse_iso_date, '<STR_LIT:int>': int, '<STR_LIT>': int, '<STR_LIT:float>': float, '<STR_LIT>': float}
default_converter = lambda x: x
output = OrderedDict()
for prod in products:
    product_dict = {}
    prod_id = prod['<STR_LIT:id>']
    output[prod_id] = product_dict
    for key in prod:
        if key == '<STR_LIT:id>':
            continue
        if isinstance(prod[key], string_types):
            product_dict[key] = prod[key]
        else:
            properties = prod[key]
            if isinstance(properties, dict):
                properties = [properties]
            if key == '<STR_LIT>':
                for p in properties:
                    name = '<STR_LIT>'
                    if '<STR_LIT>' in p:
                        name = '<STR_LIT>' + p['<STR_LIT>']
                    product_dict[name] = p['<STR_LIT>']
            else:
                f = converters.get(key, default_converter)
                for p in properties:
                    try:
                        product_dict[p['<STR_LIT:name>']] = f(p['<STR_LIT:content>'])
                    except KeyError:
                        product_dict[p['<STR_LIT:name>']] = f(p['<STR_LIT:str>'])
return output
|
Convert a query response to a dictionary.
The resulting dictionary structure is {<product id>: {<property>: <value>}}.
The property values are converted to their respective Python types unless `parse_values`
is set to `False`.
|
f9466:m8
|
def query(self, area=None, date=None, raw=None, area_relation='<STR_LIT>',
          order_by=None, limit=None, offset=<NUM_LIT:0>, **keywords):
|
query = self.format_query(area, date, raw, area_relation, **keywords)
self.logger.debug("<STR_LIT>",
                  order_by, limit, offset, query)
formatted_order_by = _format_order_by(order_by)
response, count = self._load_query(query, formatted_order_by, limit, offset)
self.logger.info("<STR_LIT>", count)
return _parse_opensearch_response(response)
|
Query the OpenSearch API with the coordinates of an area, a date interval
and any other search keywords accepted by the API.
Parameters
----------
area : str, optional
The area of interest formatted as a Well-Known Text string.
date : tuple of (str or datetime) or str, optional
A time interval filter based on the Sensing Start Time of the products.
Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
The timestamps can be either a Python datetime or a string in one of the
following formats:
- yyyyMMdd
- yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
- yyyy-MM-ddThh:mm:ssZ
- NOW
- NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
- NOW+<n>DAY(S)
- yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
- NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit
Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
used as well.
raw : str, optional
Additional query text that will be appended to the query.
area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
What relation to use for testing the AOI. Case insensitive.
- Intersects: true if the AOI and the footprint intersect (default)
- Contains: true if the AOI is inside the footprint
- IsWithin: true if the footprint is inside the AOI
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order,
respectively. Ascending order is used if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
**keywords
Additional keywords can be used to specify other query parameters,
e.g. `relativeorbitnumber=70`.
See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
for a full list.
Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
`None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.
The time interval formats accepted by the `date` parameter can also be used with
any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
'date', 'creationdate', and 'ingestiondate').
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
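Example (illustrative only; credentials, hub URL and `footprint_wkt` are placeholders):
api = SentinelAPI('user', 'password', 'https://scihub.copernicus.eu/dhus')
products = api.query(area=footprint_wkt,  # e.g. from geojson_to_wkt(read_geojson(...))
                     date=('20151219', 'NOW'),
                     platformname='Sentinel-2',
                     cloudcoverpercentage=(0, 30))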
|
f9466:c0:m1
|
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='<STR_LIT>',
                 **keywords):
|
if area_relation.lower() not in {"<STR_LIT>", "<STR_LIT>", "<STR_LIT>"}:
    raise ValueError("<STR_LIT>".format(area_relation))
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
        (date is not None and '<STR_LIT>' in kw_lower) or
        (area is not None and '<STR_LIT>' in kw_lower)):
    raise ValueError("<STR_LIT>")
query_parts = []
if date is not None:
    keywords['<STR_LIT>'] = date
for attr, value in sorted(keywords.items()):
    if isinstance(value, string_types):
        value = value.strip()
        if not any(value.startswith(s[<NUM_LIT:0>]) and value.endswith(s[<NUM_LIT:1>]) for s in ['<STR_LIT>', '<STR_LIT:{}>', '<STR_LIT>', '<STR_LIT>']):
            # flags must be passed by keyword; positionally this would be the count argument
            value = re.sub(r'<STR_LIT>', r'<STR_LIT>', value, flags=re.M)
    date_attrs = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:date>', '<STR_LIT>', '<STR_LIT>']
    if attr.lower() in date_attrs:
        if isinstance(value, string_types) and '<STR_LIT>' in value:
            pass
        elif not isinstance(value, string_types) and len(value) == <NUM_LIT:2>:
            value = (format_query_date(value[<NUM_LIT:0>]), format_query_date(value[<NUM_LIT:1>]))
        else:
            raise ValueError("<STR_LIT>"
                             "<STR_LIT>".format(attr, value))
    if isinstance(value, (list, tuple)):
        if len(value) == <NUM_LIT:2>:
            value = ['<STR_LIT:*>' if x is None else x for x in value]
            if all(x == '<STR_LIT:*>' for x in value):
                continue
            value = '<STR_LIT>'.format(*value)
        else:
            raise ValueError("<STR_LIT>"
                             "<STR_LIT:{}>".format(len(value)))
    query_parts.append('<STR_LIT>'.format(attr, value))
if raw:
    query_parts.append(raw)
if area is not None:
    query_parts.append('<STR_LIT>'.format(area_relation, area))
return '<STR_LIT:U+0020>'.join(query_parts)
|
Create a OpenSearch API query string.
|
f9466:c0:m2
|
def query_raw(self, query, order_by=None, limit=None, offset=<NUM_LIT:0>):
|
warnings.warn(
    "<STR_LIT>",
    PendingDeprecationWarning
)
return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
|
Do a full-text query on the OpenSearch API using the format specified in
https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.
Parameters
----------
query : str
The query string.
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
Ascending order is used if the prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
|
f9466:c0:m3
|
def count(self, area=None, date=None, raw=None, area_relation='<STR_LIT>', **keywords):
|
for kw in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:
    if kw in keywords:
        del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=<NUM_LIT:0>)
return total_count
|
Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
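Example (sketch; accepts the same keywords as query()):
n = api.count(area=footprint_wkt, date=('20151219', 'NOW'), platformname='Sentinel-1')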
|
f9466:c0:m4
|
@staticmethod
def to_geojson(products):
|
feature_list = []
for i, (product_id, props) in enumerate(products.items()):
    props = props.copy()
    props['<STR_LIT:id>'] = product_id
    poly = geomet.wkt.loads(props['<STR_LIT>'])
    del props['<STR_LIT>']
    del props['<STR_LIT>']
    for k, v in props.items():
        if isinstance(v, (date, datetime)):
            props[k] = v.strftime('<STR_LIT>')
    feature_list.append(
        geojson.Feature(geometry=poly, id=i, properties=props)
    )
return geojson.FeatureCollection(feature_list)
|
Return the products from a query response as a GeoJSON with the values in their
appropriate Python types.
|
f9466:c0:m8
|
@staticmethod
def to_dataframe(products):
|
try:
    import pandas as pd
except ImportError:
    raise ImportError("<STR_LIT>")
return pd.DataFrame.from_dict(products, orient='<STR_LIT:index>')
|
Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
|
f9466:c0:m9
|
@staticmethod
def to_geodataframe(products):
|
try:
    import geopandas as gpd
    import shapely.wkt
except ImportError:
    raise ImportError("<STR_LIT>")
crs = {'<STR_LIT>': '<STR_LIT>'}
if len(products) == <NUM_LIT:0>:
    return gpd.GeoDataFrame(crs=crs)
df = SentinelAPI.to_dataframe(products)
geometry = [shapely.wkt.loads(fp) for fp in df['<STR_LIT>']]
df.drop(['<STR_LIT>', '<STR_LIT>'], axis=<NUM_LIT:1>, inplace=True)
return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
|
Return the products from a query response as a GeoPandas GeoDataFrame
with the values in their appropriate Python types.
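Example (sketch; assumes geopandas is installed and `products` came from query()):
gdf = SentinelAPI.to_geodataframe(products)
gdf.sort_values('beginposition').plot()  # footprints become the geometry column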
|
f9466:c0:m10
|
def get_product_odata(self, id, full=False):
|
url = urljoin(self.api_url, u"<STR_LIT>".format(id))
if full:
    url += '<STR_LIT>'
response = self.session.get(url, auth=self.session.auth,
                            timeout=self.timeout)
_check_scihub_response(response)
values = _parse_odata_response(response.json()['<STR_LIT:d>'])
return values
|
Access OData API to get info about a product.
Returns a dict containing the id, title, size, md5sum, date, footprint and download url
of the product. The date field corresponds to the Start ContentDate value.
If `full` is set to True, then the full, detailed metadata of the product is returned
in addition to the above.
Parameters
----------
id : string
The UUID of the product to query
full : bool
Whether to get the full metadata for the Product. False by default.
Returns
-------
dict[str, Any]
A dictionary with an item for each metadata attribute
Notes
-----
For a full list of mappings between the OpenSearch (Solr) and OData attribute names
see the following definition files:
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
|
f9466:c0:m11
|
def _trigger_offline_retrieval(self, url):
|
with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
    if r.status_code == <NUM_LIT>:
        self.logger.info("<STR_LIT>")
    elif r.status_code == <NUM_LIT>:
        self.logger.error("<STR_LIT>")
        raise SentinelAPILTAError('<STR_LIT>', r)
    elif r.status_code == <NUM_LIT>:
        self.logger.error("<STR_LIT>")
        raise SentinelAPILTAError('<STR_LIT>', r)
    elif r.status_code == <NUM_LIT>:
        self.logger.error("<STR_LIT>")
        raise SentinelAPILTAError('<STR_LIT>', r)
    return r.status_code
|
Triggers the retrieval of an offline product.
Trying to download an offline product triggers its retrieval from the long term archive.
The returned HTTP status code conveys whether this was successful.
Parameters
----------
url : string
URL for downloading the product
Notes
-----
https://scihub.copernicus.eu/userguide/LongTermArchive
|
f9466:c0:m12
|
def download(self, id, directory_path='<STR_LIT:.>', checksum=True):
|
product_info = self.get_product_odata(id)
path = join(directory_path, product_info['<STR_LIT:title>'] + '<STR_LIT>')
product_info['<STR_LIT:path>'] = path
product_info['<STR_LIT>'] = <NUM_LIT:0>
self.logger.info('<STR_LIT>', id, path)
if exists(path):
    return product_info
if not product_info['<STR_LIT>']:
    self.logger.warning(
        '<STR_LIT>',
        product_info['<STR_LIT:id>'])
    self._trigger_offline_retrieval(product_info['<STR_LIT:url>'])
    return product_info
temp_path = path + '<STR_LIT>'
skip_download = False
if exists(temp_path):
    if getsize(temp_path) > product_info['<STR_LIT:size>']:
        self.logger.warning(
            "<STR_LIT>"
            "<STR_LIT>",
            str(temp_path), getsize(temp_path), product_info['<STR_LIT:size>'])
        remove(temp_path)
    elif getsize(temp_path) == product_info['<STR_LIT:size>']:
        if self._md5_compare(temp_path, product_info['<STR_LIT>']):
            skip_download = True
        else:
            self.logger.warning(
                "<STR_LIT>"
                "<STR_LIT>",
                str(temp_path))
            remove(temp_path)
    else:
        self.logger.info(
            "<STR_LIT>", temp_path)
if not skip_download:
    product_info['<STR_LIT>'] = self._download(
        product_info['<STR_LIT:url>'], temp_path, self.session, product_info['<STR_LIT:size>'])
if checksum is True:
    if not self._md5_compare(temp_path, product_info['<STR_LIT>']):
        remove(temp_path)
        raise InvalidChecksumError('<STR_LIT>')
shutil.move(temp_path, path)
return product_info
|
Download a product.
Uses the filename on the server for the downloaded file, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
Incomplete downloads are continued and complete files are skipped.
Parameters
----------
id : string
UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
directory_path : string, optional
Where the file will be downloaded
checksum : bool, optional
If True, verify the downloaded file's integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Returns
-------
product_info : dict
Dictionary containing the product's info from get_product_odata() as well as
the path on disk.
Raises
------
InvalidChecksumError
If the MD5 checksum does not match the checksum on the server.
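Example (sketch; the UUID is the placeholder from above):
info = api.download('a8dd0cfd-613e-45ce-868c-d79177b916ed', directory_path='./data')
print(info['path'])  # local path of the downloaded (or already complete) file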
|
f9466:c0:m13
|
def download_all(self, products, directory_path='<STR_LIT:.>', max_attempts=<NUM_LIT:10>, checksum=True):
|
product_ids = list(products)
self.logger.info("<STR_LIT>", len(product_ids))
return_values = OrderedDict()
last_exception = None
for i, product_id in enumerate(products):
    for attempt_num in range(max_attempts):
        try:
            product_info = self.download(product_id, directory_path, checksum)
            return_values[product_id] = product_info
            break
        except (KeyboardInterrupt, SystemExit):
            raise
        except InvalidChecksumError as e:
            last_exception = e
            self.logger.warning(
                "<STR_LIT>", product_id)
        except SentinelAPILTAError as e:
            last_exception = e
            self.logger.exception("<STR_LIT>", product_id)
            break
        except Exception as e:
            last_exception = e
            self.logger.exception("<STR_LIT>", product_id)
    self.logger.info("<STR_LIT>", i + <NUM_LIT:1>, len(product_ids))
failed = set(products) - set(return_values)
triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['<STR_LIT>'] is False])
downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['<STR_LIT>'] is True])
if len(failed) == len(product_ids) and last_exception is not None:
    raise last_exception
return downloaded, triggered, failed
|
Download a list of products.
Takes a list of product IDs as input. This means that the return value of query() can be
passed directly to this method.
File names on the server are used for the downloaded files, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
In case of interruptions or other exceptions, downloading will restart from where it left
off. Downloading is attempted at most max_attempts times to avoid getting stuck with
unrecoverable errors.
Parameters
----------
products : list
List of product IDs
directory_path : string
Directory where the files will be downloaded
max_attempts : int, optional
Number of allowed retries before giving up downloading a product. Defaults to 10.
checksum : bool, optional
If True, verify the downloaded files' integrity by checking their MD5 checksums.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Raises
------
Raises the most recent downloading exception if all downloads failed.
Returns
-------
dict[string, dict]
A dictionary containing the return value from download() for each successfully
downloaded product.
dict[string, dict]
A dictionary containing the product information for products whose retrieval
from the long term archive was successfully triggered.
set[string]
The set of products that failed to download.
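Example (sketch; the three return values mirror the Returns section above):
downloaded, triggered, failed = api.download_all(products, directory_path='./data')
for product_id in failed:
    print('failed:', product_id)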
|
f9466:c0:m14
|
@staticmethod
def get_products_size(products):
|
size_total = <NUM_LIT:0>
for title, props in products.items():
    size_product = props["<STR_LIT:size>"]
    size_value = float(size_product.split("<STR_LIT:U+0020>")[<NUM_LIT:0>])
    size_unit = str(size_product.split("<STR_LIT:U+0020>")[<NUM_LIT:1>])
    if size_unit == "<STR_LIT>":
        size_value /= <NUM_LIT>
    if size_unit == "<STR_LIT>":
        size_value /= <NUM_LIT> * <NUM_LIT>
    size_total += size_value
return round(size_total, <NUM_LIT:2>)
|
Return the total file size in GB of all products in the OpenSearch response.
|
f9466:c0:m15
|
@staticmethod
def check_query_length(query):
|
effective_length = len(quote_plus(query, safe="<STR_LIT>").replace('<STR_LIT>', '<STR_LIT>'))
return effective_length / <NUM_LIT>
|
Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
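Example (sketch of the intended pre-flight check; the 1.0 threshold follows from the ratio semantics above):
ratio = SentinelAPI.check_query_length(query)
if ratio > 1.0:
    raise ValueError('query is too long ({:.0%} of the allowed length)'.format(ratio))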
|
f9466:c0:m16
|
def _query_names(self, names):
|
def chunks(l, n):
    """<STR_LIT>"""
    for i in range(<NUM_LIT:0>, len(l), n):
        yield l[i:i + n]

products = {}
for chunk in chunks(names, <NUM_LIT>):
    query = "<STR_LIT>".join(chunk)
    products.update(self.query(raw=query))
output = OrderedDict((name, dict()) for name in names)
for id, metadata in products.items():
    name = metadata['<STR_LIT>']
    output[name][id] = metadata
return output
|
Find products by their names, e.g.
S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.
Note that duplicates exist on server, so multiple products can be returned for each name.
Parameters
----------
names : list[string]
List of product names.
Returns
-------
dict[string, dict[str, dict]]
A dictionary mapping each name to a dictionary which contains the products with
that name (with ID as the key).
|
f9466:c0:m17
|
def check_files(self, paths=None, ids=None, directory=None, delete=False):
|
if not ids and not paths:
    raise ValueError("<STR_LIT>")
if ids and not directory:
    raise ValueError("<STR_LIT>")
paths = paths or []
ids = ids or []

def name_from_path(path):
    return splitext(basename(path))[<NUM_LIT:0>]

names = []
if paths:
    names = list(map(name_from_path, paths))
    result = self._query_names(names)
    for product_dicts in result.values():
        ids += list(product_dicts)
names_from_paths = set(names)
ids = set(ids)
product_infos = defaultdict(list)
for id in ids:
    odata = self.get_product_odata(id)
    name = odata['<STR_LIT:title>']
    product_infos[name].append(odata)
    if name not in names_from_paths:
        paths.append(join(directory, name + '<STR_LIT>'))
corrupt = {}
for path in paths:
    name = name_from_path(path)
    if len(product_infos[name]) > <NUM_LIT:1>:
        self.logger.warning("<STR_LIT>".format(path))
    if not exists(path):
        self.logger.info("<STR_LIT>".format(path))
        corrupt[path] = product_infos[name]
        continue
    is_fine = False
    for product_info in product_infos[name]:
        if (getsize(path) == product_info['<STR_LIT:size>'] and
                self._md5_compare(path, product_info['<STR_LIT>'])):
            is_fine = True
            break
    if not is_fine:
        self.logger.info("<STR_LIT>".format(path))
        corrupt[path] = product_infos[name]
        if delete:
            remove(path)
return corrupt
|
Verify the integrity of product files on disk.
Integrity is checked by comparing the size and checksum of the file with the respective
values on the server.
The input can be a list of products to check or a list of IDs and a directory.
In cases where multiple products with different IDs exist on the server for a given product
name, the file is considered to be correct if any of them matches the file size and
checksum. A warning is logged in such situations.
The corrupt products' OData info is included in the return value to make it easier to
re-download the products, if necessary.
Parameters
----------
paths : list[string]
List of product file paths.
ids : list[string]
List of product IDs.
directory : string
Directory where the files are located, if checking based on product IDs.
delete : bool
Whether to delete corrupt products. Defaults to False.
Returns
-------
dict[str, list[dict]]
A dictionary listing the invalid or missing files. The dictionary maps the corrupt
file paths to a list of OData dictionaries of matching products on the server (as
returned by :meth:`SentinelAPI.get_product_odata()`).
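Example (sketch; the ID and directory are placeholders):
corrupt = api.check_files(ids=['a8dd0cfd-613e-45ce-868c-d79177b916ed'], directory='./data')
for path, infos in corrupt.items():
    print(path, 'does not match any of', len(infos), 'matching products on the server')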
|
f9466:c0:m18
|
def _md5_compare(self, file_path, checksum, block_size=<NUM_LIT:2> ** <NUM_LIT>):
|
with closing(self._tqdm(desc="<STR_LIT>", total=getsize(file_path), unit="<STR_LIT:B>",
                        unit_scale=True)) as progress:
    md5 = hashlib.md5()
    with open(file_path, "<STR_LIT:rb>") as f:
        while True:
            block_data = f.read(block_size)
            if not block_data:
                break
            md5.update(block_data)
            progress.update(len(block_data))
    return md5.hexdigest().lower() == checksum.lower()
|
Compare a given MD5 checksum with one calculated from a file.
|
f9466:c0:m19
|
def _tqdm(self, **kwargs):
|
kwargs.update({'<STR_LIT>': not self.show_progressbars})
return tqdm(**kwargs)
|
tqdm progressbar wrapper. May be overridden to customize progressbar behavior.
|
f9466:c0:m21
|
@click.command(context_settings=dict(help_option_names=['<STR_LIT>', '<STR_LIT>']))
@click.option(
    '<STR_LIT>', '<STR_LIT>', type=str, envvar='<STR_LIT>', default=None,
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', '<STR_LIT>', type=str, envvar='<STR_LIT>', default=None,
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', type=str, default='<STR_LIT>', envvar='<STR_LIT>',
    help="""<STR_LIT>""")
@click.option(
    '<STR_LIT>', '<STR_LIT>', type=str, default='<STR_LIT>',
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', '<STR_LIT>', type=str, default='<STR_LIT>',
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', '<STR_LIT>', type=click.Path(exists=True),
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', type=CommaSeparatedString(), default=None,
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', type=CommaSeparatedString(), default=None,
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', type=click.Choice(['<STR_LIT:1>', '<STR_LIT:2>', '<STR_LIT:3>', '<STR_LIT:5>']),
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', type=click.Choice(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']),
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', type=str, default=None,
    help='<STR_LIT>')
@click.option(
    '<STR_LIT:-c>', '<STR_LIT>', type=int,
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', '<STR_LIT>', type=str,
    help="<STR_LIT>"
         "<STR_LIT>")
@click.option(
    '<STR_LIT>', '<STR_LIT>', type=int,
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', '<STR_LIT>', is_flag=True,
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', type=click.Path(exists=True), default='<STR_LIT:.>',
    help='<STR_LIT>')
@click.option(
    '<STR_LIT>', '<STR_LIT>', type=CommaSeparatedString(), default=None,
    help="""<STR_LIT>""")
@click.option(
    '<STR_LIT>', is_flag=True,
    help="""<STR_LIT>""")
@click.version_option(version=sentinelsat_version, prog_name="<STR_LIT>")
def cli(user, password, geometry, start, end, uuid, name, download, sentinel, producttype,
        instrument, cloud, footprints, path, query, url, order_by, limit):
|
_set_logger_handler()
if user is None or password is None:
    try:
        user, password = requests.utils.get_netrc_auth(url)
    except TypeError:
        pass
if user is None or password is None:
    raise click.UsageError('<STR_LIT>'
                           '<STR_LIT>')
api = SentinelAPI(user, password, url)
search_kwargs = {}
if sentinel and not (producttype or instrument):
    search_kwargs["<STR_LIT>"] = "<STR_LIT>" + sentinel
if instrument and not producttype:
    search_kwargs["<STR_LIT>"] = instrument
if producttype:
    search_kwargs["<STR_LIT>"] = producttype
if cloud:
    if sentinel not in ['<STR_LIT:2>', '<STR_LIT:3>']:
        logger.error('<STR_LIT>')
        exit(<NUM_LIT:1>)
    search_kwargs["<STR_LIT>"] = (<NUM_LIT:0>, cloud)
if query is not None:
    search_kwargs.update((x.split('<STR_LIT:=>') for x in query))
if geometry is not None:
    search_kwargs['<STR_LIT>'] = geojson_to_wkt(read_geojson(geometry))
if uuid is not None:
    uuid_list = [x.strip() for x in uuid]
    products = {}
    for productid in uuid_list:
        try:
            products[productid] = api.get_product_odata(productid)
        except SentinelAPIError as e:
            if '<STR_LIT>' in e.msg:
                logger.error('<STR_LIT>', productid)
                exit(<NUM_LIT:1>)
            else:
                raise
elif name is not None:
    search_kwargs["<STR_LIT>"] = name[<NUM_LIT:0>] if len(name) == <NUM_LIT:1> else '<STR_LIT:(>' + '<STR_LIT>'.join(name) + '<STR_LIT:)>'
    products = api.query(order_by=order_by, limit=limit, **search_kwargs)
else:
    start = start or "<STR_LIT>"
    end = end or "<STR_LIT>"
    products = api.query(date=(start, end),
                         order_by=order_by, limit=limit, **search_kwargs)
if footprints is True:
    footprints_geojson = api.to_geojson(products)
    with open(os.path.join(path, "<STR_LIT>"), "<STR_LIT:w>") as outfile:
        outfile.write(gj.dumps(footprints_geojson))
if download is True:
    product_infos, triggered, failed_downloads = api.download_all(products, path)
    if len(failed_downloads) > <NUM_LIT:0>:
        with open(os.path.join(path, "<STR_LIT>"), "<STR_LIT:w>") as outfile:
            for failed_id in failed_downloads:
                outfile.write("<STR_LIT>" % (failed_id, products[failed_id]['<STR_LIT:title>']))
else:
    for product_id, props in products.items():
        if uuid is None:
            logger.info('<STR_LIT>', product_id, props['<STR_LIT>'])
        else:
            logger.info('<STR_LIT>', product_id, props['<STR_LIT:title>'],
                        round(int(props['<STR_LIT:size>']) / (<NUM_LIT> * <NUM_LIT>), <NUM_LIT:2>))
    if uuid is None:
        logger.info('<STR_LIT>')
        logger.info('<STR_LIT>',
                    len(products), api.get_products_size(products))
|
Search for Sentinel products and, optionally, download all the results
and/or create a geojson file with the search result footprints.
Beyond your Copernicus Open Access Hub user and password, you must pass a geojson file
containing the geometry of the area you want to search, or the UUIDs of the products. If you
don't specify the start and end dates, the last 24 hours are searched.
|
f9468:m1
|
def _get_template_dirs():
|
return filter(lambda x: os.path.exists(x), [
    os.path.join(os.path.expanduser('<STR_LIT>'), '<STR_LIT>', '<STR_LIT>'),
    os.path.join('<STR_LIT:/>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'),
    os.path.join(os.path.dirname(os.path.abspath(__file__)), '<STR_LIT>'),
])
|
Existing directories to search for jinja2 templates. The order
is important: the first template found in the first existing directory wins!
|
f9479:m0
|
def _license_from_classifiers(data):
|
classifiers = data.get('<STR_LIT>', [])
found_license = None
for c in classifiers:
    if c.startswith("<STR_LIT>"):
        found_license = c.replace("<STR_LIT>", "<STR_LIT>")
return found_license
|
Try to get a license from the classifiers.
|
f9479:m8
|
def _normalize_license(data):
|
license = data.get('<STR_LIT>', None)
if not license:
    license = _license_from_classifiers(data)
if license:
    if license in SDPX_LICENSES.keys():
        data['<STR_LIT>'] = SDPX_LICENSES[license]
    else:
        data['<STR_LIT>'] = "<STR_LIT>" % (license)
else:
    data['<STR_LIT>'] = "<STR_LIT>"
|
Try to get the SPDX license.
|
f9479:m9
|
def _get_source_url(pypi_name, filename):
|
return '<STR_LIT>'.format(
    pypi_name[<NUM_LIT:0>], pypi_name, filename)
|
Get the source URL.
|
f9479:m11
|
def _get_archive_filelist(filename):
|
names = []
if tarfile.is_tarfile(filename):
    with tarfile.open(filename) as tar_file:
        names = sorted(tar_file.getnames())
elif zipfile.is_zipfile(filename):
    with zipfile.ZipFile(filename) as zip_file:
        names = sorted(zip_file.namelist())
else:
    raise ValueError("<STR_LIT>"
                     "<STR_LIT>".format(filename))
if "<STR_LIT>" in names:
    names.remove("<STR_LIT>")
return names
|
Extract the list of files from a tar or zip archive.
Args:
filename: name of the archive
Returns:
Sorted list of files in the archive, excluding './'
Raises:
ValueError: when the file is neither a zip nor a tar archive
FileNotFoundError: when the provided file does not exist (for Python 3)
IOError: when the provided file does not exist (for Python 2)
|
f9480:m0
|
def get_cmdclass():
|
return {
    "<STR_LIT>": SPDXUpdateCommand,
}
|
Dictionary of all distutils commands defined in this module.
|
f9481:m0
|
def _requirement_filter_by_marker(req):
|
if hasattr(req, '<STR_LIT>') and req.marker:
    marker_env = {
        '<STR_LIT>': '<STR_LIT:.>'.join(map(str, sys.version_info[:<NUM_LIT:2>])),
        '<STR_LIT>': sys.platform
    }
    if not req.marker.evaluate(environment=marker_env):
        return False
return True
|
Check whether the requirement's environment marker is satisfied.
This function checks for a given Requirement whether its environment marker
is satisfied on the current platform. Currently only the python version and
system platform are checked.
|
f9482:m0
|
def _requirement_find_lowest_possible(req):
|
version_dep = None
version_comp = None
for dep in req.specs:
    version = pkg_resources.parse_version(dep[<NUM_LIT:1>])
    if dep[<NUM_LIT:0>] == '<STR_LIT>':
        continue
    if (not version_dep or
            version < pkg_resources.parse_version(version_dep)):
        version_dep = dep[<NUM_LIT:1>]
        version_comp = dep[<NUM_LIT:0>]
assert (version_dep is None and version_comp is None) or (version_dep is not None and version_comp is not None)
return [
    x for x in (req.unsafe_name, version_comp, version_dep)
    if x is not None]
|
Find lowest required version.
Given a single Requirement, this function calculates the lowest required
version to satisfy it. If the requirement excludes a specific version, then
this version will not be used as the minimal supported version.
Examples
--------
>>> req = pkg_resources.Requirement.parse("foobar>=1.0,>2")
>>> _requirement_find_lowest_possible(req)
['foobar', '>=', '1.0']
>>> req = pkg_resources.Requirement.parse("baz>=1.3,>3,!=1.5")
>>> _requirement_find_lowest_possible(req)
['baz', '>=', '1.3']
|
f9482:m1
|
def _requirements_sanitize(req_list):
|
filtered_req_list = (
    _requirement_find_lowest_possible(req) for req in
    (pkg_resources.Requirement.parse(s) for s in req_list)
    if _requirement_filter_by_marker(req)
)
return ["<STR_LIT:U+0020>".join(req) for req in filtered_req_list]
|
Clean up a list of requirement strings (e.g. from requirements.txt) to only
contain entries valid for this platform, each reduced to its lowest required
version.
Example
-------
>>> from sys import version_info
>>> _requirements_sanitize([
... 'foo>=3.0',
... "monotonic>=1.0,>0.1;python_version=='2.4'",
... "bar>1.0;python_version=='{}.{}'".format(version_info[0], version_info[1])
... ])
['foo >= 3.0', 'bar > 1.0']
|
f9482:m2
|
def get_driver(browser='<STR_LIT>', args=None):
|
if browser not in BROWSER_MAPPING.keys():
    raise RuntimeError("<STR_LIT>" % (browser, "<STR_LIT:U+002CU+0020>".join(BROWSER_MAPPING.keys())))
driver_cls = BROWSER_MAPPING.get(browser)
safe_args = {}
if args is not None:
    expected_arguments = inspect.getargspec(driver_cls.__init__).args
    expected_arguments.remove("<STR_LIT>")
    for arg in expected_arguments:
        if arg in args:
            safe_args[arg] = args[arg]
return driver_cls(**safe_args)
|
:param browser:
:param args:
:rtype: RemoteDriver
:return:
|
f9483:m0
|
def define_selector(by, value, el_class):
|
el = el_class
selector = by
if isinstance(value, six.string_types):
    selector = (by, value)
elif value is not None:
    el = value
if el is None:
    el = elements.PageElement
return el, selector
|
:param by:
:param value:
:param el_class:
:rtype: tuple[type, str|tuple[str, str]]
:return:
|
f9485:m1
|
def build_locator(selector):
|
if type(selector) is tuple:
    return selector
if not isinstance(selector, six.string_types):
    raise InvalidSelectorException("<STR_LIT>")
s = selector.strip()
for test, by, index in selectors:
    if test(s):
        return by, s[index:]
raise InvalidSelectorException("<STR_LIT>".format(selector))
|
- ID = "#valid_id"
- CLASS_NAME = ".valid_class_name"
- TAG_NAME = "valid_tag_name"
- XPATH = start with "./" or "//" or "$x:"
- LINK_TEXT = start with "$link_text:"
- PARTIAL_LINK_TEXT = start with "$partial_link_text:"
- NAME = "@valid_name_attribute_value"
- CSS_SELECTOR = all others that start with *|.|#|[\w-]|\[|:
:type selector: str|tuple
:param selector:
:rtype: tuple[selenium.webdriver.common.by.By, str]
:return:
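Example (illustrative; the exact By constants follow the rules listed above):
build_locator('#login')          # -> (By.ID, 'login')
build_locator('.btn-primary')    # -> (By.CLASS_NAME, 'btn-primary')
build_locator('//div[@id="x"]')  # -> (By.XPATH, '//div[@id="x"]')
build_locator(('css selector', 'a'))  # tuples pass through unchanged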
|
f9485:m3
|
def all_elements(self):
|
return [(k, getattr(self, k)) for k, v in get_members_safety(self.__class__)
        if not k.startswith("<STR_LIT:_>") and isinstance(v, (BasePageElement,))]
|
returns all public BasePageElements grouped by this element and its parent(s)
:rtype: list[(str, BasePageElement)]
|
f9485:c0:m1
|
def child_element(self, by=By.ID, value=None, el_class=None):
|
el, selector = define_selector(by, value, el_class)
return self._init_element(el(selector))
|
Doesn't raise NoSuchElementException in case there is no element matching the selector.
In this case ``exists()`` and ``is_displayed()`` methods of the element will return *False*.
Attempt to call any other method supposed to interact with browser will raise NoSuchElementException.
usages with ``'one string'`` selector:
- find_element(by: str) -> PageElement
- find_element(by: str, value: T <= PageElement) -> T
usages with ``'webdriver'`` By selector
- find_element(by: str, value: str) -> PageElement
- find_element(by: str, value: str, el_class: T <= PageElement) -> T
:type by: str
:param by:
:type value: str | T <= PageElement
:param value:
:type el_class: T <= PageElement
:param el_class:
:rtype: T <= PageElement
:return:
|
f9485:c2:m0
|
def child_elements(self, by=By.ID, value=None, el_class=None):
|
el, selector = define_selector(by, value, el_class)
return self._init_element(elements.PageElementsList(selector, el))
|
alias for ``find_elements``
:param by:
:param value:
:param el_class:
:return:
|
f9485:c2:m1
|
def find_element(self, by=By.ID, value=None, el_class=None):
|
el = self.child_element(by, value, el_class)
el.reload()
return el
|
usages with ``'one string'`` selector:
- find_element(by: str) -> PageElement
- find_element(by: str, value: T <= PageElement) -> T
usages with ``'webdriver'`` By selector
- find_element(by: str, value: str) -> PageElement
- find_element(by: str, value: str, el_class: T <= PageElement) -> T
:type by: str
:param by:
:type value: str | T <= PageElement
:param value:
:type el_class: T <= PageElement
:param el_class:
:rtype: T <= PageElement
:return:
|
f9485:c2:m2
|
def find_elements(self, by=By.ID, value=None, el_class=None):
|
els = self.child_elements(by, value, el_class)
els.reload()
return els
|
usages with ``'one string'`` selector:
- find_elements(by: str) -> PageElementsList[ListElement]
- find_elements(by: str, value: T <= ListElement) -> PageElementsList[T]
usages with ``'webdriver'`` By selector
- find_elements(by: str, value: str) -> PageElementsList[ListElement]
- find_elements(by: str, value: str, el_class: T <= ListElement) -> PageElementsList[T]
:type by: str
:param by:
:type value: str | T <= ListElement
:param value:
:type el_class: T <= ListElement
:param el_class:
:rtype: PageElementsList[T | ListElement]
:return:
|
f9485:c2:m3
|
def exists(self):
|
t = self.wait_timeout
self.wait_timeout = <NUM_LIT:0>
try:
    self.reload()
    return True
except NoSuchElementException:
    return False
finally:
    self.wait_timeout = t
|
:return: True if the element is present in the DOM, otherwise False.
Ignores implicit and element timeouts and executes immediately.
|
f9486:c0:m2
|
def is_displayed(self):
|
t = self.wait_timeout
self.wait_timeout = <NUM_LIT:0>
try:
    return super(PageElement, self).is_displayed()
except NoSuchElementException:
    return False
finally:
    self.wait_timeout = t
|
:return: False if the element is not present in the DOM or is invisible, otherwise True.
Ignores implicit and element timeouts and executes immediately.
To wait until the element is displayed or hidden, use ``waiter.wait_displayed`` or ``waiter.wait_not_displayed``
|
f9486:c0:m3
|
def __init__(self, container, index):
|
super(_ListItem, self).__init__("<STR_LIT>")
self._container = container
self._index = index
self.__cached__ = True
self._name = "<STR_LIT>".format(container.name, index)
|
:type container: PageElementsList
:param container:
:param index:
|
f9486:c1:m0
|
def __init__(self, selector, el_class=PageElement, timeout=None, name=None):
|
super(PageElementsList, self).__init__(selector, name, timeout)
self._el_class = type("<STR_LIT>" + el_class.__name__ + uuid.uuid4().get_hex(), (_ListItem, el_class,), {})
self.__cache = {}
self.__items = []
|
:type selector: tuple[str, str]|str
:param selector:
:type el_class: T <= PageElement
:param el_class:
:type name: str
:param name:
:return:
|
f9486:c2:m0
|
def is_displayed(self):
|
t = self.wait_timeout
self.wait_timeout = <NUM_LIT:0>
try:
    self.reload()
    return any(e.is_displayed() for e in self)
finally:
    self.wait_timeout = t
|
:return: True if at least one element is displayed, otherwise False.
Ignores implicit and element timeouts and executes immediately.
|
f9486:c2:m1
|
def wait(method, timeout, fail_on_timeout=None, **kwargs):
|
return Waiter(lambda value: bool(value)).start(method, timeout, fail_on_timeout, **kwargs)
|
Wait up to ``timeout`` seconds until ``method(**kwargs)`` returns a ``value`` for which *bool(value) == True*.
Returns the last ``value``.
If the time expires and ``fail_on_timeout`` is specified, raises TimeoutException.
:param method:
:param timeout:
:param fail_on_timeout:
:param kwargs:
:return:
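Example (sketch; `element` is a placeholder page element):
# Poll element.text for up to 10 seconds; the last value seen is returned.
text = wait(lambda: element.text, 10)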
|
f9487:m0
|
def wait_not(method, timeout, fail_on_timeout=None, **kwargs):
|
return Waiter(lambda value: not value).start(method, timeout, fail_on_timeout, **kwargs)
|
Wait up to ``timeout`` seconds until ``method(**kwargs)`` returns a ``value`` for which *not value == True*.
Returns the last ``value``.
If the time expires and ``fail_on_timeout`` is specified, raises TimeoutException.
:param method:
:param timeout:
:param fail_on_timeout:
:param kwargs:
:return:
|
f9487:m1
|
def wait_displayed(element, timeout=None, fail_on_timeout=None):
|
return wait(lambda: element.is_displayed(), timeout or element.wait_timeout, fail_on_timeout)
|
Wait until the element becomes visible or the time runs out.
Returns True if the element became visible, otherwise False.
If timeout is not specified or 0, the element's own wait timeout is used.
:param element:
:param timeout:
:param fail_on_timeout:
:return:
|
f9487:m2
|
def wait_not_displayed(element, timeout=None, fail_on_timeout=None):
|
return wait(lambda: not element.is_displayed(), timeout or element.wait_timeout, fail_on_timeout)
|
Wait until the element becomes invisible or the time runs out.
Returns True if the element became invisible, otherwise False.
If timeout is not specified or 0, the element's own wait timeout is used.
:param element:
:param timeout:
:param fail_on_timeout:
:return:
|
f9487:m3
|
def aggregate_per_prefix(self, start_time, end_time, limit=<NUM_LIT:0>, net_masks='<STR_LIT>', exclude_net_masks=False, filter_proto=None):
|
if net_masks == '<STR_LIT>':
    net_mask_filter = '<STR_LIT>'
elif not exclude_net_masks:
    net_mask_filter = '<STR_LIT>'.format(net_masks)
elif exclude_net_masks:
    net_mask_filter = '<STR_LIT>'.format(net_masks)
if filter_proto is None:
    proto_filter = '<STR_LIT>'
elif int(filter_proto) == <NUM_LIT:4>:
    proto_filter = '<STR_LIT>'
elif int(filter_proto) == <NUM_LIT:6>:
    proto_filter = '<STR_LIT>'
query = '''<STR_LIT>'''.format(net_mask_filter, proto_filter)
if limit > <NUM_LIT:0>:
    query += '<STR_LIT>' % limit
return self._execute_query(query, [start_time, end_time])
|
Given a time range, aggregates bytes per prefix.
Args:
start_time: A string representing the starting time of the time range
end_time: A string representing the ending time of the time range
limit: An optional integer. If it's >0 it will limit the number of prefixes returned.
filter_proto: Can be:
- None: Returns both IPv4 and IPv6
- 4: Returns only IPv4
- 6: Returns only IPv6
Returns:
A list of prefixes sorted by sum_bytes. For example:
[
{'key': '192.168.1.0/25', 'sum_bytes': 3000, 'as_dst': 345},
{'key': '192.213.1.0/25', 'sum_bytes': 2000, 'as_dst': 123},
{'key': '231.168.1.0/25', 'sum_bytes': 1000, 'as_dst': 321},
]
|
f9491:c0:m4
|
def aggregate_per_as(self, start_time, end_time):
|
query = '''<STR_LIT>'''
return self._execute_query(query, [start_time, end_time])
|
Given a time range, aggregates bytes per ASN.
Args:
start_time: A string representing the starting time of the time range
end_time: A string representing the ending time of the time range
Returns:
A list of prefixes sorted by sum_bytes. For example:
[
{'key': '6500', 'sum_bytes': 3000},
{'key': '2310', 'sum_bytes': 2000},
{'key': '8182', 'sum_bytes': 1000},
]
|
f9491:c0:m5
|
def normalize(ext):
|
return re.sub(r'<STR_LIT>', '<STR_LIT>', str(ext).lower())
|
Normalise file extensions
|
f9511:m0
|
def unique(seq):
|
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
|
Removes duplicate elements from a List
|
f9511:m1
|
@staticmethod
def register(template_class, *extensions):
|
for ext in extensions:
    ext = normalize(ext)
    if ext not in Lean.template_mappings:
        Lean.template_mappings[ext] = []
    Lean.template_mappings[ext].insert(<NUM_LIT:0>, template_class)
    Lean.template_mappings[ext] = unique(Lean.template_mappings[ext])
|
Register a template for a given extension or range of extensions
|
f9511:c0:m0
|
@staticmethod
def prefer(template_class, *extensions):
|
if not extensions:
    # No extensions given: prefer the class for every extension it is registered for.
    for (ext, klasses) in list(Lean.template_mappings.items()):
        if klasses.count(template_class):
            Lean.preferred_mappings[ext] = template_class
else:
    for ext in extensions:
        ext = normalize(ext)
        Lean.register(template_class, ext)
        Lean.preferred_mappings[ext] = template_class
|
Makes a template class preferred for the given file extensions. If you
don't provide any extensions, it will be preferred for all its already
registered extensions:
# Prefer Markdown for its registered file extensions:
Lean.prefer(MarkdownTemplate)
# Prefer Markdown only for the .md extension:
Lean.prefer(MarkdownTemplate, '.md')
|
f9511:c0:m1
|
@staticmethod
def is_registered(ext):
|
return ext.lower() in Lean.template_mappings and len(Lean.template_mappings[ext])
|
Returns True when a template is registered for an exact match of the provided file extension
|
f9511:c0:m2
|
@staticmethod
def load(file, line=None, options={}, block=None):
|
template_class = Lean.get_template(file)
if template_class:
    return template_class(file, line, options, block)
else:
    raise LookupError(
        '<STR_LIT>' + os.path.basename(file))
|
Create a new template for the given file using the file's extension
to determine the template mapping.
|
f9511:c0:m3
|
@staticmethod
def get_template(file):
|
pattern = str(file).lower()
while len(pattern) and not Lean.is_registered(pattern):
    pattern = os.path.basename(pattern)
    pattern = re.sub(r'<STR_LIT>', '<STR_LIT>', pattern)
preferred_klass = Lean.preferred_mappings[pattern] if pattern in Lean.preferred_mappings else None
if preferred_klass:
    return preferred_klass
klasses = Lean.template_mappings[pattern]
template = None
for klass in klasses:
    if hasattr(klass, '<STR_LIT>') and callable(klass.is_engine_initialized):
        if klass.is_engine_initialized():
            template = klass
            break
    if template:
        return template
first_failure = None
for klass in klasses:
    try:
        return klass
    except Exception as e:
        if not first_failure:
            first_failure = e
        if first_failure:
            raise Exception(first_failure)
|
Look up a template class for the given filename or file
extension. Return None when no implementation is found.
|
f9511:c0:m4
|
def __init__(self, file=None, line=<NUM_LIT:1>, options={}, block=None):
|
self._file = os.path.abspath(file) if file else None
self._line = line
self._options = options
if not file and not block:
    raise ValueError('<STR_LIT>')
# Initialize the engine once, the first time an instance of this class has been created.
if not self.is_engine_initialized():
    self.initialize_engine()
    self.__class__.engine_initialized = True
self.compiled_methods = {}
self.default_encoding = self._options.pop('<STR_LIT>', '<STR_LIT>')
self.reader = block if block else lambda t: io.open(self._file, '<STR_LIT:r>', encoding=self.default_encoding).read()
self.data = self.reader(self)
self.prepare()  # __init__ must not return a value
|
Create a new template with the file, line, and options specified. By
default, template data is read from the file. When a block is given,
it should read template data and return it as a string. When file is None,
a block is required.
All arguments are optional.
|
f9512:c0:m1
|
def render(self, scope=None, local_vars=None, block=None):
|
if not scope:
    class Scope(object):
        pass
    scope = Scope()
return self.evaluate(scope, local_vars or {}, block)
|
Render the template in the given scope with the locals specified. If a
block is given, it is typically available within the template via
+yield+.
|
f9512:c0:m2
|
def basename(self, suffix='<STR_LIT>'):
|
name = os.path.basename(self._file) if self._file else None
return name[:-len(suffix)] if name and suffix and name.endswith(suffix) else name
|
The basename of the template file.
|
f9512:c0:m3
|
def name(self):
|
return self.basename().split('<STR_LIT:.>', <NUM_LIT:2>)[<NUM_LIT:0>] if self.basename() else None
|
The template file's basename with all extensions chomped off.
|
f9512:c0:m4
|
def eval_file(self):
|
return self._file if self._file else '<STR_LIT>'
|
The filename used in backtraces to describe the template.
|
f9512:c0:m5
|
def initialize_engine(self):
|
return
|
Called once and only once for each template subclass the first time
the template class is initialized. This should be used to import the
underlying template library and perform any initial setup.
|
f9512:c0:m6
|
def prepare(self):
|
raise NotImplementedError
|
Do whatever preparation is necessary to setup the underlying template
engine. Called immediately after template data is loaded. Instance
variables set in this method are available when #evaluate is called.
Subclasses must provide an implementation of this method.
|
f9512:c0:m7
|
def evaluate(self, scope, local_vars, block=None):
|
method = self.compiled_method(local_vars.keys())
setattr(scope, '<STR_LIT>', method)
return scope.compiled(local_vars, block=block)
|
Execute the compiled template and return the result string. Template
evaluation is guaranteed to be performed in the scope object with the
locals specified and with support for yielding to the block.
This method is only used by source generating templates. Subclasses that
override render() may not support all features.
|
f9512:c0:m8
|
def Vars(*args):
|
return tuple(Var(x) for x in args)
|
Convenience function to return a tuple of unbound variables
|
f9519:m0
|
def __init__(self) -> None:
|
self.m: int = <NUM_LIT:0>
self.n: int = <NUM_LIT:0>
self.T: int = <NUM_LIT:0>
self.var_names = {}
|
Instantiate a fluxion.
INPUTS:
=======
m: space R^m that this function maps from
n: space R^n that this function maps to
f is a mapping from R^m to R^n and has an mxn Jacobian
name: the name of this fluxion
|
f9519:c1:m0
|
def shape(self) -> Tuple[int, int]:
|
return (self.m, self.n)
|
The shape of this fluxion according to the numpy standard
|
f9519:c1:m1
|
def val(self, *args):
|
arg_dicts = self._parse_args_forward_mode(*args)
val, diff = self._forward_mode(*arg_dicts)
return val
|
Function evaluation; abstract base class
|
f9519:c1:m2
|
def diff(self, *args):
|
arg_dicts = self._parse_args_forward_mode(*args)<EOL>val, diff = self._forward_mode(*arg_dicts)<EOL>return diff<EOL>
|
Call forward_mode; discard value, only keep the derivative.
|
f9519:c1:m3
|
def __call__(self, *args):
|
<EOL>arg_dicts = self._parse_args_forward_mode(*args)<EOL>val, diff = self._forward_mode(*arg_dicts)<EOL>return (val, diff)<EOL>
|
Make Fluxion objects callable like functions; returns the tuple (value, derivative)
|
f9519:c1:m4
|
def _forward_mode(self, *args):
|
raise NotImplementedError<EOL>
|
Forward mode differentiation; abstract base class
|
f9519:c1:m5
|
def _parse_args_forward_mode(self, *args) -> Tuple[dict, dict]:
|
<EOL>arg_vars: dict = {}<EOL>arg_seed: dict = {}<EOL>argc: int = len(args)<EOL>m: int = self.m<EOL>if argc == <NUM_LIT:0>:<EOL><INDENT>return (None, None)<EOL><DEDENT>if argc == <NUM_LIT:1>:<EOL><INDENT>arg = args[<NUM_LIT:0>]<EOL>if arg is None:<EOL><INDENT>return (None, None)<EOL><DEDENT>elif isinstance(arg, dict):<EOL><INDENT>arg_vars = self._parse_var_dict(arg)<EOL><DEDENT>elif isinstance(arg, scalar_instance_types):<EOL><INDENT>arg_vars = self._parse_var_scalar(arg)<EOL><DEDENT>elif isinstance(arg, np.ndarray):<EOL><INDENT>arg_vars = self._parse_var_array(arg)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>T_vars = self._check_forward_mode_input_dict(arg_vars)<EOL>self.T = T_vars<EOL>return (arg_vars, self._default_seed(arg_vars))<EOL><DEDENT>if argc == <NUM_LIT:2>:<EOL><INDENT>if isinstance(args[<NUM_LIT:0>], dict) and isinstance(args[<NUM_LIT:1>], dict):<EOL><INDENT>arg_vars = self._parse_var_dict(args[<NUM_LIT:0>])<EOL>arg_seed = self._parse_seed_dict(args[<NUM_LIT:1>])<EOL><DEDENT>elif isinstance(args[<NUM_LIT:0>], scalar_instance_types) and isinstance(args[<NUM_LIT:1>], scalar_instance_types):<EOL><INDENT>arg_vars = self._parse_var_scalar(args[<NUM_LIT:0>])<EOL>arg_seed = self._parse_var_scalar(args[<NUM_LIT:1>])<EOL><DEDENT>elif isinstance(args[<NUM_LIT:0>], np.ndarray) and isinstance(args[<NUM_LIT:1>], np.ndarray):<EOL><INDENT>arg_vars = self._parse_var_array(args[<NUM_LIT:0>])<EOL>arg_seed = self._parse_var_array(args[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>T_vars = self._check_forward_mode_input_dict(arg_vars)<EOL>T_seed = self._check_forward_mode_input_dict(arg_seed)<EOL>self.T = T_vars<EOL>if T_seed in (<NUM_LIT:1>, T_vars):<EOL><INDENT>return (arg_vars, arg_seed)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT><DEDENT>if argc == <NUM_LIT:2> * m:<EOL><INDENT>X = np.array(args[<NUM_LIT:0>:m], dtype=np.float64)<EOL>dX = np.array(args[m:<NUM_LIT:2>*m], dtype=np.float64)<EOL>return self._parse_args_forward_mode(X, dX)<EOL><DEDENT>msg = f'<STR_LIT>'<EOL>for arg in args:<EOL><INDENT>msg += f'<STR_LIT>'<EOL><DEDENT>raise ValueError(f'<STR_LIT>')<EOL>
|
Parse input arguments used in forward mode differentiation.
End result will be two arrays X and dX, each of shape (m) or (T, m).
Allowed input shapes are:
(1) ARRAY_M: two arrays of size m
(2) ARRAY_TxM: two arrays of size Txm
(3) DICT: two dictionaries, each mapping variable names to values
(4) ARGS: a list of 2m values; m variables followed by m seeds
(5) KWARGS: a kwargs list (currently not supported)
|
f9519:c1:m6
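For a concrete fluxion f over variables x and y (m = 2), the accepted calling conventions enumerated in the docstring look like this (a sketch; exact numeric results depend on f):

    import numpy as np

    # (3) DICT: values and seeds keyed by variable name
    f.diff({'x': 1.0, 'y': 2.0}, {'x': 1, 'y': 0})      # df/dx at (1, 2)

    # (1)/(2) ARRAY: values and seeds of shape (m,) or (T, m)
    f.diff(np.array([1.0, 2.0]), np.array([1.0, 0.0]))

    # (4) ARGS: 2m scalars, m values followed by m seeds
    f.diff(1.0, 2.0, 1.0, 0.0)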
|
def _default_seed(self, var_tbl: dict) -> dict:
|
return {v: <NUM_LIT:1> for v in var_tbl}<EOL>
|
Returns inferred dict of variable: val = 1 pairs
|
f9519:c1:m7
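For example, _default_seed({'x': 2.0, 'y': 3.0}) returns {'x': 1, 'y': 1}, seeding every variable with a unit derivative.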
|
def _parse_var_dict(self, var_tbl: dict) -> dict:
|
var_names = var_tbl.copy()<EOL>for v in self.var_names:<EOL><INDENT>if v not in var_tbl:<EOL><INDENT>if self.var_names[v] is None:<EOL><INDENT>raise KeyError("<STR_LIT>" + v + "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>var_names[v] = self.var_names[v]<EOL><DEDENT><DEDENT><DEDENT>return self._squeeze(var_names)<EOL>
|
Create an extended list of variable names including any pre-bound ones
Throws an error if a variable is unbound
Returns inferred dict of variable: val pairs
|
f9519:c1:m8
|
def _parse_seed_dict(self, var_tbl: dict) -> dict:
|
var_names = var_tbl.copy()<EOL>for v in self.var_names:<EOL><INDENT>if v not in var_tbl:<EOL><INDENT>raise KeyError("<STR_LIT>" + v + "<STR_LIT>")<EOL><DEDENT><DEDENT>return self._squeeze(var_names)<EOL>
|
Create an extended list of variable names including any pre-bound ones
Throws an error if a variable is unbound
Returns inferred dict of variable: val pairs
|
f9519:c1:m9
|
def _squeeze(self, var_tbl: dict) -> dict:
|
var_names = var_tbl.copy()<EOL>for v in var_tbl:<EOL><INDENT>val = var_tbl[v]<EOL>if isinstance(val, np.ndarray):<EOL><INDENT>var_names[v] = val.squeeze()<EOL><DEDENT><DEDENT>return var_names<EOL>
|
Makes sure no extra dimensions are floating around in the input arrays
Returns inferred dict of variable: val pairs
|
f9519:c1:m10
|
def _parse_var_scalar(self, X: scalar_type) -> dict:
|
arg_vars = {}<EOL>for var_name in self.var_names:<EOL><INDENT>arg_vars[var_name] = X<EOL><DEDENT>return arg_vars<EOL>
|
Bind the scalar X to every variable in self.var_names
Returns inferred dict of variable: val pairs
|
f9519:c1:m11
|
def _parse_var_array(self, X: np.ndarray) -> dict:
|
arg_vars = {}<EOL>shape = X.shape<EOL>tensor_rank = len(shape)<EOL>T = self._check_forward_mode_input_array(X)<EOL>if tensor_rank == <NUM_LIT:0>:<EOL><INDENT>for var_name in self.var_names:<EOL><INDENT>arg_vars[var_name] = X<EOL><DEDENT><DEDENT>if tensor_rank == <NUM_LIT:1> and T == shape[<NUM_LIT:0>]:<EOL><INDENT>for var_name in self.var_names:<EOL><INDENT>arg_vars[var_name] = X.squeeze()<EOL><DEDENT><DEDENT>elif tensor_rank == <NUM_LIT:1>:<EOL><INDENT>for j, var_name in enumerate(sorted(self.var_names)):<EOL><INDENT>arg_vars[var_name] = X[j].squeeze()<EOL><DEDENT><DEDENT>elif tensor_rank == <NUM_LIT:2>:<EOL><INDENT>for j, var_name in enumerate(sorted(self.var_names)):<EOL><INDENT>arg_vars[var_name] = X[:,j].squeeze()<EOL><DEDENT><DEDENT>return arg_vars<EOL>
|
Unpack the numpy array and bind each column to one of the variables in self.var_names
Returns inferred dict of variable: val pairs
|
f9519:c1:m12
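A sketch of the column binding performed above: variables are sorted by name, and column j of a (T, m) input is bound to the j-th variable:

    import numpy as np

    # Suppose self.var_names has keys {'x', 'y'} (m = 2) and T = 3 samples.
    X = np.array([[1.0, 10.0],
                  [2.0, 20.0],
                  [3.0, 30.0]])
    # _parse_var_array(X) -> {'x': X[:, 0], 'y': X[:, 1]}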
|
def _check_forward_mode_input_dict(self, var_tbl: dict) -> int:
|
T: int = <NUM_LIT:1><EOL>for var_name in var_tbl:<EOL><INDENT>val = var_tbl[var_name]<EOL>if isinstance(val, scalar_instance_types):<EOL><INDENT>t = <NUM_LIT:1><EOL><DEDENT>elif isinstance(val, np.ndarray):<EOL><INDENT>t = self._calc_T_var(val)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>if t > <NUM_LIT:1> and T == <NUM_LIT:1>:<EOL><INDENT>T = t<EOL><DEDENT>elif t not in (<NUM_LIT:1>,T):<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT><DEDENT>return T<EOL>
|
Check whether one forward mode input dict has elements of valid shape
Returns inferred value of T
|
f9519:c1:m13
|
def _check_forward_mode_input_array(self, X: np.ndarray) -> int:
|
<EOL>if not isinstance(X, np.ndarray):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>shape = X.shape<EOL>tensor_rank = len(shape)<EOL>T = <NUM_LIT:0><EOL>if tensor_rank not in (<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>):<EOL><INDENT>raise ValueError(f'<STR_LIT>') <EOL><DEDENT>if tensor_rank == <NUM_LIT:0>:<EOL><INDENT>T = <NUM_LIT:1><EOL><DEDENT>if tensor_rank == <NUM_LIT:1> and (shape[<NUM_LIT:0>] != self.m) and self.m != <NUM_LIT:1>:<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>if tensor_rank == <NUM_LIT:1> and shape[<NUM_LIT:0>] == self.m:<EOL><INDENT>T = <NUM_LIT:1><EOL><DEDENT>if tensor_rank == <NUM_LIT:1> and self.m == <NUM_LIT:1>:<EOL><INDENT>T = shape[<NUM_LIT:0>]<EOL><DEDENT>if tensor_rank == <NUM_LIT:2> and (shape[<NUM_LIT:1>] != self.m):<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>if tensor_rank == <NUM_LIT:2>:<EOL><INDENT>T = shape[<NUM_LIT:0>]<EOL><DEDENT>return T<EOL>
|
Check whether one forward mode input array is of valid shape
Returns inferred value of T
|
f9519:c1:m14
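Summarizing the branches above, the inferred sample count T for a fluxion with m inputs is:

    shape ()           -> T = 1  (scalar broadcast to all variables)
    shape (m,)         -> T = 1  (one sample of all m variables)
    shape (k,), m == 1 -> T = k  (k samples of the single variable)
    shape (k, m)       -> T = k  (k samples; columns are variables)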
|
def _calc_T_var(self,X) -> int:
|
shape = X.shape<EOL>tensor_rank: int = len(shape)<EOL>if tensor_rank == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>if tensor_rank == <NUM_LIT:1>:<EOL><INDENT>return shape[<NUM_LIT:0>]<EOL><DEDENT>if tensor_rank == <NUM_LIT:2>:<EOL><INDENT>if shape[<NUM_LIT:1>] > <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return shape[<NUM_LIT:0>]<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL>
|
Calculate the number of samples, T, from the shape of X
|
f9519:c1:m15
|
def _forward_mode(self, *args):
|
<EOL>val: float = self.a<EOL>diff: float = <NUM_LIT:0.0><EOL>return (val, diff)<EOL>
|
Forward mode differentiation for a constant
|
f9519:c2:m1
|
def _forward_mode(self, *args):
|
<EOL>X: np.ndarray<EOL>dX: np.ndarray<EOL>X, dX = self.f._forward_mode(*args)<EOL>p: float = self.p<EOL>val = X ** p<EOL>diff = p * X ** (p-<NUM_LIT:1>) * dX<EOL>return (val, diff)<EOL>
|
Forward mode differentiation for a power function f(x) ** p
|
f9519:c4:m1
|
def _forward_mode(self, *args):
|
<EOL>f_val, f_diff = self.f._forward_mode(*args)<EOL>g_val, g_diff = self.g._forward_mode(*args)<EOL>val = f_val + g_val<EOL>diff = f_diff + g_diff<EOL>return val, diff<EOL>
|
Forward mode differentiation for a sum
|
f9519:c6:m1
|
def _forward_mode(self, *args):
|
<EOL>f_val, f_diff = self.f._forward_mode(*args)<EOL>g_val, g_diff = self.g._forward_mode(*args)<EOL>val = f_val - g_val<EOL>diff = f_diff - g_diff<EOL>return val, diff<EOL>
|
Forward mode differentiation for a difference
|
f9519:c7:m1
|
def _forward_mode(self, *args):
|
<EOL>f_val, f_diff = self.f._forward_mode(*args)<EOL>g_val, g_diff = self.g._forward_mode(*args)<EOL>val = f_val * g_val<EOL>return (val, f_val * g_diff + f_diff * g_val)<EOL>
|
Forward mode differentiation for a product
|
f9519:c8:m1
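A worked check of these arithmetic rules using the Var class defined below, assuming the operator overloads that build these sum/product fluxions exist (the surrounding class ids suggest they do): for f(x) = x * (x + 3), forward mode at x = 2 with seed 1 should give value 2 * 5 = 10 and derivative 2x + 3 = 7:

    x = Var('x')
    f = x * (x + 3)
    val, diff = f(2.0, 1.0)
    # expected: val == 10.0 and diff == 7.0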
|
def __init__(self, var_name: str, initial_value: Optional[np.ndarray]=None):
|
<EOL>self.m = <NUM_LIT:1><EOL>self.n = <NUM_LIT:1><EOL>if initial_value is not None:<EOL><INDENT>X = initial_value<EOL>if not isinstance(X, value_instance_types):<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>if isinstance(X, scalar_instance_types):<EOL><INDENT>X = np.array(float(X))<EOL>self.X = X<EOL>self.T = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>self.X = X<EOL>self.T = self._calc_T_var(X)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.X = None<EOL>self.T = <NUM_LIT:0><EOL><DEDENT>self.var_name = var_name<EOL>self.var_names = {var_name: self.X}<EOL>
|
Variables must be instantiated with a name; binding an initial value is optional
|
f9519:c10:m0
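For example, a variable can be created unbound or with a value bound at construction:

    import numpy as np

    x = Var('x')                        # unbound: value supplied at evaluation time
    y = Var('y', 2.0)                   # scalar binding, T = 1
    z = Var('z', np.array([1.0, 2.0]))  # vector binding, T = 2 samples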
|
def _forward_mode(self, *args):
|
<EOL>X: np.ndarray<EOL>dX: np.ndarray<EOL>X, dX = self._parse_dicts(*args)<EOL>if X is not None:<EOL><INDENT>val = X<EOL><DEDENT>else:<EOL><INDENT>val = self.X<EOL><DEDENT>if dX is not None:<EOL><INDENT>diff = dX<EOL><DEDENT>else:<EOL><INDENT>diff = np.ones_like(val)<EOL><DEDENT>return (val, diff)<EOL>
|
Forward mode differentiation for variables
|
f9519:c10:m1
|
def _parse_dicts(self, *args) -> Tuple[np.ndarray, np.ndarray]:
|
<EOL>X: np.ndarray<EOL>dX: np.ndarray<EOL>argc: int = len(args)<EOL>if argc == <NUM_LIT:2>:<EOL><INDENT>arg_vars = args[<NUM_LIT:0>]<EOL>arg_seed = args[<NUM_LIT:1>]<EOL>if arg_vars is None:<EOL><INDENT>return (None,None)<EOL><DEDENT>X = self._parse_vars_tbl(arg_vars)<EOL>dX = self._parse_seed_tbl(arg_seed)<EOL>if X.shape[<NUM_LIT:0>] > <NUM_LIT:1> and dX.shape[<NUM_LIT:0>] == <NUM_LIT:1>:<EOL><INDENT>dX = np.tile(dX, (X.shape[<NUM_LIT:0>], <NUM_LIT:1>))<EOL><DEDENT>return (X,dX)<EOL><DEDENT>msg = f'<STR_LIT>'<EOL>for arg in args:<EOL><INDENT>msg += f'<STR_LIT>'<EOL><DEDENT>raise ValueError(f'<STR_LIT>')<EOL>
|
Parse input arguments used in function evaluation.
End result will be two arrays X and dX, each of shape (m) or (T, m)
Allowed input shapes are:
(1) DICT: a single dictionary mapping variable names to values
(2) ARGS: 2 dictionaries mapping variable names to values and seeds
|
f9519:c10:m2
|
def _parse_vars_tbl(self, var_tbl):
|
<EOL>T = self._check_forward_mode_input_dict(var_tbl)<EOL>shape = (T, <NUM_LIT:1>)<EOL>X = np.zeros(shape)<EOL>X[:,<NUM_LIT:0>] = var_tbl[self.var_name]<EOL>return X<EOL>
|
Parse a table of variable bindings (dictionary with key = variable name)
|
f9519:c10:m3
|
def _parse_seed_tbl(self, var_tbl):
|
m: int = len(var_tbl)<EOL>T = self._check_forward_mode_input_dict(var_tbl)<EOL>shape = (T, m)<EOL>X = np.zeros(shape)<EOL>for j, var_name in enumerate(sorted(var_tbl)):<EOL><INDENT>if var_name in self.var_names:<EOL><INDENT>X[:,j] = var_tbl[var_name]<EOL><DEDENT><DEDENT>return X<EOL>
|
Parse a table of seed values (dictionary with key = variable name)
|
f9519:c10:m4
|
def jacobian(f, v, v_mapping):
|
<EOL>if isinstance(f, Fluxion):<EOL><INDENT>f = [f]<EOL><DEDENT>f = np.asarray(f)<EOL>v = np.asarray(v)<EOL>v_mapping = _check_input_vals(v_mapping)<EOL>m = len(v)<EOL>n = len(f)<EOL>T = len(list(v_mapping.values())[<NUM_LIT:0>])<EOL>J = np.zeros((m,n,T))<EOL>seed = dict.fromkeys(v, <NUM_LIT:1>)<EOL>for i, f_i in enumerate(f):<EOL><INDENT>dfi_dvj = f_i.diff(v_mapping, seed)<EOL>J[:,i,:] = dfi_dvj.T<EOL><DEDENT>return J.squeeze()<EOL>
|
f: single fluxion object or an array or list of fluxions, representing a scalar or vector function
v: vector of variables in f with respect to which the Jacobian will be calculated
v_mapping: dict mapping variables in f to scalar or vector of values
|
f9520:m1
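A usage sketch for jacobian(), assuming the Var class and arithmetic overloads above; values are passed as length-T arrays since T is inferred from the length of the first mapped value:

    import numpy as np

    x, y = Var('x'), Var('y')
    J = jacobian([x * y, x + y], ['x', 'y'],
                 {'x': np.array([2.0]), 'y': np.array([3.0])})
    # expected (rows are variables, columns are functions):
    # [[3., 1.],   # d(x*y)/dx, d(x+y)/dx
    #  [2., 1.]]   # d(x*y)/dy, d(x+y)/dy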
|
def report_success():
|
test_name = sys._getframe(<NUM_LIT:1>).f_code.co_name<EOL>print(f'<STR_LIT>')<EOL>
|
Report that a test was successful
|
f9522:m0
|
def report_success():
|
test_name = sys._getframe(<NUM_LIT:1>).f_code.co_name<EOL>print(f'<STR_LIT>')<EOL>
|
Report that a test was successful
|
f9523:m0
|
def _minus_sin(x):
|
return -np.sin(x)<EOL>
|
Return -sin(x); for the derivative of cos(x)
|
f9525:m0
|
def _sec2(x):
|
cx = np.cos(x)<EOL>return <NUM_LIT:1.0> / (cx * cx)<EOL>
|
Return sec^2(x); for the derivative of tan(x)
|
f9525:m1
|
def _deriv_arcsin(x):
|
return <NUM_LIT:1.0> / np.sqrt(<NUM_LIT:1.0> - x*x)<EOL>
|
The derivative of arcsin(x)
|
f9525:m2
|
def _deriv_arccos(x):
|
return -<NUM_LIT:1.0> / np.sqrt(<NUM_LIT:1.0> - x*x)<EOL>
|
The derivative of arccos(x)
|
f9525:m3
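These helpers pair elementary functions with their analytic derivatives for use in tests. A quick central-difference sanity check of _sec2 as the derivative of tan, away from its poles:

    import numpy as np

    x = np.linspace(-1.0, 1.0, 5)
    h = 1e-6
    fd = (np.tan(x + h) - np.tan(x - h)) / (2 * h)
    assert np.allclose(fd, _sec2(x), atol=1e-5)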
|