code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
# Default implementation data = self.pack(namedstruct) return stream.write(data)
def packto(self, namedstruct, stream)
Pack a struct to a stream :param namedstruct: struct to pack :param stream: a buffered stream :return: appended bytes size
6.603942
9.936929
0.664586
''' Unpack the struct from specified bytes. If the struct is sub-classed, definitions from the sub type is not unpacked. :param data: bytes of the struct, including fields of sub type and "extra" data. :param namedstruct: a NamedStruct object of this type :returns: unused bytes from data, which forms data of the sub type and "extra" data. ''' try: result = self.struct.unpack(data[0:self.struct.size]) except struct.error as exc: raise BadFormatError(exc) start = 0 t = namedstruct._target for p in self.properties: if len(p) > 1: if isinstance(result[start], bytes): v = [r.rstrip(b'\x00') for r in result[start:start + p[1]]] else: v = list(result[start:start + p[1]]) start += p[1] else: v = result[start] if isinstance(v, bytes): v = v.rstrip(b'\x00') start += 1 setin = t for sp in p[0][0:-1]: if not hasattr(setin, sp): setin2 = InlineStruct(namedstruct._target) setattr(setin, sp, setin2) setin = setin2 else: setin = getattr(setin, sp) setattr(setin, p[0][-1], v) return data[self.struct.size:]
def unpack(self, data, namedstruct)
Unpack the struct from specified bytes. If the struct is sub-classed, definitions from the sub type are not unpacked. :param data: bytes of the struct, including fields of sub type and "extra" data. :param namedstruct: a NamedStruct object of this type :returns: unused bytes from data, which form the data of the sub type and "extra" data.
3.801316
2.15451
1.764353
''' Pack the struct and return the packed bytes. :param namedstruct: a NamedStruct of this type. :returns: packed bytes, only contains fields of definitions in this type, not the sub type and "extra" data. ''' elements = [] t = namedstruct._target for p in self.properties: v = t for sp in p[0]: v = getattr(v, sp) if len(p) > 1: elements.extend(v[0:p[1]]) else: elements.append(v) return self.struct.pack(*elements)
def pack(self, namedstruct)
Pack the struct and return the packed bytes. :param namedstruct: a NamedStruct of this type. :returns: packed bytes, only contains fields of definitions in this type, not the sub type and "extra" data.
6.180995
2.703819
2.286024
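A minimal illustration of the packing approach in the two records above, using only the standard struct module; the format string and values are invented for illustration, and the NUL-stripping mirrors what the unpacker does with bytes fields.

    import struct

    fmt = struct.Struct('>H4s')            # a 16-bit integer followed by a 4-byte padded bytes field
    packed = fmt.pack(7, b'ab')            # b'ab' is NUL-padded to 4 bytes on packing
    num, name = fmt.unpack(packed)
    print(num, name.rstrip(b'\x00'))       # 7 b'ab' -- trailing NUL bytes stripped, as in unpack() above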
''' Compatible to Parser.parse() ''' if len(buffer) < self.struct.size: return None try: return (self.struct.unpack(buffer[:self.struct.size])[0], self.struct.size) except struct.error as exc: raise BadFormatError(exc)
def parse(self, buffer, inlineparent = None)
Compatible to Parser.parse()
4.808735
3.63216
1.323932
''' Compatible to Parser.create() ''' try: return self.struct.unpack(data)[0] except struct.error as exc: raise BadFormatError(exc)
def create(self, data, inlineparent = None)
Compatible to Parser.create()
9.676879
5.505748
1.757596
''' Compatible to Parser.parse() ''' size = 0 v = [] for i in range(0, self.size): # @UnusedVariable r = self.innerparser.parse(buffer[size:], None) if r is None: return None v.append(r[0]) size += r[1] return (v, size)
def parse(self, buffer, inlineparent = None)
Compatible to Parser.parse()
4.722564
3.66607
1.288182
''' Compatible to Parser.new() ''' v = list(range(0, self.size)) for i in range(0, self.size): v[i] = self.innerparser.new() return v
def new(self, inlineparent = None)
Compatible to Parser.new()
7.384466
4.388314
1.682757
''' Compatible to Parser.create() ''' if self.size > 0: r = self.parse(data) if r is None: raise ParseError('data is not enough to create an array of size ' + str(self.size)) else: return r[0] else: v = [] start = 0 while start < len(data): r = self.innerparser.parse(data[start:], None) if r is None: break v.append(r[0]) start += r[1] return v
def create(self, data, inlineparent = None)
Compatible to Parser.create()
4.001372
3.264997
1.225536
''' Compatible to Parser.sizeof() ''' size = 0 arraysize = self.size if arraysize == 0: arraysize = len(prim) for i in range(0, arraysize): if i >= len(prim): tp = self.innerparser.new() if hasattr(self.innerparser, 'fullparse'): self.innerparser.fullparse(tp) size += self.innerparser.paddingsize(tp) else: size += self.innerparser.paddingsize(prim[i]) return size
def sizeof(self, prim)
Compatible to Parser.sizeof()
4.612371
3.62183
1.273492
''' Compatible to Parser.tobytes() ''' stream = BytesIO() self.tostream(prim, stream, skipprepack=skipprepack) return stream.getvalue()
def tobytes(self, prim, skipprepack = False)
Compatible to Parser.tobytes()
5.511955
3.294816
1.672917
''' Compatible to Parser.create() ''' if self.cstr: return _copy(data).rstrip(b'\x00') else: return _copy(data)
def create(self, data, inlineparent = None)
Compatible to Parser.create()
12.502045
7.073442
1.767463
''' Get parser for this type. Create the parser on first call. ''' if not hasattr(self, '_parser'): self._parser = self._compile() return self._parser
def parser(self)
Get parser for this type. Create the parser on first call.
7.110573
3.206828
2.217323
''' Create a new object of this type. It is also available as __call__, so you can create a new object just like creating a class instance: a = mytype(a=1,b=2) :param args: Replace the embedded struct type. Each argument is a tuple (name, newtype). It is equivalent to call _replace_embedded_type with *name* and *newtype* one by one. Both the "directly" embedded struct and the embedded struct inside another embedded struct can be set. If you want to replace an embedded struct in a replaced struct type, make sure the outer struct is replaced first. The embeded struct type must have a *name* to be replaced by specify *name* option. :param kwargs: extra key-value arguments, each one will be set on the new object, to set value to the fields conveniently. :returns: a new object, with the specified "kwargs" set. ''' obj = self.parser().new() for k,v in args: obj._replace_embedded_type(k,v) for k,v in kwargs.items(): setattr(obj, k, v) return obj
def new(self, *args, **kwargs)
Create a new object of this type. It is also available as __call__, so you can create a new object just like creating a class instance: a = mytype(a=1,b=2) :param args: Replace the embedded struct type. Each argument is a tuple (name, newtype). It is equivalent to calling _replace_embedded_type with *name* and *newtype* one by one. Both the "directly" embedded struct and the embedded struct inside another embedded struct can be set. If you want to replace an embedded struct in a replaced struct type, make sure the outer struct is replaced first. The embedded struct type must have a *name* to be replaced by specifying the *name* option. :param kwargs: extra key-value arguments, each one will be set on the new object, to set values on the fields conveniently. :returns: a new object, with the specified "kwargs" set.
9.496607
1.323706
7.174255
''' Get the enumerate name of a specified value. :param value: the enumerate value :param defaultName: returns if the enumerate value is not defined :returns: the corresponding enumerate value or *defaultName* if not found ''' for k,v in self._values.items(): if v == value: return k return defaultName
def getName(self, value, defaultName = None)
Get the enumerate name of a specified value. :param value: the enumerate value :param defaultName: returned if the enumerate value is not defined :returns: the corresponding enumerate name or *defaultName* if not found
5.333311
2.003679
2.661759
''' Import all the enumerate values from this enumerate to *gs* :param gs: usually globals(), a dictionary. At least __setitem__ should be implemented if not a dictionary. ''' for k,v in self._values.items(): gs[k] = v
def importAll(self, gs)
Import all the enumerate values from this enumerate to *gs* :param gs: usually globals(), a dictionary. At least __setitem__ should be implemented if not a dictionary.
14.760615
2.136389
6.909142
''' Create a new enumerate with current values merged with new enumerate values :param namespace: same as __init__ :param name: same as __init__ :param kwargs: same as __init__ :returns: a new enumerate type ''' if name is None: name = self._readablename d = dict(self._values) d.update(kwargs) return enum(name, namespace, self, self._bitwise, **d)
def extend(self, namespace = None, name = None, **kwargs)
Create a new enumerate with current values merged with new enumerate values :param namespace: same as __init__ :param name: same as __init__ :param kwargs: same as __init__ :returns: a new enumerate type
6.36284
3.087122
2.061091
''' Format a enumerate value to enumerate names if possible. Used to generate human readable dump result. ''' if not self._bitwise: n = self.getName(value) if n is None: return value else: return n else: names = [] for k,v in sorted(self._values.items(), key=lambda x: x[1], reverse=True): if (v & value) == v: names.append(k) value = value ^ v names.reverse() if value != 0: names.append(hex(value)) if not names: return 0 return ' '.join(names)
def formatter(self, value)
Format an enumerate value to enumerate names if possible. Used to generate a human readable dump result.
4.664517
2.710961
1.720614
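A standalone sketch of the bitwise branch of the formatter above: match known flag values from largest to smallest, strip the matched bits, and keep any remainder as hex. The flag names and values here are invented for illustration.

    FLAGS = {"READ": 1, "WRITE": 2, "EXEC": 4}   # hypothetical bitwise enumerate

    def format_flags(value):
        names = []
        for name, bit in sorted(FLAGS.items(), key=lambda x: x[1], reverse=True):
            if value & bit == bit:
                names.append(name)
                value ^= bit
        names.reverse()
        if value:                    # undefined leftover bits are kept as hex
            names.append(hex(value))
        return ' '.join(names) if names else 0

    print(format_flags(7))   # READ WRITE EXEC
    print(format_flags(9))   # READ 0x8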
if hasattr(namedstruct, self.name): return _tostream(self.basetypeparser, getattr(namedstruct, self.name), stream, True) else: return 0
def packto(self, namedstruct, stream)
Pack a struct to a stream
7.416031
7.765532
0.954993
''' Run prepack ''' if not skip_sub and hasattr(namedstruct, self.name) and hasattr(self.basetypeparser, 'fullprepack'): self.basetypeparser.fullprepack(getattr(namedstruct, self.name)) Parser.prepack(self, namedstruct, skip_self, skip_sub)
def prepack(self, namedstruct, skip_self=False, skip_sub=False)
Run prepack
4.442608
4.136995
1.073873
''' Run prepack ''' if not skip_sub and hasattr(self.innertypeparser, 'fullprepack'): for v in getattr(namedstruct, self.name): self.innertypeparser.fullprepack(v) Parser.prepack(self, namedstruct, skip_self, skip_sub)
def prepack(self, namedstruct, skip_self=False, skip_sub=False)
Run prepack
5.686546
5.390869
1.054848
''' Run prepack ''' if not skip_sub and self.header is not None and hasattr(self.header, 'fullprepack'): self.header.fullprepack(namedstruct._seqs[0]) Parser.prepack(self, namedstruct, skip_self, skip_sub)
def prepack(self, namedstruct, skip_self=False, skip_sub=False)
Run prepack
5.415241
5.01015
1.080854
results_error = 'No results were found matching your query' auth_error = 'The token or API key is not valid, please contact Josh Clark at joshua.m.clark@utah.edu to ' \ 'resolve this' rule_error = 'This request violates a rule of the API. Please check the guidelines for formatting a data ' \ 'request and try again' catch_error = 'Something went wrong. Check all your calls and try again' if response['SUMMARY']['RESPONSE_CODE'] == 1: return response elif response['SUMMARY']['RESPONSE_CODE'] == 2: if response['SUMMARY']['NUMBER_OF_OBJECTS'] == 0: return None raise MesoPyError(results_error) elif response['SUMMARY']['RESPONSE_CODE'] == 200: raise MesoPyError(auth_error) elif response['SUMMARY']['RESPONSE_CODE'] == 400: raise MesoPyError(rule_error) elif response['SUMMARY']['RESPONSE_CODE'] == -1: format_error = response['SUMMARY']['RESPONSE_MESSAGE'] raise MesoPyError(format_error) else: raise MesoPyError(catch_error)
def _checkresponse(response)
r""" Returns the data requested by the other methods assuming the response from the API is ok. If not, provides error handling for all possible API errors. HTTP errors are handled in the get_response() function. Arguments: ---------- None. Returns: -------- The response from the API as a dictionary if the API code is 2. Raises: ------- MesoPyError: Gives different response messages depending on returned code from API. If the response is 2, resultsError is displayed. For a response of 200, an authError message is shown. A ruleError is displayed if the code is 400, a formatError for -1, and catchError for any other invalid response.
4.558412
3.590274
1.269656
http_error = 'Could not connect to the API. This could be because you have no internet connection, a parameter' \ ' was input incorrectly, or the API is currently down. Please try again.' json_error = 'Could not retrieve JSON values. Try again with a shorter date range.' # For python 3.4 try: qsp = urllib.parse.urlencode(request_dict, doseq=True) resp = urllib.request.urlopen(self.base_url + endpoint + '?' + qsp).read() # For python 2.7 except (AttributeError, NameError): try: qsp = urllib.urlencode(request_dict, doseq=True) resp = urllib2.urlopen(self.base_url + endpoint + '?' + qsp).read() except urllib2.URLError: raise MesoPyError(http_error) except urllib.error.URLError: raise MesoPyError(http_error) try: json_data = json.loads(resp.decode('utf-8')) except ValueError: raise MesoPyError(json_error) return self._checkresponse(json_data)
def _get_response(self, endpoint, request_dict)
Returns a dictionary of data requested by each function. Arguments: ---------- endpoint: string, mandatory Set in all other methods, this is the API endpoint specific to each function. request_dict: string, mandatory A dictionary of parameters that are formatted into the API call. Returns: -------- response: A dictionary that has been dumped from JSON. Raises: ------- MesoPyError: Overrides the exceptions given in the requests library to give more custom error messages. Connection_error occurs if no internet connection exists. Timeout_error occurs if the request takes too long and redirect_error is shown if the url is formatted incorrectly.
3.280096
3.123489
1.050138
geo_func = lambda a, b: any(i in b for i in a) check = geo_func(self.geo_criteria, arg_list) if check is False: raise MesoPyError('No stations or geographic search criteria specified. Please provide one of the ' 'following: stid, state, county, country, radius, bbox, cwa, nwsfirezone, gacc, subgacc')
def _check_geo_param(self, arg_list)
r""" Checks each function call to make sure that the user has provided at least one of the following geographic parameters: 'stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc'. Arguments: ---------- arg_list: list, mandatory A list of kwargs from other functions. Returns: -------- None. Raises: ------- MesoPyError if no geographic search criteria is provided.
12.347051
4.13645
2.984939
count = self.count() if count: return self.sum(key) / count
def avg(self, key=None)
Get the average value of a given key. :param key: The key to get the average for :type key: mixed :rtype: float or int
6.058104
8.157946
0.742602
chunks = self._chunk(size) return self.__class__(list(map(self.__class__, chunks)))
def chunk(self, size)
Chunk the underlying collection. :param size: The chunk size :type size: int :rtype: Collection
7.151961
9.60536
0.74458
items = self.items return [items[i:i + size] for i in range(0, len(items), size)]
def _chunk(self, size)
Chunk the underlying collection. :param size: The chunk size :type size: int :rtype: Collection
3.484296
4.040767
0.862286
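The chunking in the two records above reduces to a single slicing expression; a small example on a plain list (values invented):

    items = [1, 2, 3, 4, 5, 6, 7]
    size = 3
    chunks = [items[i:i + size] for i in range(0, len(items), size)]
    print(chunks)  # [[1, 2, 3], [4, 5, 6], [7]] -- the last chunk may be shorter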
if value is not None: return self.contains(lambda x: data_get(x, key) == value) if self._use_as_callable(key): return self.first(key) is not None return key in self.items
def contains(self, key, value=None)
Determine if an element is in the collection :param key: The element :type key: int or str or callable :param value: The value of the element :type value: mixed :return: Whether the element is in the collection :rtype: bool
6.286368
6.615718
0.950217
results = [] items = self.items for values in items: if isinstance(values, BaseCollection): values = values.all() results += values return self.__class__(results)
def collapse(self)
Collapse the collection items into a single element (list) :return: A new Collection instance with collapsed items :rtype: Collection
6.678391
5.297978
1.260554
return self.__class__([i for i in self.items if i not in items])
def diff(self, items)
Diff the collections with the given items :param items: The items to diff with :type items: mixed :return: A Collection instance :rtype: Collection
5.019434
7.518273
0.667631
items = self.items for item in items: if callback(item) is False: break return self
def each(self, callback)
Execute a callback over each item. .. code:: collection = Collection([1, 2, 3]) collection.each(lambda x: x + 3) .. warning:: It only applies the callback but does not modify the collection's items. Use the `transform() <#backpack.Collection.transform>`_ method to modify the collection. :param callback: The callback to execute :type callback: callable :rtype: Collection
4.528707
9.684708
0.467614
new = [] for position, item in enumerate(self.items): if position % step == offset: new.append(item) return self.__class__(new)
def every(self, step, offset=0)
Create a new collection consisting of every n-th element. :param step: The step size :type step: int :param offset: The start offset :type offset: int :rtype: Collection
3.754689
4.428864
0.847777
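The every() record above selects items by position with a modulo test; the same logic on a plain list (values invented):

    items = ['a', 'b', 'c', 'd', 'e', 'f']
    step, offset = 2, 1
    print([item for pos, item in enumerate(items) if pos % step == offset])  # ['b', 'd', 'f']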
items = copy(self.items) keys = reversed(sorted(keys)) for key in keys: del items[key] return self.__class__(items)
def without(self, *keys)
Get all items except for those with the specified keys. :param keys: The keys to remove :type keys: tuple :rtype: Collection
4.257104
5.693091
0.747767
items = [] for key, value in enumerate(self.items): if key in keys: items.append(value) return self.__class__(items)
def only(self, *keys)
Get the items with the specified keys. :param keys: The keys to keep :type keys: tuple :rtype: Collection
3.758117
4.740165
0.792824
if callback: return self.__class__(list(filter(callback, self.items))) return self.__class__(list(filter(None, self.items)))
def filter(self, callback=None)
Run a filter over each of the items. :param callback: The filter callback :type callback: callable or None :rtype: Collection
3.332628
3.820971
0.872194
return self.filter(lambda item: data_get(item, key) == value)
def where(self, key, value)
Filter items by the given key value pair. :param key: The key to filter by :type key: str :param value: The value to filter by :type value: mixed :rtype: Collection
7.397999
9.008998
0.821179
if callback is not None: for val in self.items: if callback(val): return val return value(default) if len(self.items) > 0: return self.items[0] else: return default
def first(self, callback=None, default=None)
Get the first item of the collection. :param default: The default value :type default: mixed
3.145687
3.226602
0.974923
def _flatten(d): if isinstance(d, dict): for v in d.values(): for nested_v in _flatten(v): yield nested_v elif isinstance(d, list): for list_v in d: for nested_v in _flatten(list_v): yield nested_v else: yield d return self.__class__(list(_flatten(self.items)))
def flatten(self)
Get a flattened list of the items in the collection. :rtype: Collection
2.563773
2.507762
1.022335
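A standalone version of the recursive generator used in the flatten() record above, written with yield from instead of the nested loops; the input is invented for illustration.

    def flatten(d):
        if isinstance(d, dict):
            for v in d.values():
                yield from flatten(v)
        elif isinstance(d, list):
            for v in d:
                yield from flatten(v)
        else:
            yield d

    print(list(flatten({"a": [1, [2, 3]], "b": {"c": 4}})))  # [1, 2, 3, 4]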
keys = reversed(sorted(keys)) for key in keys: del self[key] return self
def forget(self, *keys)
Remove an item from the collection by key. :param keys: The keys to remove :type keys: tuple :rtype: Collection
5.844242
8.930862
0.654387
try: return self.items[key] except IndexError: return value(default)
def get(self, key, default=None)
Get an element of the collection. :param key: The index of the element :type key: mixed :param default: The default value to return :type default: mixed :rtype: mixed
8.443874
8.064325
1.047065
first = self.first() if not isinstance(first, (basestring)): return glue.join(self.pluck(value).all()) return value.join(self.items)
def implode(self, value, glue='')
Concatenate values of a given key as a string. :param value: The value :type value: str :param glue: The glue :type glue: str :rtype: str
10.015759
9.582474
1.045216
if callback is not None: for val in reversed(self.items): if callback(val): return val return value(default) if len(self.items) > 0: return self.items[-1] else: return default
def last(self, callback=None, default=None)
Get the last item of the collection. :param default: The default value :type default: mixed
3.185696
3.302162
0.96473
if key: return dict(map(lambda x: (data_get(x, key), data_get(x, value)), self.items)) else: results = list(map(lambda x: data_get(x, value), self.items)) return self.__class__(results)
def pluck(self, value, key=None)
Get a list with the values of a given key. :rtype: Collection
3.309126
3.709026
0.892182
return self.__class__(list(map(callback, self.items)))
def map(self, callback)
Run a map over each of the items. :param callback: The map function :type callback: callable :rtype: Collection
7.116045
11.715284
0.607415
def _max(result, item): val = data_get(item, key) if result is None or val > result: return val return result return self.reduce(_max)
def max(self, key=None)
Get the max value of a given key. :param key: The key :type key: str or None :rtype: mixed
7.200907
6.965945
1.03373
def _min(result, item): val = data_get(item, key) if result is None or val < result: return val return result return self.reduce(_min)
def min(self, key=None)
Get the min value of a given key. :param key: The key :type key: str or None :rtype: mixed
7.514151
7.199533
1.0437
start = (page - 1) * per_page return self[start:start + per_page]
def for_page(self, page, per_page)
"Paginate" the collection by slicing it into a smaller collection. :param page: The current page :type page: int :param per_page: Number of items by slice :type per_page: int :rtype: Collection
3.69307
5.780058
0.638933
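The pagination in the for_page() record above is plain slice arithmetic; for example (values invented):

    items = list(range(1, 11))            # ten items
    page, per_page = 2, 3
    start = (page - 1) * per_page         # page numbering starts at 1
    print(items[start:start + per_page])  # [4, 5, 6]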
val = self.get(key, default) self.forget(key) return val
def pull(self, key, default=None)
Pulls an item from the collection. :param key: The key :type key: mixed :param default: The default value :type default: mixed :rtype: mixed
7.520689
10.789847
0.697015
if self._use_as_callable(callback): return self.filter(lambda item: not callback(item)) return self.filter(lambda item: item != callback)
def reject(self, callback)
Create a collection of all elements that do not pass a given truth test. :param callback: The truth test :type callback: callable :rtype: Collection
5.331324
5.653095
0.943081
items = self.items if callback: return self.__class__(sorted(items, key=callback)) else: return self.__class__(sorted(items))
def sort(self, callback=None)
Sort through each item with a callback. :param callback: The callback :type callback: callable or None :rtype: Collection
3.577066
4.547804
0.786548
if callback is None: return sum(self.items) callback = self._value_retriever(callback) return self.reduce(lambda result, item: (result or 0) + callback(item))
def sum(self, callback=None)
Get the sum of the given values. :param callback: The callback :type callback: callable or string or None :rtype: mixed
5.428282
5.421427
1.001264
if key is None: seen = set() seen_add = seen.add return self.__class__([x for x in self.items if not (x in seen or seen_add(x))]) key = self._value_retriever(key) exists = [] def _check(item): id_ = key(item) if id_ in exists: return True exists.append(id_) return self.reject(_check)
def unique(self, key=None)
Return only unique items from the collection list. :param key: The key to check uniqueness on :type key: mixed :rtype: Collection
4.132912
4.080786
1.012773
return self.__class__(list(zip(self.items, *items)))
def zip(self, *items)
Zip the collection together with one or more arrays. :param items: The items to zip :type items: list :rtype: Collection
5.583055
9.277632
0.601776
if isinstance(items, BaseCollection): items = items.all() if not isinstance(items, list): raise ValueError('Unable to merge incompatible types') self._items += items return self
def merge(self, items)
Merge the collection with the given items. :param items: The items to merge :type items: list or Collection :rtype: Collection
5.400376
5.527461
0.977008
self._items = self.map(callback).all() return self
def transform(self, callback)
Transform each item in the collection using a callback. :param callback: The callback :type callback: callable :rtype: Collection
20.710165
22.366854
0.925931
if self._use_as_callable(value): return value return lambda item: data_get(item, value)
def _value_retriever(self, value)
Get a value retrieving callback. :type value: mixed :rtype: callable
12.84531
12.264318
1.047373
def _serialize(value): if hasattr(value, 'serialize'): return value.serialize() elif hasattr(value, 'to_dict'): return value.to_dict() else: return value return list(map(_serialize, self.items))
def serialize(self)
Get the collection of items as a serialized object (ready to be json encoded). :rtype: dict or list
3.112595
2.613029
1.191182
# Schemes validation interface if is_scheme(self.validator): params = getcallargs(self.function, *args, **kwargs) params.update(kwargs) validator = self.validator(data=params, request=None) if validator.is_valid(): return raise self.exception(validator.errors) # Simple validation interface if hasattr(self.validator, 'is_valid'): validator = self.validator(*args, **kwargs) # is valid if validator.is_valid(): return # is invalid if hasattr(validator, 'errors'): raise self.exception(validator.errors) if hasattr(validator, '_errors'): raise self.exception(validator._errors) raise self.exception validation_result = self.validator(*args, **kwargs) # is invalid (validator return error message) if isinstance(validation_result, string_types): raise self.exception(validation_result) # is valid (truely result) if validation_result: return # is invalid (falsy result) raise self.exception
def validate(self, *args, **kwargs)
Step 4 (6 for invariant). Process contract (validator)
3.194005
3.112803
1.026087
self.validate(*args, **kwargs) return self.function(*args, **kwargs)
def patched_function(self, *args, **kwargs)
Step 3. Wrapped function calling.
4.148136
3.978823
1.042553
result = self.function(*args, **kwargs) self.validate(result) return result
def patched_function(self, *args, **kwargs)
Step 3. Wrapped function calling.
4.439865
3.938103
1.127412
# disable methods matching before validation self._disable_patching = True # validation by Invariant.validate self._validate_base(self) # enable methods matching after validation self._disable_patching = False
def _validate(self)
Step 5 (1st flow) or Step 4 (2nd flow). Process contract for object.
13.534653
13.111025
1.032311
self._validate() result = method(*args, **kwargs) self._validate() return result
def _patched_method(self, method, *args, **kwargs)
Step 4 (1st flow). Call method
4.821635
4.78765
1.007099
# create destination directory if not Path(output_file).parent.exists(): Path(output_file).parent.mkdir(parents=True, exist_ok=True) # make sure we have absolute paths and strings since BuildVRT does not like something else input_file_list = [str(Path(p).absolute()) for p in input_file_list] output_file = str(Path(output_file).absolute()) vrt_options = gdal.BuildVRTOptions(**kwargs) vrt = gdal.BuildVRT(output_file, input_file_list, options=vrt_options) vrt = None # if needed, create the input file paths relative to the output vrt path # and replace them in the vrt. # if desired, fix the paths and the relativeToVRT tag in the VRT if relative: input_file_list_relative = [relpath(p, Path(output_file).parent) for p in input_file_list] with open(output_file, 'r') as file: # read a list of lines into data lines = file.readlines() new_lines = [] counter = -1 for line in lines: # sometimes it is relative by default # maybe when all files contain the parent directory of the output file (?) if "relativeToVRT=\"1\"" in line: counter += 1 elif "relativeToVRT=\"0\"" in line: counter += 1 input_file = str(input_file_list[counter]) input_file_relative = str(input_file_list_relative[counter]) if input_file not in line: raise Exception(f"Expect path {input_file} not part of line {line}.") line = line.replace(input_file, input_file_relative) line = line.replace("relativeToVRT=\"0\"", "relativeToVRT=\"1\"") else: pass new_lines.append(line) with open(output_file, 'w') as file: file.writelines(new_lines) return 0
def buildvrt(input_file_list, output_file, relative=True, **kwargs)
Build a VRT See also: https://www.gdal.org/gdalbuildvrt.html You can find the possible BuildVRTOptions (**kwargs**) here: https://github.com/nextgis/pygdal/blob/78a793057d2162c292af4f6b240e19da5d5e52e2/2.1.0/osgeo/gdal.py#L1051 Arguments: input_file_list {list of str or Path objects} -- List of input files. output_file {str or Path object} -- Output file (VRT). Keyword Arguments: relative {bool} -- If ``True``, the ``input_file_list`` paths are converted to relative paths (relative to the output file) and the VRT works even if the data is moved somewhere else - given that the relative location of the VRT and the input files does not change. **kwargs {} -- BuildVRTOptions - see the function description above for a link. Returns: [int] -- If successful, 0 is returned as exit code.
3.005787
2.894628
1.038402
if not overwrite and Path(dst_file).exists(): print("Processing skipped. Destination file exists.") return 0 GDAL_RESAMPLING_ALGORITHMS = { "bilinear": "GRA_Bilinear", "cubic": "GRA_Cubic", "cubicspline": "GRA_CubicSpline", "lanczos": "GRA_Lanczos", "average": "GRA_Average", "mode": "GRA_Mode", "max": "GRA_Max", "min": "GRA_Min", "med": "GRA_Med", "near": "GRA_NearestNeighbour", "q1": "GRA_Q1", "q3": "GRA_Q3" } compressions = ["lzw", "packbits", "deflate"] if resampling not in GDAL_RESAMPLING_ALGORITHMS.keys(): raise ValueError(f"'resampling must be one of {', '.join(GDAL_RESAMPLING_ALGORITHMS.keys())}") if compress is None: options = [] else: if compress.lower() not in compressions: raise ValueError(f"'compress must be one of {', '.join(compressions)}") else: options = [f'COMPRESS={compress.upper()}'] # Source src = gdal.Open(src_file, gdalconst.GA_ReadOnly) src_band = src.GetRasterBand(1) src_proj = src.GetProjection() # We want a section of source that matches this: match_ds = gdal.Open(template_file, gdalconst.GA_ReadOnly) match_proj = match_ds.GetProjection() match_geotrans = match_ds.GetGeoTransform() wide = match_ds.RasterXSize high = match_ds.RasterYSize # Output / destination Path(dst_file).parent.mkdir(parents=True, exist_ok=True) dst = gdal.GetDriverByName('GTiff').Create(dst_file, wide, high, 1, src_band.DataType, options=options) dst.SetGeoTransform( match_geotrans ) dst.SetProjection( match_proj) # Do the work gdal.ReprojectImage(src, dst, src_proj, match_proj, getattr(gdalconst, GDAL_RESAMPLING_ALGORITHMS[resampling])) del dst # Flush return 0
def reproject_on_template_raster(src_file, dst_file, template_file, resampling="near", compress=None, overwrite=False)
Reproject a one-band raster to fit the projection, extent, pixel size etc. of a template raster. Function based on https://stackoverflow.com/questions/10454316/how-to-project-and-resample-a-grid-to-match-another-grid-with-gdal-python Arguments: src_file {str} -- Filename of the source one-band raster. dst_file {str} -- Filename of the destination raster. template_file {str} -- Filename of the template raster. resampling {str} -- Resampling type: 'near' (default), 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'average', 'mode', 'max', 'min', 'med', 'q1', 'q3', see https://www.gdal.org/gdalwarp.html -r parameter. compress {str} -- Compression type: None (default), 'lzw', 'packbits', 'deflate'.
2.086051
2.067825
1.008814
data = gdal.Open(str(src_raster_template), # str for the case that a Path instance arrives here gdalconst.GA_ReadOnly) geo_transform = data.GetGeoTransform() #source_layer = data.GetLayer() # x_max = x_min + geo_transform[1] * data.RasterXSize # y_min = y_max + geo_transform[5] * data.RasterYSize x_res = data.RasterXSize y_res = data.RasterYSize mb_v = ogr.Open(src_vector) mb_l = mb_v.GetLayer() target_ds = gdal.GetDriverByName('GTiff').Create(dst_rasterized, x_res, y_res, 1, gdal_dtype) # gdal.GDT_Byte # import osr target_ds.SetGeoTransform((geo_transform[0], # x_min geo_transform[1], # pixel_width 0, geo_transform[3], # y_max 0, geo_transform[5] # pixel_height )) prj = data.GetProjection() # srs = osr.SpatialReference(wkt=prj) # Where was this needed? target_ds.SetProjection(prj) band = target_ds.GetRasterBand(1) # NoData_value = 0 # band.SetNoDataValue(NoData_value) band.FlushCache() gdal.RasterizeLayer(target_ds, [1], mb_l, options=[f"ATTRIBUTE={burn_attribute}"]) target_ds = None
def rasterize(src_vector: str, burn_attribute: str, src_raster_template: str, dst_rasterized: str, gdal_dtype: int = 4)
Rasterize the values of a spatial vector file. Arguments: src_vector {str} -- An OGR vector file (e.g. GeoPackage, ESRI Shapefile) path containing the data to be rasterized. burn_attribute {str} -- The attribute of the vector data to be burned in the raster. src_raster_template {str} -- Path to a GDAL raster file to be used as template for the rasterized data. dst_rasterized {str} -- Path of the destination file. gdal_dtype {int} -- Numeric GDAL data type, defaults to 4 which is UInt32. See https://github.com/mapbox/rasterio/blob/master/rasterio/dtypes.py for useful look-up tables. Returns: None
2.57108
2.628128
0.978293
if Path(dst_raster).exists() and not overwrite: print(f"Returning 0 - File exists: {dst_raster}") return 0 with rasterio.open(template_raster) as tmp: crs = tmp.crs dst_raster = Path(dst_raster) dst_raster.parent.mkdir(exist_ok=True, parents=True) tempdir = Path(tempfile.mkdtemp(prefix=f"TEMPDIR_{dst_raster.stem}_", dir=dst_raster.parent)) interim_file_lines_vector = tempdir / "interim_sample_vector_dataset_lines.shp" interim_file_lines_raster = tempdir / "interim_sample_vector_dataset_lines.tif" exit_code = convert_polygons_to_lines(polygons, interim_file_lines_vector, crs=crs, add_allone_col=True) rasterize(src_vector=str(interim_file_lines_vector), burn_attribute="ALLONE", src_raster_template=str(template_raster), dst_rasterized=str(interim_file_lines_raster), gdal_dtype=1) cmd = f"{PROXIMITY_PATH} " \ f"{str(Path(interim_file_lines_raster).absolute())} " \ f"{str(Path(dst_raster).absolute())} " \ f"-ot Float32 -distunits PIXEL -values 1 -maxdist 255" subprocess.check_call(cmd, shell=True) if not keep_interim_files: shutil.rmtree(tempdir) else: print(f"Interim files are in {tempdir}") return 0
def calc_distance_to_border(polygons, template_raster, dst_raster, overwrite=False, keep_interim_files=False)
Calculate the distance of each raster cell (in and outside the polygons) to the next polygon border. Arguments: polygons {str} -- Filename to a geopandas-readable file with polygon features. template_raster {str} -- Filename to a rasterio-readable file. dst_raster {str} -- Destination filename for the distance to polygon border raster file (tif). Keyword Arguments: overwrite {bool} -- Overwrite files if they exist? (default: {False}) keep_interim_files {bool} -- Keep the interim line vector and raster files (default: {False}) Returns: int -- Exit code 0 if successful.
3.315373
3.339632
0.992736
gdf = gpd.read_file(src_polygons) geom_coords = gdf["geometry"] # featureset.get(5)["geometry"]["coordinates"] lines = [] row_ids = [] for i_row, pol in tqdm(enumerate(geom_coords), total=len(geom_coords)): boundary = pol.boundary if boundary.type == 'MultiLineString': for line in boundary: lines.append(line) row_ids.append(i_row) else: lines.append(boundary) row_ids.append(i_row) gdf_lines = gdf.drop("geometry", axis=1).iloc[row_ids, :] gdf_lines["Coordinates"] = lines gdf_lines = gpd.GeoDataFrame(gdf_lines, geometry='Coordinates', crs=gdf.crs) if crs is not None: gdf_lines = gdf_lines.to_crs(crs) if add_allone_col: gdf_lines["ALLONE"] = 1 Path(dst_lines).parent.mkdir(exist_ok=True, parents=True) gdf_lines.to_file(dst_lines) return 0
def convert_polygons_to_lines(src_polygons, dst_lines, crs=None, add_allone_col=False)
Convert polygons to lines. Arguments: src_polygons {path to geopandas-readable file} -- Filename of the polygon vector dataset to be converted to lines. dst_lines {str} -- Filename where to write the line vector dataset to. Keyword Arguments: crs {dict or str} -- Output projection parameters as string or in dictionary format. This will reproject the data when a crs is given (not {None}) (default: {None}). add_allone_col {bool} -- Add an additional attribute column with all ones. This is useful, e.g. in case you want to use the lines with gdal_proximity afterwards (default: {False}). Returns: int -- Exit code 0 if successful.
2.380791
2.381666
0.999632
dtype_range = dtype_ranges[dtype] df_out_of_range = (df < dtype_range[0]) | (df > dtype_range[1]) | (~np.isfinite(df)) if df_out_of_range.any().any(): if return_== "colsums": df_out_of_range = df_out_of_range.apply(sum, axis=0) # column elif return_== "rowsums": df_out_of_range = df_out_of_range.apply(sum, axis=1) # row elif return_== "all": df_out_of_range = df_out_of_range else: df_out_of_range = 1 else: df_out_of_range = 0 return df_out_of_range
def dtype_checker_df(df, dtype, return_=None)
Check if there are NaN values or values outside of a given datatype range. Arguments: df {dataframe} -- A dataframe. dtype {str} -- The datatype to check for. Keyword Arguments: return_ {str} -- Returns a boolean dataframe with the values not in the range of the dtype ('all'), the row ('rowsums') or column ('colsums') sums of that dataframe or an exit code 1 (None, default) if any of the values is not in the range. Returns: [int or DataFrame or Series] -- If no value is out of the range exit code 0 is returned, else depends on return_.
2.201093
1.955518
1.125581
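A small sketch of the out-of-range test used in the dtype_checker_df record above, with a hypothetical 0-255 range standing in for the dtype_ranges lookup that the function relies on.

    import numpy as np
    import pandas as pd

    lo, hi = 0, 255                                    # hypothetical uint8 range
    df = pd.DataFrame({"a": [0, 300], "b": [np.nan, 10]})
    out_of_range = (df < lo) | (df > hi) | (~np.isfinite(df))
    print(out_of_range.sum(axis=0))                    # per-column counts: a 1, b 1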
df = self.df_layers.copy() df["index"] = range(df.shape[0]) idx_layers = [] if isinstance(band, str) and isinstance(date, str): idx_layers = df[(df["date"] == date) & (df["band"] == band)]["index"].values[0] if isinstance(band, list) and isinstance(date, str): for b in band: idx = df[(df["date"] == date) & (df["band"] == b)]["index"].values[0] idx_layers.append(idx) elif isinstance(band, str) and isinstance(date, list): for d in date: idx = df[(df["band"] == band) & (df["date"] == d)]["index"].values[0] idx_layers.append(idx) return idx_layers
def get_df_ilocs(self, band, date)
Get positions of rows matching specific band(s) and date(s). The method supports three typical queries: * one band and one date (both given as strings) * one band and several dates (band given as a string, date as a list of strings) * several bands and one date (date given as a string, band as a list of strings) Arguments: band {str or list} -- Band(s) for which to derive the iloc index. date {str or list} -- Date(s) for which to derive the iloc index. Returns: int or list -- Integer (if band and date are str) or list of iloc indices.
1.839721
1.931968
0.952252
# This should be a MultiRasterIO method with rasterio.open(self._mrio._get_template_for_given_resolution(self._mrio.dst_res, "path")) as src_layer: pass # later we need src_layer for src_layer.window_transform(win) win_transform = src_layer.window_transform(self._window) bounds = rasterio.windows.bounds(window=self._window, transform=win_transform, height=0, width=0) return bounds
def _get_spatial_bounds(self)
Get the spatial bounds of the chunk.
9.636243
9.119719
1.056638
if self._data_structure != "DataFrame": raise Exception(f"Data is not a DataFrame but {self._data_structure}.") self._data = self._convert_to_ndarray(self._data) self._update_data_structure() return self
def convert_data_to_ndarray(self)
Converts the data from dataframe to ndarray format. Assumption: df-columns are ndarray-layers (3rd dim.)
4.065848
3.618697
1.123567
if data.__class__.__name__ != "DataFrame": raise Exception(f"data is not a DataFrame but {data.__class__.__name__}.") shape_ndarray = (self._height, self._width, data.shape[1]) data_ndarray = data.values.reshape(shape_ndarray) return data_ndarray
def _convert_to_ndarray(self, data)
Converts data from dataframe to ndarray format. Assumption: df-columns are ndarray-layers (3rd dim.)
3.547629
2.906417
1.220619
result = self._convert_to_ndarray(result) self.write_ndarray(result, dst_paths, nodata=nodata, compress=compress)
def write_dataframe(self, result, dst_paths, nodata=None, compress='lzw')
Write results (dataframe) to disc.
3.449849
2.948618
1.169988
assert len(dst_paths) == result.shape[2] assert result.shape[0] == self._height assert result.shape[1] == self._width assert result.shape[2] == len(dst_paths) with rasterio.open(self._mrio._get_template_for_given_resolution(self._mrio.dst_res, "path")) as src_layer: pass # later we need src_layer for src_layer.window_transform(win) for i, pth in enumerate(dst_paths): dst_path_chunk = self.get_chunk_path_from_layer_path(pth, self.ji) result_layer_i = result[:, :, [i]] assert result_layer_i.shape[2] == 1 kwargs = self._mrio._get_template_for_given_resolution( res=self._mrio.dst_res, return_="meta").copy() kwargs.update({"driver": "GTiff", "compress": compress, "nodata": nodata, "height": self._height, "width": self._width, "dtype": result_layer_i.dtype, "transform": src_layer.window_transform(self._window)}) with rasterio.open(dst_path_chunk, "w", **kwargs) as dst: dst.write(result_layer_i[:, :, 0], 1)
def write_ndarray(self, result, dst_paths, nodata=None, compress='lzw')
Write results (ndarray) to disc.
3.585985
3.555296
1.008632
# from the seaborn code # https://github.com/mwaskom/seaborn/blob/3a3ec75befab52c02650c62772a90f8c23046038/seaborn/matrix.py#L201 def _get_vmin_vmax(arr2d, vmin=None, vmax=None): if vmin is None: vmin = np.percentile(arr2d, 2) if robust else arr2d.min() if vmax is None: vmax = np.percentile(arr2d, 98) if robust else arr2d.max() return vmin, vmax if len(arr.shape) == 3 and vmin is None and vmax is None: vmin = [] vmax = [] for i in range(arr.shape[2]): arr_i = arr[:, :, i] vmin_i, vmax_i = _get_vmin_vmax(arr_i, vmin=None, vmax=None) vmin.append(vmin_i) vmax.append(vmax_i) else: vmin, vmax = _get_vmin_vmax(arr, vmin=vmin, vmax=vmax) return vmin, vmax
def robust_data_range(arr, robust=False, vmin=None, vmax=None)
Get a robust data range, i.e. 2nd and 98th percentile for vmin, vmax parameters.
2.330821
2.283513
1.020717
eocubewin = EOCubeChunk(ji, eocube.df_layers, eocube.chunksize, eocube.wdir) return eocubewin
def from_eocube(eocube, ji)
Create an EOCubeChunk object from an EOCube object.
10.016061
7.41067
1.351573
return EOCubeSceneCollectionChunk(ji=ji, df_layers=self.df_layers, chunksize=self.chunksize, variables=self.variables, qa=self.qa, qa_valid=self.qa_valid, wdir=self.wdir)
def get_chunk(self, ji)
Get an EOCubeChunk
7.938716
5.667808
1.400668
def print_elapsed_time(start, last_stopped, prefix): # print(f"{prefix} - Elapsed time [s] since start / last stopped: \ # {(int(time.time() - start_time))} / {(int(time.time() - last_stopped))}") return time.time() start_time = time.time() last_stopped = time.time() last_stopped = print_elapsed_time(start_time, last_stopped, "Starting chunk function") verbose = False self.read_data() last_stopped = print_elapsed_time(start_time, last_stopped, "Data read") # 2. sc_chunk = self.convert_data_to_dataframe() last_stopped = print_elapsed_time(start_time, last_stopped, "Data converted to df") # 3.B. if mask: # 3.A. ilocs_qa = np.where((self.df_layers["band"] == self.qa).values)[0] df_qa = self.data.iloc[:, ilocs_qa] df_qa.columns = self.df_layers["date"].iloc[ilocs_qa] df_clearsky = df_qa.isin(self.qa_valid) last_stopped = print_elapsed_time(start_time, last_stopped, "Clearsky df created") return_bands = self.variables else: return_bands = self.variables + [self.qa] dfs_variables = {} for var in return_bands: if verbose: print("VARIABLE:", var) ilocs_var = np.where((self.df_layers["band"] == var).values)[0] df_var = self.data.iloc[:, ilocs_var] df_var.columns = self.df_layers["date"].iloc[ilocs_var] if mask: df_var = df_var.where(df_clearsky, other=np.nan) dfs_variables[var] = df_var last_stopped = print_elapsed_time(start_time, last_stopped, "Clearsky df created") self._data = dfs_variables return self
def read_data_by_variable(self, mask=True)
Reads and masks (if desired) the data and converts it into one dataframe per variable.
3.125466
3.105888
1.006304
if dataset == "s2l1c": search_string = os.path.join(DIR_DATA, dataset, "**", "*_B??.jp2") files = glob.glob(search_string, recursive=True) if not files: raise IOError(f"Could not find raster files of the s2l1c dataset. Search string: {search_string}") basename_splitted = [pth.replace(".jp2", "").split("_")[-2:] for pth in files] dset = {"raster_files": files, "raster_bands": [ele[1] for ele in basename_splitted], "raster_times": [ele[0] for ele in basename_splitted], "vector_file": os.path.join(DIR_DATA, "s2l1c", "s2l1c_ref.gpkg"), "vector_file_osm": os.path.join(DIR_DATA, "s2l1c", "gis_osm_landuse-water_a_free_1_area-10000-to-500000.gpkg")} elif dataset == "lsts": search_string = os.path.join(DIR_DATA, dataset, "**", "*.tif") files = glob.glob(search_string, recursive=True) if not files: raise IOError(f"Could not find raster files of the lsts dataset. Search string: {search_string}") basename_splitted = [os.path.basename(pth).replace(".tif", "").split("_") for pth in files] dset = {"raster_files": files, "raster_bands": [ele[1] for ele in basename_splitted], "raster_times": [ele[0][9:16] for ele in basename_splitted]} # If you want to add a new dataset here, do not forget to do all of the following steps: # 1) add the dataset in the eo-box/sampledata/eobox/sampledata/data/<name of new dataset> # 2) write the code here to get the paths of the data and eventually some additional information # 3) write a test to make sure you get the data # 4) add the new dataset to package_data in eo-box/sampledata/eobox/setup.py # 5) add the new dataset to package_data in eo-box/sampledata/MANIFEST.in # 4) change the version number in eo-box/sampledata/eobox/sampledata/__init__.py to '<current>.<current+1>.0' return dset
def get_dataset(dataset="s2l1c")
Get a specific sampledata to play around. So far the following sampledata exist: * 's2l1c': One Sentinel-2 Level 1C scene with a reference dataset. * 'lsts': A time series of 105 Landsat scenes each with the bands b3 (red), b4 (nir), b5 (swir1) and fmask. Keyword Arguments: dataset {str} -- The name of the dataset (default: {'s2l1c'}). Returns: [dict] -- A dictionary with paths and information about the sampledata.
3.347963
3.22947
1.036691
# checks the blocksize input value_error_msg = "'blocksize must be an integer or a list of two integers.'" if isinstance(blocksize_xy, int): blockxsize, blockysize = (blocksize_xy, blocksize_xy) elif isinstance(blocksize_xy, list): if len(blocksize_xy) != 2: raise ValueError(value_error_msg) else: if not all([isinstance(blocksize_xy[0], int), isinstance(blocksize_xy[1], int)]): raise ValueError(value_error_msg) blockxsize, blockysize = blocksize_xy else: raise ValueError(value_error_msg) # create the col_off and row_off elements for all windows n_cols = int(np.ceil(width / blockxsize)) n_rows = int(np.ceil(height / blockysize)) col = list(range(n_cols)) * n_rows col_off = np.array(col) * blockxsize row = np.repeat(list(range(n_rows)), n_cols) row_off = row * blockysize # create the windows # if necessary, reduce the width and/or height of the border windows blocksize_wins = [] for ridx, roff, cidx, coff, in zip(row, row_off, col, col_off): if coff + blockxsize > width: bxsize = width - coff else: bxsize = blockxsize if roff + blockysize > height: bysize = height - roff else: bysize = blockysize blocksize_wins.append([[ridx, cidx], rasterio.windows.Window(coff, roff, bxsize, bysize)]) return blocksize_wins
def windows_from_blocksize(blocksize_xy, width, height)
Create rasterio.windows.Window instances with given size which fully cover a raster. Arguments: blocksize_xy {int or list of two int} -- Size of the windows: one integer for square windows or a list of two integers for the window width and height. width {int} -- Width of the raster for which to create the windows. height {int} -- Height of the raster for which to create the windows. Returns: list -- List of windows according to the following format ``[[<row-index>, <column index>], rasterio.windows.Window(<col_off>, <row_off>, <width>, <height>)]``.
2.347387
2.118951
1.107806
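The window grid in the record above comes from ceil-divided block counts plus clipping at the right and bottom edges; a rasterio-free sketch of the same offset/size arithmetic (sizes invented):

    import math

    def block_offsets(blockxsize, blockysize, width, height):
        wins = []
        for row in range(math.ceil(height / blockysize)):
            for col in range(math.ceil(width / blockxsize)):
                xoff, yoff = col * blockxsize, row * blockysize
                wins.append((row, col, xoff, yoff,
                             min(blockxsize, width - xoff),    # clip width at the right border
                             min(blockysize, height - yoff)))  # clip height at the bottom border
        return wins

    for win in block_offsets(512, 512, 1100, 600):
        print(win)
    # last window of the first row: (0, 2, 1024, 0, 76, 512)
    # last window overall:          (1, 2, 1024, 512, 76, 88)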
if dst_res is None: dst_res = min(self._res_indices.keys()) return dst_res
def _get_dst_resolution(self, dst_res=None)
Get default resolution, i.e. the highest resolution or smallest cell size.
5.620346
4.123308
1.363067
# setter and getter ? if res is None: res = max(self._res_indices.keys()) self._windows_res = res a_file_index_given_res = self._res_indices[res][0] with rasterio.open(self._layer_files[a_file_index_given_res]) as src: wins_of_first_dst_res_layer = tuple(src.block_windows()) self.windows = np.array([win[1] for win in wins_of_first_dst_res_layer]) self.windows_row = np.array([win[0][0] for win in wins_of_first_dst_res_layer]) self.windows_col = np.array([win[0][1] for win in wins_of_first_dst_res_layer])
def block_windows(self, res=None)
Load windows for chunks-wise processing from raster internal tiling (first raster of given resolution). Arguments: res {numeric} -- Resolution determining the raster (1st of resolution group) from which to take the tiling.
3.489637
3.192764
1.092983
meta = self._get_template_for_given_resolution(self.dst_res, "meta") width = meta["width"] height = meta["height"] blocksize_wins = windows_from_blocksize(blocksize_xy, width, height) self.windows = np.array([win[1] for win in blocksize_wins]) self.windows_row = np.array([win[0][0] for win in blocksize_wins]) self.windows_col = np.array([win[0][1] for win in blocksize_wins]) return self
def windows_from_blocksize(self, blocksize_xy=512)
Create rasterio.windows.Window instances with given size which fully cover the raster. Arguments: blocksize_xy {int or list of two int} -- Size of the window. If one integer is given it defines the width and height of the window. If a list of two integers is given, the first defines the width and the second the height. Returns: None -- But the attributes ``windows``, ``windows_row`` and ``windows_col`` are updated.
3.187003
2.953001
1.079242
path = self._layer_files[self._res_indices[res][0]] if return_ == "path": return_value = path else: with rasterio.open(str(path)) as src: if return_ == "meta": return_value = src.meta elif return_ == "windows": return_value = tuple(src.block_windows()) else: raise ValueError("'return_' must be 'path', meta' or 'windows'.") return return_value
def _get_template_for_given_resolution(self, res, return_)
Given the specified resolution ('res'), return the template layer 'path', 'meta' or 'windows'.
3.439179
2.682148
1.282248
import pandas as pd if self.windows is None: raise Exception("You need to call the block_windows or windows before.") df_wins = [] for row, col, win in zip(self.windows_row, self.windows_col, self.windows): df_wins.append(pd.DataFrame({"row":[row], "col":[col], "Window":[win]})) df_wins = pd.concat(df_wins).set_index(["row", "col"]) df_wins["window_index"] = range(df_wins.shape[0]) df_wins = df_wins.sort_index() return df_wins
def windows_df(self)
Get the windows (row, col, window index), e.g. as loaded with :meth:`block_windows`, as a dataframe. Returns: [dataframe] -- A dataframe with the window information and indices (row, col, index).
3.142128
2.477839
1.268092
# what can be given to ij_win NOT intuitive/right name by now!!! ji_windows = {} transform_src = self._layer_meta[self._res_indices[self._windows_res][0]]["transform"] for res in self._res_indices: transform_dst = self._layer_meta[self._res_indices[res][0]]["transform"] ji_windows[res] = window_from_window(window_src=self.windows[ij_win], transform_src=transform_src, transform_dst=transform_dst) return ji_windows
def ji_windows(self, ij_win)
For a given specific window, i.e. an element of :attr:`windows`, get the windows of all resolutions. Arguments: ij_win {int} -- The index specifying the window for which to return the resolution-windows.
3.629268
3.84724
0.943343
if isinstance(ji_win, dict): ji_windows = ji_win else: ji_windows = self.ji_windows(ji_win) arrays = [] for filename, res in zip(self._layer_files, self._layer_resolution): with rasterio.open(filename) as src: arr = src.read(1, window=ji_windows[res]) arrays.append(arr) if self.dst_res is not None: arrays = self._resample(arrays=arrays, ji_windows=ji_windows) return arrays
def get_arrays(self, ji_win)
Get the data of a window given the ji_windows derived with :meth:`ji_windows`. Arguments: ji_win {[type]} -- The index of the window or the (multi-resolution) windows returned by :meth:`ji_windows`. Returns: (list of) array(s) -- List of 2D arrays in native resolution in case `dst_res` is `None` or a 3D array where all layers are resampled to `dst_res` resolution.
3.355769
2.787666
1.203792
# get a destination array template win_dst = ji_windows[self.dst_res] aff_dst = self._layer_meta[self._res_indices[self.dst_res][0]]["transform"] arrays_dst = list() for i, array in enumerate(arrays): arr_dst = np.zeros((int(win_dst.height), int(win_dst.width))) if self._layer_resolution[i] > self.dst_res: resampling = getattr(Resampling, self.upsampler) elif self._layer_resolution[i] < self.dst_res: resampling = getattr(Resampling, self.downsampler) else: arrays_dst.append(array.copy()) continue reproject(array, arr_dst, # arr_dst[0, :, :, i], src_transform=self._layer_meta[i]["transform"], dst_transform=aff_dst, src_crs=self._layer_meta[0]["crs"], dst_crs=self._layer_meta[0]["crs"], resampling=resampling) arrays_dst.append(arr_dst.copy()) arrays_dst = np.stack(arrays_dst, axis=2) # n_images x n x m x 10 would be the synergise format return arrays_dst
def _resample(self, arrays, ji_windows)
Resample all arrays with potentially different resolutions to a common resolution.
3.764838
3.602419
1.045086
ji_results = self._process_windows(func, **kwargs) for idx_layer in range(len(ji_results[0])): # this is the number of output layers for j in np.unique(self.windows_row): win_indices_j = np.where(self.windows_row == j)[0] layer_merged_j = np.hstack([ji_results[idx][idx_layer] for idx in win_indices_j]) if j == 0: layer_merged = layer_merged_j else: layer_merged = np.vstack([layer_merged, layer_merged_j]) if idx_layer == 0: layers_merged = layer_merged else: layers_merged = np.stack([layers_merged, layer_merged], axis=2) return layers_merged
def _process_windows_merge_stack(self, func, **kwargs)
Load (resampled) array of all windows, apply custom function on it, merge and stack results to one array.
2.92009
2.764016
1.056466
ji_results = [] for ji_win in range(len(self.windows)): ji_results.append(self._process_window(ji_win, func, **kwargs)) return ji_results
def _process_windows(self, func, **kwargs)
Load (resampled) array of all windows and apply custom function on it.
4.358201
3.691858
1.18049
arr = self.get_arrays(ji_win) result = func(arr, **kwargs) return result
def _process_window(self, ji_win, func, **kwargs)
Load (resampled) array of window ji_win and apply custom function on it.
5.280904
4.97067
1.062413
a_transform = self._get_template_for_given_resolution(res=self.dst_res, return_="meta")["transform"] row, col = transform.rowcol(a_transform, xy[0], xy[1]) ij_containing_xy = None for ji, win in enumerate(self.windows): (row_start, row_end), (col_start, col_end) = rasterio.windows.toranges(win) # print(row, col, row_start, row_end, col_start, col_end) if ((col >= col_start) & (col < col_end)) & ((row >= row_start) & (row < row_end)): ij_containing_xy = ji break if ij_containing_xy is None: raise ValueError("The given 'xy' value is not contained in any window.") return ij_containing_xy
def get_window_from_xy(self, xy)
Get the window index given a coordinate (raster CRS).
3.781894
3.412195
1.108346
def _load(path, index): if index is None: arr = np.load(str(path)) else: arr = np.load(str(path), mmap_mode="r")[index] return arr src_dir = Path(src_dir) paths = [] if isinstance(patterns, str): patterns = [patterns] for pat in patterns: paths += src_dir.glob(pat) if vars_in_cols: df_data = {} for path in paths: df_data[path.stem] = _load(path, index) df_data = pd.DataFrame(df_data) if index is not None: df_data.index = index.index[index] else: df_data = [] for path in paths: arr = _load(path, index) df_data.append(pd.DataFrame(np.expand_dims(arr, 0), index=[path.stem])) df_data = pd.concat(df_data) if index is not None: df_data.columns = index.index[index] return df_data
def load_extracted(src_dir: str, patterns="*.npy", vars_in_cols: bool = True, index: pd.Series = None)
Load data extracted and stored by :py:func:`extract` Arguments: src_dir {str} -- The directory where the data is stored. Keyword Arguments: patterns {str, or list of str} -- A pattern (str) or list of patterns (list) to identify the variables to be loaded. The default loads all variables, i.e. all .npy files. (default: {'*.npy'}) vars_in_cols {bool} -- Return the variables in columns (``True``) or rows ``False`` (default: {True}) index {pd.Series} -- A boolean pandas Series which indicates with ``True`` which samples to load. Returns: pandas.DataFrame -- A dataframe with the data.
1.955938
2.032174
0.962486
minimum = 9223372036854775807 maximum = 0 for y in range(y0, y0 + h): for x in range(x0, x0 + w): value = self[x, y] if value != self.filler: minimum = min(minimum, value) maximum = max(maximum, value) return minimum, maximum
def extrema(self, x0, y0, w, h)
Returns the minimum and maximum values contained in a given area. :param x0: Starting x index. :param y0: Starting y index. :param w: Width of the area to scan. :param h: Height of the area to scan. :return: Tuple containing the minimum and maximum values of the given area.
1.94296
2.292272
0.847613
incs = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff) # Break 6-char RGB code into 3 integer vals. parts = [ int(h, 16) for h in re.split(r'(..)(..)(..)', rgb)[1:4] ] res = [] for part in parts: i = 0 while i < len(incs)-1: s, b = incs[i], incs[i+1] # smaller, bigger if s <= part <= b: s1 = abs(s - part) b1 = abs(b - part) if s1 < b1: closest = s else: closest = b res.append(closest) break i += 1 #print '***', res res = ''.join([ ('%02.x' % i) for i in res ]) equiv = RGB2SHORT_DICT[ res ] #print '***', res, equiv return equiv, res
def rgb2short(rgb)
Find the closest xterm-256 approximation to the given RGB value. @param rgb: Hex code representing an RGB value, eg, 'abcdef' @returns: String between 0 and 255, compatible with xterm. >>> rgb2short('123456') ('23', '005f5f') >>> rgb2short('ffffff') ('231', 'ffffff') >>> rgb2short('0DADD6') # vimeo logo ('38', '00afd7')
3.810724
3.676389
1.03654
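The heart of rgb2short() above is snapping each RGB channel to the nearest value of the xterm-256 colour-cube increments; a standalone sketch of that snapping step (the final short-code lookup table is omitted, and ties are broken toward the smaller increment here):

    INCS = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)

    def snap_channel(part):
        # choose the increment with the smallest absolute distance to the channel value
        return min(INCS, key=lambda inc: abs(inc - part))

    snapped = ''.join('%02x' % snap_channel(int(h, 16)) for h in ('12', '34', '56'))
    print(snapped)  # 005f5f -- matches the doctest rgb2short('123456')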
curses.curs_set(1) self.screen.move(y, x)
def set_cursor(self, x, y)
Sets the cursor to the desired position. :param x: X position :param y: Y position
3.848842
5.926117
0.649471
if x < self.width and y < self.height: try: self.screen.addstr(y, x, symbols.encode(text), self.pairs[fg, bg]) except curses.error: # Ignore out of bounds error pass
def put(self, x, y, text, fg, bg)
Puts a string at the desired coordinates using the provided colors. :param x: X position :param y: Y position :param text: Text to write :param fg: Foreground color number :param bg: Background color number
4.181864
4.816956
0.868155
# Flush all inputs before this one that were done since last poll curses.flushinp() ch = self.screen.getch() if ch == 27: return EVENT_ESC elif ch == -1 or ch == curses.KEY_RESIZE: return EVENT_RESIZE elif ch == 10 or ch == curses.KEY_ENTER: return EVENT_ENTER elif ch == 127 or ch == curses.KEY_BACKSPACE: return EVENT_BACKSPACE elif ch == curses.KEY_UP: return EVENT_UP elif ch == curses.KEY_DOWN: return EVENT_DOWN elif ch == curses.KEY_LEFT: return EVENT_LEFT elif ch == curses.KEY_RIGHT: return EVENT_RIGHT elif ch == 3: return EVENT_CTRL_C elif 0 <= ch < 256: return chr(ch) else: return EVENT_UNHANDLED
def poll_event(self)
Waits for an event to happen and returns a string related to the event. If the event is a normal (letter) key press, the letter is returned (case sensitive) :return: Event type
2.313379
2.372481
0.975088
x, y, params = args return x, y, mandelbrot(x, y, params)
def compute(args)
Callable function for the multiprocessing pool.
11.156794
11.193359
0.996733
x, y, w, h, params = args return x, y, mandelbrot_capture(x, y, w, h, params)
def compute_capture(args)
Callable function for the multiprocessing pool.
8.790978
8.915833
0.985996