| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q6200
|
BaseBaseModel.find_by_id
|
train
|
def find_by_id(self, _id, projection=None):
"""find record by _id
"""
if isinstance(_id, list) or isinstance(_id, tuple):
return list(self.__collect.find(
{'_id': {'$in': [self._to_primary_key(i) for i in _id]}}, projection))
document_id = self._to_primary_key(_id)
if document_id is None:
return None
return self.__collect.find_one({'_id': document_id}, projection)
|
python
|
{
"resource": ""
}
|
q6201
|
to_list_str
|
train
|
def to_list_str(value, encode=None):
"""recursively convert list content into string
:arg list value: The list that needs to be converted.
:arg function encode: Function used to encode object.
"""
result = []
for index, v in enumerate(value):
if isinstance(v, dict):
result.append(to_dict_str(v, encode))
continue
if isinstance(v, list):
result.append(to_list_str(v, encode))
continue
if encode:
result.append(encode(v))
else:
result.append(default_encode(v))
return result
|
python
|
{
"resource": ""
}
|
q6202
|
to_dict_str
|
train
|
def to_dict_str(origin_value, encode=None):
"""recursively convert dict content into string
"""
value = copy.deepcopy(origin_value)
for k, v in value.items():
if isinstance(v, dict):
value[k] = to_dict_str(v, encode)
continue
if isinstance(v, list):
value[k] = to_list_str(v, encode)
continue
if encode:
value[k] = encode(v)
else:
value[k] = default_encode(v)
return value
|
python
|
{
"resource": ""
}
|
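A quick usage sketch of the two converters above, assuming to_dict_str and to_list_str (and the copy import) are in scope; str is passed as the encode function here purely for illustration:

nested = {'ids': [1, 2], 'meta': {'count': 3}}
print(to_dict_str(nested, encode=str))
# -> {'ids': ['1', '2'], 'meta': {'count': '3'}}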
q6203
|
default_encode
|
train
|
def default_encode(v):
"""convert ObjectId, datetime, date into string
"""
if isinstance(v, ObjectId):
return unicode_type(v)
if isinstance(v, datetime):
return format_time(v)
if isinstance(v, date):
return format_time(v)
return v
|
python
|
{
"resource": ""
}
|
q6204
|
to_str
|
train
|
def to_str(v, encode=None):
"""convert any list, dict, iterable and primitives object to string
"""
if isinstance(v, basestring_type):
return v
if isinstance(v, dict):
return to_dict_str(v, encode)
if isinstance(v, Iterable):
return to_list_str(v, encode)
if encode:
return encode(v)
else:
return default_encode(v)
|
python
|
{
"resource": ""
}
|
q6205
|
get_base_dir
|
train
|
def get_base_dir(currfile, dir_level_num=3):
"""
find the directory dir_level_num levels above currfile
"""
root_path = os.path.abspath(currfile)
for i in range(0, dir_level_num):
root_path = os.path.dirname(root_path)
return root_path
|
python
|
{
"resource": ""
}
|
q6206
|
join_sys_path
|
train
|
def join_sys_path(currfile, dir_level_num=3):
"""
find the base directory of currfile and append it to sys.path
"""
if os.path.isdir(currfile):
root_path = currfile
else:
root_path = get_base_dir(currfile, dir_level_num)
sys.path.append(root_path)
|
python
|
{
"resource": ""
}
|
q6207
|
camel_to_underscore
|
train
|
def camel_to_underscore(name):
"""
convert CamelCase style to under_score_case
"""
as_list = []
length = len(name)
for index, i in enumerate(name):
if index != 0 and index != length - 1 and i.isupper():
as_list.append('_%s' % i.lower())
else:
as_list.append(i.lower())
return ''.join(as_list)
|
python
|
{
"resource": ""
}
|
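A quick sanity check of camel_to_underscore above, assuming the function is in scope; note the quirk that the last character is always lowercased without an underscore:

assert camel_to_underscore('CamelCase') == 'camel_case'
assert camel_to_underscore('HTTPServer') == 'h_t_t_p_server'  # consecutive capitals each get an underscore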
q6208
|
encode_http_params
|
train
|
def encode_http_params(**kw):
'''
URL parameter encoding
'''
try:
_fo = lambda k, v: '{name}={value}'.format(
name=k, value=to_basestring(quote(v)))
except:
_fo = lambda k, v: '%s=%s' % (k, to_basestring(quote(v)))
_en = utf8
return '&'.join([_fo(k, _en(v)) for k, v in kw.items() if not is_empty(v)])
|
python
|
{
"resource": ""
}
|
q6209
|
_init_file_logger
|
train
|
def _init_file_logger(logger, level, log_path, log_size, log_count):
"""
ensure a logger has at most one RotatingFileHandler per level
"""
if level not in [logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]:
level = logging.DEBUG
for h in logger.handlers:
if isinstance(h, logging.handlers.RotatingFileHandler):
if h.level == level:
return
fh = logging.handlers.RotatingFileHandler(
log_path, maxBytes=log_size, backupCount=log_count)
fh.setLevel(level)
fh.setFormatter(_formatter)
logger.addHandler(fh)
|
python
|
{
"resource": ""
}
|
q6210
|
Session._processor
|
train
|
def _processor(self):
"""Application processor to setup session for every request"""
self.store.cleanup(self._config.timeout)
self._load()
|
python
|
{
"resource": ""
}
|
q6211
|
Session._load
|
train
|
def _load(self):
"""Load the session from the store, by the id from cookie"""
self.session_id = self._session_object.get_session_id()
# protection against session_id tampering
if self.session_id and not self._valid_session_id(self.session_id):
self.session_id = None
if self.session_id:
d = self.store[self.session_id]
if isinstance(d, dict) and d:
self.update(d)
if not self.session_id:
self.session_id = self._session_object.generate_session_id()
if not self._data:
if self._initializer and isinstance(self._initializer, dict):
self.update(deepcopy(self._initializer))
self._session_object.set_session_id(self.session_id)
|
python
|
{
"resource": ""
}
|
q6212
|
Store.encode
|
train
|
def encode(self, session_data):
"""encodes session dict as a string"""
pickled = pickle.dumps(session_data)
return to_basestring(encodebytes(pickled))
|
python
|
{
"resource": ""
}
|
q6213
|
Store.decode
|
train
|
def decode(self, session_data):
"""decodes the data to get back the session dict """
pickled = decodebytes(utf8(session_data))
return pickle.loads(pickled)
|
python
|
{
"resource": ""
}
|
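Store.encode and Store.decode above form a pickle + base64 round trip; a standalone sketch of the same pattern, without the tornado-style utf8/to_basestring helpers:

import pickle
from base64 import encodebytes, decodebytes

session = {'user_id': 42, 'logged_in': True}
encoded = encodebytes(pickle.dumps(session)).decode('ascii')  # plain str, safe to store
assert pickle.loads(decodebytes(encoded.encode('ascii'))) == session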
q6214
|
cli
|
train
|
def cli():
"""An improved shell command, based on konch."""
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
options = {key: app.config.get(key, DEFAULTS[key]) for key in DEFAULTS.keys()}
base_context = {"app": app}
if options["KONCH_FLASK_IMPORTS"]:
base_context.update(get_flask_imports())
context = dict(base_context)
if options["KONCH_FLASK_SHELL_CONTEXT"]:
flask_context = app.make_shell_context()
context.update(flask_context)
context.update(options["KONCH_CONTEXT"])
def context_formatter(ctx):
formatted_base = ", ".join(sorted(base_context.keys()))
ret = "\n{FLASK}\n{base_context}\n".format(
FLASK=click.style("Flask:", bold=True), base_context=formatted_base
)
if options["KONCH_FLASK_SHELL_CONTEXT"]:
variables = ", ".join(sorted(flask_context.keys()))
ret += "\n{ADDITIONAL}\n{variables}\n".format(
ADDITIONAL=click.style(
"Flask shell context (see shell_context_processor()):", bold=True
),
variables=variables,
)
if options["KONCH_CONTEXT"]:
variables = ", ".join(sorted(options["KONCH_CONTEXT"].keys()))
ret += "\n{ADDITIONAL}\n{variables}".format(
ADDITIONAL=click.style(
"Additional variables (see KONCH_CONTEXT):", bold=True
),
variables=variables,
)
return ret
context_format = options["KONCH_CONTEXT_FORMAT"] or context_formatter
konch.start(
context=context,
shell=options["KONCH_SHELL"],
banner=options["KONCH_BANNER"],
prompt=options["KONCH_PROMPT"],
output=options["KONCH_OUTPUT"],
ptpy_vi_mode=options["KONCH_PTPY_VI_MODE"],
context_format=context_format,
ipy_extensions=options["KONCH_IPY_EXTENSIONS"],
ipy_autoreload=options["KONCH_IPY_AUTORELOAD"],
ipy_colors=options["KONCH_IPY_COLORS"],
ipy_highlighting_style=options["KONCH_IPY_HIGHLIGHTING_STYLE"],
)
|
python
|
{
"resource": ""
}
|
q6215
|
LazyUUIDTask.replace
|
train
|
def replace(self):
"""
Performs conversion to the regular Task object, referenced by the
stored UUID.
"""
replacement = self._tw.tasks.get(uuid=self._uuid)
self.__class__ = replacement.__class__
self.__dict__ = replacement.__dict__
|
python
|
{
"resource": ""
}
|
q6216
|
LazyUUIDTaskSet.replace
|
train
|
def replace(self):
"""
Performs conversion to the regular TaskQuerySet object, referenced by
the stored UUIDs.
"""
replacement = self._tw.tasks.filter(' '.join(self._uuids))
self.__class__ = replacement.__class__
self.__dict__ = replacement.__dict__
|
python
|
{
"resource": ""
}
|
q6217
|
TaskResource._update_data
|
train
|
def _update_data(self, data, update_original=False, remove_missing=False):
"""
Low level update of the internal _data dict. Data which are coming as
updates should already be serialized. If update_original is True, the
original_data dict is updated as well.
"""
self._data.update(dict((key, self._deserialize(key, value))
for key, value in data.items()))
# In certain situations, we want to treat missing keys as removals
if remove_missing:
for key in set(self._data.keys()) - set(data.keys()):
self._data[key] = None
if update_original:
self._original_data = copy.deepcopy(self._data)
|
python
|
{
"resource": ""
}
|
q6218
|
TaskResource.export_data
|
train
|
def export_data(self):
"""
Exports current data contained in the Task as JSON
"""
# We need to remove spaces for TW-1504, use custom separators
data_tuples = ((key, self._serialize(key, value))
for key, value in six.iteritems(self._data))
# Empty string denotes empty serialized value, we do not want
# to pass that to TaskWarrior.
data_tuples = filter(lambda t: t[1] != '', data_tuples)
data = dict(data_tuples)
return json.dumps(data, separators=(',', ':'))
|
python
|
{
"resource": ""
}
|
q6219
|
Task.from_input
|
train
|
def from_input(cls, input_file=sys.stdin, modify=None, backend=None):
"""
Creates a Task object, directly from the stdin, by reading one line.
If modify=True, two lines are used, first line interpreted as the
original state of the Task object, and second line as its new,
modified value. This is consistent with TaskWarrior's hook
system.
Objects created by this method should not be saved, deleted
or refreshed, as that could create an infinite loop. For this
reason, the TaskWarrior instance is set to None.
The input_file argument can be used to specify the input file,
but defaults to sys.stdin.
"""
# Detect the hook type if not given directly
name = os.path.basename(sys.argv[0])
modify = name.startswith('on-modify') if modify is None else modify
# Create the TaskWarrior instance if none passed
if backend is None:
backends = importlib.import_module('tasklib.backends')
hook_parent_dir = os.path.dirname(os.path.dirname(sys.argv[0]))
backend = backends.TaskWarrior(data_location=hook_parent_dir)
# TaskWarrior instance is set to None
task = cls(backend)
# Load the data from the input
task._load_data(json.loads(input_file.readline().strip()))
# If this is a on-modify event, we are provided with additional
# line of input, which provides updated data
if modify:
task._update_data(json.loads(input_file.readline().strip()),
remove_missing=True)
return task
|
python
|
{
"resource": ""
}
|
q6220
|
TaskQuerySet.filter
|
train
|
def filter(self, *args, **kwargs):
"""
Returns a new TaskQuerySet with the given filters added.
"""
clone = self._clone()
for f in args:
clone.filter_obj.add_filter(f)
for key, value in kwargs.items():
clone.filter_obj.add_filter_param(key, value)
return clone
|
python
|
{
"resource": ""
}
|
q6221
|
CSequenceMatcher.set_seq1
|
train
|
def set_seq1(self, a):
"""Same as SequenceMatcher.set_seq1, but check for non-list inputs
implementation."""
if a is self.a:
return
self.a = a
if not isinstance(self.a, list):
self.a = list(self.a)
# Types must be hashable to work in the c layer. This will raise if
# list items are *not* hashable.
[hash(x) for x in self.a]
|
python
|
{
"resource": ""
}
|
q6222
|
CSequenceMatcher.set_seq2
|
train
|
def set_seq2(self, b):
"""Same as SequenceMatcher.set_seq2, but uses the c chainb
implementation.
"""
if b is self.b and hasattr(self, 'isbjunk'):
return
self.b = b
if not isinstance(self.a, list):
self.a = list(self.a)
if not isinstance(self.b, list):
self.b = list(self.b)
# Types must be hashable to work in the c layer. These check lines will
# raise the correct error if the items are *not* hashable.
[hash(x) for x in self.a]
[hash(x) for x in self.b]
self.matching_blocks = self.opcodes = None
self.fullbcount = None
junk, popular = _cdifflib.chain_b(self)
assert hasattr(junk, '__contains__')
assert hasattr(popular, '__contains__')
self.isbjunk = junk.__contains__
self.isbpopular = popular.__contains__
|
python
|
{
"resource": ""
}
|
q6223
|
CSequenceMatcher.get_matching_blocks
|
train
|
def get_matching_blocks(self):
"""Same as SequenceMatcher.get_matching_blocks, but calls through to a
faster loop for find_longest_match. The rest is the same.
"""
if self.matching_blocks is not None:
return self.matching_blocks
matching_blocks = _cdifflib.matching_blocks(self)
matching_blocks.append((len(self.a), len(self.b), 0))
self.matching_blocks = matching_blocks
return map(_Match._make, self.matching_blocks)
|
python
|
{
"resource": ""
}
|
q6224
|
_tostream
|
train
|
def _tostream(parser, obj, stream, skipprepack = False):
"""
Compatible to old parsers
"""
if hasattr(parser, 'tostream'):
return parser.tostream(obj, stream, skipprepack)
else:
data = parser.tobytes(obj, skipprepack)
cls = type(parser)
if cls not in _deprecated_parsers:
_deprecated_parsers.add(cls)
warnings.warn("Parser %r does not have 'tostream' interfaces" % (cls,), UserWarning)
return stream.write(data)
|
python
|
{
"resource": ""
}
|
q6225
|
_to_str
|
train
|
def _to_str(dumped_val, encoding='utf-8', ordered=True):
"""
Convert bytes in a dumped value to str, allowing it to be JSON-encoded.
"""
_dict = OrderedDict if ordered else dict
if isinstance(dumped_val, dict):
return _dict((k, _to_str(v, encoding, ordered)) for k, v in dumped_val.items())
elif isinstance(dumped_val, (list, tuple)):
return [_to_str(v, encoding, ordered) for v in dumped_val]
elif isinstance(dumped_val, bytes):
try:
d = dumped_val.decode(encoding)
except Exception:
d = repr(dumped_val)
return d
else:
return dumped_val
|
python
|
{
"resource": ""
}
|
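A usage sketch, assuming the _to_str helper above (with its OrderedDict import) is in scope:

dumped = {'name': b'alice', 'tags': [b'a', b'b'], 'n': 1}
print(_to_str(dumped))
# -> OrderedDict([('name', 'alice'), ('tags', ['a', 'b']), ('n', 1)])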
q6226
|
NamedStruct._unpack
|
train
|
def _unpack(self, data):
'''
Unpack a struct from bytes. For parser internal use.
'''
#self._logger.log(logging.DEBUG, 'unpacking %r', self)
current = self
while current is not None:
data = current._parser.unpack(data, current)
last = current
current = getattr(current, '_sub', None)
_set(last, '_extra', data)
|
python
|
{
"resource": ""
}
|
q6227
|
NamedStruct._prepack
|
train
|
def _prepack(self):
'''
Prepack stage. For parser internal use.
'''
current = self
while current is not None:
current._parser.prepack(current, skip_self = True)
current = getattr(current, '_sub', None)
current = self
while current is not None:
current._parser.prepack(current, skip_sub = True)
current = getattr(current, '_sub', None)
|
python
|
{
"resource": ""
}
|
q6228
|
NamedStruct._getextra
|
train
|
def _getextra(self):
'''
Get the extra data of this struct.
'''
current = self
while hasattr(current, '_sub'):
current = current._sub
return getattr(current, '_extra', None)
|
python
|
{
"resource": ""
}
|
q6229
|
Parser.paddingsize2
|
train
|
def paddingsize2(self, realsize):
'''
Return a padded size from realsize, for NamedStruct internal use.
'''
if self.base is not None:
return self.base.paddingsize2(realsize)
return (realsize + self.padding - 1) // self.padding * self.padding
|
python
|
{
"resource": ""
}
|
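The padding arithmetic above is the usual round-up-to-a-multiple trick; a standalone illustration:

def padded_size(realsize, padding):
    # round realsize up to the next multiple of padding
    return (realsize + padding - 1) // padding * padding

assert padded_size(5, 4) == 8
assert padded_size(8, 4) == 8
assert padded_size(0, 4) == 0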
q6230
|
typedef.parser
|
train
|
def parser(self):
'''
Get parser for this type. Create the parser on first call.
'''
if not hasattr(self, '_parser'):
self._parser = self._compile()
return self._parser
|
python
|
{
"resource": ""
}
|
q6231
|
enum.formatter
|
train
|
def formatter(self, value):
'''
Format an enumerated value into its enumeration name(s) if possible. Used to generate human readable
dump results.
'''
if not self._bitwise:
n = self.getName(value)
if n is None:
return value
else:
return n
else:
names = []
for k,v in sorted(self._values.items(), key=lambda x: x[1], reverse=True):
if (v & value) == v:
names.append(k)
value = value ^ v
names.reverse()
if value != 0:
names.append(hex(value))
if not names:
return 0
return ' '.join(names)
|
python
|
{
"resource": ""
}
|
q6232
|
OptionalParser.packto
|
train
|
def packto(self, namedstruct, stream):
"""
Pack a struct to a stream
"""
if hasattr(namedstruct, self.name):
return _tostream(self.basetypeparser, getattr(namedstruct, self.name), stream, True)
else:
return 0
|
python
|
{
"resource": ""
}
|
q6233
|
Meso._get_response
|
train
|
def _get_response(self, endpoint, request_dict):
""" Returns a dictionary of data requested by each function.
Arguments:
----------
endpoint: string, mandatory
Set in all other methods, this is the API endpoint specific to each function.
request_dict: string, mandatory
A dictionary of parameters that are formatted into the API call.
Returns:
--------
response: A dictionary that has been dumped from JSON.
Raises:
-------
MesoPyError: Overrides the exceptions given in the requests library to give more custom error messages.
Connection_error occurs if no internet connection exists. Timeout_error occurs if the request takes too
long and redirect_error is shown if the url is formatted incorrectly.
"""
http_error = 'Could not connect to the API. This could be because you have no internet connection, a parameter' \
' was input incorrectly, or the API is currently down. Please try again.'
json_error = 'Could not retrieve JSON values. Try again with a shorter date range.'
# For python 3.4
try:
qsp = urllib.parse.urlencode(request_dict, doseq=True)
resp = urllib.request.urlopen(self.base_url + endpoint + '?' + qsp).read()
# For python 2.7
except (AttributeError, NameError):
try:
qsp = urllib.urlencode(request_dict, doseq=True)
resp = urllib2.urlopen(self.base_url + endpoint + '?' + qsp).read()
except urllib2.URLError:
raise MesoPyError(http_error)
except urllib.error.URLError:
raise MesoPyError(http_error)
try:
json_data = json.loads(resp.decode('utf-8'))
except ValueError:
raise MesoPyError(json_error)
return self._checkresponse(json_data)
|
python
|
{
"resource": ""
}
|
q6234
|
BaseCollection.avg
|
train
|
def avg(self, key=None):
"""
Get the average value of a given key.
:param key: The key to get the average for
:type key: mixed
:rtype: float or int
"""
count = self.count()
if count:
return self.sum(key) / count
|
python
|
{
"resource": ""
}
|
q6235
|
BaseCollection.diff
|
train
|
def diff(self, items):
"""
Diff the collection with the given items.
:param items: The items to diff with
:type items: mixed
:return: A Collection instance
:rtype: Collection
"""
return self.__class__([i for i in self.items if i not in items])
|
python
|
{
"resource": ""
}
|
q6236
|
BaseCollection.each
|
train
|
def each(self, callback):
"""
Execute a callback over each item.
.. code::
collection = Collection([1, 2, 3])
collection.each(lambda x: x + 3)
.. warning::
It only applies the callback but does not modify the collection's items.
Use the `transform() <#backpack.Collection.transform>`_ method to
modify the collection.
:param callback: The callback to execute
:type callback: callable
:rtype: Collection
"""
items = self.items
for item in items:
if callback(item) is False:
break
return self
|
python
|
{
"resource": ""
}
|
q6237
|
BaseCollection.every
|
train
|
def every(self, step, offset=0):
"""
Create a new collection consisting of every n-th element.
:param step: The step size
:type step: int
:param offset: The start offset
:type offset: int
:rtype: Collection
"""
new = []
for position, item in enumerate(self.items):
if position % step == offset:
new.append(item)
return self.__class__(new)
|
python
|
{
"resource": ""
}
|
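The position filter in every() above reduces to a modulo test; the same logic on a plain list:

items = ['a', 'b', 'c', 'd', 'e', 'f']
step, offset = 2, 0
assert [item for position, item in enumerate(items) if position % step == offset] == ['a', 'c', 'e']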
q6238
|
BaseCollection.without
|
train
|
def without(self, *keys):
"""
Get all items except for those with the specified keys.
:param keys: The keys to remove
:type keys: tuple
:rtype: Collection
"""
items = copy(self.items)
keys = reversed(sorted(keys))
for key in keys:
del items[key]
return self.__class__(items)
|
python
|
{
"resource": ""
}
|
q6239
|
BaseCollection.only
|
train
|
def only(self, *keys):
"""
Get the items with the specified keys.
:param keys: The keys to keep
:type keys: tuple
:rtype: Collection
"""
items = []
for key, value in enumerate(self.items):
if key in keys:
items.append(value)
return self.__class__(items)
|
python
|
{
"resource": ""
}
|
q6240
|
BaseCollection.filter
|
train
|
def filter(self, callback=None):
"""
Run a filter over each of the items.
:param callback: The filter callback
:type callback: callable or None
:rtype: Collection
"""
if callback:
return self.__class__(list(filter(callback, self.items)))
return self.__class__(list(filter(None, self.items)))
|
python
|
{
"resource": ""
}
|
q6241
|
BaseCollection.where
|
train
|
def where(self, key, value):
"""
Filter items by the given key value pair.
:param key: The key to filter by
:type key: str
:param value: The value to filter by
:type value: mixed
:rtype: Collection
"""
return self.filter(lambda item: data_get(item, key) == value)
|
python
|
{
"resource": ""
}
|
q6242
|
BaseCollection.first
|
train
|
def first(self, callback=None, default=None):
"""
Get the first item of the collection.
:param default: The default value
:type default: mixed
"""
if callback is not None:
for val in self.items:
if callback(val):
return val
return value(default)
if len(self.items) > 0:
return self.items[0]
else:
return default
|
python
|
{
"resource": ""
}
|
q6243
|
BaseCollection.flatten
|
train
|
def flatten(self):
"""
Get a flattened list of the items in the collection.
:rtype: Collection
"""
def _flatten(d):
if isinstance(d, dict):
for v in d.values():
for nested_v in _flatten(v):
yield nested_v
elif isinstance(d, list):
for list_v in d:
for nested_v in _flatten(list_v):
yield nested_v
else:
yield d
return self.__class__(list(_flatten(self.items)))
|
python
|
{
"resource": ""
}
|
q6244
|
BaseCollection.forget
|
train
|
def forget(self, *keys):
"""
Remove an item from the collection by key.
:param keys: The keys to remove
:type keys: tuple
:rtype: Collection
"""
keys = reversed(sorted(keys))
for key in keys:
del self[key]
return self
|
python
|
{
"resource": ""
}
|
q6245
|
BaseCollection.get
|
train
|
def get(self, key, default=None):
"""
Get an element of the collection.
:param key: The index of the element
:type key: mixed
:param default: The default value to return
:type default: mixed
:rtype: mixed
"""
try:
return self.items[key]
except IndexError:
return value(default)
|
python
|
{
"resource": ""
}
|
q6246
|
BaseCollection.implode
|
train
|
def implode(self, value, glue=''):
"""
Concatenate values of a given key as a string.
:param value: The value
:type value: str
:param glue: The glue
:type glue: str
:rtype: str
"""
first = self.first()
if not isinstance(first, (basestring)):
return glue.join(self.pluck(value).all())
return value.join(self.items)
|
python
|
{
"resource": ""
}
|
q6247
|
BaseCollection.last
|
train
|
def last(self, callback=None, default=None):
"""
Get the last item of the collection.
:param default: The default value
:type default: mixed
"""
if callback is not None:
for val in reversed(self.items):
if callback(val):
return val
return value(default)
if len(self.items) > 0:
return self.items[-1]
else:
return default
|
python
|
{
"resource": ""
}
|
q6248
|
BaseCollection.pluck
|
train
|
def pluck(self, value, key=None):
"""
Get a list with the values of a given key.
:rtype: Collection
"""
if key:
return dict(map(lambda x: (data_get(x, key), data_get(x, value)), self.items))
else:
results = list(map(lambda x: data_get(x, value), self.items))
return self.__class__(results)
|
python
|
{
"resource": ""
}
|
q6249
|
BaseCollection.max
|
train
|
def max(self, key=None):
"""
Get the max value of a given key.
:param key: The key
:type key: str or None
:rtype: mixed
"""
def _max(result, item):
val = data_get(item, key)
if result is None or val > result:
return val
return result
return self.reduce(_max)
|
python
|
{
"resource": ""
}
|
q6250
|
BaseCollection.min
|
train
|
def min(self, key=None):
"""
Get the min value of a given key.
:param key: The key
:type key: str or None
:rtype: mixed
"""
def _min(result, item):
val = data_get(item, key)
if result is None or val < result:
return val
return result
return self.reduce(_min)
|
python
|
{
"resource": ""
}
|
q6251
|
BaseCollection.for_page
|
train
|
def for_page(self, page, per_page):
"""
"Paginate" the collection by slicing it into a smaller collection.
:param page: The current page
:type page: int
:param per_page: Number of items by slice
:type per_page: int
:rtype: Collection
"""
start = (page - 1) * per_page
return self[start:start + per_page]
|
python
|
{
"resource": ""
}
|
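The pagination slice in for_page() above, shown on a plain list:

items = list(range(1, 11))  # ten items
page, per_page = 2, 3
start = (page - 1) * per_page
assert items[start:start + per_page] == [4, 5, 6]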
q6252
|
BaseCollection.pull
|
train
|
def pull(self, key, default=None):
"""
Pulls an item from the collection.
:param key: The key
:type key: mixed
:param default: The default value
:type default: mixed
:rtype: mixed
"""
val = self.get(key, default)
self.forget(key)
return val
|
python
|
{
"resource": ""
}
|
q6253
|
BaseCollection.reject
|
train
|
def reject(self, callback):
"""
Create a collection of all elements that do not pass a given truth test.
:param callback: The truth test
:type callback: callable
:rtype: Collection
"""
if self._use_as_callable(callback):
return self.filter(lambda item: not callback(item))
return self.filter(lambda item: item != callback)
|
python
|
{
"resource": ""
}
|
q6254
|
BaseCollection.sort
|
train
|
def sort(self, callback=None):
"""
Sort through each item with a callback.
:param callback: The callback
:type callback: callable or None
:rtype: Collection
"""
items = self.items
if callback:
return self.__class__(sorted(items, key=callback))
else:
return self.__class__(sorted(items))
|
python
|
{
"resource": ""
}
|
q6255
|
BaseCollection.sum
|
train
|
def sum(self, callback=None):
"""
Get the sum of the given values.
:param callback: The callback
:type callback: callable or string or None
:rtype: mixed
"""
if callback is None:
return sum(self.items)
callback = self._value_retriever(callback)
return self.reduce(lambda result, item: (result or 0) + callback(item))
|
python
|
{
"resource": ""
}
|
q6256
|
BaseCollection.zip
|
train
|
def zip(self, *items):
"""
Zip the collection together with one or more arrays.
:param items: The items to zip
:type items: list
:rtype: Collection
"""
return self.__class__(list(zip(self.items, *items)))
|
python
|
{
"resource": ""
}
|
q6257
|
BaseCollection.merge
|
train
|
def merge(self, items):
"""
Merge the collection with the given items.
:param items: The items to merge
:type items: list or Collection
:rtype: Collection
"""
if isinstance(items, BaseCollection):
items = items.all()
if not isinstance(items, list):
raise ValueError('Unable to merge incompatible types')
self._items += items
return self
|
python
|
{
"resource": ""
}
|
q6258
|
BaseCollection.transform
|
train
|
def transform(self, callback):
"""
Transform each item in the collection using a callback.
:param callback: The callback
:type callback: callable
:rtype: Collection
"""
self._items = self.map(callback).all()
return self
|
python
|
{
"resource": ""
}
|
q6259
|
BaseCollection._value_retriever
|
train
|
def _value_retriever(self, value):
"""
Get a value retrieving callback.
:type value: mixed
:rtype: callable
"""
if self._use_as_callable(value):
return value
return lambda item: data_get(item, value)
|
python
|
{
"resource": ""
}
|
q6260
|
buildvrt
|
train
|
def buildvrt(input_file_list, output_file,
relative=True, **kwargs):
"""Build a VRT
See also: https://www.gdal.org/gdalbuildvrt.html
You can find the possible BuildVRTOptions (**kwargs**) here:
https://github.com/nextgis/pygdal/blob/78a793057d2162c292af4f6b240e19da5d5e52e2/2.1.0/osgeo/gdal.py#L1051
Arguments:
input_file_list {list of str or Path objects} -- List of input files.
output_file {str or Path object} -- Output file (VRT).
Keyword Arguments:
relative {bool} -- If ``True``, the ``input_file_list`` paths are converted to relative
paths (relative to the output file) and the VRT works even if the data is moved somewhere else -
given that the relative location of the VRT and the input files does not change!
**kwargs {} -- BuildVRTOptions - see the function description above for a link.
Returns:
[int] -- If successful, 0 is returned as exit code.
"""
# create destination directory
if not Path(output_file).parent.exists():
Path(output_file).parent.mkdir(parents=True, exist_ok=True)
# make sure we have absolute paths and strings since BuildVRT does not accept anything else
input_file_list = [str(Path(p).absolute()) for p in input_file_list]
output_file = str(Path(output_file).absolute())
vrt_options = gdal.BuildVRTOptions(**kwargs)
vrt = gdal.BuildVRT(output_file,
input_file_list,
options=vrt_options)
vrt = None
# if desired, make the input file paths relative to the output VRT path
# and fix the paths and the relativeToVRT tag in the VRT
if relative:
input_file_list_relative = [relpath(p, Path(output_file).parent) for p in input_file_list]
with open(output_file, 'r') as file:
# read a list of lines into data
lines = file.readlines()
new_lines = []
counter = -1
for line in lines:
# sometimes it is relative by default
# maybe when all files contain the parent directory of the output file (?)
if "relativeToVRT=\"1\"" in line:
counter += 1
elif "relativeToVRT=\"0\"" in line:
counter += 1
input_file = str(input_file_list[counter])
input_file_relative = str(input_file_list_relative[counter])
if input_file not in line:
raise Exception(f"Expect path {input_file} not part of line {line}.")
line = line.replace(input_file,
input_file_relative)
line = line.replace("relativeToVRT=\"0\"",
"relativeToVRT=\"1\"")
else:
pass
new_lines.append(line)
with open(output_file, 'w') as file:
file.writelines(new_lines)
return 0
|
python
|
{
"resource": ""
}
|
q6261
|
rasterize
|
train
|
def rasterize(src_vector: str,
burn_attribute: str,
src_raster_template: str,
dst_rasterized: str,
gdal_dtype: int = 4):
"""Rasterize the values of a spatial vector file.
Arguments:
src_vector {str} -- An OGR vector file (e.g. GeoPackage, ESRI Shapefile) path containing the
data to be rasterized.
burn_attribute {str} -- The attribute of the vector data to be burned in the raster.
src_raster_template {str} -- Path to a GDAL raster file to be used as template for the
rasterized data.
dst_rasterized {str} -- Path of the destination file.
gdal_dtype {int} -- Numeric GDAL data type, defaults to 4 which is UInt32.
See https://github.com/mapbox/rasterio/blob/master/rasterio/dtypes.py for useful look-up
tables.
Returns:
None
"""
data = gdal.Open(str(src_raster_template), # str for the case that a Path instance arrives here
gdalconst.GA_ReadOnly)
geo_transform = data.GetGeoTransform()
#source_layer = data.GetLayer()
# x_max = x_min + geo_transform[1] * data.RasterXSize
# y_min = y_max + geo_transform[5] * data.RasterYSize
x_res = data.RasterXSize
y_res = data.RasterYSize
mb_v = ogr.Open(src_vector)
mb_l = mb_v.GetLayer()
target_ds = gdal.GetDriverByName('GTiff').Create(dst_rasterized,
x_res, y_res, 1,
gdal_dtype) # gdal.GDT_Byte
# import osr
target_ds.SetGeoTransform((geo_transform[0], # x_min
geo_transform[1], # pixel_width
0,
geo_transform[3], # y_max
0,
geo_transform[5] # pixel_height
))
prj = data.GetProjection()
# srs = osr.SpatialReference(wkt=prj) # Where was this needed?
target_ds.SetProjection(prj)
band = target_ds.GetRasterBand(1)
# NoData_value = 0
# band.SetNoDataValue(NoData_value)
band.FlushCache()
gdal.RasterizeLayer(target_ds, [1], mb_l, options=[f"ATTRIBUTE={burn_attribute}"])
target_ds = None
|
python
|
{
"resource": ""
}
|
q6262
|
convert_polygons_to_lines
|
train
|
def convert_polygons_to_lines(src_polygons, dst_lines, crs=None, add_allone_col=False):
"""Convert polygons to lines.
Arguments:
src_polygons {path to geopandas-readable file} -- Filename of the polygon vector dataset to be
converted to lines.
dst_lines {[type]} -- Filename where to write the line vector dataset to.
Keyword Arguments:
crs {dict or str} -- Output projection parameters as string or in dictionary format.
This will reproject the data when a crs is given (not {None}) (default: {None}).
add_allone_col {bool} -- Add an additional attribute column with all ones.
This is useful, e.g. in case you want to use the lines with gdal_proximity afterwards (default: {False}).
Returns:
int -- Exit code 0 if successful.
"""
gdf = gpd.read_file(src_polygons)
geom_coords = gdf["geometry"] # featureset.get(5)["geometry"]["coordinates"]
lines = []
row_ids = []
for i_row, pol in tqdm(enumerate(geom_coords), total=len(geom_coords)):
boundary = pol.boundary
if boundary.type == 'MultiLineString':
for line in boundary:
lines.append(line)
row_ids.append(i_row)
else:
lines.append(boundary)
row_ids.append(i_row)
gdf_lines = gdf.drop("geometry", axis=1).iloc[row_ids, :]
gdf_lines["Coordinates"] = lines
gdf_lines = gpd.GeoDataFrame(gdf_lines, geometry='Coordinates', crs=gdf.crs)
if crs is not None:
gdf_lines = gdf_lines.to_crs(crs)
if add_allone_col:
gdf_lines["ALLONE"] = 1
Path(dst_lines).parent.mkdir(exist_ok=True, parents=True)
gdf_lines.to_file(dst_lines)
return 0
|
python
|
{
"resource": ""
}
|
q6263
|
dtype_checker_df
|
train
|
def dtype_checker_df(df, dtype, return_=None):
"""Check if there are NaN values of values outside of a given datatype range.
Arguments:
df {dataframe} -- A dataframe.
dtype {str} -- The datatype to check for.
Keyword Arguments:
return_ {str} -- Returns a boolean dataframe with the values not in the range of the dtype ('all'),
the row ('rowsums') or column ('colsums') sums of that dataframe or an exit code 1 (None, default)
if any of the values is not in the range.
Returns:
[int or DataFrame or Series] -- If no value is out of the range exit code 0 is returned, else depends on return_.
"""
dtype_range = dtype_ranges[dtype]
df_out_of_range = (df < dtype_range[0]) | (df > dtype_range[1]) | (~np.isfinite(df))
if df_out_of_range.any().any():
if return_== "colsums":
df_out_of_range = df_out_of_range.apply(sum, axis=0) # column
elif return_== "rowsums":
df_out_of_range = df_out_of_range.apply(sum, axis=1) # row
elif return_== "all":
df_out_of_range = df_out_of_range
else:
df_out_of_range = 1
else:
df_out_of_range = 0
return df_out_of_range
|
python
|
{
"resource": ""
}
|
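dtype_ranges is not shown in the snippet above; a minimal sketch of the range check, assuming it maps dtype names to (min, max) tuples:

import numpy as np
import pandas as pd

dtype_ranges = {'uint8': (0, 255)}  # assumed shape of the lookup used above
df = pd.DataFrame({'a': [0, 300], 'b': [10.0, np.nan]})
dtype_range = dtype_ranges['uint8']
out_of_range = (df < dtype_range[0]) | (df > dtype_range[1]) | (~np.isfinite(df))
print(out_of_range.any().any())  # -> True: 300 exceeds 255 and NaN is non-finite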
q6264
|
EOCubeChunk._get_spatial_bounds
|
train
|
def _get_spatial_bounds(self):
"""Get the spatial bounds of the chunk."""
# This should be a MultiRasterIO method
with rasterio.open(self._mrio._get_template_for_given_resolution(self._mrio.dst_res, "path")) as src_layer:
pass # later we need src_layer for src_layer.window_transform(win)
win_transform = src_layer.window_transform(self._window)
bounds = rasterio.windows.bounds(window=self._window,
transform=win_transform,
height=0, width=0)
return bounds
|
python
|
{
"resource": ""
}
|
q6265
|
EOCubeChunk.robust_data_range
|
train
|
def robust_data_range(arr, robust=False, vmin=None, vmax=None):
"""Get a robust data range, i.e. 2nd and 98th percentile for vmin, vmax parameters."""
# from the seaborn code
# https://github.com/mwaskom/seaborn/blob/3a3ec75befab52c02650c62772a90f8c23046038/seaborn/matrix.py#L201
def _get_vmin_vmax(arr2d, vmin=None, vmax=None):
if vmin is None:
vmin = np.percentile(arr2d, 2) if robust else arr2d.min()
if vmax is None:
vmax = np.percentile(arr2d, 98) if robust else arr2d.max()
return vmin, vmax
if len(arr.shape) == 3 and vmin is None and vmax is None:
vmin = []
vmax = []
for i in range(arr.shape[2]):
arr_i = arr[:, :, i]
vmin_i, vmax_i = _get_vmin_vmax(arr_i, vmin=None, vmax=None)
vmin.append(vmin_i)
vmax.append(vmax_i)
else:
vmin, vmax = _get_vmin_vmax(arr, vmin=vmin, vmax=vmax)
return vmin, vmax
|
python
|
{
"resource": ""
}
|
q6266
|
EOCubeChunk.from_eocube
|
train
|
def from_eocube(eocube, ji):
"""Create a EOCubeChunk object from an EOCube object."""
eocubewin = EOCubeChunk(ji, eocube.df_layers, eocube.chunksize, eocube.wdir)
return eocubewin
|
python
|
{
"resource": ""
}
|
q6267
|
EOCubeSceneCollection.get_chunk
|
train
|
def get_chunk(self, ji):
"""Get a EOCubeChunk"""
return EOCubeSceneCollectionChunk(ji=ji,
df_layers=self.df_layers,
chunksize=self.chunksize,
variables=self.variables,
qa=self.qa,
qa_valid=self.qa_valid,
wdir=self.wdir)
|
python
|
{
"resource": ""
}
|
q6268
|
get_dataset
|
train
|
def get_dataset(dataset="s2l1c"):
"""Get a specific sampledata to play around.
So far the following sampledata exist:
* 's2l1c': One Sentinel-2 Level 1C scene with a reference dataset.
* 'lsts': A time series of 105 Landsat scenes each with the bands b3 (red), b4 (nir), b5 (swir1) and fmask.
Keyword Arguments:
dataset {str} -- The name of the dataset (default: {'s2l1c'}).
Returns:
[dict] -- A dictionary with paths and information about the sampledata.
"""
if dataset == "s2l1c":
search_string = os.path.join(DIR_DATA, dataset, "**", "*_B??.jp2")
files = glob.glob(search_string, recursive=True)
if not files:
raise IOError(f"Could not find raster files of the s2l1c dataset. Search string: {search_string}")
basename_splitted = [pth.replace(".jp2", "").split("_")[-2:] for pth in files]
dset = {"raster_files": files,
"raster_bands": [ele[1] for ele in basename_splitted],
"raster_times": [ele[0] for ele in basename_splitted],
"vector_file": os.path.join(DIR_DATA, "s2l1c", "s2l1c_ref.gpkg"),
"vector_file_osm": os.path.join(DIR_DATA, "s2l1c", "gis_osm_landuse-water_a_free_1_area-10000-to-500000.gpkg")}
elif dataset == "lsts":
search_string = os.path.join(DIR_DATA, dataset, "**", "*.tif")
files = glob.glob(search_string, recursive=True)
if not files:
raise IOError(f"Could not find raster files of the lsts dataset. Search string: {search_string}")
basename_splitted = [os.path.basename(pth).replace(".tif", "").split("_") for pth in files]
dset = {"raster_files": files,
"raster_bands": [ele[1] for ele in basename_splitted],
"raster_times": [ele[0][9:16] for ele in basename_splitted]}
# If you want to add a new dataset here, do not forget to do all of the following steps:
# 1) add the dataset in the eo-box/sampledata/eobox/sampledata/data/<name of new dataset>
# 2) write the code here to get the paths of the data and eventually some additional information
# 3) write a test to make sure you get the data
# 4) add the new dataset to package_data in eo-box/sampledata/eobox/setup.py
# 5) add the new dataset to package_data in eo-box/sampledata/MANIFEST.in
# 6) change the version number in eo-box/sampledata/eobox/sampledata/__init__.py to '<current>.<current+1>.0'
return dset
|
python
|
{
"resource": ""
}
|
q6269
|
windows_from_blocksize
|
train
|
def windows_from_blocksize(blocksize_xy, width, height):
"""Create rasterio.windows.Window instances with given size which fully cover a raster.
Arguments:
blocksize_xy {int or list of two int} -- Size of the windows. One integer defines both the width
and the height; a list of two integers defines the width and the height respectively.
width {int} -- Width of the raster for which to create the windows.
height {int} -- Height of the raster for which to create the windows.
Returns:
list -- List of windows according to the following format
``[[<row-index>, <column index>], rasterio.windows.Window(<col_off>, <row_off>, <width>, <height>)]``.
"""
# checks the blocksize input
value_error_msg = "'blocksize must be an integer or a list of two integers.'"
if isinstance(blocksize_xy, int):
blockxsize, blockysize = (blocksize_xy, blocksize_xy)
elif isinstance(blocksize_xy, list):
if len(blocksize_xy) != 2:
raise ValueError(value_error_msg)
else:
if not all([isinstance(blocksize_xy[0], int), isinstance(blocksize_xy[1], int)]):
raise ValueError(value_error_msg)
blockxsize, blockysize = blocksize_xy
else:
raise ValueError(value_error_msg)
# create the col_off and row_off elements for all windows
n_cols = int(np.ceil(width / blockxsize))
n_rows = int(np.ceil(height / blockysize))
col = list(range(n_cols)) * n_rows
col_off = np.array(col) * blockxsize
row = np.repeat(list(range(n_rows)), n_cols)
row_off = row * blockysize
# create the windows
# if necessary, reduce the width and/or height of the border windows
blocksize_wins = []
for ridx, roff, cidx, coff, in zip(row, row_off, col, col_off):
if coff + blockxsize > width:
bxsize = width - coff
else:
bxsize = blockxsize
if roff + blockysize > height:
bysize = height - roff
else:
bysize = blockysize
blocksize_wins.append([[ridx, cidx], rasterio.windows.Window(coff, roff, bxsize, bysize)])
return blocksize_wins
|
python
|
{
"resource": ""
}
|
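A quick usage sketch of windows_from_blocksize above (requires rasterio and numpy in scope); border windows are trimmed to the raster extent:

wins = windows_from_blocksize(512, width=1024, height=768)
print(len(wins))  # -> 4 (2 columns x 2 rows)
print(wins[-1])   # -> [[1, 1], Window(col_off=512, row_off=512, width=512, height=256)]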
q6270
|
MultiRasterIO._get_dst_resolution
|
train
|
def _get_dst_resolution(self, dst_res=None):
"""Get default resolution, i.e. the highest resolution or smallest cell size."""
if dst_res is None:
dst_res = min(self._res_indices.keys())
return dst_res
|
python
|
{
"resource": ""
}
|
q6271
|
MultiRasterIO.windows_from_blocksize
|
train
|
def windows_from_blocksize(self, blocksize_xy=512):
"""Create rasterio.windows.Window instances with given size which fully cover the raster.
Arguments:
blocksize_xy {int or list of two int} -- Size of the window. If one integer is given it defines
the width and height of the window. If a list of two integers is given the first defines the
width and the second the height.
Returns:
None -- But the attributes ``windows``, ``windows_row`` and ``windows_col`` are updated.
"""
meta = self._get_template_for_given_resolution(self.dst_res, "meta")
width = meta["width"]
height = meta["height"]
blocksize_wins = windows_from_blocksize(blocksize_xy, width, height)
self.windows = np.array([win[1] for win in blocksize_wins])
self.windows_row = np.array([win[0][0] for win in blocksize_wins])
self.windows_col = np.array([win[0][1] for win in blocksize_wins])
return self
|
python
|
{
"resource": ""
}
|
q6272
|
MultiRasterIO._resample
|
train
|
def _resample(self, arrays, ji_windows):
"""Resample all arrays with potentially different resolutions to a common resolution."""
# get a destination array template
win_dst = ji_windows[self.dst_res]
aff_dst = self._layer_meta[self._res_indices[self.dst_res][0]]["transform"]
arrays_dst = list()
for i, array in enumerate(arrays):
arr_dst = np.zeros((int(win_dst.height), int(win_dst.width)))
if self._layer_resolution[i] > self.dst_res:
resampling = getattr(Resampling, self.upsampler)
elif self._layer_resolution[i] < self.dst_res:
resampling = getattr(Resampling, self.downsampler)
else:
arrays_dst.append(array.copy())
continue
reproject(array, arr_dst, # arr_dst[0, :, :, i],
src_transform=self._layer_meta[i]["transform"],
dst_transform=aff_dst,
src_crs=self._layer_meta[0]["crs"],
dst_crs=self._layer_meta[0]["crs"],
resampling=resampling)
arrays_dst.append(arr_dst.copy())
arrays_dst = np.stack(arrays_dst, axis=2) # n_images x n x m x 10 would be the synergise format
return arrays_dst
|
python
|
{
"resource": ""
}
|
q6273
|
extract
|
train
|
def extract(src_vector: str,
burn_attribute: str,
src_raster: list,
dst_names: list,
dst_dir: str,
src_raster_template: str = None,
gdal_dtype: int = 4,
n_jobs: int = 1):
"""Extract values from list of single band raster for pixels overlapping with a vector data.
The extracted data will be stored in the ``dst_dir`` by using the ``dst_names`` for the
filename. If a file with a given name already exists the raster will be skipped.
Arguments:
src_vector {str} -- Filename of the vector dataset. Currently it must have the same CRS as
the raster.
burn_attribute {str} -- Name of the attribute column in the ``src_vector`` dataset to be
stored with the extracted data. This should usually be a unique ID for the features
(points, lines, polygons) in the vector dataset.
src_raster {list} -- List of filenames of the single band raster files from which to
extract.
dst_names {list} -- List of names corresponding to ``src_raster``, used to store and later
identify the extracted data.
dst_dir {str} -- Directory to store the data to.
Keyword Arguments:
src_raster_template {str} -- A template raster to be used for rasterizing the vectorfile.
Usually the first element of ``src_raster``. (default: {None})
gdal_dtype {int} -- Numeric GDAL data type, defaults to 4 which is UInt32.
See https://github.com/mapbox/rasterio/blob/master/rasterio/dtypes.py for useful look-up
tables.
Returns:
[int] -- If successful, 0 is returned as exit code.
"""
if src_raster_template is None:
src_raster_template = src_raster[0]
path_rasterized = os.path.join(dst_dir, f"burn_attribute_rasterized_{burn_attribute}.tif")
paths_extracted_aux = {ele: os.path.join(dst_dir, f"{ele}.npy") \
for ele in [f"aux_vector_{burn_attribute}",
"aux_coord_x",
"aux_coord_y"]}
paths_extracted_raster = {}
for path, name in zip(src_raster, dst_names):
dst = f"{os.path.join(dst_dir, name)}.npy"
if not os.path.exists(dst):
paths_extracted_raster[path] = dst
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
# if it does not already exist, here we first create the rasterized data
if not os.path.exists(path_rasterized):
if src_raster_template is None:
src_raster_template = src_raster[0]
# print("Rasterizing vector attribute.")
rasterize(src_vector=src_vector,
burn_attribute=burn_attribute,
src_raster_template=src_raster_template,
dst_rasterized=path_rasterized,
gdal_dtype=gdal_dtype)
# if any of the destination files do not exist we need the locations of the pixels to be
# extracted in form of a numpy array bool (mask_arr) that fits the rasters from which we will
# extract below
if not (all([os.path.exists(path) for path in paths_extracted_aux.values()]) and \
all([os.path.exists(path) for path in paths_extracted_raster.values()])):
# print("Creating mask array for pixels to be extracted.")
mask_arr = _get_mask_array(path_rasterized, paths_extracted_aux, burn_attribute)
else:
return 0
# create the pixel coordinates if they do not exist
if not all([os.path.exists(paths_extracted_aux["aux_coord_x"]),
os.path.exists(paths_extracted_aux["aux_coord_y"])]):
_create_and_save_coords(path_rasterized, paths_extracted_aux, mask_arr)
# lets extract the raster values in case of sequential processing
# or remove existing raster layers to prepare parallel processing
if n_jobs == 1:
for path_src, path_dst in tqdm(paths_extracted_raster.items(),
total=len(paths_extracted_raster)):
_extract_and_save_one_layer(path_src, path_dst, mask_arr)
else:
import multiprocessing as mp
if n_jobs == -1:
n_jobs = mp.cpu_count()
pool = mp.Pool(processes=n_jobs)
_ = [pool.apply_async(_extract_and_save_one_layer,
args=(src, dst, mask_arr)) for \
src, dst in paths_extracted_raster.items()]
pool.close()
pool.join()
return 0
|
python
|
{
"resource": ""
}
|
q6274
|
Plane.extrema
|
train
|
def extrema(self, x0, y0, w, h):
"""
Returns the minimum and maximum values contained in a given area.
:param x0: Starting x index.
:param y0: Starting y index.
:param w: Width of the area to scan.
:param h: Height of the area to scan.
:return: Tuple containing the minimum and maximum values of the given area.
"""
minimum = 9223372036854775807  # 2**63 - 1, effectively +infinity here
maximum = 0
for y in range(y0, y0 + h):
for x in range(x0, x0 + w):
value = self[x, y]
if value != self.filler:
minimum = min(minimum, value)
maximum = max(maximum, value)
return minimum, maximum
|
python
|
{
"resource": ""
}
|
q6275
|
Cursebox.set_cursor
|
train
|
def set_cursor(self, x, y):
"""
Sets the cursor to the desired position.
:param x: X position
:param y: Y position
"""
curses.curs_set(1)
self.screen.move(y, x)
|
python
|
{
"resource": ""
}
|
q6276
|
Cursebox.put
|
train
|
def put(self, x, y, text, fg, bg):
"""
Puts a string at the desired coordinates using the provided colors.
:param x: X position
:param y: Y position
:param text: Text to write
:param fg: Foreground color number
:param bg: Background color number
"""
if x < self.width and y < self.height:
try:
self.screen.addstr(y, x, symbols.encode(text), self.pairs[fg, bg])
except curses.error:
# Ignore out of bounds error
pass
|
python
|
{
"resource": ""
}
|
q6277
|
Cursebox.poll_event
|
train
|
def poll_event(self):
"""
Waits for an event to happen and returns a string related to the event.
If the event is a normal (letter) key press, the letter is returned (case sensitive)
:return: Event type
"""
# Flush all inputs before this one that were done since last poll
curses.flushinp()
ch = self.screen.getch()
if ch == 27:
return EVENT_ESC
elif ch == -1 or ch == curses.KEY_RESIZE:
return EVENT_RESIZE
elif ch == 10 or ch == curses.KEY_ENTER:
return EVENT_ENTER
elif ch == 127 or ch == curses.KEY_BACKSPACE:
return EVENT_BACKSPACE
elif ch == curses.KEY_UP:
return EVENT_UP
elif ch == curses.KEY_DOWN:
return EVENT_DOWN
elif ch == curses.KEY_LEFT:
return EVENT_LEFT
elif ch == curses.KEY_RIGHT:
return EVENT_RIGHT
elif ch == 3:
return EVENT_CTRL_C
elif 0 <= ch < 256:
return chr(ch)
else:
return EVENT_UNHANDLED
|
python
|
{
"resource": ""
}
|
q6278
|
draw_panel
|
train
|
def draw_panel(cb, pool, params, plane):
"""
Draws the application's main panel, displaying the current Mandelbrot view.
:param cb: Cursebox instance.
:type cb: cursebox.Cursebox
:param params: Current application parameters.
:type params: params.Params
:param plane: Plane containing the current Mandelbrot values.
:type plane: plane.Plane
"""
w = cb.width - MENU_WIDTH - 1
h = cb.height - 1
params.plane_w = w
params.plane_h = h
params.resize(w, h)
palette = PALETTES[params.palette][1]
if params.reverse_palette:
palette = palette[::-1]
# draw_gradient(t, 1, 1, w, h, palette, params.dither_type)
generated = 0
missing_coords = []
# Check for coordinates that have no value in current plane
xs = range(params.plane_x0, params.plane_x0 + params.plane_w - 1)
ys = range(params.plane_y0, params.plane_y0 + params.plane_h - 1)
for x in xs:
for y in ys:
if plane[x, y] is None:
missing_coords.append((x, y, params))
generated += 1
# Compute all missing values via multiprocessing
n_processes = 0
if len(missing_coords) > 0:
n_cores = pool._processes
n_processes = len(missing_coords) // 256
if n_processes > n_cores:
n_processes = n_cores
start = time.time()
for i, result in enumerate(pool.imap_unordered(compute, missing_coords, chunksize=256)):
plane[result[0], result[1]] = result[2]
if time.time() - start > 2:
if i % 200 == 0:
draw_progress_bar(cb, "Render is taking a longer time...", i, len(missing_coords))
cb.refresh()
if generated > 0:
params.log("Added %d missing cells" % generated)
if n_processes > 1:
params.log("(Used %d processes)" % n_processes)
min_value = 0.0
max_value = params.max_iterations
max_iterations = params.max_iterations
if params.adaptive_palette:
min_value, max_value = plane.extrema(params.plane_x0, params.plane_y0,
params.plane_w, params.plane_h)
crosshairs_coord = None
if params.crosshairs:
crosshairs_coord = params.crosshairs_coord
# Draw all values in cursebox
for x in xs:
for y in ys:
value = (plane[x, y] + params.palette_offset) % (params.max_iterations + 1)
if params.adaptive_palette:
# Remap values from (min_value, max_value) to (0, max_iterations)
if max_value - min_value > 0:
value = ((value - min_value) / (max_value - min_value)) * max_iterations
else:
value = max_iterations
# Dithered mode
if params.dither_type < 2:
draw_dithered_color(cb, x - params.plane_x0 + 1,
y - params.plane_y0 + 1,
palette, params.dither_type,
value, max_iterations,
crosshairs_coord=crosshairs_coord)
# 256 colors mode
else:
draw_color(cb, x - params.plane_x0 + 1,
y - params.plane_y0 + 1,
value, max_iterations, palette,
crosshairs_coord=crosshairs_coord)
# Draw bounding box
draw_box(cb, 0, 0, w + 1, h + 1)
|
python
|
{
"resource": ""
}
|
q6279
|
update_display
|
train
|
def update_display(cb, pool, params, plane, qwertz):
"""
Draws everything.
:param cb: Cursebox instance.
:type cb: cursebox.Cursebox
:param params: Current application parameters.
:type params: params.Params
:param plane: Plane containing the current Mandelbrot values.
:type plane: plane.Plane
:return:
"""
cb.clear()
draw_panel(cb, pool, params, plane)
update_position(params) # Update Mandelbrot-space coordinates before drawing them
draw_menu(cb, params, qwertz)
cb.refresh()
|
python
|
{
"resource": ""
}
|
q6280
|
save
|
train
|
def save(params):
"""
Saves the current parameters to a file.
:param params: Current application parameters.
:return:
"""
if is_python3():
import pickle
cPickle = pickle
else:
import cPickle
ts = datetime.datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d_%H-%M-%S")
if not os.path.exists("saves/"):
os.makedirs("saves/")
with open("saves/almonds_%s.params" % ts, "wb") as f:
cPickle.dump(params, f)
params.log("Current scene saved!")
|
python
|
{
"resource": ""
}
|
q6281
|
capture
|
train
|
def capture(cb, pool, params):
"""
Renders and saves a screen-sized picture of the current position.
:param cb: Cursebox instance.
:type cb: cursebox.Cursebox
:param params: Current application parameters.
:type params: params.Params
"""
w, h = screen_resolution()
# Re-adapt dimensions to match current plane ratio
old_ratio = w / h
new_ratio = params.plane_ratio
if old_ratio > new_ratio:
w = int(h * new_ratio)
else:
h = int(w / new_ratio)
image = Image.new("RGB", (w, h), "white")
pixels = image.load()
# FIXME: refactor common code to get_palette(params)
palette = PALETTES[params.palette][1]
if params.reverse_palette:
palette = palette[::-1]
# All coordinates to be computed as single arguments for processes
coords = [(x, y, w, h, params) for x in range(w) for y in range(h)]
results = []
# Dispatch work to pool and draw results as they come in
for i, result in enumerate(pool.imap_unordered(compute_capture, coords, chunksize=256)):
results.append(result)
if i % 2000 == 0:
draw_progress_bar(cb, "Capturing current scene...", i, w * h)
cb.refresh()
min_value = 0.0
max_value = params.max_iterations
max_iterations = params.max_iterations
if params.adaptive_palette:
from operator import itemgetter
min_value = min(results, key=itemgetter(2))[2]
max_value = max(results, key=itemgetter(2))[2]
# Draw pixels
for result in results:
value = result[2]
if params.adaptive_palette:
# Remap values from (min_value, max_value) to (0, max_iterations)
if max_value - min_value > 0:
value = ((value - min_value) / (max_value - min_value)) * max_iterations
else:
value = max_iterations
pixels[result[0], result[1]] = get_color(value, params.max_iterations, palette)
if not os.path.exists("captures/"):
os.makedirs("captures/")
ts = datetime.datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d_%H-%M-%S")
filename = "captures/almonds_%s.png" % ts
image.save(filename, "PNG")
params.log("Current scene captured!")
params.log("(Used %d processes)" % pool._processes)
open_file(filename)
|
python
|
{
"resource": ""
}
|
q6282
|
cycle
|
train
|
def cycle(cb, pool, params, plane):
"""
Fun function to do a palette cycling animation.
:param cb: Cursebox instance.
:type cb: cursebox.Cursebox
:param params: Current application parameters.
:type params: params.Params
:param plane: Plane containing the current Mandelbrot values.
:type plane: plane.Plane
:return:
"""
step = params.max_iterations // 20
if step == 0:
step = 1
for i in range(0, params.max_iterations, step):
params.palette_offset = i
draw_panel(cb, pool, params, plane)
cb.refresh()
params.palette_offset = 0
|
python
|
{
"resource": ""
}
|
q6283
|
init_coords
|
train
|
def init_coords(cb, params):
"""
Initializes coordinates and zoom for first use.
Loads coordinates from Mandelbrot-space.
:param cb: Cursebox instance.
:type cb: cursebox.Cursebox
:param params: Current application parameters.
:type params: params.Params
:return:
"""
w = cb.width - MENU_WIDTH - 1
h = cb.height - 1
params.plane_w = w
params.plane_h = h
params.resize(w, h)
zoom(params, 1)
|
python
|
{
"resource": ""
}
|
q6284
|
screen_resolution
|
train
|
def screen_resolution():
"""
Returns the current screen's resolution.
Should be multi-platform.
:return: A tuple containing the width and height of the screen.
"""
w = 0
h = 0
try:
# Windows
import ctypes
user32 = ctypes.windll.user32
w, h = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
except AttributeError:
try:
# Mac OS X
import AppKit
size = AppKit.NSScreen.screens()[0].frame().size
w, h = int(size.width), int(size.height)
except ImportError:
try:
# Linux
import Xlib
import Xlib.display
display = Xlib.display.Display()
root = display.screen().root
size = root.get_geometry()
w, h = size.width, size.height
except ImportError:
w = 1920
h = 1080
return w, h
|
python
|
{
"resource": ""
}
|
q6285
|
open_file
|
train
|
def open_file(filename):
"""
Multi-platform way to make the OS open a file with its default application
"""
if sys.platform.startswith("darwin"):
subprocess.call(("open", filename))
elif sys.platform == "cygwin":
subprocess.call(("cygstart", filename))
elif os.name == "nt":
os.system("start %s" % filename)
elif os.name == "posix":
subprocess.call(("xdg-open", filename))
|
python
|
{
"resource": ""
}
|
q6286
|
mandelbrot_iterate
|
train
|
def mandelbrot_iterate(c, max_iterations, julia_seed=None):
"""
Returns the number of iterations before escaping the Mandelbrot fractal.
:param c: Coordinates as a complex number
:type c: complex
:param max_iterations: Limit of how many iterations are attempted.
:param julia_seed: Optional fixed complex seed; when given, the corresponding Julia set is computed instead.
:return: Tuple containing the last complex number in the sequence and the number of iterations.
"""
z = c
if julia_seed is not None:
c = julia_seed
for iterations in range(max_iterations):
z = z * z + c
if abs(z) > 1000:
return z, iterations
return z, max_iterations
|
python
|
{
"resource": ""
}
|
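Two quick sanity checks of the escape-time loop above, assuming mandelbrot_iterate is in scope:

z, n = mandelbrot_iterate(0 + 0j, 50)
assert n == 50  # the origin never escapes: z stays at 0
z, n = mandelbrot_iterate(2 + 0j, 50)
assert n == 2 and abs(z) > 1000  # 2 -> 6 -> 38 -> 1446 escapes on the third pass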
q6287
|
mandelbrot
|
train
|
def mandelbrot(x, y, params):
"""
Computes the number of iterations of the given plane-space coordinates.
:param x: X coordinate on the plane.
:param y: Y coordinate on the plane.
:param params: Current application parameters.
:type params: params.Params
:return: Discrete number of iterations.
"""
mb_x, mb_y = get_coords(x, y, params)
mb = mandelbrot_iterate(mb_x + 1j * mb_y, params.max_iterations, params.julia_seed)
return mb[1]
|
python
|
{
"resource": ""
}
|
q6288
|
mandelbrot_capture
|
train
|
def mandelbrot_capture(x, y, w, h, params):
"""
Computes the number of iterations of the given pixel-space coordinates,
for high-res capture purposes.
Contrary to :func:`mandelbrot`, this function returns a continuous
number of iterations to avoid banding.
:param x: X coordinate on the picture
:param y: Y coordinate on the picture
:param w: Width of the picture
:param h: Height of the picture
:param params: Current application parameters.
:type params: params.Params
:return: Continuous number of iterations.
"""
# FIXME: Figure out why these corrections are necessary or how to make them perfect
# Viewport is offset compared to window when capturing without these (found empirically)
if params.plane_ratio >= 1.0:
x -= params.plane_w
else:
x += 3.0 * params.plane_w
ratio = w / h
n_x = x * 2.0 / w * ratio - 1.0
n_y = y * 2.0 / h - 1.0
mb_x = params.zoom * n_x + params.mb_cx
mb_y = params.zoom * n_y + params.mb_cy
mb = mandelbrot_iterate(mb_x + 1j * mb_y, params.max_iterations, params.julia_seed)
z, iterations = mb
# Continuous iteration count for no banding
# https://en.wikipedia.org/wiki/Mandelbrot_set#Continuous_.28smooth.29_coloring
nu = params.max_iterations
if iterations < params.max_iterations:
nu = iterations + 2 - abs(cmath.log(cmath.log(abs(z)) / cmath.log(params.max_iterations), 2))
return clamp(nu, 0, params.max_iterations)
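# The expression above is a variant of the standard continuous-colouring
# formula nu = n + 1 - log2(log|z| / log R), here normalized by
# log(max_iterations). A sketch isolating it for reuse (assumed names;
# cmath is already imported by this module, clamping is left to the caller):

def smooth_iterations(z, iterations, max_iterations):
    """Continuous iteration count; non-escaping points map to max_iterations."""
    if iterations >= max_iterations:
        return float(max_iterations)
    return iterations + 2 - abs(cmath.log(cmath.log(abs(z)) / cmath.log(max_iterations), 2))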
|
python
|
{
"resource": ""
}
|
q6289
|
update_position
|
train
|
def update_position(params):
"""
Computes the center of the viewport's Mandelbrot-space coordinates.
:param params: Current application parameters.
:type params: params.Params
"""
cx = params.plane_x0 + params.plane_w / 2.0
cy = params.plane_y0 + params.plane_h / 2.0
params.mb_cx, params.mb_cy = get_coords(cx, cy, params)
|
python
|
{
"resource": ""
}
|
q6290
|
zoom
|
train
|
def zoom(params, factor):
"""
Applies a zoom on the current parameters.
Computes the top-left plane-space coordinates from the Mandelbrot-space coordinates.
:param params: Current application parameters.
:param factor: Zoom factor by which the zoom ratio is divided (bigger factor, more zoom)
"""
params.zoom /= factor
n_x = params.mb_cx / params.zoom
n_y = params.mb_cy / params.zoom
params.plane_x0 = int((n_x + 1.0) * params.plane_w / (2.0 * params.plane_ratio)) - params.plane_w // 2
params.plane_y0 = int((n_y + 1.0) * params.plane_h / 2.0) - params.plane_h // 2
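# Worked example (hypothetical Params instance p): each call divides the
# zoom ratio, so two successive zoom(p, 2) calls narrow the viewport to a
# quarter of the original Mandelbrot-space radius.
#
#     p.zoom = 1.0
#     zoom(p, 2)   # p.zoom == 0.5
#     zoom(p, 2)   # p.zoom == 0.25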
|
python
|
{
"resource": ""
}
|
q6291
|
Params.resize
|
train
|
def resize(self, w, h):
"""
    Used when resizing the plane; resets the plane ratio factor.
:param w: New width of the visible section of the plane.
:param h: New height of the visible section of the plane.
"""
self.plane_w = w
self.plane_h = h
self.plane_ratio = self.char_ratio * w / h
if self.crosshairs:
self.crosshairs_coord = ((w + 2) // 2, (h + 2) // 2)
|
python
|
{
"resource": ""
}
|
q6292
|
check_sender_and_entity_handle_match
|
train
|
def check_sender_and_entity_handle_match(sender_handle, entity_handle):
"""Ensure that sender and entity handles match.
    Basically, we've already verified the sender is who they say they are when receiving the payload. However, the
    sender might be trying to set another author in the payload itself, since Diaspora has the sender in both the
    payload headers AND the object. We must ensure they're the same.
"""
if sender_handle != entity_handle:
logger.warning("sender_handle and entity_handle don't match, aborting! sender_handle: %s, entity_handle: %s",
sender_handle, entity_handle)
return False
return True
|
python
|
{
"resource": ""
}
|
q6293
|
transform_attributes
|
train
|
def transform_attributes(attrs, cls):
"""Transform some attribute keys.
:param attrs: Properties from the XML
:type attrs: dict
:param cls: Class of the entity
:type cls: class
"""
transformed = {}
for key, value in attrs.items():
if value is None:
value = ""
if key == "text":
transformed["raw_content"] = value
elif key == "author":
if cls == DiasporaProfile:
# Diaspora Profile XML message contains no GUID. We need the guid. Fetch it.
profile = retrieve_and_parse_profile(value)
transformed['id'] = value
transformed["guid"] = profile.guid
else:
transformed["actor_id"] = value
transformed["handle"] = value
elif key == 'guid':
if cls != DiasporaProfile:
transformed["id"] = value
transformed["guid"] = value
elif key in ("root_author", "recipient"):
transformed["target_id"] = value
transformed["target_handle"] = value
elif key in ("target_guid", "root_guid", "parent_guid"):
transformed["target_id"] = value
transformed["target_guid"] = value
elif key in ("first_name", "last_name"):
values = [attrs.get('first_name'), attrs.get('last_name')]
values = [v for v in values if v]
transformed["name"] = " ".join(values)
        elif key in ("image_url", "image_url_small", "image_url_medium"):
            sizes = {"image_url": "large", "image_url_small": "small", "image_url_medium": "medium"}
            if "image_urls" not in transformed:
                transformed["image_urls"] = {}
            transformed["image_urls"][sizes[key]] = value
elif key == "tag_string":
if value:
transformed["tag_list"] = value.replace("#", "").split(" ")
elif key == "bio":
transformed["raw_content"] = value
elif key == "searchable":
transformed["public"] = True if value == "true" else False
        elif key == "target_type" and cls == DiasporaRetraction:
transformed["entity_type"] = DiasporaRetraction.entity_type_from_remote(value)
elif key == "remote_photo_path":
transformed["remote_path"] = value
elif key == "remote_photo_name":
transformed["remote_name"] = value
elif key == "status_message_guid":
transformed["linked_guid"] = value
transformed["linked_type"] = "Post"
elif key == "author_signature":
transformed["signature"] = value
elif key in BOOLEAN_KEYS:
transformed[key] = True if value == "true" else False
elif key in DATETIME_KEYS:
transformed[key] = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ")
elif key in INTEGER_KEYS:
transformed[key] = int(value)
else:
transformed[key] = value
return transformed
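# Hedged illustration of the mapping for a hypothetical comment payload
# (the exact key set depends on the Diaspora protocol version; DiasporaComment
# is assumed to be imported alongside DiasporaProfile):
#
#     >>> transform_attributes(
#     ...     {"guid": "abc123", "parent_guid": "def456",
#     ...      "text": "Hello!", "author": "alice@example.com"},
#     ...     DiasporaComment,
#     ... )
#     {'id': 'abc123', 'guid': 'abc123',
#      'target_id': 'def456', 'target_guid': 'def456',
#      'raw_content': 'Hello!',
#      'actor_id': 'alice@example.com', 'handle': 'alice@example.com'}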
|
python
|
{
"resource": ""
}
|
q6294
|
get_element_child_info
|
train
|
def get_element_child_info(doc, attr):
    Get information from child elements of this element as a list, since order is important.
Don't include signature tags.
:param doc: XML element
:param attr: Attribute to get from the elements, for example "tag" or "text".
"""
props = []
for child in doc:
if child.tag not in ["author_signature", "parent_author_signature"]:
props.append(getattr(child, attr))
return props
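# Minimal sketch assuming lxml elements (signature elements are skipped):
#
#     >>> from lxml import etree
#     >>> doc = etree.fromstring(
#     ...     "<comment><guid>abc</guid><text>hi</text>"
#     ...     "<author_signature>sig</author_signature></comment>")
#     >>> get_element_child_info(doc, "tag")
#     ['guid', 'text']
#     >>> get_element_child_info(doc, "text")
#     ['abc', 'hi']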
|
python
|
{
"resource": ""
}
|
q6295
|
rfc7033_webfinger_view
|
train
|
def rfc7033_webfinger_view(request, *args, **kwargs):
"""
Django view to generate an RFC7033 webfinger.
"""
resource = request.GET.get("resource")
if not resource:
return HttpResponseBadRequest("No resource found")
if not resource.startswith("acct:"):
return HttpResponseBadRequest("Invalid resource")
handle = resource.replace("acct:", "").lower()
profile_func = get_function_from_config("get_profile_function")
try:
profile = profile_func(handle=handle, request=request)
except Exception as exc:
logger.warning("rfc7033_webfinger_view - Failed to get profile by handle %s: %s", handle, exc)
return HttpResponseNotFound()
config = get_configuration()
webfinger = RFC7033Webfinger(
id=profile.id,
handle=profile.handle,
guid=profile.guid,
base_url=config.get('base_url'),
profile_path=get_path_from_url(profile.url),
hcard_path=config.get('hcard_path'),
atom_path=get_path_from_url(profile.atom_url),
search_path=config.get('search_path'),
)
return JsonResponse(
webfinger.render(),
content_type="application/jrd+json",
)
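# Hypothetical URL hookup (path and name are assumptions, not from the source):
#
#     from django.urls import path
#
#     urlpatterns = [
#         path(".well-known/webfinger", rfc7033_webfinger_view, name="webfinger"),
#     ]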
|
python
|
{
"resource": ""
}
|
q6296
|
retrieve_diaspora_hcard
|
train
|
def retrieve_diaspora_hcard(handle):
"""
Retrieve a remote Diaspora hCard document.
:arg handle: Remote handle to retrieve
:return: str (HTML document)
"""
webfinger = retrieve_and_parse_diaspora_webfinger(handle)
document, code, exception = fetch_document(webfinger.get("hcard_url"))
if exception:
return None
return document
|
python
|
{
"resource": ""
}
|
q6297
|
retrieve_and_parse_diaspora_webfinger
|
train
|
def retrieve_and_parse_diaspora_webfinger(handle):
"""
    Retrieve and parse a remote Diaspora webfinger document.
:arg handle: Remote handle to retrieve
:returns: dict
"""
try:
host = handle.split("@")[1]
except AttributeError:
logger.warning("retrieve_and_parse_diaspora_webfinger: invalid handle given: %s", handle)
return None
document, code, exception = fetch_document(
host=host, path="/.well-known/webfinger?resource=acct:%s" % quote(handle),
)
if document:
return parse_diaspora_webfinger(document)
hostmeta = retrieve_diaspora_host_meta(host)
if not hostmeta:
return None
url = hostmeta.find_link(rels="lrdd").template.replace("{uri}", quote(handle))
document, code, exception = fetch_document(url)
if exception:
return None
return parse_diaspora_webfinger(document)
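# Usage sketch (hypothetical handle): RFC 7033 webfinger is tried first,
# with a fallback to the legacy host-meta lrdd template.
#
#     webfinger = retrieve_and_parse_diaspora_webfinger("alice@example.com")
#     hcard_url = webfinger.get("hcard_url") if webfinger else None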
|
python
|
{
"resource": ""
}
|
q6298
|
retrieve_diaspora_host_meta
|
train
|
def retrieve_diaspora_host_meta(host):
"""
Retrieve a remote Diaspora host-meta document.
:arg host: Host to retrieve from
:returns: ``XRD`` instance
"""
document, code, exception = fetch_document(host=host, path="/.well-known/host-meta")
if exception:
return None
xrd = XRD.parse_xrd(document)
return xrd
|
python
|
{
"resource": ""
}
|
q6299
|
_get_element_text_or_none
|
train
|
def _get_element_text_or_none(document, selector):
"""
Using a CSS selector, get the element and return the text, or None if no element.
:arg document: ``HTMLElement`` document
:arg selector: CSS selector
:returns: str or None
"""
element = document.cssselect(selector)
if element:
return element[0].text
return None
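# Minimal sketch (assumes lxml plus the cssselect package):
#
#     >>> from lxml import html
#     >>> doc = html.fromstring("<div><span class='nickname'>alice</span></div>")
#     >>> _get_element_text_or_none(doc, ".nickname")
#     'alice'
#     >>> _get_element_text_or_none(doc, ".missing") is None
#     True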
|
python
|
{
"resource": ""
}
|