_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q44800 | setup_app | train | def setup_app(command, conf, vars):
    """Place any commands to setup tg2raptorized here"""
    # Load the TG2/Pylons environment from the global and local config sections.
    load_environment(conf.global_conf, conf.local_conf)
    # Create the database schema, then load any bootstrap data.
    setup_schema(command, conf, vars)
    bootstrap.bootstrap(command, conf, vars) | python | {
"resource": ""
} |
q44801 | VocationFilter.from_name | train | def from_name(cls, name, all_fallback=True):
"""Gets a vocation filter from a vocation's name.
Parameters
----------
name: :class:`str`
The name of the vocation.
all_fallback: :class:`bool`
Whether to return :py:attr:`ALL` if no match is found. Otherwise, ``None`` will be returned.
Returns
-------
VocationFilter, optional:
The matching vocation filter.
"""
name = name.upper()
for vocation in cls: # type: VocationFilter
if vocation.name in name or vocation.name[:-1] in name and vocation != cls.ALL:
return vocation
if all_fallback or name.upper() == "ALL":
return cls.ALL
return None | python | {
"resource": ""
} |
q44802 | connect | train | def connect(*args, **kwargs):
    """Connect to the database. Passes arguments along to
    ``pymongo.connection.Connection`` unmodified.
    The Connection returned by this proxy method will be used by micromongo
    for all of its queries. Micromongo will alter the behavior of this
    connection object in some subtle ways; if you want a clean one, call
    ``micromongo.clean_connection`` after connecting."""
    # Remember the raw arguments so a clean connection can be rebuilt later.
    global __connection, __connection_args
    __connection_args = (args, dict(kwargs))
    # inject our class_router so query results are wrapped in model classes
    kwargs['class_router'] = class_router
    __connection = Connection(*args, **kwargs)
    return __connection | python | {
"resource": ""
} |
q44803 | Model.new | train | def new(cls, *args, **kwargs):
    """Create a new instance of this model based on its spec and either
    a map or the provided kwargs."""
    # Start from the spec's default document, then overlay either the single
    # positional mapping (when no kwargs were given) or the kwargs themselves.
    new = cls(make_default(getattr(cls, 'spec', {})))
    new.update(args[0] if args and not kwargs else kwargs)
    return new | python | {
"resource": ""
} |
q44804 | Model.find_one | train | def find_one(cls, *args, **kwargs):
    """Run a find_one on this model's collection. The arguments to
    ``Model.find_one`` are the same as to ``pymongo.Collection.find_one``."""
    # _collection_key is "<database>.<collection>"; resolve it against the
    # currently active connection returned by current().
    database, collection = cls._collection_key.split('.')
    return current()[database][collection].find_one(*args, **kwargs) | python | {
"resource": ""
} |
q44805 | apply | train | def apply(query, collection=None):
    """Enhance the query restricting not permitted collections.
    Get the permitted restricted collection for the current user from the
    user_info object and all the restricted collections from the
    restricted_collection_cache.
    """
    # No collection means nothing to restrict; pass the query through untouched.
    if not collection:
        return query
    # AND the caller's query with the collection's permission tree.
    result_tree = create_collection_query(collection)
    return AndOp(query, result_tree) | python | {
"resource": ""
} |
q44806 | SinonSpy.calledBefore | train | def calledBefore(self, spy): #pylint: disable=invalid-name
    """
    Compares the order in which two spies were called
    E.g.
    spy_a()
    spy_b()
    spy_a.calledBefore(spy_b) # True
    spy_b.calledBefore(spy_a) # False
    spy_a()
    spy_b.calledBefore(spy_a) # True
    Args: a Spy to compare with
    Return: Boolean True if this spy's first call was called before the given spy's last call
    """
    # A spy that was never called contributes False instead of None here.
    this_call = self.firstCall if self.firstCall is not None else False
    given_call = spy.lastCall if spy.lastCall is not None else False
    # True when this spy was called and the other never was, or when both were
    # called and this spy's first call precedes the other's last call.
    return (this_call and not given_call) or (this_call and given_call and this_call.callId < given_call.callId) | python | {
"resource": ""
} |
q44807 | SinonSpy.reset | train | def reset(self):
    """
    Resets the wrapped function: unwrap the target, then re-wrap it as a
    fresh spy so previously recorded call data is discarded.
    """
    super(SinonSpy, self).unwrap()
    super(SinonSpy, self).wrap2spy() | python | {
"resource": ""
} |
def b64decode(foo, *args):
    """Decode base64 data, accepting either ``str`` or ``bytes`` input.

    Only here for consistency with the matching encode helper: text input is
    UTF-8 encoded first because ``base64.b64decode`` operates on bytes.
    """
    data = foo.encode('utf8') if isinstance(foo, str) else foo
    return base64.b64decode(data, *args)
"resource": ""
} |
q44809 | UserLock.from_passphrase | train | def from_passphrase(cls, email, passphrase):
    """
    This performs key derivation from an email address and passphrase according
    to the miniLock specification.
    Specifically, the passphrase is digested with a standard blake2s 32-byte digest,
    then it is passed through scrypt with the email address as salt value using
    N = 2**17, r = 8, p = 1, and an output length of 32 bytes.
    The 32-byte digest from scrypt is then used as the Private Key from which
    the public key is derived.
    """
    pp_blake = pyblake2.blake2s(cls.ensure_bytes(passphrase)).digest()
    #pp_scrypt = scrypt.hash(pp_blake, cls.ensure_bytes(email), 2**17, 8, 1, 32)
    pp_scrypt = pylibscrypt.scrypt(pp_blake, cls.ensure_bytes(email), 2**17, 8, 1, 32)
    key = nacl.public.PrivateKey(pp_scrypt)
    return cls(key.public_key, key) | python | {
"resource": ""
} |
q44810 | UserLock.from_id | train | def from_id(cls, id):
    """
    This decodes an ID to a public key and verifies the checksum byte. ID
    structure in miniLock is the base58 encoded form of the public key
    appended with a single-byte digest from blake2s of the public key, as a
    simple check-sum.
    """
    decoded = cls.ensure_bytes(base58.b58decode(id))
    # Expect 32 bytes of public key plus 1 trailing checksum byte.
    assert_type_and_length('id', decoded, bytes, L=33)
    pk = nacl.public.PublicKey(decoded[:-1])
    cs = decoded[-1:]
    # Recompute the 1-byte blake2s digest of the key and compare with the tail.
    if cs != pyblake2.blake2s(pk.encode(), 1).digest():
        raise ValueError("Public Key does not match its attached checksum byte: id='{}', decoded='{}', given checksum='{}', calculated checksum={}".format(id, decoded, cs, pyblake2.blake2s(pk.encode(), 1).digest()))
    return cls(pk) | python | {
"resource": ""
} |
q44811 | UserLock.ephemeral | train | def ephemeral(cls):
    """
    Creates a new ephemeral key constructed using a raw 32-byte string from urandom.
    Ephemeral keys are used once for each encryption task and are then discarded;
    they are not intended for long-term or repeat use.
    """
    # os.urandom is a CSPRNG, suitable as raw key material.
    private_key = nacl.public.PrivateKey(os.urandom(32))
    return cls(private_key.public_key, private_key) | python | {
"resource": ""
} |
def piece_file(input_f, chunk_size):
    """
    Provides a streaming interface to file data in chunks of even size, which
    avoids MemoryErrors from loading whole files into RAM.

    :param input_f: binary file-like object opened for reading.
    :param chunk_size: maximum number of bytes per yielded chunk.
    :yields: successive chunks of up to ``chunk_size`` bytes (the last chunk
             may be shorter); yields nothing for an empty file.
    """
    # BUG FIX: the original kept a ``total_bytes`` running total that was never
    # used and, being updated after the next read, also missed the first chunk.
    # The dead counter is removed entirely.
    chunk = input_f.read(chunk_size)
    while chunk:
        yield chunk
        chunk = input_f.read(chunk_size)
"resource": ""
} |
q44813 | MiniLockHeader.decrypt | train | def decrypt(self, recipient_key):
    """
    Attempt decryption of header with a private key; returns decryptInfo.
    Returns a dictionary, not a new MiniLockHeader!
    """
    ephem = UserLock.from_b64(self.dict['ephemeral'])
    ephem_box = nacl.public.Box(recipient_key.private_key, ephem.public_key)
    # Scan available entries in decryptInfo and try to decrypt each; break when
    # successful with any.
    for nonce, crypted_decryptInfo in self.dict['decryptInfo'].items():
        raw_nonce = b64decode(nonce)
        crypted_decryptInfo = b64decode(crypted_decryptInfo)
        try:
            decryptInfo_raw = ephem_box.decrypt(crypted_decryptInfo, raw_nonce)
            decryptInfo = json.loads(decryptInfo_raw.decode('utf8'))
            success_nonce = raw_nonce
            break
        except Exception as E:
            # Failures are expected for entries addressed to other recipients;
            # simply try the next entry.
            #print("Decoding exception: '{}' - with ciphertext '{}'".format(E, crypted_decryptInfo))
            pass
    else:
        # for/else: the loop completed without break, so nothing decrypted.
        raise ValueError("No decryptInfo block found for this recipient Key.")
    if not recipient_key.userID == decryptInfo['recipientID']:
        raise ValueError("Decrypted a meta block but stated recipient is not this private key!")
    # Now work with decryptInfo and success_nonce to extract file data.
    senderKey = UserLock.from_id(decryptInfo['senderID'])
    senderBox = nacl.public.Box(recipient_key.private_key, senderKey.public_key)
    fileInfo_raw = b64decode(decryptInfo['fileInfo'])
    # fileInfo is encrypted by the *sender* (not the ephemeral key) under the
    # same nonce that succeeded above.
    fileInfo_decrypted = senderBox.decrypt(fileInfo_raw, success_nonce).decode('utf8')
    fileInfo = json.loads(fileInfo_decrypted)
    # Overwrite decryptInfo's fileInfo key
    decryptInfo['fileInfo'] = fileInfo
    return decryptInfo | python | {
"resource": ""
} |
q44814 | unique_index | train | def unique_index(data, keys=None, fail_on_dup=True):
"""
RETURN dict THAT USES KEYS TO INDEX DATA
ONLY ONE VALUE ALLOWED PER UNIQUE KEY
"""
o = UniqueIndex(listwrap(keys), fail_on_dup=fail_on_dup)
for d in data:
try:
o.add(d)
except Exception as e:
o.add(d)
Log.error(
"index {{index}} is not unique {{key}} maps to both {{value1}} and {{value2}}",
index=keys,
key=select([d], keys)[0],
value1=o[d],
value2=d,
cause=e,
)
return o | python | {
"resource": ""
} |
q44815 | tuple | train | def tuple(data, field_name):
    """
    RETURN LIST OF TUPLES
    NOTE: this deliberately shadows the builtin ``tuple`` inside this module.
    """
    if isinstance(data, Cube):
        Log.error("not supported yet")
    if isinstance(data, FlatList):
        Log.error("not supported yet")
    if is_data(field_name) and "value" in field_name:
        # SIMPLIFY {"value":value} AS STRING
        field_name = field_name["value"]
    # SIMPLE PYTHON ITERABLE ASSUMED
    if is_text(field_name):
        if len(split_field(field_name)) == 1:
            # Shallow single field: direct per-row item access.
            return [(d[field_name],) for d in data]
        else:
            path = split_field(field_name)
            output = []
            flat_list._tuple1(data, path, 0, output)
            return output
    elif is_list(field_name):
        paths = [_select_a_field(f) for f in field_name]
        output = FlatList()
        _tuple((), unwrap(data), paths, 0, output)
        return output
    else:
        paths = [_select_a_field(field_name)]
        output = FlatList()
        _tuple((), data, paths, 0, output)
        return output | python | {
"resource": ""
} |
q44816 | select | train | def select(data, field_name):
    """
    Return a list with values from field_name, dispatching on the container type.
    """
    if isinstance(data, Cube):
        return data._select(_normalize_selects(field_name))
    if isinstance(data, PartFlatList):
        return data.select(field_name)
    if isinstance(data, UniqueIndex):
        data = (
            data._data.values()
        )  # THE SELECT ROUTINE REQUIRES dicts, NOT Data WHILE ITERATING
    if is_data(data):
        return select_one(data, field_name)
    if is_data(field_name):
        field_name = wrap(field_name)
        if field_name.value in ["*", "."]:
            return data
        if field_name.value:
            # SIMPLIFY {"value":value} AS STRING
            field_name = field_name.value
    # SIMPLE PYTHON ITERABLE ASSUMED
    if is_text(field_name):
        path = split_field(field_name)
        if len(path) == 1:
            return FlatList([d[field_name] for d in data])
        else:
            output = FlatList()
            flat_list._select1(data, path, 0, output)
            return output
    elif is_list(field_name):
        keys = [_select_a_field(wrap(f)) for f in field_name]
        return _select(Data(), unwrap(data), keys, 0)
    else:
        keys = [_select_a_field(field_name)]
        return _select(Data(), unwrap(data), keys, 0) | python | {
"resource": ""
} |
q44817 | wrap_function | train | def wrap_function(func):
    """
    RETURN A THREE-PARAMETER WINDOW FUNCTION TO MATCH
    NOTE(review): a callable taking more than three parameters falls through
    every branch and the function implicitly returns None -- confirm callers
    never pass such functions.
    """
    if is_text(func):
        # Strings are treated as source expressions and compiled first.
        return compile_expression(func)
    numarg = func.__code__.co_argcount
    if numarg == 0:
        def temp(row, rownum, rows):
            return func()
        return temp
    elif numarg == 1:
        def temp(row, rownum, rows):
            return func(row)
        return temp
    elif numarg == 2:
        def temp(row, rownum, rows):
            return func(row, rownum)
        return temp
    elif numarg == 3:
        # Already matches the (row, rownum, rows) signature.
        return func | python | {
"resource": ""
} |
def get_context_hints_per_source(context_renderers):
    """
    Build a per-source mapping of context hints from a list of context renderers.

    Several renderers can target the same source (one per render target), so
    their hints are merged: app/model names are overwritten by each renderer
    seen, while the select_related and prefetch_related hint sets are unioned.
    """
    def _empty_hint():
        # Fresh accumulator for a (source, key) pair.
        return {
            'app_name': None,
            'model_name': None,
            'select_related': set(),
            'prefetch_related': set(),
        }

    merged = defaultdict(lambda: defaultdict(_empty_hint))
    for renderer in context_renderers:
        hints_by_key = renderer.context_hints or {}
        for key, hints in hints_by_key.items():
            for source in renderer.get_sources():
                entry = merged[source][key]
                entry['app_name'] = hints['app_name']
                entry['model_name'] = hints['model_name']
                entry['select_related'].update(hints.get('select_related', []))
                entry['prefetch_related'].update(hints.get('prefetch_related', []))
    return merged
"resource": ""
} |
def dict_find(d, which_key):
    """
    Recursively search nested dicts/lists/tuples for ``which_key``.

    Yields ``(containing_dict, value)`` tuples for every dictionary in which
    the key appears, descending into all values and sequence elements.
    """
    if isinstance(d, (list, tuple)):
        # Sequences: recurse into every element.
        for element in d:
            yield from dict_find(element, which_key)
    elif isinstance(d, dict):
        # Dicts: report a hit for the key itself, then recurse into the value.
        for key, value in d.items():
            if key == which_key:
                yield d, value
            yield from dict_find(value, which_key)
"resource": ""
} |
q44820 | fetch_model_data | train | def fetch_model_data(model_querysets, model_ids_to_fetch):
    """
    Given a dictionary of models to querysets and model IDs to models, fetch the IDs
    for every model and return the objects in the following structure.
    {
        model: {
            id: obj,
            ...
        },
        ...
    }
    """
    # One filtered query per model; id_dict maps each fetched object by its id.
    return {
        model: id_dict(model_querysets[model].filter(id__in=ids_to_fetch))
        for model, ids_to_fetch in model_ids_to_fetch.items()
    } | python | {
"resource": ""
} |
q44821 | load_fetched_objects_into_contexts | train | def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
    """
    Given the fetched model data and the context hints for each source, go through each
    event and populate the contexts with the loaded information.
    """
    for event in events:
        context_hints = context_hints_per_source.get(event.source, {})
        for context_key, hints in context_hints.items():
            model = get_model(hints['app_name'], hints['model_name'])
            # Replace every occurrence of the key in the nested context with
            # the fetched model instance(s); ids not fetched resolve to None.
            for d, value in dict_find(event.context, context_key):
                if isinstance(value, list):
                    for i, model_id in enumerate(d[context_key]):
                        d[context_key][i] = model_data[model].get(model_id)
                else:
                    d[context_key] = model_data[model].get(value) | python | {
"resource": ""
} |
q44822 | load_renderers_into_events | train | def load_renderers_into_events(events, mediums, context_renderers, default_rendering_style):
    """
    Given the events and the context renderers, load the renderers into the event objects
    so that they may be able to call the 'render' method later on.
    """
    # Make a mapping of source groups and rendering styles to context renderers. Do
    # the same for sources and rendering styles to context renderers
    source_group_style_to_renderer = {
        (cr.source_group_id, cr.rendering_style_id): cr
        for cr in context_renderers if cr.source_group_id
    }
    source_style_to_renderer = {
        (cr.source_id, cr.rendering_style_id): cr
        for cr in context_renderers if cr.source_id
    }
    for e in events:
        for m in mediums:
            # Try the following when loading a context renderer for a medium in an event.
            # 1. Try to look up the renderer based on the source group and medium rendering style
            # 2. If step 1 doesn't work, look up based on the source and medium rendering style
            # 3. If step 2 doesn't work, look up based on the source group and default rendering style
            # 4. if step 3 doesn't work, look up based on the source and default rendering style
            # If none of those steps work, this event will not be able to be rendered for the medium
            cr = source_group_style_to_renderer.get((e.source.group_id, m.rendering_style_id))
            if not cr:
                cr = source_style_to_renderer.get((e.source_id, m.rendering_style_id))
            if not cr and default_rendering_style:
                cr = source_group_style_to_renderer.get((e.source.group_id, default_rendering_style.id))
            if not cr and default_rendering_style:
                cr = source_style_to_renderer.get((e.source_id, default_rendering_style.id))
            if cr:
                # Stash the chosen renderer on the event, keyed by medium.
                e._context_renderers[m] = cr | python | {
"resource": ""
} |
q44823 | load_contexts_and_renderers | train | def load_contexts_and_renderers(events, mediums):
    """
    Given a list of events and mediums, load the context model data into the contexts of the events.
    """
    sources = {event.source for event in events}
    rendering_styles = {medium.rendering_style for medium in mediums if medium.rendering_style}
    # Fetch the default rendering style and add it to the set of rendering styles
    default_rendering_style = get_default_rendering_style()
    if default_rendering_style:
        rendering_styles.add(default_rendering_style)
    # Renderers can match either by source directly or by the source's group.
    context_renderers = ContextRenderer.objects.filter(
        Q(source__in=sources, rendering_style__in=rendering_styles) |
        Q(source_group_id__in=[s.group_id for s in sources], rendering_style__in=rendering_styles)).select_related(
        'source', 'rendering_style').prefetch_related('source_group__source_set')
    # Pipeline: hints -> querysets -> ids -> fetched objects -> populated events.
    context_hints_per_source = get_context_hints_per_source(context_renderers)
    model_querysets = get_querysets_for_context_hints(context_hints_per_source)
    model_ids_to_fetch = get_model_ids_to_fetch(events, context_hints_per_source)
    model_data = fetch_model_data(model_querysets, model_ids_to_fetch)
    load_fetched_objects_into_contexts(events, model_data, context_hints_per_source)
    load_renderers_into_events(events, mediums, context_renderers, default_rendering_style)
    return events | python | {
"resource": ""
} |
q44824 | get_printer | train | def get_printer(colors: bool = True, width_limit: bool = True, disabled: bool = False) -> Printer:
    """
    Returns an already initialized instance of the printer.
    :param colors: If False, no colors will be printed.
    :param width_limit: If True, printing width will be limited by console width.
    :param disabled: If True, nothing will be printed.
    """
    global _printer
    global _colors
    # Make sure we can print colors if needed.
    colors = colors and _colors
    # If the printer was never defined before, or the settings have changed,
    # (re)create the cached singleton with the requested configuration.
    if not _printer or (colors != _printer._colors) or (width_limit != _printer._width_limit):
        _printer = Printer(DefaultWriter(disabled=disabled), colors=colors, width_limit=width_limit)
    return _printer | python | {
"resource": ""
} |
q44825 | _get_windows_console_width | train | def _get_windows_console_width() -> int:
    """
    A small utility function for getting the current console window's width, in Windows.
    :return: The current console window's width.
    """
    from ctypes import byref, windll
    import pyreadline
    # -11 is STD_OUTPUT_HANDLE; query the screen buffer info for its width.
    out = windll.kernel32.GetStdHandle(-11)
    info = pyreadline.console.CONSOLE_SCREEN_BUFFER_INFO()
    windll.kernel32.GetConsoleScreenBufferInfo(out, byref(info))
    return info.dwSize.X | python | {
"resource": ""
} |
q44826 | _in_qtconsole | train | def _in_qtconsole() -> bool:
    """
    A small utility function which determines if we're running in QTConsole's context.
    """
    try:
        from IPython import get_ipython
        try:
            # Newer IPython versions expose the shell class from ipykernel.
            from ipykernel.zmqshell import ZMQInteractiveShell
            shell_object = ZMQInteractiveShell
        except ImportError:
            # Fall back to the legacy IPython location of the same class.
            from IPython.kernel.zmq import zmqshell
            shell_object = zmqshell.ZMQInteractiveShell
        return isinstance(get_ipython(), shell_object)
    except Exception:
        # Any failure (IPython absent, etc.) means we are not in QTConsole.
        return False | python | {
"resource": ""
} |
q44827 | get_console_width | train | def get_console_width() -> int:
    """
    A small utility function for getting the current console window's width.
    :return: The current console window's width (falls back to 80 on failure).
    """
    # Assigning the value once, as frequent call to this function
    # causes a major slow down(ImportErrors + isinstance).
    global _IN_QT
    if _IN_QT is None:
        _IN_QT = _in_qtconsole()
    try:
        if _IN_QT:
            # QTConsole determines and handles the max line length by itself.
            width = sys.maxsize
        else:
            width = _get_windows_console_width() if os.name == 'nt' else _get_linux_console_width()
        if width <= 0:
            return 80
        return width
    except Exception:
        # Default value.
        return 80 | python | {
"resource": ""
} |
q44828 | Printer.group | train | def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
    """
    Returns a context manager which adds an indentation before each line.
    :param indent: Number of spaces to print.
    :param add_line: If True, a new line will be printed after the group.
    :return: A TextGroup context manager.
    """
    return _TextGroup(self, indent, add_line) | python | {
"resource": ""
} |
q44829 | Printer._split_lines | train | def _split_lines(self, original_lines: List[str]) -> List[str]:
    """
    Splits the original lines list according to the current console width and group indentations.
    ANSI color escape sequences are skipped when measuring line length so that
    invisible characters do not shorten the visible line.
    :param original_lines: The original lines list to split.
    :return: A list of the new width-formatted lines.
    """
    console_width = get_console_width()
    # We take indent into account only in the inner group lines.
    max_line_length = console_width - len(self.LINE_SEP) - self._last_position - \
                      (self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
    lines = []
    for i, line in enumerate(original_lines):
        fixed_line = []
        colors_counter = 0
        line_index = 0
        while line_index < len(line):
            c = line[line_index]
            # Check if we're in a color block.
            if self._colors and c == self._ANSI_COLOR_PREFIX and \
                    len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
                current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
                # If it really is a color, skip it.
                if self._ANSI_REGEXP.match(current_color):
                    line_index += self._ANSI_COLOR_LENGTH
                    fixed_line.extend(list(current_color))
                    colors_counter += 1
                    continue
            fixed_line.append(line[line_index])
            line_index += 1
            # Create a new line, if max line is reached.
            # (color codes are invisible, so each one extends the budget)
            if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
                # Special case in which we want to split right before the line break.
                if len(line) > line_index and line[line_index] == self.LINE_SEP:
                    continue
                line_string = ''.join(fixed_line)
                if not line_string.endswith(self.LINE_SEP):
                    line_string += self.LINE_SEP
                lines.append(line_string)
                fixed_line = []
                colors_counter = 0
                self._last_position = 0
                # Max line length has changed since the last position is now 0.
                max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
                self._is_first_line = False
        if len(fixed_line) > 0:
            fixed_line = ''.join(fixed_line)
            # If this line contains only color codes, attach it to the last line instead of creating a new one.
            if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and \
                    len(lines) > 0:
                lines[-1] = lines[-1][:-1] + fixed_line
            else:
                lines.append(fixed_line)
    return lines | python | {
"resource": ""
} |
q44830 | Printer.write | train | def write(self, text: str):
    """
    Prints text to the screen.
    Supports colors by using the color constants.
    To use colors, add the color before the text you want to print.
    :param text: The text to print.
    """
    # Default color is NORMAL.
    last_color = (self._DARK_CODE, 0)
    # We use splitlines with keepends in order to keep the line breaks.
    # Then we split by using the console width.
    original_lines = text.splitlines(True)
    lines = self._split_lines(original_lines) if self._width_limit else original_lines
    # Print the new width-formatted lines.
    for line in lines:
        # Print indents only at line beginnings.
        if not self._in_line:
            self._writer.write(' ' * self.indents_sum)
        # Remove colors if needed.
        if not self._colors:
            for color_code in self._ANSI_REGEXP.findall(line):
                line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
        elif not self._ANSI_REGEXP.match(line):
            # Check if the line starts with a color. If not, we apply the color from the last line.
            line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line
        # Print the final line.
        self._writer.write(line)
        # Update the in_line status.
        self._in_line = not line.endswith(self.LINE_SEP)
        # Update the last color used.
        if self._colors:
            last_color = self._ANSI_REGEXP.findall(line)[-1]
    # Update last position (if there was no line break in the end).
    if len(lines) > 0:
        last_line = lines[-1]
        if not last_line.endswith(self.LINE_SEP):
            # Strip the colors to figure out the real number of characters in the line.
            if self._colors:
                for color_code in self._ANSI_REGEXP.findall(last_line):
                    last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '')
            self._last_position += len(last_line)
        else:
            self._last_position = 0
            self._is_first_line = False
    else:
        self._last_position = 0
    # Reset colors for the next print.
    if self._colors and not text.endswith(self.NORMAL):
        self._writer.write(self.NORMAL) | python | {
"resource": ""
} |
q44831 | Printer.write_aligned | train | def write_aligned(self, key: str, value: str, not_important_keys: Optional[List[str]] = None,
                  is_list: bool = False, align_size: Optional[int] = None, key_color: str = PURPLE,
                  value_color: str = GREEN, dark_key_color: str = DARK_PURPLE, dark_value_color: str = DARK_GREEN,
                  separator: str = SEPARATOR):
    """
    Prints keys and values aligned to align_size.
    :param key: The name of the property to print.
    :param value: The value of the property to print.
    :param not_important_keys: Properties that will be printed in a darker color.
    :param is_list: True if the value is a list of items.
    :param align_size: The alignment size to use.
    :param key_color: The key text color (default is purple).
    :param value_color: The value text color (default is green).
    :param dark_key_color: The key text color for unimportant keys (default is dark purple).
    :param dark_value_color: The values text color for unimportant values (default is dark green).
    :param separator: The separator to use (default is ':').
    """
    # Default alignment: half the console width, capped at 32 columns.
    align_size = align_size or min(32, get_console_width() // 2)
    not_important_keys = not_important_keys or []
    if value is None:
        return
    if isinstance(value, bool):
        value = str(value)
    if key in not_important_keys:
        key_color = dark_key_color
        value_color = dark_value_color
    self.write(key_color + key + separator)
    # NOTE(review): the padding assumes a single-character separator -- a
    # longer `separator` would misalign the value column; confirm callers.
    self.write(' ' * (align_size - len(key) - 1))
    with self.group(indent=align_size):
        if is_list and len(value) > 0:
            self.write_line(value_color + value[0])
            if len(value) > 1:
                for v in value[1:]:
                    self.write_line(value_color + v)
        elif not is_list:
            self.write_line(value_color + str(value)) | python | {
"resource": ""
} |
def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
    """
    Prints a title with a hyphen ('=') underline beneath it.
    :param title: The title to print.
    :param title_color: The title text color (default is yellow).
    :param hyphen_line_color: The underline color (default is white).
    """
    underline = '=' * (len(title) + 3)
    self.write_line(title_color + title)
    self.write_line(hyphen_line_color + underline)
"resource": ""
} |
q44833 | generate_pos_tagger | train | def generate_pos_tagger(check_accuracy=False):
    """Train a German POS tagger on the TIGER corpus and pickle it to disk.

    Accuracy is about 0.94 with 90% training data. When ``check_accuracy`` is
    True, 10% of the (shuffled) sentences are held out for evaluation.
    """
    global tagger
    logging.debug("Reading TIGER corpus")
    # Only the word and POS columns of the CoNLL file are used.
    corp = nltk.corpus.ConllCorpusReader(DIR_PATH, TIGER_FILE_NAME,
                                         ['ignore', 'words', 'ignore', 'ignore', 'pos'],
                                         encoding='utf-8')
    tagged_sents = list(corp.tagged_sents())
    logging.debug("Shuffling sentences")
    random.shuffle(tagged_sents)
    if check_accuracy:
        # set a split size: use 90% for training, 10% for testing
        split_perc = 0.1
        split_size = int(len(tagged_sents) * split_perc)
        train_sents, test_sents = tagged_sents[split_size:], tagged_sents[:split_size]
    else:
        train_sents = tagged_sents
    logging.debug("Training Tagger")
    tagger = ClassifierBasedGermanTagger(train=train_sents)
    logging.debug("Training finished")
    if check_accuracy:
        accuracy = tagger.evaluate(test_sents)
        logging.debug("Accurracy is {}.".format(accuracy))
    logging.debug("Serializing the Tagger")
    # Pickle protocol 3: Python 3 only, supports bytes objects.
    with open(os.path.join(DIR_PATH, TAGGER_FILE_NAME), 'wb') as f:
        pickle.dump(tagger, f, protocol=3) | python | {
"resource": ""
} |
q44834 | make_lock_securely | train | def make_lock_securely(email = None, warn_only = False):
    "Terminal oriented; produces a prompt for user input of email and password. Returns crypto.UserLock."
    email = email or input("Please provide email address: ")
    # Keep prompting until the passphrase passes the strength check
    # (or accept the first one anyway when warn_only is set).
    while True:
        passphrase = getpass.getpass("Please type a secure passphrase (with spaces): ")
        ok, score = check_passphrase(passphrase, email)
        if ok: break
        print("Insufficiently strong passphrase; has {entropy} bits of entropy, could be broken in {crack_time_display}".format(**score))
        if warn_only: break
        print("Suggestion:", make_random_phrase(email))
    key = crypto.UserLock.from_passphrase(email, passphrase)
    return key | python | {
"resource": ""
} |
q44835 | encrypt_file | train | def encrypt_file(file_path, sender, recipients):
    "Returns encrypted binary file content if successful"
    # Validate all keys up-front before touching the filesystem.
    for recipient_key in recipients:
        crypto.assert_type_and_length('recipient_key', recipient_key, (str, crypto.UserLock))
    crypto.assert_type_and_length("sender_key", sender, crypto.UserLock)
    if (not os.path.exists(file_path)) or (not os.path.isfile(file_path)):
        raise OSError("Specified path does not point to a valid file: {}".format(file_path))
    # Only the base filename (not the full path) is embedded in the container.
    _, filename = os.path.split(file_path)
    with open(file_path, "rb") as I:
        crypted = crypto.MiniLockFile.new(filename, I.read(), sender, recipients)
    return crypted.contents | python | {
"resource": ""
} |
q44836 | encrypt_folder | train | def encrypt_folder(path, sender, recipients):
"""
This helper function should zip the contents of a folder and encrypt it as
a zip-file. Recipients are responsible for opening the zip-file.
"""
for recipient_key in recipients:
crypto.assert_type_and_length('recipient_key', recipient_key, (str, crypto.UserLock))
crypto.assert_type_and_length("sender_key", sender, crypto.UserLock)
if (not os.path.exists(path)) or (not os.path.isdir(path)):
raise OSError("Specified path is not a valid directory: {}".format(path))
buf = io.BytesIO()
zipf = zipfile.ZipFile(buf, mode="w", compression=zipfile.ZIP_DEFLATED)
for root, folders, files in os.walk(path):
for fn in files:
fp = os.path.join(root, fn)
zipf.write(fp)
zipf.close()
zip_contents = buf.getvalue()
_, filename = os.path.split(path)
filename += ".zip"
crypted = crypto.MiniLockFile.new(filename, zip_contents, sender, recipients)
return crypted.contents | python | {
"resource": ""
} |
def get_profile(A):
    """
    Fail-soft profile getter; if no profile is present or unreadable, assume
    an empty profile and quietly ignore the error.

    :param A: parsed-args namespace with a ``profile`` path attribute.
    :return: the profile dict, or {} when the file is missing or invalid JSON.
    """
    try:
        with open(os.path.expanduser(A.profile)) as profile_file:
            return json.load(profile_file)
    # FIX: narrowed from a bare `except:` so genuine bugs (AttributeError,
    # KeyboardInterrupt, ...) are no longer silently swallowed. OSError covers
    # missing/unreadable files; ValueError covers json.JSONDecodeError.
    except (OSError, ValueError):
        return {}
"resource": ""
} |
q44838 | main_encrypt | train | def main_encrypt(A):
    "Encrypt to recipient list using primary key OR prompted key. Recipients may be IDs or petnames."
    profile = get_profile(A)
    localKeys = profile.get('local keys', [])
    # No stored keys: derive one interactively from email + passphrase.
    if not localKeys:
        localKeys = [make_lock_securely(warn_only = A.ignore_entropy)]
    else:
        localKeys = [crypto.UserLock.private_from_b64(k['private_key']) for k in localKeys]
    # First key is considered "main"
    userKey = localKeys[0]
    print("User ID:", userKey.userID)
    if not os.path.exists(A.path):
        error_out("File or directory '{}' does not exist.".format(A.path))
    # Create, fetch or error out for recipient list:
    recipients = resolve_recipients(profile, A.recipient)
    # Always include the sender so the file can be decrypted by its author.
    recipients.append(userKey)
    print("Recipients:", *set(k.userID if isinstance(k, crypto.UserLock) else k for k in recipients))
    # Do files OR folders
    if os.path.isfile(A.path):
        crypted = encrypt_file(A.path, userKey, recipients)
    elif os.path.isdir(A.path):
        crypted = encrypt_folder(A.path, userKey, recipients)
    else:
        error_out("Specified path '{}' is neither a file nor a folder.".format(A.path))
    if A.base64:
        crypted = crypto.b64encode(crypted)
    # Default output name: random hex token with a .minilock extension.
    if not A.output:
        A.output = hex(int.from_bytes(os.urandom(6),'big'))[2:] + ".minilock"
    print("Saving output to", A.output)
    with open(A.output, "wb") as O:
        O.write(crypted) | python | {
"resource": ""
} |
q44839 | main_decrypt | train | def main_decrypt(A):
    "Get all local keys OR prompt user for key, then attempt to decrypt with each."
    profile = get_profile(A)
    localKeys = profile.get('local keys', [])
    # No stored keys: derive one interactively from email + passphrase.
    if not localKeys:
        localKeys = [make_lock_securely(warn_only = A.ignore_entropy)]
    else:
        localKeys = [crypto.UserLock.private_from_b64(k['private_key']) for k in localKeys]
    if not os.path.exists(A.path):
        error_out("File or directory '{}' does not exist.".format(A.path))
    if os.path.isfile(A.path):
        # Try each local key in turn until one succeeds (for/else: none did).
        for k in localKeys:
            print("Attempting decryption with:", k.userID)
            try:
                filename, senderID, decrypted = decrypt_file(A.path, k, base64 = A.base64)
                break
            except ValueError as E:
                pass
        else:
            error_out("Failed to decrypt with all available keys.")
    else:
        error_out("Specified path '{}' is not a file.".format(A.path))
    print("Decrypted file from", senderID)
    print("Saving output to", filename)
    with open(filename, "wb") as O:
        O.write(decrypted) | python | {
"resource": ""
} |
q44840 | collection | train | def collection(name=None):
    """Render the collection page.
    It renders it either with a collection specific template (aka
    collection_{collection_name}.html) or with the default collection
    template (collection.html).
    """
    # No name given: fall back to the root collection (primary key 1).
    if name is None:
        collection = Collection.query.get_or_404(1)
    else:
        collection = Collection.query.filter(
            Collection.name == name).first_or_404()
    # TODO add breadcrumbs
    # breadcrumbs = current_breadcrumbs + collection.breadcrumbs(ln=g.ln)[1:]
    # Template lookup order: by id, by slugified name, then the configured default.
    return render_template([
        'invenio_collections/collection_{0}.html'.format(collection.id),
        'invenio_collections/collection_{0}.html'.format(slugify(name, '_')),
        current_app.config['COLLECTIONS_DEFAULT_TEMPLATE']
    ], collection=collection) | python | {
"resource": ""
} |
def from_tibiadata(cls, content, vocation=None):
    """Builds a highscores object from a TibiaData highscores response.

    Notes
    -----
    Since TibiaData.com's response doesn't contain any indication of the vocation filter applied,
    :py:attr:`vocation` can't be determined from the response, so the attribute must be assigned manually.

    If the attribute is known, it can be passed for it to be assigned in this method.

    Parameters
    ----------
    content: :class:`str`
        The JSON content of the response.
    vocation: :class:`VocationFilter`, optional
        The vocation filter to assign to the results. Note that this won't affect the parsing.

    Returns
    -------
    :class:`Highscores`
        The highscores contained in the page, or None if the content is for the highscores of a nonexistent world.

    Raises
    ------
    InvalidContent
        If content is not a JSON string of the highscores response.
    """
    json_content = parse_json(content)
    try:
        highscores_json = json_content["highscores"]
        if "error" in highscores_json["data"]:
            # TibiaData reports unknown worlds via an error payload.
            return None
        world = highscores_json["world"]
        category = highscores_json["type"]
        highscores = cls(world, category)
        for entry in highscores_json["data"]:
            # Most categories report "level"; points-based ones use "points".
            value_key = "level"
            if highscores.category in [Category.ACHIEVEMENTS, Category.LOYALTY_POINTS, Category.EXPERIENCE]:
                value_key = "points"
            if highscores.category == Category.EXPERIENCE:
                highscores.entries.append(ExpHighscoresEntry(entry["name"], entry["rank"], entry["voc"],
                                                             entry[value_key], entry["level"]))
            elif highscores.category == Category.LOYALTY_POINTS:
                highscores.entries.append(LoyaltyHighscoresEntry(entry["name"], entry["rank"], entry["voc"],
                                                                 entry[value_key], entry["title"]))
            else:
                highscores.entries.append(HighscoresEntry(entry["name"], entry["rank"], entry["voc"],
                                                          entry[value_key]))
        highscores.results_count = len(highscores.entries)
    except KeyError:
        raise InvalidContent("content is not a TibiaData highscores response.")
    if isinstance(vocation, VocationFilter):
        highscores.vocation = vocation
    return highscores
def transform(string, transliterations=None):
    """
    Transform the string to "upside-down" writing.

    Example:

    >>> import upsidedown
    >>> print(upsidedown.transform('Hello World!'))
    ¡pꞁɹoM oꞁꞁǝH

    For languages with diacritics you might want to supply a transliteration to
    work around missing (rendering of) upside-down forms:

    >>> import upsidedown
    >>> print(upsidedown.transform('köln', transliterations={'ö': 'oe'}))
    uꞁǝoʞ
    """
    mapping = transliterations or TRANSLITERATIONS
    # Apply transliterations first so multi-character substitutions happen
    # before the per-character flipping below.
    for original, substitute in mapping.items():
        string = string.replace(original, substitute)

    flipped = []
    # Upside-down text reads back-to-front, so walk the input in reverse.
    for ch in reversed(string):
        if ch in _CHARLOOKUP:
            flipped.append(_CHARLOOKUP[ch])
            continue
        # No direct mapping: decompose (NFD) and map base char and combining
        # diacritics individually, then recompose (NFC).
        decomposed = unicodedata.normalize("NFD", ch)
        for part in decomposed:
            if part in _CHARLOOKUP:
                decomposed = decomposed.replace(part, _CHARLOOKUP[part])
            elif part in _DIACRITICSLOOKUP:
                decomposed = decomposed.replace(part, _DIACRITICSLOOKUP[part])
        flipped.append(unicodedata.normalize("NFC", decomposed))
    return ''.join(flipped)
def main():
    """Main method for running upsidedown.py from the command line.

    Reads stdin to EOF, upside-down-transforms every line, and prints the
    transformed lines in reverse order (so the whole text reads flipped).
    """
    import sys
    transformed = [transform(raw.strip("\n")) for raw in sys.stdin]
    print("\n".join(reversed(transformed)))
def capture_termination_signal(please_stop):
    """
    WILL SIGNAL please_stop WHEN THIS AWS INSTANCE IS DUE FOR SHUTDOWN
    """
    # NOTE(review): indentation was reconstructed from a flattened source —
    # confirm the 61-second wait belongs to the error path only.
    def worker(please_stop):
        # Tracks whether the previous poll failed, so a single transient
        # error is tolerated before warning.
        seen_problem = False
        while not please_stop:
            request_time = (time.time() - timer.START)/60  # MINUTES
            try:
                # AWS instance metadata endpoint: answers with a termination
                # time (non-400/404) when a spot instance is being reclaimed.
                response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
                seen_problem = False
                if response.status_code not in [400, 404]:
                    Log.alert("Shutdown AWS Spot Node {{name}} {{type}}", name=machine_metadata.name, type=machine_metadata.aws_instance_type)
                    please_stop.go()
            except Exception as e:
                e = Except.wrap(e)
                if "Failed to establish a new connection: [Errno 10060]" in e or "A socket operation was attempted to an unreachable network" in e:
                    # Metadata service unreachable: not running on AWS; stop polling.
                    Log.note("AWS Spot Detection has shutdown, probably not a spot node, (http://169.254.169.254 is unreachable)")
                    return
                elif seen_problem:
                    # IGNORE THE FIRST PROBLEM
                    Log.warning("AWS shutdown detection has more than one consecutive problem: (last request {{time|round(1)}} minutes since startup)", time=request_time, cause=e)
                seen_problem = True
                # Back off longer after a failure.
                (Till(seconds=61) | please_stop).wait()
            # Normal poll cadence.
            (Till(seconds=11) | please_stop).wait()
    Thread.run("listen for termination", worker)
def restore(self):
    """
    Destroy all inspectors in exp_list and SinonMock itself.

    Expectations that were already garbage-collected (weakref gone) are
    skipped silently; finally the mock deregisters itself from the queue.
    """
    for expectation in self.exp_list:
        try:
            expectation.restore()
        except ReferenceError:
            # Already collected; nothing left to undo.
            continue
    self._queue.remove(self)
def execute_by_options(args):
    """execute by argument dictionary

    Dispatches to the Sphinx helper or the offline-distribution helper
    depending on the chosen subcommand; unknown flags fall through silently.

    Args:
        args (dict): command line argument dictionary
    """
    if args['subcommand'] == 'sphinx':
        s = Sphinx(proj_info)
        if args['quickstart']:
            s.quickstart()
        elif args['gen_code_api']:
            s.gen_code_api()
        elif args['rst2html']:
            s.rst2html()
        pass  # no-op placeholder (kept from original)
    elif args['subcommand'] == 'offline_dist':
        pod = PyOfflineDist()
        if args['freeze_deps']:
            pod.freeze_deps()
        elif args['download_deps']:
            pod.download_deps()
        elif args['install_deps']:
            pod.install_deps()
        elif args['clean_deps']:
            pod.clean_deps()
        elif args['mkbinary']:
            pod.pyinstaller_mkbinary(args['mkbinary'])
        elif args['clean_binary']:
            pod.clean_binary()
        pass  # no-op placeholder (kept from original)
def editline_with_regex(self, regex_tgtline, to_replace):
    """Replace the first buffered line matching a regular expression.

    Args:
        regex_tgtline (str): regular expression matched (via re.match, i.e.
            anchored at the start) against each line in the swap buffer.
        to_replace (str): the line used to replace the first match.
    """
    for position, current in enumerate(self._swp_lines):
        if re.match(regex_tgtline, current):
            self._swp_lines[position] = to_replace
            return
def onCall(self, n): #pylint: disable=invalid-name
    """
    Adds a condition for when the stub is called. When the condition is met, a special
    return value can be returned. Adds the specified call number into the condition
    list.

    For example, when the stub function is called the second time, it will return "#":
        stub.onCall(1).returns("#")

    Without returns/throws at the end of the chain of functions, nothing will happen.
    For example, in this case, although 2 is in the condition list, nothing will happen:
        stub.onCall(2)

    Args:
        n: integer, the call # for which we want a special return value.
           The first call has an index of 0.
    Return:
        a SinonStub object (able to be chained)
    """
    # Public API is 0-based but the internal call counter is 1-based.
    cond_oncall = n + 1
    return _SinonStubCondition(copy=self._copy, oncall=cond_oncall, cond_args=self._cond_args, cond_kwargs=self._cond_kwargs)
def returns(self, obj):
    """
    Customizes the return values of the stub function. If conditions like withArgs or onCall
    were specified, then the return value will only be returned when the conditions are met.

    Args: obj (anything)
    Return: a SinonStub object (able to be chained)
    """
    # Wrap the constant in a callable so the stub machinery can invoke it
    # uniformly with whatever arguments the stub receives.
    self._copy._append_condition(self, lambda *args, **kwargs: obj)
    return self
def assign_operation_ids(spec, operids):
    """Assign caller-provided operationId values into a Swagger/OpenAPI spec.

    Args:
        spec: spec dict; ``spec['paths']`` maps path -> method -> operation dict.
        operids: mapping of path -> method -> operationId to inject.

    Mutates ``spec`` in place; paths/methods with no supplied id are untouched.
    """
    empty_dict = {}
    # Plain .items() behaves identically to six.iteritems on both Python 2
    # and 3, so the six dependency is unnecessary here.
    for path_name, path_data in spec['paths'].items():
        for method, method_data in path_data.items():
            oper_id = operids.get(path_name, empty_dict).get(method)
            if oper_id:
                method_data['operationId'] = oper_id
def pretty_print(self, printer: Optional[Printer] = None, align: int = ALIGN_CENTER, border: bool = False):
    """
    Pretty prints the table.

    :param printer: The printer to print with.
    :param align: The alignment of the cells(Table.ALIGN_CENTER/ALIGN_LEFT/ALIGN_RIGHT)
    :param border: Whether to add a border around the table
    """
    if printer is None:
        printer = get_printer()
    table_string = self._get_pretty_table(indent=printer.indents_sum, align=align, border=border).get_string()
    if table_string != '':
        first_line = table_string.splitlines()[0]
        # Visible width of the first line: subtract the bytes consumed by
        # ANSI color escape sequences.
        first_line_length = len(first_line) - len(re.findall(Printer._ANSI_REGEXP, first_line)) * \
                            Printer._ANSI_COLOR_LENGTH
        if self.title_align == self.ALIGN_CENTER:
            title = '{}{}'.format(' ' * (first_line_length // 2 - len(self.title) // 2), self.title)
        elif self.title_align == self.ALIGN_LEFT:
            title = self.title
        else:
            # Right alignment: pad so the title ends at the table's edge.
            title = '{}{}'.format(' ' * (first_line_length - len(self.title)), self.title)
        printer.write_line(printer.YELLOW + title)
        # We split the table to lines in order to keep the indentation.
        printer.write_line(table_string)
def rows(self) -> List[List[str]]:
    """
    Returns the table body: one list of cell values per data record,
    in the records' own key order.
    """
    return [[cell for cell in record.values()] for record in self.data]
def set_column_size_limit(self, column_name: str, size_limit: int):
    """
    Sets the size limit of a specific column.

    :param column_name: The name of the column to change.
    :param size_limit: The max size of the column width.
    :raises ValueError: if no column with that name exists.
    """
    # Membership test (not .get() truthiness) so columns whose current
    # limit is 0 or None can still be updated.
    if column_name in self._column_size_map:
        self._column_size_map[column_name] = size_limit
    else:
        raise ValueError(f'There is no column named {column_name}!')
def get_as_html(self) -> str:
    """
    Returns the table object as an HTML string.

    :return: HTML representation of the table, with the title centered
        above the table markup.
    """
    table_string = self._get_pretty_table().get_html_string()
    # Center the title relative to the first line of the generated markup.
    title = ('{:^' + str(len(table_string.splitlines()[0])) + '}').format(self.title)
    return f'<center><h1>{title}</h1></center>{table_string}'
def get_as_csv(self, output_file_path: Optional[str] = None) -> str:
    """
    Returns the table object as a CSV string.

    :param output_file_path: The output file to save the CSV to, or None.
    :return: CSV representation of the table.
    """
    # 'w+' (not 'w') so the file is readable below: reading a write-only
    # file after seek(0) raises io.UnsupportedOperation.
    output = StringIO() if not output_file_path else open(output_file_path, 'w+')
    try:
        csv_writer = csv.writer(output)
        csv_writer.writerow(self.columns)
        for row in self.rows:
            csv_writer.writerow(row)
        output.seek(0)
        return output.read()
    finally:
        output.close()
def schedule(self, when=None, action=None, **kwargs):
    """
    Schedule an update of this object.

    when: The date for the update.
    action: if provided it will be looked up
        on the implementing class and called with
        **kwargs. If action is not provided each k/v pair
        in kwargs will be set on self and then self
        is saved.
    kwargs: any other arguments you would like passed
        for this change. Saved as a json object so must cleanly
        serialize.
    """
    # when is empty or passed, just save it now.
    if not when or when <= timezone.now():
        self.do_scheduled_update(action, **kwargs)
    else:
        # Persist a Schedule row; a worker applies it when `when` arrives.
        ctype = ContentType.objects.get_for_model(self.__class__)
        Schedule(
            content_type=ctype,
            object_args=self.get_scheduled_filter_args(),
            when=when,
            action=action,
            json_args=kwargs
        ).save()
def do_scheduled_update(self, action, **kwargs):
    """
    Do the actual update.

    action: optional name of a method on the implementing class; when
        provided (and callable) it is invoked with **kwargs and its result
        returned. When absent, each k/v pair in kwargs is set on self and
        then self is saved.
    kwargs: any other arguments you passed for this update, passed along
        to whichever method performs the update.
    """
    # Guard: `action` is legitimately None (schedule() without an action),
    # and getattr(self, None) raises TypeError.
    method = getattr(self, action, None) if action else None
    if callable(method):
        return method(**kwargs)
    for k, v in kwargs.items():
        setattr(self, k, v)
    self.save()
def get_md5_hash(file_path):
    """
    Calculate the MD5 checksum for a file.

    :param string file_path:
        Path to the file
    :return:
        MD5 checksum (hex digest string)
    """
    digest = hashlib.md5()
    # Stream in 128-block chunks so arbitrarily large files fit in memory.
    with open(file_path, 'rb') as handle:
        chunk = handle.read(128 * digest.block_size)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(128 * digest.block_size)
    return digest.hexdigest()
def as_dict(self):
    """
    Convert this FileRecordSearch to a dict, ready for serialization to JSON for use in the API.

    The _add_* helpers only emit keys whose values are set, so the result
    contains no nulls.

    :return:
        Dict representation of this FileRecordSearch instance
    """
    d = {}
    _add_value(d, 'obstory_ids', self.obstory_ids)
    _add_value(d, 'lat_min', self.lat_min)
    _add_value(d, 'lat_max', self.lat_max)
    _add_value(d, 'long_min', self.long_min)
    _add_value(d, 'long_max', self.long_max)
    _add_value(d, 'time_min', self.time_min)
    _add_value(d, 'time_max', self.time_max)
    _add_value(d, 'mime_type', self.mime_type)
    _add_value(d, 'skip', self.skip)
    _add_value(d, 'limit', self.limit)
    _add_string(d, 'semantic_type', self.semantic_type)
    _add_string(d, 'observation_type', self.observation_type)
    _add_value(d, 'observation_id', self.observation_id)
    _add_string(d, 'repository_fname', self.repository_fname)
    _add_boolean(d, 'exclude_imported', self.exclude_imported)
    _add_string(d, 'exclude_export_to', self.exclude_export_to)
    # Meta constraints serialize themselves recursively.
    d['meta'] = list((x.as_dict() for x in self.meta_constraints))
    return d
def from_dict(d):
    """
    Builds a new instance of FileRecordSearch from a dict

    Missing keys fall back to None, except skip (0) and limit (100).

    :param Object d: the dict to parse
    :return: a new FileRecordSearch based on the supplied dict
    """
    obstory_ids = _value_from_dict(d, 'obstory_ids')
    lat_min = _value_from_dict(d, 'lat_min')
    lat_max = _value_from_dict(d, 'lat_max')
    long_min = _value_from_dict(d, 'long_min')
    long_max = _value_from_dict(d, 'long_max')
    time_min = _value_from_dict(d, 'time_min')
    time_max = _value_from_dict(d, 'time_max')
    mime_type = _string_from_dict(d, 'mime_type')
    skip = _value_from_dict(d, 'skip', 0)
    limit = _value_from_dict(d, 'limit', 100)
    semantic_type = _string_from_dict(d, 'semantic_type')
    observation_type = _string_from_dict(d, 'observation_type')
    observation_id = _value_from_dict(d, 'observation_id')
    repository_fname = _string_from_dict(d, 'repository_fname')
    exclude_imported = _boolean_from_dict(d, 'exclude_imported')
    exclude_export_to = _string_from_dict(d, 'exclude_export_to')
    if 'meta' in d:
        meta_constraints = list((MetaConstraint.from_dict(x) for x in d['meta']))
    else:
        meta_constraints = []
    return FileRecordSearch(obstory_ids=obstory_ids, lat_min=lat_min, lat_max=lat_max, long_min=long_min,
                            long_max=long_max, time_min=time_min, time_max=time_max, mime_type=mime_type,
                            semantic_type=semantic_type,
                            observation_type=observation_type,
                            observation_id=observation_id, repository_fname=repository_fname,
                            meta_constraints=meta_constraints, limit=limit, skip=skip,
                            exclude_imported=exclude_imported,
                            exclude_export_to=exclude_export_to)
def as_dict(self):
    """
    Convert this ObservationGroupSearch to a dict, ready for serialization to JSON for use in the API.

    Only keys with set values are emitted (see the _add_* helpers).

    :return:
        Dict representation of this ObservationGroupSearch instance
    """
    d = {}
    _add_string(d, 'obstory_name', self.obstory_name)
    _add_string(d, 'semantic_type', self.semantic_type)
    _add_value(d, 'time_min', self.time_min)
    _add_value(d, 'time_max', self.time_max)
    _add_string(d, 'group_id', self.group_id)
    _add_string(d, 'observation_id', self.observation_id)
    _add_value(d, 'skip', self.skip)
    _add_value(d, 'limit', self.limit)
    d['meta'] = list((x.as_dict() for x in self.meta_constraints))
    return d
def as_dict(self):
    """
    Convert this ObservatoryMetadataSearch to a dict, ready for serialization to JSON for use in the API.

    Only keys with set values are emitted (see the _add_* helpers).

    :return:
        Dict representation of this ObservatoryMetadataSearch instance
    """
    d = {}
    _add_value(d, 'obstory_ids', self.obstory_ids)
    _add_string(d, 'field_name', self.field_name)
    _add_value(d, 'lat_min', self.lat_min)
    _add_value(d, 'lat_max', self.lat_max)
    _add_value(d, 'long_min', self.long_min)
    _add_value(d, 'long_max', self.long_max)
    _add_value(d, 'time_min', self.time_min)
    _add_value(d, 'time_max', self.time_max)
    _add_string(d, 'item_id', self.item_id)
    _add_value(d, 'skip', self.skip)
    _add_value(d, 'limit', self.limit)
    _add_boolean(d, 'exclude_imported', self.exclude_imported)
    _add_string(d, 'exclude_export_to', self.exclude_export_to)
    return d
def type(self):
    """Returns 'number', 'string', 'date' or 'unknown' based on the type of the value"""
    # NOTE(review): bool is a subclass of numbers.Number, so booleans report
    # as "number"; also 'date' is never actually returned here despite the
    # docstring — confirm whether date detection was intended.
    if isinstance(self.value, numbers.Number):
        return "number"
    # `basestring` (str + unicode) implies this module targets Python 2.
    if isinstance(self.value, basestring):
        return "string"
    return "unknown"
def find(self, header, list_type=None):
    """Find the first chunk with specified header and optional list type.

    Scans this list's direct children in order, recursing depth-first into
    any child that is itself a LIST-style chunk.

    :raises self.NotFound: when no matching chunk exists in the subtree.
    """
    for chunk in self:
        if chunk.header == header and (list_type is None or (header in
                list_headers and chunk.type == list_type)):
            return chunk
        elif chunk.header in list_headers:
            # Recurse into nested lists; swallow NotFound so the scan
            # continues with the remaining siblings.
            try:
                result = chunk.find(header, list_type)
                return result
            except chunk.NotFound:
                pass
    if list_type is None:
        raise self.NotFound('Chunk \'{0}\' not found.'.format(header))
    else:
        raise self.NotFound('List \'{0} {1}\' not found.'.format(header,
                            list_type))
def find_all(self, header, list_type=None):
    """Find all direct children with header and optional list type.

    Unlike find(), this does not recurse into nested lists.
    """
    found = []
    for chunk in self:
        # `list_type is None` (rather than truthiness) for consistency with
        # find(), so an explicit falsy list_type is still honored.
        if chunk.header == header and (list_type is None or (header in
                list_headers and chunk.type == list_type)):
            found.append(chunk)
    return found
def replace(self, child, replacement):
    """Replace a child chunk with something else (every matching occurrence)."""
    for index, current in enumerate(self.chunks):
        if current == child:
            self.chunks[index] = replacement
def remove(self, child):
    """Remove every occurrence of a child element.

    Indices are walked in reverse so deletions do not shift (or run past)
    positions still to be checked — the original forward
    `for i in range(len(self))` loop raised IndexError after a deletion.
    """
    for i in reversed(range(len(self))):
        if self[i] == child:
            del self[i]
def from_data(data):
    """Create a chunk from data including header and length bytes.

    Layout: 4-byte ASCII header followed by a little-endian uint32 length,
    then the payload.
    """
    # The byte-order character must come first: '4s<I' is a malformed
    # struct format string and raises struct.error.
    header, length = struct.unpack('<4sI', data[:8])
    data = data[8:]
    return RiffDataChunk(header, data)
def get_urls(self):
    """Add our dashboard view to the admin urlconf. Deleted the default index."""
    from django.conf.urls import patterns, url
    from views import DashboardWelcomeView
    urls = super(AdminMixin, self).get_urls()
    # Drop the stock admin index so our dashboard can take its place.
    del urls[0]
    custom_url = patterns(
        '',
        url(r'^$', self.admin_view(DashboardWelcomeView.as_view()), name="index")
    )
    # Custom patterns first so '^$' resolves to the dashboard.
    return custom_url + urls
def index_to_coordinate(dims):
    """
    RETURN A FUNCTION THAT WILL TAKE AN INDEX, AND MAP IT TO A coordinate IN dims

    :param dims: TUPLE WITH NUMBER OF POINTS IN EACH DIMENSION
    :return: FUNCTION mapping a flat (row-major) index to a coordinate tuple
    """
    num_dims = len(dims)
    if num_dims == 0:
        return _zero_dim

    # prod[i] = number of cells spanned by one step along dimension i in
    # row-major order, i.e. the divisor used to peel off coordinate i.
    prod = [1] * num_dims
    acc = 1
    for i in reversed(range(num_dims)):
        prod[i] = acc
        acc *= dims[i]

    # A plain closure replaces the original exec()-generated function: under
    # Python 3, exec() inside a function cannot rebind the local `output`,
    # so the previous code returned None.
    def output(index):
        coord = []
        for i in range(num_dims - 1):
            c, index = divmod(index, prod[i])
            coord.append(c)
        coord.append(index)
        return tuple(coord)

    return output
def groupby(self, io_select):
    """
    SLICE THIS MATRIX INTO ONES WITH LESS DIMENSIONALITY

    io_select - 1 IF GROUPING BY THIS DIMENSION, 0 IF FLATTENING
    return -
    """
    # offsets WILL SERVE TO MASK DIMS WE ARE NOT GROUPING BY, AND SERVE AS
    # RELATIVE INDEX FOR EACH COORDINATE
    offsets = []
    new_dim = []
    acc = 1
    # reversed() needs a sequence; enumerate() alone is a bare iterator, so
    # materialize it first (reversed(enumerate(...)) raises TypeError).
    for i, d in reversed(list(enumerate(self.dims))):
        if not io_select[i]:
            new_dim.insert(0, d)
        offsets.insert(0, acc * io_select[i])
        acc *= d
    if not new_dim:
        # WHEN groupby ALL DIMENSIONS, ONLY THE VALUES REMAIN
        # RETURN AN ITERATOR OF PAIRS (c, v), WHERE
        # c - COORDINATES INTO THE CUBE
        # v - VALUE AT GIVEN COORDINATES
        return ((c, self[c]) for c in self._all_combos())
    else:
        output = [[None, Matrix(dims=new_dim)] for i in range(acc)]
        _groupby(self.cube, 0, offsets, 0, output, tuple(), [])
        return output
def items(self):
    """
    ITERATE THROUGH ALL coord, value PAIRS

    Yields (coordinate tuple, value) for every cell in the cube.
    """
    for c in self._all_combos():
        # _getitem returns (remaining dims, value); only the value matters here.
        _, value = _getitem(self.cube, c)
        yield c, value
def _all_combos(self):
    """
    RETURN AN ITERATOR OF ALL COORDINATES
    """
    combos = _product(self.dims)
    if not combos:
        return
    # calc[i] = (stride, dim size): stride is the product of all later dims,
    # so `(c // stride) % size` extracts coordinate i in row-major order.
    calc = [(coalesce(_product(self.dims[i+1:]), 1), mm) for i, mm in enumerate(self.dims)]
    for c in xrange(combos):  # xrange: Python 2 builtin (or a compat alias)
        yield tuple(int(c / dd) % mm for dd, mm in calc)
def send(self, topic, message):
    """Publishes a pulse message to the proper exchange.

    :param topic: routing key for the topic exchange.
    :param message: message object; must be truthy and expose _prepare(),
        data, routing_key and metadata.
    """
    if not message:
        Log.error("Expecting a message")
    message._prepare()
    if not self.connection:
        # Lazily establish the broker connection on first send.
        self.connect()
    producer = Producer(
        channel=self.connection,
        exchange=Exchange(self.settings.exchange, type='topic'),
        routing_key=topic
    )
    # The message is actually a simple envelope format with a payload and
    # some metadata.
    final_data = Data(
        payload=message.data,
        _meta=set_default({
            'exchange': self.settings.exchange,
            'routing_key': message.routing_key,
            'serializer': self.settings.serializer,
            'sent': time_to_string(datetime.datetime.now(timezone(self.settings.broker_timezone))),
            'count': self.count
        }, message.metadata)
    )
    producer.publish(jsons.scrub(final_data), serializer=self.settings.serializer)
    # Monotonic per-publisher message counter, recorded in _meta above.
    self.count += 1
def add_child(self, child):
    """
    Add a child to the tree. Extends discards all comments and
    whitespace-only Text nodes. Non-whitespace Text, and any other nodes,
    are handed to the parent class, which raises a syntax error for them.
    """
    discard = isinstance(child, Comment) or (
        isinstance(child, Text) and not child.text.strip())
    if not discard:
        super(Extends, self).add_child(child)
def handle_next_export(self):
    """
    Retrieve and fully evaluate the next export task, including resolution of any sub-tasks requested by the
    import client such as requests for binary data, observation, etc.

    :return:
        An instance of ExportStateCache, the 'state' field contains the state of the export after running as many
        sub-tasks as required until completion or failure. If there were no jobs to run this returns None.
        :complete:
            A job was processed and completed. The job has been marked as complete in the database
        :continue:
            A job was processed, more information was requested and sent, but the job is still active
        :failed:
            A job was processed but an error occurred during processing
        :confused:
            A job was processed, but the importer returned a response which we couldn't recognise
    """
    state = None
    # Keep feeding the previous state back in until the sub-task handler
    # reports either "no work" (None) or a finished task (export_task None).
    while True:
        state = self._handle_next_export_subtask(export_state=state)
        if state is None:
            return None
        elif state.export_task is None:
            return state
def _handle_next_export_subtask(self, export_state=None):
    """
    Process the next export sub-task, if there is one.

    :param ExportState export_state:
        If provided, this is used instead of the database queue, in effect directing the exporter to process the
        previous export again. This is used to avoid having to query the database when we know already what needs
        to be done. It also maintains a cache of the entity so we don't have to re-acquire it on multiple exports.
    :return:
        A :class:`meteorpi_db.exporter.MeteorExporter.ExportStateCache` representing the state of the export, or
        None if there was nothing to do.
    """
    # Use a cached state, or generate a new one if required
    if export_state is None or export_state.export_task is None:
        export = self.db.get_next_entity_to_export()
        if export is not None:
            export_state = self.ExportState(export_task=export)
        else:
            return None
    try:
        auth = (export_state.export_task.target_user,
                export_state.export_task.target_password)
        target_url = export_state.export_task.target_url
        response = post(url=target_url, verify=False,
                        json=export_state.entity_dict,
                        auth=auth)
        response.raise_for_status()
        json = response.json()
        state = json['state']
        if state == 'complete':
            return export_state.fully_processed()
        elif state == 'need_file_data':
            # Importer asked for the binary payload of a file record; stream
            # it as multipart and report partial progress.
            file_id = json['file_id']
            file_record = self.db.get_file(repository_fname=file_id)
            if file_record is None:
                return export_state.failed()
            with open(self.db.file_path_for_id(file_id), 'rb') as file_content:
                multi = MultipartEncoder(fields={'file': ('file', file_content, file_record.mime_type)})
                post(url="{0}/data/{1}/{2}".format(target_url, file_id, file_record.file_md5),
                     data=multi, verify=False,
                     headers={'Content-Type': multi.content_type},
                     auth=auth)
            return export_state.partially_processed()
        elif state == 'continue':
            return export_state.partially_processed()
        else:
            # Unrecognised response from the importer.
            return export_state.confused()
    except HTTPError:
        traceback.print_exc()
        return export_state.failed()
    except ConnectionError:
        traceback.print_exc()
        return export_state.failed()
def extract_options_dict(template, options):
    """Extract options from a dictionary against the template.

    Yields (option, value) pairs: the caller-supplied value when the option
    is present in ``options``, otherwise a Default wrapping the template's
    default factory applied to the current environment.
    """
    # Iterate keys only — the values were unpacked but never used.
    for option in template:
        if options and option in options:
            yield option, options[option]
        else:
            yield option, Default(template[option]['default'](os.environ))
def from_apps(cls, apps):
    "Takes in an Apps and returns a VersionedProjectState matching it"
    app_models = {}
    # Include swapped models so the state mirrors the full app registry.
    for model in apps.get_models(include_swapped=True):
        model_state = VersionedModelState.from_model(model)
        # Keyed by (app_label, lowercased model name), matching Django's
        # project-state convention.
        app_models[(model_state.app_label, model_state.name.lower())] = model_state
    return cls(app_models)
def search_tags(self, tags):
    """
    Search assets by passing a list of one or more tags.

    Returns a distinct queryset (a tag join can duplicate rows) ordered by
    file name.
    """
    qs = self.filter(tags__name__in=tags).order_by('file').distinct()
    return qs
def _gather_reverses(self):
    """
    Get all the related objects that point to this
    object that we need to clone. Uses self.clone_related
    to find those objects.

    :return: dict with 'm2m' and 'reverse' buckets, each mapping a relation
        name to a (field name, related objects) tuple.
    """
    old_reverses = {'m2m': {}, 'reverse': {}}
    for reverse in self.clone_related:
        # _gather_reverse classifies the relation and collects its objects.
        ctype, name, l = self._gather_reverse(reverse)
        old_reverses[ctype][reverse] = (name, l)
    return old_reverses
q44882 | Cloneable._set_m2ms | train | def _set_m2ms(self, old_m2ms):
"""
Creates the same m2m relationships that the old
object had.
"""
for k, v in old_m2ms.items():
if v:
setattr(self, k, v) | python | {
"resource": ""
} |
def _clone_reverses(self, old_reverses):
    """
    Clones all the objects that were previously gathered.

    :param old_reverses: the {'m2m': {...}, 'reverse': {...}} structure
        produced by _gather_reverses().
    """
    for ctype, reverses in old_reverses.items():
        for parts in reverses.values():
            sub_objs = parts[1]
            field_name = parts[0]
            attrs = {}
            for sub_obj in sub_objs:
                if ctype != 'm2m' and not attrs:
                    # Compute once: point the clones' FK column at this
                    # (new) object's key value.
                    field = sub_obj._meta.get_field(field_name)
                    attrs = {
                        field.column: getattr(self, field.rel.field_name)
                    }
                sub_obj._clone(**attrs)
            if ctype == 'm2m':
                # Reattach the (now cloned) objects to the m2m relation.
                setattr(self, field_name, sub_objs)
def _clone(self, **attrs):
    """
    Makes a copy of an model instance.
    for every key in **attrs value will
    be set on the new instance.
    """
    with xact():
        # Gather objs we'll need save after
        old_m2ms = self._gather_m2ms()
        old_reverses = self._gather_reverses()
        for k, v in attrs.items():
            setattr(self, k, v)
        # Do the clone
        self.prep_for_clone()
        self.validate_unique()
        # Prevent last save from changing
        self.save(last_save=self.last_save)
        # save m2ms
        self._set_m2ms(old_m2ms)
        # Prevent last save from changing
        self.save(last_save=self.last_save)
        # save reverses
        self._clone_reverses(old_reverses)
def _delete_reverses(self):
    """
    Delete all objects that would have been cloned
    on a clone command. This is done separately because
    there may be m2m and other relationships that
    would have not been deleted otherwise.
    """
    for reverse in self.clone_related:
        self._delete_reverse(reverse)
    # Also clear auto-created m2m through tables that clone_related
    # doesn't already cover.
    for field in self._meta.local_many_to_many:
        if field.rel.through and \
                field.rel.through._meta.auto_created and not \
                field.name in self.clone_related:
            man = getattr(self, field.name)
            man.clear()
def delete(self, *args, **kwargs):
    """
    Delete clonable relations first, since they may be objects that
    wouldn't otherwise be deleted, then defer to the normal delete.

    Pass skip_reverses=True to leave the clonable relations untouched.
    """
    if not kwargs.pop('skip_reverses', False):
        self._delete_reverses()
    return super(Cloneable, self).delete(*args, **kwargs)
def register_related(cls, related_name):
    """
    Register a related item that should be cloned
    when this model is.

    :param related_name: Use the name you would use in filtering
        i.e.: book not book_set.
    """
    if not hasattr(cls, '_clone_related'):
        cls._clone_related = []
    # Normalize tuples (or other sequences) to a mutable list.
    if not isinstance(cls._clone_related, list):
        cls._clone_related = list(cls._clone_related)
    if related_name not in cls._clone_related:
        cls._clone_related.append(related_name)
def get_version(self, state=None, date=None):
    """
    Get a particular version of an item

    :param state: The state you want to get.
    :param date: Get a version that was published before or on this date.
    :return: the most recently published matching version, or None.
    """
    version_model = self._meta._version_model
    q = version_model.objects.filter(object_id=self.pk)
    if state:
        # NOTE(review): this replaces (not refines) the queryset above and
        # switches to the `normal` manager — confirm that is intentional.
        q = version_model.normal.filter(object_id=self.pk, state=state)
    if date:
        q = q.filter(date_published__lte=date)
    q = q.order_by('-date_published')
    # Slice to one row so only a single record is fetched.
    results = q[:1]
    if results:
        return results[0]
    return None
def unpublish(self):
    """
    Unpublish this item.

    This will set and currently published versions to
    the archived state and delete all currently scheduled
    versions.  May only be called on the draft version.
    """
    assert self.state == self.DRAFT
    with xact():
        self._publish(published=False)
        # Delete all scheduled items
        klass = self.get_version_class()
        for obj in klass.normal.filter(object_id=self.object_id, state=self.SCHEDULED):
            obj.delete()
def publish(self, user=None, when=None):
    """
    Publishes a item and any sub items.

    A new transaction will be started if
    we aren't already in a transaction.

    Should only be run on draft items.

    :param user: user responsible for the publish; recorded by username
        ('code' when absent).
    :param when: optional publish datetime; defaults to now.
    """
    assert self.state == self.DRAFT
    user_published = 'code'
    if user:
        user_published = user.username
    now = timezone.now()
    with xact():
        # If this item hasn't got live yet and no new date was specified
        # delete the old scheduled items and schedule this one on that date
        published = False
        if getattr(self._meta, '_is_view', False):
            published = self.is_published
        else:
            published = self.object.is_published
        if not when and not published and self.last_scheduled:
            klass = self.get_version_class()
            for obj in klass.normal.filter(object_id=self.object_id,
                                           last_scheduled=self.last_scheduled,
                                           state=self.SCHEDULED):
                # Reuse the previously scheduled publish date.
                when = self.date_published
                obj.delete()
        when = when or now
        # Drafts get preserved so save the
        # time we last cloned this
        if self.state == self.DRAFT:
            self.last_scheduled = now
            self.date_published = when
            self.save(last_save=now)
        self._clone()
        self.user_published = user_published
        self.state = self.SCHEDULED
        self.save()
        self.schedule(when=when)
q44891 | BaseVersionedModel.make_draft | train | def make_draft(self):
    """
    Make this version the current draft.

    Deletes the existing draft (if any), then flips this version's
    state to DRAFT and clones its related objects. No-op when this
    version already is the draft. Must be called on a version-model
    instance, not on the base model.
    """
    assert self.__class__ == self.get_version_class()
    # If this is already the draft there is nothing to do.
    if self.state == self.DRAFT:
        return
    with xact():
        # Delete whatever is currently the draft for this object.
        try:
            klass = self.get_version_class()
            old_draft = klass.normal.get(object_id=self.object_id,
                                         state=self.DRAFT)
            old_draft.delete()
        except klass.DoesNotExist:
            pass
        # Set this to draft and save.
        self.state = self.DRAFT
        # Make last_save match last_scheduled so the new draft reads as
        # unchanged relative to the version it was made from.
        self.last_save = self.last_scheduled
        self._clone() | python | {
"resource": ""
} |
def purge_archives(self):
    """
    Trim the archive for this item.

    Keeps only the NUM_KEEP_ARCHIVED most recently saved archived
    versions and deletes the rest, relations first.
    """
    version_cls = self.get_version_class()
    stale = version_cls.normal.filter(
        object_id=self.object_id,
        state=self.ARCHIVED,
    ).order_by('-last_save')[self.NUM_KEEP_ARCHIVED:]
    for archived in stale:
        # Remove cloned relations first, then the version row itself.
        archived._delete_reverses()
        version_cls.normal.filter(vid=archived.vid).delete()
q44893 | BaseVersionedModel.status_line | train | def status_line(self):
    """
    Return a human-readable status string for this version.

    Most interesting when called on the draft: the draft's save and
    schedule timestamps are compared against the live version to report
    whether it is merely saved, published, or scheduled to publish.
    A short-formatted date is appended when one is available.
    """
    date = self.date_published
    status = self.state.title()
    if self.state == self.DRAFT:
        # Check if this item has changed since our last publish.
        status = "Draft saved"
        date = self.last_save
        if date and self.last_save == self.last_scheduled:
            # The draft is unchanged since it was last scheduled.
            # Compare against the live version's save time to decide
            # whether that schedule already went live or is pending.
            if self.v_last_save:
                if self.last_scheduled >= self.v_last_save:
                    status = self.PUBLISHED.title()
                # Scheduled after what is currently live: this will go
                # live at some point in the future.
                if self.last_scheduled > self.v_last_save:
                    status = "Publish Scheduled"
            else:
                status = "Publish Scheduled"
            date = self.date_published
    if date:
        status = "%s: %s" % (status, formats.date_format(date, "SHORT_DATE_FORMAT"))
    return status | python | {
"resource": ""
} |
def schedule(self, when=None, action=None, **kwargs):
    """
    Schedule this item to be published.

    :param when: Date/time when this item should go live; None means now.
    :param action: Accepted for signature compatibility but ignored --
        the '_publish' action is always used, whatever the caller passes.
    """
    super(BaseVersionedModel, self).schedule(when=when, action='_publish',
                                             **kwargs)
def save(self, *args, **kwargs):
    """
    Save this version inside a transaction.

    A brand-new version (no vid yet) always starts life as the draft,
    and an unpublished base object is created automatically when this
    version is not yet attached to one.
    """
    with xact():
        if not self.vid:
            # First save of this version: it must begin as the draft.
            self.state = self.DRAFT
        if not self.object_id:
            # No base row yet -- create an unpublished one and attach it.
            new_base = self._meta._base_model(is_published=False)
            new_base.save(*args, **kwargs)
            self.object = new_base
        super(VersionModel, self).save(*args, **kwargs)
def parser():
    """Return the configured search query parser.

    The COLLECTIONS_QUERY_PARSER setting may hold either the parser
    object itself or a dotted import path to it.
    """
    configured = current_app.config['COLLECTIONS_QUERY_PARSER']
    if isinstance(configured, six.string_types):
        return import_string(configured)
    return configured
def query_walkers():
    """Return an instance of every configured query walker.

    Entries in COLLECTIONS_QUERY_WALKERS may be classes or dotted
    import paths; paths are resolved before instantiation.
    """
    instances = []
    for walker in current_app.config['COLLECTIONS_QUERY_WALKERS']:
        if isinstance(walker, six.string_types):
            walker = import_string(walker)
        instances.append(walker())
    return instances
def pad(cls, data):
    """
    PKCS#7-pad ``data`` to a whole number of AES blocks.

    Accepts str or bytes. On Python 3 text is UTF-8 encoded first and
    bytes are returned; on Python 2 the input type is preserved. A full
    block of padding is added when the input is already block-aligned.
    """
    if sys.version_info > (3, 0):
        try:
            data = data.encode("utf-8")
        except AttributeError:
            pass  # already bytes
        pad_len = AES.block_size - (len(data) % AES.block_size)
        return data + bytes([pad_len]) * pad_len
    else:
        pad_len = AES.block_size - len(data) % AES.block_size
        return data + pad_len * chr(pad_len)
def unpad(cls, data):
    """
    Strip PKCS#7 padding added by ``pad``.

    The pad length is read from the final byte. On Python 3 the
    unpadded bytes are decoded back to str before being returned.
    """
    pad_len = ord(data[len(data) - 1:])
    if sys.version_info > (3, 0):
        return data[:-pad_len].decode()
    else:
        return data[:-pad_len]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.