_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def _execute(self, sql, params):
    """Run *sql* with *params*, reconnecting once if the server dropped us.

    MySQL error codes treated as "connection lost":
      2006 (CR_SERVER_GONE_ERROR): MySQL server has gone away
      2013 (CR_SERVER_LOST): Lost connection to MySQL server during query
      2055 (CR_SERVER_LOST_EXTENDED): Lost connection to MySQL server at '%s', system error: %d
    """
    RECONNECT_CODES = (2006, 2013, 2055)
    try:
        return self._execute_unsafe(sql, params)
    except MySQLdb.OperationalError as ex:
        if ex.args[0] not in RECONNECT_CODES:
            raise
        self._log("Connection with server is lost. Trying to reconnect.")
        self.connect()
        # One retry only: if the connection drops again, the error escapes.
        return self._execute_unsafe(sql, params)
def requirements(self):
    """Ensure every apt package required by this class is installed.

    Iterates over ``self.pkgs_required`` and installs any package that is
    not installed yet.  Lock failures (typically missing admin rights for
    ``self.username``) and unexpected errors are logged and re-raised;
    package names unknown to the distribution are only logged.
    """
    cache = apt.cache.Cache()
    # Use a distinct name for the cache entry: the original code rebound
    # the loop variable ``pkg``, shadowing the package name.
    for pkg_name in self.pkgs_required:
        try:
            pkg = cache[pkg_name]  # raises KeyError for unknown packages
            if not pkg.is_installed:
                try:
                    pkg.mark_install()
                    cache.commit()
                except LockFailedException as lfe:
                    logging.error(
                        'Errore "{}" probabilmente l\'utente {} non ha i '
                        'diritti di amministratore'.format(lfe,
                                                           self.username))
                    # Bare raise preserves the original traceback.
                    raise
                except Exception as e:
                    logging.error('Errore non classificato "{}"'.format(e))
                    raise
        except KeyError:
            logging.error('Il pacchetto "{}" non e\' presente in questa'
                          ' distribuzione'.format(pkg_name))
def makeOrmValuesSubqueryCondition(ormSession, column, values: List[Union[int, str]]):
    """Build an ``IN`` condition for *column* against *values*.

    :param ormSession: The orm session instance
    :param column: The column from the Declarative table, eg TableItem.colName
    :param values: A list of string or int values
    """
    if isPostGreSQLDialect(ormSession.bind):
        # PostgreSQL copes with plain IN lists.
        return column.in_(values)
    if not isMssqlDialect(ormSession.bind):
        raise NotImplementedError()
    # MSSQL: route the values through a textual sub-select rather than a
    # (potentially huge) literal IN list.  Any column works; it only
    # provides a name for the subquery.
    subQuery = (ormSession.query(column)
                .from_statement(_createMssqlSqlText(values)))
    return column.in_(subQuery)
def makeCoreValuesSubqueryCondition(engine, column, values: List[Union[int, str]]):
    """Build an ``IN`` condition for a Core *column* against *values*.

    :param engine: The database engine, used to determine the dialect
    :param column: The column, eg TableItem.__table__.c.colName
    :param values: A list of string or int values
    """
    if isPostGreSQLDialect(engine):
        # PostgreSQL copes with plain IN lists.
        return column.in_(values)
    if not isMssqlDialect(engine):
        raise NotImplementedError()
    # MSSQL: express the values as a textual sub-select.
    return column.in_(_createMssqlSqlText(values))
def _open_connection(self):
    """Open a new connection socket to the CPS."""
    if self._scheme == 'http':
        self._connection = httplib.HTTPConnection(self._host, self._port, strict=False)
        return
    if self._scheme == 'unix':
        family, proto, target = socket.AF_UNIX, 0, self._path
    elif self._scheme == 'tcp':
        family, proto, target = socket.AF_INET, socket.SOL_TCP, (self._host, self._port)
    else:
        raise ConnectionError("Connection scheme not recognized!")
    # Raw socket transports share the same create-then-connect sequence.
    self._connection = socket.socket(family, socket.SOCK_STREAM, proto)
    self._connection.connect(target)
q44605 | Connection._send_request | train | def _send_request(self, xml_request):
""" Send the prepared XML request block to the CPS using the corect protocol.
Args:
xml_request -- A fully formed xml request string for the CPS.
Returns:
The raw xml response string.
Raises:
ConnectionError -- Can't establish a connection with the server.
"""
if self._scheme == 'http':
return self._send_http_request(xml_request)
else:
return self._send_socket_request(xml_request) | python | {
"resource": ""
} |
def _send_http_request(self, xml_request):
    """Send a request via HTTP protocol.

    POSTs *xml_request* to the CPS; if the pooled connection has gone
    stale, reopens it and retries exactly once.

    Args:
        xml_request -- A fully formed xml request string for the CPS.
    Returns:
        The raw xml response string.
    """
    headers = {"Host": self._host, "Content-Type": "text/xml",
               "Recipient": self._storage}

    def post_once():
        self._connection.request("POST", self._selector_url, xml_request, headers)
        return self._connection.getresponse()

    try:
        response = post_once()
    except (httplib.CannotSendRequest, httplib.BadStatusLine):
        # The socket went bad between requests: rebuild it and retry.
        Debug.warn("\nRestarting socket, resending message!")
        self._open_connection()
        response = post_once()
    return response.read()
def similar_text(self, *args, **kwargs):
    """Search for documents similar to directly supplied text or to the
    textual content of an existing document.

    Args:
        text -- Text to found something similar to.
        len -- Number of keywords to extract from the source.
        quota -- Minimum number of keywords matching in the destination.
    Keyword args:
        offset -- Number of results to skip before returning the following ones.
        docs -- Number of documents to retrieve. Default is 10.
        query -- An optional query that all found documents have to match against. See Search().
        See Request.__init__()
    Returns:
        A ListResponse object.
    """
    request = SimilarRequest(self, *args, mode='text', **kwargs)
    return request.send()
def makedirs_safe(fulldir):
    """Create *fulldir* (and missing parents) like the shell's ``mkdir -p``.

    Safe under concurrent access: always attempts the creation (EAFP) and
    ignores "already exists", so a directory created by another process
    between calls is not an error.  Any other OSError (permissions, ...)
    propagates.
    """
    try:
        os.makedirs(fulldir)
    except OSError as exc:
        import errno
        if exc.errno != errno.EEXIST:
            raise
def str_(name):
    """Return the string representation of the given *name*.

    A ``bytes`` value (that is not already ``str`` — the distinction
    matters on Python 2) is decoded as UTF-8; a ``str`` is returned
    unchanged.
    """
    needs_decode = isinstance(name, bytes) and not isinstance(name, str)
    return name.decode('utf8') if needs_decode else name
def qstat(jobid, context='grid'):
    """Queries status of a given job.

    Keyword parameters:

    jobid
      The job identifier as returned by qsub()

    context
      The setshell context in which we should try a 'qsub'. Normally you
      don't need to change the default. This variable can also be set to a
      context dictionary in which case we just setup using that context
      instead of probing for a new one, what can be fast.

    Returns a dictionary with the specific job properties
    """
    from .setshell import sexec
    scmd = ['qstat', '-j', '%d' % jobid, '-f']
    logger.debug("Qstat command '%s'", ' '.join(scmd))
    data = str_(sexec(context, scmd, error_on_nonzero=False))
    # Parse the "key  value" lines of the qstat -f output.
    retval = {}
    for raw_line in data.split('\n'):
        line = raw_line.strip()
        if line.lower().find('do not exist') != -1:
            # Unknown job: report an empty property set.
            return {}
        if not line or line.find(10 * '=') != -1:
            # Skip blanks and the "====" section separators.
            continue
        parts = QSTAT_FIELD_SEPARATOR.split(line, 1)
        if len(parts) == 2:
            retval[parts[0]] = parts[1]
    return retval
def qdel(jobid, context='grid'):
    """Halts a given job.

    Keyword parameters:

    jobid
      The job identifier as returned by qsub()

    context
      The setshell context in which we should try a 'qsub'. Normally you
      don't need to change the default. This variable can also be set to a
      context dictionary in which case we just setup using that context
      instead of probing for a new one, what can be fast.
    """
    from .setshell import sexec
    scmd = ['qdel', '%d' % jobid]
    logger.debug("Qdel command '%s'", ' '.join(scmd))
    sexec(context, scmd, error_on_nonzero=False)
def update_state(world):
    """Advance *world* one generation of Conway's Game of Life.

    Updates are applied in place while scanning, matching the original
    behaviour (later cells in the scan see already-updated neighbours).

    Args:
        world (list[list]): A square matrix of cells
    Returns: None
    """
    size = len(world)

    def wrap(index):
        """Wrap an index around the other end of the array."""
        return index % size

    # The eight neighbour offsets around a cell.
    offsets = ((0, 1), (1, 1), (1, 0), (1, -1),
               (0, -1), (-1, -1), (-1, 0), (-1, 1))
    for x in range(size):
        for y in range(size):
            cell = world[x][y]
            # Cells may opt out of the rules for this tick.
            if not cell.allow_change.get():
                continue
            live_neighbors = sum(
                world[wrap(x + dx)][wrap(y + dy)].value
                for dx, dy in offsets)
            if cell.value:
                # A live cell survives only with two or three live
                # neighbours; otherwise it dies (under/over-population).
                if live_neighbors not in (2, 3):
                    cell.value = False
            elif live_neighbors == 3:
                # A dead cell with exactly three live neighbours is born.
                cell.value = True
def click_event(event):
    """On click, bring the cell under the cursor to Life."""
    grid_x_coord = int(divmod(event.x, cell_size)[0])
    grid_y_coord = int(divmod(event.y, cell_size)[0])
    world[grid_x_coord][grid_y_coord].value = True
    # BUG FIX: the colour lookup previously indexed with undefined globals
    # ``x``/``y`` instead of the clicked cell's own coordinates, raising
    # NameError (or colouring the wrong cell if they happened to exist).
    color = world[grid_x_coord][grid_y_coord].color_alive.get_as_hex()
    canvas.itemconfig(canvas_grid[grid_x_coord][grid_y_coord], fill=color)
def draw_canvas():
    """Render the tkinter canvas based on the state of ``world``."""
    for x, row in enumerate(world):
        for y, cell in enumerate(row):
            # Pick the palette matching the cell's life state.
            palette = cell.color_alive if cell.value else cell.color_dead
            canvas.itemconfig(canvas_grid[x][y], fill=palette.get_as_hex())
def _make_context(context=None):
    """Create the namespace of items already pre-imported when using shell.

    Accepts a dict with the desired namespace as the key, and the object as
    the value.
    """
    # Base entries first; _iter_context() and the caller-supplied dict may
    # override them, in that order.
    namespace = {'db': db, 'session': db.session}
    namespace.update(_iter_context())
    namespace.update(context or {})
    return namespace
def init(**kwargs):
    """Initialize the specified names in the specified databases.

    The general process is as follows:
     - Ensure the database in question exists
     - Ensure all tables exist in the database.
    """
    # TODO: Iterate through all engines in name set.
    database = kwargs.pop('database', False)
    if database and not database_exists(engine['default'].url):
        create_database(engine['default'].url, encoding='utf8')
        clear_cache()

    # PEP 8 (E731): named functions instead of assigned lambdas.
    def expression(target, table):
        return table.create(target)

    def test(target, table):
        return table.exists(target)

    op(expression, test=test, primary='init', secondary='create', **kwargs)
def clear(**kwargs):
    """Clear the specified names from the specified databases.

    This can be highly destructive as it destroys tables and when all names
    are removed from a database, the database itself.
    """
    database = kwargs.pop('database', False)

    # PEP 8 (E731): named functions instead of assigned lambdas.
    def expression(target, table):
        return table.drop(target)

    def test(target, table):
        return not database_exists(target.url) or not table.exists(target)

    # TODO: Iterate through all engines in name set.
    if database and database_exists(engine['default'].url):
        drop_database(engine['default'].url)
        clear_cache()
    # Drop in reverse dependency order so foreign keys don't block us.
    op(expression, reversed(metadata.sorted_tables), test=test,
       primary='clear', secondary='drop', **kwargs)
def flush(**kwargs):
    """Flush the specified names from the specified databases.

    This can be highly destructive as it destroys all data.
    """
    # PEP 8 (E731): named functions instead of assigned lambdas.
    def expression(target, table):
        return target.execute(table.delete())

    def test(target, table):
        return not table.exists(target)

    # Delete in reverse dependency order so foreign keys don't block us.
    op(expression, reversed(metadata.sorted_tables), test=test,
       primary='flush', secondary='flush', **kwargs)
def is_table_included(table, names):
    """Determines if the table is included by reference in the names.

    A table can be named by its component or its model (using the
    short-name or a full python path).

    eg. 'package.models.SomeModel' or 'package:SomeModel' or 'package'
    would all include 'SomeModel'.
    """
    # No names indicates that every table is included.
    if not names:
        return True
    # Introspect the table: its mapped model and owning component.
    model = table.class_
    component = model._component
    # Any of these three spellings selects the table.
    candidates = (
        component,                                      # component name
        '%s.%s' % (model.__module__, model.__name__),   # full python path
        '%s:%s' % (component, model.__name__),          # short name
    )
    return any(candidate in names for candidate in candidates)
def secret_loader(self, callback):
    """
    Decorate a method that receives a key id and returns a secret key
    """
    # Reject missing or non-callable arguments up front.
    if not (callback and callable(callback)):
        raise Exception("Please pass in a callable that loads secret keys")
    self.secret_loader_callback = callback
    return callback
def context_loader(self, callback):
    """
    Decorate a method that receives a key id and returns an object or dict
    that will be available in the request context as g.cavage_context
    """
    # Reject missing or non-callable arguments up front.
    if not (callback and callable(callback)):
        raise Exception("Please pass in a callable that loads your context.")
    self.context_loader_callback = callback
    return callback
def replay_checker(self, callback):
    """
    Decorate a method that receives the request headers and returns a bool
    indicating whether we should proceed with the request. This can be used
    to protect against replay attacks. For example, this method could check
    the request date header value is within a delta value of the server time.
    """
    # Reject missing or non-callable arguments up front.
    if not (callback and callable(callback)):
        raise Exception("Please pass in a callable that protects against replays")
    self.replay_checker_callback = callback
    return callback
def makeicons(source):
    """
    Create all the necessary icons from source image.

    For every entry of ``icon_sizes`` the thumbnail function is applied,
    the result is centred on a white RGBA canvas of the target size and
    saved under the icon's name in ``env.dir``.
    """
    im = Image.open(source)
    # .items() works on both Python 2 and 3 (.iteritems() was py2-only).
    for name, (_, w, h, func) in icon_sizes.items():
        print('Making icon %s...' % name)
        tn = func(im, (w, h))
        bg = Image.new('RGBA', (w, h), (255, 255, 255))
        # Integer centring offsets: ``//`` keeps these ints on Python 3,
        # where ``/`` would yield floats that paste() rejects.
        x = (w // 2) - (tn.size[0] // 2)
        y = (h // 2) - (tn.size[1] // 2)
        bg.paste(tn, (x, y))
        bg.save(path.join(env.dir, name))
def dump_element(element):
    """
    Dumps the content of the given ElementBase object to a string

    :param element: An ElementBase object
    :return: A full description of its content
    :raise TypeError: Invalid object
    """
    # Explicit type check: the previous ``try: assert ... except
    # AssertionError`` pattern was silently skipped when Python runs
    # with -O, letting invalid objects through.
    if not isinstance(element, sleekxmpp.ElementBase):
        raise TypeError("Not an ElementBase: {0}".format(type(element)))
    # Build the description incrementally.
    output = StringIO()
    output.write("ElementBase : {0}\n".format(type(element)))
    output.write("- name......: {0}\n".format(element.name))
    output.write("- namespace.: {0}\n".format(element.namespace))
    output.write("- interfaces:\n")
    for itf in sorted(element.interfaces):
        output.write("\t- {0}: {1}\n".format(itf, element[itf]))
    if element.sub_interfaces:
        output.write("- sub-interfaces:\n")
        for itf in sorted(element.sub_interfaces):
            output.write("\t- {0}: {1}\n".format(itf, element[itf]))
    return output.getvalue()
def create_room(self, room, service, nick, config=None,
                callback=None, errback=None, room_jid=None):
    """
    Prepares the creation of a room.

    The callback is a method with two arguments:
      - room: Bare JID of the room
      - nick: Nick used to create the room

    The errback is a method with 4 arguments:
      - room: Bare JID of the room
      - nick: Nick used to create the room
      - condition: error category (XMPP specification or "not-owner")
      - text: description of the error

    :param room: Name of the room
    :param service: Name of the XMPP MUC service
    :param nick: Nick used to join (and thereby create) the room
    :param config: Configuration of the room
    :param callback: Method called back on success
    :param errback: Method called on error
    :param room_jid: Forced room JID
    """
    self.__logger.debug("Creating room: %s", room)
    with self.__lock:
        if not room_jid:
            # Generate/Format the room JID if not given
            room_jid = sleekxmpp.JID(local=room, domain=service).bare
            self.__logger.debug("... Room JID: %s", room_jid)
        if not self.__rooms:
            # First pending room: start listening to presence stanzas so
            # the creation result can be tracked.
            self.__xmpp.add_event_handler("presence", self.__on_presence)
        # Store the pending-room state; consumed by the presence handler.
        self.__rooms[room_jid] = RoomData(room_jid, nick, config,
                                          callback, errback)
        # Send the presence, i.e. request creation of the room
        self.__muc.joinMUC(room_jid, nick)
def __on_presence(self, data):
    """
    Got a presence stanza.

    Checks whether the stanza reports the result of a pending room
    creation; on success, pushes the requested room configuration and
    notifies the creator.

    :param data: The received presence stanza
    """
    room_jid = data['from'].bare
    muc_presence = data['muc']
    room = muc_presence['room']
    nick = muc_presence['nick']
    with self.__lock:
        try:
            # Get room state machine
            room_data = self.__rooms[room]
            if room_data.nick != nick:
                # Not about the room creator
                return
        except KeyError:
            # Unknown room (or not a room)
            return
        else:
            # Clean up, as we got what we wanted
            del self.__rooms[room]
            if not self.__rooms:
                # No more rooms: no need to listen to presence anymore
                self.__xmpp.del_event_handler("presence", self.__on_presence)
        if data['type'] == 'error':
            # Got an error: update the state machine and clean up
            self.__safe_errback(room_data, data['error']['condition'],
                                data['error']['text'])
        elif muc_presence['affiliation'] != 'owner':
            # We are not the owner the room: consider it an error
            self.__safe_errback(room_data, 'not-owner',
                                'We are not the owner of the room')
        else:
            # Success: we own the room
            # Setup room configuration
            try:
                config = self.__muc.getRoomConfig(room_jid)
            except ValueError:
                # Can't differentiate IQ errors from a "no configuration"
                # result: consider it OK
                self.__logger.warning("Can't get the configuration form for "
                                      "XMPP room %s", room_jid)
                self.__safe_callback(room_data)
            else:
                # Prepare our configuration
                custom_values = room_data.configuration or {}
                # Filter options that are not known from the server
                known_fields = config['fields']
                to_remove = [key for key in custom_values
                             if key not in known_fields]
                for key in to_remove:
                    del custom_values[key]
                # Send configuration (use a new form to avoid OpenFire to have
                # an internal error)
                form = self.__xmpp['xep_0004'].make_form("submit")
                form['values'] = custom_values
                self.__muc.setRoomConfig(room_jid, form)
                # Call back the creator
                self.__safe_callback(room_data)
def __call(self):
    """
    Invokes the callback with the collected successes and errors, then
    flags this handler as called.  Callback exceptions are logged and
    leave the handler un-flagged.
    """
    try:
        if self.__callback is not None:
            self.__callback(self.__successes, self.__errors)
    except Exception as ex:
        self.__logger.exception("Error calling back count down "
                                "handler: %s", ex)
        return
    self.__called = True
def __mark(self, element, mark_set):
    """
    Marks an element

    :param element: The element to mark
    :param mark_set: The set corresponding to the mark
    :return: True if the element was known
    """
    try:
        # The given element may be of a different type than the stored
        # one (JID instead of str, ...): recover the original object.
        original = self.__elements.pop(element)
        mark_set.add(original)
    except KeyError:
        # Not one of the awaited elements.
        return False
    if not self.__elements:
        # Everything accounted for: fire the callback.
        self.__call()
    return True
def do_refresh(self, args):
    """Refresh the view of the eni."""
    client = AwsConnectionFactory.getEc2Client()
    description = client.describe_network_interfaces(
        NetworkInterfaceIds=[self.physicalId])
    pprint(description)
def do_super(parser, token):
    '''
    Access the parent templates block.

        {% super name %}
    '''
    # ``token`` carries the parent block name to delegate to.
    name = token.strip()
    # Builds the AST for ``yield from super().<name>(context)``.
    # NOTE(review): the ``_a`` helpers presumably wrap ast node
    # construction (Call/Attribute/Name) — confirm against their
    # definitions before changing this.
    return ast.YieldFrom(
        value=_a.Call(_a.Attribute(_a.Call(_a.Name('super')), name), [
            # _a.Attribute(_a.Name('context'), 'parent'),
            _a.Name('context'),
        ])
    )
def macro(parser, token):
    '''
    Works just like block, but does not render.
    '''
    macro_name = token.strip()
    # Compile the body into a method named after the macro.
    parser.build_method(macro_name, endnodes=['endmacro'])
    # Yield an empty string so the macro contributes no output in place.
    return ast.Yield(value=ast.Str(s=''))
def get_link(self, peer):
    """
    Retrieves a link to the peer

    :param peer: A Peer description
    :return: A link to the peer, None if none available
    """
    assert isinstance(peer, Peer)
    # First protocol able to produce a link wins.
    for protocol in self._protocols:
        try:
            return protocol.get_link(peer)
        except ValueError:
            # This protocol can't handle the peer: try the next one.
            continue
    # No protocol could provide a link.
    return None
def resolve_composed_functions(data, recursive=True):
    """
    Calls `ComposedFunction`s and returns its return value. By default, this
    function will recursively iterate dicts, lists, tuples, and sets and
    replace all `ComposedFunction`s with their return value.
    """
    if isinstance(data, ComposedFunction):
        data = data()
    if recursive:
        if isinstance(data, dict):
            for key, value in data.items():
                data[key] = resolve_composed_functions(
                    value,
                    recursive=recursive,
                )
        elif isinstance(data, list):
            for index, value in enumerate(data):
                data[index] = resolve_composed_functions(
                    value,
                    recursive=recursive,
                )
        elif isinstance(data, tuple):
            # BUG FIX: tuples are immutable and sets are unindexable, so
            # the previous index-assignment raised TypeError for both.
            # Rebuild them instead.
            data = tuple(resolve_composed_functions(value, recursive=recursive)
                         for value in data)
        elif isinstance(data, set):
            data = {resolve_composed_functions(value, recursive=recursive)
                    for value in data}
    return data
def _sync_and_resolve(self, config, resolver):
    '''Synchronize all items represented by the config according to the
    resolver and return a set of keys that have been resolved.'''
    resolved = set()
    for key, raw_theirs in config.items(self._name):
        theirs = self._real_value_of(raw_theirs)
        if key not in self:
            # New key: take their value as-is.
            value = theirs
        else:
            # Conflict: let the resolver choose between ours and theirs.
            value = resolver(self._name, key, self[key], theirs)
        self._set_value(config, key, value)
        resolved.add(key)
    return resolved
def upload(self, baseurl, filename):
    """Upload *filename* to this work.

    Prof is stateful ("really dirty"): the project page and then the
    upload page must be visited before the actual file POST is accepted.
    """
    # Re-get the project page to refresh server-side session state.
    prof_session.post(baseurl + "/main.php", params={'id_projet': self.field})
    # Visit the upload page for this work.
    prof_session.get(baseurl + "/upload.php", params={'id': int(self.work_id)})
    # Finally send the file; ``with`` guarantees the handle is closed
    # (it previously leaked).
    with open(filename, 'rb') as fichier:
        prof_session.post(baseurl + '/upload2.php',
                          files={'fichier1': fichier},
                          params={'MAX_FILE_SIZE': 1000000})
def get_all_classes(module_name):
    """Load all non-abstract classes from package.

    Returns ``(name, class)`` pairs for every concrete class defined in
    (or imported into) the module named *module_name*.
    """
    module = importlib.import_module(module_name)

    def _is_concrete_class(member):
        return isclass(member) and not isabstract(member)

    return getmembers(module, _is_concrete_class)
def get(self, table_name):
    """Load table class by name, class not yet initialized."""
    # Fail loudly with the list of known tables when the name is unknown.
    assert table_name in self.tabs, \
        "Table not avaiable. Avaiable tables: {}".format(
            ", ".join(self.tabs.keys()))
    return self.tabs[table_name]
q44638 | Tabs._update_sys_path | train | def _update_sys_path(self, package_path=None):
"""Updates and adds current directory to sys path"""
self.package_path = package_path
if not self.package_path in sys.path:
sys.path.append(self.package_path) | python | {
"resource": ""
} |
def find_tabs(self, custom_table_classes=None):
    """Finds all classes that are subcalss of Table and loads them into
    a dictionary named tables."""
    # Base classes a candidate may derive from (Table plus any extras).
    bases = [Table] + (custom_table_classes or list())
    for module_name in get_all_modules(self.package_path):
        for name, candidate in get_all_classes(module_name):
            if isclass(candidate) and any(issubclass(candidate, base)
                                          for base in bases):
                self.tabs[name] = candidate
def _connect(self):
    """Try to create a connection to the database if not yet connected."""
    if self._connection is not None:
        raise RuntimeError('Close connection first.')
    self._connection = connect(self._database, **self._kwds)
    # Autocommit mode: callers manage transactions explicitly.
    self._connection.isolation_level = None
def diff(self, report_a, report_b):
    """
    Generate a diff for two data reports.

    Matches queries between the reports by identical ``config`` and, for
    every metric/label present in both, computes the absolute change,
    percent change and (where meaningful) percentage-point change.
    """
    arguments = GLOBAL_ARGUMENTS + ['run_date']
    output = OrderedDict([
        ('a', OrderedDict([(arg, report_a[arg]) for arg in arguments])),
        ('b', OrderedDict([(arg, report_b[arg]) for arg in arguments])),
        ('queries', [])
    ])
    # (Removed: a no-op ``output['a']`` statement and a dead trailing
    # ``query_b = report_b['queries']`` assignment.)
    for query_a in report_a['queries']:
        for query_b in report_b['queries']:
            if query_a['config'] != query_b['config']:
                continue
            diff = OrderedDict()
            diff['config'] = query_a['config']
            diff['data_types'] = query_a['data_types']
            diff['data'] = OrderedDict()
            for metric, values in query_a['data'].items():
                data_type = diff['data_types'][metric]
                diff['data'][metric] = OrderedDict()
                total_a = values['total']
                total_b = query_b['data'][metric]['total']
                for label, a in values.items():
                    try:
                        b = query_b['data'][metric][label]
                    except KeyError:
                        # TODO: hack for when labels are different...
                        continue
                    change = b - a
                    percent_change = float(change) / a if a > 0 else None
                    percent_a = float(a) / total_a if total_a > 0 else None
                    percent_b = float(b) / total_b if total_b > 0 else None
                    # Point change is meaningless for totals, durations,
                    # or when either share is undefined.
                    if (label == 'total' or data_type == 'TIME'
                            or percent_a is None or percent_b is None):
                        point_change = None
                    else:
                        point_change = percent_b - percent_a
                    diff['data'][metric][label] = OrderedDict([
                        ('change', change),
                        ('percent_change', percent_change),
                        ('point_change', point_change),
                    ])
            output['queries'].append(diff)
    return output
def copy_data(self):
    """
    Copy the data from its point of origin, serializing it, storing it
    serialized as well as in its raw form, and calculate a running hash
    of the serialized representation.
    """
    # Fresh digest per call; fed with every chunk as it is written.
    HASH_FUNCTION = hashlib.sha256()
    # NOTE: the except branch handles the *non-binary* case — origins
    # without a binary iterator raise AttributeError here.
    try:
        raw_iterator = self.get_binary_iterator()
    except AttributeError:
        raw_iterator = self.get_non_binary_iterator()
        self.copy_file = tempfile.NamedTemporaryFile(mode='w+')
        for part in raw_iterator:
            encoded_part = dbsafe_encode(part)
            self.copy_file.write(encoded_part)
            # One encoded part per line so decoding can stream lazily.
            self.copy_file.write('\n')
            # Only the payload is hashed, not the newline separators.
            HASH_FUNCTION.update(encoded_part)
        self.copy_file.seek(0)
        self.data_iterator = (dbsafe_decode(line) for line in self.copy_file)
    else:
        # Binary origin: stream raw bytes straight into the temp file.
        self.copy_file = tempfile.NamedTemporaryFile(mode='w+b')
        for part in raw_iterator:
            self.copy_file.write(part)
            HASH_FUNCTION.update(part)
        self.copy_file.seek(0)
        self.data_iterator = self.copy_file
    self.new_hash = HASH_FUNCTION.hexdigest()
def get_binary_iterator(self):
    """
    Return an iterator streaming the remote file in 1 KiB chunks.
    """
    CHUNK_SIZE = 1024
    # ``iter_content`` already returns an iterator; the previous generator
    # expression wrapping it was redundant.
    # NOTE(review): requests buffers the whole body without ``stream=True``
    # — confirm whether true streaming is intended here.
    return requests.get(self.url).iter_content(CHUNK_SIZE)
def get_add_link(self):
    """
    Appends the popup=1 query string to the url so the
    destination url treats it as a popup.
    """
    url = super(TaggedRelationWidget, self).get_add_link()
    if not url:
        return url
    qs = self.get_add_qs()
    if not qs:
        return url
    return "%s&%s" % (url, urllib.urlencode(qs))
def render_view(parser, token):
    """
    Return an string version of a View with as_string method.

    First argument is the name of the view. Any other arguments
    should be keyword arguments and will be passed to the view.

    Example:

        {% render_view viewname var1=xx var2=yy %}
    """
    bits = token.split_contents()
    n = len(bits)
    if n < 2:
        # BUG FIX: the tag name was never interpolated into the message
        # (the '%s' placeholder was left unformatted).
        raise TemplateSyntaxError("'%s' takes at least one view as argument"
                                  % bits[0])
    viewname = bits[1]
    kwargs = {}
    if n > 2:
        for bit in bits[2:]:
            match = kwarg_re.match(bit)
            if not match:
                raise TemplateSyntaxError("Malformed arguments to render_view tag")
            name, value = match.groups()
            if name:
                kwargs[name] = parser.compile_filter(value)
    return StringNode(viewname, kwargs)
def bundle_view(parser, token):
    """
    Returns an string version of a bundle view. This is done by
    calling the `get_string_from_view` method of the provided bundle.

    This tag expects that the request object as well as the
    the original url_params are available in the context.

    Requires two arguments bundle and the name of the view
    you want to render. In addition, this tag also accepts
    the 'as xxx' syntax.

    Example:

        {% bundle_url bundle main_list as html %}
    """
    bits = token.split_contents()
    if len(bits) < 3:
        raise TemplateSyntaxError("'%s' takes at least two arguments"
                                  " bundle and view_name" % bits[0])
    bundle = parser.compile_filter(bits[1])
    viewname = parser.compile_filter(bits[2])
    # Detect the optional trailing "as <var>" clause.
    remainder = bits[2:]
    asvar = None
    if len(remainder) >= 2 and remainder[-2] == 'as':
        asvar = remainder[-1]
        remainder = remainder[:-2]
    return ViewNode(bundle, viewname, asvar)
def bundle_url(parser, token):
    """
    Returns an a url for given a bundle and a view name.
    This is done by calling the `get_view_url` method
    of the provided bundle.

    This tag expects that the request object as well as the
    the original url_params are available in the context.

    Requires two arguments bundle and the name of the view
    you want to render. In addition, this tag also accepts
    the 'as xxx' syntax.

    By default this tag will follow references to
    parent bundles. To stop this from happening pass
    `follow_parent=False`. Any other keyword arguments
    will be used as url keyword arguments.

    If no match is found a blank string will be returned.

    Example:

        {% bundle_url bundle "edit" obj=obj as html %}
    """
    bits = token.split_contents()
    if len(bits) < 3:
        raise TemplateSyntaxError("'%s' takes at least two arguments"
                                  " bundle and view_name" % bits[0])
    bundle = parser.compile_filter(bits[1])
    viewname = parser.compile_filter(bits[2])
    kwargs = {}
    asvar = None
    # NOTE(review): bits[2:] still contains the view name at index 0; the
    # kwarg regex below produces no named group for it so it is skipped —
    # presumably intentional, but bits[3:] would be clearer.  Confirm
    # against kwarg_re before changing.
    bits = bits[2:]
    if len(bits) >= 2 and bits[-2] == 'as':
        asvar = bits[-1]
        bits = bits[:-2]
    if len(bits):
        for bit in bits:
            match = kwarg_re.match(bit)
            if not match:
                raise TemplateSyntaxError("Malformed arguments to url tag")
            name, value = match.groups()
            if name:
                kwargs[name] = parser.compile_filter(value)
    return URLNode(bundle, viewname, kwargs, asvar)
def user_url(user, bundle):
    """
    Filter for a user object. Checks if a user has
    permission to change other users.
    """
    if not user:
        return False
    # Resolve the bundle that administers User objects.
    user_bundle = bundle.admin_site.get_bundle_for_model(User)
    if not user_bundle:
        return None
    return user_bundle.get_view_url('main', user)
def from_content(cls, content):
    """Creates an instance of the class from the HTML content of the guild's page.

    Parameters
    -----------
    content: :class:`str`
        The HTML content of the page.

    Returns
    ----------
    :class:`Guild`
        The guild contained in the page or None if it doesn't exist.

    Raises
    ------
    InvalidContent
        If content is not the HTML of a guild's page.
    """
    # Tibia.com renders this message for unknown guild names: treat it
    # as "no such guild" rather than an error.
    if "An internal error has occurred" in content:
        return None
    parsed_content = parse_tibiacom_content(content)
    try:
        name_header = parsed_content.find('h1')
        guild = Guild(name_header.text.strip())
    except AttributeError:
        # No <h1> header: this markup is not a guild page.
        raise InvalidContent("content does not belong to a Tibia.com guild page.")
    # The logo is mandatory on real guild pages; its absence also means
    # the markup is not a guild page.
    if not guild._parse_logo(parsed_content):
        raise InvalidContent("content does not belong to a Tibia.com guild page.")
    info_container = parsed_content.find("div", id="GuildInformationContainer")
    # Each helper extracts one section of the information container.
    guild._parse_guild_info(info_container)
    guild._parse_application_info(info_container)
    guild._parse_guild_homepage(info_container)
    guild._parse_guild_guildhall(info_container)
    guild._parse_guild_disband_info(info_container)
    guild._parse_guild_members(parsed_content)
    if guild.guildhall and guild.members:
        # The first listed member is the leader, who owns the guildhall.
        guild.guildhall.owner = guild.members[0].name
    return guild
def from_tibiadata(cls, content):
    """Builds a guild object from a TibiaData character response.

    Parameters
    ----------
    content: :class:`str`
        The json string from the TibiaData response.

    Returns
    -------
    :class:`Guild`
        The guild contained in the description or ``None``.

    Raises
    ------
    InvalidContent
        If content is not a JSON response of a guild's page.
    """
    json_content = parse_json(content)
    guild = cls()
    try:
        guild_obj = json_content["guild"]
        # TibiaData reports missing guilds with an "error" key.
        if "error" in guild_obj:
            return None
        guild_data = guild_obj["data"]
        guild.name = guild_data["name"]
        guild.world = guild_data["world"]
        guild.logo_url = guild_data["guildlogo"]
        guild.description = guild_data["description"]
        guild.founded = parse_tibiadata_date(guild_data["founded"])
        guild.open_applications = guild_data["application"]
    except KeyError:
        raise InvalidContent("content does not match a guild json from TibiaData.")
    guild.homepage = guild_data.get("homepage")
    guild.active = not guild_data.get("formation", False)
    # "disbanded" is only a dict when the guild is scheduled for
    # disbandment (it then carries a date and a notification text).
    if isinstance(guild_data["disbanded"], dict):
        guild.disband_date = parse_tibiadata_date(guild_data["disbanded"]["date"])
        guild.disband_condition = disband_tibadata_regex.search(guild_data["disbanded"]["notification"]).group(1)
    # Members are grouped per rank in the TibiaData payload.
    for rank in guild_obj["members"]:
        rank_name = rank["rank_title"]
        for member in rank["characters"]:
            guild.members.append(GuildMember(member["name"], rank_name, member["nick"] or None,
                                             member["level"], member["vocation"],
                                             joined=parse_tibiadata_date(member["joined"]),
                                             online=member["status"] == "online"))
    for invited in guild_obj["invited"]:
        guild.invites.append(GuildInvite(invited["name"], parse_tibiadata_date(invited["invited"])))
    # "guildhall" is only a dict when the guild owns one; the first
    # listed member (the leader) is recorded as its owner.
    if isinstance(guild_data["guildhall"], dict):
        gh = guild_data["guildhall"]
        guild.guildhall = GuildHouse(gh["name"], gh["world"], guild.members[0].name,
                                     parse_tibiadata_date(gh["paid"]))
    return guild
"resource": ""
} |
def _parse_current_member(self, previous_rank, values):
    """
    Parses the column texts of a member row into a member dictionary.

    Parameters
    ----------
    previous_rank: :class:`dict`[int, str]
        The last rank present in the rows.  Mutated in place so rows with
        a blank rank cell inherit the rank of the row above them.
    values: tuple[:class:`str`]
        A list of row contents.
    """
    rank, name, vocation, level, joined, status = values
    # A blank rank cell means "same rank as the previous row".
    rank = previous_rank[1] if rank == " " else rank
    title = None
    previous_rank[1] = rank
    # Titles are appended to the member's name, e.g. "Name (Title)".
    m = title_regex.match(name)
    if m:
        name = m.group(1)
        title = m.group(2)
    self.members.append(GuildMember(name, rank, title, int(level), vocation, joined=joined,
                                    online=status == "online"))
"resource": ""
} |
def _parse_application_info(self, info_container):
    """Read the open/closed application status from the info container.

    Parameters
    ----------
    info_container: :class:`bs4.Tag`
        The parsed content of the information container.
    """
    match = applications_regex.search(info_container.text)
    if match is None:
        return
    self.open_applications = match.group(1) == "opened"
"resource": ""
} |
def _parse_guild_disband_info(self, info_container):
    """Extract the guild's disband condition and date, when present.

    Parameters
    ----------
    info_container: :class:`bs4.Tag`
        The parsed content of the information container.
    """
    match = disband_regex.search(info_container.text)
    if match is None:
        return
    self.disband_condition = match.group(2)
    # Tibia.com embeds non-breaking spaces inside dates; normalise first.
    self.disband_date = parse_tibia_date(match.group(1).replace("\xa0", " "))
"resource": ""
} |
def _parse_guild_guildhall(self, info_container):
    """Extract the guild's guildhall information, when present.

    Parameters
    ----------
    info_container: :class:`bs4.Tag`
        The parsed content of the information container.
    """
    match = guildhall_regex.search(info_container.text)
    if match is None:
        return
    # Dates on Tibia.com contain non-breaking spaces; normalise first.
    paid = parse_tibia_date(match.group("date").replace("\xa0", " "))
    self.guildhall = GuildHouse(match.group("name"), self.world, paid_until_date=paid)
"resource": ""
} |
def _parse_guild_homepage(self, info_container):
    """Extract the guild's homepage URL, when present.

    Parameters
    ----------
    info_container: :class:`bs4.Tag`
        The parsed content of the information container.
    """
    match = homepage_regex.search(info_container.text)
    if match is not None:
        self.homepage = match.group(1)
"resource": ""
} |
def _parse_guild_info(self, info_container):
    """Populate the guild's general information fields.

    Parameters
    ----------
    info_container: :class:`bs4.Tag`
        The parsed content of the information container.
    """
    match = founded_regex.search(info_container.text)
    if match is None:
        return
    # An empty description is stored as None rather than "".
    desc = match.group("desc").strip()
    self.description = desc or None
    self.world = match.group("world")
    self.founded = parse_tibia_date(match.group("date").replace("\xa0", " "))
    self.active = "currently active" in match.group("status")
"resource": ""
} |
def _parse_logo(self, parsed_content):
    """Find and store the guild logo URL.

    Parameters
    ----------
    parsed_content: :class:`bs4.Tag`
        The parsed content of the page.

    Returns
    -------
    :class:`bool`
        Whether the logo was found or not.
    """
    # The guild logo is identified by its fixed 64px height.
    logo = parsed_content.find('img', {'height': '64'})
    if logo is not None:
        self.logo_url = logo["src"]
        return True
    return False
"resource": ""
} |
q44658 | Guild._parse_guild_members | train | def _parse_guild_members(self, parsed_content):
"""
Parses the guild's member and invited list.
Parameters
----------
parsed_content: :class:`bs4.Tag`
The parsed content of the guild's page
"""
member_rows = parsed_content.find_all("tr", {'bgcolor': ["#D4C0A1", "#F1E0C6"]})
previous_rank = {}
for row in member_rows:
columns = row.find_all('td')
values = tuple(c.text.replace("\u00a0", " ") for c in columns)
if len(columns) == COLS_GUILD_MEMBER:
self._parse_current_member(previous_rank, values)
if len(columns) == COLS_INVITED_MEMBER:
self._parse_invited_member(values) | python | {
"resource": ""
} |
def _parse_invited_member(self, values):
    """Append an invite entry parsed from a row's cell texts.

    Parameters
    ----------
    values: tuple[:class:`str`]
        A list of row contents.
    """
    name, date = values
    # Skip the header row of the invites table.
    if date == "Invitation Date":
        return
    self.invites.append(GuildInvite(name, date))
"resource": ""
} |
def merge_text_nodes_on(self, node):
    """Collapse each run of consecutive non-translatable text nodes
    under *node* into a single ``EscapedText`` node, recursively."""
    if not isinstance(node, ContainerNode) or not node.children:
        return
    merged = []
    pending = []

    def flush():
        # Emit the accumulated text run as one escaped-text node.
        if pending:
            merged.append(EscapedText(''.join(pending)))
            del pending[:]

    for child in node.children:
        if isinstance(child, Text) and not child.translatable:
            pending.append(child.escaped())
        else:
            flush()
            merged.append(child)
    flush()
    node.children = merged
    # Recurse into the (possibly rewritten) children.
    for child in node.children:
        self.merge_text_nodes_on(child)
"resource": ""
} |
def push_state(self):
    """
    Duplicate the topmost state, push the copy onto the state stack,
    and return it (the new top).
    """
    self.states.append(dict(self.states[-1]))
    return self.state
"resource": ""
} |
def enter_node(self, ir_node):
    """
    Enter the given element; keeps track of `cdata`;
    subclasses may extend by overriding
    """
    is_cdata_element = (isinstance(ir_node, Element)
                        and ir_node.name in self.cdata_elements)
    # Once inside a cdata element, every descendant stays in cdata mode.
    self.state['is_cdata'] = bool(self.state.get('is_cdata')) or is_cdata_element
"resource": ""
} |
def watch(static_root, watch_paths=None, on_reload=None, host='localhost', port=5555, server_base_path="/",
          watcher_interval=1.0, recursive=True, open_browser=True, open_browser_delay=1.0):
    """Serve ``static_root`` and reload connected clients when watched paths change.

    Blocks until the Tornado IO loop terminates or a keyboard interrupt is
    received, at which point the watcher server is shut down.

    Args:
        static_root: The path whose contents are to be served and watched.
        watch_paths: The paths to be watched for changes. Defaults to the static root.
        on_reload: Optional callback executed just before connected clients are reloaded.
        host: The host to which to bind our server.
        port: The port to which to bind our server.
        server_base_path: Base path to serve content from, if non-standard.
        watcher_interval: The maximum refresh rate of the watcher server.
        recursive: Whether to monitor the watch path recursively.
        open_browser: Whether to open the user's browser at the project root URL (default: True).
        open_browser_delay: Seconds to wait before attempting to open the browser.
    """
    options = dict(
        watch_paths=watch_paths,
        on_reload=on_reload,
        host=host,
        port=port,
        server_base_path=server_base_path,
        watcher_interval=watcher_interval,
        recursive=recursive,
        open_browser=open_browser,
        open_browser_delay=open_browser_delay,
    )
    server = httpwatcher.HttpWatcherServer(static_root, **options)
    server.listen()
    try:
        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C.
        server.shutdown()
"resource": ""
} |
def query(self):
    """Parse query string using given grammar.

    :returns: AST that represents the query in the given grammar.
    """
    ast = pypeg2.parse(self._query, parser(), whitespace="")
    # Each registered walker may transform the tree in turn.
    for walker in query_walkers():
        ast = ast.accept(walker)
    return ast
"resource": ""
} |
def normalise_key(self, key):
    """Make sure key is a valid python attribute"""
    # Dashes are illegal in attribute names; the "noy_" prefix is noise.
    key = key.replace('-', '_')
    prefix = "noy_"
    if key.startswith(prefix):
        key = key[len(prefix):]
    return key
"resource": ""
} |
def find_value(self, key):
    """Find a value and return it"""
    values = self.values
    try:
        val = values[key]
    except KeyError:
        raise AttributeError("Config has no value for {}".format(key))
    # Defaults are unwrapped so callers never see the Default wrapper.
    return val.val if isinstance(val, Default) else val
"resource": ""
} |
def use_options(self, options, extractor=None):
    """
    Record the given options on ``self.values``.

    If extractor isn't specified, the options are stored as given.
    Otherwise the result of calling extractor with our template and
    these options is stored instead.

    All keys are transformed into valid python attribute names.
    """
    extracted = extractor(self.template, options) if extractor else options
    # Accept both dicts and iterables of (key, value) pairs.
    if isinstance(extracted, dict):
        extracted = extracted.items()
    if extracted is None:
        return
    for key, val in extracted:
        self.values[self.normalise_key(key)] = val
"resource": ""
} |
def find_config_file(self):
    """
    Locate our config file, if any.

    When the configured path is only a ``Default`` value and the file
    doesn't exist, it is silently ignored (``None`` is returned).
    When the path was set explicitly and doesn't exist, an error is
    raised.
    """
    filename = self.values.get('config_file', Default('noy.json'))
    ignore_missing = isinstance(filename, Default)
    if ignore_missing:
        filename = filename.val
    filename = os.path.abspath(filename)
    if os.path.exists(filename):
        return filename
    if not ignore_missing:
        raise MissingConfigFile("Config file doesn't exist at {}".format(filename))
"resource": ""
} |
def apply_config_file(self, filename):
    """
    Add options from config file to self.values

    Leave alone existing values that are not an instance of Default
    """
    def extractor(template, options):
        """Ignore things that are existing non default values"""
        for name, val in options:
            normalised = self.normalise_key(name)
            if normalised in self.values and not isinstance(self.values[normalised], Default):
                continue
            else:
                yield name, val

    # Use a context manager so the file handle is closed promptly;
    # the previous ``json.load(open(filename))`` leaked the handle.
    with open(filename) as config_file:
        items = json.load(config_file).items()
    self.use_options(items, extractor)
"resource": ""
} |
def use_config_file(self):
    """Find and apply the config file"""
    found = self.find_config_file()
    self.config_file = found
    # find_config_file returns None when a Default path is missing.
    if found:
        self.apply_config_file(found)
"resource": ""
} |
def setup(self, options=None, extractor=None):
    """
    Put options onto the config and put anything from a config file onto the config.
    If extractor is specified, it is used to extract values from the options dictionary
    """
    # Get our programmatic options
    self._util.use_options(options, extractor)
    # Config-file values only replace Default placeholders, so the
    # explicit programmatic options recorded above always win.
    self._util.use_config_file()
"resource": ""
} |
def recursive_asdict(d):
    """Convert a Suds object into a plain, serializable dict.

    Nested Suds objects (anything exposing ``__keylist__``) are converted
    recursively, including those found inside list values.
    """
    out = {}
    # items() instead of the Python-2-only iteritems(), so this helper
    # also runs on Python 3; behavior on Python 2 is unchanged.
    for k, v in asdict(d).items():
        if hasattr(v, '__keylist__'):
            out[k] = recursive_asdict(v)
        elif isinstance(v, list):
            out[k] = [
                recursive_asdict(item) if hasattr(item, '__keylist__') else item
                for item in v
            ]
        else:
            out[k] = v
    return out
"resource": ""
} |
def index():
    """Query Elasticsearch using "collection" param in query string."""
    collection_names = request.values.getlist('collection')
    # Validation of collection names: every requested name must exist.
    collections = Collection.query
    if collection_names:
        collections = collections.filter(
            Collection.name.in_(collection_names))
    # NOTE(review): ``assert`` is stripped when Python runs with -O, so
    # this validation silently disappears in optimized mode — consider
    # aborting with an explicit HTTP error instead.
    assert len(collection_names) == collections.count()
    # Restrict the search to the validated collections.
    response = search.client.search(
        body={
            'query': {
                'filtered': {
                    'filter': {
                        'terms': {
                            '_collections': collection_names
                        }
                    }
                }
            }
        }
    )
    return jsonify(**response)
"resource": ""
} |
def _parse_battleye_status(self, battleye_string):
    """Set the world's BattlEye fields from its status string.

    Parameters
    ----------
    battleye_string: :class:`str`
        String containing the world's Battleye Status.
    """
    match = battleye_regexp.search(battleye_string)
    self.battleye_protected = match is not None
    self.battleye_date = parse_tibia_full_date(match.group(1)) if match else None
"resource": ""
} |
def _parse_tables(cls, parsed_content):
    """Collect the rows of every information table on a world's page.

    Parameters
    ----------
    parsed_content: :class:`bs4.BeautifulSoup`
        A :class:`BeautifulSoup` object containing all the content.

    Returns
    -------
    :class:`OrderedDict`[:class:`str`, :class:`list`[:class:`bs4.Tag`]]
        A dictionary containing all the table rows, with the table
        headers as keys.
    """
    output = OrderedDict()
    for container in parsed_content.find_all('div', attrs={'class': 'TableContainer'}):
        # Headers may carry a bracketed suffix (e.g. a count); strip it.
        header = container.find("div", attrs={'class': 'Text'}).text
        header = header.split("[")[0].strip()
        inner = container.find("div", attrs={'class': 'InnerTableContainer'})
        output[header] = inner.find_all("tr")
    return output
"resource": ""
} |
def reset_crops(self):
    """
    Reset all known crops to the default crop.
    If settings.ASSET_CELERY is specified then
    the task will be run async
    """
    if not self._can_crop():
        return
    if settings.CELERY or settings.USE_CELERY_DECORATOR:
        # Celery is in use; run the task asynchronously.
        tasks.reset_crops.apply_async(args=[self.pk], countdown=5)
    else:
        tasks.reset_crops(None, asset=self)
"resource": ""
} |
def ensure_crops(self, *required_crops):
    """
    Make sure a crop exists for each crop in required_crops.
    Existing crops will not be changed.
    If settings.ASSET_CELERY is specified then
    the task will be run async
    """
    if not self._can_crop():
        return
    if settings.CELERY or settings.USE_CELERY_DECORATOR:
        # Celery is in use; run the task asynchronously.
        tasks.ensure_crops.apply_async(args=[self.pk] + list(required_crops),
                                       countdown=5)
    else:
        tasks.ensure_crops(None, *required_crops, asset=self)
"resource": ""
} |
def create_crop(self, name, x, x2, y, y2):
    """
    Create a crop for this asset.
    """
    if not self._can_crop():
        return
    # Build the crop spec via the configured cropper, then persist it.
    spec = get_image_cropper().create_crop(name, self.file, x=x, x2=x2,
                                           y=y, y2=y2)
    ImageDetail.save_crop_spec(self, spec)
"resource": ""
} |
def save(self, *args, **kwargs):
    """
    For new assets, creates a new slug.
    For updates, deletes the old file from storage.
    Calls super to actually save the object.
    """
    if not self.pk and not self.slug:
        self.slug = self.generate_slug()
    # A replaced file is removed from storage before saving the new one.
    if self.__original_file and self.file != self.__original_file:
        self.delete_real_file(self.__original_file)
    file_changed = True
    if self.pk:
        # On updates, the file only counts as changed when a fresh
        # upload is attached to the field.
        new_value = getattr(self, 'file')
        if hasattr(new_value, "file"):
            file_changed = isinstance(new_value.file, UploadedFile)
    else:
        # New asset: start the cache-bust version counter.
        self.cbversion = 0
    if file_changed:
        self.user_filename = os.path.basename(self.file.name)
        # Bump the cache-bust version so CDNs/browsers refetch the file.
        self.cbversion = self.cbversion + 1
        if not self.title:
            self.title = self.user_filename
    super(AssetBase, self).save(*args, **kwargs)
    if file_changed:
        signals.file_saved.send(self.file.name)
        utils.update_cache_bust_version(self.file.url, self.cbversion)
        self.reset_crops()
    # When the stored file name changed, propagate it to every related
    # model field that denormalizes this asset's file name.
    if self.__original_file and self.file.name != self.__original_file.name:
        # NOTE(review): SwitchSchemaManager(None) presumably widens the
        # update across tenant schemas — confirm against the manager docs.
        with manager.SwitchSchemaManager(None):
            for related in self.__class__._meta.get_all_related_objects(
                    include_hidden=True):
                field = related.field
                if getattr(field, 'denormalize', None):
                    cname = field.get_denormalized_field_name(field.name)
                    if getattr(field, 'denormalize'):
                        related.model.objects.filter(**{
                            field.name: self.pk
                        }).update(**{
                            cname: self.file.name
                        })
"resource": ""
} |
def delete(self, *args, **kwargs):
    """
    Deletes the actual file from storage after the object is deleted.
    Calls super to actually delete the object.
    """
    # Keep a reference: the field is no longer usable once the row is gone.
    stored_file = self.file
    super(AssetBase, self).delete(*args, **kwargs)
    self.delete_real_file(stored_file)
"resource": ""
} |
q44681 | _get_not_annotated | train | def _get_not_annotated(func, annotations=None):
"""Return non-optional parameters that are not annotated."""
argspec = inspect.getfullargspec(func)
args = argspec.args
if argspec.defaults is not None:
args = args[:-len(argspec.defaults)]
if inspect.isclass(func) or inspect.ismethod(func):
args = args[1:] # Strip off ``cls`` or ``self``.
kwonlyargs = argspec.kwonlyargs
if argspec.kwonlydefaults is not None:
kwonlyargs = kwonlyargs[:-len(argspec.kwonlydefaults)]
annotations = annotations or argspec.annotations
return [arg for arg in args + kwonlyargs if arg not in annotations] | python | {
"resource": ""
} |
def _parse_args(func, variables, annotations=None):
    """Return a list of arguments with the variable it reads.

    NOTE: Multiple arguments may read the same variable.
    """
    pairs = []
    for arg_name, anno in (annotations or func.__annotations__).items():
        # The return annotation is handled by _parse_ret, not here.
        if arg_name == 'return':
            continue
        var, read = _parse_arg(func, variables, arg_name, anno)
        pairs.append((Argument(name=arg_name, read=read), var))
    return pairs
"resource": ""
} |
def _parse_arg(func, variables, arg_name, anno):
    """Parse an argument's annotation."""
    # 'name' reads the latest value; ['name'] reads all values.
    if isinstance(anno, str):
        var = variables[anno]
        return var, var.read_latest
    if (isinstance(anno, list) and len(anno) == 1 and
            isinstance(anno[0], str)):
        var = variables[anno[0]]
        return var, var.read_all
    # For now, be very strict about annotation format (e.g.,
    # allow list but not tuple) because we might want to use
    # tuple for other meanings in the future.
    raise StartupError(
        'cannot parse annotation %r of parameter %r for %r' %
        (anno, arg_name, func))
"resource": ""
} |
def _parse_ret(func, variables, annotations=None):
    """Parse func's return annotation and return either None, a variable,
    or a tuple of variables.

    NOTE:
      * _parse_ret() also notifies variables about will-writes.
      * A variable can be written multiple times per return annotation.
    """
    anno = (annotations or func.__annotations__).get('return')
    if anno is None:
        return None
    if isinstance(anno, str):
        target = variables[anno]
        target.notify_will_write()
        return target
    if isinstance(anno, tuple) and all(isinstance(name, str) for name in anno):
        targets = tuple(variables[name] for name in anno)
        for var in targets:
            var.notify_will_write()
        return targets
    # Be very strict about annotation format for now.
    raise StartupError(
        'cannot parse return annotation %r for %r' % (anno, func))
"resource": ""
} |
def _write_values(kwargs, variables):
    """Write values of kwargs and return thus-satisfied closures."""
    written = []
    for var_name, value in kwargs.items():
        var = variables[var_name]
        # Announce the write first so readers can track readiness.
        var.notify_will_write()
        var.write(value)
        written.append(var)
    return _notify_reader_writes(written)
"resource": ""
} |
def _notify_reader_writes(writeto):
    """Notify reader closures about these writes and return a sorted
    list of thus-satisfied closures.
    """
    satisfied = []
    for var in writeto:
        if not var.readable:
            continue
        for reader in var.readers:
            reader.notify_read_ready()
            if reader.satisfied:
                satisfied.append(reader)
    return Closure.sort(satisfied)
"resource": ""
} |
def _release(self):
    """Destroy self since closures cannot be called again."""
    # Drop every piece of call state so any reuse fails loudly.
    for attr in ('funcs', 'variables', 'variable_values', 'satisfied'):
        delattr(self, attr)
"resource": ""
} |
def call(self, **kwargs):
    """Call all the functions that have previously been added to the
    dependency graph in topological and lexicographical order, and
    then return variables in a ``dict``.

    You may provide variable values with keyword arguments. These
    values will be written and can satisfy dependencies.

    NOTE: This object will be **destroyed** after ``call()`` returns
    and should not be used any further.
    """
    # _release() removes 'funcs', so its absence means a prior call
    # already consumed this object.
    if not hasattr(self, 'funcs'):
        raise StartupError('startup cannot be called again')
    for name, var in self.variables.items():
        var.name = name
    self.variable_values.update(kwargs)
    for name in self.variable_values:
        self.variables[name].name = name
    # Seed the queue with already-satisfied closures, then with those
    # satisfied by the caller-provided values.
    queue = Closure.sort(self.satisfied)
    queue.extend(_write_values(self.variable_values, self.variables))
    while queue:
        closure = queue.pop(0)
        writeto = closure.call()
        self.funcs.remove(closure.func)
        # Each call's writes may satisfy further closures.
        queue.extend(_notify_reader_writes(writeto))
    if self.funcs:
        raise StartupError('cannot satisfy dependency for %r' % self.funcs)
    values = {
        name: var.read_latest() for name, var in self.variables.items()
    }
    # Call _release() on normal exit only; otherwise keep the dead body for
    # forensic analysis.
    self._release()
    return values
"resource": ""
} |
def extract_tonnikala(fileobj, keywords, comment_tags, options):
    """Extract messages from Tonnikala files.

    :param fileobj: the file-like object the messages should be extracted
                    from
    :param keywords: a list of keywords (i.e. function names) that should
                     be recognized as translation functions
    :param comment_tags: a list of translator tags to search for and
                         include in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples
    :rtype: ``iterator``
    """
    # NOTE(review): ``keywords`` and ``comment_tags`` are currently
    # ignored; extraction is delegated entirely to TonnikalaExtractor.
    extractor = TonnikalaExtractor()
    for msg in extractor(filename=None, fileobj=fileobj, options=Options()):
        # Build the message tuple back-to-front: the plural form and the
        # context are each prepended onto the singular id, and the
        # pseudo-funcname grows matching 'n'/'p' prefixes.
        msgid = msg.msgid,
        prefix = ''
        if msg.msgid_plural:
            msgid = (msg.msgid_plural,) + msgid
            prefix = 'n'
        if msg.msgctxt:
            msgid = (msg.msgctxt,) + msgid
            prefix += 'p'
        # NOTE(review): this yields (plural, singular) for plural
        # messages; Babel's ngettext convention is (singular, plural) —
        # confirm the intended ordering against the consumer.
        yield (msg.location[1], prefix + 'gettext', msgid, msg.comment)
"resource": ""
} |
q44690 | KanaConv._clear_char | train | def _clear_char(self):
'''
Clears the current character and makes the machine ready
to accept the next character.
'''
self.lvmarker_count = 0
self.geminate_count = 0
self.next_char_info = None
self.next_char_type = None
self.active_vowel = None
self.active_vowel_info = None
self.active_vowel_ro = None
self.active_xvowel = None
self.active_xvowel_info = None
self.active_char = None
self.active_char_info = None
self.active_char_type = None
self.active_dgr_a_info = None
self.active_dgr_b_info = None
self.has_xvowel = False
self.has_digraph_b = False
self.has_u_lvm = False
self.unknown_char = None | python | {
"resource": ""
} |
def _append_unknown_char(self):
    '''
    Append the pending unknown character to the stack, if any, and
    clear it.  Only applies when unknown characters are configured to
    be included in the output.
    '''
    if self.unknown_strategy != UNKNOWN_INCLUDE:
        return
    if self.unknown_char is None:
        return
    self._append_to_stack(self.unknown_char)
    self.unknown_char = None
"resource": ""
} |
def _promote_solitary_xvowel(self):
    '''
    "Promotes" the current xvowel to a regular vowel, in case
    it is not otherwise connected to a character.

    Used to print small vowels that would otherwise get lost;
    normally small vowels always form a pair, but in case one is
    by itself it should basically act like a regular vowel.
    '''
    # Nothing to promote without an xvowel, and nothing to do when the
    # active character (a vowel or consonant-vowel pair) consumes it.
    if self.active_xvowel is None:
        return
    if self.active_char_type in (VOWEL, CV):
        return
    self._set_char(self.active_xvowel, XVOWEL)
    self.active_xvowel = None
    self.active_xvowel_info = None
"resource": ""
} |
def _add_unknown_char(self, string):
    '''
    Adds an unknown character to the stack.
    '''
    # Make sure a pending solitary xvowel is emitted before the
    # unknown character takes its place.
    if self.has_xvowel:
        self._promote_solitary_xvowel()
    self.unknown_char = string
    self._flush_char()
"resource": ""
} |
def _set_digraph_b(self, char):
    '''
    Sets the second part of a digraph.
    '''
    self.has_digraph_b = True
    info = di_b_lt[char]
    # The digraph's second half dictates the active vowel from here on.
    self.active_vowel_ro = info[0]
    self.active_dgr_b_info = info
"resource": ""
} |
def _postprocess_output(self, output):
    '''
    Perform the final modifications (long-vowel style and casing)
    before the romanized output is returned.
    '''
    # Replace long vowels with circumflex characters.
    if self.vowel_style == CIRCUMFLEX_STYLE:
        try:
            output = output.translate(vowels_to_circumflexes)
        except TypeError:
            # Python 2 will error out here if there are no
            # macron characters in the string to begin with.
            pass
    # Output the desired case.
    if self.uppercase:
        output = output.upper()
    return output
"resource": ""
} |
def _flush_stack(self):
    '''
    Return the final, post-processed output string and reset the
    machine's state (both the active character and the stack).
    '''
    output = self._postprocess_output(''.join(self.stack))
    self._clear_char()
    self._empty_stack()
    # On Python 2, make sure a ``unicode`` object is returned rather
    # than a byte string.
    if not PYTHON_2:
        return output
    else:
        return unicode(output)
"resource": ""
} |
def _preprocess_input(self, input):
    '''
    Preprocesses the input before it's split into a list.
    '''
    # Fast path: nothing in this string needs preprocessing.
    if re.search(preprocess_chars, input) is None:
        return input
    return self._add_punctuation_spacing(input)
"resource": ""
} |
def _add_punctuation_spacing(self, input):
    '''
    Adds additional spacing to punctuation characters. For example,
    this puts an extra space after a fullwidth full stop.
    '''
    result = input
    # Apply each (pattern, replacement) rule in turn.
    for pattern, repl in punct_spacing:
        result = re.sub(pattern, repl, result)
    return result
"resource": ""
} |
def get_render_data(self, **kwargs):
    """
    Return the context for rendering.

    Mixin inheritance prevents a default ``get_context_data`` on this
    class, so delegate to that method when the instance provides one;
    otherwise the keyword arguments themselves form the context.
    """
    if hasattr(self, 'get_context_data'):
        return self.get_context_data(**kwargs)
    return kwargs
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.