_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def simple_locking(lock_id, expiration=None):
    """
    A decorator that wraps a function in a single lock getting algorithm.

    The wrapped function runs only if the named lock can be acquired;
    if acquisition fails the call is silently skipped (non-fatal).

    :param lock_id: identifier of the lock to acquire.
    :param expiration: optional expiration forwarded to ``Lock.acquire_lock``.
    """
    def inner_decorator(function):
        def wrapper(*args, **kwargs):
            try:
                # Trying to acquire lock
                lock = Lock.acquire_lock(lock_id, expiration)
            except LockError:
                # Unable to acquire lock - non fatal: skip the call entirely.
                return None
            # Lock acquired; lazy %-style logging args instead of eager "%".
            logger.debug('acquired lock: %s', lock_id)
            # The original wrapped the call in a pointless
            # ``except: raise``; try/finally alone guarantees release.
            try:
                return function(*args, **kwargs)
            finally:
                lock.release()
        return wraps(function)(wrapper)
    return inner_decorator
"resource": ""
} |
def adjust_locations(ast_node, first_lineno, first_offset):
    """
    Shift the source locations of every node in *ast_node* so the tree
    appears to start at ``first_lineno`` / ``first_offset`` instead of
    line 1, column 0.
    """
    line_delta = first_lineno - 1
    # Iterative DFS; visit order does not matter for this transformation.
    pending = [ast_node]
    while pending:
        current = pending.pop()
        if 'lineno' in current._attributes:
            # Only nodes on the original first line get the column shift.
            if current.lineno == 1:
                current.col_offset += first_offset
            current.lineno += line_delta
        pending.extend(iter_child_nodes(current))
"resource": ""
} |
def coalesce_outputs(tree):
    """
    Coalesce the constant output expressions
        __output__('foo')
        __output__('bar')
        __output__(baz)
        __output__('xyzzy')
    into
        __output__('foobar', baz, 'xyzzy')
    """
    # When False, only nodes whose first output argument is a string
    # literal would be eligible for merging.
    coalesce_all_outputs = True
    if coalesce_all_outputs:
        should_coalesce = lambda n: True
    else:
        should_coalesce = lambda n: n.output_args[0].__class__ is Str
    class OutputCoalescer(NodeVisitor):
        def visit(self, node):
            # if - else expression also has a body! it is not we want, though.
            if hasattr(node, 'body') and isinstance(node.body, Iterable):
                # coalesce continuous string output nodes
                new_body = []
                output_node = None
                def coalesce_strs():
                    # Merge adjacent string literals inside the currently
                    # accumulated output call; reads output_node at call time.
                    if output_node:
                        output_node.value.args[:] = \
                            coalesce_strings(output_node.value.args)
                for i in node.body:
                    if hasattr(i, 'output_args') and should_coalesce(i):
                        if output_node:
                            # Cap merged calls at 250 args — presumably to
                            # stay under CPython's call-argument limit
                            # (TODO confirm intent).
                            if len(output_node.value.args) + len(i.output_args) > 250:
                                coalesce_strs()
                                output_node = i
                            else:
                                # Fold into the running call; this node is
                                # dropped from the new body.
                                output_node.value.args.extend(i.output_args)
                                continue
                        output_node = i
                    else:
                        # Non-output statement ends the current run.
                        coalesce_strs()
                        output_node = None
                    new_body.append(i)
                coalesce_strs()
                node.body[:] = new_body
            NodeVisitor.visit(self, node)
        def check(self, node):
            """
            Coalesce __TK__output(__TK__escape(literal(x))) into
            __TK__output(x).
            """
            if not ast_equals(node.func, NameX('__TK__output')):
                return
            for i in range(len(node.args)):
                arg1 = node.args[i]
                if not arg1.__class__.__name__ == 'Call':
                    continue
                if not ast_equals(arg1.func, NameX('__TK__escape')):
                    continue
                if len(arg1.args) != 1:
                    continue
                arg2 = arg1.args[0]
                if not arg2.__class__.__name__ == 'Call':
                    continue
                if not ast_equals(arg2.func, NameX('literal')):
                    continue
                if len(arg2.args) != 1:
                    continue
                # escape(literal(x)) round-trips to x: unwrap it.
                node.args[i] = arg2.args[0]
        def visit_Call(self, node):
            self.check(node)
            self.generic_visit(node)
    OutputCoalescer().visit(tree)
"resource": ""
} |
def remove_locations(node):
    """
    Recursively delete all source-location attributes
    (``lineno``/``col_offset``) from the given AST tree.
    """
    for attr in ('lineno', 'col_offset'):
        if attr in node._attributes and hasattr(node, attr):
            delattr(node, attr)
    for child in iter_child_nodes(node):
        remove_locations(child)
"resource": ""
} |
def from_content(cls, content):
    """Creates an instance of the class from the html content of the character's page.

    Parameters
    ----------
    content: :class:`str`
        The HTML content of the page.

    Returns
    -------
    :class:`Character`
        The character contained in the page, or None if the character doesn't exist

    Raises
    ------
    InvalidContent
        If content is not the HTML of a character's page.
    """
    parsed_content = parse_tibiacom_content(content)
    tables = cls._parse_tables(parsed_content)
    char = Character()
    # tibia.com titles the only table this way when the name does not exist.
    if "Could not find character" in tables.keys():
        return None
    if "Character Information" in tables.keys():
        char._parse_character_information(tables["Character Information"])
    else:
        raise InvalidContent("content does not contain a tibia.com character information page.")
    # Optional sections; missing tables default to empty lists.
    char._parse_achievements(tables.get("Account Achievements", []))
    char._parse_deaths(tables.get("Character Deaths", []))
    char._parse_account_information(tables.get("Account Information", []))
    char._parse_other_characters(tables.get("Characters", []))
    return char
"resource": ""
} |
def _parse_account_information(self, rows):
    """
    Parses the character's account information

    Parameters
    ----------
    rows: :class:`list` of :class:`bs4.Tag`, optional
        A list of all rows contained in the table.
    """
    acc_info = {}
    if not rows:
        return
    for row in rows:
        cols_raw = row.find_all('td')
        cols = [ele.text.strip() for ele in cols_raw]
        field, value = cols
        # Normalize the label into a snake_case key ("Loyalty Title:" -> "loyalty_title").
        field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").lower()
        # \xa0 is the non-breaking space tibia.com uses.
        value = value.replace("\xa0", " ")
        acc_info[field] = value
    created = parse_tibia_datetime(acc_info["created"])
    # "(no title)" means the account has no loyalty title.
    loyalty_title = None if acc_info["loyalty_title"] == "(no title)" else acc_info["loyalty_title"]
    position = acc_info.get("position")
    self.account_information = AccountInformation(created, loyalty_title, position)
"resource": ""
} |
def _parse_achievements(self, rows):
    """
    Parses the character's displayed achievements

    Parameters
    ----------
    rows: :class:`list` of :class:`bs4.Tag`
        A list of all rows contained in the table.
    """
    for row in rows:
        cols = row.find_all('td')
        if len(cols) != 2:
            continue
        field, value = cols
        # The grade is shown as repeated star images; counting the CSS
        # class occurrences in the raw HTML yields the grade number.
        grade = str(field).count("achievement-grade-symbol")
        name = value.text.strip()
        self.achievements.append(Achievement(name, grade))
"resource": ""
} |
def _parse_character_information(self, rows):
    """
    Parses the character's basic information and applies the found values.

    Parameters
    ----------
    rows: :class:`list` of :class:`bs4.Tag`
        A list of all rows contained in the table.
    """
    # Fields whose values must be converted to integers.
    int_rows = ["level", "achievement_points"]
    char = {}
    house = {}
    for row in rows:
        cols_raw = row.find_all('td')
        cols = [ele.text.strip() for ele in cols_raw]
        field, value = cols
        # Normalize the label into a snake_case key.
        field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").lower()
        value = value.replace("\xa0", " ")
        # This is a special case cause we need to see the link
        if field == "house":
            house_text = value
            paid_until = house_regexp.search(house_text).group(1)
            paid_until_date = parse_tibia_date(paid_until)
            house_link = cols_raw[1].find('a')
            # House id and town come from the link's query string.
            url = urllib.parse.urlparse(house_link["href"])
            query = urllib.parse.parse_qs(url.query)
            house = {"id": int(query["houseid"][0]), "name": house_link.text.strip(),
                     "town": query["town"][0], "paid_until": paid_until_date}
            continue
        if field in int_rows:
            value = int(value)
        char[field] = value
    # If the character is deleted, the information is found with the name, so we must clean it
    m = deleted_regexp.match(char["name"])
    if m:
        char["name"] = m.group(1)
        char["deletion_date"] = parse_tibia_datetime(m.group(2))
    if "guild_membership" in char:
        m = guild_regexp.match(char["guild_membership"])
        # Group 2 is the guild's name, group 1 is the rank within it.
        char["guild_membership"] = GuildMembership(m.group(2), m.group(1))
    if "former_names" in char:
        former_names = [fn.strip() for fn in char["former_names"].split(",")]
        char["former_names"] = former_names
    # "never" means the character has not logged in yet.
    if "never" in char["last_login"]:
        char["last_login"] = None
    else:
        char["last_login"] = parse_tibia_datetime(char["last_login"])
    char["vocation"] = try_enum(Vocation, char["vocation"])
    char["sex"] = try_enum(Sex, char["sex"])
    char["account_status"] = try_enum(AccountStatus, char["account_status"])
    # Apply every parsed value as an attribute; unknown fields are skipped.
    for k, v in char.items():
        try:
            setattr(self, k, v)
        except AttributeError:
            pass
    if house:
        self.house = CharacterHouse(house["id"], house["name"], self.world, house["town"], self.name,
                                    house["paid_until"])
"resource": ""
} |
def _parse_deaths(self, rows):
    """
    Parses the character's recent deaths

    Parameters
    ----------
    rows: :class:`list` of :class:`bs4.Tag`
        A list of all rows contained in the table.
    """
    for row in rows:
        cols = row.find_all('td')
        # First column: death timestamp.
        death_time_str = cols[0].text.replace("\xa0", " ").strip()
        death_time = parse_tibia_datetime(death_time_str)
        # Second column: raw HTML description of the death.
        death = str(cols[1]).replace("\xa0", " ")
        death_info = death_regexp.search(death)
        if death_info:
            level = int(death_info.group("level"))
            killers_desc = death_info.group("killers")
        else:
            # Unparseable rows are skipped silently.
            continue
        death = Death(self.name, level, time=death_time)
        assists_name_list = []
        # Check if the killers list contains assists
        assist_match = death_assisted.search(killers_desc)
        if assist_match:
            # Filter out assists
            killers_desc = assist_match.group("killers")
            # Split assists into a list.
            assists_name_list = self._split_list(assist_match.group("assists"))
        killers_name_list = self._split_list(killers_desc)
        for killer in killers_name_list:
            killer_dict = self._parse_killer(killer)
            death.killers.append(Killer(**killer_dict))
        for assist in assists_name_list:
            # Extract names from character links in assists list.
            assist_dict = {"name": link_content.search(assist).group(1), "player": True}
            death.assists.append(Killer(**assist_dict))
        try:
            self.deaths.append(death)
        except ValueError:
            # Some pvp deaths have no level, so they are raising a ValueError, they will be ignored for now.
            continue
"resource": ""
} |
def _parse_killer(cls, killer):
    """Parses a killer into a dictionary.

    Parameters
    ----------
    killer: :class:`str`
        The killer's raw HTML string.

    Returns
    -------
    :class:`dict`: A dictionary containing the killer's info.
    """
    # If the killer contains a link, it is a player.
    if "href" in killer:
        killer_dict = {"name": link_content.search(killer).group(1), "player": True}
    else:
        killer_dict = {"name": killer, "player": False}
    # Check if it contains a summon.
    m = death_summon.search(killer)
    if m:
        killer_dict["summon"] = m.group("summon")
    return killer_dict
"resource": ""
} |
def _parse_other_characters(self, rows):
    """
    Parses the character's other visible characters.

    Parameters
    ----------
    rows: :class:`list` of :class:`bs4.Tag`
        A list of all rows contained in the table.
    """
    for row in rows:
        cols_raw = row.find_all('td')
        cols = [ele.text.strip() for ele in cols_raw]
        # Valid character rows have exactly 5 columns.
        if len(cols) != 5:
            continue
        name, world, status, __, __ = cols
        # Names are listed as "1. Name"; drop the ordinal prefix.
        name = name.replace("\xa0", " ").split(". ")[1]
        self.other_characters.append(OtherCharacter(name, world, status == "online", status == "deleted"))
"resource": ""
} |
def _parse_tables(cls, parsed_content):
    """
    Parses the information tables contained in a character's page.

    Parameters
    ----------
    parsed_content: :class:`bs4.BeautifulSoup`
        A :class:`BeautifulSoup` object containing all the content.

    Returns
    -------
    :class:`OrderedDict`[str, :class:`list` of :class:`bs4.Tag`]
        A dictionary containing all the table rows, with the table headers as keys.
    """
    tables = parsed_content.find_all('table', attrs={"width": "100%"})
    output = OrderedDict()
    for table in tables:
        # The first cell of each table is its title.
        title = table.find("td").text
        # Skip the header row; keep only the data rows.
        output[title] = table.find_all("tr")[1:]
    return output
"resource": ""
} |
q44912 | Character._split_list | train | def _split_list(cls, items, separator=",", last_separator=" and "):
"""
Splits a string listing elements into an actual list.
Parameters
----------
items: :class:`str`
A string listing elements.
separator: :class:`str`
The separator between each item. A comma by default.
last_separator: :class:`str`
The separator used for the last item. ' and ' by default.
Returns
-------
:class:`list` of :class:`str`
A list containing each one of the items.
"""
if items is None:
return None
items = items.split(separator)
last_item = items[-1]
last_split = last_item.split(last_separator)
if len(last_split) > 1:
items[-1] = last_split[0]
items.append(last_split[1])
return [e.strip() for e in items] | python | {
"resource": ""
} |
def create_many_to_many_intermediary_model(field, klass):
    """
    Copied from django, but uses FKToVersion for the
    'from' field. Fields are also always called 'from' and 'to'
    to avoid problems between version combined models.

    NOTE: Python 2 code (uses ``basestring``).
    """
    managed = True
    # Case 1: relation given as a lazy string reference to another model.
    if (isinstance(field.remote_field.to, basestring) and
            field.remote_field.to != related.RECURSIVE_RELATIONSHIP_CONSTANT):
        to_model = field.remote_field.to
        to = to_model.split('.')[-1]
        def set_managed(field, model, cls):
            # Resolved lazily once the target model is available.
            managed = model._meta.managed or cls._meta.managed
            if issubclass(cls, VersionView):
                managed = False
            field.remote_field.through._meta.managed = managed
        related.add_lazy_relation(klass, field, to_model, set_managed)
    # Case 2: self-referential relation expressed as a string.
    elif isinstance(field.remote_field.to, basestring):
        to = klass._meta.object_name
        to_model = klass
        managed = klass._meta.managed
    # Case 3: relation given as an actual model class.
    else:
        to = field.remote_field.to._meta.object_name
        to_model = field.remote_field.to
        managed = klass._meta.managed or to_model._meta.managed
    # Version views never manage their own tables.
    if issubclass(klass, VersionView):
        managed = False
    name = '%s_%s' % (klass._meta.object_name, field.name)
    # Recursive relations need distinct from_/to_ names to disambiguate.
    if (field.remote_field.to == related.RECURSIVE_RELATIONSHIP_CONSTANT or
            to == klass._meta.object_name):
        from_ = 'from_%s' % to.lower()
        to = 'to_%s' % to.lower()
    else:
        from_ = klass._meta.object_name.lower()
        to = to.lower()
    meta = type('Meta', (object,), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'managed': managed,
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'db_tablespace': klass._meta.db_tablespace,
        'unique_together': ('from', 'to'),
        'verbose_name': '%(from)s-%(to)s relationship' % {
            'from': from_, 'to': to},
        'verbose_name_plural': '%(from)s-%(to)s relationships' % {
            'from': from_, 'to': to},
        'apps': field.model._meta.apps,
    })
    # Construct and return the new class.
    return type(str(name), (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        # 'from' side always points at the version table.
        'from': FKToVersion(klass, related_name='%s+' % name,
                            db_tablespace=field.db_tablespace,
                            db_constraint=field.remote_field.db_constraint),
        'to': models.ForeignKey(to_model, related_name='%s+' % name,
                                db_tablespace=field.db_tablespace,
                                db_constraint=field.remote_field.db_constraint)
    })
"resource": ""
} |
def deconstruct(self):
    """
    FK to version always points to a version table
    """
    name, path, args, kwargs = super(FKToVersion, self).deconstruct()
    target = kwargs['to']
    if not target.endswith('_version'):
        kwargs['to'] = target + '_version'
    return name, path, args, kwargs
"resource": ""
} |
def update_rel_to(self, klass):
    """
    If we have a string for a model, see if we know about it yet,
    if so use it directly otherwise take the lazy approach.
    This check is needed because this is called before
    the main M2M field contribute to class is called.

    NOTE: Python 2 code (uses ``basestring``).
    """
    if isinstance(self.remote_field.to, basestring):
        relation = self.remote_field.to
        try:
            app_label, model_name = relation.split(".")
        except ValueError:
            # If we can't split, assume a model in current app
            app_label = klass._meta.app_label
            model_name = relation
        model = None
        try:
            model = klass._meta.apps.get_registered_model(app_label, model_name)
        # For django < 1.6
        except AttributeError:
            model = models.get_model(
                app_label, model_name,
                seed_cache=False, only_installed=False)
        except LookupError:
            # Model not registered yet; leave the lazy string in place.
            pass
        if model:
            self.remote_field.model = model
"resource": ""
} |
def contribute_to_class(self, cls, name):
    """
    Because django doesn't give us a nice way to provide
    a through table without losing functionality. We have to
    provide our own through table creation that uses the
    FKToVersion field to be used for the from field.
    """
    # Resolve string references before django's own M2M setup runs.
    self.update_rel_to(cls)
    # Called to get a name
    self.set_attributes_from_name(name)
    self.model = cls
    # Set the through field
    if not self.remote_field.through and not cls._meta.abstract:
        self.remote_field.through = create_many_to_many_intermediary_model(
            self, cls)
    # Do the rest
    super(M2MFromVersion, self).contribute_to_class(cls, name)
"resource": ""
} |
def metas(self, prefix=None, limit=None, delimiter=None):
    """
    RETURN THE METADATA DESCRIPTORS FOR EACH KEY

    Lists bucket keys under *prefix* and returns, for each key, its name
    (extension stripped), etag, expiry and last-modified timestamps.
    """
    limit = coalesce(limit, TOO_MANY_KEYS)
    keys = self.bucket.list(prefix=prefix, delimiter=delimiter)
    # NOTE(review): len(prefix) raises TypeError when prefix is None —
    # presumably callers always pass a prefix; confirm.
    prefix_len = len(prefix)
    output = []
    # Only the prefix itself, or keys where the prefix is followed by
    # "." or ":", are considered direct children.
    for i, k in enumerate(k for k in keys if len(k.key) == prefix_len or k.key[prefix_len] in [".", ":"]):
        output.append({
            "key": strip_extension(k.key),
            "etag": convert.quote2string(k.etag),
            "expiry_date": Date(k.expiry_date),
            "last_modified": Date(k.last_modified)
        })
        if i >= limit:
            break
    return wrap(output)
"resource": ""
} |
def make_middleware(app=None, *args, **kw):
    """ Given an app, return that app wrapped in RaptorizeMiddleware """
    return RaptorizeMiddleware(app, *args, **kw)
"resource": ""
} |
def should_raptorize(self, req, resp):
    """ Determine if this request should be raptorized. Boolean. """
    # Only successful responses are candidates.
    if resp.status != "200 OK":
        return False
    # Only HTML responses make sense to raptorize.
    content_type = resp.headers.get('Content-Type', 'text/plain').lower()
    if 'html' not in content_type:
        return False
    # Random gate: raptorize only a self.random_chance fraction of requests.
    if random.random() > self.random_chance:
        return False
    if self.only_on_april_1st:
        # BUG FIX: original tested ``now.month != 20 and now.day != 1``;
        # month is never 20, so the April-1st restriction never applied.
        # April is month 4, and BOTH month and day must match.
        now = datetime.datetime.now()
        if now.month != 4 or now.day != 1:
            return False
    return True
"resource": ""
} |
def bind(context, block=False):
    """
    Given the context, returns a decorator wrapper;
    the binder replaces the wrapped func with the
    value from the context OR puts this function in
    the context with the name.

    :param context: mapping functions are registered in / resolved from.
    :param block: when True, the ``__TK__block__`` prefix is stripped
        from the function name before using it as the context key.
    """
    # The original duplicated the whole decorator body for the block and
    # non-block cases; only the key derivation differs, so compute it once.
    def decorate(func):
        name = func.__name__
        if block:
            name = name.replace('__TK__block__', '')
        # First binding wins: an existing entry is returned unchanged.
        if name not in context:
            context[name] = func
        return context[name]
    return decorate
"resource": ""
} |
def build_homogeneisation_vehicules(temporary_store = None, year = None):
    assert temporary_store is not None
    # NOTE(review): the string below sits AFTER the first statement, so it
    # is not actually a docstring — it is a bare-string no-op.
    """Compute vehicule numbers by type"""
    assert year is not None
    # Load data
    bdf_survey_collection = SurveyCollection.load(
        collection = 'budget_des_familles', config_files_directory = config_files_directory)
    survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))
    if year == 1995:
        vehicule = None
        # The 1995 BdF survey contains no information about vehicle fuel types.
    if year == 2000:
        vehicule = survey.get_values(table = "depmen")
        kept_variables = ['ident', 'carbu01', 'carbu02']
        vehicule = vehicule[kept_variables]
        vehicule.rename(columns = {'ident': 'ident_men'}, inplace = True)
        vehicule.rename(columns = {'carbu01': 'carbu1'}, inplace = True)
        vehicule.rename(columns = {'carbu02': 'carbu2'}, inplace = True)
        vehicule["veh_tot"] = 1
        # carbu == 1 means petrol, carbu == 2 means diesel.
        vehicule["veh_essence"] = 1 * (vehicule['carbu1'] == 1) + 1 * (vehicule['carbu2'] == 1)
        vehicule["veh_diesel"] = 1 * (vehicule['carbu1'] == 2) + 1 * (vehicule['carbu2'] == 2)
        vehicule.index = vehicule.index.astype(ident_men_dtype)
    if year == 2005:
        vehicule = survey.get_values(table = "automobile")
        kept_variables = ['ident_men', 'carbu']
        vehicule = vehicule[kept_variables]
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = (vehicule['carbu'] == 1)
        vehicule["veh_diesel"] = (vehicule['carbu'] == 2)
    if year == 2011:
        try:
            vehicule = survey.get_values(table = "AUTOMOBILE")
        # NOTE(review): bare except masks all errors; presumably only a
        # missing/renamed table is intended here — confirm.
        except:
            vehicule = survey.get_values(table = "automobile")
        kept_variables = ['ident_me', 'carbu']
        vehicule = vehicule[kept_variables]
        vehicule.rename(columns = {'ident_me': 'ident_men'}, inplace = True)
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = (vehicule['carbu'] == 1)
        vehicule["veh_diesel"] = (vehicule['carbu'] == 2)
    # Compute the number of cars by category and save
    if year != 1995:
        vehicule = vehicule.groupby(by = 'ident_men')["veh_tot", "veh_essence", "veh_diesel"].sum()
        vehicule["pourcentage_vehicule_essence"] = 0
        vehicule.pourcentage_vehicule_essence.loc[vehicule.veh_tot != 0] = vehicule.veh_essence / vehicule.veh_tot
    # Save in temporary store
    temporary_store['automobile_{}'.format(year)] = vehicule
"resource": ""
} |
def get_projects_list(self):
    """ Get projects list.

    Returns a list of TildaProject objects, or an empty list on network failure.
    """
    try:
        result = self._request('/getprojectslist/')
        return [TildaProject(**p) for p in result]
    except NetworkError:
        # Network failures are non-fatal; the caller simply gets no projects.
        return []
"resource": ""
} |
def get_project(self, project_id):
    """ Get project info.

    Returns a TildaProject on success.
    """
    try:
        result = self._request('/getproject/',
                               {'projectid': project_id})
        return TildaProject(**result)
    except NetworkError:
        # NOTE(review): returns [] on failure although success returns a
        # single object — callers must treat any falsy result as "no data".
        return []
"resource": ""
} |
def get_project_export(self, project_id):
    """ Get project info for export.

    Returns a TildaProject on success.
    """
    try:
        result = self._request('/getprojectexport/',
                               {'projectid': project_id})
        return TildaProject(**result)
    except NetworkError:
        # NOTE(review): returns [] on failure although success returns a
        # single object — callers must treat any falsy result as "no data".
        return []
"resource": ""
} |
def get_pages_list(self, project_id):
    """ Get pages list.

    Returns a list of TildaPage objects, or an empty list on network failure.
    """
    try:
        result = self._request('/getpageslist/',
                               {'projectid': project_id})
        return [TildaPage(**p) for p in result]
    except NetworkError:
        # Network failures are non-fatal; the caller simply gets no pages.
        return []
"resource": ""
} |
def get_page(self, page_id):
    """ Get short page info and body html code.

    Returns a TildaPage on success.
    """
    try:
        result = self._request('/getpage/',
                               {'pageid': page_id})
        return TildaPage(**result)
    except NetworkError:
        # NOTE(review): returns [] on failure although success returns a
        # single object — callers must treat any falsy result as "no data".
        return []
"resource": ""
} |
def get_page_full(self, page_id):
    """ Get full page info and full html code.

    Returns a TildaPage on success.
    """
    try:
        result = self._request('/getpagefull/',
                               {'pageid': page_id})
        return TildaPage(**result)
    except NetworkError:
        # NOTE(review): returns [] on failure although success returns a
        # single object — callers must treat any falsy result as "no data".
        return []
"resource": ""
} |
def get_page_export(self, page_id):
    """ Get short page info for export and body html code.

    Returns a TildaPage on success.
    """
    try:
        result = self._request('/getpageexport/',
                               {'pageid': page_id})
        return TildaPage(**result)
    except NetworkError:
        # NOTE(review): returns [] on failure although success returns a
        # single object — callers must treat any falsy result as "no data".
        return []
"resource": ""
} |
def get_page_full_export(self, page_id):
    """ Get full page info for export and body html code.

    Returns a TildaPage on success.
    """
    try:
        result = self._request('/getpagefullexport/',
                               {'pageid': page_id})
        return TildaPage(**result)
    except NetworkError:
        # NOTE(review): returns [] on failure although success returns a
        # single object — callers must treat any falsy result as "no data".
        return []
"resource": ""
} |
def _flatten(l):
    """Helper to flatten a list of lists.

    ``whaaaaat.Separator`` entries are kept as-is; every other entry is
    assumed iterable and its items are spliced into the result.
    """
    flattened = []
    for entry in l:
        if isinstance(entry, whaaaaat.Separator):
            flattened.append(entry)
        else:
            flattened.extend(entry)
    return flattened
"resource": ""
} |
def read_thrift(file_obj, ttype):
    """Read a thrift structure from the given fo.

    :param file_obj: open binary file-like object positioned at the structure.
    :param ttype: thrift-generated class to instantiate and populate.
    :returns: the populated *ttype* instance; *file_obj* is left positioned
        just past the structure.
    """
    from thrift.transport.TTransport import TFileObjectTransport, TBufferedTransport
    starting_pos = file_obj.tell()
    # set up the protocol chain
    ft = TFileObjectTransport(file_obj)
    bufsize = 2 ** 16
    # for accelerated reading ensure that we wrap this so that the CReadable transport can be used.
    bt = TBufferedTransport(ft, bufsize)
    pin = TCompactProtocol(bt)
    # read out type
    obj = ttype()
    obj.read(pin)
    # The read will actually overshoot due to the buffering that thrift does. Seek backwards to the correct spot,.
    buffer_pos = bt.cstringio_buf.tell()
    ending_pos = file_obj.tell()
    blocks = ((ending_pos - starting_pos) // bufsize) - 1
    if blocks < 0:
        blocks = 0
    file_obj.seek(starting_pos + blocks * bufsize + buffer_pos)
    return obj
"resource": ""
} |
def write_thrift(fobj, thrift):
    """Write binary compact representation of thiftpy structured object

    Parameters
    ----------
    fobj: open file-like object (binary mode)
    thrift: thriftpy object to write

    Returns
    -------
    Number of bytes written
    """
    t0 = fobj.tell()
    pout = TCompactProtocol(fobj)
    try:
        thrift.write(pout)
        fail = False
    except TProtocolException as e:
        # Walk the traceback to find the frame inside thrift's
        # write_struct, whose locals identify the offending object/field.
        typ, val, tb = sys.exc_info()
        frames = []
        while tb is not None:
            frames.append(tb)
            tb = tb.tb_next
        frame = [tb for tb in frames if 'write_struct' in str(tb.tb_frame.f_code)]
        variables = frame[0].tb_frame.f_locals
        obj = variables['obj']
        name = variables['fname']
        fail = True
    # Raised outside the except block so the ParquetException is not
    # chained to the original TProtocolException's context.
    if fail:
        raise ParquetException('Thrift parameter validation failure %s'
                               ' when writing: %s-> Field: %s' % (
                                   val.args[0], obj, name
                               ))
    return fobj.tell() - t0
"resource": ""
} |
def thrift_print(structure, offset=0):
    """
    Handy recursive text output for thrift structures.

    :param structure: thrift object (or any value; non-thrift values are
        rendered with ``str``).
    :param offset: current indentation depth, grows by 2 per level.
    """
    if not is_thrift_item(structure):
        return str(structure)
    s = str(structure.__class__) + '\n'
    for key in dir(structure):
        # Skip private attributes and thrift machinery.
        if key.startswith('_') or key in ['thrift_spec', 'read', 'write',
                                          'default_spec', 'validate']:
            continue
        s = s + ' ' * offset + key + ': ' + thrift_print(getattr(structure, key)
                                                         , offset+2) + '\n'
    return s
"resource": ""
} |
def macho_dependencies_list(target_path, header_magic=None):
    """ Generates a list of libraries the given Mach-O file depends on.

    In that list a single library is represented by its "install path": for some
    libraries it would be a full file path, and for others it would be a relative
    path (sometimes with dyld templates like @executable_path or @rpath in it).

    Note: I don't know any reason why would some architectures of a fat Mach-O depend
    on certain libraries while others don't, but *it's technically possible*.
    So that's why you may want to specify the `header_magic` value for a particular header.

    Returns an object with two properties: `weak` and `strong` that hold lists of weak
    and strong dependencies respectively.

    NOTE: Python 2 only — uses ``basestring`` and tuple-parameter lambdas.
    """
    MachODeprendencies = namedtuple("MachODeprendecies", "weak strong")
    # Convert the magic value into macholib representation if needed
    if isinstance(header_magic, basestring):
        header_magic = _MH_MAGIC_from_string(header_magic)
    macho = MachO(target_path)
    # Obtain a list of headers for the required magic value (if any)
    suggestions = filter(lambda t: t.header.magic == header_magic
                         or  # just add all headers if user didn't specifiy the magic
                         header_magic == None, macho.headers)
    header = None if len(suggestions) <= 0 else suggestions[0]
    # filter() above *always* returns a list, so we have to check if it's empty
    if header is None:
        raise Exception("Unable to find a header for the given MAGIC value in that Mach-O file")
        return None  # NOTE(review): unreachable after raise
    def decodeLoadCommandData(data):
        # Also ignore trailing zeros
        return data[:data.find(b"\x00")].decode(sys.getfilesystemencoding())
    def strongReferencesFromHeader(h):
        # List of LC_LOAD_DYLIB commands
        list = filter(lambda (lc,cmd,data): lc.cmd == LC_LOAD_DYLIB, h.commands)
        # Their contents (aka data) as a file path
        return map(lambda (lc,cmd,data): decodeLoadCommandData(data), list)
    def weakReferencesFromHeader(h):
        list = filter(lambda (lc,cmd,data): lc.cmd == LC_LOAD_WEAK_DYLIB, h.commands)
        return map(lambda (lc,cmd,data): decodeLoadCommandData(data), list)
    strongRefs = strongReferencesFromHeader(header)
    weakRefs = weakReferencesFromHeader(header)
    return MachODeprendencies(weak = weakRefs, strong = strongRefs)
"resource": ""
} |
def insert_load_command_into_header(header, load_command):
    """Append *load_command* to *header* and grow the recorded header size.

    :param header: macholib header wrapper (has ``commands`` and an inner
        ``header`` with the ``ncmds`` counter).
    :param load_command: ``(lc, cmd, data)`` triple as produced by macholib.
    """
    command, payload, data = load_command
    header.commands.append((command, payload, data))
    # One more load command is now present in the header.
    header.header.ncmds += 1
    # The raw header grows by exactly the size of the new command.
    header.changedHeaderSizeBy(command.cmdsize)
"resource": ""
} |
def clean_username(self):
    """
    Validate that the username is alphanumeric and is not already in use.
    Also validates that the username is not listed in
    ACCOUNTS_FORBIDDEN_USERNAMES list.
    """
    try:
        # Case-insensitive lookup: "Bob" clashes with "bob".
        get_user_model().objects.get(
            username__iexact=self.cleaned_data['username'])
    except get_user_model().DoesNotExist:
        # No existing user: the name is free.
        pass
    else:
        raise forms.ValidationError(_('This username is already taken.'))
    if self.cleaned_data['username'].lower() in accounts_settings.ACCOUNTS_FORBIDDEN_USERNAMES:
        raise forms.ValidationError(_('This username is not allowed.'))
    return self.cleaned_data['username']
"resource": ""
} |
def get_authenticate_header(self, request):
    """
    If a request is unauthenticated, determine the WWW-Authenticate
    header to use for 401 responses, if any.

    Delegates to the first configured authenticator; returns None when
    no authenticators are configured.
    """
    authenticators = self.get_authenticators()
    if not authenticators:
        return None
    return authenticators[0].authenticate_header(request)
"resource": ""
} |
def create_dir(dst):
    """Create the parent directory of *dst* if it does not exist yet.

    :param dst: target file path whose containing directory must exist.
        Paths with no directory component are a no-op.
    """
    parent = os.path.dirname(dst)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
"resource": ""
} |
def recompute_table_revnums(self):
    '''
    Recomputes the revnums for the csetLog table
    by creating a new table, and copying csetLog to
    it. The INTEGER PRIMARY KEY in the temp table auto increments
    as rows are added.

    IMPORTANT: Only call this after acquiring the
    lock `self.working_locker`.
    :return:
    '''
    with self.conn.transaction() as t:
        # Fresh table: revnum re-numbers automatically on insert because
        # it is an INTEGER PRIMARY KEY (SQLite rowid alias).
        t.execute('''
        CREATE TABLE temp (
            revnum INTEGER PRIMARY KEY,
            revision CHAR(12) NOT NULL,
            timestamp INTEGER
        );''')
        # Copy in revnum order so the new sequence preserves ordering.
        t.execute(
            "INSERT INTO temp (revision, timestamp) "
            "SELECT revision, timestamp FROM csetlog ORDER BY revnum ASC"
        )
        # Swap the rebuilt table into place.
        t.execute("DROP TABLE csetLog;")
        t.execute("ALTER TABLE temp RENAME TO csetLog;")
"resource": ""
} |
def getPathOfExecutable(executable):
    """
    Returns the full path of the executable, or None if the executable
    can not be found.

    :param executable: bare program name to look up on the PATH.
    """
    # FIX: split on os.pathsep (':' on POSIX, ';' on Windows) instead of
    # a hard-coded ':' so the lookup also works on Windows.
    exe_paths = os.environ['PATH'].split(os.pathsep)
    for exe_path in exe_paths:
        exe_file = os.path.join(exe_path, executable)
        # Must be a regular file with the execute permission bit set.
        if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
            return exe_file
    return None
"resource": ""
} |
def runCommandReturnOutput(cmd):
    """
    Runs a shell command and return the stdout and stderr.

    :param cmd: command line as a single string; split with shlex rules.
    :returns: ``(stdout, stderr)`` bytes captured from the process.
    :raises subprocess.CalledProcessError: if the process exits non-zero.
    """
    splits = shlex.split(cmd)
    proc = subprocess.Popen(
        splits, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        # BUG FIX: CalledProcessError takes (returncode, cmd, output, stderr);
        # the original passed (stdout, stderr), producing a bogus exception
        # whose .returncode was the stdout bytes.
        raise subprocess.CalledProcessError(
            proc.returncode, splits, output=stdout, stderr=stderr)
    return stdout, stderr
"resource": ""
} |
def getYamlDocument(filePath):
    """
    Return a yaml file's contents as a dictionary.

    :param filePath: path of the YAML file to parse.
    """
    with open(filePath) as stream:
        # SECURITY NOTE(review): yaml.load without an explicit Loader can
        # construct arbitrary Python objects from untrusted input; prefer
        # yaml.safe_load unless custom tags are required — confirm callers.
        doc = yaml.load(stream)
    return doc
"resource": ""
} |
def zipLists(*lists):
    """
    Checks to see if all of the lists are the same length, and throws
    an AssertionError otherwise. Returns the zipped lists.
    """
    expected = len(lists[0])
    for index, candidate in enumerate(lists[1:], start=1):
        if len(candidate) == expected:
            continue
        raise AssertionError(
            "List at index {} has length {} != {}".format(
                index, len(candidate), expected))
    return zip(*lists)
"resource": ""
} |
def getLinesFromLogFile(stream):
    """
    Returns all lines written to the passed in stream.

    Flushes pending writes, rewinds to the start, and reads everything.
    """
    stream.flush()
    stream.seek(0)
    return stream.readlines()
"resource": ""
} |
def getFilePathsWithExtensionsInDirectory(dirTree, patterns, sort=True):
    """
    Returns all file paths that match any one of patterns in a
    file tree with its root at dirTree. Sorts the paths by default.

    :param dirTree: root directory to walk recursively.
    :param patterns: fnmatch-style patterns; a file matching any is kept.
    :param sort: when True (default), results are sorted lexicographically.
    """
    matches = []
    for root, _dirs, names in os.walk(dirTree):
        for name in names:
            # A file is included at most once, on its first matching pattern.
            if any(fnmatch.fnmatch(name, pattern) for pattern in patterns):
                matches.append(os.path.join(root, name))
    if sort:
        matches.sort()
    return matches
"resource": ""
} |
def performInDirectory(dirPath):
    """
    Change the current working directory to dirPath before performing
    an operation, then restore the original working directory after.

    Generator-based context helper: the body runs between the single
    yield's suspension and resumption; restoration is guaranteed by finally.
    """
    savedPath = os.getcwd()
    try:
        os.chdir(dirPath)
        yield
    finally:
        os.chdir(savedPath)
"resource": ""
} |
def filenames(self):
    """Returns the filenames that this par2 file repairs."""
    return [packet.name for packet in self.packets
            if isinstance(packet, FileDescriptionPacket)]
"resource": ""
} |
def _set_slots_to_null(self, cls):
    """
    Assign Null to every ``__slots__`` entry declared on *cls* and all of
    its base classes.

    (Slot descriptors raise AttributeError until first assignment, so this
    makes them readable on the instance.)
    """
    for slot_name in getattr(cls, "__slots__", ()):
        setattr(self, slot_name, Null)
    for base in cls.__bases__:
        self._set_slots_to_null(base)
"resource": ""
} |
def main():
    """Play Conway's Game of Life on the terminal."""
    # NOTE: Python 2 only — tuple-parameter def and contextlib.nested.
    def die((x, y)):
        """Pretend any out-of-bounds cell is dead."""
        # Returns the coordinate unchanged when in bounds, None otherwise.
        if 0 <= x < width and 0 <= y < height:
            return x, y
    LOAD_FACTOR = 9  # Smaller means more crowded.
    NUDGING_LOAD_FACTOR = LOAD_FACTOR * 3  # Smaller means a bigger nudge.
    term = Terminal()
    width = term.width
    height = term.height
    board = random_board(width - 1, height - 1, LOAD_FACTOR)
    detector = BoredomDetector()
    cells = cell_strings(term)
    with nested(term.fullscreen(), term.hidden_cursor()):
        try:
            while True:
                # ~20 frames per second.
                frame_end = time() + 0.05
                board = next_board(board, die)
                draw(board, term, cells)
                # If the pattern is stuck in a loop, give it a nudge:
                if detector.is_bored_of(board):
                    board.update(random_board(width - 1,
                                              height - 1,
                                              NUDGING_LOAD_FACTOR))
                stdout.flush()
                sleep_until(frame_end)
                clear(board, term, height)
        except KeyboardInterrupt:
            # Ctrl-C exits cleanly, restoring the terminal via the with-block.
            pass
"resource": ""
} |
q44950 | cell_strings | train | def cell_strings(term):
"""Return the strings that represent each possible living cell state.
Return the most colorful ones the terminal supports.
"""
num_colors = term.number_of_colors
if num_colors >= 16:
funcs = term.on_bright_red, term.on_bright_green, term.on_bright_cyan
elif num_colors >= 8:
funcs = term.on_red, term.on_green, term.on_blue
else:
# For black and white, use the checkerboard cursor from the vt100
# alternate charset:
return (term.reverse(' '),
term.smacs + term.reverse('a') + term.rmacs,
term.smacs + 'a' + term.rmacs)
# Wrap spaces in whatever pretty colors we chose:
return [f(' ') for f in funcs] | python | {
"resource": ""
} |
q44951 | random_board | train | def random_board(max_x, max_y, load_factor):
"""Return a random board with given max x and y coords."""
return dict(((randint(0, max_x), randint(0, max_y)), 0) for _ in
xrange(int(max_x * max_y / load_factor))) | python | {
"resource": ""
} |
q44952 | clear | train | def clear(board, term, height):
"""Clear the droppings of the given board."""
for y in xrange(height):
print term.move(y, 0) + term.clear_eol, | python | {
"resource": ""
} |
q44953 | draw | train | def draw(board, term, cells):
"""Draw a board to the terminal."""
for (x, y), state in board.iteritems():
with term.location(x, y):
print cells[state], | python | {
"resource": ""
} |
q44954 | next_board | train | def next_board(board, wrap):
"""Given a board, return the board one interation later.
Adapted from Jack Diedrich's implementation from his 2012 PyCon talk "Stop
Writing Classes"
:arg wrap: A callable which takes a point and transforms it, for example
to wrap to the other edge of the screen. Return None to remove a point.
"""
new_board = {}
# We need consider only the points that are alive and their neighbors:
points_to_recalc = set(board.iterkeys()) | set(chain(*map(neighbors, board)))
for point in points_to_recalc:
count = sum((neigh in board) for neigh in
(wrap(n) for n in neighbors(point) if n))
if count == 3:
state = 0 if point in board else 1
elif count == 2 and point in board:
state = 2
else:
state = None
if state is not None:
wrapped = wrap(point)
if wrapped:
new_board[wrapped] = state
return new_board | python | {
"resource": ""
} |
q44955 | BoredomDetector.is_bored_of | train | def is_bored_of(self, board):
"""Return whether the simulation is probably in a loop.
This is a stochastic guess. Basically, it detects whether the
simulation has had the same number of cells a lot lately. May have
false positives (like if you just have a screen full of gliders) or
take awhile to catch on sometimes. I've even seen it totally miss the
boat once. But it's simple and fast.
"""
self.iteration += 1
if len(board) == self.num:
self.times += 1
is_bored = self.times > self.REPETITIONS
if self.iteration > self.REPETITIONS * self.PATTERN_LENGTH or is_bored:
# A little randomness in case things divide evenly into each other:
self.iteration = randint(-2, 0)
self.num = len(board)
self.times = 0
return is_bored | python | {
"resource": ""
} |
q44956 | Queue.push | train | def push(self, value):
"""
SNEAK value TO FRONT OF THE QUEUE
"""
if self.closed and not self.allow_add_after_close:
Log.error("Do not push to closed queue")
with self.lock:
self._wait_for_queue_space()
if not self.closed:
self.queue.appendleft(value)
return self | python | {
"resource": ""
} |
q44957 | Queue._wait_for_queue_space | train | def _wait_for_queue_space(self, timeout=DEFAULT_WAIT_TIME):
"""
EXPECT THE self.lock TO BE HAD, WAITS FOR self.queue TO HAVE A LITTLE SPACE
"""
wait_time = 5
(DEBUG and len(self.queue) > 1 * 1000 * 1000) and Log.warning("Queue {{name}} has over a million items")
now = time()
if timeout != None:
time_to_stop_waiting = now + timeout
else:
time_to_stop_waiting = now + DEFAULT_WAIT_TIME
if self.next_warning < now:
self.next_warning = now + wait_time
while not self.closed and len(self.queue) >= self.max:
if now > time_to_stop_waiting:
Log.error(THREAD_TIMEOUT)
if self.silent:
self.lock.wait(Till(till=time_to_stop_waiting))
else:
self.lock.wait(Till(seconds=wait_time))
if len(self.queue) >= self.max:
now = time()
if self.next_warning < now:
self.next_warning = now + wait_time
Log.alert(
"Queue by name of {{name|quote}} is full with ({{num}} items), thread(s) have been waiting {{wait_time}} sec",
name=self.name,
num=len(self.queue),
wait_time=wait_time
) | python | {
"resource": ""
} |
q44958 | PriorityQueue.pop | train | def pop(self, till=None, priority=None):
"""
WAIT FOR NEXT ITEM ON THE QUEUE
RETURN THREAD_STOP IF QUEUE IS CLOSED
RETURN None IF till IS REACHED AND QUEUE IS STILL EMPTY
:param till: A `Signal` to stop waiting and return None
:return: A value, or a THREAD_STOP or None
"""
if till is not None and not isinstance(till, Signal):
Log.error("expecting a signal")
with self.lock:
while True:
if not priority:
priority = self.highest_entry()
if priority:
value = self.queue[priority].queue.popleft()
return value
if self.closed:
break
if not self.lock.wait(till=till | self.closed):
if self.closed:
break
return None
(DEBUG or not self.silent) and Log.note(self.name + " queue stopped")
return THREAD_STOP | python | {
"resource": ""
} |
q44959 | Random.weight | train | def weight(weights):
"""
RETURN RANDOM INDEX INTO WEIGHT ARRAY, GIVEN WEIGHTS
"""
total = sum(weights)
p = SEED.random()
acc = 0
for i, w in enumerate(weights):
acc += w / total
if p < acc:
return i
return len(weights) - 1 | python | {
"resource": ""
} |
q44960 | AESCipher.cipher_block | train | def cipher_block (self, state):
"""Perform AES block cipher on input"""
# PKCS7 Padding
state=state+[16-len(state)]*(16-len(state))# Fails test if it changes the input with +=
self._add_round_key(state, 0)
for i in range(1, self._Nr):
self._sub_bytes(state)
self._shift_rows(state)
self._mix_columns(state, False)
self._add_round_key(state, i)
self._sub_bytes(state)
self._shift_rows(state)
self._add_round_key(state, self._Nr)
return state | python | {
"resource": ""
} |
q44961 | AESCipher.decipher_block | train | def decipher_block (self, state):
"""Perform AES block decipher on input"""
if len(state) != 16:
Log.error(u"Expecting block of 16")
self._add_round_key(state, self._Nr)
for i in range(self._Nr - 1, 0, -1):
self._i_shift_rows(state)
self._i_sub_bytes(state)
self._add_round_key(state, i)
self._mix_columns(state, True)
self._i_shift_rows(state)
self._i_sub_bytes(state)
self._add_round_key(state, 0)
return state | python | {
"resource": ""
} |
q44962 | path | train | def path(name):
"""Print path to root."""
try:
coll = Collection.query.filter(Collection.name == name).one()
tr = LeftAligned(
traverse=CollTraversalPathToRoot(coll.path_to_root().all()))
click.echo(tr(coll))
except NoResultFound:
raise click.UsageError('Collection {0} not found'.format(name)) | python | {
"resource": ""
} |
q44963 | create | train | def create(name, dry_run, verbose, query=None, parent=None):
"""Create new collection."""
if parent is not None:
parent = Collection.query.filter_by(name=parent).one().id
collection = Collection(name=name, dbquery=query, parent_id=parent)
db.session.add(collection)
if verbose:
click.secho('New collection: {0}'.format(collection)) | python | {
"resource": ""
} |
q44964 | delete | train | def delete(name, dry_run, verbose):
"""Delete a collection."""
collection = Collection.query.filter_by(name=name).one()
if verbose:
tr = LeftAligned(traverse=AttributeTraversal())
click.secho(tr(collection), fg='red')
db.session.delete(collection) | python | {
"resource": ""
} |
q44965 | query | train | def query(name):
"""Print the collection query."""
collection = Collection.query.filter_by(name=name).one()
click.echo(collection.dbquery) | python | {
"resource": ""
} |
q44966 | StyleMixin.get_text | train | def get_text(self, node):
"""Get node text representation."""
return click.style(
repr(node), fg='green' if node.level > 1 else 'red'
) | python | {
"resource": ""
} |
q44967 | cli_char | train | def cli_char(name, tibiadata, json):
"""Displays information about a Tibia character."""
name = " ".join(name)
char = _fetch_and_parse(Character.get_url, Character.from_content,
Character.get_url_tibiadata, Character.from_tibiadata,
tibiadata, name)
if json and char:
print(char.to_json(indent=2))
return
print(get_character_string(char)) | python | {
"resource": ""
} |
q44968 | cli_guild | train | def cli_guild(name, tibiadata, json):
"""Displays information about a Tibia guild."""
name = " ".join(name)
guild = _fetch_and_parse(Guild.get_url, Guild.from_content,
Guild.get_url_tibiadata, Guild.from_tibiadata,
tibiadata, name)
if json and guild:
print(guild.to_json(indent=2))
return
print(get_guild_string(guild)) | python | {
"resource": ""
} |
q44969 | cli_guilds | train | def cli_guilds(world, tibiadata, json):
"""Displays the list of guilds for a specific world"""
world = " ".join(world)
guilds = _fetch_and_parse(ListedGuild.get_world_list_url, ListedGuild.list_from_content,
ListedGuild.get_world_list_url_tibiadata, ListedGuild.list_from_tibiadata,
tibiadata, world)
if json and guilds:
import json as _json
print(_json.dumps(guilds, default=dict, indent=2))
return
print(get_guilds_string(guilds)) | python | {
"resource": ""
} |
q44970 | _unseen_event_ids | train | def _unseen_event_ids(medium):
"""
Return all events that have not been seen on this medium.
"""
query = '''
SELECT event.id
FROM entity_event_event AS event
LEFT OUTER JOIN (SELECT *
FROM entity_event_eventseen AS seen
WHERE seen.medium_id=%s) AS eventseen
ON event.id = eventseen.event_id
WHERE eventseen.medium_id IS NULL
'''
unseen_events = Event.objects.raw(query, params=[medium.id])
ids = [e.id for e in unseen_events]
return ids | python | {
"resource": ""
} |
q44971 | EventQuerySet.mark_seen | train | def mark_seen(self, medium):
"""
Creates EventSeen objects for the provided medium for every event
in the queryset.
Creating these EventSeen objects ensures they will not be
returned when passing ``seen=False`` to any of the medium
event retrieval functions, ``events``, ``entity_events``, or
``events_targets``.
"""
EventSeen.objects.bulk_create([
EventSeen(event=event, medium=medium) for event in self
]) | python | {
"resource": ""
} |
q44972 | EventManager.create_event | train | def create_event(self, actors=None, ignore_duplicates=False, **kwargs):
"""
Create events with actors.
This method can be used in place of ``Event.objects.create``
to create events, and the appropriate actors. It takes all the
same keywords as ``Event.objects.create`` for the event
creation, but additionally takes a list of actors, and can be
told to not attempt to create an event if a duplicate event
exists.
:type source: Source
:param source: A ``Source`` object representing where the
event came from.
:type context: dict
:param context: A dictionary containing relevant
information about the event, to be serialized into
JSON. It is possible to load additional context
dynamically when events are fetched. See the
documentation on the ``ContextRenderer`` model.
:type uuid: str
:param uuid: A unique string for the event. Requiring a
``uuid`` allows code that creates events to ensure they do
not create duplicate events. This id could be, for example
some hash of the ``context``, or, if the creator is
unconcerned with creating duplicate events a call to
python's ``uuid1()`` in the ``uuid`` module.
:type time_expires: datetime (optional)
:param time_expires: If given, the default methods for
querying events will not return this event after this time
has passed.
:type actors: (optional) List of entities or list of entity ids.
:param actors: An ``EventActor`` object will be created for
each entity in the list. This allows for subscriptions
which are only following certain entities to behave
appropriately.
:type ignore_duplicates: (optional) Boolean
:param ignore_duplicates: If ``True``, a check will be made to
ensure that an event with the give ``uuid`` does not exist
before attempting to create the event. Setting this to
``True`` allows the creator of events to gracefully ensure
no duplicates are attempted to be created. There is a uniqueness constraint on uuid
so it will raise an exception if duplicates are allowed and submitted.
:rtype: Event
:returns: The created event. Alternatively if a duplicate
event already exists and ``ignore_duplicates`` is
``True``, it will return ``None``.
"""
kwargs['actors'] = actors
kwargs['ignore_duplicates'] = ignore_duplicates
events = self.create_events([kwargs])
if events:
return events[0]
return None | python | {
"resource": ""
} |
q44973 | FormView.get_object_url | train | def get_object_url(self):
"""
Returns the url where this object can be edited.
"""
if self.kwargs.get(self.slug_url_kwarg, False) == \
unicode(getattr(self.object, self.slug_field, "")) \
and not self.force_add:
url = self.request.build_absolute_uri()
else:
url = self.bundle.get_view_url('edit', self.request.user,
{'object': self.object},
self.kwargs)
return url | python | {
"resource": ""
} |
q44974 | FormView.get_cancel_url | train | def get_cancel_url(self):
"""
Returns the cancel url for this view.
if `self.cancel_view` is None the current url will
be used. Otherwise the get_view_url will be called with
the current bundle using `self.cancel_view` as the
view name.
"""
if self.cancel_view:
url = self.bundle.get_view_url(self.cancel_view,
self.request.user, {},
self.kwargs)
else:
url = self.request.build_absolute_uri()
return self.customized_return_url(url) | python | {
"resource": ""
} |
q44975 | FormView.get_success_url | train | def get_success_url(self):
"""
Returns the url to redirect to after a successful update.
if `self.redirect_to_view` is None the current url will
be used. Otherwise the get_view_url will be called
on the current bundle using `self.redirect_to_view` as the
view name. If the name is "main" or "main_list" no object
will be passed. Otherwise `self.object` will be passed as
a kwarg.
"""
if self.redirect_to_view:
kwargs = {}
if self.redirect_to_view != 'main' and \
self.redirect_to_view != 'main_list':
kwargs['object'] = self.object
return self.bundle.get_view_url(self.redirect_to_view,
self.request.user, kwargs,
self.kwargs)
else:
return self.request.build_absolute_uri() | python | {
"resource": ""
} |
q44976 | FormView.get_object | train | def get_object(self):
"""
Get the object we are working with. Makes sure
get_queryset is called even when in add mode.
"""
if not self.force_add and self.kwargs.get(self.slug_url_kwarg, None):
return super(FormView, self).get_object()
else:
self.queryset = self.get_queryset()
return None | python | {
"resource": ""
} |
q44977 | FormView.get_fieldsets | train | def get_fieldsets(self):
"""
Hook for specifying fieldsets. If 'self.fieldsets' is
empty this will default to include all the fields in
the form with a title of None.
"""
if self.fieldsets:
return self.fieldsets
form_class = self.get_form_class()
form = self.get_form(form_class)
fields = form.base_fields.keys()
readonly_fields = self.get_readonly_fields()
if readonly_fields:
fields.extend(readonly_fields)
return [(None, {'fields': fields})] | python | {
"resource": ""
} |
q44978 | FormView.get_form_class | train | def get_form_class(self):
"""
Returns the form class to use in this view. Makes
sure that the form_field_callback is set to use
the `formfield_for_dbfield` method and that any
custom form classes are prepared by the
`customize_form_widgets` method.
"""
if self.fieldsets:
fields = flatten_fieldsets(self.get_fieldsets())
else:
if (self.form_class and
getattr(self.form_class, 'Meta', None) and
getattr(self.form_class.Meta, 'fields', None)):
fields = self.form_class.Meta.fields
else:
fields = []
exclude = None
if self.parent_field:
exclude = (self.parent_field,)
readonly_fields = self.get_readonly_fields()
if readonly_fields:
if exclude:
exclude = list(exclude)
else:
exclude = []
for field in readonly_fields:
try:
try:
f = self.model._meta.get_field(field)
if fields:
fields.remove(field)
else:
exclude.append(field)
except models.FieldDoesNotExist:
if fields:
fields.remove(field)
except ValueError:
pass
params = {'fields': fields or '__all__',
'exclude': exclude,
'formfield_callback': self.formfield_for_dbfield}
if self.form_class:
if issubclass(self.form_class, forms.ModelForm) and \
getattr(self.form_class._meta, 'model', None):
model = self.form_class.Meta.model
else:
model = self.model
fc = self.customize_form_widgets(self.form_class, fields=fields)
params['form'] = fc
else:
if self.model is not None:
# If a model has been explicitly provided, use it
model = self.model
elif hasattr(self, 'object') and self.object is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_queryset().model
return model_forms.modelform_factory(model, **params) | python | {
"resource": ""
} |
q44979 | FormView.save_form | train | def save_form(self, form):
"""
Save a valid form. If there is a parent attribute,
this will make sure that the parent object is added
to the saved object. Either as a relationship before
saving or in the case of many to many relations after
saving. Any forced instance values are set as well.
Returns the saved object.
"""
# Add any force_instance_values
force = self.get_force_instance_values()
if force:
for k, v in force.items():
setattr(form.instance, k, v)
# Are we adding to an attr or manager
should_add = False
if self.parent_object:
m2ms = [f.name for f in form.instance._meta.many_to_many]
m2ms.extend(
[f.field.rel.related_name for f in
[
f for f in form.instance._meta.get_fields(include_hidden=True)
if f.many_to_many and f.auto_created
]
]
)
if self.parent_field in m2ms:
should_add = True
else:
try:
form.instance._meta.get_field(self.parent_field)
setattr(form.instance, self.parent_field,
self.parent_object)
except FieldDoesNotExist:
pass
obj = form.save()
# Do we need to add this to a m2m
if should_add:
getattr(obj, self.parent_field).add(self.parent_object)
return obj | python | {
"resource": ""
} |
q44980 | FormView.save_formsets | train | def save_formsets(self, form, formsets, auto_tags=None):
"""
Hook for saving formsets. Loops through
all the given formsets and calls their
save method.
"""
for formset in formsets.values():
tag_handler.set_auto_tags_for_formset(formset, auto_tags)
formset.save() | python | {
"resource": ""
} |
q44981 | FormView.form_valid | train | def form_valid(self, form, formsets):
"""
Response for valid form. In one transaction this will
save the current form and formsets, log the action
and message the user.
Returns the results of calling the `success_response` method.
"""
# check if it's a new object before it save the form
new_object = False
if not self.object:
new_object = True
instance = getattr(form, 'instance', None)
auto_tags, changed_tags, old_tags = tag_handler.get_tags_from_data(
form.data, self.get_tags(instance))
tag_handler.set_auto_tags_for_form(form, auto_tags)
with transaction.commit_on_success():
self.object = self.save_form(form)
self.save_formsets(form, formsets, auto_tags=auto_tags)
url = self.get_object_url()
self.log_action(self.object, CMSLog.SAVE, url=url)
msg = self.write_message()
# get old and new tags
if not new_object and changed_tags and old_tags:
tag_handler.update_changed_tags(changed_tags, old_tags)
return self.success_response(msg) | python | {
"resource": ""
} |
q44982 | FormView.success_response | train | def success_response(self, message=None):
"""
Returns a 'render redirect' to the result of the
`get_success_url` method.
"""
return self.render(self.request,
redirect_url=self.get_success_url(),
obj=self.object,
message=message,
collect_render_data=False) | python | {
"resource": ""
} |
q44983 | FormView.render | train | def render(self, request, **kwargs):
"""
Renders this view. Adds cancel_url to the context.
If the request get parameters contains 'popup' then
the `render_type` is set to 'popup'.
"""
if request.GET.get('popup'):
self.render_type = 'popup'
kwargs['popup'] = 1
kwargs['cancel_url'] = self.get_cancel_url()
if not self.object:
kwargs['single_title'] = True
return super(FormView, self).render(request, **kwargs) | python | {
"resource": ""
} |
q44984 | FormView.get | train | def get(self, request, *args, **kwargs):
"""
Method for handling GET requests.
Calls the `render` method with the following
items in context.
* **adminForm** - The main form wrapped in an helper class \
that helps with fieldset iteration and html attributes.
* **obj** - The object being edited.
* **formsets** - Any attached formsets.
"""
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
formsets = self.get_formsets(form)
adminForm = self.get_admin_form(form)
adminFormSets = self.get_admin_formsets(formsets)
context = {
'adminForm': adminForm,
'obj': self.object,
'formsets': adminFormSets,
}
return self.render(request, **context) | python | {
"resource": ""
} |
q44985 | FormView.post | train | def post(self, request, *args, **kwargs):
"""
Method for handling POST requests.
Validates submitted form and
formsets. Saves if valid, re displays
page with errors if invalid.
"""
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
formsets = self.get_formsets(form, saving=True)
valid_formsets = True
for formset in formsets.values():
if not formset.is_valid():
valid_formsets = False
break
if self.is_valid(form, formsets):
return self.form_valid(form, formsets)
else:
adminForm = self.get_admin_form(form)
adminFormSets = self.get_admin_formsets(formsets)
context = {
'adminForm': adminForm,
'formsets': adminFormSets,
'obj': self.object,
}
return self.form_invalid(form=form, **context) | python | {
"resource": ""
} |
q44986 | PreviewWrapper.get_preview_kwargs | train | def get_preview_kwargs(self, **kwargs):
"""
Gets the url keyword arguments to pass to the
`preview_view` callable. If the `pass_through_kwarg`
attribute is set the value of `pass_through_attr` will
be looked up on the object.
So if you are previewing an item Obj<id=2> and
::
self.pass_through_kwarg = 'object_id'
self.pass_through_attr = 'pk'
This will return
::
{ 'object_id' : 2 }
"""
if not self.pass_through_kwarg:
return {}
obj = self.get_object()
return {
self.pass_through_kwarg: getattr(obj, self.pass_through_attr)
} | python | {
"resource": ""
} |
q44987 | PreviewWrapper.get | train | def get(self, request, *args, **kwargs):
"""
Method for handling GET requests.
Sets the renderer to be a RenderResponse instance
that uses `default_template` as the template.
The `preview_view` callable is called and passed to `render`
method as the data keyword argument.
"""
self.renders = {
'response': renders.RenderResponse(template=self.default_template),
}
kwargs = self.get_preview_kwargs(**kwargs)
view = self.preview_view.as_string()
return self.render(request, data=view(request, **kwargs)) | python | {
"resource": ""
} |
q44988 | VersionsList.revert | train | def revert(self, version, url):
"""
Set the given version to be the active draft.
This is done by calling the object's `make_draft` method.
Logs the revert as a 'save' and messages the user.
"""
message = "Draft replaced with %s version. This revert has not been published." % version.date_published
version.make_draft()
# Log action as a save
self.log_action(self.object, CMSLog.SAVE, url=url)
return self.write_message(message=message) | python | {
"resource": ""
} |
q44989 | VersionsList.delete | train | def delete(self, version):
"""
Deletes the given version, not the object itself.
No log entry is generated but the user is notified
with a message.
"""
# Shouldn't be able to delete live or draft version
if version.state != version.DRAFT and \
version.state != version.PUBLISHED:
version.delete()
message = "%s version deleted." % version.date_published
return self.write_message(message=message) | python | {
"resource": ""
} |
q44990 | VersionsList.post | train | def post(self, request, *args, **kwargs):
"""
Method for handling POST requests.
Expects the 'vid' of the version to act on
to be passed as in the POST variable 'version'.
If a POST variable 'revert' is present this will
call the revert method and then return a 'render
redirect' to the result of the `get_done_url` method.
If a POST variable 'delete' is present this will
call the delete method and return a 'render redirect'
to the result of the `get_done_url` method.
If this method receives unexpected input, it will
silently redirect to the result of the `get_done_url`
method.
"""
versions = self._get_versions()
url = self.get_done_url()
msg = None
try:
vid = int(request.POST.get('version', ''))
version = versions.get(vid=vid)
if request.POST.get('revert'):
object_url = self.get_object_url()
msg = self.revert(version, object_url)
elif request.POST.get('delete'):
msg = self.delete(version)
# Delete should redirect back to itself
url = self.request.build_absolute_uri()
# If the give version isn't valid we'll just silently redirect
except (ValueError, versions.model.DoesNotExist):
pass
return self.render(request, redirect_url=url,
message=msg,
obj=self.object,
collect_render_data=False) | python | {
"resource": ""
} |
q44991 | Collection.validate_parent_id | train | def validate_parent_id(self, key, parent_id):
"""Parent has to be different from itself."""
id_ = getattr(self, 'id', None)
if id_ is not None and parent_id is not None:
assert id_ != parent_id, 'Can not be attached to itself.'
return parent_id | python | {
"resource": ""
} |
q44992 | _to_encoded_string | train | def _to_encoded_string(o):
"""
Build an encoded string suitable for use as a URL component. This includes double-escaping the string to
avoid issues with escaped backslash characters being automatically converted by WSGI or, in some cases
such as default Apache servers, blocked entirely.
:param o: an object of any kind, if it has an as_dict() method this will be used, otherwise uses __dict__
:return: an encoded string suitable for use as a URL component
:internal:
"""
_dict = o.__dict__
if o.as_dict:
_dict = o.as_dict()
return urllib.quote_plus(urllib.quote_plus(json.dumps(obj=_dict, separators=(',', ':')))) | python | {
"resource": ""
} |
q44993 | MeteorClient.list_observatories | train | def list_observatories(self):
"""
Get the IDs of all observatories with have stored observations on this server.
:return: a sequence of strings containing observatories IDs
"""
response = requests.get(self.base_url + '/obstories').text
return safe_load(response) | python | {
"resource": ""
} |
q44994 | MeteorClient.get_observatory_status | train | def get_observatory_status(self, observatory_id, status_time=None):
"""
Get details of the specified camera's status
:param string observatory_id:
a observatory ID, as returned by list_observatories()
:param float status_time:
optional, if specified attempts to get the status for the given camera at a particular point in time
specified as a datetime instance. This is useful if you want to retrieve the status of the camera at the
time a given event or file was produced. If this is None or not specified the time is 'now'.
:return:
a dictionary, or None if there was either no observatory found.
"""
if status_time is None:
response = requests.get(
self.base_url + '/obstory/{0}/statusdict'.format(observatory_id))
else:
response = requests.get(
self.base_url + '/obstory/{0}/statusdict/{1}'.format(observatory_id, str(status_time)))
if response.status_code == 200:
d = safe_load(response.text)
if 'status' in d:
return d['status']
return None | python | {
"resource": ""
} |
q44995 | Scheme.build_defaults | train | def build_defaults(self):
"""Build a dictionary of default values from the `Scheme`.
Returns:
dict: The default configurations as set by the `Scheme`.
Raises:
errors.InvalidSchemeError: The `Scheme` does not contain
valid options.
"""
defaults = {}
for arg in self.args:
if not isinstance(arg, _BaseOpt):
raise errors.InvalidSchemeError('Unable to build default for non-Option type')
# if there is a default set, add it to the defaults dict
if not isinstance(arg.default, NoDefault):
defaults[arg.name] = arg.default
# if we have a dict option, build the defaults for its scheme.
# if any defaults exist, use them.
if isinstance(arg, DictOption):
if arg.scheme:
b = arg.scheme.build_defaults()
if b:
defaults[arg.name] = b
return defaults | python | {
"resource": ""
} |
q44996 | Scheme.flatten | train | def flatten(self):
"""Flatten the scheme into a dictionary where the keys are
compound 'dot' notation keys, and the values are the corresponding
options.
Returns:
dict: The flattened `Scheme`.
"""
if self._flat is None:
flat = {}
for arg in self.args:
if isinstance(arg, Option):
flat[arg.name] = arg
elif isinstance(arg, ListOption):
flat[arg.name] = arg
elif isinstance(arg, DictOption):
flat[arg.name] = arg
if arg.scheme:
for k, v in arg.scheme.flatten().items():
flat[arg.name + '.' + k] = v
self._flat = flat
return self._flat | python | {
"resource": ""
} |
q44997 | Scheme.validate | train | def validate(self, config):
"""Validate the given config against the `Scheme`.
Args:
config (dict): The configuration to validate.
Raises:
errors.SchemeValidationError: The configuration fails
validation against the `Schema`.
"""
if not isinstance(config, dict):
raise errors.SchemeValidationError(
'Scheme can only validate a dictionary config, but was given '
'{} (type: {})'.format(config, type(config))
)
for arg in self.args:
# the option exists in the config
if arg.name in config:
arg.validate(config[arg.name])
# the option does not exist in the config
else:
# if the option is not required, then it is fine to omit.
# otherwise, its omission constitutes a validation error.
if arg.required:
raise errors.SchemeValidationError(
'Option "{}" is required, but not found.'.format(arg.name)
) | python | {
"resource": ""
} |
q44998 | Option.cast | train | def cast(self, value):
"""Cast a value to the type required by the option, if one is set.
This is used to cast the string values gathered from environment
variable into their required type.
Args:
value: The value to cast.
Returns:
The value casted to the expected type for the option.
"""
# if there is no type set for the option, return the given
# value unchanged.
if self.type is None:
return value
# cast directly
if self.type in (str, int, float):
try:
return self.type(value)
except Exception as e:
raise errors.BisonError(
'Failed to cast {} to {}'.format(value, self.type)
) from e
# for bool, can't cast a string, since a string is truthy,
# so we need to check the value.
elif self.type == bool:
return value.lower() == 'true'
# the option type is currently not supported
else:
raise errors.BisonError('Unsupported type for casting: {}'.format(self.type)) | python | {
"resource": ""
} |
q44999 | medianscore | train | def medianscore(inlist):
"""
Returns the 'middle' score of the passed list. If there is an even
number of scores, the mean of the 2 middle scores is returned.
Usage: lmedianscore(inlist)
"""
newlist = copy.deepcopy(inlist)
newlist.sort()
if len(newlist) % 2 == 0: # if even number of scores, average middle 2
index = len(newlist) / 2 # integer division correct
median = float(newlist[index] + newlist[index - 1]) / 2
else:
index = len(newlist) / 2 # int divsion gives mid value when count from 0
median = newlist[index]
return median | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.