_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q37300 | _build_join | train | def _build_join(t):
""" Populates join token fields. """
t.source.name = t.source.parsed_name
t.source.alias = t.source.parsed_alias[0] if t.source.parsed_alias else ''
return t | python | {
"resource": ""
} |
def substitute_vids(library, statement):
    """ Replace all of the references to tables and partitions with their vids.

    This is a bit of a hack -- it ought to work with the parser, but instead it just looks for
    common SQL tokens that indicate an identifier.

    :param library: library used to resolve table and partition references.
    :param statement: an sqlstatement. String.
    :return: tuple: new_statement, set of table vids, set of partition vids.
    """
    from ambry.identity import ObjectNumber, TableNumber, NotObjectNumberError
    from ambry.orm.exc import NotFoundError
    try:
        stmt_str = statement.to_unicode()
    except AttributeError:
        # Already a plain string.
        stmt_str = statement
    parts = stmt_str.strip(';').split()
    new_parts = []
    tables = set()
    partitions = set()
    while parts:
        token = parts.pop(0).strip()
        if token.lower() in ('from', 'join', 'materialize', 'install'):
            # The word following these keywords is an identifier to resolve.
            ident = parts.pop(0).strip(';')
            new_parts.append(token)
            try:
                # BUG FIX: parse the identifier, not the keyword token. The
                # original parsed `token` (e.g. 'from'), which is never an
                # object number, so the TableNumber branch was unreachable.
                obj_number = ObjectNumber.parse(ident)
                if isinstance(obj_number, TableNumber):
                    table = library.table(ident)
                    tables.add(table.vid)
                    new_parts.append(table.vid)
                else:
                    # Do not care about other object numbers. Assume partition.
                    raise NotObjectNumberError
            except NotObjectNumberError:
                # assume partition
                try:
                    partition = library.partition(ident)
                    partitions.add(partition.vid)
                    new_parts.append(partition.vid)
                except NotFoundError:
                    # Ok, maybe it is just a normal identifier...
                    new_parts.append(ident)
        else:
            new_parts.append(token)
    return ' '.join(new_parts).strip(), tables, partitions
"resource": ""
} |
def find_indexable_materializable(sql, library):
    """
    Parse a statement, then call functions to install, materialize or create indexes for partitions
    referenced in the statement.

    :param sql: a single SQL statement (string or sqlparse statement)
    :param library: library used by substitute_vids() to resolve references
    :return: an FIMRecord describing what to drop, install, materialize and index
    """
    # Resolve table/partition references to vids first; `tables` and
    # `partitions` collect everything that was successfully resolved.
    derefed, tables, partitions = substitute_vids(library, sql)
    if derefed.lower().startswith('create index') or derefed.lower().startswith('index'):
        parsed = parse_index(derefed)
        return FIMRecord(statement=derefed, indexes=[(parsed.source, tuple(parsed.columns))])
    elif derefed.lower().startswith('materialize'):
        # Statement form: "MATERIALIZE <vid>"
        _, vid = derefed.split()
        return FIMRecord(statement=derefed, materialize=set([vid]))
    elif derefed.lower().startswith('install'):
        # Statement form: "INSTALL <vid>"
        _, vid = derefed.split()
        return FIMRecord(statement=derefed, install=set([vid]))
    elif derefed.lower().startswith('select'):
        rec = FIMRecord(statement=derefed)
        parsed = parse_select(derefed)
    elif derefed.lower().startswith('drop'):
        return FIMRecord(statement=derefed, drop=derefed)
    elif derefed.lower().startswith('create table'):
        parsed = parse_view(derefed)
        rec = FIMRecord(statement=derefed, drop='DROP TABLE IF EXISTS {};'.format(parsed.name), views=1)
    elif derefed.lower().startswith('create view'):
        parsed = parse_view(derefed)
        rec = FIMRecord(statement=derefed, drop='DROP VIEW IF EXISTS {};'.format(parsed.name), views=1)
    else:
        # Anything else: just record the referenced tables/partitions.
        return FIMRecord(statement=derefed, tables=set(tables), install=set(partitions))

    # Only SELECT / CREATE TABLE / CREATE VIEW fall through to here.
    def partition_aliases(parsed):
        # Map alias -> source name for FROM sources and JOINed sources.
        d = {}
        for source in parsed.sources:
            if source.alias:
                d[source.alias] = source.name
        for j in parsed.joins:
            if j.source.alias:
                d[j.source.alias] = j.source.name
        return d

    def indexable_columns(aliases, parsed):
        # Columns used in JOIN conditions are candidates for indexing.
        indexes = []
        for j in parsed.joins:
            if j and j.join_cols:
                for col in j.join_cols:
                    if '.' in col:
                        try:
                            # NOTE(review): a column with more than one '.'
                            # raises ValueError here, which is NOT caught --
                            # confirm inputs can never contain nested dots.
                            alias, col = col.split('.')
                            if alias:
                                indexes.append((aliases[alias], (col,)))
                        except KeyError:
                            # Unknown alias; skip this column.
                            pass
        return indexes
    aliases = partition_aliases(parsed)
    indexes = indexable_columns(aliases, parsed)
    rec.joins = len(parsed.joins)
    install = set(partitions)
    rec.update(tables=tables, install=install, indexes=indexes)
    return rec
"resource": ""
} |
def update(self, rec=None, drop=None, tables=None, install=None, materialize=None,
           indexes=None, joins=0, views=0):
    """ Updates current record.

    Merges another record (``rec``) and/or the individual keyword values
    into this record's accumulators.

    Args:
        rec (FIMRecord): another record whose fields are merged in first.
        drop (list): DROP statements to append.
        tables (iterable): table vids to add.
        install (iterable): partition vids to install.
        materialize (iterable): partition vids to materialize.
        indexes (iterable): (source, columns) index specs to add.
        joins (int): number of joins seen.
        views (int): number of views seen.
    """
    if not drop:
        drop = []
    if not tables:
        tables = set()
    if not install:
        install = set()
    if not materialize:
        materialize = set()
    if not indexes:
        indexes = set()
    if rec:
        # BUG FIX: the original recursion did not forward rec.views, so view
        # counts were silently dropped when merging records.
        self.update(
            drop=rec.drop, tables=rec.tables, install=rec.install, materialize=rec.materialize,
            indexes=rec.indexes, joins=rec.joins, views=rec.views
        )
    self.drop += drop
    self.tables |= set(tables)
    self.install |= set(install)
    self.materialize |= set(materialize)
    self.indexes |= set(indexes)
    self.joins += joins
    self.views += views
    # Joins or views promote installed partitions to materialized partitions
    if self.joins > 0 or self.views > 0:
        self.materialize |= self.install
        self.install = set()
"resource": ""
} |
def support_level(self, support_level):
    """
    Sets the support_level of this ProductReleaseRest.

    :param support_level: The support_level of this ProductReleaseRest.
    :type: str
    :raises ValueError: if the value is not one of the allowed levels.
    """
    allowed_values = ["UNRELEASED", "EARLYACCESS", "SUPPORTED", "EXTENDED_SUPPORT", "EOL"]
    if support_level not in allowed_values:
        message = (
            "Invalid value for `support_level` ({0}), must be one of {1}"
            .format(support_level, allowed_values)
        )
        raise ValueError(message)
    self._support_level = support_level
"resource": ""
} |
def callLater(self, when, what, *a, **kw):
    """
    Copied from twisted.internet.task.Clock, r20480. Fixes the bug
    where the wrong DelayedCall would sometimes be returned.

    :param when: seconds from the clock's current time at which to fire
    :param what: callable to invoke with *a and **kw
    :return: the scheduled DelayedCall instance
    """
    dc = base.DelayedCall(self.seconds() + when,
                          what, a, kw,
                          self.calls.remove,
                          lambda c: None,
                          self.seconds)
    self.calls.append(dc)
    # Keep the pending-call queue ordered by fire time
    # (Python 2 cmp-style sort, as in the original twisted code).
    self.calls.sort(lambda a, b: cmp(a.getTime(), b.getTime()))
    return dc
"resource": ""
} |
def clockIsBroken():
    """
    Returns whether twisted.internet.task.Clock has the bug that
    returns the wrong DelayedCall or not.
    """
    clock = Clock()
    first = clock.callLater(10, lambda: None)
    second = clock.callLater(1, lambda: None)
    # A correct Clock returns distinct DelayedCall objects here.
    return first is second
"resource": ""
} |
def shrink_patch(patch_path, target_file):
    """
    Shrinks a patch on patch_path to contain only changes for target_file.

    The patch file is rewritten in place when a matching section is found.

    :param patch_path: path to the patch file to shrink (modified in place)
    :param target_file: filename of a file of which changes should be kept
    :return: True if there is a section containing changes for target_file, False otherwise
    """
    logging.debug("Shrinking patch file %s to keep only %s changes.", patch_path, target_file)
    search_line = "diff --git a/%s b/%s" % (target_file, target_file)
    shrinked_lines = []
    adding = False
    # `with` replaces the original try/finally + None-sentinel handling.
    with open(patch_path) as patch_file:
        for line in patch_file.read().split("\n"):
            if adding and line.startswith("diff --git a/") and line != search_line:
                # A different file's section starts: stop collecting.
                adding = False
            elif line == search_line:
                adding = True
            if adding:
                shrinked_lines.append(line)
    if not shrinked_lines:
        return False
    content = "\n".join(shrinked_lines)
    if not content.endswith("\n"):
        content = content + "\n"
    with open(patch_path, "w") as patch_file:
        patch_file.write(content)
    return True
"resource": ""
} |
def new_status(self, new_status):
    """
    Sets the new_status of this BuildSetStatusChangedEvent.

    :param new_status: The new_status of this BuildSetStatusChangedEvent.
    :type: str
    :raises ValueError: if the value is not an allowed status.
    """
    allowed_values = ["NEW", "DONE", "REJECTED"]
    if new_status not in allowed_values:
        message = (
            "Invalid value for `new_status` ({0}), must be one of {1}"
            .format(new_status, allowed_values)
        )
        raise ValueError(message)
    self._new_status = new_status
"resource": ""
} |
def old_status(self, old_status):
    """
    Sets the old_status of this BuildSetStatusChangedEvent.

    :param old_status: The old_status of this BuildSetStatusChangedEvent.
    :type: str
    :raises ValueError: if the value is not an allowed status.
    """
    allowed_values = ["NEW", "DONE", "REJECTED"]
    if old_status not in allowed_values:
        message = (
            "Invalid value for `old_status` ({0}), must be one of {1}"
            .format(old_status, allowed_values)
        )
        raise ValueError(message)
    self._old_status = old_status
"resource": ""
} |
def find_previous(element, l):
    """
    find previous element in a sorted list
    >>> find_previous(0, [0])
    0
    >>> find_previous(2, [1, 1, 3])
    1
    >>> find_previous(0, [1, 2])
    >>> find_previous(1.5, [1, 2])
    1
    >>> find_previous(3, [1, 2])
    2
    """
    last_index = len(l) - 1
    for index, current in enumerate(l):
        # Last element: nothing after it, so it is the answer.
        if index == last_index:
            return current
        # Element precedes the whole list: no previous element exists.
        if index == 0 and element < current:
            return None
        # Element falls in [current, next): current is its predecessor.
        if current <= element < l[index + 1]:
            return current
"resource": ""
} |
def update(self):
    """Cache the list into the data section of the record.

    Re-lists the remote and stores a trimmed identity record per bundle,
    keyed by vid, under ``self.data['list']``.

    :raises RemoteAccessError: if listing the remote fails for any
        transport or lookup reason.
    """
    from ambry.orm.exc import NotFoundError
    from requests.exceptions import ConnectionError, HTTPError
    from boto.exception import S3ResponseError
    d = {}
    try:
        for k, v in self.list(full=True):
            # Entries with no identity info are skipped.
            if not v:
                continue
            d[v['vid']] = {
                'vid': v['vid'],
                'vname': v.get('vname'),
                'id': v.get('id'),
                'name': v.get('name')
            }
        self.data['list'] = d
    except (NotFoundError, ConnectionError, S3ResponseError, HTTPError) as e:
        # Normalize all failure modes into a single error type for callers.
        raise RemoteAccessError("Failed to update {}: {}".format(self.short_name, e))
"resource": ""
} |
def list(self, full=False):
    """List all of the bundles in the remote"""
    handler = self._list_api if self.is_api else self._list_fs
    return handler(full=full)
"resource": ""
} |
q37313 | Remote._update_fs_list | train | def _update_fs_list(self):
"""Cache the full list for http access. This creates a meta file that can be read all at once,
rather than requiring a list operation like S3 access does"""
from json import dumps
full_list = [ e[1] for e in self._list_fs(full=True) ]
remote = self._fs_remote(self.url)
remote.setcontents(os.path.join('_meta', 'list.json'), dumps(full_list, indent = 4)) | python | {
"resource": ""
} |
def checkin(self, package, no_partitions=False, force=False, cb=None):
    """
    Check in a bundle package to the remote.

    :param package: A Database, referencing a sqlite database holding the bundle
    :param cb: a two argument progress callback: cb(message, num_records)
    :return:
    """
    from ambry.orm.exc import NotFoundError
    if not os.path.exists(package.path):
        raise NotFoundError("Package path does not exist: '{}' ".format(package.path))
    handler = self._checkin_api if self.is_api else self._checkin_fs
    return handler(package, no_partitions=no_partitions, force=force, cb=cb)
"resource": ""
} |
def _put_metadata(self, fs_remote, ds):
    """Store metadata on a pyfs remote.

    Writes one identity blob per metadata path; creates the _meta
    subdirectories on demand if the first write attempt fails.

    :param fs_remote: pyfs filesystem to write to
    :param ds: dataset whose identity/metadata is written
    """
    # Removed unused `from six import text_type` import.
    from fs.errors import ResourceNotFoundError
    identity = ds.identity
    # NOTE(review): `d` is built but never used below -- it looks like it
    # was meant to be written somewhere. Kept for now in case the attribute
    # accesses matter; confirm intent before removing.
    d = identity.dict
    d['summary'] = ds.config.metadata.about.summary
    d['title'] = ds.config.metadata.about.title
    meta_stack = self._meta_infos(ds)

    def do_metadata():
        # Write each identity blob to its metadata path.
        for path, ident in meta_stack:
            fs_remote.setcontents(path, ident)
    try:
        # Assume the directories already exist
        do_metadata()
    except ResourceNotFoundError:
        # Nope, make them and try again.
        parts = ['vid', 'id', 'vname', 'name']
        for p in parts:
            dirname = os.path.join('_meta', p)
            fs_remote.makedir(dirname, allow_recreate=True, recursive=True)
        do_metadata()
"resource": ""
} |
def checkout(self, ref, cb=None):
    """Checkout a bundle from the remote. Returns a file-like object"""
    handler = self._checkout_api if self.is_api else self._checkout_fs
    return handler(ref, cb=cb)
"resource": ""
} |
def remove(self, ref, cb=None):
    """Remove a bundle from the remote, dispatching to the API or
    filesystem implementation."""
    handler = self._remove_api if self.is_api else self._remove_fs
    return handler(ref, cb)
"resource": ""
} |
def s3(self, url, account_acessor=None, access=None, secret=None):
    """Setup an S3 pyfs, with account credentials, fixing an ssl matching problem.

    :param url: s3:// URL naming the bucket and prefix
    :param account_acessor: optional callable returning account credentials
        for the bucket host; when given, `access`/`secret` are ignored
    :param access: explicit AWS access key (used when no accessor is given)
    :param secret: explicit AWS secret key (used when no accessor is given)
    """
    from ambry.util.ambrys3 import AmbryS3FS
    from ambry.util import parse_url_to_dict
    # NOTE(review): `ssl` appears unused; presumably imported for a
    # side-effect workaround hinted at in the docstring -- confirm before
    # removing.
    import ssl
    pd = parse_url_to_dict(url)
    if account_acessor:
        account = account_acessor(pd['hostname'])
        assert account['account_id'] == pd['hostname']
        # BUG FIX: the original line ended with a trailing comma, which made
        # aws_access_key a 1-tuple instead of a string.
        aws_access_key = account['access_key']
        aws_secret_key = account['secret']
    else:
        aws_access_key = access
        aws_secret_key = secret
        # BUG FIX: these asserts ran unconditionally, so they failed whenever
        # credentials came from account_acessor (access/secret are None then).
        assert access, url
        assert secret, url
    s3 = AmbryS3FS(
        bucket=pd['netloc'],
        prefix=pd['path'].strip('/') + '/',
        aws_access_key=aws_access_key,
        aws_secret_key=aws_secret_key,
    )
    return s3
"resource": ""
} |
def withdict(parser, token):
    """
    Take a complete context dict as extra layer.
    """
    pieces = token.split_contents()
    if len(pieces) != 2:
        raise TemplateSyntaxError("{% withdict %} expects one argument")
    inner_nodes = parser.parse(('endwithdict',))
    parser.delete_first_token()
    expr = parser.compile_filter(pieces[1])
    return WithDictNode(nodelist=inner_nodes, context_expr=expr)
"resource": ""
} |
def render(self, context):
    """
    Render the wrapped nodes with the resolved dict pushed as an extra
    context layer.
    """
    extra = self.context_expr.resolve(context)
    if not isinstance(extra, dict):
        raise TemplateSyntaxError("{% withdict %} expects the argument to be a dictionary.")
    with context.push(**extra):
        return self.nodelist.render(context)
"resource": ""
} |
def oscillating_setpoint(_square_wave=False, shift=0):
    """A basic example of a target that you may want to approximate.
    If you have a thermostat, this is a temperature setting.
    This target can't change too often
    """
    import math
    two_pi = 2 * 3.1415926
    counter = 0
    if _square_wave:
        # Square wave: 50 for 150 steps, then 20 for 150 steps, repeating.
        while True:
            yield ((counter % 300) < 150) * 30 + 20
            counter += 1
    else:
        # Sum of two sinusoids around 20, advancing slowly per sample.
        while True:
            yield (10 * math.sin(two_pi * counter + shift)
                   + 20 + 5 * math.sin(two_pi * counter * 3 + shift))
            counter += .001
"resource": ""
} |
def bash_echo_metric():
    """A very basic example that monitors
    a number of currently running processes.

    Yields, forever, the count of bash launcher-task processes matching the
    pgrep pattern.
    """
    import subprocess
    cmd = (
        'set -o pipefail '
        ' ; pgrep -f "^bash.*sleep .*from bash: started relay launcher"'
        ' | wc -l '
    )
    while True:
        count = subprocess.check_output(cmd, shell=True, executable='bash')
        yield int(count)
"resource": ""
} |
def bash_echo_warmer(n):
    """A very basic example of how to create n additional tasks.
    This is a warmer function with randomly delayed effects on the
    bash_echo_metric and random task lengths to make the metric less
    predictable
    """
    import subprocess
    import random
    cmd = (
        'set -o pipefail '
        " ; sleep %s "
        " ; sh -c 'echo from bash: started relay launcher task && sleep %s'"
    )
    for _ in range(n):
        # Random startup delay (1-2s) and task duration (4-8s).
        delay = (1 + random.random()) * 1
        duration = (1 + random.random()) * 4
        subprocess.Popen(
            cmd % (delay, duration),
            shell=True, stdout=subprocess.PIPE, executable='bash')
"resource": ""
} |
def bash_echo_cooler(n):
    """A very basic example of how to destroy n running tasks.
    This is a cooler function.

    :param n: number of launcher tasks to kill
    """
    import subprocess
    # BUG FIX: 'pipefile' was a typo for the bash option 'pipefail'; the
    # misspelled option made the leading `set` command fail (the kill still
    # ran after ';', but with a spurious error on stderr).
    cmd = (
        'set -o pipefail '
        ' ; kill `pgrep -f "from bash: started relay launcher task"'
        ' | tail -n %s` 2>/dev/null' % n)
    subprocess.Popen(cmd, shell=True, executable='bash').wait()
"resource": ""
} |
def stop_if_mostly_diverging(errdata):
    """This is an example stop condition that asks Relay to quit if
    the error difference between consecutive samples is increasing more than
    half of the time.

    It's quite sensitive and designed for the demo, so you probably shouldn't
    use this is a production setting

    Returns 0 (unhealthy: stop) or -1 (healthy: keep going).
    """
    n_increases = sum(
        1 for prev, cur in zip(errdata, errdata[1:])
        if abs(cur) > abs(prev))
    if n_increases > len(errdata) * 0.5:
        # most of the time, the next sample is worse than the previous sample
        # relay is not healthy
        return 0
    # most of the time, the next sample is better than the previous sample
    # relay is in a healthy state
    return -1
"resource": ""
} |
def has_resolved_dependencies(self):
    """Return True if all dependencies are in State.DONE"""
    return all(dep.state == Task.State.DONE for dep in self.dependencies)
"resource": ""
} |
def dependencies_as_list(self):
    """Returns a list of dependency names."""
    return [dependency.name for dependency in self.dependencies]
"resource": ""
} |
def get_task(self, name):
    """Get task by name or create it if it does not exist.

    :param name: task name, used as the key in self.tasks
    :return: the existing or newly created Task
    """
    # Membership test directly on the dict: the original called .keys(),
    # which builds an intermediate list on Python 2.
    if name in self.tasks:
        return self.tasks[name]
    task = Task(name)
    self.tasks[name] = task
    return task
"resource": ""
} |
def get_next(self):
    """Return next task from the stack that has all dependencies resolved.

    Return None if there are no tasks with resolved dependencies or if there are no more tasks on stack.
    Use `count` to check if there are still some tasks left on the stack.

    raise ValueError if total ordering is not possible."""
    self.update_tasks_status()
    # Re-run the topological sort only when the task graph changed.
    if self.dirty:
        self.tsort()
        self.dirty = False
    # Python 2 dict iteration; candidate order is unspecified.
    for key, task in self.tasks.iteritems():
        if task.is_new() and task.has_resolved_dependencies():
            return task
    return None
"resource": ""
} |
def count_buildable_tasks(self):
    """Count tasks that are new and have dependencies in non FAILED state.

    :return: number of NEW tasks whose full dependency chain is buildable
    """
    self.update_tasks_status()
    buildable_tasks_count = 0
    # Python 2 dict iteration over all registered tasks.
    for key, task in self.tasks.iteritems():
        if task.state is Task.State.NEW:
            if self.are_dependencies_buildable(task):
                buildable_tasks_count += 1
                logging.debug("Buildable task: %s" % task.name )
            else:
                logging.debug("Task %s has broken dependencies." % task.name )
    return buildable_tasks_count
"resource": ""
} |
def tsort(self):
    """Given a partial ordering, return a totally ordered list.

    part is a dict of partial orderings. Each value is a set,
    which the key depends on.

    The return value is a list of sets, each of which has only
    dependencies on items in previous entries in the list.

    raise ValueError if ordering is not possible (check for circular or missing dependencies)"""
    # Build task -> set(dependencies) from the registered tasks.
    task_dict = {}
    for key, task in self.tasks.iteritems():
        task_dict[task] = task.dependencies
    # parts = parts.copy()
    parts = task_dict.copy()
    result = []
    while True:
        # Tasks with no remaining dependencies form the next level.
        level = set([name for name, deps in parts.iteritems() if not deps])
        if not level:
            break
        result.append(level)
        # Drop this level from the graph and from every dependency set.
        parts = dict([(name, deps - level) for name, deps in parts.iteritems() if name not in level])
    if parts:
        # Leftover entries imply a cycle or a dependency that never resolves.
        raise ValueError('total ordering not possible (check for circular or missing dependencies)')
    return result
"resource": ""
} |
q37332 | Config._load_config | train | def _load_config(self):
"""
Load project's config and return dict.
TODO: Convert the original dotted representation to hierarchical.
"""
config = import_module('config')
variables = [var for var in dir(config) if not var.startswith('_')]
return {var: getattr(config, var) for var in variables} | python | {
"resource": ""
} |
def get(name, default=None):
    """
    Return variable by name from the project's config.

    Name can be a dotted path, like: 'rails.db.type'.

    :param name: dotted config path; must contain at least one dot
    :param default: value returned when the section or variable is missing
    :raises Exception: if `name` contains no dot
    """
    if '.' not in name:
        raise Exception("Config path should be divided by at least one dot")
    section_name, var_path = name.split('.', 1)
    section = Config._data.get(section_name)
    # BUG FIX: the `default` parameter was accepted but never used, and a
    # missing section crashed with AttributeError on None.get().
    if section is None:
        return default
    return section.get(var_path, default)
"resource": ""
} |
def find_lt(self, k):
    """Return last item with a key < k.

    Raise ValueError if not found.
    """
    idx = bisect_left(self._keys, k)
    if not idx:
        raise ValueError('No item found with key below: %r' % (k,))
    return self._items[idx - 1]
"resource": ""
} |
def find_ge_index(self, k):
    """Return index of the first item with a key >= k.

    Raise ValueError if not found.
    """
    idx = bisect_left(self._keys, k)
    if idx == len(self):
        raise ValueError('No item found with key at or above: %r' % (k,))
    return idx
"resource": ""
} |
def start(
        loop: abstract_loop = None,
        interval: float = 0.5,
        hook: hook_type = None) -> asyncio.Task:
    """
    Start the reloader.

    Create the task which is watching loaded modules
    and manually added files via ``watch()``
    and reloading the process in case of modification.
    Attach this task to the loop.

    If ``hook`` is provided, it will be called right before
    the application goes to the reload stage.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    # Module-level hook consulted by check_and_reload before reloading.
    global reload_hook
    if hook is not None:
        reload_hook = hook
    # Only one watcher task is ever created; repeat calls return the same one.
    global task
    if not task:
        modify_times = {}
        # A single worker thread handles the blocking stat calls.
        executor = ThreadPoolExecutor(1)
        task = call_periodically(
            loop,
            interval,
            check_and_reload,
            modify_times,
            executor,
        )
    return task
"resource": ""
} |
def strftime(dt, fmt):
    '''
    `strftime` implementation working before 1900

    time.strftime rejects years before 1900 on some platforms, so the real
    year is substituted into the format string up front, and the date is
    mapped onto a surrogate year >= 2000 with the same calendar layout
    (the Gregorian calendar repeats on a 28-year cycle).

    :param dt: a datetime/date instance
    :param fmt: strftime format string; %s is not supported
    '''
    if _illegal_s.search(fmt):
        raise TypeError("This strftime implementation does not handle %s")
    if dt.year > 1900:
        return dt.strftime(fmt)
    # Expand %c first, then hard-code the year fields with the real year,
    # since the surrogate year used below would otherwise leak into output.
    fmt = fmt.replace('%c', '%a %b %d %H:%M:%S %Y')\
        .replace('%Y', str(dt.year))\
        .replace('%y', '{:04}'.format(dt.year)[-2:])
    year = dt.year
    # For every non-leap year century, advance by
    # 6 years to get into the 28-year repeat cycle
    delta = 2000 - year
    off = 6*(delta // 100 + delta // 400)
    year = year + off
    # Move to around the year 2000
    year = year + ((2000 - year)//28)*28
    timetuple = dt.timetuple()
    # Format with the surrogate year; the year fields in fmt were already
    # replaced above, so only weekday/month/day/time come from here.
    return time.strftime(fmt, (year,) + timetuple[1:])
"resource": ""
} |
def _detect_timezone_windows():
    """Detect timezone on the windows platform.

    :return: a pytz timezone for the local Windows timezone, or None when
        no mapping to an Olson name can be determined.
    """
    # pylint: disable=global-statement
    global win32timezone_to_en
    # Try and fetch the key_name for the timezone using
    # Get(Dynamic)TimeZoneInformation
    tzi = DTZI_c()
    kernel32 = ctypes.windll.kernel32
    getter = kernel32.GetTimeZoneInformation
    # Prefer the dynamic variant (Vista+), which also fills in key_name.
    getter = getattr(kernel32, "GetDynamicTimeZoneInformation", getter)
    # code is for daylight savings: 0 means disabled/not defined, 1 means enabled
    # but inactive, 2 means enabled and active
    _ = getter(ctypes.byref(tzi))
    win32tz_key_name = tzi.key_name
    if not win32tz_key_name:
        if win32timezone is None:
            return None
        # We're on Windows before Vista/Server 2008 - need to look up the
        # standard_name in the registry.
        # This will not work in some multilingual setups if running in a language
        # other than the operating system default
        win32tz_name = tzi.standard_name
        if not win32timezone_to_en:
            # Lazily build (and cache in the module global) the localized
            # name -> English name map from the registry.
            win32timezone_to_en = dict(
                win32timezone.TimeZoneInfo._get_indexed_time_zone_keys("Std"))
        win32tz_key_name = win32timezone_to_en.get(win32tz_name, win32tz_name)
    territory = locale.getdefaultlocale()[0].split("_", 1)[1]
    # Prefer the territory-specific mapping, then the generic one.
    olson_name = win32tz_map.win32timezones.get((win32tz_key_name, territory), win32tz_map.win32timezones.get(win32tz_key_name, None))
    if not olson_name:
        return None
    if not isinstance(olson_name, str):
        olson_name = olson_name.encode("ascii")
    return pytz.timezone(olson_name)
"resource": ""
} |
def login(self, template='login'):
    '''
    This property will return component which will handle login requests.

        auth.login(template='login.html')

    On GET the login form is rendered; on POST the form is validated, the
    user identity resolved, and the response redirects to the `next`
    query parameter (default '/').
    '''
    def _login(env, data):
        form = self._login_form(env)
        next = env.request.GET.get('next', '/')
        login_failed = False
        if env.request.method == 'POST':
            if form.accept(env.request.POST):
                user_identity = self.get_user_identity(
                    env, **form.python_data)
                if user_identity is not None:
                    # Successful login: set the auth cookie and redirect.
                    response = HTTPSeeOther(location=next)
                    return self.login_identity(user_identity, response)
            # Any POST that did not return above is a failed login attempt.
            login_failed = True
        data.form = form
        data.login_failed = login_failed
        data.login_url = env.root.login.as_url.qs_set(next=next)
        return env.template.render_to_response(template, data.as_dict())
    # Compose the handler onto the /login route.
    return web.match('/login', 'login') | _login
"resource": ""
} |
def logout(self, redirect_to='/'):
    '''
    This property will return component which will handle logout requests.

    It only handles POST requests and do not display any rendered content.
    This handler deletes session id from `storage`. If there is no
    session id provided or id is incorrect handler silently redirects to
    login url and does not throw any exception.
    '''
    def _logout(env, data):
        location = redirect_to
        # Fall back to the referring page, then to the site root.
        if location is None and env.request.referer:
            location = env.request.referer
        elif location is None:
            location = '/'
        response = HTTPSeeOther(location=str(location))
        # Clear the auth cookie / stored session before redirecting.
        self.logout_user(env.request, response)
        return response
    # POST-only handler composed onto the /logout route.
    return web.match('/logout', 'logout') | web.method('post') | _logout
"resource": ""
} |
def content(self, content):
    """
    Sets the content of this SupportLevelPage.

    :param content: The content of this SupportLevelPage.
    :type: list[str]
    :raises ValueError: if any entry is not an allowed support level.
    """
    allowed_values = ["UNRELEASED", "EARLYACCESS", "SUPPORTED", "EXTENDED_SUPPORT", "EOL"]
    invalid = set(content) - set(allowed_values)
    if invalid:
        raise ValueError(
            "Invalid values for `content` [{0}], must be a subset of [{1}]"
            .format(", ".join(map(str, invalid)),
                    ", ".join(map(str, allowed_values)))
        )
    self._content = content
"resource": ""
} |
def copy_file_or_flo(input_, output, buffer_size=64 * 1024, cb=None):
    """ Copy a file name or file-like-object to another
    file name or file-like object.

    :param input_: source path or readable file-like object
    :param output: destination path or writable file-like object
    :param buffer_size: chunk size, in bytes, for the copy loop
    :param cb: optional progress callback, called as cb(bytes_read, cumulative)
    """
    assert bool(input_)
    assert bool(output)
    input_opened = False
    output_opened = False
    try:
        if isinstance(input_, string_types):
            if not os.path.isdir(os.path.dirname(input_)):
                os.makedirs(os.path.dirname(input_))
            # BUG FIX: open the input in binary mode to match the binary
            # output; text mode broke byte-for-byte copies on Python 3 and
            # corrupted binary data via newline translation on Windows.
            input_ = open(input_, 'rb')
            input_opened = True
        if isinstance(output, string_types):
            if not os.path.isdir(os.path.dirname(output)):
                os.makedirs(os.path.dirname(output))
            output = open(output, 'wb')
            output_opened = True

        def copyfileobj(fsrc, fdst, length=buffer_size):
            # Chunked copy with optional progress reporting.
            cumulative = 0
            while True:
                buf = fsrc.read(length)
                if not buf:
                    break
                fdst.write(buf)
                if cb:
                    cumulative += len(buf)
                    cb(len(buf), cumulative)
        copyfileobj(input_, output)
    finally:
        # Only close handles this function opened itself.
        if input_opened:
            input_.close()
        if output_opened:
            output.close()
"resource": ""
} |
def execute_tropo_program(self, program):
    """
    Ask Tropo to execute a program for us.

    We can't do this directly;
    we have to ask Tropo to call us back and then give Tropo the
    program in the response body to that request from Tropo.

    But we can pass data to Tropo and ask Tropo to pass it back
    to us when Tropo calls us back. So, we just bundle up the program
    and pass it to Tropo, then when Tropo calls us back, we
    give the program back to Tropo.

    We also cryptographically sign our program, so that
    we can verify when we're called back with a program, that it's
    one that we sent to Tropo and has not gotten mangled.
    See https://docs.djangoproject.com/en/1.4/topics/signing/ for more
    about the signing API.

    See https://www.tropo.com/docs/webapi/passing_in_parameters_text.htm
    for the format we're using to call Tropo, pass it data, and ask
    them to call us back.

    :param program: A Tropo program, i.e. a dictionary with a 'tropo'
        key whose value is a list of dictionaries, each representing
        a Tropo command.
    :raises requests.HTTPError: if the HTTP call to Tropo fails.
    :raises Exception: if Tropo reports a failure in its JSON response.
    """
    # The signer will also "pickle" the data structure for us
    signed_program = signing.dumps(program)
    params = {
        'action': 'create', # Required by Tropo
        'token': self.config['messaging_token'], # Identify ourselves
        'program': signed_program, # Additional data
    }
    data = json.dumps(params)
    # Tell Tropo we'd like our response in JSON format
    # and our data is in that format too.
    headers = {
        'accept': 'application/json',
        'content-type': 'application/json',
    }
    response = requests.post(base_url,
                             data=data,
                             headers=headers)
    # If the HTTP request failed, raise an appropriate exception - e.g.
    # if our network (or Tropo) are down:
    response.raise_for_status()
    result = json.loads(response.content)
    if not result['success']:
        raise Exception("Tropo error: %s" % result.get('error', 'unknown'))
"resource": ""
} |
def send(self, id_, text, identities, context=None):
    """
    Send messages when using RapidSMS 0.14.0 or later.

    We can send multiple messages in one Tropo program, so we do that.
    The Tropo doc explicitly says that while passing a list of destination
    numbers is not a syntax error, only the first number on the list will
    get sent the message -- so each recipient gets its own `message`
    command.

    :param id_: Unused, included for compatibility with RapidSMS.
    :param string text: The message text to send.
    :param identities: A list of identities to send the message to
        (a list of strings)
    :param context: Unused, included for compatibility with RapidSMS.
    """
    sender = self.config['number'].replace('-', '')
    commands = [
        {
            'message': {
                'say': {'value': text},
                'to': identity,
                'from': sender,
                'channel': 'TEXT',
                'network': 'SMS'
            }
        }
        for identity in identities
    ]
    self.execute_tropo_program({'tropo': commands})
"resource": ""
} |
def dotted(self):
    """Return just the tract number, excluding the state and county, in the dotted format"""
    tract = str(self.geoid.tract).zfill(6)
    return '{}.{}'.format(tract[:4], tract[4:])
"resource": ""
} |
def subclass(cls, vt_code, vt_args):
    """Return a dynamic subclass that has the extra parameters built in.

    :param vt_code: value-type code; '/' is replaced with '_' for the class name
    :param vt_args: geoid.census class name (with leading '/') used to pick
        the parse function
    :return: the newly created subclass, with `vt_code` and `parser` set
    """
    from geoid import get_class
    import geoid.census
    parser = get_class(geoid.census, vt_args.strip('/')).parse
    cls = type(vt_code.replace('/', '_'), (cls,), {'vt_code': vt_code, 'parser': parser})
    # Register the generated class at module scope so it can be found by name.
    globals()[cls.__name__] = cls
    assert cls.parser
    return cls
"resource": ""
} |
def _open(self, path, skip_to_end = True, offset = None):
    """Open `path`, optionally seeking to the end if `skip_to_end` is True.

    :param path: filesystem path to open.
    :param skip_to_end: when True and no explicit offset is given, seek to
        the end so only content appended after opening is reported.
    :param offset: explicit byte offset to resume from; overrides
        `skip_to_end` when not None.
    """
    # O_NONBLOCK so that opening e.g. a FIFO cannot block forever.
    fh = os.fdopen(os.open(path, os.O_RDONLY | os.O_NONBLOCK))
    # If the file is being opened for the first time, jump to the end.
    # Otherwise, it is being reopened after a rotation, and we want
    # content from the beginning.
    if offset is None:
        if skip_to_end:
            fh.seek(0, 2)
            self._offset = fh.tell()
        else:
            self._offset = 0
    else:
        fh.seek(offset)
        self._offset = fh.tell()
    self._fh = fh
    self._lastsize = fh.tell()
    # Remember the inode so hasBeenRotated() can detect replacement.
    self._inode = os.stat(self._path).st_ino
"resource": ""
} |
def _read(self, limit = None):
    """Checks the file for new data and refills the buffer if it finds any.

    Returns True only when `limit` was given and at least one chunk was
    read.  Returns False at EOF or when the file is closed; with
    limit=None the file is drained completely and False is then returned.
    """
    # The code that used to be here was self._fh.read(limit)
    # However, this broke on OSX. os.read, however, works fine, but doesn't
    # take the None argument or have any way to specify "read to the end".
    # This emulates that behaviour.
    while True:
        # Check that we haven't closed this file
        if not self._fh:
            return False
        dataread = os.read(self._fh.fileno(), limit or 65535)
        if len(dataread) > 0:
            self._buf += dataread
            if limit is not None:
                return True
        else:
            return False
"resource": ""
} |
def hasBeenRotated(self):
    """Returns a boolean indicating whether the file has been removed and recreated during the time it has been open."""
    try:
        current_inode = os.stat(self._path).st_ino
    except OSError:
        # If the file doesn't exist, let's call it "rotated".
        return True
    # While we hold the filehandle open, the old inode number cannot be
    # recycled, so a differing inode proves the path was replaced.
    return current_inode != self._inode
"resource": ""
} |
def reopen(self):
    """Reopens the file. Usually used after it has been rotated.

    :return: True on success, False if the file could not be reopened
        (e.g. it has been deleted).
    """
    # Read any remaining content in the file and store it in a buffer.
    self._read()
    # Close it to ensure we don't leak file descriptors
    self._close()
    # Reopen the file.  Start from the beginning: after a rotation all
    # content in the new file is new.
    try:
        self._open(self._path, skip_to_end = False)
        return True
    except OSError:
        # If opening fails, it was probably deleted.
        return False
"resource": ""
} |
def readlines(self):
    """A generator producing (line, offset) pairs from the file.

    Lines longer than half the read buffer are skipped with a warning
    instead of being yielded.
    """
    # If the file is not open, there's nothing to return.  PEP 479 made
    # `raise StopIteration` inside a generator a RuntimeError on
    # Python 3.7+, so a bare `return` is used to finish the generator.
    if not self._fh:
        return
    at_eof = False
    while True:
        # Clean the buffer sometimes.
        if self._bufoffset > (self._maxreadsize / 2):
            self._buf = self._buf[self._bufoffset:]
            self._bufoffset = 0
        # Fill up the buffer if necessary.
        if len(self._buf) < self._maxreadsize:
            at_eof = not self._read(self._maxreadsize)
        # Look for the next line.
        try:
            next_newline = self._buf.index("\n", self._bufoffset)
            line = self._buf[self._bufoffset:next_newline]
            self._bufoffset = next_newline + 1
            # Save the current file offset for yielding and advance the file offset.
            offset = self._offset
            self._offset += len(line) + 1
            if self._longline:
                # This is the remaining chunk of a long line, we're not going
                # to yield it.
                self._longline = False
            else:
                yield line, offset
        except ValueError:
            # Reached the end of the buffer without finding any newlines.
            if not at_eof:
                # The line is longer than half the buffer: skip over it.
                logger.warning("Skipping over longline at %s:%d", self._path,
                               self._offset)
                self._bufoffset = len(self._buf) - 1
                self._longline = True
            # Finish the generator (was `raise StopIteration`; see PEP 479).
            return
"resource": ""
} |
def _rescan(self, skip_to_end = True):
    """Check for new files, deleted files, and rotated files.

    :param skip_to_end: passed to TailedFile for newly discovered files.
    """
    # Get listing of matching files.
    paths = []
    for single_glob in self._globspec:
        paths.extend(glob.glob(single_glob))
    # Remove files that don't appear in the new list.  Iterate over a
    # snapshot of the keys: deleting from a dict while iterating over it
    # raises RuntimeError on Python 3.
    for path in list(self._tailedfiles):
        if path not in paths:
            self._tailedfiles[path]._close()
            del self._tailedfiles[path]
    # Add any files we don't have open yet.
    for path in paths:
        try:
            # If the file has been rotated, reopen it.
            if self._tailedfiles[path].hasBeenRotated():
                # If it can't be reopened, close it.
                if not self._tailedfiles[path].reopen():
                    del self._tailedfiles[path]
        except KeyError:
            # Open a file that we haven't seen yet.
            self._tailedfiles[path] = TailedFile(path, skip_to_end = skip_to_end, offset = self._offsets.get(path, None))
"resource": ""
} |
def connectTo(self, remoteRouteName):
    """
    Set the name of the route which will be added to outgoing boxes.

    :param remoteRouteName: the identifier of the peer's route, or None
        for the peer's default (unnamed) route.
    """
    self.remoteRouteName = remoteRouteName
    # This route must not be started before its router is started. If
    # sender is None, then the router is not started. When the router is
    # started, it will start this route.
    if self.router._sender is not None:
        self.start()
"resource": ""
} |
def sendBox(self, box):
    """
    Add the route and send the box.

    @raise RouteNotConnected: if connectTo has not been called yet.
    """
    if self.remoteRouteName is _unspecified:
        raise RouteNotConnected()
    # A None route name means "the peer's default route": send the box
    # without a route key at all.
    if self.remoteRouteName is not None:
        box[_ROUTE] = self.remoteRouteName.encode('ascii')
    self.router._sender.sendBox(box)
"resource": ""
} |
def bindRoute(self, receiver, routeName=_unspecified):
    """
    Create a new route to associate the given route name with the given
    receiver.

    @type routeName: C{unicode} or L{NoneType}
    @param routeName: The identifier for the newly created route.  If
        C{None}, boxes with no route in them will be delivered to this
        receiver.

    @rtype: L{Route}
    """
    if routeName is _unspecified:
        # No explicit name: generate a unique identifier for this router.
        routeName = self.createRouteIdentifier()
    # self._sender may yet be None; if so, this route goes into _unstarted
    # and will have its sender set correctly in startReceivingBoxes below.
    route = Route(self, receiver, routeName)
    mapping = self._routes
    if mapping is None:
        mapping = self._unstarted
    mapping[routeName] = route
    return route
"resource": ""
} |
def startReceivingBoxes(self, sender):
    """
    Initialize route tracking objects.

    :param sender: the box sender that routes use to emit outgoing boxes.
    """
    self._sender = sender
    # .items() works on both Python 2 and 3; iteritems() was removed in 3.
    for routeName, route in self._unstarted.items():
        # Any route which has been bound but which does not yet have a
        # remote route name should not yet be started.  These will be
        # started in Route.connectTo.
        if route.remoteRouteName is not _unspecified:
            route.start()
    self._routes = self._unstarted
    self._unstarted = None
"resource": ""
} |
def push_build(id, tag_prefix):
    """
    Push build to Brew.

    :param id: id of the build record to push.
    :param tag_prefix: Brew tag prefix to push under.
    :return: formatted JSON response, or None if the API call failed.
    """
    req = swagger_client.BuildRecordPushRequestRest()
    req.tag_prefix = tag_prefix
    req.build_record_id = id
    # checked_api_call returns None (after reporting) on API errors.
    response = utils.checked_api_call(pnc_api.build_push, 'push', body=req)
    if response:
        return utils.format_json_list(response)
"resource": ""
} |
def push_build_set(id, tag_prefix):
    """
    Push build set to Brew.

    :param id: id of the build config set record to push.
    :param tag_prefix: Brew tag prefix to push under.
    :return: formatted JSON response, or None if the API call failed.
    """
    req = swagger_client.BuildConfigSetRecordPushRequestRest()
    req.tag_prefix = tag_prefix
    req.build_config_set_record_id = id
    response = utils.checked_api_call(pnc_api.build_push, 'push_record_set', body=req)
    if response:
        return utils.format_json_list(response)
"resource": ""
} |
def push_build_status(id):
    """
    Get status of Brew push.

    :param id: id of the build record whose push status to query.
    :return: formatted JSON response, or None if the API call failed.
    """
    response = utils.checked_api_call(pnc_api.build_push, 'status', build_record_id=id)
    if response:
        return utils.format_json(response)
"resource": ""
} |
def create_transient(self, input_stream, original_name, length=None):
    '''Create TransientFile and file on FS from given input stream and
    original file name.'''
    # Keep the original extension so the random transient name stays
    # recognizable (and usable for content-type guessing).
    ext = os.path.splitext(original_name)[1]
    transient = self.new_transient(ext)
    if not os.path.isdir(self.transient_root):
        os.makedirs(self.transient_root)
    self._copy_file(input_stream, transient.path, length=length)
    return transient
"resource": ""
} |
def new_transient(self, ext=''):
    '''Creates empty TransientFile with random name and given extension.
    File on FS is not created'''
    name = random_name(self.transient_length) + ext
    return TransientFile(self.transient_root, name, self)
"resource": ""
} |
def get_transient(self, name):
    '''Restores TransientFile object with given name.
    Should be used when form is submitted with file name and no file'''
    # security checks: basically no folders are allowed
    # NOTE(review): `assert` is stripped under `python -O`; consider an
    # explicit exception for this path-traversal guard.
    assert not ('/' in name or '\\' in name or name[0] in '.~')
    transient = TransientFile(self.transient_root, name, self)
    if not os.path.isfile(transient.path):
        raise OSError(errno.ENOENT, 'Transient file has been lost',
                      transient.path)
    return transient
"resource": ""
} |
def store(self, transient_file, persistent_file):
    '''Makes PersistentFile from TransientFile by moving the underlying
    file into the persistent tree.'''
    dirname = os.path.dirname(persistent_file.path)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
    # NOTE(review): os.rename fails across filesystems; this assumes the
    # transient and persistent roots live on the same device -- confirm.
    os.rename(transient_file.path, persistent_file.path)
    return persistent_file
"resource": ""
} |
def trace(fn):  # pragma: no cover
    """ Prints parameters and return values of each call of the wrapped function.

    Usage:
        decorate appropriate function or method:

        @trace
        def myf():
            ...
    """
    import functools

    # functools.wraps preserves the wrapped function's name/docstring so
    # introspection and debugging still see the original identity.
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        msg = []
        msg.append('Enter {}('.format(fn.__name__))
        if args:
            msg.append(', '.join([str(x) for x in args]))
        if kwargs:
            kwargs_str = ', '.join(['{}={}'.format(k, v) for k, v in list(kwargs.items())])
            if args:
                msg.append(', ')
            msg.append(kwargs_str)
        msg.append(')')
        print(''.join(msg))
        ret = fn(*args, **kwargs)
        print('Return {}'.format(ret))
        return ret
    return wrapped
"resource": ""
} |
def patch_file_open(): # pragma: no cover
    """A Monkey patch to log opening and closing of files, which is useful for
    debugging file descriptor exhaustion.

    NOTE(review): relies on ``builtins.file``, which exists on Python 2
    only -- this helper will not work on Python 3.
    """
    openfiles = set()
    oldfile = builtins.file
    class newfile(oldfile):
        def __init__(self, *args, **kwargs):
            self.x = args[0]
            all_fds = count_open_fds()
            print('### {} OPENING {} ( {} total )###'.format(
                len(openfiles), str(self.x), all_fds))
            oldfile.__init__(self, *args, **kwargs)
            openfiles.add(self)
        def close(self):
            print('### {} CLOSING {} ###'.format(len(openfiles), str(self.x)))
            oldfile.close(self)
            openfiles.remove(self)
    def newopen(*args, **kwargs):
        return newfile(*args, **kwargs)
    # Replace the builtins so all subsequent open() calls are logged.
    builtins.file = newfile
    builtins.open = newopen
"resource": ""
} |
def determineFrom(cls, challenge, password):
    """
    Create a nonce and use it, along with the given challenge and password,
    to generate the parameters for a response.

    @return: A C{dict} suitable to be used as the keyword arguments when
        calling this command.
    """
    # 16 bytes of cryptographically secure randomness.
    nonce = secureRandom(16)
    response = _calcResponse(challenge, nonce, password)
    return dict(cnonce=nonce, response=response)
"resource": ""
} |
def checkPassword(self, password):
    """
    Check the given plaintext password against the response in this
    credentials object.

    @type password: C{str}
    @param password: The known correct password associated with
        C{self.username}.

    @return: A C{bool}, C{True} if this credentials object agrees with the
        given password, C{False} otherwise.
    """
    # The hash works on bytes; NOTE(review): `unicode` is Python 2 only.
    if isinstance(password, unicode):
        password = password.encode('utf-8')
    correctResponse = _calcResponse(self.challenge, self.nonce, password)
    return correctResponse == self.response
"resource": ""
} |
def passwordLogin(self, username):
    """
    Generate a new challenge for the given username.

    Stores the challenge and the username on self so that the subsequent
    passwordChallengeResponse step can verify against them.
    """
    self.challenge = secureRandom(16)
    self.username = username
    return {'challenge': self.challenge}
"resource": ""
} |
def _login(self, credentials):
    """
    Actually login to our portal with the given credentials.

    :return: a Deferred that fires with an empty dict once the avatar has
        been wired up to receive boxes.
    """
    d = self.portal.login(credentials, None, IBoxReceiver)
    def cbLoggedIn(result):
        # Unpack inside the body: implicit tuple parameters in the
        # signature (the original form) were removed by PEP 3113 and are
        # a SyntaxError on Python 3.
        interface, avatar, logout = result
        self.logout = logout
        self.boxReceiver = avatar
        self.boxReceiver.startReceivingBoxes(self.boxSender)
        return {}
    d.addCallback(cbLoggedIn)
    return d
"resource": ""
} |
def passwordChallengeResponse(self, cnonce, response):
    """
    Verify the response to a challenge.

    Builds credentials from the challenge previously issued by
    passwordLogin and delegates the actual portal login to _login.
    """
    return self._login(_AMPUsernamePassword(
        self.username, self.challenge, cnonce, response))
"resource": ""
} |
def connectionLost(self, reason):
    """
    If a login has happened, perform a logout.
    """
    AMP.connectionLost(self, reason)
    if self.logout is not None:
        self.logout()
        # Drop references so they cannot be used after disconnection.
        self.boxReceiver = self.logout = None
"resource": ""
} |
def on_position_changed(self, position):
    """bind position changed signal with this"""
    # Nothing to do if the current song has no lyric loaded.
    if not self._lyric:
        return
    # position appears to be in seconds while the lyric table is keyed in
    # milliseconds; +300 looks like latency compensation -- TODO confirm.
    pos = find_previous(position*1000 + 300, self._pos_list)
    if pos is not None and pos != self._pos:
        self.current_sentence = self._pos_s_map[pos]
        self._pos = pos
"resource": ""
} |
def on_song_changed(self, song):
    """bind song changed signal with this"""
    if song is None or song.lyric is None:
        self._lyric = None
        self._pos_s_map = {}
    else:
        self._lyric = song.lyric.content
        self._pos_s_map = parse(self._lyric)
        self._pos_list = sorted(list(self._pos_s_map.keys()))
    # NOTE(review): indentation reconstructed -- the reset of _pos and
    # current_sentence is assumed to apply in both branches; verify.
    self._pos = None
    self.current_sentence = ''
"resource": ""
} |
def sanitizeStructTime(struct):
    """
    Convert struct_time tuples with possibly invalid values to valid
    ones by substituting the closest valid value.
    """
    # (min, max) bounds for year, month, day, hour, minute, second.
    bounds = ((1, 9999), (1, 12), (1, 31), (0, 23), (0, 59), (0, 59))
    clamped = tuple(
        min(max(value, low), high)
        for value, (low, high) in zip(struct[:6], bounds))
    # The trailing fields (weekday, yearday, isdst) pass through untouched.
    return clamped + struct[6:]
"resource": ""
} |
def fromHumanly(klass, humanStr, tzinfo=None, now=None):
    """Return a new Time instance from a string a human might type.

    @param humanStr: the string to be parsed.

    @param tzinfo: A tzinfo instance indicating the timezone to assume if
    none is specified in humanStr. If None, assume UTC.

    @param now: A Time instance to be considered "now" for when
    interpreting relative dates like "tomorrow". If None, use the real now.

    Total crap now, it just supports weekdays, "today" and "tomorrow" for
    now. This is pretty insufficient and useless, but good enough for some
    demo functionality, or something.
    """
    humanStr = humanStr.strip()
    if now is None:
        now = Time()
    if tzinfo is None:
        tzinfo = FixedOffset(0, 0)

    for pattern, creator in klass.humanlyPatterns:
        match = pattern.match(humanStr)
        # The pattern must consume the entire input string.
        if not match \
        or match.span()[1] != len(humanStr):
            continue
        try:
            return creator(klass, match, tzinfo, now)
        except ValueError:
            continue
    # Call-style raise: the old `raise ValueError, ...` comma syntax is
    # Python 2 only (SyntaxError on Python 3).
    raise ValueError('could not parse date: %r' % (humanStr,))
"resource": ""
} |
def fromStructTime(klass, structTime, tzinfo=None):
    """Return a new Time instance from a time.struct_time.

    If tzinfo is None, structTime is in UTC. Otherwise, tzinfo is a
    datetime.tzinfo instance coresponding to the timezone in which
    structTime is.

    Many of the functions in the standard time module return these things.

    This will also work with a plain 9-tuple, for parity with the time
    module. The last three elements, or tm_wday, tm_yday, and tm_isdst are
    ignored.
    """
    # Only the first six fields (year..second) are meaningful here.
    dtime = datetime.datetime(tzinfo=tzinfo, *structTime[:6])
    self = klass.fromDatetime(dtime)
    # struct_time has whole-second granularity.
    self.resolution = datetime.timedelta(seconds=1)
    return self
"resource": ""
} |
def fromDatetime(klass, dtime):
    """Return a new Time instance from a datetime.datetime instance.

    If the datetime instance does not have an associated timezone, it is
    assumed to be UTC.
    """
    self = klass.__new__(klass)
    if dtime.tzinfo is not None:
        # Normalize to UTC and store internally as a naive datetime.
        self._time = dtime.astimezone(FixedOffset(0, 0)).replace(tzinfo=None)
    else:
        self._time = dtime
    self.resolution = datetime.timedelta.resolution
    return self
"resource": ""
} |
def fromPOSIXTimestamp(klass, secs):
    """Return a new Time instance from seconds since the POSIX epoch.

    The POSIX epoch is midnight Jan 1, 1970 UTC. According to POSIX, leap
    seconds don't exist, so one UTC day is exactly 86400 seconds, even if
    it wasn't.

    @param secs: a number of seconds, represented as an integer, long or
    float.
    """
    self = klass.fromDatetime(_EPOCH + datetime.timedelta(seconds=secs))
    # A zero-width resolution: this represents an exact instant.
    self.resolution = datetime.timedelta()
    return self
"resource": ""
} |
def fromRFC2822(klass, rfc822string):
    """
    Return a new Time instance from a string formated as described in RFC 2822.

    @type rfc822string: str

    @raise ValueError: if the timestamp is not formatted properly (or if
    certain obsoleted elements of the specification are used).

    @return: a new L{Time}
    """
    # parsedate_tz is going to give us a "struct_time plus", a 10-tuple
    # containing the 9 values a struct_time would, i.e.: (tm_year, tm_mon,
    # tm_day, tm_hour, tm_min, tm_sec, tm_wday, tm_yday, tm_isdst), plus a
    # bonus "offset", which is an offset (in _seconds_, of all things).
    maybeStructTimePlus = parsedate_tz(rfc822string)

    if maybeStructTimePlus is None:
        # Call-style raise: the old `raise ValueError, ...` comma syntax
        # is Python 2 only (SyntaxError on Python 3).
        raise ValueError('could not parse RFC 2822 date %r' % (rfc822string,))

    structTimePlus = sanitizeStructTime(maybeStructTimePlus)
    offsetInSeconds = structTimePlus[-1]
    if offsetInSeconds is None:
        offsetInSeconds = 0
    self = klass.fromStructTime(
        structTimePlus,
        FixedOffset(
            hours=0,
            minutes=offsetInSeconds // 60))
    self.resolution = datetime.timedelta(seconds=1)
    return self
"resource": ""
} |
def asDatetime(self, tzinfo=None):
    """Return this time as an aware datetime.datetime instance.

    The returned datetime object has the specified tzinfo, or a tzinfo
    describing UTC if the tzinfo parameter is None.
    """
    if tzinfo is None:
        tzinfo = FixedOffset(0, 0)

    if not self.isTimezoneDependent():
        # Timezone-independent values (e.g. whole days) are just tagged
        # with the requested zone, without converting.
        return self._time.replace(tzinfo=tzinfo)
    else:
        # _time is stored as naive UTC: tag it as UTC, then convert.
        return self._time.replace(tzinfo=FixedOffset(0, 0)).astimezone(tzinfo)
"resource": ""
} |
def asRFC2822(self, tzinfo=None, includeDayOfWeek=True):
    """Return this Time formatted as specified in RFC 2822.

    RFC 2822 specifies the format of email messages.

    RFC 2822 says times in email addresses should reflect the local
    timezone. If tzinfo is a datetime.tzinfo instance, the returned
    formatted string will reflect that timezone. Otherwise, the timezone
    will be '-0000', which RFC 2822 defines as UTC, but with an unknown
    local timezone.

    RFC 2822 states that the weekday is optional. The parameter
    includeDayOfWeek indicates whether or not to include it.
    """
    dtime = self.asDatetime(tzinfo)

    if tzinfo is None:
        # '-0000' means UTC with an unknown local zone per RFC 2822.
        rfcoffset = '-0000'
    else:
        rfcoffset = '%s%02i%02i' % _timedeltaToSignHrMin(dtime.utcoffset())

    rfcstring = ''
    if includeDayOfWeek:
        rfcstring += self.rfc2822Weekdays[dtime.weekday()] + ', '

    rfcstring += '%i %s %4i %02i:%02i:%02i %s' % (
        dtime.day,
        self.rfc2822Months[dtime.month - 1],
        dtime.year,
        dtime.hour,
        dtime.minute,
        dtime.second,
        rfcoffset)

    return rfcstring
"resource": ""
} |
def asISO8601TimeAndDate(self, includeDelimiters=True, tzinfo=None,
                         includeTimezone=True):
    """Return this time formatted as specified by ISO 8861.

    ISO 8601 allows optional dashes to delimit dates and colons to delimit
    times. The parameter includeDelimiters (default True) defines the
    inclusion of these delimiters in the output.

    If tzinfo is a datetime.tzinfo instance, the output time will be in the
    timezone given. If it is None (the default), then the timezone string
    will not be included in the output, and the time will be in UTC.

    The includeTimezone parameter coresponds to the inclusion of an
    explicit timezone. The default is True.
    """
    if not self.isTimezoneDependent():
        tzinfo = None
    dtime = self.asDatetime(tzinfo)

    if includeDelimiters:
        dateSep = '-'
        timeSep = ':'
    else:
        dateSep = timeSep = ''

    if includeTimezone:
        if tzinfo is None:
            timezone = '+00%s00' % (timeSep,)
        else:
            sign, hour, min = _timedeltaToSignHrMin(dtime.utcoffset())
            timezone = '%s%02i%s%02i' % (sign, hour, timeSep, min)
    else:
        timezone = ''

    # Trailing zeros of the fractional second are not emitted.
    microsecond = ('%06i' % (dtime.microsecond,)).rstrip('0')
    if microsecond:
        microsecond = '.' + microsecond

    # Each part is paired with the coarsest resolution at which it is
    # still meaningful; parts coarser than self.resolution are emitted.
    parts = [
        ('%04i' % (dtime.year,), datetime.timedelta(days=366)),
        ('%s%02i' % (dateSep, dtime.month), datetime.timedelta(days=31)),
        ('%s%02i' % (dateSep, dtime.day), datetime.timedelta(days=1)),
        ('T', datetime.timedelta(hours=1)),
        ('%02i' % (dtime.hour,), datetime.timedelta(hours=1)),
        ('%s%02i' % (timeSep, dtime.minute), datetime.timedelta(minutes=1)),
        ('%s%02i' % (timeSep, dtime.second), datetime.timedelta(seconds=1)),
        (microsecond, datetime.timedelta(microseconds=1)),
        (timezone, datetime.timedelta(hours=1))
    ]

    formatted = ''
    for part, minResolution in parts:
        if self.resolution <= minResolution:
            formatted += part

    return formatted
"resource": ""
} |
def asStructTime(self, tzinfo=None):
    """Return this time represented as a time.struct_time.

    tzinfo is a datetime.tzinfo instance coresponding to the desired
    timezone of the output. If is is the default None, UTC is assumed.
    """
    dtime = self.asDatetime(tzinfo)
    if tzinfo is None:
        return dtime.utctimetuple()
    else:
        return dtime.timetuple()
"resource": ""
} |
def asHumanly(self, tzinfo=None, now=None, precision=Precision.MINUTES):
    """Return this time as a short string, tailored to the current time.

    Parts of the date that can be assumed are omitted. Consequently, the
    output string depends on the current time. This is the format used for
    displaying dates in most user visible places in the quotient web UI.

    By default, the current time is determined by the system clock. The
    current time used for formatting the time can be changed by providing a
    Time instance as the parameter 'now'.

    @param precision: The smallest unit of time that will be represented
    in the returned string. Valid values are L{Time.Precision.MINUTES} and
    L{Time.Precision.SECONDS}.

    @raise InvalidPrecision: if the specified precision is not either
    L{Time.Precision.MINUTES} or L{Time.Precision.SECONDS}.
    """
    try:
        timeFormat = Time._timeFormat[precision]
    except KeyError:
        raise InvalidPrecision(
            'Use Time.Precision.MINUTES or Time.Precision.SECONDS')
    if now is None:
        now = Time().asDatetime(tzinfo)
    else:
        now = now.asDatetime(tzinfo)
    dtime = self.asDatetime(tzinfo)

    # Same day?  If so, the date portion can be omitted entirely.
    if dtime.date() == now.date():
        if self.isAllDay():
            return 'all day'
        return dtime.strftime(timeFormat).lower()
    else:
        res = str(dtime.date().day) + dtime.strftime(' %b') # day + month
        # Different year?  Only then is the year included.
        if not dtime.date().year == now.date().year:
            res += dtime.strftime(' %Y')
        if not self.isAllDay():
            res += dtime.strftime(', %s' % (timeFormat,)).lower()
        return res
"resource": ""
} |
def getBounds(self, tzinfo=None):
    """
    Return a pair describing the bounds of self.

    This returns a pair (min, max) of Time instances. It is not quite the
    same as (self, self + self.resolution). This is because timezones are
    insignificant for instances with a resolution greater or equal to 1
    day.

    To illustrate the problem, consider a Time instance::

        T = Time.fromHumanly('today', tzinfo=anything)

    This will return an equivalent instance independent of the tzinfo used.
    The hour, minute, and second of this instance are 0, and its resolution
    is one day.

    Now say we have a sorted list of times, and we want to get all times
    for 'today', where whoever said 'today' is in a timezone that's 5 hours
    ahead of UTC. The start of 'today' in this timezone is UTC 05:00. The
    example instance T above is before this, but obviously it is today.

    The min and max times this returns are such that all potentially
    matching instances are within this range. However, this range might
    contain unmatching instances.

    As an example of this, if 'today' is April first 2005, then
    Time.fromISO8601TimeAndDate('2005-04-01T00:00:00') sorts in the same
    place as T from above, but is not in the UTC+5 'today'.

    TIME IS FUN!
    """
    # For day-or-coarser resolutions, consider both the zone-tagged and
    # the naive interpretation, and take the widest enclosing range.
    if self.resolution >= datetime.timedelta(days=1) \
       and tzinfo is not None:
        time = self._time.replace(tzinfo=tzinfo)
    else:
        time = self._time

    return (
        min(self.fromDatetime(time), self.fromDatetime(self._time)),
        max(self.fromDatetime(time + self.resolution),
            self.fromDatetime(self._time + self.resolution))
    )
"resource": ""
} |
def oneDay(self):
    """Return a Time instance representing the day of the start of self.

    The returned new instance will be set to midnight of the day containing
    the first instant of self in the specified timezone, and have a
    resolution of datetime.timedelta(days=1).
    """
    # Truncate the time-of-day fields to get midnight of the same day.
    day = self.__class__.fromDatetime(self.asDatetime().replace(
        hour=0, minute=0, second=0, microsecond=0))
    day.resolution = datetime.timedelta(days=1)
    return day
"resource": ""
} |
def expiring_memoize(obj):
    """Like memoize, but forgets after 10 seconds."""
    cache = obj.cache = {}
    last_access = obj.last_access = defaultdict(int)

    @wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        # Evict the entry if it was last touched more than 10s ago.
        stale = last_access[key] and last_access[key] + 10 < time()
        if stale:
            cache.pop(key, None)
        last_access[key] = time()
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]

    return memoizer
"resource": ""
} |
def md5_for_file(f, block_size=2 ** 20):
    """Generate an MD5 hash for a possibly large file by breaking it into
    chunks.

    :param f: a file-like object (hashed from its start), or a path string.
    :param block_size: chunk size, passed through to md5_for_stream.
    """
    # Only f.seek can legitimately raise AttributeError for the
    # "is it file-like?" probe; keeping md5_for_stream outside the try
    # avoids masking unrelated AttributeErrors.  (The unused local
    # `md5 = hashlib.md5()` from the original was dead code and is gone.)
    try:
        # Guess that f is a file-like object.
        f.seek(0)
    except AttributeError:
        # Nope, not file-like: treat it as a file name.
        with open(f, 'rb') as fh:
            return md5_for_file(fh, block_size)
    else:
        return md5_for_stream(f, block_size=block_size)
"resource": ""
} |
def make_acro(past, prefix, s):  # pragma: no cover
    """Create a three letter acronym from the input string s.

    Args:
        past: A set object, for storing acronyms that have already been created
        prefix: A prefix added to the acronym before storing in the set
        s: The string to create the acronym from.
    """

    def _make_acro(s, t=0):
        """Make an acronym of s for trial t; each trial uses a different
        vowel/consonant combination strategy."""

        # Really should cache these ...
        v = ['a', 'e', 'i', 'o', 'u', 'y']
        c = [chr(x) for x in six_xrange(ord('a'), ord('z') + 1) if chr(x) not in v]

        s = re.sub(r'\W+', '', s.lower())

        vx = [x for x in s if x in v]  # Vowels in input string
        cx = [x for x in s if x in c]  # Consonants in input string

        if s.startswith('Mc'):
            if t < 1:
                return 'Mc' + v[0]
            if t < 2:
                return 'Mc' + c[0]

        if s[0] in v:  # Starts with a vowel
            if t < 1:
                return vx[0] + cx[0] + cx[1]
            if t < 2:
                return vx[0] + vx[1] + cx[0]

        if s[0] in c and s[1] in c:  # Two first consonants
            if t < 1:
                return cx[0] + cx[1] + vx[0]
            if t < 2:
                return cx[0] + cx[1] + cx[2]

        if t < 3:
            return cx[0] + vx[0] + cx[1]
        if t < 4:
            return cx[0] + cx[1] + cx[2]
        if t < 5:
            return cx[0] + vx[0] + vx[1]
        if t < 6:
            return cx[0] + cx[1] + cx[-1]

        # These are punts; just take a substring
        if t < 7:
            return s[0:3]
        if t < 8:
            return s[1:4]
        if t < 9:
            return s[2:5]
        if t < 10:
            return s[3:6]

        return None

    # Try multiple forms until one isn't in the past acronyms.
    for t in six_xrange(11):

        try:
            a = _make_acro(s, t)

            if a is not None:
                if prefix:
                    aps = prefix + a
                else:
                    aps = a

                if aps not in past:
                    past.add(aps)
                    return a
        except IndexError:
            # Not enough vowels/consonants for this strategy: try the next.
            pass

    raise Exception('Could not get acronym.')
"resource": ""
} |
def ensure_dir_exists(path):
    """Given a file, ensure that the path to the file exists"""
    import os
    parent = os.path.dirname(path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    return parent
"resource": ""
} |
def init_log_rate(output_f, N=None, message='', print_rate=None):
    """Initialze the log_rate function. Returnas a partial function to call for
    each event.

    If N is not specified but print_rate is specified, the initial N is
    set to 100, and after the first message, the N value is adjusted to
    emit print_rate messages per second
    """
    if print_rate and not N:
        N = 100

    if not N:
        N = 5000

    # Mutable state shared with _log_rate via partial(); indexed access:
    d = [0,        # 0: number of items processed
         time(),   # 1: start time. This one gets replaced after first message
         N,        # 2: ticker to next message
         N,        # 3: frequency to log a message
         message,  # 4: default message text
         print_rate,            # 5: target messages per second, or None
         deque([], maxlen=4)    # 6: deque for averaging the last rates
         ]

    assert isinstance(output_f, Callable)

    f = partial(_log_rate, output_f, d)
    # Convenience attributes on the returned partial:
    # .always bypasses rate limiting; .count() reports items processed.
    f.always = output_f
    f.count = lambda: d[0]

    return f
"resource": ""
} |
def _log_rate(output_f, d, message=None):
    """Log a message for the Nth time the method is called.

    d is the mutable state list created by init_log_rate; see that
    function for the meaning of each index.
    """

    if d[2] <= 0:

        if message is None:
            message = d[4]

        # Average the rate over the length of the deque.
        d[6].append(int(d[3] / (time() - d[1])))
        rate = sum(d[6]) / len(d[6])

        # Prints the processing rate in 1,000 records per sec.
        output_f(message + ': ' + str(rate) + '/s ' + str(d[0] / 1000) + 'K ')

        d[1] = time()

        # If the print_rate was specified, adjust the number of records to
        # aproximate that rate.
        if d[5]:
            target_rate = rate * d[5]
            d[3] = int((target_rate + d[3]) / 2)

        d[2] = d[3]

    d[0] += 1
    d[2] -= 1
"resource": ""
} |
def count_open_fds():
    """return the number of open file descriptors for current process.

    .. warning: will only work on UNIX-like os-es.

    http://stackoverflow.com/a/7142094
    """
    pid = os.getpid()

    # universal_newlines=True makes check_output return text on Python 3;
    # without it the output is bytes and str.split('\n') below would fail.
    procs = subprocess.check_output(
        ['lsof', '-w', '-Ff', '-p', str(pid)], universal_newlines=True)

    # lsof -Ff prefixes each fd line with 'f'; count only numeric fds.
    nprocs = len(
        [s for s in procs.split('\n') if s and s[0] == 'f' and s[1:].isdigit()]
    )
    return nprocs
"resource": ""
} |
def parse_url_to_dict(url):
    """Parse a url and return a dict with keys for all of the parts.

    The urlparse() function returns a wacky combination of a namedtuple
    with properties; this flattens it into a plain dict.
    """
    parts = urlparse(url)
    keys = ('scheme', 'netloc', 'path', 'params', 'query', 'fragment',
            'username', 'password', 'hostname', 'port')
    return {key: getattr(parts, key) for key in keys}
"resource": ""
} |
def set_url_part(url, **kwargs):
    """Change one or more parts of a URL"""
    # Decompose, overlay the requested parts, and rebuild.
    parts = parse_url_to_dict(url)
    parts.update(kwargs)
    return unparse_url_dict(parts)
"resource": ""
} |
def filter_url(url, **kwargs):
    """filter a URL by returning a URL with only the parts specified in the keywords"""
    parts = parse_url_to_dict(url)
    parts.update(kwargs)
    # Drop every part with a falsy value before rebuilding.
    nonempty = {key: value for key, value in parts.items() if value}
    return unparse_url_dict(nonempty)
"resource": ""
} |
def print_yaml(o):
    """Pretty print an object as YAML."""
    # default_flow_style=False forces block style; encoding='utf-8' makes
    # yaml.dump return a UTF-8 byte string on Python 2.
    print(yaml.dump(o, default_flow_style=False, indent=4, encoding='utf-8'))
"resource": ""
} |
def qualified_class_name(o):
    """Full name of an object, including the module"""
    cls = o.__class__
    # Builtins (and module-less classes) are reported by bare class name.
    if cls.__module__ in (None, str.__class__.__module__):
        return cls.__name__
    return '{}.{}'.format(cls.__module__, cls.__name__)
"resource": ""
} |
def drop_empty(rows):
    """Transpose the columns into rows, remove all of the rows that are empty after the first cell, then
    transpose back. The result is that columns that have a header but no data in the body are removed, assuming
    the header is the first row.

    :param rows: iterable of equal-length row tuples; the first row is the header.
    :return: an iterator of row tuples with the empty columns removed.
    """
    # NOTE: the original used bool(filter(bool, col[1:])), which is always
    # truthy on Python 3 because filter() returns a lazy iterator; any()
    # expresses the intent and behaves identically on Python 2 and 3.
    columns = [col for col in zip(*rows) if any(col[1:])]
    return zip(*columns)
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.