| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q14900
|
CopyBuilder.get_items
|
train
|
def get_items(self, source=None, target=None, crit=None):
"""Copy records from source to target collection.
:param source: Input collection
:type source: QueryEngine
:param target: Output collection
:type target: QueryEngine
:param crit: Filter criteria, e.g. "{ 'flag': True }".
:type crit: dict
"""
self._target_coll = target.collection
if not crit: # reduce any False-y crit value to None
crit = None
cur = source.query(criteria=crit)
_log.info("source.collection={} crit={} source_records={:d}"
.format(source.collection, crit, len(cur)))
return cur
|
python
|
{
"resource": ""
}
|
q14901
|
parse_fn_docstring
|
train
|
def parse_fn_docstring(fn):
"""Get parameter and return types from function's docstring.
Docstrings must use this format::
:param foo: What is foo
:type foo: int
:return: What is returned
:rtype: double
:return: A map of names, each with keys 'type' and 'desc'.
:rtype: tuple(dict)
"""
doc = fn.__doc__
params, return_ = {}, {}
param_order = []
for line in doc.split("\n"):
line = line.strip()
if line.startswith(":param"):
_, name, desc = line.split(":", 2)
name = name[6:].strip() # skip 'param '
params[name] = {'desc': desc.strip()}
param_order.append(name)
elif line.startswith(":type"):
_, name, desc = line.split(":", 2)
name = name[5:].strip() # skip 'type '
if name not in params:
raise ValueError("'type' without 'param' for {}".format(name))
params[name]['type'] = desc.strip()
elif line.startswith(":return"):
_1, _2, desc = line.split(":", 2)
return_['desc'] = desc
elif line.startswith(":rtype"):
_1, _2, desc = line.split(":", 2)
return_['type'] = desc.strip()
return params
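# Illustration (editor's note, not part of the original source): for a
# docstring containing ":param foo: What is foo" and ":type foo: int",
# this returns {'foo': {'desc': 'What is foo', 'type': 'int'}}; the
# ':return:'/':rtype:' lines are parsed into `return_`, but only `params`
# is returned by this version.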
|
python
|
{
"resource": ""
}
|
q14902
|
merge_tasks
|
train
|
def merge_tasks(core_collections, sandbox_collections, id_prefix, new_tasks, batch_size=100, wipe=False):
"""Merge core and sandbox collections into a temporary collection in the sandbox.
:param core_collections: Core collection info
:type core_collections: Collections
:param sandbox_collections: Sandbox collection info
:type sandbox_collections: Collections
"""
merged = copy.copy(sandbox_collections)
# create/clear target collection
target = merged.database[new_tasks]
if wipe:
_log.debug("merge_tasks.wipe.begin")
target.remove()
merged.database['counter'].remove()
_log.debug("merge_tasks.wipe.end")
# perform the merge
batch = []
for doc in core_collections.tasks.find():
batch.append(doc)
if len(batch) == batch_size:
target.insert(batch)
batch = []
if batch:
target.insert(batch)
batch = []
for doc in sandbox_collections.tasks.find():
doc['task_id'] = id_prefix + '-' + str(doc['task_id'])
batch.append(doc)
if len(batch) == batch_size:
target.insert(batch)
batch = []
if batch:
target.insert(batch)
|
python
|
{
"resource": ""
}
|
q14903
|
alphadump
|
train
|
def alphadump(d, indent=2, depth=0):
"""Dump a dict to a str,
with keys in alphabetical order.
"""
sep = '\n' + ' ' * depth * indent
return ''.join(
("{}: {}{}".format(
k,
alphadump(d[k], depth=depth+1) if isinstance(d[k], dict)
else str(d[k]),
sep)
for k in sorted(d.keys()))
)
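# Illustration (editor's note): alphadump({'b': 1, 'a': 2}) yields the string
# "a: 2\nb: 1\n"; nested dicts are rendered recursively, indented by `indent`
# spaces per `depth` level.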
|
python
|
{
"resource": ""
}
|
q14904
|
HasExamples.validate_examples
|
train
|
def validate_examples(self, fail_fn):
"""Check the examples against the schema.
:param fail_fn: Pass failure messages to this function
:type fail_fn: function(str)
"""
for collection, doc in self.examples():
_log.debug("validating example in collection {}".format(collection))
sch = schema.get_schema(collection) # with more err. checking
result = sch.validate(doc)
_log.debug("validation result: {}".format("OK" if result is None else result))
if result is not None:
fail_fn("Failed to validate sample document: {}".format(result))
|
python
|
{
"resource": ""
}
|
q14905
|
Builder.run
|
train
|
def run(self, user_kw=None, build_kw=None):
"""Run the builder.
:param user_kw: keywords from user
:type user_kw: dict
:param build_kw: internal settings
:type build_kw: dict
:return: Number of items processed
:rtype: int
"""
user_kw = {} if user_kw is None else user_kw
build_kw = {} if build_kw is None else build_kw
n = self._build(self.get_items(**user_kw), **build_kw)
finalized = self.finalize(self._status.has_failures())
if not finalized:
_log.error("Finalization failed")
return n
|
python
|
{
"resource": ""
}
|
q14906
|
Builder.connect
|
train
|
def connect(self, config):
"""Connect to database with given configuration, which may be a dict or
a path to a pymatgen-db configuration.
"""
if isinstance(config, str):
conn = dbutil.get_database(config_file=config)
elif isinstance(config, dict):
conn = dbutil.get_database(settings=config)
else:
raise ValueError("Configuration, '{}', must be a path to "
"a configuration file or dict".format(config))
return conn
|
python
|
{
"resource": ""
}
|
q14907
|
Builder._build
|
train
|
def _build(self, items, chunk_size=10000):
"""Build the output, in chunks.
:return: Number of items processed
:rtype: int
"""
_log.debug("_build, chunk_size={:d}".format(chunk_size))
n, i = 0, 0
for i, item in enumerate(items):
if i == 0:
_log.debug("_build, first item")
if 0 == (i + 1) % chunk_size:
if self._seq:
self._run(0)
else:
self._run_parallel_fn() # process the chunk
if self._status.has_failures():
break
n = i + 1
self._queue.put(item)
# process final chunk
if self._seq:
self._run(0)
else:
self._run_parallel_fn()
if not self._status.has_failures():
n = i + 1
return n
|
python
|
{
"resource": ""
}
|
q14908
|
Builder._run_parallel_multiprocess
|
train
|
def _run_parallel_multiprocess(self):
"""Run processes from queue
"""
_log.debug("run.parallel.multiprocess.start")
processes = []
ProcRunner.instance = self
for i in range(self._ncores):
self._status.running(i)
proc = multiprocessing.Process(target=ProcRunner.run, args=(i,))
proc.start()
processes.append(proc)
for i in range(self._ncores):
processes[i].join()
code = processes[i].exitcode
self._status.success(i) if 0 == code else self._status.fail(i)
_log.debug("run.parallel.multiprocess.end states={}".format(self._status))
|
python
|
{
"resource": ""
}
|
q14909
|
Builder._run
|
train
|
def _run(self, index):
"""Run method for one thread or process
Just pull an item off the queue and process it,
until the queue is empty.
:param index: Sequential index of this process or thread
:type index: int
"""
while 1:
try:
item = self._queue.get(timeout=2)
self.process_item(item)
except Queue.Empty:
break
except Exception as err:
_log.error("In _run(): {}".format(err))
if _log.isEnabledFor(logging.DEBUG):
_log.error(traceback.format_exc())
self._status.fail(index)
raise
self._status.success(index)
|
python
|
{
"resource": ""
}
|
q14910
|
csv_dict
|
train
|
def csv_dict(d):
"""Format dict to a string with comma-separated values.
"""
if len(d) == 0:
return "{}"
return "{" + ', '.join(["'{}': {}".format(k, quotable(v))
for k, v in d.items()]) + "}"
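# Illustration (editor's note; assumes the helper quotable() returns
# non-string values unchanged): csv_dict({'flag': True}) would yield
# "{'flag': True}", and csv_dict({}) yields "{}".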
|
python
|
{
"resource": ""
}
|
q14911
|
kvp_dict
|
train
|
def kvp_dict(d):
"""Format dict to key=value pairs.
"""
return ', '.join(
["{}={}".format(k, quotable(v)) for k, v in d.items()])
|
python
|
{
"resource": ""
}
|
q14912
|
MaxValueBuilder.get_items
|
train
|
def get_items(self, source=None, target=None):
"""Get all records from source collection to add to target.
:param source: Input collection
:type source: QueryEngine
:param target: Output collection
:type target: QueryEngine
"""
self._groups = self.shared_dict()
self._target_coll = target.collection
self._src = source
return source.query()
|
python
|
{
"resource": ""
}
|
q14913
|
MaxValueBuilder.process_item
|
train
|
def process_item(self, item):
"""Calculate new maximum value for each group,
for "new" items only.
"""
group, value = item['group'], item['value']
if group in self._groups:
cur_val = self._groups[group]
self._groups[group] = max(cur_val, value)
else:
# New group. Could fetch old max. from target collection,
# but for the sake of illustration recalculate it from
# the source collection.
self._src.tracking = False # examine entire collection
new_max = value
for rec in self._src.query(criteria={'group': group},
properties=['value']):
new_max = max(new_max, rec['value'])
self._src.tracking = True # back to incremental mode
# calculate new max
self._groups[group] = new_max
|
python
|
{
"resource": ""
}
|
q14914
|
MaxValueBuilder.finalize
|
train
|
def finalize(self, errs):
"""Update target collection with calculated maximum values.
"""
for group, value in self._groups.items():
doc = {'group': group, 'value': value}
self._target_coll.update({'group': group}, doc, upsert=True)
return True
|
python
|
{
"resource": ""
}
|
q14915
|
Projection.add
|
train
|
def add(self, field, op=None, val=None):
"""Update report fields to include new one, if it doesn't already.
:param field: The field to include
:type field: Field
:param op: Operation
:type op: ConstraintOperator
:return: None
"""
if field.has_subfield():
self._fields[field.full_name] = 1
else:
self._fields[field.name] = 1
if op and op.is_size() and not op.is_variable():
# get minimal part of array with slicing,
# but cannot use slice with variables
self._slices[field.name] = val + 1
if op and op.is_variable():
# add the variable too
self._fields[val] = 1
|
python
|
{
"resource": ""
}
|
q14916
|
Projection.to_mongo
|
train
|
def to_mongo(self):
"""Translate projection to MongoDB query form.
:return: Dictionary to put into a MongoDB JSON query
:rtype: dict
"""
d = copy.copy(self._fields)
for k, v in self._slices.items():
d[k] = {'$slice': v}
return d
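# Illustration (editor's note): with _fields == {'a': 1, 'b': 1} and
# _slices == {'b': 3}, this returns {'a': 1, 'b': {'$slice': 3}}, i.e. the
# sliced field's projection value is replaced by a MongoDB $slice operator.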
|
python
|
{
"resource": ""
}
|
q14917
|
ConstraintViolationGroup.add_violations
|
train
|
def add_violations(self, violations, record=None):
"""Add constraint violations and associated record.
:param violations: List of violations
:type violations: list(ConstraintViolation)
:param record: Associated record
:type record: dict
:rtype: None
"""
rec = {} if record is None else record
for v in violations:
self._viol.append((v, rec))
|
python
|
{
"resource": ""
}
|
q14918
|
ConstraintSpec._add_complex_section
|
train
|
def _add_complex_section(self, item):
"""Add a section that has a filter and set of constraints
:raise: ValueError if filter or constraints is missing
"""
# extract filter and constraints
try:
fltr = item[self.FILTER_SECT]
except KeyError:
raise ValueError("configuration requires '{}'".format(self.FILTER_SECT))
sample = item.get(self.SAMPLE_SECT, None)
constraints = item.get(self.CONSTRAINT_SECT, None)
section = ConstraintSpecSection(fltr, constraints, sample)
key = section.get_key()
if key in self._sections:
self._sections[key].append(section)
else:
self._sections[key] = [section]
|
python
|
{
"resource": ""
}
|
q14919
|
Validator.validate
|
train
|
def validate(self, coll, constraint_spec, subject='collection'):
"""Validation of a collection.
This is a generator that yields ConstraintViolationGroups.
:param coll: Mongo collection
:type coll: pymongo.Collection
:param constraint_spec: Constraint specification
:type constraint_spec: ConstraintSpec
:param subject: Name of the thing being validated
:type subject: str
:return: Sets of constraint violation, one for each constraint_section
:rtype: ConstraintViolationGroup
:raises: ValidatorSyntaxError
"""
self._spec = constraint_spec
self._progress.set_subject(subject)
self._build(constraint_spec)
for sect_parts in self._sections:
cvg = self._validate_section(subject, coll, sect_parts)
if cvg is not None:
yield cvg
|
python
|
{
"resource": ""
}
|
q14920
|
Validator._validate_section
|
train
|
def _validate_section(self, subject, coll, parts):
"""Validate one section of a spec.
:param subject: Name of subject
:type subject: str
:param coll: The collection to validate
:type coll: pymongo.Collection
:param parts: Section parts
:type parts: Validator.SectionParts
:return: Group of constraint violations, if any, otherwise None
:rtype: ConstraintViolationGroup or None
"""
cvgroup = ConstraintViolationGroup()
cvgroup.subject = subject
# If the constraint is an 'import' of code, treat it differently here
# if self._is_python(parts):
# num_found = self._run_python(cvgroup, coll, parts)
# return None if num_found == 0 else cvgroup
query = parts.cond.to_mongo(disjunction=False)
query.update(parts.body.to_mongo())
cvgroup.condition = parts.cond.to_mongo(disjunction=False)
self._log.debug('Query spec: {}'.format(query))
self._log.debug('Query fields: {}'.format(parts.report_fields))
# Find records that violate 1 or more constraints
cursor = coll.find(query, parts.report_fields, **self._find_kw)
if parts.sampler is not None:
cursor = parts.sampler.sample(cursor)
nbytes, num_dberr, num_rec = 0, 0, 0
while 1:
try:
record = next(cursor)
nbytes += total_size(record)
num_rec += 1
except StopIteration:
self._log.info("collection {}: {:d} records, {:d} bytes, {:d} db-errors"
.format(subject, num_rec, nbytes, num_dberr))
break
except pymongo.errors.PyMongoError as err:
num_dberr += 1
if num_dberr > self._max_dberr > 0:
raise DBError("Too many errors")
self._log.warn("DB.{:d}: {}".format(num_dberr, err))
continue
# report progress
if self._progress:
self._progress.update(num_dberr, nbytes)
# get reasons for badness
violations = self._get_violations(parts.body, record)
cvgroup.add_violations(violations, record)
return None if nbytes == 0 else cvgroup
|
python
|
{
"resource": ""
}
|
q14921
|
Validator._get_violations
|
train
|
def _get_violations(self, query, record):
"""Reverse-engineer the query to figure out why a record was selected.
:param query: MongoDB query
:type query: MongQuery
:param record: Record in question
:type record: dict
:return: Reasons why bad
:rtype: list(ConstraintViolation)
"""
# special case, when no constraints are given
if len(query.all_clauses) == 0:
return [NullConstraintViolation()]
# normal case, check all the constraints
reasons = []
for clause in query.all_clauses:
var_name = None
key = clause.constraint.field.name
op = clause.constraint.op
fval = mongo_get(record, key)
if fval is None:
expected = clause.constraint.value
reasons.append(ConstraintViolation(clause.constraint, 'missing', expected))
continue
if op.is_variable():
# retrieve value for variable
var_name = clause.constraint.value
value = mongo_get(record, var_name, default=None)
if value is None:
reasons.append(ConstraintViolation(clause.constraint, 'missing', var_name))
continue
clause.constraint.value = value # swap out value, temporarily
# take length for size
if op.is_size():
if isinstance(fval, str) or not hasattr(fval, '__len__'):
reasons.append(ConstraintViolation(clause.constraint, type(fval), 'sequence'))
if op.is_variable():
clause.constraint.value = var_name # put original value back
continue
fval = len(fval)
ok, expected = clause.constraint.passes(fval)
if not ok:
reasons.append(ConstraintViolation(clause.constraint, fval, expected))
if op.is_variable():
clause.constraint.value = var_name # put original value back
return reasons
|
python
|
{
"resource": ""
}
|
q14922
|
Validator._build
|
train
|
def _build(self, constraint_spec):
"""Generate queries to execute.
Sets instance variables so that Mongo query strings, etc. can now
be extracted from the object.
:param constraint_spec: Constraint specification
:type constraint_spec: ConstraintSpec
"""
self._sections = []
# For each condition in the spec
for sval in constraint_spec:
rpt_fld = self._base_report_fields.copy()
#print("@@ CONDS = {}".format(sval.filters))
#print("@@ MAIN = {}".format(sval.constraints))
# Constraints
# If the constraint is an external call to Python code
if self._is_python(sval.constraints):
query, proj = self._process_python(sval.constraints)
rpt_fld.update(proj.to_mongo())
# All other constraints, e.g. 'foo > 12'
else:
query = MongoQuery()
if sval.constraints is not None:
groups = self._process_constraint_expressions(sval.constraints)
projection = Projection()
for cg in groups.values():
for c in cg:
projection.add(c.field, c.op, c.value)
query.add_clause(MongoClause(c))
if self._add_exists:
for c in cg.existence_constraints:
query.add_clause(MongoClause(c, exists_main=True))
rpt_fld.update(projection.to_mongo())
# Filters
cond_query = MongoQuery()
if sval.filters is not None:
cond_groups = self._process_constraint_expressions(sval.filters, rev=False)
for cg in cond_groups.values():
for c in cg:
cond_query.add_clause(MongoClause(c, rev=False))
# Done. Add a new 'SectionPart' for the filter and constraint
result = self.SectionParts(cond_query, query, sval.sampler, rpt_fld)
self._sections.append(result)
|
python
|
{
"resource": ""
}
|
q14923
|
Validator._process_constraint_expressions
|
train
|
def _process_constraint_expressions(self, expr_list, conflict_check=True, rev=True):
"""Create and return constraints from expressions in expr_list.
:param expr_list: The expressions
:param conflict_check: If True, check for conflicting expressions within each field
:return: Constraints grouped by field (the key is the field name)
:rtype: dict
"""
# process expressions, grouping by field
groups = {}
for expr in expr_list:
field, raw_op, val = parse_expr(expr)
op = ConstraintOperator(raw_op)
if field not in groups:
groups[field] = ConstraintGroup(Field(field, self._aliases))
groups[field].add_constraint(op, val)
# add existence constraints
for cgroup in groups.values():
cgroup.add_existence(rev)
# optionally check for conflicts
if conflict_check:
# check for conflicts in each group
for field_name, group in groups.items():
conflicts = group.get_conflicts()
if conflicts:
raise ValueError('Conflicts for field {}: {}'.format(field_name, conflicts))
return groups
|
python
|
{
"resource": ""
}
|
q14924
|
Validator._is_python
|
train
|
def _is_python(self, constraint_list):
"""Check whether constraint is an import of Python code.
:param constraint_list: List of raw constraints from YAML file
:type constraint_list: list(str)
:return: True if this refers to an import of code, False otherwise
:raises: ValidatorSyntaxError
"""
if len(constraint_list) == 1 and \
PythonMethod.constraint_is_method(constraint_list[0]):
return True
if len(constraint_list) > 1 and \
any(filter(PythonMethod.constraint_is_method, constraint_list)):
condensed_list = '/'.join(constraint_list)
err = PythonMethod.CANNOT_COMBINE_ERR
raise ValidatorSyntaxError(condensed_list, err)
return False
|
python
|
{
"resource": ""
}
|
q14925
|
Validator.set_aliases
|
train
|
def set_aliases(self, new_value):
"Set aliases and wrap errors in ValueError"
try:
self.aliases = new_value
except Exception as err:
raise ValueError("invalid value: {}".format(err))
|
python
|
{
"resource": ""
}
|
q14926
|
Sampler.sample
|
train
|
def sample(self, cursor):
"""Extract records randomly from the database.
Continue until the target proportion of the items have been
extracted, or until `min_items` if this is larger.
If `max_items` is non-negative, do not extract more than these.
This function is a generator, yielding items incrementally.
:param cursor: Cursor to sample
:type cursor: pymongo.cursor.Cursor
:return: yields each item
:rtype: dict
:raise: ValueError, if max_items is valid and less than `min_items`
or if target collection is empty
"""
count = cursor.count()
# special case: empty collection
if count == 0:
self._empty = True
raise ValueError("Empty collection")
# special case: entire collection
if self.p >= 1 and self.max_items <= 0:
for item in cursor:
yield item
return
# calculate target number of items to select
if self.max_items <= 0:
n_target = max(self.min_items, self.p * count)
else:
if self.p <= 0:
n_target = max(self.min_items, self.max_items)
else:
n_target = max(self.min_items, min(self.max_items, self.p * count))
if n_target == 0:
raise ValueError("No items requested")
# select first `n_target` items that pop up with
# probability self.p
# This is actually biased to items at the beginning
# of the file if n_target is smaller than (p * count),
n = 0
while n < n_target:
try:
item = next(cursor)
except StopIteration:
# need to keep looping through data until
# we get all our items!
cursor.rewind()
item = next(cursor)
if self._keep():
yield item
n += 1
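# Illustration (editor's note) of the target-size calculation above: with
# p=0.1, min_items=50, max_items=-1 and a 1000-record cursor,
# n_target = max(50, 0.1 * 1000) = 100; with max_items=80 instead,
# n_target = max(50, min(80, 100)) = 80.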
|
python
|
{
"resource": ""
}
|
q14927
|
available_backends
|
train
|
def available_backends():
"""Lists the currently available backend types"""
print 'The following LiveSync agents are available:'
for name, backend in current_plugin.backend_classes.iteritems():
print cformat(' - %{white!}{}%{reset}: {} ({})').format(name, backend.title, backend.description)
|
python
|
{
"resource": ""
}
|
q14928
|
agents
|
train
|
def agents():
"""Lists the currently active agents"""
print 'The following LiveSync agents are active:'
agent_list = LiveSyncAgent.find().order_by(LiveSyncAgent.backend_name, db.func.lower(LiveSyncAgent.name)).all()
table_data = [['ID', 'Name', 'Backend', 'Initial Export', 'Queue']]
for agent in agent_list:
initial = (cformat('%{green!}done%{reset}') if agent.initial_data_exported else
cformat('%{yellow!}pending%{reset}'))
if agent.backend is None:
backend_title = cformat('%{red!}invalid backend ({})%{reset}').format(agent.backend_name)
else:
backend_title = agent.backend.title
table_data.append([unicode(agent.id), agent.name, backend_title, initial,
unicode(agent.queue.filter_by(processed=False).count())])
table = AsciiTable(table_data)
table.justify_columns[4] = 'right'
print table.table
if not all(a.initial_data_exported for a in agent_list):
print
print "You need to perform the initial data export for some agents."
print cformat("To do so, run "
"%{yellow!}indico livesync initial_export %{reset}%{yellow}<agent_id>%{reset} for those agents.")
|
python
|
{
"resource": ""
}
|
q14929
|
initial_export
|
train
|
def initial_export(agent_id, force):
"""Performs the initial data export for an agent"""
agent = LiveSyncAgent.find_first(id=agent_id)
if agent is None:
print 'No such agent'
return
if agent.backend is None:
print cformat('Cannot run agent %{red!}{}%{reset} (backend not found)').format(agent.name)
return
print cformat('Selected agent: %{white!}{}%{reset} ({})').format(agent.name, agent.backend.title)
if agent.initial_data_exported and not force:
print 'The initial export has already been performed for this agent.'
print cformat('To re-run it, use %{yellow!}--force%{reset}')
return
agent.create_backend().run_initial_export(Event.find(is_deleted=False))
agent.initial_data_exported = True
db.session.commit()
|
python
|
{
"resource": ""
}
|
q14930
|
run
|
train
|
def run(agent_id, force=False):
"""Runs the livesync agent"""
if agent_id is None:
agent_list = LiveSyncAgent.find_all()
else:
agent = LiveSyncAgent.find_first(id=agent_id)
if agent is None:
print 'No such agent'
return
agent_list = [agent]
for agent in agent_list:
if agent.backend is None:
print cformat('Skipping agent: %{red!}{}%{reset} (backend not found)').format(agent.name)
continue
if not agent.initial_data_exported and not force:
print cformat('Skipping agent: %{red!}{}%{reset} (initial export not performed)').format(agent.name)
continue
print cformat('Running agent: %{white!}{}%{reset}').format(agent.name)
try:
agent.create_backend().run()
db.session.commit()
except:
db.session.rollback()
raise
|
python
|
{
"resource": ""
}
|
q14931
|
Task._serialize_inputs
|
train
|
def _serialize_inputs(inputs):
"""Serialize task input dictionary"""
serialized_inputs = {}
for input_id, input_value in inputs.items():
if isinstance(input_value, list):
serialized_list = Task._serialize_input_list(input_value)
serialized_inputs[input_id] = serialized_list
else:
if isinstance(input_value, File):
input_value = Task._to_api_file_format(input_value)
serialized_inputs[input_id] = input_value
return serialized_inputs
|
python
|
{
"resource": ""
}
|
q14932
|
Task._serialize_input_list
|
train
|
def _serialize_input_list(input_value):
"""Recursively serialize task input list"""
input_list = []
for item in input_value:
if isinstance(item, list):
input_list.append(Task._serialize_input_list(item))
else:
if isinstance(item, File):
item = Task._to_api_file_format(item)
input_list.append(item)
return input_list
|
python
|
{
"resource": ""
}
|
q14933
|
PiwikQueryReportEventGraphBase.get_result
|
train
|
def get_result(self):
"""Perform the call and return the graph data
:return: Encoded PNG graph data string to be inserted in a `src`
attribute of an HTML img tag.
"""
png = self.call()
if png is None:
return
if png.startswith('GD extension must be loaded'):
current_plugin.logger.warning('Piwik server answered on ImageGraph.get: %s', png)
return
return 'data:image/png;base64,{}'.format(b64encode(png))
|
python
|
{
"resource": ""
}
|
q14934
|
Chatroom.server
|
train
|
def server(self):
"""The server name of the chatroom.
Usually the default one unless a custom one is set.
"""
from indico_chat.plugin import ChatPlugin
return self.custom_server or ChatPlugin.settings.get('muc_server')
|
python
|
{
"resource": ""
}
|
q14935
|
ChatroomEventAssociation.find_for_event
|
train
|
def find_for_event(cls, event, include_hidden=False, **kwargs):
"""Returns a Query that retrieves the chatrooms for an event
:param event: an indico event (with a numeric ID)
:param include_hidden: if hidden chatrooms should be included, too
:param kwargs: extra kwargs to pass to ``find()``
"""
query = cls.find(event_id=event.id, **kwargs)
if not include_hidden:
query = query.filter(~cls.hidden)
return query
|
python
|
{
"resource": ""
}
|
q14936
|
ChatroomEventAssociation.delete
|
train
|
def delete(self, reason=''):
"""Deletes the event chatroom and if necessary the chatroom, too.
:param reason: reason for the deletion
:return: True if the associated chatroom was also
deleted, otherwise False
"""
db.session.delete(self)
db.session.flush()
if not self.chatroom.events:
db.session.delete(self.chatroom)
db.session.flush()
delete_room(self.chatroom, reason)
return True
return False
|
python
|
{
"resource": ""
}
|
q14937
|
obj_deref
|
train
|
def obj_deref(ref):
"""Returns the object identified by `ref`"""
from indico_livesync.models.queue import EntryType
if ref['type'] == EntryType.category:
return Category.get_one(ref['category_id'])
elif ref['type'] == EntryType.event:
return Event.get_one(ref['event_id'])
elif ref['type'] == EntryType.session:
return Session.get_one(ref['session_id'])
elif ref['type'] == EntryType.contribution:
return Contribution.get_one(ref['contrib_id'])
elif ref['type'] == EntryType.subcontribution:
return SubContribution.get_one(ref['subcontrib_id'])
else:
raise ValueError('Unexpected object type: {}'.format(ref['type']))
|
python
|
{
"resource": ""
}
|
q14938
|
clean_old_entries
|
train
|
def clean_old_entries():
"""Deletes obsolete entries from the queues"""
from indico_livesync.plugin import LiveSyncPlugin
from indico_livesync.models.queue import LiveSyncQueueEntry
queue_entry_ttl = LiveSyncPlugin.settings.get('queue_entry_ttl')
if not queue_entry_ttl:
return
expire_threshold = now_utc() - timedelta(days=queue_entry_ttl)
LiveSyncQueueEntry.find(LiveSyncQueueEntry.processed,
LiveSyncQueueEntry.timestamp < expire_threshold).delete(synchronize_session='fetch')
|
python
|
{
"resource": ""
}
|
q14939
|
get_excluded_categories
|
train
|
def get_excluded_categories():
"""Get excluded category IDs."""
from indico_livesync.plugin import LiveSyncPlugin
return {int(x['id']) for x in LiveSyncPlugin.settings.get('excluded_categories')}
|
python
|
{
"resource": ""
}
|
q14940
|
compound_id
|
train
|
def compound_id(obj):
"""Generate a hierarchical compound ID, separated by dots."""
if isinstance(obj, (Category, Session)):
raise TypeError('Compound IDs are not supported for this entry type')
elif isinstance(obj, Event):
return unicode(obj.id)
elif isinstance(obj, Contribution):
return '{}.{}'.format(obj.event_id, obj.id)
elif isinstance(obj, SubContribution):
return '{}.{}.{}'.format(obj.contribution.event_id, obj.contribution_id, obj.id)
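# Illustration (editor's note): an Event with id 42 yields '42', a Contribution
# with id 7 in that event yields '42.7', and a SubContribution with id 3 under
# that contribution yields '42.7.3'.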
|
python
|
{
"resource": ""
}
|
q14941
|
track_download_request
|
train
|
def track_download_request(download_url, download_title):
"""Track a download in Piwik"""
from indico_piwik.plugin import PiwikPlugin
if not download_url:
raise ValueError("download_url can't be empty")
if not download_title:
raise ValueError("download_title can't be empty")
request = PiwikRequest(server_url=PiwikPlugin.settings.get('server_api_url'),
site_id=PiwikPlugin.settings.get('site_id_events'),
api_token=PiwikPlugin.settings.get('server_token'),
query_script=PiwikPlugin.track_script)
action_url = quote(download_url)
dt = datetime.now()
request.call(idsite=request.site_id,
rec=1,
action_name=quote(download_title.encode('utf-8')),
url=action_url,
download=action_url,
h=dt.hour, m=dt.minute, s=dt.second)
|
python
|
{
"resource": ""
}
|
q14942
|
LiveSyncAgent.backend
|
train
|
def backend(self):
"""Returns the backend class"""
from indico_livesync.plugin import LiveSyncPlugin
return LiveSyncPlugin.instance.backend_classes.get(self.backend_name)
|
python
|
{
"resource": ""
}
|
q14943
|
notify_created
|
train
|
def notify_created(room, event, user):
"""Notifies about the creation of a chatroom.
:param room: the chatroom
:param event: the event
:param user: the user performing the action
"""
tpl = get_plugin_template_module('emails/created.txt', chatroom=room, event=event, user=user)
_send(event, tpl)
|
python
|
{
"resource": ""
}
|
q14944
|
notify_attached
|
train
|
def notify_attached(room, event, user):
"""Notifies about an existing chatroom being attached to an event.
:param room: the chatroom
:param event: the event
:param user: the user performing the action
"""
tpl = get_plugin_template_module('emails/attached.txt', chatroom=room, event=event, user=user)
_send(event, tpl)
|
python
|
{
"resource": ""
}
|
q14945
|
notify_modified
|
train
|
def notify_modified(room, event, user):
"""Notifies about the modification of a chatroom.
:param room: the chatroom
:param event: the event
:param user: the user performing the action
"""
tpl = get_plugin_template_module('emails/modified.txt', chatroom=room, event=event, user=user)
_send(event, tpl)
|
python
|
{
"resource": ""
}
|
q14946
|
notify_deleted
|
train
|
def notify_deleted(room, event, user, room_deleted):
"""Notifies about the deletion of a chatroom.
:param room: the chatroom
:param event: the event
:param user: the user performing the action; `None` if due to event deletion
:param room_deleted: if the room has been deleted from the jabber server
"""
tpl = get_plugin_template_module('emails/deleted.txt', chatroom=room, event=event, user=user,
room_deleted=room_deleted)
_send(event, tpl)
|
python
|
{
"resource": ""
}
|
q14947
|
UPartedFile.get_parts
|
train
|
def get_parts(self):
"""
Partitions the file and saves the parts to be uploaded
in memory.
"""
parts = []
start_byte = 0
for i in range(1, self.total + 1):
end_byte = start_byte + self.part_size
if end_byte >= self.file_size - 1:
end_byte = self.file_size
parts.append({
'part': i,
'offset': start_byte,
'limit': end_byte
})
start_byte = end_byte
return parts
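# Illustration (editor's note): with file_size=10, part_size=4 and total=3,
# this returns [{'part': 1, 'offset': 0, 'limit': 4},
#               {'part': 2, 'offset': 4, 'limit': 8},
#               {'part': 3, 'offset': 8, 'limit': 10}].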
|
python
|
{
"resource": ""
}
|
q14948
|
Upload._verify_part_number
|
train
|
def _verify_part_number(self):
"""
Verifies that the total number of parts is smaller than 10^5, which
is the maximum number of parts.
"""
total = int(math.ceil(self._file_size / self._part_size))
if total > PartSize.MAXIMUM_TOTAL_PARTS:
self._status = TransferState.FAILED
raise SbgError(
'Total parts = {}. Maximum number of parts is {}'.format(
total, PartSize.MAXIMUM_TOTAL_PARTS)
)
|
python
|
{
"resource": ""
}
|
q14949
|
Upload._verify_part_size
|
train
|
def _verify_part_size(self):
"""
Verifies that the part size is smaller than the maximum part size,
which is 5GB.
"""
if self._part_size > PartSize.MAXIMUM_UPLOAD_SIZE:
self._status = TransferState.FAILED
raise SbgError('Part size = {}b. Maximum part size is {}b'.format(
self._part_size, PartSize.MAXIMUM_UPLOAD_SIZE)
)
|
python
|
{
"resource": ""
}
|
q14950
|
Upload._verify_file_size
|
train
|
def _verify_file_size(self):
"""
Verifies that the file is smaller than 5TB, which is the maximum
that is allowed for upload.
"""
if self._file_size > PartSize.MAXIMUM_OBJECT_SIZE:
self._status = TransferState.FAILED
raise SbgError('File size = {}b. Maximum file size is {}b'.format(
self._file_size, PartSize.MAXIMUM_OBJECT_SIZE)
)
|
python
|
{
"resource": ""
}
|
q14951
|
Upload._initialize_upload
|
train
|
def _initialize_upload(self):
"""
Initializes the upload on the API server by submitting the information
about the project, the file name, file size and the part size that is
going to be used during multipart upload.
"""
init_data = {
'name': self._file_name,
'part_size': self._part_size,
'size': self._file_size
}
if self._project:
init_data['project'] = self._project
elif self._parent:
init_data['parent'] = self._parent
init_params = {}
if self._overwrite:
init_params['overwrite'] = self._overwrite
try:
response = self._api.post(
self._URL['upload_init'], data=init_data, params=init_params
)
self._upload_id = response.json()['upload_id']
except SbgError as e:
self._status = TransferState.FAILED
raise SbgError(
'Unable to initialize upload! Failed to get upload id! '
'Reason: {}'.format(e.message)
)
|
python
|
{
"resource": ""
}
|
q14952
|
Upload._finalize_upload
|
train
|
def _finalize_upload(self):
"""
Finalizes the upload on the API server.
"""
from sevenbridges.models.file import File
try:
response = self._api.post(
self._URL['upload_complete'].format(upload_id=self._upload_id)
).json()
self._result = File(api=self._api, **response)
self._status = TransferState.COMPLETED
except SbgError as e:
self._status = TransferState.FAILED
raise SbgError(
'Failed to complete upload! Reason: {}'.format(e.message)
)
|
python
|
{
"resource": ""
}
|
q14953
|
Upload._abort_upload
|
train
|
def _abort_upload(self):
"""
Aborts the upload on the API server.
"""
try:
self._api.delete(
self._URL['upload_info'].format(upload_id=self._upload_id)
)
except SbgError as e:
self._status = TransferState.FAILED
raise SbgError(
'Failed to abort upload! Reason: {}'.format(e.message)
)
|
python
|
{
"resource": ""
}
|
q14954
|
Upload.add_callback
|
train
|
def add_callback(self, callback=None, errorback=None):
"""
Adds a callback that will be called when the upload
finishes successfully or when an error is raised.
"""
self._callback = callback
self._errorback = errorback
|
python
|
{
"resource": ""
}
|
q14955
|
process_records
|
train
|
def process_records(records):
"""Converts queue entries into object changes.
:param records: an iterable containing `LiveSyncQueueEntry` objects
:return: a dict mapping object references to `SimpleChange` bitsets
"""
changes = defaultdict(int)
cascaded_update_records = set()
cascaded_delete_records = set()
for record in records:
if record.change != ChangeType.deleted and record.object is None:
# Skip entries which are not deletions but have no corresponding objects.
# Probably they are updates for objects that got deleted afterwards.
continue
if record.change == ChangeType.created:
assert record.type != EntryType.category
changes[record.object] |= SimpleChange.created
elif record.change == ChangeType.deleted:
assert record.type != EntryType.category
cascaded_delete_records.add(record)
elif record.change in {ChangeType.moved, ChangeType.protection_changed}:
cascaded_update_records.add(record)
elif record.change == ChangeType.data_changed:
assert record.type != EntryType.category
changes[record.object] |= SimpleChange.updated
for obj in _process_cascaded_category_contents(cascaded_update_records):
changes[obj] |= SimpleChange.updated
for obj in _process_cascaded_event_contents(cascaded_delete_records):
changes[obj] |= SimpleChange.deleted
return changes
|
python
|
{
"resource": ""
}
|
q14956
|
_process_cascaded_category_contents
|
train
|
def _process_cascaded_category_contents(records):
"""
Travel from categories to subcontributions, flattening the whole event structure.
Yields everything that it finds (except for elements whose protection has changed
but are not inheriting their protection settings from anywhere).
:param records: queue records to process
"""
category_prot_records = {rec.category_id for rec in records if rec.type == EntryType.category
and rec.change == ChangeType.protection_changed}
category_move_records = {rec.category_id for rec in records if rec.type == EntryType.category
and rec.change == ChangeType.moved}
changed_events = set()
category_prot_records -= category_move_records # A move already implies sending the whole record
# Protection changes are handled differently, as there may not be the need to re-generate the record
if category_prot_records:
for categ in Category.find(Category.id.in_(category_prot_records)):
cte = categ.get_protection_parent_cte()
# Update only children that inherit
inheriting_categ_children = (Event.query
.join(cte, db.and_((Event.category_id == cte.c.id),
(cte.c.protection_parent == categ.id))))
inheriting_direct_children = Event.find((Event.category_id == categ.id) & Event.is_inheriting)
changed_events.update(itertools.chain(inheriting_direct_children, inheriting_categ_children))
# Add move operations and explicitly-passed event records
if category_move_records:
changed_events.update(Event.find(Event.category_chain_overlaps(category_move_records)))
for elem in _process_cascaded_event_contents(records, additional_events=changed_events):
yield elem
|
python
|
{
"resource": ""
}
|
q14957
|
VidyoPlugin.create_room
|
train
|
def create_room(self, vc_room, event):
"""Create a new Vidyo room for an event, given a VC room.
In order to create the Vidyo room, the function will try each of the
available identities of the user, based on the authenticators defined
in the Vidyo plugin's settings, in that order.
:param vc_room: VCRoom -- The VC room from which to create the Vidyo
room
:param event: Event -- The event to which the Vidyo room will be attached
"""
client = AdminClient(self.settings)
owner = retrieve_principal(vc_room.data['owner'])
login_gen = iter_user_identities(owner)
login = next(login_gen, None)
if login is None:
raise VCRoomError(_("No valid Vidyo account found for this user"), field='owner_user')
extension_gen = iter_extensions(self.settings.get('indico_room_prefix'), event.id)
extension = next(extension_gen)
while True:
room_mode = {
'isLocked': False,
'hasPIN': bool(vc_room.data['room_pin']),
'hasModeratorPIN': bool(vc_room.data['moderation_pin'])
}
if room_mode['hasPIN']:
room_mode['roomPIN'] = vc_room.data['room_pin']
if room_mode['hasModeratorPIN']:
room_mode['moderatorPIN'] = vc_room.data['moderation_pin']
room_obj = client.create_room_object(
name=vc_room.name,
RoomType='Public',
ownerName=login,
extension=extension,
groupName=self.settings.get('room_group_name'),
description=vc_room.data['description'],
RoomMode=room_mode)
if room_obj.RoomMode.hasPIN:
room_obj.RoomMode.roomPIN = vc_room.data['room_pin']
if room_obj.RoomMode.hasModeratorPIN:
room_obj.RoomMode.moderatorPIN = vc_room.data['moderation_pin']
try:
client.add_room(room_obj)
except APIException as err:
err_msg = err.message
if err_msg.startswith('Room exist for name'):
raise VCRoomError(_("Room name already in use"), field='name')
elif err_msg.startswith('Member not found for ownerName'):
login = next(login_gen, None)
if login is None:
raise VCRoomError(_("No valid Vidyo account found for this user"), field='owner_user')
elif err_msg.startswith('Room exist for extension'):
extension = next(extension_gen)
else:
raise
else:
# get room back, in order to fetch Vidyo-set parameters
created_room = client.find_room(extension)
if not created_room:
raise VCRoomNotFoundError(_("Could not find newly created room in Vidyo"))
vc_room.data.update({
'vidyo_id': unicode(created_room.roomID),
'url': created_room.RoomMode.roomURL,
'owner_identity': created_room.ownerName
})
flag_modified(vc_room, 'data')
vc_room.vidyo_extension = VidyoExtension(vc_room_id=vc_room.id, extension=int(created_room.extension),
owned_by_user=owner)
client.set_automute(created_room.roomID, vc_room.data['auto_mute'])
break
|
python
|
{
"resource": ""
}
|
q14958
|
LiveSyncQueueEntry.object
|
train
|
def object(self):
"""Return the changed object."""
if self.type == EntryType.category:
return self.category
elif self.type == EntryType.event:
return self.event
elif self.type == EntryType.session:
return self.session
elif self.type == EntryType.contribution:
return self.contribution
elif self.type == EntryType.subcontribution:
return self.subcontribution
|
python
|
{
"resource": ""
}
|
q14959
|
LiveSyncQueueEntry.object_ref
|
train
|
def object_ref(self):
"""Return the reference of the changed object."""
return ImmutableDict(type=self.type, category_id=self.category_id, event_id=self.event_id,
session_id=self.session_id, contrib_id=self.contrib_id, subcontrib_id=self.subcontrib_id)
|
python
|
{
"resource": ""
}
|
q14960
|
LiveSyncQueueEntry.create
|
train
|
def create(cls, changes, ref, excluded_categories=set()):
"""Create a new change in all queues.
:param changes: the change types, an iterable containing
:class:`ChangeType`
:param ref: the object reference (returned by `obj_ref`)
of the changed object
:param excluded_categories: set of categories (IDs) whose items
will not be tracked
"""
ref = dict(ref)
obj = obj_deref(ref)
if isinstance(obj, Category):
if any(c.id in excluded_categories for c in obj.chain_query):
return
else:
event = obj if isinstance(obj, Event) else obj.event
if event.category not in g.setdefault('livesync_excluded_categories_checked', {}):
g.livesync_excluded_categories_checked[event.category] = excluded_categories & set(event.category_chain)
if g.livesync_excluded_categories_checked[event.category]:
return
try:
agents = g.livesync_agents
except AttributeError:
agents = g.livesync_agents = LiveSyncAgent.query.all()
for change in changes:
for agent in agents:
entry = cls(agent=agent, change=change, **ref)
db.session.add(entry)
db.session.flush()
|
python
|
{
"resource": ""
}
|
q14961
|
decompose_code
|
train
|
def decompose_code(code):
"""
Decomposes a MARC "code" into tag, ind1, ind2, subcode
"""
code = "%-6s" % code
ind1 = code[3:4]
if ind1 == " ": ind1 = "_"
ind2 = code[4:5]
if ind2 == " ": ind2 = "_"
subcode = code[5:6]
if subcode == " ": subcode = None
return (code[0:3], ind1, ind2, subcode)
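# Illustration (editor's note): decompose_code("100__a") returns
# ("100", "_", "_", "a") and decompose_code("700") returns
# ("700", "_", "_", None), since blank indicators map to "_" and a blank
# subfield code maps to None.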
|
python
|
{
"resource": ""
}
|
q14962
|
InvenioConnector._init_browser
|
train
|
def _init_browser(self):
"""
Override this method with the appropriate way to prepare a logged-in
browser.
"""
self.browser = mechanize.Browser()
self.browser.set_handle_robots(False)
self.browser.open(self.server_url + "/youraccount/login")
self.browser.select_form(nr=0)
try:
self.browser['nickname'] = self.user
self.browser['password'] = self.password
except:
self.browser['p_un'] = self.user
self.browser['p_pw'] = self.password
# Set login_method to be writable
self.browser.form.find_control('login_method').readonly = False
self.browser['login_method'] = self.login_method
self.browser.submit()
|
python
|
{
"resource": ""
}
|
q14963
|
InvenioConnector.get_record
|
train
|
def get_record(self, recid, read_cache=True):
"""
Returns the record with given recid
"""
if recid in self.cached_records or not read_cache:
return self.cached_records[recid]
else:
return self.search(p="recid:" + str(recid))
|
python
|
{
"resource": ""
}
|
q14964
|
InvenioConnector.upload_marcxml
|
train
|
def upload_marcxml(self, marcxml, mode):
"""
Uploads a record to the server
Parameters:
marcxml - *str* the XML to upload.
mode - *str* the mode to use for the upload.
"-i" insert new records
"-r" replace existing records
"-c" correct fields of records
"-a" append fields to records
"-ir" insert record or replace if it exists
"""
if mode not in ["-i", "-r", "-c", "-a", "-ir"]:
raise NameError, "Incorrect mode " + str(mode)
# Are we running locally? If so, submit directly
if self.local:
(code, marcxml_filepath) = tempfile.mkstemp(prefix="upload_%s" % \
time.strftime("%Y%m%d_%H%M%S_",
time.localtime()))
marcxml_file_d = os.fdopen(code, "w")
marcxml_file_d.write(marcxml)
marcxml_file_d.close()
return task_low_level_submission("bibupload", "", mode, marcxml_filepath)
else:
params = urllib.urlencode({'file': marcxml,
'mode': mode})
## We don't use self.browser as batchuploader is protected by IP
opener = urllib2.build_opener()
opener.addheaders = [('User-Agent', CFG_USER_AGENT)]
return opener.open(self.server_url + "/batchuploader/robotupload", params,)
|
python
|
{
"resource": ""
}
|
q14965
|
InvenioConnector._validate_server_url
|
train
|
def _validate_server_url(self):
"""Validates self.server_url"""
try:
request = requests.head(self.server_url)
if request.status_code >= 400:
raise InvenioConnectorServerError(
"Unexpected status code '%d' accessing URL: %s"
% (request.status_code, self.server_url))
except (InvalidSchema, MissingSchema) as err:
raise InvenioConnectorServerError(
"Bad schema, expecting http:// or https://:\n %s" % (err,))
except ConnectionError as err:
raise InvenioConnectorServerError(
"Couldn't establish connection to '%s':\n %s"
% (self.server_url, err))
except InvalidURL as err:
raise InvenioConnectorServerError(
"Invalid URL '%s':\n %s"
% (self.server_url, err))
except RequestException as err:
raise InvenioConnectorServerError(
"Unknown error connecting to '%s':\n %s"
% (self.server_url, err))
|
python
|
{
"resource": ""
}
|
q14966
|
PiwikQueryReportEventMetricReferrers.get_result
|
train
|
def get_result(self):
"""Perform the call and return a list of referrers"""
result = get_json_from_remote_server(self.call)
referrers = list(result)
for referrer in referrers:
referrer['sum_visit_length'] = stringify_seconds(referrer['sum_visit_length'])
return sorted(referrers, key=itemgetter('nb_visits'), reverse=True)[0:10]
|
python
|
{
"resource": ""
}
|
q14967
|
PiwikQueryReportEventMetricPeakDateAndVisitors.get_result
|
train
|
def get_result(self):
"""Perform the call and return the peak date and how many users"""
result = get_json_from_remote_server(self.call)
if result:
date, value = max(result.iteritems(), key=itemgetter(1))
return {'date': date, 'users': value}
else:
return {'date': "No Data", 'users': 0}
|
python
|
{
"resource": ""
}
|
q14968
|
PiwikRequest.call
|
train
|
def call(self, default_response=None, **query_params):
"""Perform a query to the Piwik server and return the response.
:param default_response: Return value in case the query fails
:param query_params: Dictionary with the parameters of the query
"""
query_url = self.get_query_url(**query_params)
return self._perform_call(query_url, default_response)
|
python
|
{
"resource": ""
}
|
q14969
|
PiwikRequest.get_query
|
train
|
def get_query(self, query_params=None):
"""Return a query string"""
if query_params is None:
query_params = {}
query = ''
query_params['idSite'] = self.site_id
if self.api_token is not None:
query_params['token_auth'] = self.api_token
for key, value in query_params.iteritems():
if isinstance(value, list):
value = ','.join(value)
query += '{}={}&'.format(str(key), str(value))
return query[:-1]
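# Illustration (editor's note): with site_id=1 and no api_token,
# get_query({'date': 'today', 'period': 'day'}) produces a string such as
# 'date=today&period=day&idSite=1' (key order follows dict iteration order);
# list values are joined with commas.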
|
python
|
{
"resource": ""
}
|
q14970
|
PiwikRequest._perform_call
|
train
|
def _perform_call(self, query_url, default_response=None, timeout=10):
"""Returns the raw results from the API"""
try:
response = requests.get(query_url, timeout=timeout)
except socket.timeout:
current_plugin.logger.warning("Timeout contacting Piwik server")
return default_response
except Exception:
current_plugin.logger.exception("Unable to connect")
return default_response
return response.content
|
python
|
{
"resource": ""
}
|
q14971
|
Uploader.run
|
train
|
def run(self, records):
"""Runs the batch upload
:param records: an iterable containing queue entries
"""
self_name = type(self).__name__
for i, batch in enumerate(grouper(records, self.BATCH_SIZE, skip_missing=True), 1):
self.logger.info('%s processing batch %d', self_name, i)
try:
for j, proc_batch in enumerate(grouper(
process_records(batch).iteritems(), self.BATCH_SIZE, skip_missing=True), 1):
self.logger.info('%s uploading chunk #%d (batch %d)', self_name, j, i)
self.upload_records({k: v for k, v in proc_batch}, from_queue=True)
except Exception:
self.logger.exception('%s could not upload batch', self_name)
return
self.logger.info('%s finished batch %d', self_name, i)
self.processed_records(batch)
self.logger.info('%s finished', self_name)
|
python
|
{
"resource": ""
}
|
q14972
|
Uploader.run_initial
|
train
|
def run_initial(self, events):
"""Runs the initial batch upload
:param events: an iterable containing events
"""
self_name = type(self).__name__
for i, batch in enumerate(grouper(events, self.INITIAL_BATCH_SIZE, skip_missing=True), 1):
self.logger.debug('%s processing initial batch %d', self_name, i)
for j, processed_batch in enumerate(grouper(
batch, self.BATCH_SIZE, skip_missing=True), 1):
self.logger.info('%s uploading initial chunk #%d (batch %d)', self_name, j, i)
self.upload_records(processed_batch, from_queue=False)
|
python
|
{
"resource": ""
}
|
q14973
|
Uploader.processed_records
|
train
|
def processed_records(self, records):
"""Executed after successfully uploading a batch of records from the queue.
:param records: a list of queue entries
"""
for record in records:
self.logger.debug('Marking as processed: %s', record)
record.processed = True
db.session.commit()
|
python
|
{
"resource": ""
}
|
q14974
|
cli
|
train
|
def cli():
"""Migrate data to S3.
Use the `copy` subcommand to copy data to S3. This can be done
safely while Indico is running. At the end it will show you what
you need to add to your `indico.conf`.
Once you updated your config with the new storage backends, you
can use the `apply` subcommand to update your database so files
will actually be loaded using the new S3 storage backends.
In case you ever need to switch back to your previous storage,
you can use `revert` to undo the database changes.
"""
if config.DB_LOG:
click.secho('Warning: The database logger is currently enabled (DB_LOG = True).\n'
'This will slow down the migration. Unless your database is very small, please disable it.',
fg='yellow')
click.confirm('Continue anyway?', abort=True)
|
python
|
{
"resource": ""
}
|
q14975
|
DeconzSession.start
|
train
|
def start(self) -> None:
"""Connect websocket to deCONZ."""
if self.config:
self.websocket = self.ws_client(
self.loop, self.session, self.host,
self.config.websocketport, self.async_session_handler)
self.websocket.start()
else:
_LOGGER.error('No deCONZ config available')
|
python
|
{
"resource": ""
}
|
q14976
|
DeconzSession.async_load_parameters
|
train
|
async def async_load_parameters(self) -> bool:
"""Load deCONZ parameters.
Returns lists of indices of which devices were added.
"""
data = await self.async_get_state('')
_LOGGER.debug(pformat(data))
config = data.get('config', {})
groups = data.get('groups', {})
lights = data.get('lights', {})
sensors = data.get('sensors', {})
if not self.config:
self.config = DeconzConfig(config)
# Update scene for existing groups
for group_id, group in groups.items():
if group_id in self.groups:
self.groups[group_id].async_add_scenes(
group.get('scenes'), self.async_put_state)
self.groups.update({
group_id: DeconzGroup(group_id, group, self.async_put_state)
for group_id, group in groups.items()
if group_id not in self.groups
})
self.lights.update({
light_id: DeconzLight(light_id, light, self.async_put_state)
for light_id, light in lights.items()
if light_id not in self.lights
})
self.update_group_color(self.lights.keys())
self.scenes.update({
group.id + '_' + scene.id: scene
for group in self.groups.values()
for scene in group.scenes.values()
if group.id + '_' + scene.id not in self.scenes
})
self.sensors.update({
sensor_id: create_sensor(sensor_id, sensor, self.async_put_state)
for sensor_id, sensor in sensors.items()
if supported_sensor(sensor) and sensor_id not in self.sensors
})
|
python
|
{
"resource": ""
}
|
q14977
|
DeconzSession.async_put_state
|
train
|
async def async_put_state(self, field: str, data: dict) -> dict:
"""Set state of object in deCONZ.
Field is a string representing a specific device in deCONZ
e.g. field='/lights/1/state'.
Data is a json object with what data you want to alter
e.g. data={'on': True}.
See Dresden Elektroniks REST API documentation for details:
http://dresden-elektronik.github.io/deconz-rest-doc/rest/
"""
session = self.session.put
url = self.api_url + field
jsondata = json.dumps(data)
response_dict = await async_request(session, url, data=jsondata)
return response_dict
|
python
|
{
"resource": ""
}
|
q14978
|
DeconzSession.async_get_state
|
train
|
async def async_get_state(self, field: str) -> dict:
"""Get state of object in deCONZ.
Field is a string representing an API endpoint or lower
e.g. field='/lights'.
See Dresden Elektroniks REST API documentation for details:
http://dresden-elektronik.github.io/deconz-rest-doc/rest/
"""
session = self.session.get
url = self.api_url + field
response_dict = await async_request(session, url)
return response_dict
|
python
|
{
"resource": ""
}
|
q14979
|
DeconzSession.async_session_handler
|
train
|
def async_session_handler(self, signal: str) -> None:
"""Signalling from websocket.
data - new data available for processing.
state - network state has changed.
"""
if signal == 'data':
self.async_event_handler(self.websocket.data)
elif signal == 'state':
if self.async_connection_status_callback:
self.async_connection_status_callback(
self.websocket.state == 'running')
|
python
|
{
"resource": ""
}
|
q14980
|
DeconzSession.async_event_handler
|
train
|
def async_event_handler(self, event: dict) -> None:
"""Receive event from websocket and identifies where the event belong.
{
"t": "event",
"e": "changed",
"r": "sensors",
"id": "12",
"state": { "buttonevent": 2002 }
}
"""
if event['e'] == 'added':
if event['r'] == 'lights' and event['id'] not in self.lights:
device_type = 'light'
device = self.lights[event['id']] = DeconzLight(
event['id'], event['light'], self.async_put_state)
elif event['r'] == 'sensors' and event['id'] not in self.sensors:
if supported_sensor(event['sensor']):
device_type = 'sensor'
device = self.sensors[event['id']] = create_sensor(
event['id'], event['sensor'], self.async_put_state)
else:
_LOGGER.warning('Unsupported sensor %s', event)
return
else:
_LOGGER.debug('Unsupported event %s', event)
return
if self.async_add_device_callback:
self.async_add_device_callback(device_type, device)
elif event['e'] == 'changed':
if event['r'] == 'groups' and event['id'] in self.groups:
self.groups[event['id']].async_update(event)
elif event['r'] == 'lights' and event['id'] in self.lights:
self.lights[event['id']].async_update(event)
self.update_group_color([event['id']])
elif event['r'] == 'sensors' and event['id'] in self.sensors:
self.sensors[event['id']].async_update(event)
else:
_LOGGER.debug('Unsupported event %s', event)
elif event['e'] == 'deleted':
_LOGGER.debug('Removed event %s', event)
else:
_LOGGER.debug('Unsupported event %s', event)
|
python
|
{
"resource": ""
}
|
q14981
|
DeconzSession.update_group_color
|
train
|
def update_group_color(self, lights: list) -> None:
"""Update group colors based on light states.
deCONZ group updates don't contain any information about the current
state of the lights in the group. This method updates the color
properties of the group to the current color of the lights in the
group.
For groups where the lights have different colors the group color will
only reflect the color of the latest changed light in the group.
"""
for group in self.groups.values():
# Skip group if there are no common light ids.
if not any({*lights} & {*group.lights}):
continue
# More than one light means load_parameters called this method.
# Then we take the first light that is reachable.
light_ids = lights
if len(light_ids) > 1:
light_ids = group.lights
for light_id in light_ids:
if self.lights[light_id].reachable:
group.update_color_state(self.lights[light_id])
break
|
python
|
{
"resource": ""
}
|
q14982
|
async_get_api_key
|
train
|
async def async_get_api_key(session, host, port, username=None, password=None, **kwargs):
"""Get a new API key for devicetype."""
url = 'http://{host}:{port}/api'.format(host=host, port=str(port))
auth = None
if username and password:
auth = aiohttp.BasicAuth(username, password=password)
data = b'{"devicetype": "pydeconz"}'
response = await async_request(session.post, url, auth=auth, data=data)
api_key = response[0]['success']['username']
_LOGGER.info("API key: %s", api_key)
return api_key
|
python
|
{
"resource": ""
}
|
q14983
|
async_delete_api_key
|
train
|
async def async_delete_api_key(session, host, port, api_key):
"""Delete API key from deCONZ."""
url = 'http://{host}:{port}/api/{api_key}/config/whitelist/{api_key}'.format(
host=host, port=str(port), api_key=api_key)
response = await async_request(session.delete, url)
_LOGGER.info(response)
|
python
|
{
"resource": ""
}
|
q14984
|
async_delete_all_keys
|
train
|
async def async_delete_all_keys(session, host, port, api_key, api_keys=None):
    """Delete all API keys except for the ones provided to the method."""
    # Use None instead of a mutable default argument, and never delete the
    # key that is used for this request.
    keep_keys = set(api_keys or []) | {api_key}
    url = 'http://{}:{}/api/{}/config'.format(host, str(port), api_key)
    response = await async_request(session.get, url)
    for key in response['whitelist']:
        if key not in keep_keys:
            await async_delete_api_key(session, host, port, key)
|
python
|
{
"resource": ""
}
|
q14985
|
async_get_bridgeid
|
train
|
async def async_get_bridgeid(session, host, port, api_key, **kwargs):
"""Get bridge id for bridge."""
url = 'http://{}:{}/api/{}/config'.format(host, str(port), api_key)
response = await async_request(session.get, url)
bridgeid = response['bridgeid']
_LOGGER.info("Bridge id: %s", bridgeid)
return bridgeid
|
python
|
{
"resource": ""
}
|
q14986
|
async_discovery
|
train
|
async def async_discovery(session):
"""Find bridges allowing gateway discovery."""
bridges = []
response = await async_request(session.get, URL_DISCOVER)
if not response:
_LOGGER.info("No discoverable bridges available.")
return bridges
for bridge in response:
bridges.append({'bridgeid': bridge['id'],
'host': bridge['internalipaddress'],
'port': bridge['internalport']})
_LOGGER.info("Discovered the following bridges: %s.", bridges)
return bridges
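
# Discovery sketch: URL_DISCOVER is a module constant of this library (not
# shown here), so this only runs in that module's context and needs network
# access to the public discovery endpoint.
import asyncio
import aiohttp

async def find_bridges():
    async with aiohttp.ClientSession() as session:
        # e.g. [{'bridgeid': '...', 'host': '192.168.1.2', 'port': 80}]
        return await async_discovery(session)

# bridges = asyncio.run(find_bridges())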
|
python
|
{
"resource": ""
}
|
q14987
|
AIOWSClient.retry
|
train
|
def retry(self):
"""Retry to connect to deCONZ."""
self.state = STATE_STARTING
self.loop.call_later(RETRY_TIMER, self.start)
        _LOGGER.debug('Reconnecting to deCONZ in %i seconds.', RETRY_TIMER)
|
python
|
{
"resource": ""
}
|
q14988
|
WSClient.stop
|
train
|
def stop(self):
"""Close websocket connection."""
self.state = STATE_STOPPED
if self.transport:
self.transport.close()
|
python
|
{
"resource": ""
}
|
q14989
|
WSClient.connection_made
|
train
|
def connection_made(self, transport):
"""Do the websocket handshake.
According to https://tools.ietf.org/html/rfc6455
"""
randomness = os.urandom(16)
key = base64encode(randomness).decode('utf-8').strip()
self.transport = transport
message = "GET / HTTP/1.1\r\n"
message += "Host: " + self.host + ':' + str(self.port) + '\r\n'
message += "User-Agent: Python/3.5 websockets/3.4\r\n"
message += "Upgrade: Websocket\r\n"
message += "Connection: Upgrade\r\n"
message += "Sec-WebSocket-Key: " + key + "\r\n"
message += "Sec-WebSocket-Version: 13\r\n"
message += "\r\n"
_LOGGER.debug('Websocket handshake: %s', message)
self.transport.write(message.encode())
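
# Companion sketch (not part of the original class): how a server derives the
# Sec-WebSocket-Accept value from the key generated above, per RFC 6455
# section 4. Handy for checking the handshake response by hand.
import hashlib
from base64 import b64encode

WS_MAGIC_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

def expected_accept(sec_websocket_key):
    """Return the Sec-WebSocket-Accept value a compliant server must send."""
    digest = hashlib.sha1((sec_websocket_key + WS_MAGIC_GUID).encode()).digest()
    return b64encode(digest).decode()

# RFC 6455's own example key and accept value:
assert expected_accept("dGhlIHNhbXBsZSBub25jZQ==") == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="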
|
python
|
{
"resource": ""
}
|
q14990
|
WSClient.data_received
|
train
|
def data_received(self, data):
"""Data received over websocket.
        The first data received will always be the handshake accepting the
        connection. For subsequent frames we need to check how big the header
        is so the event data can be passed on as a proper JSON object.
"""
if self.state == STATE_STARTING:
self.state = STATE_RUNNING
_LOGGER.debug('Websocket handshake: %s', data.decode())
return
_LOGGER.debug('Websocket data: %s', data)
while len(data) > 0:
payload, extra_data = self.get_payload(data)
            self._data = payload
            self.async_session_handler_callback('data')
data = extra_data
|
python
|
{
"resource": ""
}
|
q14991
|
WSClient.get_payload
|
train
|
def get_payload(self, data):
"""Parse length of payload and return it."""
start = 2
length = ord(data[1:2])
        if length == 126:
            # Payload length is carried in an extra 2 bytes.
            start = 4
            length, = unpack(">H", data[2:4])
        elif length == 127:
            # Payload length is carried in an extra 8 bytes (RFC 6455).
            start = 10
            length, = unpack(">Q", data[2:10])
end = start + length
payload = json.loads(data[start:end].decode())
extra_data = data[end:]
return payload, extra_data
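
# Self-test sketch: build a minimal unmasked server frame (FIN + text opcode,
# 7-bit payload length) and parse it back. get_payload never touches self, so
# it can be exercised directly with None in its place; json and struct.unpack
# must be importable as in the surrounding module.
import json
from struct import unpack

_body = json.dumps({"t": "event", "e": "changed", "r": "sensors", "id": "1"}).encode()
_frame = bytes([0x81, len(_body)]) + _body + b'extra'
_payload, _rest = get_payload(None, _frame)
assert _payload["e"] == "changed" and _rest == b'extra'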
|
python
|
{
"resource": ""
}
|
q14992
|
notify_owner
|
train
|
def notify_owner(plugin, vc_room):
"""Notifies about the deletion of a Vidyo room from the Vidyo server."""
user = vc_room.vidyo_extension.owned_by_user
tpl = get_plugin_template_module('emails/remote_deleted.html', plugin=plugin, vc_room=vc_room, event=None,
vc_room_event=None, user=user)
_send('delete', user, plugin, None, vc_room, tpl)
|
python
|
{
"resource": ""
}
|
q14993
|
rooms
|
train
|
def rooms(status=None):
"""Lists all Vidyo rooms"""
room_query = VCRoom.find(type='vidyo')
table_data = [['ID', 'Name', 'Status', 'Vidyo ID', 'Extension']]
if status:
room_query = room_query.filter(VCRoom.status == VCRoomStatus.get(status))
for room in room_query:
table_data.append([unicode(room.id), room.name, room.status.name,
unicode(room.data['vidyo_id']), unicode(room.vidyo_extension.extension)])
table = AsciiTable(table_data)
for col in (0, 3, 4):
table.justify_columns[col] = 'right'
    print(table.table)
|
python
|
{
"resource": ""
}
|
q14994
|
render_engine_or_search_template
|
train
|
def render_engine_or_search_template(template_name, **context):
"""Renders a template from the engine plugin or the search plugin
If the template is available in the engine plugin, it's taken
from there, otherwise the template from this plugin is used.
:param template_name: name of the template
:param context: the variables that should be available in the
context of the template.
"""
from indico_search.plugin import SearchPlugin
assert current_plugin == SearchPlugin.instance
templates = ('{}:{}'.format(SearchPlugin.instance.engine_plugin.name, template_name),
template_name)
return render_plugin_template(templates, **context)
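
# Illustration of the lookup order with a hypothetical engine plugin name:
# the engine plugin's override of the template is tried first, then the
# search plugin's own copy is used as the fallback.
engine_name = 'indico_search_elastic'  # placeholder name, not a requirement
template_name = 'results.html'
templates = ('{}:{}'.format(engine_name, template_name), template_name)
assert templates == ('indico_search_elastic:results.html', 'results.html')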
|
python
|
{
"resource": ""
}
|
q14995
|
iter_user_identities
|
train
|
def iter_user_identities(user):
"""Iterates over all existing user identities that can be used with Vidyo"""
from indico_vc_vidyo.plugin import VidyoPlugin
providers = authenticators_re.split(VidyoPlugin.settings.get('authenticators'))
done = set()
for provider in providers:
for _, identifier in user.iter_identifiers(check_providers=True, providers={provider}):
if identifier in done:
continue
done.add(identifier)
yield identifier
|
python
|
{
"resource": ""
}
|
q14996
|
get_user_from_identifier
|
train
|
def get_user_from_identifier(settings, identifier):
"""Get an actual User object from an identifier"""
providers = list(auth.strip() for auth in settings.get('authenticators').split(','))
identities = Identity.find_all(Identity.provider.in_(providers), Identity.identifier == identifier)
if identities:
return sorted(identities, key=lambda x: providers.index(x.provider))[0].user
for provider in providers:
try:
identity_info = multipass.get_identity(provider, identifier)
except IdentityRetrievalFailed:
continue
if identity_info is None:
continue
if not identity_info.provider.settings.get('trusted_email'):
continue
emails = {email.lower() for email in identity_info.data.getlist('email') if email}
if not emails:
continue
user = User.find_first(~User.is_deleted, User.all_emails.in_(list(emails)))
if user:
return user
|
python
|
{
"resource": ""
}
|
q14997
|
update_room_from_obj
|
train
|
def update_room_from_obj(settings, vc_room, room_obj):
"""Updates a VCRoom DB object using a SOAP room object returned by the API"""
vc_room.name = room_obj.name
if room_obj.ownerName != vc_room.data['owner_identity']:
owner = get_user_from_identifier(settings, room_obj.ownerName) or User.get_system_user()
vc_room.vidyo_extension.owned_by_user = owner
vc_room.data.update({
'description': room_obj.description,
'vidyo_id': unicode(room_obj.roomID),
'url': room_obj.RoomMode.roomURL,
'owner_identity': room_obj.ownerName,
'room_pin': room_obj.RoomMode.roomPIN if room_obj.RoomMode.hasPIN else "",
'moderation_pin': room_obj.RoomMode.moderatorPIN if room_obj.RoomMode.hasModeratorPIN else "",
})
vc_room.vidyo_extension.extension = int(room_obj.extension)
|
python
|
{
"resource": ""
}
|
q14998
|
pack_ip
|
train
|
def pack_ip(ipstr):
"""Converts an ip address given in dotted notation to a four byte
string in network byte order.
>>> len(pack_ip("127.0.0.1"))
4
>>> pack_ip("foo")
Traceback (most recent call last):
...
ValueError: given ip address has an invalid number of dots
@type ipstr: str
@rtype: bytes
@raises ValueError: for badly formatted ip addresses
"""
if not isinstance(ipstr, basestring):
raise ValueError("given ip address is not a string")
parts = ipstr.split('.')
if len(parts) != 4:
raise ValueError("given ip address has an invalid number of dots")
parts = [int(x) for x in parts] # raises ValueError
return int_seq_to_bytes(parts)
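
# Cross-check sketch: the standard library packs dotted quads the same way,
# so it can be used to verify pack_ip (which additionally needs the module's
# int_seq_to_bytes helper in order to run).
import socket
assert socket.inet_aton("127.0.0.1") == b"\x7f\x00\x00\x01"
# pack_ip("127.0.0.1") is expected to produce the identical four bytes.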
|
python
|
{
"resource": ""
}
|
q14999
|
unpack_ip
|
train
|
def unpack_ip(fourbytes):
"""Converts an ip address given in a four byte string in network
byte order to a string in dotted notation.
>>> unpack_ip(b"dead")
'100.101.97.100'
>>> unpack_ip(b"alive")
Traceback (most recent call last):
...
ValueError: given buffer is not exactly four bytes long
@type fourbytes: bytes
@rtype: str
@raises ValueError: for bad input
"""
if not isinstance(fourbytes, bytes):
raise ValueError("given buffer is not a string")
if len(fourbytes) != 4:
raise ValueError("given buffer is not exactly four bytes long")
return ".".join([str(x) for x in bytes_to_int_seq(fourbytes)])
|
python
|
{
"resource": ""
}
|