| code (string, length 51–2.34k) | docstring (string, length 11–171) |
|---|---|
def compileGSUB(self):
from ufo2ft.util import compileGSUB
compiler = self.context.compiler
if compiler is not None:
if hasattr(compiler, "_gsub"):
return compiler._gsub
glyphOrder = compiler.ttFont.getGlyphOrder()
else:
glyphOrder = so... | Compile a temporary GSUB table from the current feature file. |
def cmd_init(self, *args):
    """Create an initial buildozer.spec in the current directory.

    Refuses to overwrite an existing spec file and exits with status 1.
    """
    if exists('buildozer.spec'):
        print('ERROR: You already have a buildozer.spec file.')
        exit(1)
    template = join(dirname(__file__), 'default.spec')
    copyfile(template, 'buildozer.spec')
    print('File buildozer.spec created, ready to customize!')
def can_fetch(self, url_info: URLInfo, user_agent: str):
    """Return whether *user_agent* is allowed to fetch the given URL.

    Looks up the cached robots.txt parser for the URL's key and delegates
    the decision to it.
    """
    parser_key = self.url_info_key(url_info)
    robots_parser = self._parsers[parser_key]
    return robots_parser.is_allowed(user_agent, url_info.url)
def autosave_all(self):
    """Autosave every file currently opened in the editor stack."""
    stack_size = self.stack.get_stack_count()
    for position in range(stack_size):
        self.autosave(position)
def display(self):
"Renders the scene once every refresh"
self.compositor.waitGetPoses(self.poses, openvr.k_unMaxTrackedDeviceCount, None, 0)
hmd_pose0 = self.poses[openvr.k_unTrackedDeviceIndex_Hmd]
if not hmd_pose0.bPoseIsValid:
return
if True:
gl... | Renders the scene once every refresh |
def generative(func):
    """Mark an instance method as generative.

    The wrapped method runs against a shallow copy of the instance, so the
    original object is never mutated and the copy can be returned fluently.
    """
    def wrap(inst, *args, **kw):
        # Build a bare instance (bypassing __init__) and copy state over.
        duplicate = type(inst).__new__(type(inst))
        duplicate.__dict__ = inst.__dict__.copy()
        return func(duplicate, *args, **kw)
    return update_wrapper(wrap, func)
def push(self):
tx = Tx(self.tx_project_slug)
template = babel.messages.catalog.Catalog()
for topic in self.desk.topics():
if topic.show_in_portal:
template.add(topic.name)
template_po = StringIO()
babel.messages.pofile.write_po(template_po, template)
... | Push topics to Transifex. |
def restore_row(self, row, schema):
    """Restore a row read from SQL by casting each value via its schema field.

    On PostgreSQL, array/object columns come back as native values and are
    left untouched; every other value is passed through field.cast_value.
    """
    restored = list(row)
    for position, field in enumerate(schema.fields):
        if self.__dialect == 'postgresql' and field.type in ['array', 'object']:
            continue
        restored[position] = field.cast_value(restored[position])
    return restored
def update_consumer_offsets(self, partition_offsets):
    """Move the consumer to explicit per-partition positions.

    partition_offsets -- mapping of partition -> offset to adopt.
    """
    self.logger.debug("Updating consumer offsets to: %s", partition_offsets)
    offsets = self.consumer.offsets
    for partition, offset in partition_offsets.items():
        offsets[partition] = offset
    # Relative seek of zero makes the consumer pick up the new offsets.
    self.consumer.seek(0, 1)
def __store_processing_state(self):
steps = self.Application_Progress_Status_processing.Processing_progressBar.maximum()
value = self.Application_Progress_Status_processing.Processing_progressBar.value()
message = self.Application_Progress_Status_processing.Processing_label.text()
state ... | Stores the processing state. |
def _gen_keys_from_multicol_key(key_multicol, n_keys):
keys = [('{}{:03}of{:03}')
.format(key_multicol, i+1, n_keys) for i in range(n_keys)]
return keys | Generates single-column keys from multicolumn key. |
def cache_key(self, request, method=None):
    """Build the page cache key from the request's absolute URI and HTTP method.

    method -- override for the request's own method; defaults to request.method.
    """
    effective_method = request.method if method is None else method
    return "bettercache_page:%s:%s" % (request.build_absolute_uri(), effective_method)
def _requirements_to_dict(rs):
out = []
added = set([])
for r in rs:
if r["class"] == "DockerRequirement" and "docker" not in added:
added.add("docker")
out.append({"requirement_type": "docker", "value": r["dockerImageId"]})
elif r["class"] == "ResourceRequirement":
... | Convert supported requirements into dictionary for output. |
def ok_button_status(self):
if not self.layer.currentLayer():
self.button_box.button(
QtWidgets.QDialogButtonBox.Ok).setEnabled(False)
elif (self.hazard_class_form.count() > 0
and self.layer.currentLayer().name()
and len(self.output_form.text()) >=... | Function to enable or disable OK button. |
def draw_footer(canvas):
note = (
u'Bank Details: Street address, Town, County, POSTCODE',
u'Sort Code: 00-00-00 Account No: 00000000 (Quote invoice number).',
u'Please pay via bank transfer or cheque. All payments should be made in CURRENCY.',
u'Make cheques payable to Company Name ... | Draws the invoice footer |
def _prep_mod_opts(self):
    """Return a copy of self.opts with keys that cannot be handed to modules removed.

    Currently only the 'logger' entry is stripped.
    """
    return {key: val
            for key, val in six.iteritems(self.opts)
            if key != 'logger'}
def describe_all(self, refresh=True):
    """Describe every table in the connected region.

    refresh -- forwarded to describe(); when True, re-query table metadata.
    Returns the list of descriptions in list_tables() order.
    """
    return [self.describe(table_name, refresh)
            for table_name in self.connection.list_tables()]
def prepare_buckets(self):
    """Load every base model's bucket into the bucket cache, keyed by bucket name."""
    for model in self.registry.get_base_models():
        bucket = model(super_context).objects.adapter.bucket
        self.buckets[bucket.name] = bucket
def delete_relation(sender, instance, **kwargs):
def process_signal(relation_id):
try:
relation = Relation.objects.get(pk=relation_id)
except Relation.DoesNotExist:
return
if relation.entities.count() == 0:
relation.delete()
transaction.on_commit(lambd... | Delete the Relation object when the last Entity is removed. |
def targetwords(self, index, targetwords, alignment):
    """Return the target words aligned to the source word at *index*."""
    aligned_positions = alignment[index]
    return [targetwords[pos] for pos in aligned_positions]
def rested_filter(name, location, size, unsize):
ver = slack_ver()
if _meta_.slack_rel == "current":
ver = "current"
path_pkg = "pkg"
if _meta_.arch == "x86_64":
path_pkg = "pkg64"
(fname, flocation, fsize, funsize) = ([] for i in range(4))
for n, l, s, u in zip(name, location, s... | Filter Alien"s repository data |
def attach(gandi, name, vhost, remote):
    """Add a remote for an instance's default vhost to the local git repository."""
    paas_api = gandi.paas
    return paas_api.attach(name, vhost, remote)
def intersection(set1, set2):
    """Return the items of set1 that also appear in set2, preserving set1's order.

    Note: membership test is linear in set2, so this is O(len(set1)*len(set2));
    elements need not be hashable.
    """
    assert is_iterable(set1)
    assert is_iterable(set2)
    return [item for item in set1 if item in set2]
def _get(self, id):
"Return keys and value for karma id"
VALUE_SQL = "SELECT karmavalue from karma_values where karmaid = ?"
KEYS_SQL = "SELECT karmakey from karma_keys where karmaid = ?"
value = self.db.execute(VALUE_SQL, [id]).fetchall()[0][0]
keys_cur = self.db.execute(KEYS_SQL, [id]).fetchall()
keys = s... | Return keys and value for karma id |
def default_decode(events, mode='full'):
event, elem = next(events)
root = elem
while (event, elem.tag) not in [('start', 'igt'), ('end', 'xigt-corpus')]:
event, elem = next(events)
igts = None
if event == 'start' and elem.tag == 'igt':
igts = (
decode_igt(e)
... | Decode a XigtCorpus element. |
def _FindPartition(self, key):
hash_value = self.hash_generator.ComputeHash(key)
return self._LowerBoundSearch(self.partitions, hash_value) | Finds the partition from the byte array representation of the partition key. |
def replicasResource(self):
if self._replicasResource is None:
self._replicasResource = {}
for replica in self.replicas:
self._replicasResource["replicaName"] = replica.name
self._replicasResource["replicaID"] = replica.guid
return self._replicasRe... | returns a list of replices |
def populate_tree(self, master, parent, element,from_file=False):
data = WidgetDescr(None, None)
data.from_xml_node(element)
cname = data.get_class()
uniqueid = self.get_unique_id(cname, data.get_id())
data.set_property('id', uniqueid)
if cname in builder.CLASS_MAP:
... | Reads xml nodes and populates tree item |
def from_string(self, value):
if value.startswith('[') and value.endswith(']'):
text = value[1:-1].strip()
else:
text = value.strip()
result = []
if text.startswith('('):
tokens = text.split(',')
if len(tokens) % 2 != 0:
rai... | Convert string to list. |
def _get_span_name(servicer_context):
    """Build a span name from the gRPC server rpc call details.

    Strips the leading '/' from the method, decodes bytes if needed, and
    replaces path separators with dots before prefixing with RECV_PREFIX.
    """
    raw_method = servicer_context._rpc_event.call_details.method[1:]
    if isinstance(raw_method, bytes):
        raw_method = raw_method.decode('utf-8')
    dotted = raw_method.replace('/', '.')
    return '{}.{}'.format(RECV_PREFIX, dotted)
def _extract_parameters_from_properties(properties):
new_properties = {}
parameters = []
for key, value in six.iteritems(properties):
if key.startswith(_PARAMETER_PREFIX):
parameters.append((key.replace(_PARAMETER_PREFIX, ""), value))
else:
new_properties[key] = value... | Extracts parameters from properties. |
def obj(x):
    """Two-dimensional Shubert function evaluated at the point (x[0], x[1])."""
    indices = np.arange(1, 6)
    def _term(coord):
        # sum_{j=1..5} j * cos((j+1)*coord + j)
        return np.dot(indices, np.cos((indices + 1) * coord + indices))
    return _term(x[0]) * _term(x[1])
def build_dictionary(self):
    """Return a dict mapping each characterId to its defining tag.

    Raises ValueError if the same characterId is defined twice.
    """
    defined = {}
    for tag in self.all_tags_of_type(DefinitionTag, recurse_into_sprites=False):
        if tag.characterId in defined:
            raise ValueError('illegal redefinition of character')
        defined[tag.characterId] = tag
    return defined
def _check_key(self, key):
if not len(key) == 2:
raise TypeError('invalid key: %r' % key)
elif key[1] not in TYPES:
raise TypeError('invalid datatype: %s' % key[1]) | Ensures well-formedness of a key. |
def triplify_object(binding):
triples = []
if binding.uri:
triples.append((binding.subject, RDF.type, binding.uri))
if binding.parent is not None:
parent = binding.parent.subject
if binding.parent.is_array:
parent = binding.parent.parent.subject
triples.append((pa... | Create bi-directional bindings for object relationships. |
def maybe_convert_platform(values):
if isinstance(values, (list, tuple)):
values = construct_1d_object_array_from_listlike(list(values))
if getattr(values, 'dtype', None) == np.object_:
if hasattr(values, '_values'):
values = values._values
values = lib.maybe_convert_objects(... | try to do platform conversion, allow ndarray or list here |
def rm_raw(ctx, dataset, kwargs):
    """Remove the raw, unprocessed data for *dataset*."""
    parsed = parse_kwargs(kwargs)
    data(dataset, **ctx.obj).rm_raw(**parsed)
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
self.fileExtension = extension
with open(path, 'r') as f:
for line in f:
sline = line.strip().split()
if len(sline) == 1:
s... | Replace Param File Read from File Method |
def close(self):
    """Return the wrapped connection to its pool rather than closing it."""
    if not (self._wrapped_connection and self._pool):
        return
    logger.debug("Returning connection %s to pool %s" % (self._wrapped_connection, self._pool))
    self._pool.putconn(self._wrapped_connection)
    self._wrapped_connection = None
def use_plenary_asset_view(self):
    """Pass through to provider AssetLookupSession.use_plenary_asset_view."""
    self._object_views['asset'] = PLENARY
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_plenary_asset_view()
        except AttributeError:
            # Not every provider session supports asset views; skip those.
            pass
def lock_pid(self):
    """Return the pid recorded in the lock file, or None if no lock exists.

    Fix: the previous implementation opened the lock file without ever
    closing it, leaking the file descriptor; a ``with`` block guarantees
    the handle is released.
    """
    if not os.path.exists(self.lock_filename):
        return None
    with open(self.lock_filename) as lock_file:
        return int(lock_file.read())
def _get_site_dummy_variables(self, vs30):
s_b = np.zeros_like(vs30)
s_c = np.zeros_like(vs30)
s_d = np.zeros_like(vs30)
s_b[np.logical_and(vs30 >= 360., vs30 < 800.)] = 1.0
s_c[np.logical_and(vs30 >= 180., vs30 < 360.)] = 1.0
s_d[vs30 < 180] = 1.0
return s_b, s_c... | Returns the Eurocode 8 site class dummy variable |
def store(self, installed_stuff, metadata, interpreter, options):
new_content = {
'timestamp': int(time.mktime(time.localtime())),
'installed': installed_stuff,
'metadata': metadata,
'interpreter': interpreter,
'options': options
}
logg... | Store the virtualenv metadata for the indicated installed_stuff. |
def _bin_op(name, doc="binary operator"):
def _(self, other):
jc = other._jc if isinstance(other, Column) else other
njc = getattr(self._jc, name)(jc)
return Column(njc)
_.__doc__ = doc
return _ | Create a method for given binary operator |
def extract_email(text):
    """Extract all valid email addresses found in *text* (lower-cased)."""
    found = []
    for groups in re.findall(_regex_extract_email, text.lower()):
        # Each findall hit is a tuple of capture groups; keep only the
        # candidates that pass the validation pattern.
        found.extend(candidate for candidate in groups
                     if re.match(_regex_validate_email, candidate))
    return found
def getAnalysisCategories(self):
    """Return all active analysis categories as a DisplayList of (UID, Title)."""
    catalog = api.get_tool("bika_setup_catalog")
    brains = catalog(portal_type="AnalysisCategory",
                     is_active=True,
                     sort_on="sortable_title")
    return DisplayList([(brain.UID, brain.Title) for brain in brains])
def _parse_plan(self, match):
    """Parse a matched TAP plan line into a Plan.

    Returns Unknown when the plan carries a directive with text that is
    not a skip directive.
    """
    expected = int(match.group("expected"))
    directive = Directive(match.group("directive"))
    if directive.text and not directive.skip:
        return Unknown()
    return Plan(expected, directive)
def adev(self, tau0, tau):
    """Return the predicted ADEV of this noise type at averaging time *tau*.

    AVAR = prefactor^2 * tau^c; ADEV is its square root.
    """
    prefactor = self.adev_from_qd(tau0=tau0, tau=tau)
    exponent = self.c_avar()
    avar = prefactor ** 2 * tau ** exponent
    return np.sqrt(avar)
def hashes_above(path, line_number):
def hash_lists(path):
hashes = []
with open(path) as file:
for lineno, line in enumerate(file, 1):
match = HASH_COMMENT_RE.match(line)
if match:
hashes.append(match.groupdict()['hash'])
... | Yield hashes from contiguous comment lines before line ``line_number``. |
def IsFleetspeakEnabledClient(grr_id, token=None):
if grr_id is None:
return False
if data_store.RelationalDBEnabled():
md = data_store.REL_DB.ReadClientMetadata(grr_id)
if not md:
return False
return md.fleetspeak_enabled
else:
with aff4.FACTORY.Create(
rdf_client.ClientURN(grr_... | Returns whether the provided GRR id is a Fleetspeak client. |
def processLibraryDetails(details):
for includeDir in details.includeDirs:
for pattern in CUSTOM_FLAGS_FOR_INCLUDE_DIRS:
if pattern in includeDir:
flag = '-D' + CUSTOM_FLAGS_FOR_INCLUDE_DIRS[pattern] + '=' + includeDir
details.cmakeFlags.append(flag)
for lib in details.libs:
filename = os.path.b... | Processes the supplied ThirdPartyLibraryDetails instance and sets any custom CMake flags |
def edge_val_set(self, graph, orig, dest, idx, key, branch, turn, tick, value):
if (branch, turn, tick) in self._btts:
raise TimeError
self._btts.add((branch, turn, tick))
graph, orig, dest, key, value = map(self.pack, (graph, orig, dest, key, value))
self._edgevals2set.appen... | Set this key of this edge to this value. |
def _enum_attached_rows(self, indices):
records = self._records
i = 0
for i, line in self._enum_lines():
if i in indices:
row = records[i]
if row is None:
row = decode_row(line)
yield (i, row)
for j in range(... | Enumerate on-disk and in-memory records. |
def __findRange(self, excelLib, start, end):
inc = 1
low = 0
high = 0
dates = excelLib.readCol(0, 1)
for index, date in enumerate(dates):
if int(start) <= int(date):
low = index + inc
break
if low:
for inde... | return low and high as excel range |
def _read_header(self):
self._header = self.cdmrf.fetch_header()
self.load_from_stream(self._header) | Get the needed header information to initialize dataset. |
def entity_from_snapshot(snapshot):
    """Reconstruct a domain entity from *snapshot*.

    Returns None when the snapshot records a deleted/empty state.
    """
    assert isinstance(snapshot, AbstractSnapshop), type(snapshot)
    if snapshot.state is None:
        return None
    entity_class = resolve_topic(snapshot.topic)
    return reconstruct_object(entity_class, snapshot.state)
def press(button=LEFT):
location = get_position()
button_code, button_down, _, _ = _button_mapping[button]
e = Quartz.CGEventCreateMouseEvent(
None,
button_down,
location,
button_code)
if _last_click["time"] is not None and datetime.datetime.now() - _last_click["time"] < ... | Sends a down event for the specified button, using the provided constants |
def _context_source_file_url(path_or_url):
if path_or_url.startswith('http'):
return path_or_url
if path_or_url.startswith('/'):
return "file://" + path_or_url
return "file://" + os.path.join(os.path.realpath(os.getcwd()), path_or_url) | Returns a URL for a remote or local context CSV file |
def _should_allocate_port(pid):
if pid <= 0:
log.info('Not allocating a port to invalid pid')
return False
if pid == 1:
log.info('Not allocating a port to init.')
return False
try:
os.kill(pid, 0)
except ProcessLookupError:
log.info('Not allocating a port ... | Determine if we should allocate a port for use by the given process id. |
def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert):
if dtype_out_vert == 'vert_int':
if units != '':
units = '(vertical integral of {0}): {0} kg m^-2)'.format(units)
else:
units = '(vertical integral of quantity with unspecified units)'
data.attrs['un... | Add metadata attributes to DataArray |
def __normalize_list(self, msg):
if isinstance(msg, list):
msg = "".join(msg)
return list(map(lambda x: x.strip(), msg.split(","))) | Split message to list by commas and trim whitespace. |
def update_datasources_cache():
from superset.models.core import Database
for database in db.session.query(Database).all():
if database.allow_multi_schema_metadata_fetch:
print('Fetching {} datasources ...'.format(database.name))
try:
database.all_table_names_in_d... | Refresh sqllab datasources cache |
async def eval(self, text, opts=None):
    """Evaluate a storm query and yield packed nodes."""
    podes = self.cell.iterStormPodes(text, opts=opts, user=self.user)
    async for node in podes:
        yield node
def use_federated_repository_view(self):
    """Pass through to provider AssetLookupSession.use_federated_repository_view."""
    self._repository_view = FEDERATED
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_federated_repository_view()
        except AttributeError:
            # Some provider sessions do not support repository views; skip.
            pass
def addPos(self, dp_x=None, dy=None, dz=None):
    """Translate the actor by a vector (dp_x) or by components (dp_x, dy, dz).

    Updates the trail, when present, and returns self for chaining.
    """
    current = np.array(self.GetPosition())
    delta = dp_x if dz is None else [dp_x, dy, dz]
    self.SetPosition(current + delta)
    if self.trail:
        self.updateTrail()
    return self
def statsId(obj):
    """Return a unique ID for *obj*, minting and caching a new one on first use."""
    _missing = object()
    current = getattr(obj, ID_KEY, _missing)
    if current is not _missing:
        return current
    fresh = next(NEXT_ID)
    setattr(obj, ID_KEY, fresh)
    return fresh
def forum_topic_get_by_tag_for_user(self, tag=None, author=None):
if not tag:
return None
if author:
r = self._request('ebuio/forum/search/bytag/' + tag + '?u=' + author)
else:
r = self._request('ebuio/forum/search/bytag/' + tag)
if not r:
... | Get all forum topics with a specific tag |
def convert_reaction_entry(self, reaction):
d = OrderedDict()
d['id'] = reaction.id
def is_equation_valid(equation):
return (equation is not None and (
not isinstance(equation, Reaction) or
len(equation.compounds) > 0))
order = {
... | Convert reaction entry to YAML dict. |
def check_apm_out(self):
    """Send pending HIL state to the APM, rate-limited to one send per 20 ms."""
    now = time.time()
    elapsed = now - self.last_apm_send_time
    if elapsed < 0.02:
        return
    self.last_apm_send_time = now
    pending = self.hil_state_msg
    if pending is not None:
        self.master.mav.send(pending)
def plos_doi_to_xmlurl(doi_string):
doi_url = 'http://dx.doi.org/{0}'.format(doi_string)
log.debug('DOI URL: {0}'.format(doi_url))
try:
resolved_page = urllib.request.urlopen(doi_url)
except urllib.error.URLError as err:
print('Unable to resolve DOI URL, or could not connect')
ra... | Attempts to resolve a PLoS DOI into a URL path to the XML file. |
def in_query(expression):
def _in(index, expression=expression):
ev = expression() if callable(expression) else expression
try:
iter(ev)
except TypeError:
raise AttributeError('$in argument must be an iterable!')
hashed_ev = [index.get_hash_for(v) for v in ev]... | Match any of the values that exist in an array specified in query. |
def add_vertex_buffer(self, material, vertex_format, byte_offset, byte_length):
    """Record a vertex buffer entry (material, format, and byte range)."""
    entry = {
        "material": material,
        "vertex_format": vertex_format,
        "byte_offset": byte_offset,
        "byte_length": byte_length,
    }
    self._vertex_buffers.append(entry)
def stress(syllabified_simplex_word):
syllables = syllabified_simplex_word.split('.')
stressed = '\'' + syllables[0]
try:
n = 0
medial = syllables[1:-1]
for i, syll in enumerate(medial):
if (i + n) % 2 == 0:
stressed += '.' + syll
else:
... | Assign primary and secondary stress to 'syllabified_simplex_word'. |
def manager(self, **kwargs):
    """Return a preference manager bound to this registry and its preference model."""
    model = self.preference_model
    return PreferencesManager(registry=self, model=model, **kwargs)
def make_export(self, exports):
sql = 'drop table if exists export'
logging.debug(sql)
self.cursor.execute(sql)
sql = 'create table if not exists export ' \
'(func text unique, module text)'
logging.debug(sql)
self.cursor.execute(sql)
for module in e... | Populate library exported function data. |
def AddAttributePath(self, **_):
    """Append the current token to the expression's attribute path."""
    # A falsy attribute (None or empty) starts a fresh path list.
    path = self.current_expression.attribute or []
    path.append(self.string)
    self.current_expression.SetAttribute(path)
def branch_inlet_outlet(data, commdct, branchname):
    """Return [inlet, outlet] of the named Branch object.

    The inlet sits at fixed index 6; the outlet is the second-to-last field.
    """
    branches = data.dt['Branch'.upper()]
    matches = [record for record in branches if record[1] == branchname]
    branch = matches[0]
    return [branch[6], branch[-2]]
def remove_empty_dir(path):
try:
if not os.path.isdir(path):
return
files = os.listdir(path)
if len(files) == 0:
os.rmdir(path)
elif len(files) > 0:
for f in files:
abspath = os.path.join(path, f)
if os.path.isdir(ab... | Function to remove empty folders |
def lookup_ids(handles):
ids = set()
for handle_list in [handles[100 * i:100 * i + 100] for i in range(len(handles))]:
if len(handle_list) > 0:
while True:
r = twapi.request('users/lookup', {'screen_name': ','.join(handle_list)})
if r.status_code in [88, 130, ... | Fetch the twitter ids of each screen_name. |
def connection(self):
    """Get this thread's steady, persistent PyGreSQL connection, creating it lazily."""
    thread_state = self.thread
    try:
        return thread_state.connection
    except AttributeError:
        # First use on this thread: open a steady connection and cache it.
        con = self.steady_connection()
        thread_state.connection = con
        return con
def _authenticate():
global url, port, ticket, csrf, verify_ssl
url = config.get_cloud_config_value(
'url', get_configured_provider(), __opts__, search_global=False
)
port = config.get_cloud_config_value(
'port', get_configured_provider(), __opts__,
default=8006, search_global=Fa... | Retrieve CSRF and API tickets for the Proxmox API |
def update_datatype(self, datatype, w=None, dw=None, pw=None,
                    return_body=None, timeout=None, include_context=None):
    """Updates a Riak Datatype by sending local operations to the server.

    This transport does not implement datatype updates; subclasses must
    override. Fix: raise with an explanatory message instead of a bare
    NotImplementedError so the failure is self-describing.
    """
    raise NotImplementedError(
        "datatype updates are not supported by this transport")
def create_toggle_view_action(self):
title = self.get_plugin_title()
if self.CONF_SECTION == 'editor':
title = _('Editor')
if self.shortcut is not None:
action = create_action(self, title,
toggled=lambda checked: self.toggle_view(checked),
... | Associate a toggle view action with each plugin |
def _get_spades_circular_nodes(self, fastg):
seq_reader = pyfastaq.sequences.file_reader(fastg)
names = set([x.id.rstrip(';') for x in seq_reader if ':' in x.id])
found_fwd = set()
found_rev = set()
for name in names:
l = name.split(':')
if len(l) != 2:
... | Returns set of names of nodes in SPAdes fastg file that are circular. Names will match those in spades fasta file |
def cat_acc(y, z):
    """Classification accuracy for the multi-categorical case.

    Compares argmax predictions against argmax targets, weighted by
    _cat_sample_weights(y).
    """
    weights = _cat_sample_weights(y)
    matches = K.cast(K.equal(K.argmax(y, axis=-1), K.argmax(z, axis=-1)),
                     K.floatx())
    return K.sum(matches * weights) / K.sum(weights)
def markdown_2_rst(lines):
out = []
code = False
for line in lines:
if line.strip() == "```":
code = not code
space = " " * (len(line.rstrip()) - 3)
if code:
out.append("\n\n%s.. code-block:: none\n\n" % space)
else:
out... | Convert markdown to restructured text |
def _construct_X_M(self, omega, **kwargs):
X = self._construct_X(omega, weighted=True, **kwargs)
M = np.dot(X.T, X)
if getattr(self, 'regularization', None) is not None:
diag = M.ravel(order='K')[::M.shape[0] + 1]
if self.regularize_by_trace:
diag += diag.... | Construct the weighted normal matrix of the problem |
def bounds_overlap(bound1, bound2):
(x1,y1,w1,h1) = bound1
(x2,y2,w2,h2) = bound2
if x1+w1 < x2:
return False
if x2+w2 < x1:
return False
if y1+h1 < y2:
return False
if y2+h2 < y1:
return False
return True | return true if two bounding boxes overlap |
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
if _enclosing_tpu_context() is None:
if hasattr(self._primary_var, '_dense_var_to_tensor'):
return self._primary_var._dense_var_to_tensor(dtype, name, as_ref)
else:
return ops.convert_to_tensor(self._primary_var)
if... | Converts a variable to a tensor. |
def flip(f):
    """Return *f* with the order of its positional arguments reversed."""
    ensure_callable(f)
    def flipped(*args, **kwargs):
        return f(*reversed(args), **kwargs)
    functools.update_wrapper(flipped, f, ('__name__', '__module__'))
    return flipped
def from_git_rev_read(path):
    """Retrieve the contents of a file path at a given Git revision.

    *path* must be in ``<revision>:<relative/path>`` form (e.g.
    ``HEAD:setup.py``).

    Raises ValueError if the identifier is malformed or git cannot show
    the revision/path. Fix: the original raised a bare ``ValueError`` with
    no message and discarded the git error; the failure is now chained.
    """
    if ":" not in path:
        raise ValueError("Path identifier must start with a revision hash.")
    cmd = "git", "show", "-t", path
    try:
        return subprocess.check_output(cmd).rstrip().decode("utf-8")
    except subprocess.CalledProcessError as exc:
        raise ValueError("Unable to read {0!r} from git".format(path)) from exc
def _make_function(instr, queue, stack, body, context):
assert stack, "Empty stack before MAKE_FUNCTION."
prev = stack[-1]
expect(prev, instrs.LOAD_CONST, "before MAKE_FUNCTION")
stack.append(instr)
if is_lambda_name(prev.arg):
return
return context.update(
make_function_context=... | Set a make_function_context, then push onto the stack. |
def to_dict(self):
d = {'sequence': self.sequence,
'targetComponent': self.target_component.to_dict()}
props = []
for name in self.properties:
p = {'name': name}
if self.properties[name]:
p['value'] = str(self.properties[name])
... | Save this condition into a dictionary. |
def update_record(self, zeroconf, now, record):
if record is not None and not record.is_expired(now):
if record.type == _TYPE_A:
if record.name == self.name:
if not record.address in self.address:
self.address.append(record.address)
... | Updates service information from a DNS record |
def package_version(self):
    """Return the well-formed PEP-440 version.

    The base version gains a ``.devN+sha`` local suffix when there are
    commits past the last release.
    """
    version = self.base_version
    if self.ncommits:
        version = '{0}.dev{1}+{2}'.format(version, self.ncommits, self.sha)
    return version
def convert(self, value, view):
if isinstance(value, BASESTRING):
if self.pattern and not self.regex.match(value):
self.fail(
u"must match the pattern {0}".format(self.pattern),
view
)
return value
else:
... | Check that the value is a string and matches the pattern. |
def authenticate(self):
if self.__token:
try:
resp = self._refresh_token()
except exceptions.TVDBRequestException as err:
if getattr(err.response, 'status_code', 0) == 401:
resp = self._login()
else:
... | Aquire authorization token for using thetvdb apis. |
def init_sources(path):
for f in dir_list(path):
if(os.path.splitext(f)[1][1:] == config.source_ext):
print "Source file discovered: %s" % (f)
script = Script(f)
if (script.filename not in config.sources.keys()):
config.sources[script.path] = script
... | initializes array of groups and their associated js files |
def to_datetime(value):
    """Convert *value* to a datetime; None passes through unchanged.

    Integers go through parser.parse, everything else through
    parser.isoparse.
    # NOTE(review): parser.parse on an integer looks unusual for dateutil —
    # confirm the intended integer input format with the callers.
    """
    if value is None:
        return None
    parse = parser.parse if isinstance(value, six.integer_types) else parser.isoparse
    return parse(value)
def register_segment_dcnm(self, cfg, seg_id_min, seg_id_max):
orch_id = cfg.dcnm.orchestrator_id
try:
segid_range = self.dcnm_client.get_segmentid_range(orch_id)
if segid_range is None:
self.dcnm_client.set_segmentid_range(orch_id, seg_id_min,
... | Register segmentation id pool with DCNM. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.