content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import argparse
def parse_args():
    """Build the argument parser and process command-line input.

    Returns:
        argparse.Namespace: parsed arguments for the mutation pipeline.
    """
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument(
        'functional', metavar='F',
        help="Functionally annotated mutations file",
    )
    arg_parser.add_argument(
        '--genomic', '-g',
        help="Genomic mutations file",
    )
    arg_parser.add_argument(
        '--mutfunc', '-m', default='mutfunc/',
        help="Compiled Mutfunc directory",
    )
    arg_parser.add_argument(
        '--genes', '-n',
        help="Table of gene positions",
    )
    arg_parser.add_argument(
        '--filter', '-f',
        help="List of gene IDs to include",
    )
    arg_parser.add_argument(
        '--effect', '-e', action='store_true',
        help="Functional file gives a list of mutations and effects",
    )
    return arg_parser.parse_args()
def get_default_query_ids(source_id, model):
    """
    Returns set of Strings, representing source_id's default queries

    Keyword Parameters:
    source_id -- String, representing API ID of the selection source
    model -- Dict, representing current DWSupport configuration

    >>> no_query_model = { 'queries': []
    ...                   ,'associations': []
    ...                   ,'tables': [{'name': 'foo_fact', 'type': 'fact'}]
    ...                   ,'variables': [ { 'table': 'foo_fact'
    ...                                    ,'column': 'foo_ml'}]
    ...                   ,'variable_custom_identifiers': []}
    >>> source = 'my_example.foo_fact'
    >>> get_default_query_ids(source, no_query_model)
    set()
    >>> test_model = { 'queries': [{ 'table': 'foo_fact', 'name': 'core'
    ...                             ,'variables': {'foo_fact': ['foo_ml']}
    ...                            }]
    ...               ,'associations': []
    ...               ,'tables': [{'name': 'foo_fact', 'type': 'fact'}]
    ...               ,'variables': [ { 'table': 'foo_fact'
    ...                                ,'column': 'foo_ml'}]
    ...               ,'variable_custom_identifiers': []}
    >>> get_default_query_ids(source, test_model)
    {'core'}
    """
    # Only the table portion of "<project>.<table>" selects matching queries.
    _, source_table_name = source_id.split('.')
    return {query['name'] for query in model['queries']
            if query['table'] == source_table_name}
def celsius_to_kelvin(temperature):
    """Convert a temperature from degrees Celsius to Kelvin.

    :param temperature: temperature understood to be in degrees Celsius
    :return: the same temperature expressed in Kelvin (K = °C + 273.15)
    """
    CELSIUS_TO_KELVIN_OFFSET = 273.15
    return temperature + CELSIUS_TO_KELVIN_OFFSET
def constant_series(z):
    """
    Closed form of the geometric series 1 + z + z**2 + ..., i.e. 1/(1 - z).

    NOTE(review): the series only converges for |z| < 1; the closed form is
    returned for any z != 1 regardless — confirm callers respect this.
    """
    denominator = 1 - z
    return 1 / denominator
def multiplica(x, y, z=None):
    """Multiply x and y, and optionally z.

    The caller may omit z when it is not needed, in which case only
    x * y is returned.

    :param x: float
    :param y: float
    :param z: float or None (optional third factor)
    :return: float
    """
    # BUG FIX: the branches were inverted — omitting z attempted x*y*None
    # (TypeError), and supplying z silently ignored it. Test z against None
    # explicitly so z=0 is still a valid factor.
    if z is None:
        return x * y
    return x * y * z
import aiohttp
from typing import List
from typing import Tuple
async def get_html_blog(
    session: aiohttp.ClientSession,
    url: str,
    contests: List[int],
) -> Tuple[str, str, List[int]]:
    """Get html from a blog url.

    The *contests* list is passed through untouched so the caller can
    correlate each fetched page with its contest ids.

    Args:
        session (aiohttp.ClientSession) : shared HTTP session
        url (str) : blog url to fetch
        contests (List[int]) : list of contest ids associated with the url
    Returns:
        Tuple[str, str, List[int]] : url, html body text, contests list
    """
    # The response is released automatically when the async context exits.
    async with session.get(url) as resp:
        return url, await resp.text(), contests
def query_string_to_kwargs(query_string: str) -> dict:
    """
    Converts URL query string to keyword arguments.

    The literal value "None" maps to ``None``; '+' in values is decoded
    to a space. Pairs without '=' are skipped.

    Args:
        query_string (str): URL query string (leading '?' expected)

    Returns:
        dict: Generated keyword arguments
    """
    output = {}
    for pair in query_string[1:].split("&"):
        if "=" not in pair:
            continue
        # BUG FIX: split on the first '=' only, so values that themselves
        # contain '=' no longer raise "too many values to unpack".
        key, value = pair.split("=", 1)
        output[key] = None if value == "None" else value.replace("+", " ")
    return output
import collections
def group_transcripts_by_name2(tx_iter):
    """Group GenePredTranscript objects by their ``name2`` attribute.

    Returns a defaultdict mapping each name2 value to the list of
    transcripts that carry it, preserving iteration order.
    """
    grouped = collections.defaultdict(list)
    for transcript in tx_iter:
        grouped[transcript.name2].append(transcript)
    return grouped
def run_operation(task):
    """Run a particular task, simulating some failures on its execution.

    NOTE(review): any failure simulation lives inside task.run(); this
    wrapper only delegates — confirm the docstring matches the task class.
    """
    return task.run()
def get_var_mode(prefix):
    """Return True when *prefix* contains no '{' character.

    ``prefix`` -- Prefix for the completion

    Variable completion can be done in two ways and completion depends on
    which way the variable is written. Possible variable completions are
    ``$`` and ``${}``; in the latter the cursor sits between curly braces,
    and this function returns False.
    """
    # Direct boolean expression instead of `False if ... else True`.
    return '{' not in prefix
def lol2str(doc):
    """Flatten a list-of-lists document into one space-separated string."""
    words = []
    for sentence in doc:
        words.extend(sentence)
    return " ".join(words)
def mrr_voltage_to_delta_lambda(v, alpha, k, gamma, n_g, lambda_0):
    """Micro-ring resonator (MRR) wavelength modulation.

    delta_lambda = delta_n_eff * lambda_0 / n_g, with
    delta_n_eff = gamma * k * alpha * v**2.

    v        {torch.Tensor or np.ndarray} drive voltage
    alpha    {scalar} voltage-squared to temperature-change coefficient
    k        {scalar} thermo-optic parameter
    gamma    {scalar} power to phase-shift coefficient
    n_g      {scalar} group index, typically from 4 to 4.5
    lambda_0 {torch.Tensor or np.ndarray} central wavelength
    return   resonance wavelength drift
    """
    effective_index_shift = gamma * k * alpha * v * v
    return effective_index_shift * lambda_0 / n_g
import textwrap
def _pretty_longstring(defstr, prefix='', wrap_at=65):
"""
Helper function for pretty-printing a long string.
:param defstr: The string to be printed.
:type defstr: str
:return: A nicely formated string representation of the long string.
:rtype: str
"""
outstr = ""
for line in textwrap.fill(defstr, wrap_at).split('\n'):
outstr += prefix + line + '\n'
return outstr | 19008289620b86b8760a36829cbaa97d117a8139 | 41,659 |
def kronecker(x, y):
    """
    Return 1 if x == y, and 0 otherwise.

    Note that this should really only be used for integer expressions.
    """
    return 1 if x == y else 0
def drop_unadjusted_fields(mapping):
    """Drop all fields beyond mapping[0:12], except the CIGAR and
    alignment-type optional tags.

    Args:
        mapping (list[str]): fields of a single alignment record; optional
            tags such as "cg:Z:..." (CIGAR) and "tp:A:..." (type) may follow
            the 12 mandatory columns.

    Returns:
        list[str]: the first 12 fields plus any retained optional tags,
        in their original order.
    """
    # Filter the optional tail directly instead of tracking indices.
    return mapping[:12] + [field for field in mapping[12:]
                           if field[:5] in ("cg:Z:", "tp:A:")]
import struct
import os
def get_image_size(data):
    """Determine dimensions from image file data.

    Sniffs the magic bytes of GIF, PNG, JPEG, JPEG 2000 and BMP streams.

    :param data: binary file-like object positioned at the image start
    :return: (width, height) tuple of ints
    :raises ValueError: if the format is not recognised
    """
    # 56 bytes is enough to sniff every supported header (JP2 needs 56).
    b = data.read(56)
    size = len(b)
    width = height = 0
    if size >= 10 and b[:6] in [b'GIF87a', b'GIF89a']:
        # GIF: little-endian 16-bit dimensions right after the signature.
        width, height = struct.unpack('<hh', b[6:10])
    elif size >= 24 and b.startswith(b'\x89PNG') and b[12:16] == b'IHDR':
        # PNG: big-endian 32-bit dimensions at the start of the IHDR chunk.
        width, height = struct.unpack('>LL', b[16:24])
    elif size >= 2 and b.startswith(b'\xff\xd8'):
        # JPEG: walk the marker segments until a SOFn frame header is found
        # (C0-CF, excluding the non-frame markers C4/C8/CC).
        data.seek(0)
        size = 2
        ftype = 0
        while not 0xC0 <= ftype <= 0xCF or ftype in [0xC4, 0xC8, 0xCC]:
            data.seek(size, os.SEEK_CUR)
            while True:
                b = data.read(1)
                # Skip 0xFF fill bytes preceding the marker type.
                if b != b'\xff':
                    break
            ftype = ord(b)
            size = struct.unpack('>H', data.read(2))[0] - 2
        # SOFn payload: precision byte, then big-endian height and width.
        data.seek(1, os.SEEK_CUR)
        height, width = struct.unpack('>HH', data.read(4))
    elif size >= 12 and b.startswith(b'\x00\x00\x00\x0cjP'):
        # JPEG 2000: big-endian height then width at offset 48.
        height, width = struct.unpack('>LL', b[48:])
    elif b.startswith(b'BM'):
        # BMP: little-endian 32-bit dimensions in the BITMAPINFOHEADER.
        width, height = struct.unpack('<II', b[18:26])
    else:
        raise ValueError("Unsupported image file format.")
    return width, height
from pathlib import Path
import sys
def update_item_image(item_name, slot, items):
    """
    Update the item image in the item slot selection window.

    :param item_name: name of the item image (may carry a "remove" suffix)
    :param slot: slot key, e.g. "-SLOT1-"
    :param items: the items list (mutated in place)
    :return: (items, image-element key, path to the item's asset image)
    """
    clean_name = item_name.replace("remove", "")
    slot_number = int(slot.replace("-", "").replace("SLOT", ""))
    items[slot_number - 1] = clean_name
    image_key = "-" + slot.replace("-", "") + "_IMG-"
    assets_parent = Path(sys.argv[0]).parent
    if clean_name:
        image_path = rf"{assets_parent}\Images\Assets\{clean_name}_item.png"
    else:
        image_path = rf"{assets_parent}\Images\Assets\unknown_item.png"
    return items, image_key, image_path
import argparse
def argument_setup():
    """Set up the command line argument parser.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-t", "--type", type=str,
        help="File type",
    )
    arg_parser.add_argument(
        "--test", action="store_true", default=False,
        help="Run validation and/or verification tests",
    )
    return arg_parser.parse_args()
def space_join(conllu,sentence_wise=False):
    """Takes conllu input and returns:
    All tokens separated by space (if sentence_wise is False), OR
    A list of sentences, each a space separated string of tokens

    Ellipsis tokens (ids like "8.1") are skipped entirely; for multiword
    "super tokens" (ids like "3-4") the surface form is kept and the
    covered sub-tokens are skipped.
    """
    lines = conllu.replace("\r","").strip().split("\n")
    lines.append("") # Ensure last blank
    just_text = []
    sentences = []
    # Number of sub-tokens still to skip after emitting a super token.
    length = 0
    for line in lines:
        if "\t" in line:
            fields = line.split("\t")
            if "." in fields[0]: # ellipsis tokens
                continue
            if "-" in fields[0]: # need to get super token and ignore next n tokens
                just_text.append(fields[1])
                start, end = fields[0].split("-")
                start = int(start)
                end = int(end)
                length = end-start+1
            else:
                if length > 0:
                    # Still inside a super-token span: skip this sub-token.
                    length -= 1
                    continue
                just_text.append(fields[1])
        elif len(line.strip())==0 and sentence_wise: # New sentence
            sent_text = " ".join(just_text)
            sentences.append(sent_text)
            just_text = []
    if sentence_wise:
        return sentences
    else:
        text = " ".join(just_text)
        return text
def cumsum(arr):
    """Exclusive cumulative sum: out[i] = arr[0] + ... + arr[i-1].

    Starts at zero and excludes arr[-1] from the final element, matching
    the original behaviour, but runs in O(n) instead of the previous
    O(n^2) repeated summation.
    """
    out = []
    running = 0
    for value in arr:
        out.append(running)
        running += value
    return out
def first(it):
    """Return the first element of an iterable.

    Raises StopIteration when the iterable is empty.
    """
    iterator = iter(it)
    return next(iterator)
def _rkck(y,dydt,t,f,dt,args=()):
    """
    Take one 5th-order Cash-Karp Runge-Kutta step.

    Parameters
    ----------
    y : array_like
        Current state.
    dydt : array_like
        Derivative of `y` at time `t`, i.e. f(y, t, *args).
    t : float
        Current time.
    f : callable
        Right-hand side of the ODE, called as f(y, t, *args).
    dt : float
        Step size.
    args : tuple, optional
        Extra arguments forwarded to `f`.

    Returns
    -------
    array_like
        The change in `y` during this step.
    array_like
        An error estimate for `y` (difference between the embedded
        4th- and 5th-order solutions).
    """
    # Cash-Karp tableau coefficients: nodes a*, stage weights b*, solution
    # weights c*; the dc* are 5th-minus-4th-order weight differences used
    # for the embedded error estimate.
    a2=0.2;a3=0.3;a4=0.6;a5=1.0;a6=0.875;b21=0.2 # noqa
    b31=3.0/40.0;b32=9.0/40.0;b41=0.3;b42 = -0.9;b43=1.2; # noqa
    b51 = -11.0/54.0; b52=2.5;b53 = -70.0/27.0;b54=35.0/27.0; # noqa
    b61=1631.0/55296.0;b62=175.0/512.0;b63=575.0/13824.0; # noqa
    b64=44275.0/110592.0;b65=253.0/4096.0;c1=37.0/378.0; # noqa
    c3=250.0/621.0;c4=125.0/594.0;c6=512.0/1771.0; # noqa
    dc5 = -277.00/14336.0; # noqa
    dc1=c1-2825.0/27648.0;dc3=c3-18575.0/48384.0; # noqa
    dc4=c4-13525.0/55296.0;dc6=c6-0.25 # noqa
    # Five additional derivative evaluations at the Cash-Karp nodes.
    ytemp = y+b21*dt*dydt
    ak2 = f(ytemp, t+a2*dt, *args)
    ytemp = y+dt*(b31*dydt+b32*ak2)
    ak3 = f(ytemp, t+a3*dt, *args)
    ytemp = y+dt*(b41*dydt+b42*ak2+b43*ak3)
    ak4 = f(ytemp, t+a4*dt, *args)
    ytemp = y + dt*(b51*dydt+b52*ak2+b53*ak3+b54*ak4)
    ak5 = f(ytemp, t+a5*dt, *args)
    ytemp = y + dt*(b61*dydt+b62*ak2+b63*ak3+b64*ak4+b65*ak5)
    ak6 = f(ytemp, t+a6*dt, *args)
    # 5th-order solution increment and embedded error estimate.
    dyout = dt*(c1*dydt+c3*ak3+c4*ak4+c6*ak6)
    yerr = dt*(dc1*dydt+dc3*ak3+dc4*ak4+dc5*ak5+dc6*ak6)
    return dyout, yerr
import os
def basename(file_name):
    """
    Return the basename of a file path, without the preceding directory.
    """
    stripped = os.path.basename(file_name)
    return stripped
from typing import Dict
import hashlib
import base64
def headers_sdc_artifact_upload(base_header: Dict[str, str], data: str):
    """
    Build the headers required for an SDC artifact upload.

    Args:
        base_header (Dict[str, str]): headers to start from (not mutated)
        data (str): payload data; its MD5 hex digest, base64-encoded, goes
            into the Content-MD5 header

    Returns:
        Dict[str, str]: a new dict with the upload headers added
    """
    md5_hex = hashlib.md5(data.encode('UTF-8')).hexdigest()
    md5_b64 = base64.b64encode(md5_hex.encode('ascii')).decode('UTF-8')
    headers = dict(base_header)
    headers.update({
        "Accept": "application/json, text/plain, */*",
        "Accept-Encoding": "gzip, deflate, br",
        "Content-Type": "application/json; charset=UTF-8",
        "Content-MD5": md5_b64,
    })
    return headers
def valid_boolean(boolean_as_hopefully_int) -> int:
    """Parse an argument as if it were boolean; return int 0 or 1.

    In sqlite a 'boolean' is actually an int taking the value 1 or 0,
    with 1 representing True. Accepts ints, bools, and the textual
    values "true"/"false" (any case).

    Raises ValueError if passed an un-parse-able or out-of-range value.
    """
    try:
        my_boolean = int(boolean_as_hopefully_int)
    except (TypeError, ValueError):
        # Not int-convertible: fall back to textual true/false.
        text = str(boolean_as_hopefully_int).lower()
        if text == "true":
            return 1
        if text == "false":
            return 0
        raise ValueError("Could not parse value as into a valid (sqlite) boolean argument.")
    if 0 <= my_boolean <= 1:
        return my_boolean
    # BUG FIX: previously this error was raised inside the try block and
    # immediately swallowed by the function's own `except ValueError`,
    # which replaced it with the generic parse-failure message.
    raise ValueError("Value out of range for a valid (sqlite) boolean argument.")
def content_disposition_value(file_name):
    """Return the value of a Content-Disposition HTTP header.

    Double quotes in the file name are replaced with underscores so the
    quoted filename parameter stays well-formed.
    """
    safe_name = file_name.replace('"', '_')
    return 'attachment;filename="{}"'.format(safe_name)
def decode_dataset_id(dataset_id):
    """Decode a dataset ID encoded using `encode_dataset_id()`.

    '__' decodes to a literal underscore; '_XX' (two hex digits) decodes
    to the character with code point 0xXX; all other characters pass
    through unchanged.
    """
    decoded = []
    position = 0
    while position < len(dataset_id):
        char = dataset_id[position]
        if char != '_':
            decoded.append(char)
            position += 1
        elif dataset_id[position + 1] == '_':
            decoded.append('_')
            position += 2
        else:
            hex_digits = dataset_id[position + 1:position + 3]
            decoded.append(chr(int(hex_digits, 16)))
            position += 3
    return ''.join(decoded)
def _add_tag(tags, label: str) -> bool:
"""Adds the tag to the repeated field of tags.
Args:
tags: Repeated field of Tags.
label: Label of the tag to add.
Returns:
True if the tag is added.
"""
for tag in tags:
if tag.label == label:
# Episode already has the tag.
return False
tags.add().label = label
return True | 932399e97ae823ef0922929dc5123a587c06b211 | 41,680 |
def get_admin_net(neutron_client):
    """Return admin network.

    :param neutron_client: Authenticated neutronclient
    :type neutron_client: neutronclient.Client object
    :returns: the first network whose name ends with '_admin_net', or
        None when no such network exists
    :rtype: dict or None
    """
    networks = neutron_client.list_networks()['networks']
    return next(
        (net for net in networks if net['name'].endswith('_admin_net')),
        None,
    )
def _partial(middleware, handler):
"""
Custom partial in order to propagate original fn attributes
"""
async def partial(request):
response = await middleware(request, handler=handler)
return response
partial.__name__ = handler.__name__
partial.__module__ = handler.__module__
partial.__doc__ = handler.__doc__
return partial | 7f36a03733fa0020c2e2211a007eb47f8b9a3a52 | 41,683 |
import os
def join(iterable):
    """Join iterable's items as a path string

    >>> join(('a', 'b')) == os.path.join('a', 'b')
    True
    """
    parts = tuple(iterable)
    return os.path.join(*parts) if parts else ''
def listener_protocol(protocol):
    """
    Property: Listener.Protocol

    Validates that *protocol* is one of the supported values and returns
    it unchanged; raises ValueError otherwise.
    """
    valid_protocols = ["TCP", "UDP"]
    if protocol in valid_protocols:
        return protocol
    raise ValueError('Protocol must be one of: "%s"' % (", ".join(valid_protocols)))
def fitdict(target_keys, input):
    """
    Assign the values of the required keys in target_keys to a new list,
    indexed identically with target_keys; keys missing from *input* map
    to 0. Meant to be used to assign an object's values to the merged
    frequency set.
    """
    # Comprehension keyed by position; avoids shadowing the `list` builtin
    # and the manual index counter of the original.
    return [input[key] if key in input else 0 for key in target_keys]
def binary_search(target, bounds, fn, eps=1e-2):
    """Perform binary search to find the input corresponding to the target
    output of a given monotonically increasing function fn, up to the eps
    precision. Requires initial bounds (lower, upper) on the values of x.

    Raises RuntimeError if no convergence within 500 iterations.
    """
    lb, ub = bounds
    iterations = 0
    while ub - lb > eps:
        guess = (ub + lb) / 2
        if fn(guess) < target:
            lb = guess
        else:
            ub = guess
        iterations += 1
        # BUG FIX: was `assert False`, which is stripped under `python -O`
        # and would then loop forever; raise an explicit error instead.
        if iterations > 500:
            raise RuntimeError("binary_search failed to converge within 500 iterations")
    return (ub + lb) / 2
def inverse(x):
    """Return the multiplicative inverse (one over the input)."""
    reciprocal = 1 / x
    return reciprocal
from typing import List
def format_learning_rates(learning_rates: List[float]) -> str:
    """
    Converts a list of learning rates to a human readable string, with
    multiple entries separated by semicolons.

    :param learning_rates: An iterable of learning rate values.
    :return: An empty string if the argument is None or empty, otherwise
        the rates formatted as {:0.2e} and joined with "; ".
    """
    if not learning_rates:
        return ""
    formatted = ["{:0.2e}".format(rate) for rate in learning_rates]
    return "; ".join(formatted)
def add_buffer_to_intervals(ranges, n_channels, pad_channels=5):
    """Extend interval ranges on both sides by a number of channels.

    Parameters
    ----------
    ranges : list
        List of intervals [(low, upp), ...].
    n_channels : int
        Number of spectral channels (upper clip bound).
    pad_channels : int
        Number of channels by which an interval (low, upp) gets extended
        on both sides, resulting in (low - pad_channels, upp + pad_channels),
        clipped to [0, n_channels].

    Returns
    -------
    ranges_new : list
        New list of padded intervals with overlapping ones merged.
    """
    # Pad and clip each interval.
    padded = [(max(low - pad_channels, 0), min(upp + pad_channels, n_channels))
              for low, upp in ranges]
    # Merge overlapping intervals after sorting by lower bound.
    ranges_new = []
    for low, upp in sorted(padded, key=lambda interval: interval[0]):
        if ranges_new and low <= ranges_new[-1][1]:
            # Overlaps the previous interval: extend it in place.
            ranges_new[-1] = (ranges_new[-1][0], max(ranges_new[-1][1], upp))
        else:
            ranges_new.append((low, upp))
    return ranges_new
def has_required_vowels(text, no_of_vowels=3):
    """Return True once *text* contains the required number of vowels."""
    vowel_total = 0
    for character in text:
        if character in 'aeiou':
            vowel_total += 1
        # Stop scanning as soon as the threshold is reached.
        if vowel_total == no_of_vowels:
            return True
    return False
import torch
def _unpack_from_convolution(x, batch_size):
"""
Moves the last dimension to the desired position while preserving ordering of the others.
Parameters
----------
x : :class:`torch.Tensor`
Input tensor of shape ... x n
dim : int
Dimension to move the last location.
Returns
-------
x : :class:`torch.Tensor`
Permuted tensor
"""
x = torch.transpose(x, 1, 2)
xs = x.shape
num_N = int(xs[0] / batch_size)
x = x.reshape((batch_size, num_N) + xs[1:])
return x | 8df80e8f6e6037774027fd49f368d16c1a825b55 | 41,698 |
def myfunc():
    """
    Demonstrates how to write a doctest.

    Prefix doctest lines with `>>>` and ideally place them in an
    `Example:` block.

    CommandLine:
        # it would be nice if sphinx.ext.napoleon could handle this
        xdoctest -m ~/code/xdoctest/xdoctest/demo.py myfunc

    Example:
        >>> result = myfunc()
        >>> assert result == 123

    Ignore:
        >>> # it would be nice if sphinx.ext.napoleon could ignore this
        >>> print('this test is not run')
    """
    return 123
def update_settings(settings, updates, files):
    """Apply *updates* to *settings* and return it.

    Each update value is first looked up in *files* (falling back to the
    value itself) and stringified before being set as an attribute.
    """
    resolved = {attr: str(files.get(value, value))
                for attr, value in updates.items()}
    for attr, value in resolved.items():
        setattr(settings, attr, value)
    return settings
def image_prepare(image):
    """Clip negative values to 0 to reduce noise and use 0 to 255 integers."""
    # Zero out negative pixels (noise suppression); mutates *image* in place.
    image[image < 0] = 0
    # NOTE(review): this scales by the *sum* of all pixels, which usually
    # yields values far below 255; the docstring's "0 to 255" suggests
    # scaling by the maximum instead — confirm the intended normalisation.
    image *= 255 / image.sum()
    return image.astype(int)
def parse_value(value):
    """
    Tries to convert `value` to an int, then a float, then a str; the
    first successful conversion wins. Otherwise returns `value` as is.
    """
    for cast in (int, float, str):
        try:
            return cast(value)
        except ValueError:
            continue
    return value
import os
def get_basename(obj):
    """Get the :func:`~os.path.basename` of a file.

    Parameters
    ----------
    obj : :term:`path-like <path-like object>` or :term:`file-like <file object>`
        The object to get the basename of. If *obj* is not path-like, its
        ``name`` attribute is tried; failing that, the class name of *obj*
        is returned.

    Returns
    -------
    :class:`str`
        The basename of `obj`.
    """
    try:
        return os.path.basename(obj)
    except (TypeError, AttributeError):
        # Not path-like: fall back to the object's `name`, then class name.
        try:
            return os.path.basename(obj.name)
        except AttributeError:
            return obj.__class__.__name__
def flatten(xs):
    """Flatten one level of nesting from a 2D list."""
    flat = []
    for row in xs:
        flat.extend(row)
    return flat
def _encoding_base_info(base_info):
"""
Encoding base info
Args:
base_info(dict): base info
"""
encoded_info = list()
for symbol, base in base_info.iteritems():
item = {
'symbol': symbol,
'base': base
}
encoded_info.append(item)
return encoded_info | e4a56a6063e06f858811c278468cc8cd19055d07 | 41,709 |
def caption_fmt(caption):
    """Format a caption as a '== caption ==' heading (empty string if falsy)."""
    if not caption:
        return ""
    return "\n== {} ==\n".format(caption)
def rgbi2rgbf(rgbf):
    """Scale each RGB channel by 255 and truncate to int.

    NOTE(review): despite the function name (rgbi2rgbf) and the original
    docstring ("integer into float"), the code converts a float triple in
    [0, 1] into an integer triple in [0, 255] — i.e. float -> int, not
    int -> float. Confirm which direction callers expect.
    """
    return (int(rgbf[0]*255.0), int(rgbf[1]*255.0), int(rgbf[2]*255.0))
def node_count(shape):
    """Total number of nodes.

    The total number of nodes in a structured grid with dimensions given
    by the tuple, *shape*. Where *shape* is the number of node rows and
    node columns.

    >>> from landlab.utils.structured_grid import node_count
    >>> node_count((3, 4))
    12
    """
    assert len(shape) == 2
    n_rows, n_cols = shape
    return n_rows * n_cols
def serialize(f):
    """
    Turn the abstract syntax tree into tidy data (i.e. one row per record).

    Each sample of each core becomes one flat dict carrying the file-level
    metadata, the core sequence number, the sample depth/description, and
    one key per declared field.
    """
    records = []
    file_meta = f['file']
    for core in f['cores']:
        for sample in core['samples']:
            record = {
                'well name': file_meta['well name'],
                'country': file_meta['country'],
                'company': file_meta['company'],
                'date': file_meta['date'],
                'core': core['meta']['seq'],
                # FIX: depth/descr were previously (re)assigned inside the
                # fields loop, so records lost them when 'fields' was empty.
                'depth': sample['depth'],
                'descr': sample['descr'],
            }
            for i, field in enumerate(f['fields']):
                record[field] = sample['data'][i]
            records.append(record)
    return records
def Madd(M1, M2):
    """Elementwise (entrywise) addition of two matrices."""
    result = []
    for row1, row2 in zip(M1, M2):
        result.append([x + y for x, y in zip(row1, row2)])
    return result
def handle_exceptions(err):
    """Custom Application Error handler.

    Delegates serialisation to the error object; assumes *err* exposes a
    to_dict() method (e.g. a custom API exception class).
    """
    return err.to_dict()
def convert_to_pronoun(day):
    """Convert an integer weekday index (0 = Monday) to its English name.

    Returns None for values outside 0..6, matching the original
    fall-through behaviour. (The function name says "pronoun" but the
    values are weekday names — kept for interface compatibility.)
    """
    weekday_names = ("Monday", "Tuesday", "Wednesday", "Thursday",
                     "Friday", "Saturday", "Sunday")
    # range membership uses ==, matching the original equality chain
    # (so e.g. 2.0 still maps to Wednesday).
    if day in range(7):
        return weekday_names[int(day)]
    return None
def bubble_sort(li):
    """ [list of int] => [list of int]

    Bubble sort: repeatedly sweep the list, swapping each adjacent pair
    that is out of order, until a full pass makes no swaps. Sorts *li*
    in place and returns it.
    """
    swapped = True
    while swapped:
        # Assume sorted until a swap proves otherwise.
        swapped = False
        for idx in range(len(li) - 1):
            if li[idx] > li[idx + 1]:
                li[idx], li[idx + 1] = li[idx + 1], li[idx]
                swapped = True
    return li
def get_bgp_attrs(g, node):
    """Return the dict of BGP-related attrs for *node*, creating an empty
    default entry on first access."""
    node_attrs = g.node[node]
    if 'bgp' not in node_attrs:
        node_attrs['bgp'] = {'asnum': None, 'neighbors': {}, 'announces': {}}
    return node_attrs['bgp']
def convert_event_schema(schema):
    """Convert an event schema to record-schema components.

    Returns a tuple of (spot list, association list, spot->association
    map). The '参数' key holds each event type's argument list in the
    upstream schema format.
    """
    spots = []
    asocs = set()
    spot_asoc_map = {}
    for event_type, definition in schema.items():
        spots.append(event_type)
        arguments = list(definition['参数'])
        spot_asoc_map[event_type] = arguments
        asocs.update(arguments)
    return spots, list(asocs), spot_asoc_map
def GenerateAndroidResourceStringsXml(names_to_utf8_text, namespaces=None):
    """Generate an XML text corresponding to an Android resource strings map.

    Args:
        names_to_utf8_text: A dictionary mapping resource names to localized
            text (encoded as UTF-8).
        namespaces: A map of namespace prefix to URL.

    Returns:
        New string containing an XML data structure describing the input as
        an Android resource .xml file.
    """
    # FIX: dict.iteritems() is Python 2 only; sorted(dict.items()) works on
    # both 2 and 3. Assemble via join instead of repeated +=.
    parts = ['<?xml version="1.0" encoding="utf-8"?>\n', '<resources']
    if namespaces:
        for prefix, url in sorted(namespaces.items()):
            parts.append(' xmlns:%s="%s"' % (prefix, url))
    parts.append('>\n')
    if not names_to_utf8_text:
        parts.append('<!-- this file intentionally empty -->\n')
    else:
        for name, utf8_text in sorted(names_to_utf8_text.items()):
            parts.append('<string name="%s">"%s"</string>\n' % (name, utf8_text))
    parts.append('</resources>\n')
    return ''.join(parts)
def cross(u, v):
    """
    Return the cross product of 2 vectors.

    For each output component i, the index pair (j, k) follows the cyclic
    rule: 0 -> (1, 2), 1 -> (2, 0), and (0, 1) otherwise.
    """
    index_pairs = {0: (1, 2), 1: (2, 0)}
    result = []
    for i in range(len(u)):
        j, k = index_pairs.get(i, (0, 1))
        result.append(u[j] * v[k] - u[k] * v[j])
    return result
def similar_lists(list1, list2, eps=1e-3):
    """Check elementwise whether the difference between corresponding
    elements is strictly below *eps*.

    Returns False when the lists differ in length. (Previously zip()
    silently truncated the longer list, so e.g. [1] and [1, 2] compared
    as similar.)
    """
    if len(list1) != len(list2):
        return False
    return all(abs(a - b) < eps for a, b in zip(list1, list2))
def normalized_difference(x, y):
    """
    Normalized difference helper function for computing an index such
    as NDVI: (x - y) / (x + y).

    Example
    -------
    >>> import descarteslabs.workflows as wf
    >>> col = wf.ImageCollection.from_id("landsat:LC08:01:RT:TOAR",
    ...     start_datetime="2017-01-01", end_datetime="2017-05-30")
    >>> nir, red = col.unpack_bands("nir red")
    >>> # geoctx is an arbitrary geocontext for 'col'
    >>> wf.normalized_difference(nir, red).compute(geoctx) # doctest: +SKIP
    """
    difference = x - y
    total = x + y
    return difference / total
import numpy
def gen_symm_lookup(n_symm_el, orb_symm):
    """Generate a list of all spatial orbitals of each irrep in the point
    group.

    Parameters
    ----------
    n_symm_el : (unsigned int)
        number of distinct irreps in this group
    orb_symm : (numpy.ndarray, unsigned int)
        irrep of each orbital

    Returns
    -------
    (numpy.ndarray, unsigned int)
        a matrix with rows indexed by the irrep indices.
        The 0th number in each row is the number of orbitals with this
        irrep. The remaining numbers are the indices of those orbitals.
    """
    # Reshape a copy into a column vector so broadcasting against the
    # original row vector yields a boolean "same irrep" matrix.
    symm_cp = orb_symm.copy()
    symm_cp.shape = (-1, 1)
    # Largest number of orbitals sharing one irrep -> required row width.
    max_same = numpy.amax(numpy.sum(symm_cp == orb_symm, axis=1))
    # NOTE(review): uint8 limits counts and orbital indices to 255 —
    # confirm the orbital count stays below that bound.
    symm_table = numpy.zeros((n_symm_el, max_same + 1), dtype=numpy.uint8)
    for i in range(n_symm_el):
        matching_orbs = numpy.nonzero(orb_symm == i)[0]
        n_match = matching_orbs.shape[0]
        symm_table[i, 0] = n_match
        symm_table[i, 1:(1+n_match)] = matching_orbs
    return symm_table
import re
def match(message):
    """
    Extract a package name from a message of the form "ceph package"
    (an optional trailing '?' is allowed).

    :returns: a package name if we matched, or None if not.
    """
    # FIX: use a raw string for the regex — '\S' in a plain string literal
    # is an invalid escape sequence (DeprecationWarning, future SyntaxError).
    pattern = re.compile(r'^(\S+) package\??$')
    m = pattern.match(message)
    if not m:
        return None
    return m.group(1)
def _jd2mjd( jd ):
"""
The `_jd2mjd` function converts the Julian date (serial day number)into a
Modified Julian date (serial day number).
Returns a float.
"""
return jd - 2400000.5 | 01f4db238d092ab235374c5ab64517754ef075de | 41,736 |
def tokenTextToDict(text):
    """
    Prepares input text for use in latent semantic analysis by shifting it
    from a list to a dictionary data structure.

    Parameters
    ----------
    text : list of strings
        A list of strings where each string is a word and the list is a
        document.

    Returns
    -------
    wordD : dictionary
        A dictionary where the key is a word and the value is its count.
    """
    wordD = {}
    for word in text:
        # dict.get collapses the previous membership if/else into one line.
        wordD[word] = wordD.get(word, 0) + 1
    return wordD
def search(array, value, dir="-"):
    """
    Searches a sorted (ascending) array for a value, or if value is not
    found, will attempt to find the closest value.

    Specifying dir="-" finds the index of the greatest value in array less
    than or equal to the given value.
    Specifying dir="+" finds the index of the least value in array greater
    than or equal to the given value.
    Specifying dir="*" finds the index of the value closest to the given
    value.

    Raises IndexError when the value lies outside the array and the
    requested direction cannot be satisfied.
    """
    if value < array[0]:
        if dir == "+":
            return 0
        raise IndexError(f"No value found before {value}.")
    if value > array[-1]:
        if dir == "-":
            return len(array) - 1
        raise IndexError(f"No value found after {value}.")
    J = 0
    K = len(array) - 1
    while True:
        if value == array[J]:
            return J
        if value == array[K]:
            return K
        if K == J + 1:
            if dir == "-":
                return J
            if dir == "+":
                return K
            if dir == "*":
                # BUG FIX: compare the *array values* at J and K to the
                # target — the original compared the indices themselves
                # (abs(n - value)), picking an arbitrary neighbour.
                return min((J, K), key=lambda n: abs(array[n] - value))
        N = (J + K) // 2
        if value < array[N]:
            K = N
        elif value > array[N]:
            J = N
        else:
            return N
def readFile(filename, offset, length):
    """Read ``length`` bytes from the file named by ``filename`` starting
    at ``offset``.

    A negative ``offset`` returns the last ``abs(offset)`` bytes of the
    file (``length`` must then be 0).  A ``length`` of 0 reads from
    ``offset`` to end-of-file.  Raises ValueError('BAD_ARGUMENTS') for
    inconsistent arguments and ValueError('FAILED') when the file cannot
    be opened or read.
    """
    try:
        # ``with`` guarantees the descriptor is closed even when a
        # validation ValueError is raised mid-read (the original leaked
        # the open file on those paths).
        with open(filename, 'rb') as f:
            if offset < 0:
                # negative offset: return abs(offset) bytes from the tail
                if length:
                    raise ValueError('BAD_ARGUMENTS')
                f.seek(0, 2)               # seek to EOF to learn the size
                sz = f.tell()
                pos = max(0, int(sz - abs(offset)))
                f.seek(pos)
                data = f.read(abs(offset))
            else:
                if length < 0:
                    raise ValueError('BAD_ARGUMENTS')
                f.seek(offset)
                # length == 0 means "read to end of file"
                data = f.read() if length == 0 else f.read(length)
    except (OSError, IOError):
        raise ValueError('FAILED')
    return data
import logging
def first_good_peak(peaks):
    """Pick the first usable peak from a find_peaks-style result tuple.

    A leading peak whose left base is 0 is skipped when another peak is
    available.  Returns a dict with keys 'peak', 'left' and 'right'
    (all 0 when no peaks were found).
    """
    indices, props = peaks[0], peaks[1]
    if len(indices) == 0:
        logging.warning('FIRST_PEAK: No peaks found, returning empty peak information')
        return {'peak': 0, 'left': 0, 'right': 0}
    chosen = 0
    if props['left_bases'][0] == 0:
        if len(indices) > 1:
            # first peak starts at 0: treat it as an artifact, use the next one
            chosen = 1
        else:
            logging.warning('FIRST_PEAK: Signal only contains one peak, which starts at 0')
    return {'peak': indices[chosen],
            'left': props['left_bases'][chosen],
            'right': props['right_bases'][chosen]}
def _make_ss_flux(reaction_str):
"""Format reaction identifier to match steady state flux parameter.
Warnings
--------
This method is intended for internal use only.
"""
return "v_" + reaction_str | 68270b4767ec7bc9c860b0063e17197583afdf69 | 41,743 |
def query_params(params, key, def_value, short_hand=None):
    """Normalize ``params`` so that the canonical ``key`` is always present.

    When ``key`` is absent its value is taken from ``short_hand`` (if
    given and present) or falls back to ``def_value``.  Any shorthand
    entry is removed so only the canonical key remains.

    :param params: dict of query parameters (mutated in place)
    :param key: canonical parameter name
    :param def_value: value used when neither key nor shorthand is present
    :param short_hand: optional alias for ``key``
    :return: the (mutated) params dict
    """
    if key not in params and short_hand:
        # pop() tolerates a missing shorthand; the previous unconditional
        # ``del params[short_hand]`` raised KeyError whenever the
        # shorthand was not actually supplied.
        params[key] = params.pop(short_hand, def_value)
    elif key not in params:
        params[key] = def_value
    elif short_hand in params:
        # key is present and wins over the shorthand; drop the duplicate
        del params[short_hand]
    return params
from typing import Tuple
def adjust_range(in_range: Tuple[float, float]) -> Tuple[float, float]:
    """Widen a y-axis display range by 10% (5% padding on each side)."""
    span = abs(in_range[0] - in_range[1])
    pad = span * 0.05
    return (in_range[0] - pad, in_range[1] + pad)
from pathlib import Path
def get_cache_dir() -> Path:
    """
    Returns a cache dir that can be used as a local buffer.
    Example use cases are intermediate results and downloaded files.
    At the moment this will be a folder that lies in the root of this package.
    Reason for this location and against `~/.cache/paderbox` are:
     - This repo may lie on an faster filesystem.
     - The user has limited space in the home directory.
     - You may install paderbox multiple times and do not want to get side
       effects.
     - When you delete this repo, also the cache is deleted.

    Returns:
        Path to a cache dir

    >>> get_cache_dir()  # doctest: +SKIP
    PosixPath('.../paderbox/cache')
    """
    dirname = Path(__file__).resolve().absolute().parents[2]
    path = dirname / "cache"
    # exist_ok avoids the check-then-create race when several processes
    # initialize the cache concurrently (the old exists()/mkdir() pair
    # could raise FileExistsError).
    path.mkdir(exist_ok=True)
    return path
def create_description(body):
    """
    Artificially build a page description from the main content (used only
    when the page has no meta description): pick the sentence with the
    most words.
    """
    sentences = body.split('.')
    # rank candidate sentences by word count (space count), longest first
    ranked = sorted(sentences, key=lambda s: s.count(" "), reverse=True)
    return ranked[0] if ranked else None
def img_resize_bilinear(grid, w2, h2):
    """
    Resize a 2-D grid of numbers to w2 x h2 using bilinear interpolation.
    (Algorithm from techalgorithm.com.)
    """
    src_w = len(grid[0])
    src_h = len(grid)
    x_ratio = (src_w - 1) / float(w2)
    y_ratio = (src_h - 1) / float(h2)
    resized = []
    for row_i in range(h2):
        y = int(y_ratio * row_i)
        y_frac = (y_ratio * row_i) - y
        out_row = []
        for col_j in range(w2):
            x = int(x_ratio * col_j)
            x_frac = (x_ratio * col_j) - x
            # the four neighbouring source samples
            A = grid[y][x]
            B = grid[y][x + 1]
            C = grid[y + 1][x]
            D = grid[y + 1][x + 1]
            # Y = A(1-w)(1-h) + B(w)(1-h) + C(h)(1-w) + Dwh
            out_row.append(A*(1-x_frac)*(1-y_frac) + B*(x_frac)*(1-y_frac)
                           + C*(y_frac)*(1-x_frac) + D*(x_frac*y_frac))
        resized.append(out_row)
    return resized
import sys
import os
def GetFlagValue(flagvalue, strip=True):
    """Converts a raw flag string to a useable value.

    1. Expands @filename style flags to the content of filename.
    2. Copes with Python 3 strangeness of sys.argv: on Unix, sys.argv
       entries are not proper str objects -- the argv bytes are transcribed
       directly to str characters -- so the value is round-tripped through
       the filesystem encoding back to proper unicode.  See:
       https://docs.python.org/3/library/sys.html#sys.argv
       https://docs.python.org/3/library/os.html#os.fsencode
       https://www.python.org/dev/peps/pep-0383/

    Args:
      flagvalue: (str) raw flag value
      strip: (bool) Strip white space.
    Returns:
      Python2: unicode
      Python3: str
    """
    if not flagvalue:
        # empty/falsy values skip all decoding and expansion
        return flagvalue.strip() if strip else flagvalue
    if sys.version_info[0] < 3:
        # Python 2 hands us raw bytes in argv.
        flagvalue = flagvalue.decode('utf-8')
    # assertion: py2: flagvalue is unicode; py3: str (possibly odd encoding)
    if flagvalue.startswith('@'):
        # Subtle: do not re-encode the value -- it is already encoded in
        # the right format for file-open operations.
        with open(flagvalue[1:], 'rb') as f:
            flagvalue = f.read().decode('utf-8')
    elif sys.version_info[0] > 2:
        # convert the fs-specific encoding back to proper unicode
        flagvalue = os.fsencode(flagvalue).decode('utf-8')
    if strip:
        return flagvalue.strip()
    return flagvalue
def represents_int(string: str):
    """Return True when ``string`` can be parsed as an integer.

    Args:
        string (str): String to test

    Returns:
        bool
    """
    try:
        int(string)
    except ValueError:
        return False
    return True
def parse_brackets(line: str) -> tuple[int, str]:
    """
    Scan a bracket string.

    Returns ``(index, "")`` for the first corrupted closing bracket (one
    that does not match the most recent opener, or has no opener at all),
    or ``(-1, completion)`` where ``completion`` is the string of closing
    brackets needed to finish the line.
    """
    stack: list[str] = []
    opening_map = {
        ")": "(",
        "]": "[",
        "}": "{",
        ">": "<",
    }
    for index, bracket in enumerate(line):
        if bracket in "([{<":
            stack.append(bracket)
            continue
        # A closer with no pending opener is corrupt too; the previous
        # code crashed with IndexError on stack.pop() in that case.
        if not stack or stack.pop() != opening_map[bracket]:
            return index, ""
    # All closers matched: whatever is still open determines the
    # autocompletion (part 2).  The index -1 is a "no corruption" marker.
    closing_map = {value: key for key, value in opening_map.items()}
    autocomplete = "".join(closing_map[b] for b in reversed(stack))
    return -1, autocomplete
import requests
import re
def pubchemsyns(identifier):
    """Look up a compound by name on the PubChem PUG-REST API and return
    the InChIKey found among its synonyms ('' when none matches)."""
    apipath = "https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/"
    # retrieve full record if available based on name
    searchpath = 'name/' + identifier + '/synonyms/json'
    response = requests.get(apipath + searchpath).json()
    syns = response["InformationList"]["Information"][0]["Synonym"]
    inchikey = ""
    pattern = '^[A-Z]{14}-[A-Z]{10}-[A-Z]$'
    for candidate in syns:
        if re.search(pattern, candidate):
            # keep scanning: the last InChIKey-shaped synonym wins,
            # exactly as before
            inchikey = candidate
    return inchikey
def string_is_float(str_float):
    """Return True when the string parses as a float but not as an int
    (e.g. "1.5" -> True, "1" -> False, "abc" -> False)."""
    try:
        int(str_float)
        # parsed as an integer, so by this function's definition not a float
        return False
    except ValueError:
        pass
    try:
        float(str_float)
        return True
    except ValueError:
        return False
def client_error(message, http_status_code=400):
    """
    Build a client-error string in the consistent format expected by the
    calling system (API Gateway in this case).

    :param message: human-readable error description
    :param http_status_code: HTTP status to embed (default 400)
    :return: "ERROR::CLIENT::<status>::<message>"
    """
    return f"ERROR::CLIENT::{http_status_code}::{message}"
def checkPrice(p):
    """
    Validate a user-entered price.

    :param p: the user's entered price (string or number convertible to int)
    :return: True when the price is between 10 and 100 inclusive, else
        False.  Raises ValueError for non-integer input (unchanged
        behavior).
    """
    # a chained comparison already yields the desired bool
    return 10 <= int(p) <= 100
def set_of_county_tuples(cluster_list):
    """
    Input: A list of Cluster objects
    Output: Set of sorted county tuples, one per cluster (clusters with
    identical county groupings collapse into a single entry)
    """
    # tuples are immutable, so they can live in a set
    return {tuple(sorted(cluster.fips_codes())) for cluster in cluster_list}
def air_to_vac(air_wvs):
    """
    Convert air wavelengths to vacuum wavelengths following the VALD
    conversion:
    http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
    """
    sigma2 = (air_wvs/1.0e4)**2  # squared wavenumber term used by the formula
    refraction = (1 + 0.00008336624212083
                  + 0.02408926869968 / (130.1065924522 - sigma2)
                  + 0.0001599740894897 / (38.92568793293 - sigma2))
    return air_wvs*refraction
def get_zamid_s(zamid):
    """Return the excited-state flag encoded in a z-a-m nuclide id.

    The state is the last digit of the id: 1 = first excited state,
    0 = ground state.

    Parameters
    ----------
    zamid: str
        z-a-m id of a nuclide
    """
    return int(zamid[-1])
import os
def get_exec_path():
    """ Get the path to this executable, normalizing ``.pyc`` to ``.py``.

    Returns:
        the path as a string of this script
    """
    here = os.path.join(os.path.dirname(os.path.abspath(__file__)), __file__)
    return here[:-1] if here.endswith('.pyc') else here
import os
def buildmap(start_location, map_string, partial_match=True):
    """
    Walk the directory tree under ``start_location`` and return the file
    paths whose full path contains ``map_string`` (paths containing
    ``.git`` are always excluded).

    :param start_location: directory from where to start looking for the file
    :param map_string: value to match in the file path
    :param partial_match: (bool) Turn on partial matching.
    : Ex: 'foo' matches 'foo' and 'foo.old'. Defaults true. False adds a '/' to the end of the string.
    :return:
    list of file paths containing the given value.
    """
    needle = map_string if partial_match else "{}/".format(map_string)
    matches = []
    for dirpath, _dirnames, filenames in os.walk(start_location, topdown=False):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            if needle in full_path and '.git' not in full_path:
                matches.append(full_path)
    return matches
def parse_range_expr(range_expr):
    """ Parse a range expression into a (min, max) pair.

    Supported forms: None -> (None, None); "C+/-E" -> (C-E, C+E) with C
    defaulting to 0; "A+B" -> (A, B) with A defaulting to 0; "X" -> (0, X).
    """
    if range_expr is None:
        return (None, None)

    mid_str, found, ext_str = range_expr.partition("+/-")
    if found:
        # center +/- extent (center defaults to 0 when omitted)
        mid_val = float(mid_str) if mid_str else 0.0
        ext_val = float(ext_str)
        return (mid_val - ext_val, mid_val + ext_val)

    min_str, found, max_str = range_expr.partition("+")
    if found:
        # explicit min+max (min defaults to 0 when omitted)
        min_val = float(min_str) if min_str else 0.0
        return (min_val, float(max_str))

    # bare value: treated as the maximum of a range starting at 0
    return (0.0, float(range_expr))
def extract_ep_dataset_args(args, split):
    """ Collect the dataset-specific attributes present on ``args`` (plus
    the split name) so the dataset can be keyed for cache lookup."""
    ep_attrs = ("seed", "data_path", "tokenizer_path", "tokenizer_type",
                "context_tokenizer_path", "max_text_len", "episode_len",
                "pretrained_context_embedding_path")
    # silently skip attributes the args object does not define
    final_dict = {name: getattr(args, name)
                  for name in ep_attrs if hasattr(args, name)}
    final_dict["split"] = split
    return final_dict
def show_mac_to_dictionary(show_mac_address=''):
    """Parse 'show mac address' output into {port: [[mac, vlan], ...]}.

    Header/separator lines, short lines, and entries spanning multiple
    comma-separated ports are ignored.  Two accepted row shapes:

        2123   0008.aaaa.aaaa   dynamic  Yes   170   Po7
      * 345    0100.eeee.ffff   static   Yes     -   Po1

    i.e. either the vlan number comes first, or it is preceded by a
    single flag character (R, S, D or *).

    :param show_mac_address: iterable of output lines
    :return: dict keyed by short interface name; each value is a list of
        unique [mac_address, vlan_number_text] pairs
    """
    show_mac_dictionary = {}
    for line in show_mac_address:
        line_split = line.split()
        if len(line_split) <= 3:
            continue
        port = line_split[-1]
        if "," in port:
            # multi-port entries are not indexed
            continue
        if line_split[0].isnumeric():
            entry = [line_split[1], line_split[0]]          # [mac, vlan]
        elif line_split[0] in ("R", "S", "D", "*") and line_split[1].isnumeric():
            # NOTE: fixed a missing comma -- the old tuple ("R", "S", "D" "*")
            # silently concatenated to ("R", "S", "D*"), dropping every
            # '*'- and 'D'-flagged row from the result.
            entry = [line_split[2], line_split[1]]          # [mac, vlan]
        else:
            continue
        entries = show_mac_dictionary.setdefault(port, [])
        if entry not in entries:                            # de-duplicate
            entries.append(entry)
    return show_mac_dictionary
import os
def give_unique_name(file_name):
    """
    Build a unique file name for the given original file name.

    This keeps the server correct when two different uploads share the
    same name: uniqueness is achieved by prefixing a counter ("1_", "2_",
    ...) to the file name.  Name collisions are assumed to be rare.

    :param file_name: the original file name
    :return: ``file_name`` itself when unused, otherwise the first
        "<i>_<file_name>" that does not exist yet
    """
    if not os.path.exists(file_name):
        return file_name
    i = 1
    while os.path.exists('{}_{}'.format(i, file_name)):
        i += 1
    return '{}_{}'.format(i, file_name)
def set_slices(DA, **axes2inds):
    """
    Return a copy of DataArray ``DA`` reduced to single-element slices
    along the named axes, specified as keyword args ax1=ind1, ax2=ind2,
    etc. (each axis keeps element [ind:ind+1]).
    """
    result = DA
    for axis_name, index in axes2inds.items():
        result = result.axis[axis_name][index:index + 1]
    return result
def get_buff_header():
    """Column-header line for raw socket-buffer dumps (the padded entries
    keep the header aligned with the data rows)."""
    fields = (
        'recv', 'seqnum', 'sync', 'msiz', 'cksum', ' source', 'destin',
        'sel', 'dsiz', ' tsh', ' counter', (18 * ' ') + 'start', 'pstat',
        'num', 'fsrate', 'cutoff', 'gain', ' input', ' unit',
        ' adjustment', (20 * ' ') + 'end', 'rec', 'LOB', 'def', 'need',
    )
    return ' '.join(fields)
def ceil(a, b):
    """
    Returns the ceiling of a on b as an int.

    Accepts anything float() accepts (numbers or numeric strings).
    Note: the previous implementation computed int(a/b) + 1 for any
    non-integer quotient, but int() truncates toward zero, so negative
    quotients were off by one (e.g. ceil(-3, 2) gave 0 instead of -1).
    """
    import math
    return math.ceil(float(a) / float(b))
def market_cap(df, min_cap=200):
    """filters out stocks that don't meet the min_cap minimum capitalization
    criteria.

    :df: dataframe with a ``Market_Cap_Q1`` column (*1 million)
    :min_cap: minimum market_cap number (*1 million) for stocks to include in
    stock universe (default = 200)
    :returns: Dataframe with stocks that have market capitalization>min_cap
    """
    # the threshold was hard-coded to 200, silently ignoring min_cap
    return df[df.Market_Cap_Q1 > min_cap]
def _get_edge_weight_distributions(observed_network,
                                   permuted_networks,
                                   max_possible_edge_weight):
    """Helper function that returns the null distribution of each
    observed, weighted edge.

    For each edge of ``observed_network`` a histogram is built over the
    permuted networks: entry ``w`` counts how many permuted networks
    contain the (remapped) edge with weight ``w``.  Assumes integer edge
    weights in ``[0, max_possible_edge_weight)`` -- a weight equal to
    ``max_possible_edge_weight`` would raise IndexError (TODO confirm
    intended bound).
    """
    # one zero-filled histogram per observed edge
    edge_weight_distributions = {}
    for edge in observed_network.edges.keys():
        edge_weight_distributions[edge] = [0] * max_possible_edge_weight
    for permuted_network in permuted_networks:
        # map the permuted network's vertices back onto the observed
        # network's pathway vertices (presumably permutation-specific;
        # verify against convert_pathway_mapping's contract)
        vertex_conversion = observed_network.convert_pathway_mapping(
            permuted_network.pathways)
        for permuted_edge_id, edge in permuted_network.edges.items():
            edge_id = observed_network.remapped_edge(
                vertex_conversion, permuted_edge_id)
            # only edges that also exist in the observed network count
            if edge_id in edge_weight_distributions:
                edge_weight_distributions[edge_id][edge.weight] += 1
    return edge_weight_distributions
def attach(item, action):
    """Return a copy of ``item`` with ``action`` attached as a parse action
    (the original element is left untouched)."""
    duplicate = item.copy()
    return duplicate.addParseAction(action)
import struct
def convert_short(value: int):
    """ Pack ``value`` as a native-byte-order C short (matching the width
    of Pg's smallint); returned as a mutable bytearray. """
    packed = struct.pack("h", value)
    return bytearray(packed)
from typing import List
def arraysum(array: List[int]) -> int:
    """ Get the sum of all the elements in the array.

    Recursively splits the array in half and sums both halves (divide and
    conquer).  Unlike the previous version, the input list is NOT
    modified (the old code pop()-ed the caller's list empty) and the
    recursion depth is O(log n) instead of O(n), so large inputs no
    longer hit the recursion limit.

    Parameters
    ----------
    array: List[int]
        An array/list of integers

    Returns
    -------
    sum: int
        Sum of all the elements in the array
    """
    if not array:                 # base case: empty slice sums to 0
        return 0
    if len(array) == 1:           # base case: single element
        return array[0]
    mid = len(array) // 2         # divide ...
    return arraysum(array[:mid]) + arraysum(array[mid:])   # ... and conquer
import torch
def save_model(network, path):
    """
    Persist a (trained or untrained) neural network to a file.

    Only the ``state_dict`` (model parameters) is written, not the full
    module object.

    Parameters
    ----------
    network: pytorch object
        Pytorch object or custom pytorch object containing model information.
    path: str
        Location for saving the file, including file name.

    Returns
    -------
    None
    """
    torch.save(network.state_dict(), path)
    return None
def related_action(action=None, **kwargs):
    """ Attach a *Related Action* to a job.

    A *Related Action* shows up as a button on the Odoo view; clicking it
    executes ``action``, which must be the name of a method on the
    ``queue.job`` model (usually one that opens the form view of the
    record the job worked on).  Any extra keyword arguments are stored on
    the decorated function and transmitted to the action when invoked.

    Example usage:

    .. code-block:: python

        class ResPartner(models.Model):
            _inherit = 'res.partner'

            @api.multi
            @job
            @related_action(action='related_action_partner', extra_arg=2)
            def export_partner(self):
                # ...

    where ``related_action_partner`` is defined on ``queue.job`` and
    receives ``extra_arg=2``.
    """
    def decorate(func):
        # store the metadata on the job function itself; the queue
        # machinery reads these attributes back later
        func.related_action = action
        func.kwargs = kwargs
        return func
    return decorate
def ReadPairs(filename):
    """Read pairs and match labels from the given file.

    Each line must have at least five whitespace-separated fields; fields
    0 and 3 form the pair, and the label is 1 when fields 1 and 4 are
    equal, else 0.
    """
    pairs, labels = [], []
    with open(filename) as handle:
        for line in handle:
            # str.split() already discards surrounding whitespace
            fields = line.split()
            pairs.append((fields[0], fields[3]))
            labels.append(1 if fields[1] == fields[4] else 0)
    return pairs, labels
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.