| content (string, 35-416k chars) | sha1 (string, 40 chars) | id (int64, 0-710k) |
|---|---|---|
def frame_as_object(frame):
"""
Return a pandas DataFrame as NumPy `dtype` ``object``.
Useful for casting from a categorical frame.
"""
return frame.apply(lambda x: x.astype('object'))
|
0d165e571e73ab320a25245ce9ab2e76ca1f8f97
| 27,489
|
def get_main(heads):
"""Returns the indices of the tokens of the head of the sentence, or None if the sentence has 0 or several heads
Not in use for now, but could be useful to design tasks"""
r = []
s1 = False
for i,h in enumerate(heads.split(" ")):
l = int(h.strip("&;~"))
if l == 0 :
if s1 : # checks for tokens with head 0 after the end of first word
return None
s1 = s1 or ";" in h #";" marks the end of a word
r.append(i) #account for multi-tokens words
return r or None
|
055c7e868e1ad9bc33ff0cc1374c012439d3d948
| 27,491
|
from typing import Mapping
from typing import Any
from typing import List
from typing import Iterable
from typing import Tuple
def all_key_pairs_dot_notation(dict_obj: Mapping) -> Mapping[str, Any]:
"""
    Recursively iterate through a mapping and return a dictionary of all key-value pairs,
    with nested keys joined into dot-notation strings.
"""
def _all_key_pairs_dot_notation(_dict_obj: Mapping, prefix: List[str] = []) -> Iterable[Tuple[str, Any]]:
for key, value in _dict_obj.items():
if isinstance(value, dict):
prefix.append(str(key))
yield from _all_key_pairs_dot_notation(value, prefix)
prefix.pop()
else:
prefix.append(str(key))
yield ".".join(prefix), value
prefix.pop()
return {k: v for k, v in _all_key_pairs_dot_notation(dict_obj)}
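# A minimal usage sketch (illustrative, hypothetical input): nested keys are joined with '.'.
assert all_key_pairs_dot_notation({"a": {"b": 1, "c": {"d": 2}}, "e": 3}) == {
    "a.b": 1, "a.c.d": 2, "e": 3}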
|
7b1566676ff61ab1768b9387edb7d4701ebd8c7a
| 27,492
|
def index(m, val):
"""Return the indices of all ``val`` in m"""
m = list(m)
idx = []
if m.count(val) > 0:
idx = [i for i, j in enumerate(m) if j == val]
return idx
|
703d1eab466a622325648935f88bac0f72f62e7a
| 27,493
|
def parse_packet3(filp):
"""
Parse PKT3 commands from the given header file.
"""
packets = []
for line in filp:
if not line.startswith('#define '):
continue
line = line[8:].strip()
if line.startswith('PKT3_') and line.find('0x') != -1 and line.find('(') == -1:
packets.append(line.split()[0])
return packets
|
c2b7a1cbe94f06c8a9b822bdeabc12a2e4a10518
| 27,494
|
import math
def transform_features(df):
""" Add log and sqrt values
"""
# add log values for ols linear regression
df['log_star_ratings'] = df['star_ratings'].apply(lambda x: math.log(x+1, 10))
df['log_ticks'] = df['ticks'].apply(lambda x: math.log(x+1, 10))
df['log_avg_stars'] = df['avg_stars'].apply(lambda x: math.log(x+1, 10))
df['log_length'] = df['length_'].apply(lambda x: math.log(x+1, 10))
df['log_grade'] = df['grade'].apply(lambda x: math.log(x+2, 10))
df['log_on_to_do_lists'] = df['on_to_do_lists'].apply(lambda x: math.log(x+1, 10)) # Target
# add sqrt values for Poisson regression
df['sqrt_star_ratings'] = df['star_ratings'].apply(lambda x: math.sqrt(x))
df['sqrt_ticks'] = df['ticks'].apply(lambda x: math.sqrt(x))
df['sqrt_avg_stars'] = df['avg_stars'].apply(lambda x: math.sqrt(x))
df['sqrt_length'] = df['length_'].apply(lambda x: math.sqrt(x))
df['sqrt_grade'] = df['grade'].apply(lambda x: math.sqrt(x+1))
return df
|
6bed8b92e12cba62816dbd1a70c260d138f02ea4
| 27,496
|
import subprocess
import os
def guess_organization():
""" Guess the organization from `git config`. If that can't be found,
fall back to $USER environment variable.
"""
try:
stdout = subprocess.check_output('git config --get user.name'.split())
        org = stdout.decode().strip()
    except (OSError, subprocess.CalledProcessError):
org = os.environ["USER"]
return org
|
cef0ede8f728ffd38d90fbf772b2ba4a33ffa656
| 27,497
|
def devilry_user_verbose_inline(user):
"""
Returns the user wrapped in HTML formatting tags perfect for showing
the user inline.
"""
return {
'user': user
}
|
a44ca3efd53b9380dd85fe2e4b9908222d633c8d
| 27,500
|
def get_hidden_fields(form):
"""
Returns all the hidden fields of the form.
"""
return form.hidden_fields()
|
b7d0315a232ab0199e9b575a954664e478151ab2
| 27,501
|
def lines_into_traces (lines):
"""Convert a list of split ASCII text lines into traces (a list of lists of floats)"""
traces = []
num_of_traces = len(lines[0]) #work out how many traces from the no of columns
## make an empty list
for i in range(num_of_traces):
traces.append([])
## transpose lines into traces made from columns
for line in lines:
#print (line)
for i in range (num_of_traces):
#NEW AP
#print (line[i])
try:
                traces[i].append(float(line[i]))
            except (ValueError, IndexError):
#element is empty or not a number, so skip
continue
return traces
|
a14c1926b529b4bb198a12da7a6f7fab05373b8d
| 27,502
|
def extract_yyyy_mm_dd_hh_mm_ss_from_datetime64(dt64):
"""
    Extract separate fields for year, month, day, hour, min, sec from
a datetime64 object
Parameters
----------
dt64 : numpy.datetime64
a datetime64 object
Returns
-------
year, mon, day, hh, mm, ss : int
"""
s = str(dt64)
year = int(s[0:4])
mon = int(s[5:7])
day = int(s[8:10])
hh = int(s[11:13])
mm = int(s[14:16])
    ss = int(s[17:19])
#print year, mon, day, hh, mm, ss
return year,mon,day,hh,mm,ss
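# A minimal usage sketch (illustrative); assumes numpy is installed:
import numpy as np
assert extract_yyyy_mm_dd_hh_mm_ss_from_datetime64(
    np.datetime64("2021-03-04T05:06:07")) == (2021, 3, 4, 5, 6, 7)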
|
1cfca13ae4472d99df131acb07483d82aa8040c4
| 27,503
|
def _dirname_is_valid(testname, includenames, excludenames):
"""includenames and excludenames are sequence types. If either is empty,
nothing is excluded and/or all is included"""
if (
(not includenames or testname in includenames) and
testname not in excludenames
):
return True
else:
return False
|
759cfe91ea8c7fe4ffede0a126f05d72256e21c2
| 27,504
|
import os
import torch
import sys
def load_model(model,args,path="./train_test/trained_model/",name=None):
"""load the compressed model from input file path
:param model: model to load
:param args: argument user decided
:param path: string file path where the model exists
:param name: string filename to load
"""
name=f"comp_vmmodel_wRank{args.wRank}_uRank_{args.uRanks}_data_{args.data}\
_layer{args.layer_seed}_seed{args.seed}" if name is None else name
file=path+name+".pkl"
if os.path.exists(file):
state_dict=torch.load(file)
model.load_state_dict(state_dict)
        print(f"model restored from {file}")
else:
        print(name + '.pkl does not exist.')
print("Testing can only be done when the trained model exists.")
sys.exit()
return model
|
5844dacfc48e8e3bd2cefd60597cfbba70a5124c
| 27,505
|
import importlib
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin") # doctest: +SKIP
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
|
758c424c04a9faac74c88cf433dc0526adbca274
| 27,506
|
def prune_directed_edges(network):
"""removes edges that go against the direction of biosynthesis\n
| Arguments:
| :-
| network (networkx object): biosynthetic network with directed edges\n
| Returns:
| :-
| Returns a network with pruned edges that would go in the wrong direction
"""
nodes = list(network.nodes())
for k in nodes:
for j in nodes:
if network.has_edge(k,j) and (len(k)>len(j)):
network.remove_edge(k,j)
return network
|
fb6d9e348ee0a2bb87e1773cff7fa257b56699fa
| 27,507
|
import platform
def extra_link_args() -> list:
"""Platform dependent extras
Returns:
list: Extra link arguments
"""
args = []
if platform.system() == 'Darwin':
# Something with OS X Mojave causes libstd not to be found
args += ['-stdlib=libc++', '-mmacosx-version-min=10.12']
return args
|
86a4f6615a3bd67dd6740bc84fe1f4e7b44c8ab6
| 27,508
|
import configparser
def getConfig(configPath):
    """Read and return the configuration from the given path."""
    conf = configparser.ConfigParser()
conf.read(configPath)
return conf
|
41da127ab4ed5d7d20e57e23e65129bb622562f2
| 27,509
|
def forward(images, config, forward_fn, decode_fn, is_training=True, verbose=0):
    """Forward-pass for one stage. Returns a dictionary of output Tensors.
    Args:
        images: Dictionary of inputs
        config: configuration dictionary
        forward_fn: one of the backbone networks
decode_fn: one of the decoding functions (either with or without groups)
is_training: Whether the model is in training mode (for batch norm)
verbose: verbosity level
"""
embeddings = forward_fn(images,
is_training=is_training,
verbose=verbose,
**config)
outputs = { k: v for (k, v) in decode_fn(embeddings,
is_training=is_training,
verbose=verbose,
**config)
if v is not None}
if verbose == 2:
print('\n'.join(" \033[32m%s\033[0m: shape=%s, dtype=%s" % (
key, value.get_shape().as_list(), value.dtype)
for key, value in outputs.items()))
elif verbose == 1:
print('\n'.join(" *%s*: shape=%s, dtype=%s" % (
key, value.get_shape().as_list(), value.dtype)
for key, value in outputs.items()))
return outputs
|
618210f6de43f1cafac5c53b1966398e99239905
| 27,510
|
def overlapping(bins):
"""
Given a sorted list bins, check whether any are overlapping [....{;;;]----}.
Touching is OK: [....]{----}"""
s, e = 0, 0
for b in bins:
if s < b[1] and b[0] < e:
return True
s, e = b[0], b[1]
return False
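# A minimal usage sketch (illustrative):
assert overlapping([(0, 5), (4, 8)]) is True    # second bin starts before the first ends
assert overlapping([(0, 5), (5, 8)]) is False   # touching bins are OK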
|
a05d299dc2e25bfdbe9e50aa9dfe509b32635cbb
| 27,512
|
def template_name(_template_name):
"""custom template filename"""
def decorator(action_function):
action_function.template_name = _template_name
return action_function
return decorator
|
cc843c23413f6fd29ff8cab947266ba9711d11f7
| 27,514
|
import numpy
def getMSE(y, mean):
"""
Calculates the MSE (Mean Square Error) of the data given
This is the impurity function used for regression trees
"""
# print("\nMY Y\n",mean)
if len(y) == 0: # Done in order to avoid obtaining nan values if there are no elements contained in y
return 0
mse = numpy.average((y-mean)**2)
# print("mse\t",mse)
return mse
|
706d6f92c2d240f6487ccb58b31a258f5f578c48
| 27,515
|
def _UTMLetterDesignator(Lat):
"""This routine determines the correct UTM letter designator for the given latitude
returns 'Z' if latitude is outside the UTM limits of 84N to 80S
Written by Chuck Gantz- chuck.gantz@globalstar.com"""
if 84 >= Lat >= 72: return 'X'
elif 72 > Lat >= 64: return 'W'
elif 64 > Lat >= 56: return 'V'
elif 56 > Lat >= 48: return 'U'
elif 48 > Lat >= 40: return 'T'
elif 40 > Lat >= 32: return 'S'
elif 32 > Lat >= 24: return 'R'
elif 24 > Lat >= 16: return 'Q'
elif 16 > Lat >= 8: return 'P'
elif 8 > Lat >= 0: return 'N'
elif 0 > Lat >= -8: return 'M'
elif -8> Lat >= -16: return 'L'
elif -16 > Lat >= -24: return 'K'
elif -24 > Lat >= -32: return 'J'
elif -32 > Lat >= -40: return 'H'
elif -40 > Lat >= -48: return 'G'
elif -48 > Lat >= -56: return 'F'
elif -56 > Lat >= -64: return 'E'
elif -64 > Lat >= -72: return 'D'
elif -72 > Lat >= -80: return 'C'
else: return 'Z' # if the Latitude is outside the UTM limits
|
c632a1af990ffa4c26c9f14cc9bfa82bd4825891
| 27,516
|
from pathlib import Path
def get_readme():
"""
    Retrieve the content of the project's README file.
Returns:
str: The content of README.md as a string.
"""
readme_dir = Path(__file__).parent
return (readme_dir / "README.md").read_text()
|
6e870a4b2026728de7163a5246f1144690b84fa2
| 27,517
|
def insert_usid_policy(database, lr_dst, rl_dst, lr_nodes, rl_nodes,
table=None, metric=None, l_grpc_ip=None,
l_grpc_port=None, l_fwd_engine=None,
r_grpc_ip=None, r_grpc_port=None,
r_fwd_engine=None, decap_sid=None, locator=None):
"""
    Insert a uSID policy into the 'usid_policies' collection of an ArangoDB
    database.
:param database: Database where the uSID policy must be saved.
:type database: arango.database.StandardDatabase
:param lr_dst: Destination (IP address or network prefix) for the
left-to-right path.
:type lr_dst: str
:param rl_dst: Destination (IP address or network prefix) for the
right-to-left path.
:type rl_dst: str
:param lr_nodes: List of nodes (names or uN sids) making the left-to-right
path.
:type lr_nodes: list
:param rl_nodes: List of nodes (names or uN sids) making the right-to-left
path.
:type rl_nodes: list
:param table: FIB table where the policy must be saved.
:type table: int, optional
:param metric: Metric (weight) to be used for the policy.
:type metric: int, optional
:param l_grpc_ip: gRPC IP address of the left node, required if the left
node is expressed numerically in the nodes list.
:type l_grpc_ip: str, optional
:param l_grpc_port: gRPC port of the left node, required if the left
node is expressed numerically in the nodes list.
:type l_grpc_port: str, optional
:param l_fwd_engine: forwarding engine of the left node, required if the
left node is expressed numerically in the nodes list.
:type l_fwd_engine: str, optional
:param r_grpc_ip: gRPC IP address of the right node, required if the right
node is expressed numerically in the nodes list.
:type r_grpc_ip: str, optional
:param r_grpc_port: gRPC port of the right node, required if the right
node is expressed numerically in the nodes list.
:type r_grpc_port: str, optional
:param r_fwd_engine: Forwarding engine of the right node, required if the
right node is expressed numerically in the nodes
list.
:type r_fwd_engine: str, optional
:param decap_sid: uSID used for the decap behavior (End.DT6).
:type decap_sid: str, optional
:param locator: Locator prefix (e.g. 'fcbb:bbbb::').
:type locator: str, optional
:return: True.
:rtype: bool
    :raises arango.exceptions.DocumentInsertError: If the insert fails.
"""
# Build a dict-representation of the uSID policy
policy = {
'lr_dst': lr_dst,
'rl_dst': rl_dst,
'lr_nodes': lr_nodes,
'rl_nodes': rl_nodes,
'table': table,
'metric': metric,
'l_grpc_ip': l_grpc_ip,
'l_grpc_port': l_grpc_port,
'l_fwd_engine': l_fwd_engine,
'r_grpc_ip': r_grpc_ip,
'r_grpc_port': r_grpc_port,
'r_fwd_engine': r_fwd_engine,
'decap_sid': decap_sid,
'locator': locator
}
# Get the uSID policy collection
# This returns an API wrapper for "usid_policies" collection
usid_policies = database.collection(name='usid_policies')
# Insert the policy
    # The parameter silent is set to True to avoid returning document metadata
    # This allows us to save resources
return usid_policies.insert(document=policy, silent=True)
|
c44e20e010b3c146e4eebe729a7bb8cadbda0646
| 27,518
|
def isArgumentlessJavaOption(line):
""" Determine whether a given line contains a command line option that does
not take arguments.
Parameters
----------
line : str
A line of the build output
Returns
-------
bool
True if the line contains an option that doesn't take arguments
"""
argumentlessOptions = ["agentlib",
"agentpath",
"disableassertions",
"D",
"da",
"enableassertions",
"ea",
"enablesystemassertions",
"esa",
"disablesystemassertions",
"dsa",
"javaagent",
"jre-restrict-search",
"no-jre-restrict-search",
"showversion",
"splash",
"verbose",
"version",
"X"]
for a in argumentlessOptions:
if line.startswith("-{}".format(a)):
return True
return False
|
3e967cecba22413e25022e9e36154e2a78f7ad13
| 27,520
|
def rssError(yArr,yHatArr):
    """ Compute the prediction error (sum of squared errors).
    INPUT:
        yArr: actual values (array)
        yHatArr: predicted values (array)
    OUTPUT:
        ((yArr-yHatArr)**2).sum(): the sum of squared errors
"""
return ((yArr-yHatArr)**2).sum()
|
cd5fc94e1120db8e5e23455eb4aa4dd110c02660
| 27,521
|
import random
def handle_float(x):
"""
handle_float returns a random float between 0 and 2x.
"""
return x * random.uniform(0, 2)
|
7f0ba1521e5afde3d7bd75cd03a8e3cc76a72dcf
| 27,523
|
def curl(vect, coord_sys):
"""
Returns the curl of a vector field computed wrt the base scalars
of the given coordinate system.
Parameters
==========
vect : Vector
The vector operand
coord_sys : CoordSysCartesian
The coordinate system to calculate the curl in
Examples
========
>>> R = CoordSysCartesian('R')
>>> v1 = R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
>>> curl(v1, R)
0
>>> v2 = R.x*R.y*R.z*R.i
>>> curl(v2, R)
R.x*R.y*R.j + (-R.x*R.z)*R.k
"""
return coord_sys.delop.cross(vect).doit()
|
18af3e538e3d7a6d9970d2bc6f5c2e72b2ebb6c7
| 27,525
|
def substract(a,b):
"""Subtracts b from a and returns the result."""
return a - b
|
8c8c4ba7692671ea1c26ba8faceed0b04d5c16d7
| 27,526
|
import os
import json
def __is_nikerun_valid(file_path):
"""Check if it is a valid format for activity files.
Parameters
----------
file_path : str
Path to the file to be read.
Returns
-------
    It returns True if the file is in a valid format for activity handling.
"""
_, ext = os.path.splitext(file_path)
if ext not in [".json"]:
return False
with open(file_path) as json_file:
activity = json.load(json_file)
if not activity.get("metrics"):
return False
metrics = [
metric["type"]
for metric in activity["metrics"]
if metric["type"] in ["latitude", "longitude"]
]
if len(metrics) != 2:
return False
return True
|
a88f7b23b69d67d1a60184f166f2d4876dc45574
| 27,527
|
def get_module_pkg_release(kernelPackage, driverPackage):
"""In our scheme, we build up the kmod package release field from the
kernel release field as well as the driver version."""
start = kernelPackage.release[:kernelPackage.release.rfind('.')]
end = kernelPackage.release[kernelPackage.release.rfind('.'):]
return start + '.r' + driverPackage.version + end
|
7e02dc20d30a27456eb13df3a26cb4bba71bb6d4
| 27,529
|
import itertools
def _grouper(n, iterable, fillvalue=None):
"""Collects data into fixed-length chunks or blocks.
This private function is wrapped in :func:`_show_mallet_document_topics()`.
Args:
n (int): Length of chunks or blocks
iterable (object): Iterable object
        fillvalue: Value used to pad the last chunk if the iterable cannot be divided into evenly-sized chunks.
Returns: n-sized chunks
"""
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
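# A minimal usage sketch (illustrative):
assert list(_grouper(2, "ABCDE", fillvalue="x")) == [("A", "B"), ("C", "D"), ("E", "x")]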
|
c8747931fa430791927f5706f5380717fd4d51f8
| 27,531
|
def a1000_fetch_child_metadata(a1000_fetch_function, interesting_children):
"""
:param a1000_fetch_function: this is a function that reads ticore result from a1000 for specific sha1 hash.
function is called with only one parameter - sample sha1 hash.
:param interesting_children: list of interesting_children
:return: interesting_children with ticore results
"""
enriched_children = []
for child in interesting_children:
child_meta = a1000_fetch_function(child.sha1)
if child_meta:
child.add_static_analysis(child_meta)
enriched_children.append(child)
return enriched_children
|
ad96b024f0397b63c525522c75a546b44c9310ff
| 27,532
|
def one_flash(lma_df, flash_id):
"""
    Given an LMA dataframe and a flash id, return the dataframe restricted to the VHF sources with the specified flash id.
"""
return lma_df[lma_df.flash_id == flash_id]
|
c9acea38ac399a8030b0616c74906ad3d6c1f915
| 27,534
|
def _mpas_to_netcdf_calendar(calendar):
"""
Convert from MPAS calendar to NetCDF4 calendar names.
"""
if calendar == 'gregorian_noleap':
calendar = 'noleap'
elif calendar != 'gregorian':
raise ValueError('Unsupported calendar {}'.format(calendar))
return calendar
|
bdeaf343deeb4beb8c04654ab5104425396e98be
| 27,535
|
def aslist(item):
"""
aslist wraps a single value in a list, or just returns the list
"""
return item if type(item) == list else [item]
|
e9b3a9f189f74243d713e896dfbbd002e78abada
| 27,536
|
from typing import Any
def is_operation(obj_or_type: Any) -> bool:
"""Return if object represents a resource operation."""
return getattr(obj_or_type, "_fondat_operation", None) is not None
|
57cd493fb99d0f0d54e6d032cf27c65f18f9c031
| 27,539
|
def edges_on_ring(ring):
"""
Get all the edges from a ring.
    :param ring: The ring from which to obtain the edges.
    :return: A set of edges.
"""
edges = set()
prev = ring[-1]
for c in ring:
edges.add(frozenset({prev, c}))
prev = c
return edges
|
ab976c485f1424e7073f93bf429cfe4efdf65813
| 27,540
|
import re
def get_sortable_version(version):
"""
Add leading zeros to version number components to
ensure that 1.10.x is > 1.2.x
"""
version_components = re.findall(r"\d+", version)
print(version_components)
sortable = []
for component in version_components:
sortable.append(format(int(component), "04d"))
return ".".join(sortable)
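# A minimal usage sketch (illustrative): zero-padding makes plain string comparison order versions correctly.
assert get_sortable_version("1.10.2") == "0001.0010.0002"
assert get_sortable_version("1.2.0") < get_sortable_version("1.10.2")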
|
9b275f62784218daeb4cfae7527be8f86e88a9f7
| 27,541
|
def rint(value):
"""
>>> rint(0.5) == 0
True
>>> rint(0.501) == 1
True
>>> rint(1.5) == 2
True
"""
ret = round(value)
return 2.0 * round(value / 2.0) if abs(ret - value) == 0.5 else ret
|
e97253c1fb043ac5664489e7f80e945b787381b7
| 27,542
|
def negate(fn):
""" negate(fn) -> not bool(fn) """
def wrap(*args, **kwargs):
return not fn(*args, **kwargs)
return wrap
|
532ab3e656ae4651a59be8ed735c3c35fdb447ba
| 27,543
|
import shlex
import subprocess
def execute_process(command, input=None):
"""
A helper function for subprocess.
Args:
command (str): The command line level syntax that would be written in a
shell script or a terminal window
Returns:
dict: Results in a dictionary
"""
    # Validate that command is a string
if not isinstance(command, str):
raise TypeError("Command must be a str type")
# Format the command
command = shlex.split(command)
# Run the command
process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
universal_newlines=True
)
if input:
(stdout, stderr) = process.communicate(input=input)
else:
(stdout, stderr) = process.communicate()
return {
"stdout": (stdout).strip(),
"stderr": (stderr).strip() if stderr != None else None,
"exitcode": process.returncode,
"success": True if process.returncode == 0 else False
}
|
bc0232542523945b5ead595645de7245c3b2310c
| 27,544
|
def package_country_rollup_json(data):
"""
packages your dictionary nicely for writing out to json
"""
out = {}
for k, d in data.items():
        timeseries = []
        for y, v in d.items():
            timeseries.append({"year": y, "emissions": v})
        out[k] = timeseries
return out
|
aee12b283bca8fee858bb117a98d78c52c98b00b
| 27,546
|
def obj_label_to_kitti_fmt(obj_label):
"""Converts an object label to kitti format
[type, truncation, occlusion, alpha(observation angle),
(x1, y1, x2, y2),
(h, w, l),
(x, y, z), ry,
score]
"""
return [
obj_label.type, -1, -1, obj_label.alpha,
obj_label.x1, obj_label.y1, obj_label.x2, obj_label.y2,
obj_label.h, obj_label.w, obj_label.l,
obj_label.t[0], obj_label.t[1], obj_label.t[2], obj_label.ry,
obj_label.score
]
|
9f9c1545cd0c098055014f52b980115fd7469d98
| 27,547
|
def get_ranking16():
"""
Return the ranking with ID 16.
"""
return [
("COORD.PRoPHET", 0.475401),
("DF.PRoPHET", 0.472054),
("CnR.LTS", 0.380770),
("SimBetTS.L8", 0.380006),
("SimBetTS.L16", 0.379992),
("CnR.DestEnc", 0.379448),
("LSF-SnW.L16", 0.377400),
("DF.DestEnc", 0.373788),
("COORD.DestEnc", 0.373536),
("SimBetTS.L4", 0.372440),
("LSF-SnW.L8", 0.368945),
("DF.LTS", 0.366043),
("COORD.LTS", 0.365320),
("LSF-SnW.L4", 0.344986),
("CnF.PRoPHET", 0.344899),
("CnF.DestEnc", 0.340809),
("CnF.LTS", 0.336824),
("SnF.L8", 0.333813),
("SnF.L4", 0.331080),
("CnR.PRoPHET", 0.328371),
("SnF.L2", 0.328271),
("SnF.L16", 0.325965),
("SimBetTS.L2", 0.319820),
("LSF-SnW.L2", 0.283363),
("CnR.Enc", 0.253889),
("DF.Enc", 0.196428),
("COORD.Enc", 0.185271),
("Epidemic", 0.176182),
("Direct", 0.144637),
("EBR.L16", 0.144275),
("SnW.L16", 0.144196),
("EBR.L2", 0.139577),
("SnW.L2", 0.139347),
("SnW.L8", 0.137288),
("EBR.L8", 0.137283),
("EBR.L4", 0.136547),
("SnW.L4", 0.136425),
("CnF.Enc", 0.117134),
]
|
988d26f7a123da18f0adb025161bedc1be310876
| 27,548
|
import time
def timestamp_from_objectid(objectid):
    """Extract the timestamp from an ObjectId (offset by 28800 seconds, i.e. UTC+8)."""
result = 0
try:
result = time.mktime(objectid.generation_time.timetuple()) + 28800
    except Exception:
pass
return result
|
2cc46ba7644a8075d624909b3722ab83f79d49c4
| 27,549
|
def annotations_to_lower(annotations):
"""
    Receives a list of strings and converts them to lowercase.
"""
return [ann.lower() for ann in annotations]
|
f6175012ddf4acb973de70179eea5438894b4358
| 27,550
|
def task_release():
"""Release wheel and source distribution to pypi"""
return {
"actions": ["python -m twine upload --repository pypi dist/*"],
"task_dep": ['wheel', 'source']
}
|
a78f026a0a682eee94efe0cf940206bbb8eb8716
| 27,551
|
import re
def _read_words_from_file(filename: str):
"""extract 'words' (space-separated) from file, return as string array"""
with open(filename, 'r') as file:
text = ''.join(file.readlines())
text = text.replace('\n', ' ').lower()
text = re.sub(" +", " ", text)
words = text.split(' ')
return words
|
d5de6b5b20bcfd10cd4ec8d2b7f51267ea2edd3e
| 27,552
|
def do_func(args_dict, diy_func, modify=True):
"""
    Run user-defined functions, typically used to adjust certain input arguments.
    :param args_dict: dict of arguments to (possibly) adjust
    :param diy_func: mapping from argument name to the function applied to that argument's value
    :param modify: whether to modify args_dict in place
    :return:
"""
if not args_dict:
return True, None
if modify:
for k in args_dict:
if k in diy_func:
args_dict[k] = diy_func[k](args_dict[k])
|
cbd18b1ab2201f35fd810d199cbf4cf58748eae9
| 27,554
|
import six
def fix_keys(fix_func, conf):
"""Apply fix_func on every key of a dict"""
return {fix_func(field): value for field, value in six.iteritems(conf)}
|
0329661b5f2ccb4e1f260a92dc8fe36d9c945d31
| 27,555
|
import argparse
def _create_argument_parser():
"""Create an argparse argument parser."""
parser = argparse.ArgumentParser(
description="""\
al-btn-api, Alabama data API
""",
)
parser.add_argument(
"-f",
"--file",
dest="file",
type=str,
default="",
# nargs=1,
help="Local data file.",
)
parser.add_argument(
"-p",
"--print",
dest="print",
default=False,
action="store_true",
help="Print data to be uploaded, as valid JSON, without uploading the data.",
)
# MongoDB name.
parser.add_argument(
"-d",
"--database",
dest="database",
type=str,
default="albtn",
nargs=1,
help="MongoDB database name.",
)
# MongoDB connection string.
parser.add_argument(
"-c",
"--connection",
dest="connection",
type=str,
default="mongodb://localhost:27017/",
nargs=1,
help="MongoDB connection string.",
)
# url to slurp
parser.add_argument(
"-u",
"--url",
dest="url",
type=str,
default="https://en.wikipedia.org/wiki/List_of_geographic_centers_of_the_United_States",
nargs=1,
help="Source URL containing centroid estimate data.",
)
return parser
|
ea5278c8e24b5674204957ac0dda5ade5feab466
| 27,557
|
import logging
import subprocess
def _check_output(*args, **kwargs):
"""Wrapper for subprocess.check_output, with logging."""
logging.info("Running: %s, %s; cmdline: %s.", args, kwargs, " ".join(*args))
return subprocess.check_output(*args, **kwargs)
|
7c3f1b6cd26ff2060251cd3d6cded1af424ee137
| 27,558
|
import os
def getCurrentFileName():
""" Will return the file name of file_Utils. Copy into file needed."""
return os.path.basename (__file__)
|
373339f1e0090d869fc3ae7e14fd7d688ecfbf50
| 27,560
|
def check_and_get_directory(files):
"""
Check if all provided files have the same directory and return it.
:param files: A list of files to check and get directory from.
:return: Base directory of the files.
    :raise: RuntimeError if the files do not have the same base directory.
"""
if not files:
raise ValueError('Files must not be empty!')
head, *tail = files
if not tail:
return head.parent
tail_parent = check_and_get_directory(tail)
if head.parent == tail_parent:
return head.parent
raise RuntimeError('Files do not have the same directory: {} != {}'.format(head.parent, tail_parent))
|
2e13d63fccaf18213c9b9108869774b15bdc801a
| 27,561
|
def subsetDf(data_input):
"""
Take a DataFrame and if it's under 500 unique objects, simply return it.
If the DataFrame is over 500 unique objects, return the first
500 unique objects.
"""
unique_obj = data_input.index.unique()
unique_obj_list = list(unique_obj)
if len(unique_obj) <= 500:
return data_input
else:
first500 = unique_obj_list[0:500]
data_input = data_input[data_input.index.isin(first500)]
return data_input
|
e077880b35941a3032cd799f43d979225f163bde
| 27,562
|
def create_response(status_code, status_description="", body=""):
"""Configure a response JSON object."""
response = {"isBase64Encoded": False, "headers": {"Content-Type": "text/html;"}}
if not status_description:
description = {
200: "200 OK",
400: "400 Bad Request",
401: "401 Unauthorized",
405: "405 Method Not Allowed",
500: "500 Internal Server Error",
}
status_description = description.get(status_code)
response["statusCode"] = status_code
response["statusDescription"] = status_description
response["body"] = body
return response
|
55732ec27a46dd0d971a1f5eccde33414de7af71
| 27,563
|
def stem_word(word: str) -> str:
"""
Stemming words
:param word: word
:return: stemmed word
"""
if len(word) < 4:
return word
# remove 'ට'
if word[-1] == 'ට':
return word[:-1]
# remove 'ද'
if word[-1] == 'ද':
return word[:-1]
# remove 'ටත්'
if word[-3:] == 'ටත්':
return word[:-3]
# remove 'එක්'
if word[-3:] == 'ෙක්':
return word[:-3]
# remove 'එ'
if word[-1:] == 'ෙ':
return word[:-1]
# remove 'ක්'
if word[-2:] == 'ක්':
return word[:-2]
# remove 'ගෙ' (instead of ගේ because this step comes after simplifying text)
if word[-2:] == 'ගෙ':
return word[:-2]
# else
return word
|
4f1f6897f553b864c6a3f626c5b5c1aac3fb5ff1
| 27,565
|
from typing import Counter
def representative(table):
"""
Given a table with members of the same clonotype, return a representative
as a dict.
"""
n = len(table)
if n == 1:
return table.iloc[0]
elif n == 2:
result = table.iloc[0]
else:
c = Counter()
for row in table.itertuples():
c[row.VDJ_nt] += row.count
most_common_vdj_nt = c.most_common(1)[0][0]
result = table[table['VDJ_nt'] == most_common_vdj_nt].iloc[0]
result.at['count'] = table['count'].sum()
return result
|
e3ec73a47d77e1c0b9f5907c3d57bc2e244888cf
| 27,567
|
def get_foreground(prop):
"""
Foreground Color property
"""
return prop
|
fed76da4aabc3d93186bd4d413879c801ccaf784
| 27,568
|
def HV(in_: list):
"""
Outputs H-V, computes difference of first 2 elements of a list.
"""
out = in_[0] - in_[1]
return out
|
ed58cc24297fbe1b7f6a28ed98b9ffa0dcb80050
| 27,569
|
import os
def extract_images(post_block):
"""
Extracts image filename + uploaded image name for all images in a post/reference.
Returns a list of objects containing filename + uploaded name
"""
images_container = post_block.find('div', 'images', recursive=False)
if not images_container:
return None
# well laid out figs + figcaptions make life easy for images + image names
images = images_container.findAll('figure', recursive=False)
return [{
'file': os.path.split(image.find('a')['href'])[1], # filename on disk
'name': image.find('figcaption').getText() # filename as posted
} for image in images]
|
30f7dfcb76bd6a25953cad97ac9a639c49736e24
| 27,570
|
def should_attach_entry_state(current_api_id, session):
    """Returns whether or not entry state should be attached
:param current_api_id: Current API selected.
:param session: Current session data.
:return: True/False
"""
return (
current_api_id == 'cpa' and
bool(session.get('editorial_features', None))
)
|
c22c592c2b65f143d0df5c0735a0c21f7347ee71
| 27,573
|
def gen_sol(create, min_constraints, max_constraints):
""" Generate a random individual for the population. """
return create(min_constraints, max_constraints)
|
75c0364b09a1a16d95ae28c1bead7685b54fdcf8
| 27,574
|
def create_flanking_regions_fasta(genome, dataframe, flanking_region_size):
"""
Makes batch processing possible, pulls down small
region of genome for which to design primers around.
This is based on the chromosome and position of input file.
Each Fasta record will contain:
>Sample_Gene_chr:pos__
Seq of flanking region
Args:
genome (list): genome list of tuples (header, seq).
dataframe (pandas object): dataframe with sample info.
flanking_region_size (int): length of sequence upstream and downstream of
            input coordinate position to pull as sequence to design primers around.
Returns:
output (list): list of tuples with (header, seq) where seq is flanking region
and header is sample ID.
"""
output = []
for headers, seqs in genome:
chrm = str(headers)
seq = str(seqs)
for gene, sample, chrom, pos in zip(dataframe.Gene, dataframe.Sample,
dataframe.Chr, dataframe.Pos):
if str(chrom) == chrm:
header = str(str(sample)+"_"+str(gene)+"_"+str(chrom)+":"+str(pos)+"__")
flank_seq = seq[int(pos)-int(flanking_region_size):\
int(pos)+int(flanking_region_size)]
output.append((header, flank_seq.upper()))
return output
|
82a09f847c4533e95f7de542fc80654e19a96bf6
| 27,575
|
import random
def get_random_choice(word_dist):
"""
Given a word distribution, pick one at random based on how common
it is.
Args:
word_dist (FreqDist) - a frequency distribution of words
Returns:
string - a random word from word_dist
"""
total_samples = word_dist.N()
random_sample = random.randint(0, total_samples)
running_total = 0
# iterate over all the possible bins
for word_bin in word_dist.most_common(word_dist.B()):
# add the number of incidences of the current word to the total
running_total += word_bin[1]
# if the random number falls into the current bucket, return this word
if random_sample <= running_total:
return word_bin[0]
|
23f72c57fb7bbac8e896621fcd4e235c2efa9008
| 27,578
|
def sizesim(sizeA, sizeB):
"""
Calculate the size similarity pct for the two entries
compares the longer of entryA's two alleles (REF or ALT)
backwards compat
"""
return min(sizeA, sizeB) / float(max(sizeA, sizeB)), sizeA - sizeB
|
1bde43c34a61d2b1bf8aeff56bda31b453c81085
| 27,579
|
import re
def replace_argument(string, *, command_infos, command_id, **_):
"""Replaces links to other arguments in the same command."""
def replace(match):
argument_name = match.group(1).lstrip("-")
matching_args = [
arg
for info in command_infos
if info['id'] == command_id
for arg in info['arguments']
if arg['flags'][0].lstrip("-") == argument_name
]
if len(matching_args) != 1:
raise TypeError(
"{} argument{} named {} in {}".format(
"more than one" if len(matching_args) > 0 else "no",
"" if len(matching_args) > 0 else "s",
argument_name,
command_id,
)
)
arg = matching_args[0]
return "[{}](#{})".format(arg['flags'][0], arg['id'])
return re.sub(r"@argument\(([^)]*)\)", replace, string)
|
51fde687e40176489eac9e43b429eb63dc441649
| 27,580
|
def sum_list(list_of_list):
"""Concatenates a list of python list."""
final_list = []
for l in list_of_list:
final_list += l
return final_list
|
243cc9e88f62a2b0323335703204c4e2b416a5e1
| 27,581
|
import pickle
def pickleload(pkl_file):
""" Load objects from file with pickle """
with open(pkl_file, 'rb') as output:
data2 = pickle.load(output)
return data2
|
4e4e8e97840fd3f1ab4933132c88b38a9618f8dd
| 27,582
|
def decode_to_string(obj):
"""
Convert any type to string
"""
if isinstance(obj, bytes):
return obj.decode()
return str(obj)
|
a55f9e0246a5eb3a84e7363f478962e134eb386a
| 27,584
|
def create_url_with_query_parameters(base_url: str, params: dict) -> str:
"""Creates a url for the given base address with given parameters
as a query string."""
parameter_string = "&".join(f"{key}={value}"
for key, value in params.items())
url = f"{base_url}?{parameter_string}"
return url
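# A minimal usage sketch (illustrative; the URL and parameters are hypothetical):
assert create_url_with_query_parameters(
    "https://example.com/api", {"page": 1, "q": "test"}
) == "https://example.com/api?page=1&q=test"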
|
e148540316cc38e344228ed82d7e5f9769de5fe6
| 27,586
|
def get_patched_get_url(patched_urlopen, testcase=None):
"""Get the URL of the GET request made to the patched urlopen() function.
Expects that the patched function should have been called a single time with the url as the only
positional argument and no keyword arguments.
:param patched_urlopen: value returned when entering the context manager created by patch_urlopen.
:type patched_urlopen: unittest.mock.Mock
:param testcase: Test case currently being run, which is used to make asserts
:type testcase: unittest.TestCase
"""
args, kwargs = patched_urlopen.call_args
if testcase is not None:
testcase.assertEqual(patched_urlopen.call_count, 1)
testcase.assertEqual(len(args), 1)
testcase.assertEqual(len(kwargs), 0)
return args[0]
|
6d03cb418214d0f5d463a9cfe3fd6d8c31c7830e
| 27,588
|
def looks_like_sql(s: str) -> bool:
"""
Determine if string `s` looks like an SQL query.
:param str s: The string to detect.
:return: True if the string looks like an SQL, False otherwise.
"""
sql_keywords = {'select', 'update', 'union', 'delete', 'from', 'table', 'insert', 'into'}
s = s.lower()
for k in sql_keywords:
if k in s:
k_index = s.find(k)
# what's before k? is it a whitespace if it's not empty?
if k_index > 0:
before = s[k_index - 1]
if before not in " /;":
continue
# what's after k? is it a whitespace?
following = s[k_index + len(k) : ]
if not following or following.startswith(" "):
return True
return False
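# A minimal usage sketch (illustrative):
assert looks_like_sql("SELECT name FROM users") is True
assert looks_like_sql("selective reporting") is False   # keyword not followed by a space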
|
e7c89d9f1d36aad38cdd3b8e282ecfb528580f04
| 27,589
|
def serialize_dict(the_dict, full=True, offset='small'):
"""serialize_dict."""
if the_dict:
text = []
for key in sorted(the_dict):
if offset == 'small':
line = '%10s: %s' % (key, the_dict[key])
elif offset == 'large':
line = '%25s: %s' % (key, the_dict[key])
elif offset == 'very_large':
line = '%50s: %s' % (key, the_dict[key])
else:
raise Exception('unrecognized option: %s' % offset)
line = line.replace('\n', ' ')
if full is False:
if len(line) > 100:
line = line[:100] + ' ... ' + line[-20:]
text.append(line)
return '\n'.join(text)
else:
return ""
|
15b3fe46043d28c5a2a5c4e802b289b22704d9b1
| 27,590
|
import six
def _check_input(idadf, target, features, ignore_indexer=True):
"""
Check if the input is valid, i.e. if each column in target and features
exists in idadf.
Parameters
----------
target: str or list of str
A column or list of columns to be used as target
features: str or list of str
A column or list of columns to be used as feature
ignore_indexer: bool, default: True
If True, remove the indexer from the features set, as long as an
indexer is defined in idadf
"""
#import pdb ; pdb.set_trace()
if target is not None:
if isinstance(target, six.string_types):
if target not in idadf.columns:
raise ValueError("Unknown target column %s"%target)
target = [target]
else:
if hasattr(target, '__iter__'):
target = list(target)
for x in target:
if x not in idadf.columns:
raise ValueError("Unknown target column %s"%x)
if features is not None:
if isinstance(features, six.string_types):
if features not in idadf.columns:
raise ValueError("Unknown feature column %s"%features)
features = [features]
else:
if hasattr(features, '__iter__'):
features = list(features)
for x in features:
if x not in idadf.columns:
raise ValueError("Unknown feature column %s"%x)
if target is None:
if len(features) == 1:
raise ValueError("Cannot compute correlation coefficients of only one"+
" column (%s), need at least 2"%features[0])
else:
if target is not None:
if len(target) == 1:
features = [x for x in idadf.columns if x not in target]
else:
features = list(idadf.columns)
else:
features = list(idadf.columns)
## Remove indexer from feature list
# This is useless and expensive to compute with a primary key
if ignore_indexer is True:
if idadf.indexer:
if idadf.indexer in features:
features.remove(idadf.indexer)
# Catch the case where users ask for the correlation between the two same columns
#import pdb ; pdb.set_trace()
if target == features:
if len(target) == 1:
raise ValueError("The correlation value of two same columns is always maximal")
if target is None:
if features is None:
target = list(idadf.columns)
else:
target = features
return target, features
|
1f133839bf0c10396bdcf036db30251d71c7ff3f
| 27,591
|
from typing import Iterable
def _n_name(invars: Iterable[str]) -> str:
"""Make sure that name does not exist in invars"""
name = "n"
while name in invars:
name = "n" + name
return name
|
fc3b5da0e762e1b212248c403ceda66c311a60f9
| 27,592
|
def _rub_str_ ( bins ) :
"""Printout for RooUniformBinning"""
l = bins. lowBound ()
h = bins.highBound ()
n = bins.numBoundaries () - 1
x = bins.GetName()
if not x : return "RooUniformBinning(%s,%s,%d)" % ( l , h , n )
return "RooUniformBinning(%s,%s,%d,'%s')" % ( l , h , n , x )
|
d2ffdbf12e63dcc1f038f670a92ec1cf13da3fd7
| 27,593
|
import re
def get_NT_from_str(string):
"""
Given a string of a snippet, return the NT of that snippet.
in: '[1, cython_backup] <RE>'
out: '<RE>'
:param string: A string defining a snippet.
:return: The NT of that snippet.
"""
# Get index portion of string
    index = re.findall(r"<.+>", string)
return index[0]
|
3d62f7171e5a053a5f69b8dffb406ae7df215494
| 27,595
|
import re
def find_map_surroundings(room_name, all_corners=False):
"""
    :param room_name: Room name; returns the list of maps surrounding this room
                      (up/down/left/right, optionally including the diagonals).
    :param all_corners: Also return the diagonal rooms, or only the 4 adjacent sides?
    :return: [room names]
"""
    # Is the room name well-formed?
    if ('E' in room_name or 'W' in room_name) \
            and ('S' in room_name or 'N' in room_name):
        if 'E' in room_name:
            garo = 'E'
        else:
            garo = 'W'
        if 'S' in room_name:
            sero = 'S'
        else:
            sero = 'N'
room_nums = re.split(r'[EWSN]', room_name)
room_nums = list(filter(None, room_nums))
arranged_room_nums = []
        # counter == 0: iterating the E/W number; counter == 1: the N/S number.
counter = 0
for r in room_nums:
if r:
for n in range(-1, 2):
                    # reset
                    int_ = int(r)
                    # 0 means the current room, so skip it
                    if n == 0:
                        continue
                    # apply -1 / +1 according to the loop
                    int_ += n
                    # if the value drops below 0
                    change_map_name = False
if int_ < 0:
change_map_name = True
if counter == 0:
if 'E' in garo:
garo = 'W'
else:
garo = 'E'
else:
if 'S' in sero:
sero = 'N'
else:
sero = 'S'
int_ += 1
                    # When counter is 0 we are counting the W/E part, i.e. the S/N part stays fixed.
                    # Everything except int_ must stay the same...
if counter == 0:
result = garo + str(int_) + sero + room_nums[1]
arranged_room_nums.append(result)
else:
result = garo + room_nums[0] + sero + str(int_)
arranged_room_nums.append(result)
                    # if the direction letter was flipped, flip it back
if change_map_name:
if counter == 0:
if 'E' in garo:
garo = 'W'
else:
garo = 'E'
else:
if 'S' in sero:
sero = 'N'
else:
sero = 'S'
counter += 1
return arranged_room_nums
|
f3a790c7d5ffad301ea19204dc80ce51662383b8
| 27,596
|
def andlist(list,conjunction="and"):
"""Turn list of strings into English text."""
if len(list) == 0:
return "(empty list!)"
if len(list) == 1:
return list[0]
elif len(list) == 2:
return (' '+conjunction+' ').join(list)
else:
return ', '.join(list[:-1]+[conjunction+' '+list[-1]])
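# A minimal usage sketch (illustrative):
assert andlist(["apples", "pears"]) == "apples and pears"
assert andlist(["apples", "pears", "plums"], "or") == "apples, pears, or plums"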
|
068197cf3a972f5a79fa259885fde2e8cdc7bfbb
| 27,597
|
import torch
def inference_agent(model, env, s_init, supp_init, device='cpu'):
"""
s_init (3,96,96) cuda
supp_init (2,) cuda
"""
model.eval()
s_curr = s_init.clone().unsqueeze(0).to(device)
supp_curr = supp_init.clone().unsqueeze(0)
with torch.no_grad():
act = torch.argmax(model(s_curr), dim=1)
s_curr, supp_curr, reward = env.step(s_curr, supp_curr, act)
return s_curr, supp_curr, reward, act
|
29580f57b540cbba442b0751c718ff4a5c082008
| 27,598
|
from typing import List
from typing import Dict
from typing import Any
from typing import Set
def get_human_readable_headers(outputs: List[Dict]) -> List[Any]:
"""
Retrieves all of the keys that their value is not dict recursively
Args:
outputs (List[Dict]): Input list of dictionaries.
Returns:
List with all of the keys that don't have inner dictionaries.
"""
def contains_dict(entry: Any) -> bool:
if isinstance(entry, dict):
return True
elif isinstance(entry, list):
return any(contains_dict(item) for item in entry)
return False
human_readable_keys: List[Set] = [{k for k, v in output.items() if not contains_dict(v)} for output in outputs]
if not human_readable_keys:
return []
return list(set.intersection(*human_readable_keys))
|
e1236b9b99dea9dd926a268a2b9210a78e8bd971
| 27,599
|
from typing import Sequence
def _count_run(li: Sequence, lo: int, hi: int) -> int:
"""Count the length of the run beginning at lo, in the slice [lo, hi).
lo < hi is required on entry.
"""
# "A run" is either the longest non-decreasing sequence, or the longest strictly
# decreasing sequence. `descending` is False in the former case, True in the latter.
# Note: This function is not required by tim_sort(), so we make it internal.
assert lo < hi
# descending = False
lo += 1
if lo == hi:
return 1
n = 2 # run count
if li[lo] < li[lo-1]:
# descending = True
for lo in range(lo+1, hi):
if li[lo] >= li[lo-1]:
break
n += 1
else:
for lo in range(lo+1, hi):
if li[lo] < li[lo-1]:
break
n += 1
return n
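# A minimal usage sketch (illustrative):
assert _count_run([3, 4, 4, 2, 1], 0, 5) == 3   # non-decreasing run: 3, 4, 4
assert _count_run([5, 3, 1, 2], 0, 4) == 3      # strictly decreasing run: 5, 3, 1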
|
bf93346487d275152f221971ab1bd4d678cdf290
| 27,600
|
import os
def is_directory(b):
"""
Return if the given buffer is a directory
"""
return os.path.isdir(b.name)
|
0b88a016a6aabcf7bd3a6a8d16101afd798f7531
| 27,601
|
import torch
def get_taylor_criterion(W):
""" Get the pruning criterion based on taylor expansion.
It requires that W.grad is accessible.
"""
assert isinstance(W, torch.Tensor)
assert W.grad is not None
assert W.dim() == 4
# first-order term of the taylor expansion
C = torch.mul(W.grad, W)
# L1-norm taken on the taylor criterion
C = torch.abs(C).sum(dim=(2, 3))
return C
|
9d1d748e458a68451a1ad7b12cf845a4edf9d442
| 27,603
|
import torch
def k_nearest_neighbour_accuracy(y_pred, y):
"""Calculates k-NN accuracy.
# Arguments:
y_pred: Prediction categories [size, categories]
y: Ground truth categories. Must have shape [size,]
"""
    # take the most frequent class among the k neighbours:
y_pred, _ = torch.mode(y_pred, dim=1)
return torch.eq(y_pred, y).sum().item() / y_pred.shape[0]
|
67e0a3231ae94bdfb648707e35813ee76f112066
| 27,604
|
def get_ith_minibatch_ixs(i, num_data, batch_size):
"""Split data into minibatches.
@param i: integer
iteration index
@param num_data: integer
number of data points
@param batch_size: integer
number of data points in a batch
@return batch_slice: slice object
"""
    num_minibatches = num_data // batch_size + ((num_data % batch_size) > 0)
i = i % num_minibatches
start = i * batch_size
stop = start + batch_size
return slice(start, stop)
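# A minimal usage sketch (illustrative): 10 points in batches of 4 give 3 minibatches,
# and the iteration index wraps around.
assert get_ith_minibatch_ixs(2, 10, 4) == slice(8, 12)
assert get_ith_minibatch_ixs(3, 10, 4) == slice(0, 4)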
|
2583cada078a0725e2dfe40c48111696bef48f8f
| 27,606
|
import tempfile
import base64
def unpack_resource(data):
"""Convert base64 encoded data into a file handle, and a temporary file name to access the data"""
file_handle = tempfile.NamedTemporaryFile()
file_handle.write(base64.b64decode(data))
file_handle.seek(0)
return (file_handle,file_handle.name)
|
372d1e48e8e67e71f3f0bbdd1e15e9cbc9369973
| 27,611
|
def has_same_letter_repeated(box, times):
"""Check if box has any letter repeated number of times."""
return any(
box.count(letter) == times
for letter in box
)
|
9b0450d3ab8f276facde656dad9f9938d9fd1c20
| 27,612
|
def flatten_(structure):
"""Combine all leaves of a nested structure into a tuple.
The nested structure can consist of any combination of tuples, lists, and
dicts. Dictionary keys will be discarded but values will ordered by the
sorting of the keys.
Args:
structure: Nested structure.
Returns:
Flat tuple.
"""
if isinstance(structure, dict):
result = ()
for key in sorted(list(structure.keys())):
result += flatten_(structure[key])
return result
if isinstance(structure, (tuple, list)):
result = ()
for element in structure:
result += flatten_(element)
return result
return (structure,)
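# A minimal usage sketch (illustrative):
assert flatten_({"a": [1, 2], "b": (3, {"c": 4})}) == (1, 2, 3, 4)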
|
6d085c290603116d91d400a4e974956b6adc265e
| 27,613
|
def help_for_command(command):
""" generate help doc for command """
help = command.description()
if command.options():
help += '\n\nOptions:'
for option in command.options():
if option.long and option.short:
option_flag = option.long + '/' + option.short
        elif option.long:
option_flag = option.long
else:
option_flag = option.short
help += '\n ' + option_flag + ' '
if option.type.__name__ == 'str_to_bool':
help += '<' + str(option.dest) + '>; Type: bool'
else:
help += '<' + str(option.dest) + '>; Type: ' + option.type.__name__
help += '; ' + option.help
return help
|
6cca7088b9a5a71dd14a3e60f47e9d06cb120da3
| 27,614
|
def replace_tree_marks(key, arguments):
""" Replace TREE markers from key to proper argument
:param key: The currently processed key from .yaml file
:param arguments: Arguments already typed for command
:return: Key string with replaced marks
"""
tree_mark_index = key.find('TREE~')
while tree_mark_index >= 0:
tree_index = -int(key[tree_mark_index+5])
key = key[:tree_mark_index] \
+ arguments[tree_index] \
+ key[tree_mark_index+6:]
tree_mark_index = key.find('TREE~')
return key
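# A minimal usage sketch (illustrative; the command text and argument list are hypothetical):
assert replace_tree_marks("show TREE~1 status", ["svc", "web01"]) == "show web01 status"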
|
58b5fa36a2f95665d585608f1103ec00f3c47218
| 27,615
|
def get_groups(groups_collection):
    """Returns a list of group documents
:param groups_collection: Mongo collection that maintains groups
:return: List of group names
"""
groups = groups_collection.find()
return list(groups)
|
75ff224e383eaf2f4fd9e4d345231aa4a7ea587f
| 27,617
|
def invert_angle(angle):
"""
Inverts the steering angle for the flip_img()
:param angle:
:return: inverted angle
"""
return angle * -1.0
|
8628d1b745b32a2b4cdf94b11a9453c87b2e6c2e
| 27,619
|
def channels(channel):
"""Return a mock of channels."""
return [channel("level", 8), channel("on_off", 6)]
|
fab1959c3f8f6f3f219c7f3c0b401707e9cbb5af
| 27,620
|
def solve(puzzle, row, col, num):
    """Return False if the same num already appears in the given row, column,
    or the corresponding 3*3 sub-grid; otherwise return True."""
# Check for same number in row
for x in range(9):
if puzzle[row][x] == num:
return False
# Check for same number in column
for x in range(9):
if puzzle[x][col] == num:
return False
# Check for same number matrix
start_row = row - row % 3
start_col = col - col % 3
for i in range(3):
for j in range(3):
if puzzle[i + start_row][j + start_col] == num:
return False
return True
|
fea9b13ff7816478d9dd4fcab5c9bfe5682b1ef2
| 27,621
|
def compute_mol_ids(job_id, n_jobs, total_n_molecules):
""" assume job_id is 0-indexed """
# construct batch indices for all jobs
batch_size = int(total_n_molecules / n_jobs)
batches = []
current_batch = []
for i in range(total_n_molecules):
if (len(current_batch) >= batch_size) or (i == (total_n_molecules - 1)):
batches.append(current_batch)
current_batch = []
else:
current_batch.append(i)
# get the batch indices for the current job
this_job = batches[job_id]
return this_job
|
10c6ae9a064bdafe4ccfb134bf3945979e48673e
| 27,623
|
import random
def randrequest():
"""Generates random request type from list."""
request_list = ["GET", "POST"]
return random.choice(request_list)
|
17d55b6a49dad352cc0a6b8003fecd5b8da62cad
| 27,626
|
def demo_2(x, y) -> int:
    """
    Demo function which combines x and y and scales the sum
>>> demo_2(3, 5)
16
"""
sum = x + y
if sum < 100:
sum = sum * 2
elif sum < 50:
sum = sum * 3
else:
sum = sum * 4
return sum
|
30cb5dd2c2b397b6a21fe23f1ef5d9c19414332d
| 27,627
|