| content (string, lengths 35–416k) | sha1 (string, length 40) | id (int64, 0–710k) |
|---|---|---|
import codecs
import re
def get_academic_titles_parser():
    """Returns a compiled regular expression for parsing academic titles."""
    # Read the list of academic titles from the data file.
    path_titles = 'prod_generation/academic_titles.txt'
    with codecs.open(path_titles, 'r') as f:
        titles = set(line.strip() for line in f)
    # Compile the regular expression (raw strings avoid invalid escape sequences).
    re_titles = "|".join(titles)
    re_name = (r"^(?P<titles_pre>((%s)\.?( |,))*)"
               r"(?P<name_clean>.*?)"
               r"(?P<titles_suffix>(( |,)*(%s)\.?)*)$" % (
                   re_titles, re_titles))
    return re.compile(re_name)
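# Minimal usage sketch with a small inline title set ('Dr' and 'PhD' are
# hypothetical stand-ins for the contents of academic_titles.txt):
_titles = "|".join({"Dr", "PhD"})
_pattern = re.compile(r"^(?P<titles_pre>((%s)\.?( |,))*)"
                      r"(?P<name_clean>.*?)"
                      r"(?P<titles_suffix>(( |,)*(%s)\.?)*)$" % (_titles, _titles))
_m = _pattern.match("Dr. John Smith, PhD")
print(_m.group("name_clean"))  # -> John Smith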
|
4477c7e95849d957dd1250e7432962d2cc50e816
| 23,856
|
import configparser
def get_param_value_from_conf_ini_file(conf_ini_file_path: str, section: str, param: str) -> str:
"""
Returns the value of the specified param from the specified .ini configuration file.
:param conf_ini_file_path: Path to the .ini configuration file.
    :param section: Name of the section in the .ini file. For example: 'MALLET'.
:param param: Name of the param inside that section. For example: 'SOURCE_CODE_PATH'.
:return: A str with the value specified in the .ini file for that param.
Example:
; demo-conf.ini
[MALLET]
SOURCE_CODE_PATH = /path/to/mallet
To access that value, execute:
    >>> get_param_value_from_conf_ini_file('demo-conf.ini', 'MALLET', 'SOURCE_CODE_PATH')
"""
config = configparser.ConfigParser()
config.read(conf_ini_file_path)
return config[section][param]
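# A quick round-trip check (writes a throwaway demo-conf.ini, then reads the value back):
with open('demo-conf.ini', 'w') as f:
    f.write('[MALLET]\nSOURCE_CODE_PATH = /path/to/mallet\n')
print(get_param_value_from_conf_ini_file('demo-conf.ini', 'MALLET', 'SOURCE_CODE_PATH'))
# -> /path/to/mallet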
|
aa141586ca97250c0c80b12dab0eb37cb7a0be9b
| 23,859
|
import numpy
def myDownsampleWithSmooth(y, N):
    """
    yds = myDownsampleWithSmooth(y,N)
    yds is y sampled at every Nth index, starting with yds[0]=y[0]
    (i.e. y[range(0,len(y),N)]), but y is first averaged across the N points
    centered on the selected index. Boundary conditions: no wrap around.
    Ted Golfinopoulos, 16 June 2012
    """
    jj = 0
    ynew = numpy.zeros(1 + int((len(y) - 1) / N))
    for ii in range(0, len(y), N):
        # cast bounds to int so they can be used as slice indices
        lowerBound = int(min(max(0, ii - numpy.ceil(N * 0.5)), len(y) - 1 - numpy.ceil(N * 0.5)))
        upperBound = int(min(lowerBound + numpy.floor(N * 0.5), len(y) - 1))
        ynew[jj] = numpy.mean(y[lowerBound:upperBound])
        jj = jj + 1
    return ynew
|
6b64b2ee48565554278d78ad83dba88036099dcf
| 23,861
|
def _normalize_number_format(number_format):
    """
    Inside pptx.chart a backslash is consumed as an escape character,
    so double every backslash beforehand.
    """
    return number_format.replace('\\', '\\\\') if number_format is not None else number_format
|
7205e81709ddbf68a77a2540a3c3e2c4987fdf74
| 23,862
|
def check_answer(user_guess, start_a_followers, start_b_followers):
    """Take the user's guess and the follower counts and return whether they guessed right"""
    if start_a_followers > start_b_followers:
        return user_guess == 'a'
    else:
        return user_guess == 'b'
|
023e7c9b3430608e858ce780a828e26623f93664
| 23,863
|
def map_to_guass(x, u, sigma):
"""
Maps a gaussian to a different mean and sigma
:param x:
:param u:
:param sigma:
:return:
"""
# First change variance
x = x * sigma
# then change the mean
x = x + u
return x
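# Sketch of the intended use, assuming x holds draws from a standard normal:
import numpy as np
np.random.seed(0)
x = np.random.randn(100000)
y = map_to_guass(x, u=5.0, sigma=2.0)
print(round(y.mean(), 1), round(y.std(), 1))  # ~ 5.0 2.0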
|
35168cae5e39a3393269b52d08c26eb79894477c
| 23,864
|
def _get_color(request):
"""
Get color from request
Args:
request (Request): contains info about the conversation up to this point (e.g. domain,
intent, entities, etc)
Returns:
        string: the text of the resolved color entity, or None if not found
"""
color_entity = next((e for e in request.entities if e['type'] == 'color'), None)
return color_entity['text'] if color_entity else None
|
64a48c91b01d658b905b7aa5e47c06898ff2ff0a
| 23,865
|
def get_normalization(normaliser, message):
"""Normalizes the message.
Args:
normaliser {rivescript.RiveScript} -- [instance of Rivescript]
message {string} -- [original message]
Returns:
message [string or None] -- [normalized string]
"""
try:
username = "dummy"
return normaliser.reply(username, message)
except AttributeError:
return None
|
90c29e36ccc190412a5bdfb76ad840cc9d7b4eca
| 23,866
|
import numpy
def calcDist(vertices):
"""Compute down-dip distance from the trench.
"""
dist = numpy.zeros(vertices.shape[0])
pt0 = vertices[:-1,:]
pt1 = vertices[1:,:]
dx = ((pt1[:,0]-pt0[:,0])**2 + (pt1[:,1]-pt0[:,1])**2)**0.5
dist[1:] = numpy.cumsum(dx)
return dist
|
33875b3f42eca8824a621ff1e9ab3e15676a59ff
| 23,867
|
def fake_child_node_input(request):
"""
Fixture that yields two item tuples containing fake input to create a child :class:`~pai_lang.syntax.Node` node.
"""
return request.param
|
28b4cc9e9b0adac23ec886f39925b04ef1376d25
| 23,868
|
import random
def algorithmeTrois(lst):
    """ Builds a random tree from the elements of lst. """
    if not lst:
        return lst
    elif len(lst) == 1:
        return lst[0]
    elif len(lst) == 2:
        if random.randint(0, 1) == 0:
            return lst
        else:
            return lst[::-1]
    else:
        b = random.randint(0, len(lst) - 1)
        if random.randint(0, 1) == 0:
            return [algorithmeTrois(lst[:b] + lst[b+1:]), lst[b]]
        else:
            return [lst[b], algorithmeTrois(lst[:b] + lst[b+1:])]
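# Seeded illustration; the exact nesting depends on the random draws:
random.seed(42)
print(algorithmeTrois([1, 2, 3, 4]))  # one random nested pairing of the four elements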
|
ee2335a3473e8055d75fc5938e272e9c2529b539
| 23,869
|
def username_2(names):
    """
    Return the last name in `names` with its first and last characters
    removed, stopping at the first name shorter than 3 characters.
>>> username_2(["", "Marina"])
"uncle roger's biggest fan"
>>> username_2(["LaiCaiJDanHenRoLu", "JJ~", "Chilli Jam Haiyah"])
'hilli Jam Haiya'
>>> username_2(["TUTU", "QIQI", "CECE"])
'EC'
"""
adjusted_name = "uncle roger's biggest fan"
i = 0
while i < len(names):
if len(names[i]) < 3:
break
adjusted_name = names[i][1:len(names[i]) - 1]
i += 1
return adjusted_name
|
71d3bd98e25455d41050c4db53e853fe7cd2fca5
| 23,870
|
import subprocess
def run_script_and_get_returncode(cmd_list):
    """Runs script and returns the returncode of the task.
    Args:
        cmd_list: list(str). The cmd list containing the command to be run.
    Returns:
        int. The return code from the task executed.
    """
    task = subprocess.Popen(cmd_list)
    task.communicate()  # blocks until the process finishes
    return task.returncode
|
80e520f2d81a5c4705bc41f1528d5b9420be2ba7
| 23,871
|
import os
def datadir(filename=""):
"""
Returns the absolute path of python-crn's data directory. Creates the
folder if it does not exist.
"""
datadir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
if not os.path.isdir(datadir):
os.makedirs(datadir)
if filename:
return os.path.join(datadir, filename)
return datadir
|
e276c2ee66bc7748fa1c1bac655eb1fe9e55e96c
| 23,872
|
import torch
def max_repres(repre_cos):
"""
Args:
repre_cos - (q_repres, cos_simi_q)|(a_repres, cos_simi)
Size: ([bsz, q_len, context_dim], [bsz, a_len, question_len])| ...
Return:
size - [bsz, a_len, context_dim] if question else [bsz, q_len, context_dim]
"""
    def tf_gather(input, index):
        """
        The same as tensorflow gather sometimes...
        Args:
            - input: dim - 3
            - index: dim - 2
        Return: [input.size(0), index.size(1), input.size(2)]
        """
        bsz = input.size(0)
        sent_size = input.size(1)
        dim_size = input.size(2)
        # offset row n of the index by n*sent_size so it addresses the
        # flattened (bsz*sent_size, dim) view; avoids in-place .data mutation
        offsets = torch.arange(bsz, device=index.device).unsqueeze(1) * sent_size
        index = index + offsets
        input = input.view(-1, dim_size)
        index = index.view(-1)
        temp = input.index_select(0, index)
        return temp.view(bsz, -1, dim_size)
repres, cos_simi = repre_cos
index = torch.max(cos_simi, 2)[1] # max_index
return tf_gather(repres, index)
|
81e0511ae9f26cd41afa836ac41cb7235fd970da
| 23,873
|
def _npairs(n_items):
"""Return the number of pairs given n_items; corresponds to the length
of a triu matrix (diagonal included)"""
    if n_items < 2:
        raise ValueError("At least two items required, "
                         "passed {0}".format(n_items))
n_pairs = int(n_items * (n_items - 1) / 2. + n_items)
return n_pairs
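# Sanity check against numpy's upper-triangle indices (diagonal included):
import numpy as np
assert _npairs(4) == len(np.triu_indices(4)[0])
print(_npairs(4))  # 10: 6 off-diagonal pairs + 4 diagonal entries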
|
fe8698a025e78d959dcaa93d440ac79cc3460245
| 23,874
|
import re
def groups():
"""regex.groups: Number of different match groups."""
regex = re.compile(r'day: ((\d)\d)')
return "{} recursive matchings possible".format(regex.groups)
|
eab99849632a85adc44701dd2e1a0eba7812dee6
| 23,876
|
def adjust_release_version(release_name):
"""
Adjust release_name to match the build version from the executable.
executable: 1.8.0_212-b04 release_name: jdk8u212-b04
executable: 11.0.3+7 release_name: jdk-11.0.3+7
executable: 12.0.1+12 release_name: jdk-12.0.1+12
"""
if release_name.startswith('jdk8u'):
return release_name.replace('jdk8u', '1.8.0_')
else:
return release_name[4:]
|
f225eef29a67b2e4bf7869ff673a53cc7c7d7869
| 23,877
|
def draw_on_pattern(shape, pattern):
""" Draws a shape on a pattern.
>>> draw_on_pattern([(0, 0, 1), (0, 1, 3), (1, 1, 8)], [[0, 0, 0], [0, 0, 0]])
[[1, 3, 0], [0, 8, 0]]
"""
y_size = len(pattern)
x_size = len(pattern[0])
    new_pattern = [row.copy() for row in pattern]  # copy the rows too, so the input is not mutated
for cell in shape:
y, x, colour = cell
if 0 <= y < y_size and 0 <= x < x_size:
new_pattern[y][x] = colour
return new_pattern
|
85c8f8f59b3bc241684798898f1780c7eda475f4
| 23,879
|
def image_to_normalized_device_coordinates(image):
"""Map image value from [0, 255] -> [-1, 1].
"""
return (image / 127.5) - 1.0
|
c92fe3ef499164c957b1b4330cce584a6b6d9f1f
| 23,884
|
def first_bad_pair(sequence, k):
"""Return the first index of a pair of elements in sequence[]
for indices k-1, k+1, k+2, k+3, ... where the earlier element is
not less than the later element. If no such pair exists, return -1."""
if 0 < k < len(sequence) - 1:
if sequence[k-1] >= sequence[k+1]:
return k-1
for i in range(k+1, len(sequence)-1):
if sequence[i] >= sequence[i+1]:
return i
return -1
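# Two quick examples of the index convention:
print(first_bad_pair([1, 2, 3, 4], 2))  # -1: strictly increasing, no bad pair
print(first_bad_pair([1, 5, 2, 1], 1))  # 2: sequence[2] >= sequence[3]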
|
0fe2957d8feb95fe285088be0c5d85ddf4b657ad
| 23,886
|
def stitle(self, nline="", title="", **kwargs):
"""Defines subtitles.
APDL Command: /STITLE
Parameters
----------
nline
Subtitle line number (1 to 4). Defaults to 1.
title
Input up to 70 alphanumeric characters. Parameter substitution may
be forced within the title by enclosing the parameter name or
parametric expression within percent (%) signs. If Title is blank,
this subtitle is deleted.
Notes
-----
Subtitles (4 maximum) are displayed in the output along with the main
title [/TITLE]. Subtitles do not appear in GUI windows or in ANSYS
plot displays. The first subtitle is also written to various ANSYS
files along with the main title. Previous subtitles may be overwritten
or deleted. Issue /STATUS to display titles.
This command is valid in any processor.
"""
command = "/STITLE,%s,%s" % (str(nline), str(title))
return self.run(command, **kwargs)
|
80775dca1d9de3bd2d1b88fbb9843d047d6d3766
| 23,888
|
def getDetailedChannelBoxAttrs(node):
"""
Return the list of attributes that are included
when the 'detailed channel box' is enabled for a node.
"""
attrs = [
# rotate order
'ro',
# rotate axis
'rax', 'ray', 'raz',
# rotate pivot
'rpx', 'rpy', 'rpz',
# scale pivot
'spx', 'spy', 'spz',
# rotate pivot translate
'rptx', 'rpty', 'rptz',
# scale pivot translate
'sptx', 'spty', 'sptz',
]
if node.nodeType() == 'joint':
attrs += [
# joint orient
'jox', 'joy', 'joz',
]
return attrs
|
649eff52fcc43243891ce853732c2cf914ecc60a
| 23,889
|
import torch
def get_class_weights(target: torch.Tensor, class_weight_power: float = 1.0) -> torch.Tensor:
"""
Returns class weights inversely proportional to some power of the number of pixels in each class.
:param target: one-hot tensor of shape (B, C, Z, X, Y); thus class dimension (of size C) is dimension 1
:param class_weight_power: power to raise 1/c to, for each class count c
"""
with torch.no_grad():
class_counts = target.sum([0] + list(range(2, target.dim()))).float() # sum over all except class dimension
class_counts[class_counts == 0.0] = 1.0 # prevent 1/0 when invert - value doesn't matter if no voxels
class_weights = class_counts ** (-class_weight_power)
# Normalize so mean of class weights is 1.0
class_weights *= class_weights.shape[0] / class_weights.sum()
return class_weights
|
5199722699da87a57c8ddebfbaa5b44c11707393
| 23,890
|
def _prepare_sets(sets, *rest):
""" Turns the accepted argument format into one set of frozensets."""
def _itercons(x, xs):
yield x
yield from xs # in earlier versions you must replace this line by a loop
return {frozenset(s) for s in (_itercons(sets, rest) if rest else sets)}
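# Both accepted argument forms yield the same set of frozensets:
print(_prepare_sets([(1, 2), (2, 3)]))  # {frozenset({1, 2}), frozenset({2, 3})}
print(_prepare_sets((1, 2), (2, 3)))    # same result via the *rest form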
|
9fa99416b3b7d62db8d01fb26948308961bd75b3
| 23,891
|
import shlex
def split_string(s):
"""split string to list"""
if s is None:
return []
else:
return shlex.split(s)
|
e9cbd0c1928d16c673d70cc9aeffc0275de43f30
| 23,892
|
def get_collection_id(collection):
"""Return id attribute of the object if it is collection, otherwise return given value."""
return collection.id if type(collection).__name__ == 'Collection' else collection
|
5c7b344ff89d28609b21962fda58acc3cd7684a1
| 23,893
|
def trainings(states):
"""Creates a dict from training_id to a list of game states."""
ret_value = {}
for state in states:
if state.training_id not in ret_value:
ret_value[state.training_id] = []
ret_value[state.training_id].append(state)
return ret_value
|
d58b815bcb5176989c581e12901863fb00da31d0
| 23,894
|
def lerp(value, start, end, minimum=0.0, maximum=1.0):
"""
Linear Interpolation between two points
"""
value = float(value)
start = float(start)
end = float(end)
return minimum+((1.0-value) * start +value * end)*maximum
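# With the default minimum/maximum this reduces to plain interpolation:
print(lerp(0.25, 0.0, 10.0))  # 2.5
print(lerp(0.5, 4.0, 8.0))    # 6.0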
|
a27ef478a51790c2bb5c449c6d73a15ac0ab02d0
| 23,895
|
def get_charset(message):
"""Get the charset defined for the message.
The charset can be retrieved in two ways. Try the preferred method
first and, if that fails, try the other method.
Args:
message (Message): An email Message object.
Returns:
(unicode) The charset that was found or `None` if not found.
"""
charset = message.get_content_charset()
if not charset:
charset = message.get_charset()
return charset
|
447fa14e12792737f0c8aeb9a0c80214cb8e04bf
| 23,897
|
import os
def is_git_repo(directory: str) -> bool:
"""
Test, if the directory is a git repo.
(Has .git subdirectory?)
"""
return os.path.isdir(os.path.join(directory, ".git"))
|
d3a374e4fc869075afc473e623e089c503c417e9
| 23,898
|
import os
def ChangeDirectory(path):
    """
    Library function to change to a directory
    :param path: directory to change into
    :type path: string
    :return: dictionary with key 'returnCode' (0 on success, 1 on failure)
    :rtype: dictionary
    """
    retDataStruct = dict()
    try:
        os.chdir(path)
        retDataStruct['returnCode'] = 0
    except OSError:
        retDataStruct['returnCode'] = 1
    return retDataStruct
|
7f067148926d91e8240daffaac914ee3f1a4f685
| 23,899
|
from typing import Tuple
def is_valid_choice(options):
"""
Returns a function that tests if the config value is an element of the passed options.
:param options: The options that are considered as valid choices.
:return: A functions that takes a value and tests if it is within the specified choices. This function returns True
if the value in the config is in the passed options.
"""
def validator(value) -> Tuple[bool, str]:
return value in options, f"Value is not contained in the options {options}"
return validator
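# Usage sketch; the message is returned either way and the caller
# is expected to act on the boolean:
validator = is_valid_choice(['red', 'green', 'blue'])
print(validator('red')[0])   # True
print(validator('pink')[0])  # False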
|
4c1fb6c32a21e068256443c0110fa09a6b31d60b
| 23,901
|
def synthesize_thermal(results_df):
""" Reduce thermal metrics table to one time-point like table
where for each metric, only the worst metric value of all time-points
is recorded
"""
filter_cols = ["name",
"substation",
"feeder",
"placement",
"sample",
"penetration_level",
"scenario"]
df = (
results_df.groupby(filter_cols)[
[
c for c in results_df.columns if (c not in filter_cols) and (c != "time_point")
]
]
.max()
.reset_index()
)
return df
|
88b7c090a635d4410b91e69ff85462f4329e9051
| 23,902
|
def pat_inv_map(data):
    """Build patent(idx) mapping to list of inventors for a-t model"""
    pat_inv_dict = {}
    # enumerate avoids O(n^2) data.index() lookups (and stays correct with duplicates)
    for idx, patent in enumerate(data):
        inventors = [inventor['inventor_id']
                     for inventor in patent['inventors']]
        # pat_number = int(patent['patent_number'])
        pat_inv_dict[idx] = inventors
    return pat_inv_dict
|
f4a35f0f61e1f639a6a153b7b8c261657f5a3f35
| 23,903
|
def Keck_distortion(measured_wave, cutoff=10000.):
"""Telescope dependent distortion function for the Keck sample."""
slope1 = .0600
intercept1 = -100
slope2 = .160
intercept2 = -1500
if measured_wave < cutoff:
return measured_wave * slope1 + intercept1
else:
return measured_wave * slope2 + intercept2
|
b0592bc8e9fde3d3f04234f6627ea8c5f7e7717a
| 23,904
|
def get_ratings_sparsity(ratings):
"""
Calculates the sparsity of the ratings matrix
:param ratings: The user x item ratings DataFrame
:type ratings: DataFrame
:return: The percentage sparsity of the DataFrame
"""
sparsity = float(len(ratings.values.nonzero()[0]))
sparsity /= (ratings.shape[0] * ratings.shape[1])
sparsity *= 100
return sparsity
|
7cc1cc91a61decfebece6e40c5a2a4d22b60cdc8
| 23,905
|
def simple_energy(x0, x1, y0, y1):
"""e(I) = |deltax I| + |deltay I| The first energy function introduced in
https://inst.eecs.berkeley.edu/~cs194-26/fa14/hw/proj4-seamcarving/imret.pdf
:params
The east/west/north/south neighbors of the pixel whose energy to calculate.
Each is an len-3 array [r,g,b]
:returns float
simple energy of pixel with those neighbors
"""
return sum(abs(x0-x1) + abs(y0-y1))
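# Worked example on three-channel (r, g, b) neighbors:
import numpy as np
x0, x1 = np.array([10, 20, 30]), np.array([12, 18, 30])
y0, y1 = np.array([5, 5, 5]), np.array([9, 5, 1])
print(simple_energy(x0, x1, y0, y1))  # (2+2+0) + (4+0+4) = 12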
|
8376a51a565c00a9b3d9c06f85376f0e29da04c5
| 23,906
|
import re
def strip_tags(html):
"""Tries to return a string with all HTML tags stripped from a given string.
"""
return re.sub(r'<[^<]+?>', '', html)
|
13b8247027b070e0ddb5388f8d36a36b8ff9d4d2
| 23,908
|
def page_link(request, page, querystr):
"""Create an absolute url to a given page with optional action
@param page: a page to link to
@type page: MoinMoin.Page.Page
@param querystr: a dict passed to wikiutil.makeQueryString
"""
return request.getQualifiedURL(page.url(request, querystr))
|
efbbe5bf472c3a9fe25a5561f08308f8200a961d
| 23,909
|
def wpoffs(self, xoff="", yoff="", zoff="", **kwargs):
"""Offsets the working plane.
APDL Command: WPOFFS
Parameters
----------
xoff, yoff, zoff
Offset increments defined in the working plane coordinate system.
If only ZOFF is used, the working plane will be redefined parallel
to the present plane and offset by ZOFF.
Notes
-----
Changes the origin of the working plane by translating the working
plane along its coordinate system axes.
This command is valid in any processor.
"""
command = f"WPOFFS,{xoff},{yoff},{zoff}"
return self.run(command, **kwargs)
|
e7382911339938e0c7f89606f8ffd2b66923c955
| 23,910
|
import argparse
import sys
def get_args():
"""Command line interface to get arguments
Command line interface to collect the robot IP address and the path
of the firmware file.
Returns
-------
string
Path the the robot firmware file for the robot update.
string
HTTP address of the robot to update.
"""
    parser = argparse.ArgumentParser(
        description='Run the firmware update of the robot.',
        epilog='example: python FirmwareUpdate.py --robot_fw_path ../../fw/v8.3.2.update --robot_ip_address m500-test-1.')
    parser.add_argument(
        '--robot_fw_path',
        metavar='robot_fw_path',
        type=str,
        nargs='+',
        default=['.'],  # a list, to match the [0] indexing below
        help='The path of the firmware to update the robot.')
    parser.add_argument(
        '--robot_ip_address',
        metavar='robot_ip_address',
        type=str,
        nargs='+',
        default=['192.168.0.100'],
        help='The IP of the robot that will be updated.')
    args = parser.parse_args(sys.argv[1:])
    return [args.robot_fw_path[0], args.robot_ip_address[0]]
|
eaee1cc60cb9c817f120bf002caefd3266721440
| 23,911
|
def get_dtype(idf, col):
"""
Parameters
----------
idf
Input Dataframe
col
Column Name for datatype detection
Returns
-------
"""
return [dtype for name, dtype in idf.dtypes if name == col][0]
|
c99373b134ad8d038202d2a5a1935cae3ffa18d8
| 23,912
|
def convert_to_list(data):
    """Converts a dict of parameters to a URL query string."""
    return "?" + "&".join(["{}={}".format(key, value)
                           for key, value in data.items()])
|
5bac04e0da13005b77a221bbf39676c9a3514a55
| 23,914
|
import sys
def stderrConnectedToTerm():
"""!Return True if stderr is connected to a terminal, False otherwise."""
return sys.stderr.isatty()
|
4afe2dd2f8abf1d161a4c16d45dc84d9fc408e49
| 23,915
|
def parse_email(address):
"""
Returns "Marco Polo", "marco@polo.com" from "Marco Polo <marco@polo.com"
Returns "", "marco@polo.com" from "marco@polo.com"
"""
if ' ' in address.strip():
assert '<' in address and '>' in address, "Invalid address structure: %s" % (address,)
parts = address.split(' ')
name = ' '.join(parts[:-1])
address = parts[-1][1:-1]
return name, address
else:
return "", address
|
ba06dca314e7178fd4ab4fe4a58ebeb48fd2a088
| 23,916
|
def resample_dataframe(df, resolution):
"""
    Resamples a dataframe with a sampling frequency of 'resolution'
-> Smoothes the plots
:param df: Dataframe to be resampled. Must contain numbers only
:param resolution: Resolution of the sampling to be done
:return: Resampled dataframe
"""
df = df.set_index('timedelta', drop=True) # set timedelta as new index
resampled = df.resample(str(resolution)+'S').mean()
resampled.reset_index(inplace=True)
# timedelta was resampled, so we need to do the same with the Time-column
resampled['Time'] = resampled['timedelta'].apply(lambda time: time.total_seconds())
return resampled
|
17a2c555e45a9067e68c97958200508525bef873
| 23,917
|
import re
import os
import glob
def autocomplete(text, state):
"""Enables autocompletion of lines"""
if "~" in text:
text = re.sub(r'~', os.path.expanduser("~"), text)
if os.path.isdir(text) and not text.endswith("/"):
text += "/"
return glob.glob(text + "*")[state]
|
a2b23b3db580ac7ca58f46491c144582ad9f4330
| 23,918
|
def load_org1_gateway(network_config):
""" Loads the `org1_gw` Gateway """
return network_config.get_gateway('org1_gw')
|
6e49274170b46031dfe01b49ccd63aa22d01dbb1
| 23,921
|
def equal_zip(*args):
"""
Zip which also verifies that all input lists are of the same length
"""
# make sure that all lists are of the same length
assert len(set(map(len, args))) == 1, "lists are of different lengths {}".format(args)
return zip(*args)
|
f50ad8586d24516ba641e1bbef5d62879a0f3d6b
| 23,922
|
def dict_(delegate, *tuples):
""":yaql:dict
Returns dict built from tuples.
:signature: dict([args])
:arg [args]: chain of tuples to be interpreted as (key, value) for dict
:argType [args]: chain of tuples
:returnType: dictionary
.. code::
yaql> dict(a => 1, b => 2)
{"a": 1, "b": 2}
yaql> dict(tuple(a, 1), tuple(b, 2))
{"a": 1, "b": 2}
"""
return delegate('dict', tuples)
|
4c8dcc2dd6cad6d6e6cf052696f14bf8f36762c3
| 23,923
|
import os
def get_all_file(root_node):
    """
    Recursively collect the paths of all files under root_node.
    :param root_node:
    :return:
    """
    ab_root = os.path.abspath(root_node)
    all_path_record = []
    next_level_paths = os.listdir(ab_root)
    next_level_abs_paths = [os.path.join(ab_root, filename) for filename in next_level_paths]
    # an empty directory simply returns []
    if not next_level_abs_paths:
        return []
    for tmp_path in next_level_abs_paths:
        # a regular file: record it directly
        if os.path.isfile(tmp_path):
            all_path_record.append(tmp_path)
        # otherwise a directory: recurse into it
        else:
            tmp_file_path = get_all_file(tmp_path)
            all_path_record += tmp_file_path
    return all_path_record
|
4ed7a6c31c57748513496523c7cc05b5e9cd9fde
| 23,924
|
import hashlib
# Identifier-space size in bits (assumed value; the original's
# `from re import M` pulled in the re.MULTILINE flag by mistake)
M = 32
def hash_key(integer):
    """Hash the given integer and trim to M//4 hex digits (M bits)
    Arguments:
        integer {Integer}
    Returns:
        Integer -- Hashed Integer Value
    """
    name = str(integer)
    m = hashlib.sha1(name.encode('utf-8'))
    key_hash = m.hexdigest()[:M // 4]
    return int(key_hash, 16)
|
d37361d02d270e75758b3268efbe0e18c7723d63
| 23,925
|
def _swap_ending(s, ending, delim="_"):
"""
Replace the ending of a string, delimited into an arbitrary
number of chunks by `delim`, with the ending provided
Parameters
----------
s : string
string to replace endings
ending : string
string used to replace ending of `s`
delim : string
string that splits s into one or more parts
Returns
-------
new string where the final chunk of `s`, delimited by `delim`, is replaced
with `ending`.
"""
parts = [x for x in s.split(delim)[:-1] if x != ""]
parts.append(ending)
return delim.join(parts)
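# For example:
print(_swap_ending("variable_name_old", "new"))   # variable_name_new
print(_swap_ending("a.b.old", "new", delim="."))  # a.b.new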
|
ffce5c55d2f914668a4daa5634ad379785fd3f2a
| 23,928
|
import os
def write_file(current_working_directory, filename, contents):
"""Write file with specified contents in specified directory."""
with open(os.path.join(current_working_directory, filename), "w") as wfile:
wfile.write(contents)
return filename
|
4cd944b86791044a2c5c89586ade0401bf42b04d
| 23,929
|
def generateDihedrals(molecule, angles):
"""
Generate dihedrals angles form the bonds list
Parameters
----------
molecule : RDkit molecule
RDkit molecule
    angles : list
        Angle index list
Returns
-------
dihedrals : list
Index angles atoms list
"""
dihedrals = []
for angle in angles:
atomAindex = angle[0]
atomBindex = angle[1]
atomC = molecule.GetAtomWithIdx(angle[2])
bondedToC = [x.GetIdx() for x in atomC.GetNeighbors() if x.GetIdx() != atomBindex and x.GetIdx() != atomAindex]
dihedrals.extend([angle+[atom] for atom in bondedToC])
for angle in angles:
atomCindex = angle[2]
atomBindex = angle[1]
atomA = molecule.GetAtomWithIdx(angle[0])
bondedToA = [x.GetIdx() for x in atomA.GetNeighbors() if x.GetIdx() != atomBindex and x.GetIdx() != atomCindex]
dihedrals.extend([[atom]+angle for atom in bondedToA])
return dihedrals
|
52f933683f6698a88032f6e306b1238ec2388ef7
| 23,932
|
def extract_testing_substructures(doc):
""" Extract the search space for predicting with the mention pair model,
The mention ranking model consists in computing the optimal antecedent
for an anaphor, which corresponds to predicting an edge in graph. This
functions extracts the search space for each such substructure (one
substructure corresponds to one antecedent decision for an anaphor).
The search space is represented as a nested list of mention pairs. The
mention pairs are candidate arcs in the graph. The ith list contains the
ith mention pair, where we assume the following order:
(m_2, m_1), (m_3, m_2), (m_3, m_1), (m_4, m_3), ...
Args:
doc (CoNLLDocument): The document to extract substructures from.
Returns:
(list(list(Mention, Mention))): The nested list of mention pairs
describing the search space for the substructures.
"""
substructures = []
# iterate over mentions
for i, ana in enumerate(doc.system_mentions):
# iterate in reversed order over candidate antecedents
for ante in sorted(doc.system_mentions[1:i], reverse=True):
substructures.append([(ana, ante)])
return substructures
|
b18e9ef7bf825580618925ff2889b3df53fe4f89
| 23,933
|
import six
def explode(_string):
"""Explodes a string into a list of one-character strings."""
if not _string or not isinstance(_string, six.string_types):
return _string
else:
return list(_string)
|
c9c46382f2be8362e8f271983e32f1b1d2c2f7cc
| 23,934
|
import functools
def memoize(func):
    """Generic memoizer for any function with any number of arguments including zero."""
    cache = {}  # shared across calls; the original rebuilt its dict per call, so nothing was ever cached
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # `args or kwargs` (the original's `and` collapsed positional-only calls onto one key)
        key = str((args, kwargs)) if args or kwargs else "no_argument_response"
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return wrapper
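# Small check that the second call is served from the cache:
@memoize
def slow_square(x):
    print("computing", x)
    return x * x

print(slow_square(4))  # prints "computing 4", then 16
print(slow_square(4))  # cached: prints only 16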
|
fc194eb1a4ba42589adee88fd716409b056d2b91
| 23,936
|
def validation(size, training):
""" Obtain the validation set corresponding to the given training set """
result = []
for i in range(0, size):
if i not in training:
result.append(i)
return result
|
7d40be54aa5f54139f24c1b4b20695f70586d97f
| 23,937
|
from typing import Callable
from typing import Any
def call_safe(cb: Callable, *args, **argv) -> Any:
    """Safely call a Callable, returning its result, or None on TypeError"""
    try:
        return cb(*args, **argv)
    except TypeError:
        return None
|
5cdcca9fc5f3b834d161e7a596d20af73e5ed3b5
| 23,938
|
import os
def check_dir(path):
"""
check dir exists
:param path:
:type path:str
:return:
:rtype: bool
"""
return os.path.exists(path)
|
fc15e76099604fce79e58c404db920feeb2de3dd
| 23,940
|
def verify_hdf_files(gen_file, annots_file, chrom, start, stop, max_indel):
"""
Compares the hdf files, and makes sure the hdf files contain
variants in the specified range.
"""
if gen_file.shape != annots_file.shape:
annots_file = annots_file.merge(gen_file, on=['chrom','pos','ref','alt'], how='right')[annots_file.columns]
return gen_file, annots_file
else:
        # True where both ref and alt are short enough (<= max_indel)
        indel_ok = [
            all(len(i) <= max_indel for i in (row["ref"], row["alt"]))
            for _, row in gen_file.iterrows()
        ]
        return gen_file[indel_ok], annots_file[indel_ok]
|
6980fb12a549a6b92de8c5217b0001ba1cdb3700
| 23,941
|
def _avoid_duplicate_arrays(types):
"""Collapse arrays when we have multiple types.
"""
arrays = [t for t in types if isinstance(t, dict) and t["type"] == "array"]
others = [t for t in types if not (isinstance(t, dict) and t["type"] == "array")]
if arrays:
items = set([])
for t in arrays:
if isinstance(t["items"], (list, tuple)):
items |= set(t["items"])
else:
items.add(t["items"])
if len(items) == 1:
items = items.pop()
else:
items = sorted(list(items))
arrays = [{"type": "array", "items": items}]
return others + arrays
|
3b88a0a0a67e17eafee3ec307a67364f11b69dd5
| 23,943
|
def three_way_quick_sort(array, low, high):
"""
Sort array in ascending order by quick sort
:param array: given unsorted array
:type array: list
:param low: starting index of array to sort
:type low: int
:param high: ending index of array to sort
:type high: int
:return: sorted array in ascending order
:rtype: list
"""
def three_way_partition(arr, l, h):
"""
Partition both from left and right
This function partitions arr[] in three parts
- arr[l..i] contains all elements smaller than pivot
- arr[i+1..j-1] contains all occurrences of pivot
- arr[j..r] contains all elements greater than pivot
        :param arr: given unsorted array
        :type arr: list
        :param l: starting index of array to sort
        :type l: int
        :param h: ending index of array to sort
        :type h: int
        :return: index of correctly positioned pivot element
        :rtype: int
        """
        p = arr[h]
        i = l
        j = h - 1
        u = l - 1
        v = h
        while True:
            # from left, find the first element greater than
            # or equal to p. This loop will definitely terminate
            # as p is the last element
            while arr[i] < p:
                i += 1
            # from right, find the first element smaller than or
            # equal to p
            while arr[j] > p:
                j -= 1
                if j == l:
                    break
            # if i and j cross, then we are done
            if i >= j:
                break
            # swap, so that smaller goes on left greater goes on right
            arr[i], arr[j] = arr[j], arr[i]
            # move all same left occurrences of pivot to beginning of
            # array and keep count using u
            if arr[i] == p:
                u += 1
                arr[i], arr[u] = arr[u], arr[i]
            # move all same right occurrences of pivot to end of array
            # and keep count using v
            if arr[j] == p:
                v -= 1
                arr[j], arr[v] = arr[v], arr[j]
        # move pivot element to its correct index
        arr[i], arr[h] = arr[h], arr[i]
        # move all left same occurrences from beginning
        # to adjacent to arr[i]
        j = i - 1
        for k in range(l, u):
            arr[k], arr[j] = arr[j], arr[k]
            j -= 1
        # move all right same occurrences from end
        # to adjacent to arr[i]
        i = i + 1
        for k in range(h - 1, v, -1):
            arr[k], arr[i] = arr[i], arr[k]
            i += 1
        return i, j
if low < high:
pivot_high, pivot_low = three_way_partition(array, low, high)
three_way_quick_sort(array, low, pivot_low)
three_way_quick_sort(array, pivot_high, high)
return array
|
913dcd917897d6e238df1d9fb3970eb87f358298
| 23,945
|
from typing import Dict
from typing import Any
def track_data() -> Dict[str, Any]:
"""Get track data to instantiate a Track."""
return dict(
title="Python 3.8 new features", release_date="2016-07-30", podcast_id=1
)
|
61b41f23d25e56bbb33b4179f2f24c4f643ff44e
| 23,947
|
import tempfile
import shlex
import subprocess
def map_on_alleles(
        alleles: str,
        reads: str,
        cores: int = 4):
"""
Maps reads to sequences in .fasta format using minimap2. The output is written to a temp file handle and returned.
alleles: path to fasta-formatted file with expected subsequences (alleles)
reads: path to fastq-formatted reads
cores: number of threads used by minimap2
"""
tmp_alignment = tempfile.NamedTemporaryFile(mode='w+')
# map reads on extracted loci (alleles.fasta)
c0 = shlex.split(f"minimap2 -t {cores} -a -x map-ont {alleles} {reads} -o {tmp_alignment.name}")
# extract unique reads
p0 = subprocess.Popen(
c0, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8"
)
output, error = p0.communicate()
return tmp_alignment
|
c5b1e78f882dbae1f23986a26ad12c6e49fd75ae
| 23,948
|
def _residual_str(name):
"""Makes a residual symbol."""
return '\\mathcal{R}(%s)' % name
|
385c7a0f4f1ea3e9ed37be73978eaa34ded9afa4
| 23,949
|
def update_df(df, dirname, filename, coverage, mean, std_dev, segmented_pixel_count):
"""
update the coverage, mean, std_dev in a given df for a specific segmentation type (dirname)
"""
df.loc[df['names'] == filename, f'{dirname}_coverage'] = coverage
df.loc[df['names'] == filename, f'{dirname}_mean'] = mean
df.loc[df['names'] == filename, f'{dirname}_std_dev'] = std_dev
df.loc[df['names'] == filename, f'{dirname}_segmentation_count'] = segmented_pixel_count
return df
|
d803dc4ab0c6946a3b9ac0e70fa5c1eb327d85a7
| 23,950
|
def dist_between(h,seg1,seg2):
"""
Calculates the distance between two segments. I stole this function from
a post by Michael Hines on the NEURON forum
(www.neuron.yale.edu/phpbb/viewtopic.php?f=2&t=2114)
"""
h.distance(0, seg1.x, sec=seg1.sec)
return h.distance(seg2.x, sec=seg2.sec)
|
7c8ca520ea27f6297740eaffacb52a2ceeead287
| 23,951
|
import platform
import os
def get_cuda_paths(env):
"""Determines CUDA {bin,lib,include} paths
returns (bin_path,lib_path,inc_path)
"""
cuda_path = env['cuda_path']
bin_path = cuda_path + '/bin'
lib_path = cuda_path + '/lib'
inc_path = cuda_path + '/include'
# fix up the name of the lib directory on 64b platforms
if platform.machine()[-2:] == '64':
if os.name == 'posix' and platform.system() != 'Darwin':
lib_path += '64'
elif os.name == 'nt':
lib_path += '/x64'
# override with environment variables
if 'CUDA_BIN_PATH' in os.environ:
bin_path = os.path.abspath(os.environ['CUDA_BIN_PATH'])
if 'CUDA_LIB_PATH' in os.environ:
lib_path = os.path.abspath(os.environ['CUDA_LIB_PATH'])
if 'CUDA_INC_PATH' in os.environ:
inc_path = os.path.abspath(os.environ['CUDA_INC_PATH'])
return (bin_path,lib_path,inc_path)
|
d361eaf59f33b1eb093a097ad4da1ad625bc31c9
| 23,952
|
def search_shortest_paths_bfs(graph, start, end):
"""search path between starting station to goal station
Args:
graph(dictionary): graph of train stations
start(string): the name of the starting station
end(string): the name of the gaol station
Return:
answer(list): a path between the starting station to gaol station
"""
searched_list = []
data = {start: []}
queue = [start]
while queue:
current = queue.pop(0)
if current == end:
data[current] += [end]
return data[current]
if current not in searched_list:
searched_list.append(current)
queue += graph[current]
for station in graph[current]:
if station not in data.keys():
data[station] = data[current] + [current]
|
f1c5914c4f80ae2b79728f342e675747b211227d
| 23,953
|
import numpy
def relativize(x: numpy.ndarray) -> numpy.ndarray:
"""Normalize the data using a custom smoothing technique."""
std = x.std()
if float(std) == 0:
return numpy.ones(len(x), dtype=type(std))
standard = (x - x.mean()) / std
standard[standard > 0] = numpy.log(1.0 + standard[standard > 0]) + 1.0
standard[standard <= 0] = numpy.exp(standard[standard <= 0])
return standard
|
9df57f39ad63cfbc57994443f9cad2641fc70171
| 23,954
|
def candidate_thermal_capacity_rule(m, g, i):
"""Discrete candidate thermal unit investment decisions"""
return m.x_C[g, i] == sum(m.d[g, n, i] * m.X_C_THERM[g, n] for n in m.G_C_THERM_SIZE_OPTIONS)
|
5cc39fa342be77b2ee3a44b073a08cd4ba50f5c0
| 23,955
|
def locate(tlsToFind, sets):
"""return
- the set in which the given traffic light exists
- the pair in which it was found
- the index within the pair
"""
for s in sets:
for pair in s:
if tlsToFind == pair.otl:
return s, pair, 0
elif tlsToFind == pair.tl:
return s, pair, 1
return None, None, None
|
00956c33433bb34e7a5b8dd8a200a90d36bad801
| 23,956
|
def margin_fee(self, **kwargs):
"""Query Cross Margin Fee Data (USER_DATA)
Get cross margin fee data collection with any vip level or user's current specific data as https://www.binance.com/en/margin-fee
GET /sapi/v1/margin/crossMarginData
https://binance-docs.github.io/apidocs/spot/en/#query-cross-margin-fee-data-user_data
Keyword Args:
vipLevel (int, optional): User's current specific margin data will be returned if vipLevel is omitted
coin (str, optional)
recvWindow (int, optional): The value cannot be greater than 60000
"""
return self.sign_request("GET", "/sapi/v1/margin/crossMarginData", kwargs)
|
f2ca69c13b77fffadf6aa319f1847004ed5615da
| 23,958
|
def list2string(list_of_strings):
"""
Return a string (OUTPUT) from a list of strings (INPUT).
E.g.,
["I think,", "Therefore, I am."] => "I think. Therefore, I am"
"""
return " ".join(list_of_strings)
|
10efd017e6c09a2d17ec7f9135376f0a2f57c789
| 23,960
|
def clean_df(df):
"""
Takes in a Pandas Dataframe from Dreamclinic
and cleans it for aggregation.
"""
# remove rows where HrsWorked = 0
# because they are just used by the front desk staff somehow
df = df[df['HrsWorked'] != 0]
# fill NaN values in 'Service_Category with 'Massage'
df['Service_Category'] = df['Service_Category'].fillna(value='Massage')
# remove white space from Therapist names
df['Therapist'] = df['Therapist'].str.strip()
# make all therapist names lowercase to avoid typos in data entry
df['Therapist'] = df['Therapist'].str.lower()
# find and replace nicknames with domain knowledge
df = df.replace('abby thomson', 'abigail thomson')
# Drop Address_City and Addres_State Columns from Dataframe
df.drop(['Address_City', 'Address_State', 'Invoice_Category'],
axis=1,
inplace=True)
# Drop rows without a clientID
df = df.dropna()
return df
|
0db35bdd25fcba3eb3c366bb1830d8402e019d9c
| 23,962
|
def LocationToGene(scaffold, pos, sortedGenes):
"""
Description:
Given scaffold, pos, and dict of scaffold to list of sorted genes,
returns the locusId and the fraction through the gene it is
in (as a list of 2 elements) [locusId, fraction]
If the location overlaps multiple genes or no genes,
returns locusId = "", f = "".
Each gene should be a hash that contains begin, end, strand, and locusId
This code does not support orfs that wrap around the origin, and it
may not give correct results if there are complicated overlaps between ORFs.
In particular, it only checks the adjacent ORF on either side to see if there
is an overlap.
If the strand is the "-" strand, we return 1 - f
"""
if scaffold == "pastEnd":
return ["",""]
if scaffold in sortedGenes:
genelist = sortedGenes[scaffold]
        if genelist is None:
return ["",""]
else:
return ["",""]
# binary search
# at all times, either the true index is between lo and hi,
# or there is no hit
# We search the middle of current search loc
nGenes = len(genelist)
lo = 0
hi = nGenes - 1
for nRound in range(100000):
mid = int((lo+hi)/2)
iBegin = int(genelist[mid]['begin'])
iEnd = int(genelist[mid]['end'])
if pos < iBegin:
if mid == lo:
return ["",""]
hi = mid - 1
elif pos > iEnd:
if mid == hi:
return ["",""]
lo = mid + 1
else:
# Does the previous or next gene also overlap this position?
if (mid > 0) and (int(genelist[mid-1]['begin']) <= pos) and \
pos <= int(genelist[mid-1]['end']):
return ["",""]
if (mid < nGenes - 1) and (int(genelist[mid+1]['begin']) <= pos) \
and (pos <= int(genelist[mid+1]['end'])):
return ["",""]
if iBegin == iEnd:
f = 0
else:
f = (pos - iBegin)/(iEnd - iBegin)
strand = genelist[mid]['strand']
# insertions near N terminus of gene should have f near 0
# regardless of strand
if strand == "-":
f = 1.0 - f
return [genelist[mid]['locusId'],f]
raise Exception("Unreachable gene in scf: {}, pos: {}".format(
scaffold, pos))
|
a4e79b6a68013533b74b651a93f17a6bd5df29d4
| 23,964
|
def standard_dict(text):
"""Count with standard dict.
"""
d = {}
for key in text:
d.setdefault(key, 0)
d[key] += 1
return d
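# For example:
print(standard_dict("abracadabra"))  # {'a': 5, 'b': 2, 'r': 2, 'c': 1, 'd': 1}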
|
9cd2a00e889cd28df7133fb0ea29480611bbe851
| 23,965
|
def a_send_line(text, ctx):
"""Send text line to the controller followed by `os.linesep`."""
ctx.ctrl.sendline(text)
return True
|
79f40657fe2b346c695d808bb3cdc7650077b76e
| 23,966
|
import json
def pack_string_response(string):
"""打包字符串
"""
reason = {
'ret': 0,
'reason': 'OK',
'data': string,
}
return json.dumps(reason)
|
33b7354cf88d3849f3f8f0dbdcf1b1e7071a5d38
| 23,967
|
def substract(tensor, value):
"""Substract a tensor image with a value."""
for t, v in zip(tensor, value):
t.sub_(v)
return tensor
|
3f1c31349f2b6d05c877119c64099b146853706f
| 23,968
|
def horisontal_check(board: list) -> bool:
"""
    Check if any line in the board contains a repeated digit.
    If yes, return False, otherwise True.
>>> horisontal_check(["**** ****",\
"***1 ****",\
"** 3****",\
"* 4 2****",\
" 9 5 ",\
" 6 83 *",\
"3 1 **",\
" 8 2***",\
" 2 ****"])
True
"""
for line in board:
lst_line = list(line)
for el in line:
if el == "*" or el == " ":
lst_line.remove(el)
if len(set(lst_line)) != len(lst_line):
return False
return True
|
9acf1a3e3973db2ee9d122311696df2b345d1696
| 23,969
|
from typing import Optional
async def _get_key(request) -> Optional[str]:
"""Gets the API key either from header or url path
:param request: request
:return: key or none
"""
path = str(request.url.path)
key = request.headers.get('X-API-KEY', path.split('/api/')[-1].split('/', 1)[0])
return key
|
009255dc87525ccbeed212b6862ec2fdb203a4fe
| 23,971
|
def memstr_to_kbytes(text):
""" Convert a memory text to it's value in kilobytes.
"""
kilo = 1024
units = dict(K=1, M=kilo, G=kilo ** 2)
try:
size = int(units[text[-1]] * float(text[:-1]))
except (KeyError, ValueError):
raise ValueError(
"Invalid literal for size give: %s (type %s) should be "
"alike '10G', '500M', '50K'." % (text, type(text))
)
return size
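# For example:
print(memstr_to_kbytes('50K'))   # 50
print(memstr_to_kbytes('500M'))  # 512000
print(memstr_to_kbytes('10G'))   # 10485760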
|
58f7bacce980836613e22b8a53bafb06cab88bf5
| 23,972
|
def simpleStatistics(X, pre_state=None):
    """
    :param pre_state: list - [0, [0]*d, [0]*d] [N, SUM_vector, SUMSQ_vector]
    """
    X = list(X)
    d = len(X[0])  # dimension
    if pre_state is None:
        state = [0, [0]*d, [0]*d]
    else:
        state = pre_state
for x in X:
state[0] += 1
for i in range(d):
state[1][i] += x[i]
state[2][i] += x[i] ** 2
return state
|
9d8f3b2692f091db472cc4e394ee76adfef4ddbb
| 23,974
|
import numpy
def L2(x):
"""
Quadratic lagrange basis function.
:param x: points to interpolate
:type x: numpy array (npoints)
:return: basis weights
:rtype: numpy array(npoints, 3)
"""
L1, L2 = 1-x, x
Phi = numpy.array([
L1 * (2.0 * L1 - 1),
4.0 * L1 * L2,
L2 * (2.0 * L2 - 1)])
return Phi.T
|
0cb3154aeb6604c5077928ae57e33a6b18a0414b
| 23,977
|
def invertMask(mask):
"""
Inverts a numpy binary mask.
"""
return mask == False
|
f6a668a9b2f0928e2a71dc7e4de4d1e04cf307de
| 23,978
|
def _counter(metric):
"""Create a signal handler that counts metrics"""
def signal(sender, **kwargs):
metric.inc(job=sender.name)
return signal
|
5eac04e45fe3d992d26b576e81b891416bb0bcef
| 23,979
|
from typing import List
def get_data_from_csv_file(file_name) -> List[List]:
"""
:param file_name: file name to read
:return: list of lists that can take from csv file
"""
with open(file_name, 'r') as file:
lines = [
[element[1:-1] for element in line.strip().split(',')]
for line in file.readlines()
]
return lines[1:]
|
02b356d209c6038725f7edd1f80bb561fbf85103
| 23,980
|
def get_full_name_from_any_step(
step):
"""Gets the full name of a protobuf from a google.protobuf.Any step.
An any step is of the form (foo.com/bar.Baz). In this case the result would
be bar.Baz.
Args:
step: the string of a step in a path.
Returns:
the full name of a protobuf if the step is an any step, or None otherwise.
"""
if not step:
return None
if step[0] != "(":
return None
if step[-1] != ")":
return None
step_without_parens = step[1:-1]
return step_without_parens.split("/")[-1]
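# For example:
print(get_full_name_from_any_step("(foo.com/bar.Baz)"))  # bar.Baz
print(get_full_name_from_any_step("regular_field"))      # None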
|
2be9ac75331947b4fa89cdb51bc62cc41c1935e5
| 23,981
|
import inspect
def configurable(pickleable: bool = False):
"""Class decorator to allow injection of constructor arguments.
Example usage:
>>> @configurable()
... class A:
... def __init__(self, b=None, c=2, d='Wow'):
... ...
>>> set_env_params(A, {'b': 10, 'c': 20})
>>> a = A() # b=10, c=20, d='Wow'
>>> a = A(b=30) # b=30, c=20, d='Wow'
Args:
pickleable: Whether this class is pickleable. If true, causes the pickle
state to include the constructor arguments.
"""
# pylint: disable=protected-access,invalid-name
def cls_decorator(cls):
assert inspect.isclass(cls)
# Overwrite the class constructor to pass arguments from the config.
base_init = cls.__init__
def __init__(self, *args, **kwargs):
if pickleable:
self._pkl_env_args = args
self._pkl_env_kwargs = kwargs
base_init(self, *args, **kwargs)
cls.__init__ = __init__
# If the class is pickleable, overwrite the state methods to save
# the constructor arguments
if pickleable:
# Use same pickle keys as gym.utils.ezpickle for backwards compat.
PKL_ARGS_KEY = '_ezpickle_args'
PKL_KWARGS_KEY = '_ezpickle_kwargs'
def __getstate__(self):
return {
PKL_ARGS_KEY: self._pkl_env_args,
PKL_KWARGS_KEY: self._pkl_env_kwargs,
}
cls.__getstate__ = __getstate__
def __setstate__(self, data):
saved_args = data[PKL_ARGS_KEY]
saved_kwargs = data[PKL_KWARGS_KEY]
inst = type(self)(*saved_args, **saved_kwargs)
self.__dict__.update(inst.__dict__)
cls.__setstate__ = __setstate__
return cls
# pylint: enable=protected-access,invalid-name
return cls_decorator
|
8c037f92f070d36ecb9c1efa3586cb3e4a8a7a09
| 23,982
|
def getSchedule(filename):
"""
Expects a single line of the form
(1,4,3,2,6,4,5,0,8)
"""
with open(filename) as filly:
line = filly.readlines()[0]
line = line.replace('(', ' ').replace(')', ' ').replace(',', ' ')
return [int(x) for x in line.strip().split()]
|
0435cb08093a262b1e6c2224998b361b3fc77853
| 23,983
|
def highest_sum_list(lst: list, n: int) -> int:
"""
Returns highest sum for n consecutive numbers in a list
Using sliding window technique that is an O(n) in time complexity
Args:
lst (list): list of ints
n (int): n, consecutive nums
Returns:
highest sum for n consecutive numbers
"""
    current = sum(lst[:n])
    max_num = current
    i = 0
    while i + n < len(lst):
        # slide the window, then keep the best sum seen so far; the original
        # updated a single accumulator and returned the last window's sum
        # rather than the maximum
        current = current - lst[i] + lst[i + n]
        max_num = max(max_num, current)
        i += 1
    return max_num
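# A quick check: windows of 2 are 10, 11, 5, 11, 12 -> 12
print(highest_sum_list([1, 9, 2, 3, 8, 4], 2))  # 12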
|
fa453d717ffbfbceb47effc6559bd22a4d7542ba
| 23,985
|
def fds_crc(data, checksum=0x8000):
"""
Do not include any existing checksum, not even the blank checksums 00 00 or FF FF.
The formula will automatically count 2 0x00 bytes without the programmer adding them manually.
Also, do not include the gap terminator (0x80) in the data.
If you wish to do so, change sum to 0x0000.
"""
size = len(data)
for i in range(size + 2):
if i < size:
byte = data[i]
else:
byte = 0x00
for bit_index in range(8):
bit = (byte >> bit_index) & 0x1
carry = checksum & 0x1
checksum = (checksum >> 1) | (bit << 15)
if carry:
checksum ^= 0x8408
return checksum.to_bytes(2, "little")
|
0465498dfd47730b3f19fc3ee78d3aae30ab4166
| 23,986
|
def writedataset(h5group, name, data, overwrite=True):
"""
Write a dataset to a hdf5 file.
The `overwrite` parameter controls the behaviour when the dataset
already exists.
"""
if name in h5group:
if overwrite:
del h5group[name]
else:
return False
h5group[name] = data
return True
|
ae53c15155b79e850bc01a4ddadc6dc23adec7ea
| 23,987
|
import six
import sys
def upath(path):
"""
Always return a unicode path.
"""
if not six.PY3:
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
return path.decode(fs_encoding)
return path
|
9919147e770a6eb1f526b9d185cb9e40a42c66ce
| 23,988
|
def schemadict2rst(sd):
"""
Return a readable presentation of a property schema
Args:
:sd: (dict) 'schemadict'
Returns:
:rst: (str) RST documentation
"""
max_key_len = max([len(key) for key in sd.keys()]) + 4
max_value_len = max([len(str(value)) for value in sd.values()])
n1 = max_key_len
n2 = max_value_len
table_env = f"{'='*n1} {'='*n2}\n"
rst = ''
rst += table_env
for key, schema in sd.items():
key_string = f"**{key}**"
rst += f"{key_string.center(max_key_len, ' ')} {str(schema).center(max_value_len, ' ')}\n"
rst += table_env
rst += '\n'
return rst
|
291c846bcb3753bace61095cecfe8abcd8470e09
| 23,989
|
import os
def data_dir():
"""Returns the data directory within the test folder"""
return os.path.join(os.path.dirname(__file__), "data", "configurations")
|
a985c370251bd0197cc49e10b0242ec60cec91fe
| 23,992
|