| content (string, length 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k) |
|---|---|---|
def middle(t):
"""Returns all but the first and last elements of t.
t: list
returns: new list
"""
return t[1:-1]
|
99fd08614830e3b6d932289f95e6d38b95175fc4
| 23,730
|
def check_mapped_read(read):
"""Check read mapping flags."""
if read.is_unmapped or read.is_secondary or read.is_supplementary:
return False
return True
|
a097d691ad4958dac8c469d1688d0771b4e1ac65
| 23,731
|
def make_type_entity_id(type_id=None, entity_id=None):
"""
Assemble a type_id and entity_id and return a composite identifier.
If the entity Id is blank, ignore the supplied type id
>>> make_type_entity_id(type_id="type_id", entity_id="entity_id") == "type_id/entity_id"
True
>>> make_type_entity_id(type_id="type_id", entity_id="") == ""
True
"""
assert type_id is not None, "make_type_entity_id: no type id (%s, %s)"%(type_id, entity_id)
assert entity_id is not None, "make_type_entity_id: no entity id (%s, %s)"%(type_id, entity_id)
if entity_id != "":
return type_id + "/" + entity_id
return ""
|
8de40843e2bc35431333a2ea21947e5cd91d2db2
| 23,732
|
def sublist_generator(input_list: list) -> list:
"""
    Given a list, generates all contiguous sublists (slices)
    :param input_list: input list
    :return: list of all contiguous sublists
"""
list_of_lists = []
list_max_pos = len(input_list)+1
for initial_sublist_pos in range(list_max_pos):
for final_sublist_pos in range(initial_sublist_pos+1, list_max_pos):
list_of_lists.append(input_list[initial_sublist_pos:final_sublist_pos])
return list_of_lists
|
5a2ab952ce7cae55433f364371abc1951caa29ce
| 23,735
|
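A minimal usage sketch of sublist_generator above; the input list is illustrative only:

print(sublist_generator([1, 2, 3]))
# -> [[1], [1, 2], [1, 2, 3], [2], [2, 3], [3]]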
import socket
def hostexists(hostname):
""" is this host in DNS """
try:
ret = socket.gethostbyname(hostname)
return True
except socket.gaierror:
return False
|
2a9ae97754417562a2d4bd2ebbaf90db7d37c378
| 23,737
|
import tarfile
def compress(dirname):
    """
    Compress the directory into a .tar.gz file and return the archive path.
    """
    archive_path = dirname + ".tar.gz"
    with tarfile.open(archive_path, "w:gz") as tar:
        tar.add(dirname, arcname=dirname)
    return archive_path
|
d54f810a9b8fedcaa36700ca3cc34ef63da0f37e
| 23,739
|
import uuid
def validate_uuid(content=None, **kwargs):
"""
    This logic is a demo; the uuid must be obtained from the madkting API
:param content:
:param kwargs:
:return:
"""
if 't' not in kwargs:
return {}
if content is None:
# generate a UUID
return {
kwargs['t']: str(uuid.uuid1())[0:16]
}
else:
return {
kwargs['t']: content
}
|
bea74b67e5d3b45e11382ef1058600c65ce530d1
| 23,740
|
import click
def validate_argument(context, param, argument):
"""Validate that every argument follows the correct format.
This is a Click compatible callback.
"""
try:
for arg in argument:
split = arg.split(':')
if len(split) != 2:
raise click.BadParameter(
'Argument must follow the format "a:b"')
return argument
except AttributeError:
raise click.BadParameter('Argument must follow the format "a:b"')
|
f41212d8500b8c45e269c73fd87b0061a9d9a042
| 23,741
|
def must(allowed, fn):
""" Use 'fn' to compute a value, and then check that it is in the list 'allowed'
"""
def do_it(process, longname, flag, value):
new_val = fn(process, longname, flag, value)
if new_val not in allowed:
raise ValueError("new_val '%s' is not one of the allowed values ('%s') for %s in %s. Something must be wrong!" % (new_val, allowed, flag, process))
return new_val
return do_it
|
bc367892beb24270225311d329ccb6169e101dc3
| 23,742
|
from typing import List
def all_one_aligned(digits: List[int]) -> bool:
"""
Checks if all the 1 are aligned from the beginning of the list.
"""
to_check: List[int] = digits[0:digits.count(1)]
return all(x == 1 for x in to_check)
|
2c9a3d7e094f97a40048aa83561ff0ff5c31f40f
| 23,743
|
import pathlib
import yaml
import json
def read_file(file: pathlib.Path) -> dict:
"""Read a json/yaml file without the znjson.Decoder
Parameters
----------
file: pathlib.Path
The file to read
Returns
-------
dict:
Content of the json/yaml file
"""
if file.suffix in [".yaml", ".yml"]:
with file.open("r") as f:
file_content = yaml.safe_load(f)
elif file.suffix == ".json":
file_content = json.loads(file.read_text())
else:
raise ValueError(f"File with suffix {file.suffix} is not supported")
return file_content
|
9fce2b658ce26bb9c65b1f54de2d9d362fa06f57
| 23,744
|
def make_conditional(req, response, last_modified=None, etag=None):
""" Makes the provided response conditional based upon the request,
and mandates revalidation from clients
Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
setting ``last_modified`` and ``etag`` correctly on the response object
:param req: OpenERP request
:type req: web.common.http.WebRequest
:param response: Werkzeug response
:type response: werkzeug.wrappers.Response
:param datetime.datetime last_modified: last modification date of the response content
:param str etag: some sort of checksum of the content (deep etag)
:return: the response object provided
:rtype: werkzeug.wrappers.Response
"""
response.cache_control.must_revalidate = True
response.cache_control.max_age = 0
if last_modified:
response.last_modified = last_modified
if etag:
response.set_etag(etag)
return response.make_conditional(req.httprequest)
|
d858e4e42f10e364617c8675ce8126fb94784c97
| 23,745
|
import numpy
def extract_object(image, object_mask):
"""Crop an image to only contain an object.
Parameters
----------
image
The image to extract the object from.
object_mask
True where the object is.
Returns
-------
The cropped image.
"""
rows = numpy.any(object_mask, axis=1)
columns = numpy.any(object_mask, axis=0)
ymin, ymax = numpy.where(rows)[0][[0, -1]]
xmin, xmax = numpy.where(columns)[0][[0, -1]]
return image[ymin : ymax + 1, xmin : xmax + 1]
|
8ee6f717d60deafce6f7500f2ef3d94ffc47695b
| 23,747
|
def validate_issue_year(iyr):
"""
Validate issue year - four digits; at least 2010 and at most 2020.
"""
if len(iyr) != 4:
return False
return 2010 <= int(iyr) <= 2020
|
ef86d9a6bdda3372b83cf2ab960fa43ac17016ec
| 23,748
|
def get_command_name(command):
"""
    Only letters, digits, "-" and "_" are kept as-is. Spaces are replaced with ".".
    All other characters are replaced with "+".
"""
name = ""
for char in command:
if char.isalnum() or char == "-" or char == "_":
name += char
elif char == " ":
name += "."
else:
name += "+"
return name
|
143d7c1a2dd4d0c6199a0f1f3932597a66726ecd
| 23,749
|
def overrides(parent_class):
"""Inherits the docstring from the method of the same name in the indicated
parent class.
"""
def overriding(method):
assert(method.__name__ in dir(parent_class))
method.__doc__ = getattr(parent_class, method.__name__).__doc__
return method
return overriding
|
d4fd36109fc892743f23cdd2a4786828f5c3f75c
| 23,750
|
def check_keys(args, length):
"""Check if dict keys are provided
"""
params = ['email', 'username', 'password', 'old_password', 'fullname']
for key in args.keys():
if key not in params or len(args) != length:
return True
return False
|
5a3286ae994906eae39b81485bf451ef1ca30dec
| 23,751
|
def get_permutation_tuple(src, dst):
"""get_permtation_tuple(src, dst)
Parameters:
src (list): The original ordering of the axes in the tiff.
dst (list): The desired ordering of the axes in the tiff.
Returns:
result (tuple): The required permutation so the axes are ordered as desired.
"""
result = []
for i in dst:
result.append(src.index(i))
result = tuple(result)
return result
|
2bdd06162b41a10e7f92e1b25f1b9e4f821670a3
| 23,752
|
def get_parameter_dict():
"""Returns a dictionary with keys expected by all parameter dicts and values set to None."""
custom_dict = {name : None for name in ["constant", "linear", "annual (sin)", "annual (cos)", "bi-annual (sin)", "bi-annual (cos)"]}
return custom_dict
|
7b2682b2dcf765595da9461990b7f0d4852f3280
| 23,753
|
def delitem(obj, index):
"""
Deletes the element in obj at index, and returns obj for chaining
>>> spam = [1, 2, 3]
>>> delitem(spam, 1)
[1, 3]
>>> spam = {'one': 1, 'two': 2}
>>> delitem(spam, 'one')
{'two': 2}
>>> (delitem(globals(), 'spam'), None)[-1]
>>> try:
... spam
... assert False
... except NameError as ne:
    ...     print(repr(ne))
    NameError("name 'spam' is not defined")
"""
del obj[index]
return obj
|
74e6bc54cda8257c23e434b7d07826aa7d9a248c
| 23,755
|
import io
def latexify_results(eval_data, id_column='run_id'):
"""Take a data frame produced by `EvaluationProtocol.eval_data()` and
produce a LaTeX table of results for this method. Will use the `run_id`
column as an algorithm name (or to get algorithm names, if there's more
than one algorithm present in the given data). You can override that by
specifying the `id_column` keyword argument."""
# Each column of the LaTeX table corresponds to a particular evaluation
# environment, while each row corresponds to an algorithm. In contrast,
    # each row of the given Pandas frame represents a series of rollouts by
# one particular algorithm on one particular test configuration.
test_envs = eval_data['test_env'].unique()
col_names = [r'\textbf{%s}' % e for e in test_envs]
alg_names = eval_data[id_column].unique()
# write to buffer so we can use print()
fp = io.StringIO()
# prefix is just LaTeX table setup
print(r"\centering", file=fp)
print(r"\begin{tabular}{l@{\hspace{1em}}%s}" % ("c" * len(col_names)),
file=fp)
print(r"\toprule", file=fp)
# first line: env names
print(r'\textbf{Randomisation} & ', end='', file=fp)
print(' & '.join(col_names), end='', file=fp)
print('\\\\', file=fp)
print(r'\midrule', file=fp)
# next lines: actual results
for alg_name in alg_names:
alg_mask = eval_data[id_column] == alg_name
stat_parts = []
for env_name in test_envs:
full_mask = alg_mask & (eval_data['test_env'] == env_name)
relevant_rows = list(eval_data[full_mask].iterrows())
if len(relevant_rows) != 1:
raise ValueError(
f"got {len(relevant_rows)} rows corresponding to "
f"{id_column}={alg_name} and test_env={env_name}, but "
f"expected one (maybe IDs in column {id_column} aren't "
f"unique?)")
(_, row), = relevant_rows
std = row['std_score']
stat_parts.append(f'{row["mean_score"]:.2f} ($\\pm$ {std:.2f})')
print(r'\textbf{%s} & ' % alg_name, end='', file=fp)
print(' & '.join(stat_parts), end='', file=fp)
print('\\\\', file=fp)
print(r'\bottomrule', file=fp)
print(r'\end{tabular}', file=fp)
return fp.getvalue()
|
f2bb66f0a97392414bbbf3b4e449da3bd37c1954
| 23,756
|
import re
def ireplace(text, old, new, count=None):
"""
A case-insensitive replace() clone. Return a copy of text with all occurrences of substring
old replaced by new. If the optional argument count is given, only the first count
occurrences are replaced.
"""
pattern = re.compile(re.escape(old), re.IGNORECASE)
if count:
return pattern.sub(new, text, count=count)
else:
return pattern.sub(new, text)
|
d2ec2e4ea0a7393f79ea9223df666f1ea58730d5
| 23,757
|
def backtested_periods(backtest_statistics):
"""
Helper function for organizing column labels and their corresponding index in backtest_statistics.
:param backtest_statistics: (pd.DataFrame) Ex-post performance of efficient frontier portfolios.
:return: (dict) Dictionary of column labels as keys and their corresponding index in backtest_statistics.
"""
periods = list(backtest_statistics.keys())
columns = ['1WK', '1MO', '3MO', '6MO', '1YR', '3YR', '5YR', '10YR']
dictionary = dict(zip(columns, periods))
return dictionary
|
45fabfa0ec5ccbcc6ad2f802a40ac97d94d81a80
| 23,759
|
def operatingCost(fuel, oilLube, tires, maint, H):
"""
fuel = annual fuel cost
oilLube = annual oil and lubricant costs
tires = tire cost/hour inc. maintenance
maint = maintenance and repair costs
H = Productive hours
"""
hMaint = maint/H
return {'Hourly maintenance and repair': [hMaint],
'Fuel': [fuel],
'Oil & lubricants': [oilLube],
'Tires': [tires],
'Operating cost': [fuel+hMaint+oilLube+tires]}
|
559c85de88436b170b9c3a8d5ad8226323dcd562
| 23,760
|
def strategy(history, memory):
"""
If opponent defected, respond with defection. *UNLESS* we defected the turn before.
"""
opponents_last_move = history[1, -1] if history.shape[1] >= 1 else 1
our_second_last_move = history[0, -2] if history.shape[1] >= 2 else 1
choice = 1 if (opponents_last_move == 1 or our_second_last_move == 0) else 0
return choice, None
|
56af54fe2ae78b389da440c10b6bc9f4560141c9
| 23,761
|
import argparse
def parseArgs():
""" Parses arguments from commandline using the 'argparse' module.
The argparse module is part of the Python standard library, meaning that it is
included in all python installations.
Examples:
python .\test.py 'Hello, world' -r 2
>>> Hello, world
>>> Hello, world
python .\test.py 'Hello, world'
>>> Hello, world
python .\test.py -h
>>> usage: test.py [-h] [-r [REPS]] text
>>>
>>> positional arguments:
>>> text
>>>
>>> optional arguments:
>>> -h, --help show this help message and exit
>>> -r [REPS], --reps [REPS]
>>> specify number of times text should be printed
"""
parser = argparse.ArgumentParser()
# simple positional argument
# parser.add_argument('text') # required by default
    parser.add_argument('-t', '--text') # optional flag; not required by default
# more complex keyword argument
parser.add_argument('-r', '--reps', type=int, required=False, # '-' and '--' indicate flags
nargs='?', # nargs='?' means 0-1 arguments possible
const=1, # const=<...> gives a default value
help="specify number of times text should be printed") # help text available from the commandline
args = parser.parse_args()
if args.reps is None or args.text is None:
return("text here", 1)
else:
return(args.text, args.reps)
|
b1b6a5ae572f7fffcb0e93e509ba9f38af0068e6
| 23,762
|
def get_furniture_data():
"""
demonstration data
"""
furniture_data = [
{
'product': 'Red couch',
'description': 'Leather low back',
'monthly_rental_cost': 12.99,
'in_stock_quantity': 10
},
{
'product': 'Blue couch',
'description': 'Cloth high back',
'monthly_rental_cost': 9.99,
'in_stock_quantity': 3
},
{
'product': 'Coffee table',
'description': 'Plastic',
'monthly_rental_cost': 2.50,
'in_stock_quantity': 25
},
{
'product': 'Red couch',
'description': 'Leather high back',
'monthly_rental_cost': 15.99,
'in_stock_quantity': 17
},
{
'product': 'Blue recliner',
'description': 'Leather high back',
'monthly_rental_cost': 19.99,
'in_stock_quantity': 6
},
{
'product': 'Chair',
'description': 'Plastic',
'monthly_rental_cost': 1.00,
'in_stock_quantity': 45
}
]
return furniture_data
|
2114705a85ea94623985c4b70903d8e017eb374b
| 23,765
|
import os
def get_extra_info(entry):
"""Split entry string at delimiter : and return a 2-element list.
First element is tuple containing a filename split by extension.
If the delimiter is not found, second element returned is an empty
string.
"""
entry = entry.split(" :",1)
# Split filename by extension.
entry[0] = os.path.splitext(entry[0].replace("\\","/"))
if len(entry) > 1:
return entry
else:
return [entry[0],""]
|
98638e89cf724d6ddca3c5c9b29c48e0eaa6339b
| 23,766
|
def check_args(args):
"""Check arguments validity"""
if len(args["src"]) != len(args["dst"]):
print("Error: SRC and DEST must have same number of files")
return False
return True
|
fa544ce3b884ee3fbf0719fad72e42cf33b4d040
| 23,767
|
def update_position_avg_price_2way(cma_price, position_qty, trade_action, trade_direction, trade_price, trade_qty):
"""
Update position quantity and calculate average prices with a new trade.
Long/short positions are updated together, i.e. sell long == buy short.
Moving average price of current position is only updated when the position direction flips.
:param cma_price: Cumulative moving average prices of current position, either long or short.
:param position_qty: Position qty. Positive: long, negative: short.
:param trade_action: 0 - buy, 1 - sell
    :param trade_direction: 0 - long, 1 - short
    :param trade_price: float
    :param trade_qty: int
    :return: float, int, float. New average price, position qty and realized gain.
**Note**: Returned realized gain is not scaled with contract unit.
"""
if trade_action != trade_direction: # short
trade_qty *= -1
position_qty_new = position_qty + trade_qty
if position_qty_new == 0:
cma_price_new = 0.0
elif position_qty == 0 or (position_qty > 0) != (position_qty_new > 0):
cma_price_new = float(trade_price)
elif (position_qty > 0) == (trade_qty > 0):
cma_price_new = float(cma_price * position_qty + trade_price * trade_qty) / position_qty_new
else:
cma_price_new = cma_price
if position_qty != 0 and ((position_qty > 0) != (trade_qty > 0)):
realized_gain = (trade_price - cma_price) * (
2 * int(position_qty > 0) - 1) * min(abs(position_qty), abs(trade_qty))
else:
realized_gain = 0
return cma_price_new, position_qty_new, realized_gain
|
b4352da4a20ede5bb178d185d40fc3de43dc720d
| 23,769
|
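A small worked example for update_position_avg_price_2way above, using the flag conventions from its docstring (0 = buy/long, 1 = sell/short); the prices and quantities are illustrative:

# open a long position from flat: buy 10 @ 100
price, qty, gain = update_position_avg_price_2way(0.0, 0, 0, 0, 100.0, 10)
# price == 100.0, qty == 10, gain == 0
# partially close it: sell 4 @ 110
price, qty, gain = update_position_avg_price_2way(price, qty, 1, 0, 110.0, 4)
# price == 100.0 (average unchanged), qty == 6, gain == 40.0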
def celsius_to_fahrenheit(degrees):
"""
Given a temperature in celsius, return it in fahrenheit
:param degrees:
:return:
"""
return (degrees * 1.8) + 32.
|
548d4c9a67e353c54ee8bdc17291b2e881db154e
| 23,770
|
def simple_table(row, col, cell_factory):
"""
Create and return a simple table, like: [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
"""
return [
[cell_factory(i, j) for j in range(col)]
for i in range(row)
]
|
3133bc3178ab9ac53b64a4a1ef2289f1d46d2f67
| 23,771
|
def patch_trait_docstrings(cls):
"""Put the help string as docstring in all traits of a class"""
for trait_name, trait in cls.class_traits().items():
if 'help' in trait.metadata:
trait.__doc__ = trait.metadata['help']
return cls
|
0a1f43d3ba44a45bbf7e67f96b4fec7fe78b6552
| 23,773
|
def dump(value: bytes) -> bytes:
"""
Assume that object is bytes that has been dumped by other schema
so just return this bytes
"""
return value
|
c7dc6bde68f296f5943aca2fce2b262e2c941742
| 23,774
|
def tostr(cls):
"""
Decorator function to create a str representation for an object
:param cls: The class to be passed to the function
:return: The updated class
"""
def __str__(self):
obj_name = type(self).__name__
attr = ', '.join('{}={}'.format(*item) for item in vars(self).items())
return '{}({})'.format(obj_name, attr)
cls.__str__ = __str__
cls.__repr__ = __str__
return cls
|
8c10893af9025820ec05786a71d101d04544235a
| 23,775
|
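A brief usage sketch of the tostr decorator above; Point is a hypothetical class:

@tostr
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

print(Point(1, 2))  # -> Point(x=1, y=2)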
import click
def update(func):
"""[Experimental] Update an existing streaming Cloud Dataflow job."""
return click.option(
"--update/--no-update",
default=None,
is_flag=True,
help="[Experimental] Update an existing streaming Cloud Dataflow job.",
)(func)
|
2df0f460c381ea4eedb3fb8ec07e34a6fa2b4c8d
| 23,776
|
def _get_image_lib_name_from_object(obj):
"""
Hackish way to determine from which image lib 'obj' come from
without importing each lib module individually.
"""
result = ()
if obj is not None:
# PIL/Pillow Image
if hasattr(obj, "_close_exclusive_fp_after_loading"):
result = ("pil", "PIL/Pillow")
# wxPython Image
elif hasattr(obj, "FindFirstUnusedColour") and hasattr(
obj, "GetImageExtWildcard"
):
result = ("wx", "wxPython")
# PyQt4, PyQt5 or PySide QImage
elif hasattr(obj, "createHeuristicMask") and hasattr(obj, "setDotsPerMeterX"):
result = ("qt", "PyQt(4-5)/PySide(1.x)")
# OpenCV Image (NumPy ndarray)
elif hasattr(obj, "argpartition") and hasattr(obj, "newbyteorder"):
result = ("cv", "OpenCV")
return result
|
afb7d1a5f1a1723190b71a046361c442d8ea0161
| 23,777
|
def uses_requirement(requirement, field):
"""
Check if a given database field uses the specified requirement
(IS_IN_SET, IS_INT_IN_RANGE, etc)
"""
if hasattr(field.requires, "other") or requirement in str(field.requires):
if hasattr(field.requires, "other"):
if requirement in str(field.requires.other):
return True
elif requirement in str(field.requires):
return True
return False
|
99286b46992de285c0b202240530f44d0143e30d
| 23,778
|
import math
def equation_of_time(
var_y,
solar_geometric_mean_longitude,
eccentricity_earth_orbit,
solar_geometric_mean_anomaly,
):
"""Returns Equation Of Time, in minutes, with Var Y, var_y,
Solar Geometric Mean Longitude, solar_geometric_mean_longitude,
Eccentricity Earth Orbit, eccentricity_earth_orbit, Solar Geometric
Mean Anomaly, solar_geometric_mean_anomaly.
"""
equation_of_time = 4 * math.degrees(
var_y * math.sin(2 * math.radians(solar_geometric_mean_longitude))
- 2
* eccentricity_earth_orbit
* math.sin(math.radians(solar_geometric_mean_anomaly))
+ 4
* eccentricity_earth_orbit
* var_y
* math.sin(math.radians(solar_geometric_mean_anomaly))
* math.cos(2 * math.radians(solar_geometric_mean_longitude))
- 0.5
* var_y
* var_y
* math.sin(4 * math.radians(solar_geometric_mean_longitude))
- 1.25
* eccentricity_earth_orbit
* eccentricity_earth_orbit
* math.sin(2 * math.radians(solar_geometric_mean_anomaly))
)
return equation_of_time
|
113ec8bc755341336323bd73180239f249144115
| 23,779
|
from typing import Counter
def sum_all_counts(counters):
"""Sum up all the counter objects into a single one ignoring the courts.
:param counters: A dict of name-counter pairs.
:return: A counter object with counts for every year across all courts.
"""
c = Counter()
for court in counters.values():
c.update(court)
return c
|
39ddf048df859ca6a99ca374074aa66cf968af17
| 23,780
|
def index(state, imgs, url):
"""
Indexes a file in the state
"""
for img in imgs:
if 'Imported' in imgs[img]['url']:
print('Importing...', img)
state[img] = imgs[img]
if not imgs[img]['url'] in state:
imgs[img]['target'] = url
state[imgs[img]['url']] = imgs[img]
return state
|
1a1a730afad839dedd95de4a14bbf2b5fca2f8b7
| 23,781
|
def hm_to_float(hours: int, minutes: int) -> float:
"""Convert time in hours and minutes to a fraction of a day
:type hours: int
:type minutes: int
:rtype: float
"""
if not (0 <= hours < 24 and 0 <= minutes < 60):
raise ValueError("Incorrect time")
return hours / 24 + minutes / 60 / 24
|
2fb0c03e0b6570a0b801a742dfcb06b0b27badf7
| 23,782
|
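Two illustrative checks for hm_to_float above, with values chosen so the floating-point results are exact:

assert hm_to_float(12, 0) == 0.5   # noon is half a day
assert hm_to_float(6, 0) == 0.25   # 06:00 is a quarter of a day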
def clock_angle(hour, minute):
"""
Calculate Clock Angle between hour hand and minute hand
param hour: hour
type: int
    param minute: number of minutes past the hour
type: int
return: the smallest angle of the two possible angles
"""
if (hour < 0 or minute < 0 or hour > 12 or minute > 59):
raise ValueError("Invalid Input: Time is based on a 12-hour clock")
if hour == 12:
hour = 0
# calculate the angle moved by the hour and minute hands
# with reference to 12:00
hour_angle = 0.5 * (hour * 60 + minute)
minute_angle = 6 * minute
# Find the difference between the two angles
angle = abs(minute_angle - hour_angle)
    # return the smaller of the two possible angles (per the docstring)
    return min(angle, 360 - angle)
|
2defecdead7b7283913f8f69cc4d12456c8fc4aa
| 23,783
|
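A quick sanity check for clock_angle above, under the smallest-angle convention stated in its docstring; the times are illustrative:

assert clock_angle(3, 0) == 90.0   # hour hand at 90 degrees, minute hand at 0
assert clock_angle(9, 0) == 90.0   # raw difference is 270 degrees, the smaller angle is 90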
def list_methods(interface):
"""
    List all explicit methods of `interface`, hiding methods that start with '_'
Parameters:
----------
interface : interface of CST Studio
Returns:
----------
methods : array of string
        Explicit methods of interface
"""
methods = []
for key in dir(interface):
method = str(key)
if not method.startswith('_'):
methods.append(method)
return methods
|
34d96913cc31eeb837de3bcc4ad1a276d86ebfc7
| 23,784
|
def get_tuple_py_string(param_obj, curr_depth=0):
"""
Put together the tuple string based on the number of elements in
the given elements list. if this was a one demensional array, the
list would simply be, for example [3], where 3 is the number of
elements in the array. A 3x3 2D array would be [3, 3] a 3x3x3 3D
array is: [3, 3, 3]. The output for these examples would be:
(xxx)
((xxx)(xxx)(xxx))
(((xxx)(xxx)(xxx))((xxx)(xxx)(xxx))((xxx)(xxx)(xxx)))
"""
list = param_obj.num_elements
char = param_obj.py_string
t = "("
for i in range(0, list[curr_depth]):
if curr_depth < (len(list) - 1):
t += get_tuple_py_string(param_obj, (curr_depth+1))
else:
t += char
t += ")"
return t
|
cee35e0ffeb8629e5cf9fc9f42b7d931721e5ef9
| 23,785
|
import psutil
def get_used_files():
"""Get files used by processes with name scanpy."""
loop_over_scanpy_processes = (proc for proc in psutil.process_iter()
if proc.name() == 'scanpy')
filenames = []
for proc in loop_over_scanpy_processes:
try:
flist = proc.open_files()
for nt in flist:
filenames.append(nt.path)
# This catches a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess as err:
pass
return set(filenames)
|
cbe0f4f87206cb4587ffaae002fb66b14ec4e2e4
| 23,786
|
from typing import Union
from typing import Type
from typing import List
def normalize_string_list(string_list: str, required_type: Union[Type, List[Type]]) -> str:
"""Add wrap string list into brackets if missing."""
if not isinstance(string_list, str):
return string_list
if isinstance(required_type, list):
while not string_list.startswith("[["):
string_list = "[" + string_list
while not string_list.endswith("]]"):
string_list += "]"
return string_list
if not string_list.startswith("["):
string_list = "[" + string_list
if not string_list.endswith("]"):
string_list += "]"
return string_list
|
e3d1ce7fde19a6f748c8bdc667cf01bc1d62fe3e
| 23,787
|
import re
def words(text: str) -> list[str]:
"""
Splits text into an array of its words. Splits at all spaces and trims any punctuation.
Parameters
------------
text: str
The text to be split up.
Returns
------------
str[]
An array of strings (words)
"""
list_words = re.split(' |\n|\t', text) #Splits along spaces
for i in range(0, len(list_words)):
list_words[i] = re.sub(r'\.|\?|!|\,|\;|\"|\(|\)|\:|\/|\“|\”', '', list_words[i])
list_words = filter(str.strip, list_words) #Strips leading/trailing whitespaces
#and filters out 'sentences' that are only whitespace
return list(list_words)
|
d7a58575b7c4a7bbf802cc4e24628206e3c6b19c
| 23,788
|
def build_service_catalog_parameters(parameters: dict) -> list:
"""Updates the format of the parameters to allow Service Catalog to consume them
Args:
parameters (dict): List of parameters in the format of
{"key1":"value1", "key2":"value2"}
Returns:
list: Parameters in the format of {"Key":"string", "Value":"string"}
"""
new_parameters = list()
for key, value in parameters.items():
y = dict()
y['Key'] = key
y['Value'] = value
new_parameters.append(y)
return new_parameters
|
7529be4639300a88ad2f7409dd5355fb6791ba4c
| 23,789
|
import json
def extract_item_from_array_to_int(value, key) -> int:
"""Extract item from array to int."""
return int(json.loads(value)[int(key)])
|
d2a5a3fa95faf4bf0c9d6787a0d8b530817f71d8
| 23,790
|
import math
def get_distance_charge(delivery_distance: int) -> float:
"""
This function calculates distance fee and adds it to delivery fee.
---
Args:
delivery_distance (int): distance of the delivery
Returns:
distance_charge (float): fee for additional distance
"""
first_kilometer = 1000
additional_distance = 500
additional_distance_fee = 100
minimum_fee = 200
if delivery_distance > 1000:
additional_distance = math.ceil((delivery_distance - first_kilometer) / additional_distance)
return additional_distance * additional_distance_fee + minimum_fee
return minimum_fee
|
bc433a2a2dead852d2d5b63723319afffc1ce97e
| 23,791
|
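Illustrative calls to get_distance_charge above; that the fee values are in cents is an assumption based on the constants inside the function:

assert get_distance_charge(800) == 200    # within the first kilometre: minimum fee only
assert get_distance_charge(1499) == 300   # one extra 500 m block started
assert get_distance_charge(1501) == 400   # two extra 500 m blocks started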
import re
def standardize_name(name):
"""Replace invalid characters with underscores."""
return re.sub("[^0-9a-zA-Z]+", "_", name)
|
f7e207ed109cdc893d1ed299b2b60b187c2ffafa
| 23,793
|
def _get_tile_translation(tile_grid_dict, img_size, img_overlap):
"""Calculates translation for each tile depending on their position
in a grid, their size and overlap.
Translations are returned as values in a dictionary with tile positions as keys.
Args:
tile_grid_dict (dict): Tile numbers and their respective positions in the grid.
img_size (tuple): Size of images (pixel in y- and x-direction).
img_overlap (float): Overlap between tiles in percentage.
Returns:
(dict): Tiles and their translation (x and y direction).
"""
assert len(img_size) == 2, f"img_size should be a tuple with two integers for y- and x-dimensions of a tile image."
    assert (img_overlap < 1) and (img_overlap > 0), ("img_overlap should be a float that represents tile overlap "
f"in percentage, instead got: {img_overlap}")
y_size, x_size = img_size
tile_trans = {}
# iterate over tiles and find translations in x and y direction
for tile, (row, col) in tile_grid_dict.items():
x_trans = ((col - 1) * x_size) - ((col - 1) * (x_size * img_overlap))
y_trans = ((row - 1) * y_size) - ((row - 1) * (y_size * img_overlap))
tile_trans[tile] = (x_trans, y_trans)
return tile_trans
|
64d661454833776e895453c821bc6b8c5b1e835a
| 23,794
|
def getNodeClassName():
"""
    getNodeClassName() -> str
gets the class name for the currently selected node
@return: a string containing the name.
"""
return str()
|
24c8806607cf423c465dfc2808b78e2de966845c
| 23,795
|
def PF_op_pw(u, df, inverses, x):
"""
PERRON-FROBENIUS OPERATOR POINTWISE
Arguments:
- <u> a function with one argument
- <df> a function: the derivative of the dynamical system function f
(should take one arg)
- <inverses> a list of functions, each taking one argument, that find the
inverse of x under each branch of f
- <x> a float
Returns:
- a float, which is the value of PF(u) at the point x -- where PF is the
PF-operator associated to the system f.
NOTES:
- Uses a formula for the PF-operator that only works if f is piecewise
monotonic.
"""
y = 0
for inv in inverses:
z = inv(x)
y += u(z) / abs(df(z))
return y
|
58c3870ef6d09d0153e1e3ea403562781280dda7
| 23,797
|
import numpy
def _AverageResultsForSection(lines, results_section_header_index):
"""Return the average bandwidth for a specific section of results
Args:
lines: output of bandwidthTest, split by lines and stripped of whitespace
results_section_header_index: line number of results section header.
The actual results, in MB/s, should begin three lines after the header.
Returns:
average bandwidth, in MB/s, for the section beginning at
results_section_header_index
"""
RESULTS_OFFSET_FROM_HEADER = 3
results = []
for line in lines[results_section_header_index + RESULTS_OFFSET_FROM_HEADER:]:
if not line:
break # done with this section if line is empty
results.append(float(line.split()[1]))
return numpy.mean(results)
|
5c6083e42afe6608a32a5c25cd590c1631d8db51
| 23,798
|
def singleton(klass):
"""
Singleton class decorator.
"""
instances = {}
def get():
if klass not in instances:
instances[klass] = klass()
return instances[klass]
return get
|
263d89b90b5da86a2df644eacff33e0899fd33c7
| 23,800
|
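A minimal usage sketch of the singleton decorator above; AppState is a hypothetical class:

@singleton
class AppState:
    def __init__(self):
        self.cache = {}

a = AppState()  # AppState is now the inner get(), so calling it returns the shared instance
b = AppState()
assert a is b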
def highlight_dtype(series, dtype='object'):
"""highlight rows in blue if dtype==dtype"""
return [
'color: #1f78b4' if series["dtype"] == dtype else ''
]*series.shape[0]
|
244fa702b9f88b129f2e75bb7430a6e807b2f327
| 23,801
|
def _gzipped(stream):
"""Return True if stream is a gzip file."""
initial_pos = stream.tell()
gzip_magic = b"\x1f\x8b"
file_magic = stream.read(2)
stream.seek(initial_pos) # rewind back 2 bytes
return file_magic == gzip_magic
|
1b837da396b16d10d3382cba1c804b89c5026dc4
| 23,802
|
def get_epoch_catalogues(epochs_file):
"""
Read a file which contains a list of the catalogues to be read
parameters
----------
    epochs_file : str
A file which has a list of catalogues, one per line.
returns
-------
files : list
A list of filenames
"""
files = list(map(str.strip, open(epochs_file).readlines()))
return files
|
b64d96e823cff1ee2b5d43780e7b96ff36397784
| 23,803
|
def convert_alert_to_servicenow(args):
"""
Converts a PANW IoT alert to ServiceNow table formatted.
"""
incident = args.get('incident')
asset_list = args.get('asset_list')
alert = asset_list[0]
comments_and_work_notes = str(incident['comments_and_work_notes'])
url = str(incident['url'])
urgency = str(incident['urgency'])
incident.setdefault('user_email', 'cannot find any email')
# user_email = incident['user_email']
zb_ticketid = incident['correlation_id']
alert.setdefault('msg', {}).setdefault('impact', 'Sorry, no impact available to display so far!')
alert.setdefault('msg', {}).setdefault('recommendation', {}).setdefault(
'content', ['Sorry, no recommendation available to display so far!'])
alert.setdefault('location', 'Sorry, location is not provided')
alert.setdefault('category', 'Sorry, category is not provided')
alert.setdefault('profile', 'Sorry, profile is not provided')
alert.setdefault('description', 'Sorry, description is not provided')
alert.setdefault('name', '')
impact = alert['msg']['impact']
recommendations = alert['msg']['recommendation']['content']
recommendation_text = ''
alert_description = str(alert['description'])
category = str(alert['category'])
profile = str(alert['profile'])
location = str(alert['location'])
short_description = str(alert['name'])
for rec in recommendations:
recommendation_text += '*' + rec + '\n'
new_line = '\n'
description = (
f'Summary{new_line}{alert_description}{new_line}{new_line}Category: {category} Profile: {profile}'
f'{new_line}{new_line}Impact{new_line}{impact}{new_line}{new_line}Recommendations{new_line}'
        f'{recommendation_text}{new_line}URL{new_line}{url}'
)
data = (
f'urgency={urgency};location={location};short_description={short_description};'
f'comments_and_work_notes={comments_and_work_notes};description={description};'
f'correlation_id={zb_ticketid};impact=3;company=Palo Alto Networks;opened_by=svc_panw_iot;'
)
return data
|
aaa84ce8e2e002f11ffbb273e71229ae9fde29bf
| 23,804
|
from typing import List
def domain_param_list_from_dict(dp_dict: dict, dp_mapping: dict) -> List:
"""
Map a dict of domain parameter keys and its values to a list which has the same ordering as `dp_mapping`.
:param dp_dict: A dict assigning a value to a domain-parameter name.
    :param dp_mapping: A dict mapping an integer index to a domain-parameter name. The integer specifies the order in
                       which the domain parameter should be listed
:return: Ordered list of the `dp_dict` values based on the dp_mapping
"""
return [dp_dict[dp_mapping[val]] for val in sorted(dp_mapping.keys())]
|
b9666aa681229d76404b153b807ddd7bc439e0dd
| 23,806
|
def repr_attributes(obj: object, *anonymous_elements: object, **named_elements: object) -> str:
"""
A simple helper function that constructs a :func:`repr` form of an object. Used widely across the library.
>>> class Aa: pass
>>> assert repr_attributes(Aa()) == 'Aa()'
>>> assert repr_attributes(Aa(), 123) == 'Aa(123)'
>>> assert repr_attributes(Aa(), foo=123) == 'Aa(foo=123)'
>>> assert repr_attributes(Aa(), 456, foo=123, bar='abc') == "Aa(456, foo=123, bar='abc')"
"""
fld = list(map(repr, anonymous_elements)) + list(f'{name}={value!r}' for name, value in named_elements.items())
return f'{type(obj).__name__}(' + ', '.join(fld) + ')'
|
251efd2086a3d5ec179317cbe6d88c456be8498e
| 23,807
|
import six
import string
def name_for_filesystem(input_name):
"""Generate a name 'safe' and 'clean' for filesystem usage (no spaces,
only lowercase, etc.)
:param input_name: name to use as input
:type input_name: str
:raises TypeError: if the input name is not a valid string
:return: the safe name
:rtype: str
"""
if not isinstance(input_name, six.string_types):
raise TypeError("Please provide a valid string. "
"Received: %s, %s" % (input_name, type(input_name)))
input_name = input_name.replace(" ", "_")
input_name = input_name.replace("-", "_")
letters = [
a for a in input_name
if a in string.ascii_letters or a in string.digits or a == "_"
]
return "".join(letters).lower()
|
7691ed2835b18a4d9efd0380ed0d20d5a6c6bfa4
| 23,808
|
def is_probably_graphql(location: str) -> bool:
"""Detect whether it is likely that the given location is a GraphQL endpoint."""
return location.endswith(("/graphql", "/graphql/"))
|
195c3bb14cc5129531cd92218f90177fbb08a900
| 23,810
|
def strip_subreddit_url(permalink):
"""
Strip a subreddit name from the subreddit's permalink.
    This is used to avoid submission.subreddit.url making a separate API call.
"""
subreddit = permalink.split('/')[4]
return '/r/{}'.format(subreddit)
|
e1bbc64a0607518bc8bb8f12a27439377a2fc6b7
| 23,811
|
def load_smiles_from_txt(file):
"""Load SMILES from a txt file.
Parameters
----------
file : str
Path to a txt file where each line has a SMILES string.
Returns
-------
list of str
List of SMILES
"""
smiles = []
with open(file, 'r') as f:
for line in f.readlines():
smiles.append(line.strip())
return smiles
|
32fc375cddc244a750ad86b009d1019b6f131365
| 23,812
|
def compute_blendw_sample_loss(rets):
"""
Compute a sparsity loss to encourage blendw to be low for every sample point
"""
loss = 0.
for label in ['coarse', 'fine']:
ret = rets[label]
loss += (ret['blendw'] ** 2).mean()
return loss / 2.
|
7579c72112fd6eae61c70c43bbb20a0401ccaedc
| 23,813
|
import os
def file_types(path):
""" Takes a path and returns a list of the file types of files at or below
that path
"""
file_types_list = []
for root, directories, files in os.walk(path):
for name in files:
filename, file_extension = os.path.splitext(name)
if(not(file_extension in file_types_list)):
file_types_list.append(file_extension)
return file_types_list
|
dd15768f2254e14a6af934d0987ce9bcd5a5041e
| 23,814
|
import math
def pack_size(value):
"""Returns the number of bytes required to represent a given value.
Args:
value (int): the natural number whose size to get
Returns:
The minimal number of bytes required to represent the given integer.
Raises:
ValueError: if ``value < 0``.
TypeError: if ``value`` is not a number.
"""
if value == 0:
return 1
elif value < 0:
raise ValueError('Expected non-negative integer.')
return int(math.log(value, 256)) + 1
|
cf0a83259ce7d76d0ec1b7ac4a69b5d2a6099d92
| 23,816
|
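A few spot checks for pack_size above, with values picked to stay clear of floating-point edge cases:

assert pack_size(0) == 1
assert pack_size(255) == 1    # fits in one byte
assert pack_size(1000) == 2   # 0x03E8 needs two bytes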
def inr(r,s,t):
"""r is in range of s and t left inclusive"""
return (r < t) and (r >= s)
|
7a75021a9b0e22c0a580fc658aafe349d34cb2a0
| 23,817
|
def scaled_image(width, height):
"""face_recognition face detection slow AF compare to MTCNN
scaling the image as per resolution to process fast"""
total = width*height
scale_dict = {0.2: 6500000, 0.3: 4000000, 0.4: 2250000, 0.5: 1000000, 0.8: 500000, 1: 0}
for k, v in scale_dict.items():
if total > v:
return k
|
b8e97599a2046e4eb1427a73495a00753cb3ee08
| 23,818
|
from datetime import datetime
def create_date(date_info: datetime) -> str:
"""Converts a datetime object to create a custom string
@param date_info: Date object to convert
@return: Custom date format
"""
return date_info.strftime("%m-%d-%Y %H:%M:%S") if date_info else "No date provided"
|
a1b37e79f7eded3d348e92b966006a886a341625
| 23,819
|
def prime_number(number):
"""
    Checks if a number is prime.
"""
for i in range(2, number - 1):
if number % i == 0:
return False
    return number >= 2  # 0 and 1 are not prime
|
2229c599b034bced9f20fd489b190bdd0e69e491
| 23,820
|
def pe_6():
"""Return the difference between the sum of all squares of all
numbers from 1 to 100 and the square of the sum of all numbers from 1 to
100."""
integers = [i for i in range(1, 101)]
sum_of_squares = 0
square_of_sum = (sum(integers))**2
for i in integers:
sum_of_squares += i**2
answer = square_of_sum - sum_of_squares
return f'The solution for project euler 6 is {answer:,}.'
|
cd150e828b5915295bd488cd096e01e5a0da416e
| 23,821
|
import re
import os
def get_file_date_regex(filename):
"""Get regex from filename"""
# regex for ""%Y-%m-%d--%H:%M:%S"
reg0 = r"\d{4}.\d{2}.\d{2}..\d{2}.\d{2}.\d{2}"
# regex for "%Y%m%d%H%M%S"
reg1 = r"\d{14}"
match = re.search(reg0, os.path.basename(filename))
return reg1 if match is None else reg0
|
72d042ab5403585fc95cbde009f7af3a35c0a4e0
| 23,823
|
import sys
import os
import subprocess
def call(command):
"""
Wraps subprocess.call to run a command.
Parameters
----------
command : str
Returns
-------
int
Exit code of subprocess.
Examples
--------
>>> from libtbx.easy_run import call
>>> ret = call("echo 1")
1
    >>> print(ret)
0
"""
for s in [sys.stdout, sys.stderr]:
flush = getattr(s, "flush", None)
if (flush is not None): flush()
if (sys.platform == 'darwin'): # bypass SIP on OS X 10.11
command = ("DYLD_LIBRARY_PATH=%s exec "%\
os.environ.get("DYLD_LIBRARY_PATH","")) + command
return subprocess.call(args=command, shell=True)
|
0284dc5dd080e7fc0f81c94b009e20c521a1493d
| 23,824
|
def clean_noise(text):
"""Cleans noisy nodes."""
noise = {",", "|", ".", "-", "*", "", "/", " ", ":", ";", ">", "<"}
if text in noise:
return ""
else:
return text
|
88b9143fbb7e4dbc9c3b1ea0d2b8948217321143
| 23,825
|
def _generate_citrine_to_preparation_step_map():
"""Generate a dictionary mapping db column name to integer
    position of corresponding column in the CSV """
cmap = {}
cmap["name"] = 0
cmap["timestamp"] = 1
cmap["furnace_temperature"] = 2
cmap["furnace_pressure"] = 3
cmap["sample_location"] = 4
cmap["helium_flow_rate"] = 5
cmap["helium_flow_rate"] = 6
cmap["hydrogen_flow_rate"] = 7
cmap["hydrogen_flow_rate"] = 8
cmap["carbon_source"] = 9
cmap["carbon_source_flow_rate"] = 10
cmap["carbon_source_flow_rate"] = 11
cmap["argon_flow_rate"] = 12
cmap["argon_flow_rate"] = 13
return cmap
|
ce3c1906364b0bcaef70f5a3ab3f5e618b4def4a
| 23,827
|
def padded(a_list, n, value=None):
"""
Return a copy of `a_list` with length `n`.
"""
a_list = list(a_list)
padding_length = n - len(a_list)
if padding_length <= 0:
return a_list
padding = [value] * padding_length
return a_list + padding
|
23db171f3b3768ff34d9c0eaaa0c6676a052769f
| 23,829
|
import inspect
def _is_class(module, member, clazz):
"""
Validates if a module member is a class and an instance of a CoreService.
:param module: module to validate for service
:param member: member to validate for service
:param clazz: clazz type to check for validation
:return: True if a valid service, False otherwise
:rtype: bool
"""
if not inspect.isclass(member):
return False
if not issubclass(member, clazz):
return False
if member.__module__ != module.__name__:
return False
return True
|
bdfc269e28c08d72ad683b76e143a09c86da9d6e
| 23,830
|
def verifyIsCloseEnough(number1, number2, margin = 0.05):
"""
Return true if number1 is within margin of number 2.
"""
    max_diff = abs(number2) * margin
return (abs(number1 - number2) < max_diff)
|
70ea34f182f77508a7b1b10dfaeccbed09c0c239
| 23,832
|
def process_text(raw_text: str) -> list[str]:
"""
Parses each line in `raw_text` and adjusts chars as needed to get a csv format
:param raw_text: string to parse lines from.
:return: List of rows (strings) with tabular data in csv format.
"""
results = list()
text = raw_text
for c in ('\r', '-', '—'):
text = text.replace(c, '')
text = text.replace(',', '.')
lines = text.split('\n')
for line in lines:
words = line.split('\t')
csv_string = ", ".join(words)
results.append(csv_string + '\n')
return results
|
e6abb52f2581fedd9b69bbfd642bd26db06b466e
| 23,834
|
import logging
def is_status(search_data):
"""
    Check whether search_data is given and whether it means active ('正常') or disabled ('停用')
    :param search_data: status text to check
    :return: '0' for active, '1' for disabled, the original value otherwise, or '' if empty
"""
logging.info('is_status')
if search_data:
if search_data == '正常':
return '0'
elif search_data == '停用':
return '1'
else:
return search_data
else:
return ''
|
c1d1c8e4c7925e15822723646ae15f3650491501
| 23,835
|
def call_once(f):
"""Cache the result of the function, so that it's called only once"""
result = []
def wrapper(*args, **kwargs):
if len(result) == 0:
ret = f(*args, **kwargs)
result.append(ret)
return result[0]
return wrapper
|
1713445b4dbc1837ed76a33aa93aeab07bedd693
| 23,836
|
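A short usage sketch of call_once above; load_config is a hypothetical function with a visible side effect:

@call_once
def load_config():
    print("loading...")  # executes only on the first call
    return {"debug": True}

cfg1 = load_config()  # prints "loading..."
cfg2 = load_config()  # cached, nothing printed
assert cfg1 is cfg2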
import torch
from typing import Sequence
def crop(data: torch.Tensor, corner: Sequence[int], size: Sequence[int]) -> torch.Tensor:
"""
Extract crop from last dimensions of data
Args:
data: input tensor
corner: top left corner point
size: size of patch
Returns:
torch.Tensor: cropped data
"""
_slices = []
if len(corner) < data.ndim:
for i in range(data.ndim - len(corner)):
_slices.append(slice(0, data.shape[i]))
_slices = _slices + [slice(c, c + s) for c, s in zip(corner, size)]
    return data[tuple(_slices)]
|
2f596db499e3b1d59475477e71a95e8a170242da
| 23,838
|
def find_subtree_indices(doc, dependency_type):
"""
This function finds and returns the indices of the entire clause
(each token) in the subtree to be removed.
Args:
doc: spaCy Doc of the clean sentence
dependency_type:str Options are "appos", "acl", "relcl", "advcl"
Return:
indices_to_remove_subtree: list of indices of the subtree
"""
# List of indices of clause tokens to be removed in the sentence
indices_to_remove_subtree = []
# List of unique spaCy hashes for string tokens in the doc
# Position remains the same from original doc
hash_ids_of_tokens = [token.orth for token in doc]
# Iterate through the doc to get the dep clause subtree
for index, token in enumerate(doc):
# Check for dependency label
if token.dep_ == dependency_type:
# Get the indices of subtree- all tokens of the clause
for subtree_token in token.subtree:
# Get the unique hash id for the subtree token
subtree_token_id = subtree_token.orth
# Look up the token's index in the doc
subtree_token_index_in_doc = hash_ids_of_tokens.index(subtree_token_id)
# Add to list of indices to be removed
indices_to_remove_subtree.append(subtree_token_index_in_doc)
# Return list of indices
return indices_to_remove_subtree
|
31c6b3c7120075f5abf2ddada2113c73177b031b
| 23,840
|
def check_table_exists(connection, table_name):
"""
Returns a Boolean to tell if a certain table exists already.
"""
data = None
with connection.cursor() as cursor:
cursor.execute(
f'SELECT * '
f'FROM information_schema.tables '
f"WHERE table_schema = 'public' AND table_name = '{table_name}'"
'LIMIT 1;')
data = cursor.fetchone()
return data is not None
|
3580dbe9b84d521fb9f16da7b82f400525852e23
| 23,841
|
def to_callable(obj):
"""Turn an object into a callable.
Args:
obj: This can be
* **a symbolic expression**, in which case the output callable
evaluates the expression with symbols taking values from the
callable's arguments (listed arguments named according to their
numerical index, keyword arguments named according to their
string keys),
* **a callable**, in which case the output callable is just the
input object, or
* **anything else**, in which case the output callable is a
constant function which always returns the input object.
Returns:
callable
Examples:
>>> to_callable(Symbol(0) + Symbol('x'))(3, x=4)
7
>>> to_callable(lambda x: x + 1)(10)
11
>>> to_callable(12)(3, x=4)
12
"""
if hasattr(obj, '_eval'):
return lambda *args, **kwargs: obj._eval(dict(enumerate(args), **kwargs))
elif callable(obj):
return obj
else:
return lambda *args, **kwargs: obj
|
37e305726cdf409623bf764864ab1ec5989b4690
| 23,842
|
def degree_of_total_leverage(financial_leverage, operating_leverage):
"""
Summary: Calculate the degree of total leverage.
PARA financial_leverage: Firms degree of financial leverage.
PARA type: float
PARA operating_leverage: Firms degree of operating leverage.
PARA type: float
"""
return financial_leverage * operating_leverage
|
dc5493f53969dbe480cf268f13b84f06202f6cdb
| 23,843
|
def toml_dict(prob_path, problem_name, path):
"""
domain = "clutter_ml/clutter.pddl"
problem = "clutter_ml/clutter_prob0.pddl"
scene = "clutter_ml/clutter_prob0.xml"
objdir = "clutter_ml/"
semantics = "clutter_ml.lua"
output = "clutter_ml_solution.json"
perf_file = "clutter_ml.csv"
params = "params.toml"
blacklist = "models/pr2_collision_blacklist.csv"
cont_state_file = "clutter_ml_cont_state.txt"
disc_state_file = "clutter_ml_disc_state.txt"
time_file = "cultter_ml_time.txt"
cfg_file = "clutter_ml_cfg.txt"
"""
res = {}
res['domain'] = path + 'clutter_ml/clutter.pddl'
res['problem'] = prob_path + "clutter_%s.pddl" % (problem_name)
res['scene'] = prob_path + "clutter_%s.xml" % (problem_name)
res['objdir'] = path + "clutter_ml/"
res['semantics'] = path + "clutter_ml.lua"
res['output'] = prob_path + "clutter_ml_solution.json"
res['perf_file'] = prob_path + "clutter_ml.csv"
res['params'] = path + "params.toml"
res['blacklist'] = path + "models/pr2_collision_blacklist.csv"
res['cont_state_file'] = prob_path + "clutter_ml_cont_state.txt"
res['disc_state_file'] = prob_path + "clutter_ml_disc_state.txt"
res['time_file'] = prob_path + "cultter_ml_time.txt"
res['cfg_file'] = prob_path + "clutter_ml_cfg.txt"
return res
|
17d2e11a2d7a198ca9ba8b263b9cad580e424e3e
| 23,844
|
from datetime import datetime
def datetime_unserializer(d_str):
"""Convert a string representation to a datetime object for JSON unserialization.
:param d_str: the datetime string we want to unserialize
:type d_str: str
:return: the datetime unserialized to a datetime
:rtype: datetime
"""
return datetime.fromisoformat(d_str)
|
5555425063a73cedff6a87b3e70ba7ea8429b01e
| 23,845
|
def is_pangram(sentence: str) -> bool:
"""Return True if given string is a panagram."""
# a pangram is a sentence using every letter of the alphabet at least once
sentence = sentence.lower()
alphabet = 'abcdefghijklmnopqrstuvwxyz'
return set(sentence).issuperset(alphabet)
|
85484fcc50d41d92495fc298c2318f8a9edff08b
| 23,846
|
def get_shuffle_together(config):
"""Get which parameters need to be shuffled together, including multi-step values."""
shuffle_together = config.get('shuffle_together', [])
data_multistep = config.get('rl_multistep', [])
for steps in data_multistep:
group = []
for s in range(1, steps + 1):
group.append(f'multistep_{steps}_obs_{s}')
for s in range(1, steps):
group.append(f'multistep_{steps}_act_{s}')
group.append(f'multistep_{steps}_rew_{s}')
group.append(f'multistep_{steps}_done_{s}')
shuffle_together.append(group)
return shuffle_together
|
2279bcc762d99d8642f6841b7a80c3a2cb053bb3
| 23,847
|
from typing import Tuple
def split_hex_row(
row_hex: str,
start_index: int,
stop_index: int
) -> Tuple[str, str, str]:
"""Helper for figuring out how to cleanly split a hex row for highlights"""
# bytes are 2 hex chars plus 1 space, plus extra space every 8 bytes
start_index = start_index * 3 + int(start_index / 8)
stop_index = stop_index * 3 + int(stop_index / 8)
before_hex = row_hex[:start_index]
during_hex = row_hex[start_index:stop_index]
after_hex = row_hex[stop_index:]
return before_hex, during_hex, after_hex
|
11fdfaeeade207477615a8014cc537937d9a80e4
| 23,848
|
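An illustrative call to split_hex_row above, using a hypothetical 8-byte hex row (two hex chars plus a space per byte):

row = "de ad be ef ca fe ba be "
before, during, after = split_hex_row(row, 1, 3)
# before == "de ", during == "ad be ", after == "ef ca fe ba be "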
import re
def get_segments_from_text(text):
"""Return list of segments for this text"""
tokens = re.findall("([0-9]{8})", text.replace("\n", ""))
# First we generate a list of segments, based on what we found
segments = []
pts = []
for token in tokens:
lat = float(token[:4]) / 100.0
lon = 0 - (float(token[-4:]) / 100.0)
if lon > -30:
lon -= 100.0
if token == "99999999":
if len(pts) > 1:
segments.append(pts)
pts = []
else:
pts.append([lon, lat])
if len(pts) > 1:
segments.append(pts)
return segments
|
93310d58bca652b4bc70da533359ccfcc43af189
| 23,850
|
def write(response, file, chunk_size=8192):
"""
    Read the data from the http response and write it to the given file
    :param response: response object returned from the server
    :param file: writable file object the data is written to
    :param chunk_size: size of each chunk read from the response
    :return: total number of bytes written
"""
bytes_written = 0
while 1:
chunk = response.read(chunk_size)
bytes_written += len(chunk)
if not chunk:
break
file.write(chunk)
return bytes_written
|
5f889b76431b889a9b07db33b98d626b5d0a0c91
| 23,851
|
import random
def roll_die():
"""
Represents a die and returns a random number between 1 and 6
Returns:
(int): a random number between 1 and 6
"""
return random.randint(1, 6)
|
80ba3ff2400f0eabfc84a08bd01f16bd548daa41
| 23,852
|
def strip_action(action: str) -> str:
"""
removes whitespace and changes all characters to lower case
:param action: the name of the action taken on a position
:return: the input string minus the above mentioned
"""
action = action.replace(" ", "")
action = action.casefold()
return action
|
ef85ba082043f5f25cb3f33c73504de48e3cf530
| 23,853
|
def is_valid_app_config(app_config):
"""
Checks if provided app_config is valid. Tries to get namespace.
"""
return getattr(app_config, 'namespace', None)
|
95a658d87e51f39d06b9e66d4671a95c07bdbf12
| 23,855
|