content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_outdir_simupara(fp, line):
    """Read the output directory and simulation parameters from a file.

    Consumes lines from *fp* (starting after *line*) until EOF. Lines equal
    to '--\\n' are skipped. A line holding a single whitespace-separated
    token becomes the output directory; a line with several tokens becomes
    the simulation parameters (e.g. time steps).

    :param fp: opened file object
    :param line: current line (only its truthiness gates the first read)
    :return: tuple (outdir, simupara); outdir is '' and simupara is [] when
        not specified
    """
    outdir, simupara = '', []
    while line:
        line = fp.readline()
        if not line:
            break
        if line == '--\n':
            continue
        tokens = line.strip('\n').split(' ')
        if len(tokens) == 1:
            outdir = str(tokens[0])
        else:
            simupara = tokens
    return outdir, simupara
|
22da06da0466657a5905ba3333b270787cf98a58
| 17,854
|
import re
def ShortenDsn(connect_str_clear):
    """Abbreviate a connection string to its DSN (or SERVER) component.

    Kept very fast because it is called in loops. Returns the value of the
    first ``DSN=`` component if present, otherwise the first ``SERVER=``
    component, otherwise the input unchanged.
    """
    for pattern in (".*DSN=([^;]+).*", ".*SERVER=([^;]+).*"):
        hit = re.match(pattern, connect_str_clear, re.IGNORECASE)
        if hit:
            return hit.group(1)
    return connect_str_clear
|
1041a7e3e731d24593268150e074624ef92d0b86
| 17,855
|
def grid_coordinates(bbox: tuple, gridsize: int) -> list:
    """Build a gridsize x gridsize matrix of cell-center world coordinates.

    :param bbox: (x_min, y_min, x_max, y_max) bounding box
    :param gridsize: number of cells per side
    :return: nested list where grid[u][v] is the (x, y) center of cell (u, v)
    """
    x_min, y_min, x_max, y_max = bbox
    cell_w = (x_max - x_min) / gridsize
    cell_h = (y_max - y_min) / gridsize
    # each cell center is half a cell in from its lower-left corner
    return [
        [
            (x_min + cell_w / 2 + cell_w * u, y_min + cell_h / 2 + cell_h * v)
            for v in range(gridsize)
        ]
        for u in range(gridsize)
    ]
|
9c522d9a24307e34d564f2fc8d1a135168e8f8ff
| 17,856
|
def get_indexing(string):
    """Parse numpy-style fancy indexing from a string.

    Dimensions are separated by ``;``, indices within a dimension by ``,``.
    E.g. for an array ``arr`` of shape (3,3,3), "1,2;1,2" parses to
    ([1, 2], [1, 2]), which selects ``arr[[1, 2], [1, 2]]``.

    Args:
        string (str): textual representation of the indices.

    Returns:
        tuple: one list of ints per dimension.
    """
    return tuple(
        [int(token) for token in dimension.split(",")]
        for dimension in string.strip().split(";")
    )
|
327054d0387f4bda1f3345d2886541a20e96e962
| 17,857
|
import pytz
def timezonize(timezone):
    """Return *timezone* as a pytz timezone object.

    Strings are converted via ``pytz.timezone``; anything that already looks
    like a pytz object is passed through untouched.
    """
    # Robustly detecting a pytz object is hard (the classes are spread around
    # the pytz package), so a hand-made duck test is used: if 'pytz' appears
    # in the type's repr we assume it is already a timezone. Tests would fail
    # if this ever breaks.
    if 'pytz' in str(type(timezone)):
        return timezone
    return pytz.timezone(timezone)
|
1145bb17bf9c50985d88770e8fa0437bfb3e9a18
| 17,859
|
import numpy
def multivariate_gaussian(x_array, mean_mu, corr_sigma):
    """
    Evaluate the multivariate Gaussian pdf at each row of *x_array*.

    Parameters
    ----------
    x_array : array_like
        Dataset of shape (m, n): m examples of n dimensions.
    mean_mu : array_like
        Shape-(n,) vector of per-dimension (feature) means.
    corr_sigma : array_like
        Either a shape-(n,) vector of variances of independent features
        (the diagonal of the covariance matrix), or the full (n, n)
        covariance matrix for dependent features.

    Returns
    -------
    array_like
        Shape-(m,) vector of pdf values at each example.
    """
    n_dims = mean_mu.size
    # promote a diagonal given as a vector to a full matrix
    sigma = numpy.diag(corr_sigma) if corr_sigma.ndim == 1 else corr_sigma
    centered = x_array - mean_mu
    norm_const = (2 * numpy.pi) ** (-n_dims / 2) * numpy.linalg.det(sigma) ** (-0.5)
    # row-wise Mahalanobis distance via pseudo-inverse (tolerates singular sigma)
    mahalanobis = numpy.sum(
        numpy.dot(centered, numpy.linalg.pinv(sigma)) * centered, axis=1)
    return norm_const * numpy.exp(-0.5 * mahalanobis)
|
7c2c08642531e2790289db1179924cff440fd23a
| 17,860
|
import inspect
from typing import get_type_hints
import sys
def _class_get_type_hints(obj, globalns=None, localns=None):  # noqa: MAN001,MAN002
    """
    Return type hints for an object.

    For classes, unlike :func:`typing.get_type_hints` this will attempt to
    use the global namespace of the modules where the class and its parents
    were defined until it can resolve all forward references.

    :param obj: Object (class or otherwise) to extract hints from; for a
        class the hints of its ``__init__`` are returned.
    :param globalns: Optional global namespace for resolving forward refs.
    :param localns: Optional local namespace; progressively merged with each
        parent module's globals while walking the MRO.
    :raises NameError: If a forward reference still cannot be resolved after
        exhausting every class in the MRO.
    """
    # Non-class objects are delegated to the stock implementation unchanged.
    if not inspect.isclass(obj):
        return get_type_hints(obj, localns=localns, globalns=globalns)
    mro_stack = list(obj.__mro__)
    if localns is None:
        localns = {}
    # Retry resolution, each time folding one more ancestor module's globals
    # into localns, until the hints resolve or we run out of ancestors.
    while True:
        try:  # pylint: disable=R8203
            return get_type_hints(obj.__init__, localns=localns, globalns=globalns)
        except NameError:
            if not mro_stack:
                raise  # every parent namespace tried; give up
            klasse = mro_stack.pop(0)
            if klasse is object or klasse.__module__ == "builtins":
                raise  # builtins cannot contribute useful names
            # Existing localns entries win over the module's globals.
            localns = {**sys.modules[klasse.__module__].__dict__, **localns}
|
c04d6d391c7b06980fa33407e6bca809ff86c07e
| 17,861
|
import time
def convert_to_timestamp(seconds):
    """Format *seconds* as a clock string with leading zeros trimmed.

    Unlike Python's standard time library, a negative number is clamped to
    '0:00' instead of wrapping back around the 24 h mark. A zero hour field
    is dropped entirely, and a single leading zero is stripped
    (e.g. 61 -> '1:01', 3661 -> '1:01:01').
    """
    if seconds < 0:
        return '0:00'
    stamp = time.strftime('%H:%M:%S', time.gmtime(seconds))
    if stamp.startswith('00'):
        stamp = stamp[3:]  # drop the hour field and its ':'
    if stamp.startswith('0'):
        stamp = stamp[1:]  # drop one leading zero
    return stamp
|
45428766c440c15366030b07cac95c304cefee89
| 17,862
|
def unpack_string(byte_stream):
    """Decode a bytes object into a str using UTF-8.

    Undecodable byte sequences are substituted with U+FFFD instead of
    raising.

    Args:
        byte_stream (bytes): raw bytes of arbitrary length.

    Returns:
        string: the UTF-8 decoded text.
    """
    return byte_stream.decode("utf-8", "replace")
|
5caaf20a41a05fb0026cb9131dd4825f036ec837
| 17,863
|
from sys import getsizeof
from typing import Mapping
from typing import Iterable
def get_size(obj, seen=None):
    """Recursively estimate the in-memory size in bytes of any Python object.

    Follows ``__dict__``, ``__slots__``, mapping items and iterable elements.
    *seen* (a set of object ids) ensures each shared object is counted once.
    """
    seen = set() if seen is None else seen
    if id(obj) in seen:
        return 0
    seen.add(id(obj))
    total = getsizeof(obj)
    if hasattr(obj, "__dict__"):
        total += get_size(obj.__dict__, seen)
    if hasattr(obj, "__slots__"):
        total += sum(
            get_size(getattr(obj, slot), seen)
            for slot in obj.__slots__
            if hasattr(obj, slot)
        )
    if isinstance(obj, Mapping):
        total += sum(
            get_size(key, seen) + get_size(value, seen)
            for key, value in obj.items()
        )
    elif isinstance(obj, Iterable) and not isinstance(obj, (str, bytes)):
        # strings/bytes already account for their contents in getsizeof
        total += sum(get_size(element, seen) for element in obj)
    return total
|
8bb39d45f1a5a44199e81a65bedc7005d22eaeba
| 17,864
|
import os
def _make_concatenated_data_dirs(suite_locs, area):
    """Create per-suite directories to hold cubeList files.

    For every suite in *suite_locs*, two directories are ensured under the
    suite's location: ``<area>`` and ``<area>_supermeans``.

    :param suite_locs: mapping of suite name -> suite root directory.
    :param area: area name used to build the directory names.
    :return: pair of dicts (suites_locations, supermeans_locations) mapping
        suite name to each created path.
    """
    suites_locations = {}
    supermeans_locations = {}
    for suite_name, suite_root in suite_locs.items():
        for target, dir_name in ((suites_locations, area),
                                 (supermeans_locations, area + '_supermeans')):
            dir_path = os.path.join(suite_root, dir_name)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            target[suite_name] = dir_path
    return suites_locations, supermeans_locations
|
0977fc4cec9f0b5bd1baadf5b7b24a76fccb19f8
| 17,865
|
import importlib
def import_from_module_name(module_name):
    """Import *module_name* and return the (module, name) pair."""
    return importlib.import_module(module_name), module_name
|
87d38536dc23c3ef86bcc1d0b5b0ec937ba397d6
| 17,866
|
def get_external_results_priorities(results):
    """
    Return additional, external results priorities.

    Results priorities from external plugins (e.g. DonPedro) are coming
    together with results.

    :param results: mapping of plugin name -> plugin results dict.
    :return: dict mapping plugin name to its 'results_priority' value, for
        every plugin that provides one.
    """
    # dict.iteritems() was Python-2 only and raises AttributeError on
    # Python 3; .items() is the correct modern equivalent.
    return {
        plugin_name: plugin_results['results_priority']
        for plugin_name, plugin_results in results.items()
        if 'results_priority' in plugin_results
    }
|
f18927a19929b52229aa10ea17ddb5d5e16bc832
| 17,869
|
def get_range_around(range_value, current_item, padding):
    """
    Return a window of numbers around the given number.

    Useful for pagination, where you might want to show something like::

        << < ... 4 5 (6) 7 8 .. > >>

    Here ``6`` is the current page, shown with 2 items of padding on each
    side (the page itself included).

    Usage::

        {% load libs_tags %}
        {% get_range_around page_obj.paginator.num_pages page_obj.number 5
          as pages %}

    :param range_value: Number of total items in your range (1 indexed)
    :param current_item: The item around which the result should be centered
      (1 indexed)
    :param padding: Number of items to show left and right from the current
      item.
    :return: dict with 'range_items' plus 'left_padding'/'right_padding'
      flags indicating whether items are hidden on either side.
    """
    window = 1 + padding * 2
    full = range(1, range_value + 1)
    # everything fits: show it all, nothing hidden on either side
    if range_value <= window:
        return {
            'range_items': full,
            'left_padding': False,
            'right_padding': False,
        }
    if current_item <= padding:
        items = full[:window]          # pinned to the left edge
    elif current_item >= range_value - padding:
        items = full[-window:]         # pinned to the right edge
    else:
        # centered window: by construction items are hidden on both sides
        items = range(current_item - padding, current_item + padding + 1)
        return {
            'range_items': items,
            'left_padding': True,
            'right_padding': True,
        }
    return {
        'range_items': items,
        'left_padding': items[0] > 1,
        'right_padding': items[-1] < range_value,
    }
|
28a18ff5e998ed6b5eb7ec7a0aaf540037ac1946
| 17,870
|
import math
def _scale_gaussian_param(snr, data):
    """
    Convert a gaussian parameter given as SNR in dB into a standard
    deviation, scaled to the spread of the given data set.

    :param snr: signal-to-noise ratio in dB.
    :param data: data set exposing a ``.std()`` method (e.g. numpy array).
    :return: the scaled standard deviation.
    """
    linear_snr = math.pow(10, snr / 10.0)  # dB -> linear power ratio
    return math.sqrt(data.std() ** 2 / linear_snr)
|
e08c8fe67ef37cfe98c9bae35be86818cea541ff
| 17,872
|
def purify(dirty_word):
    """
    Strip surrounding whitespace, lowercase, and remove the punctuation
    characters ``,.[]_()'``.

    :raises ValueError: if nothing is left after cleaning (message kept in
        Ukrainian for compatibility: "cannot be an empty string").
    """
    cleaned = dirty_word.strip().lower()
    for ch in ",.[]_()'":
        cleaned = cleaned.replace(ch, '')
    if not cleaned:
        raise ValueError("Не може бути пустий рядок")
    return cleaned
|
2d1ad098d971d5473cadea4463893d26f1fa50d0
| 17,874
|
import logging
def create_tair10_featureset(
    genes, config, dfu, gsu
):  # pylint: disable=too-many-locals
    """Create an Arabidopsis thaliana featureset from a list of genes.

    :param genes: list of TAIR10 gene identifiers.
    :param config: dict holding a "params" mapping with at least
        "workspace_id" and "output_name" (optionally "description").
    :param dfu: DataFileUtil-like client used to save the feature set.
    :param gsu: GenomeSearchUtil-like client used to look up the genes.
    :return: single-element list with the saved object's ref and description.
    """
    params = config.get("params")
    workspace_id = params["workspace_id"]
    genome_ref = "Phytozome_Genomes/Athaliana_TAIR10"
    genome_features = gsu.search(
        {
            "ref": genome_ref,
            "limit": len(genes),
            "structured_query": {"$or": [{"feature_id": gene} for gene in genes]},
            "sort_by": [["feature_id", True]],
        }
    )["features"]
    genes_found = {feature.get("feature_id") for feature in genome_features}
    # BUG FIX: previously iterated genes_found and tested membership in
    # genes_found (a tautology over an unordered set). Iterate the requested
    # genes instead, so element_ordering follows the input order and genes
    # missing from the genome are actually excluded.
    genes_matched = [gene for gene in genes if gene in genes_found]
    genes_unmatched = set(genes).difference(genes_found)
    if len(genes_unmatched) > 0:
        genes_unmatched_str = ", ".join(genes_unmatched)
        logging.warning(
            """The following genes were not found in the genome: """
            f"""{genes_unmatched_str}"""
        )
    new_feature_set = dict(
        description=params.get("description", ""),
        element_ordering=genes_matched,
        elements={gene: [genome_ref] for gene in genes_matched},
    )
    save_objects_params = {
        "id": workspace_id,
        "objects": [
            {
                "type": "KBaseCollections.FeatureSet",
                "data": new_feature_set,
                "name": params["output_name"],
            }
        ],
    }
    dfu_resp = dfu.save_objects(save_objects_params)[0]
    # object info layout: [obj_id, name, type, date, version, saved_by, wsid]
    featureset_obj_ref = f"{dfu_resp[6]}/{dfu_resp[0]}/{dfu_resp[4]}"
    return [{"ref": featureset_obj_ref, "description": "Feature Set"}]
|
c42a01cdc1ceef25806cd8055d6e6cf30a93fd40
| 17,875
|
import requests
def get_ticket(tgt: str) -> str:
    """POST to a ticket-granting-ticket URL and return the service ticket.

    The UMLS service URL is sent form-encoded; the response body is the
    single-use service ticket text.
    """
    form_data = {'service': 'http://umlsks.nlm.nih.gov'}
    headers = {
        'Content-type': 'application/x-www-form-urlencoded',
        'Accept': 'text/plain',
        'User-Agent': 'python'
    }
    response = requests.post(tgt, data=form_data, headers=headers)
    return response.text
|
52adcb7d87c97f3d0706260e972cc25caadc362e
| 17,876
|
def get_solved(sol_dict):
    """Return a lazy filter over the keys of *sol_dict* whose value is not
    None, i.e. the solved variables of a solution dictionary."""
    def _is_solved(variable):
        return sol_dict[variable] is not None
    return filter(_is_solved, sol_dict)
|
a712caa09be029f0ad7054248cce7762644a6644
| 17,877
|
def generate_legend_marker_colour_orders(plot_order, method_legend_colour_marker_dict):
    """Collect legends, colours, markers and linestyles for *plot_order*.

    Looks each method up in the style dictionary and returns four parallel
    lists in plot order.
    """
    specs = [method_legend_colour_marker_dict[meth] for meth in plot_order]
    legends = [spec['legend'] for spec in specs]
    colours = [spec['colour'] for spec in specs]
    markers = [spec['marker'] for spec in specs]
    linestyles = [spec['linestyle'] for spec in specs]
    return legends, colours, markers, linestyles
|
60d957517f97a7ba5c78aa31ea4f222d37ad7195
| 17,879
|
import os
def create_dir(*args) -> str:
    """Join *args* into a path and create that directory tree if missing.

    Input:
        - any number of string path components
    Output:
        - names: the full joined path
    """
    names = os.path.join(*args)
    if not os.path.exists(names):
        # exist_ok guards against a race with another process creating it
        os.makedirs(names, exist_ok=True)
        print(f' -> created directory: {names}')
    return names
|
209c292192751f69b19e097908b64f91420ff8b4
| 17,880
|
def strip_stac_item(item: dict) -> dict:
    """
    Strip a STAC item down to the fields that get stored.

    Mutates *item* in place: removes metadata keys that are not persisted
    and records the item's S3 key, derived from its "self" link.

    :param item dict: input stac item
    :rtype: dict
    :return: stripped stac item (the same object that was passed in)
    """
    self_href = None
    for link in item["links"]:
        if link["rel"] == "self":
            self_href = link["href"]
    assert self_href is not None, "Can't find self key"
    # Drop fields that will not be stored
    for unwanted in ("stac_version", "stac_extensions", "type",
                     "links", "bbox", "assets"):
        item.pop(unwanted)
    # e.g. https://cbers-stac-0-6.s3.amazonaws.com/CBERS4/PAN5M/156/107/CBERS_4_PAN5M_20150610_156_107_L4.json
    # -> CBERS4/PAN5M/156/107/... (drop the scheme and bucket host parts)
    item["s3_key"] = "/".join(self_href.split("/")[3:])
    return item
|
1909b44f316875f0cd0d65fff8bd62329cd229e5
| 17,881
|
import numpy
def sort_visibility(vis, order=None):
    """Sort a visibility's data on the given column(s).

    :param vis: visibility object with a numpy structured ``data`` attribute
    :param order: list of column-name strings to sort by (default ['index'])
    :return: the same *vis*, with ``vis.data`` replaced by the sorted array
    """
    sort_columns = ['index'] if order is None else order
    vis.data = numpy.sort(vis.data, order=sort_columns)
    return vis
|
c5f7f835c2cff1bb4d0daac5fda85af77ab09e74
| 17,882
|
def no_zipped():
    """
    Fixture pairs of no-zipped (plain comma-separated) strings and the
    integer lists they parse to.
    """
    cases = [
        ('', []),
        ('123', [123]),
        ('123,456', [123, 456]),
    ]
    return cases
|
9aa18599d98f9d22e5597cc6078489617fb1e3aa
| 17,883
|
import subprocess
def execute_cmdline(lst, output):
    """execute_cmdline(lst: list of str) -> int

    Run a command line and append its combined stdout/stderr (as bytes
    lines) to *output*.

    :param lst: command; executed through the shell, so on POSIX only the
        first element is interpreted as the command line.
    :param output: list extended in place with the output lines.
    :return: the process return code.
    """
    proc = subprocess.Popen(lst, shell=True, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # communicate() instead of wait(): wait() with stdout=PIPE can deadlock
    # when the child fills the pipe buffer before exiting.
    stdout, _ = proc.communicate()
    if stdout:
        output.extend(stdout.splitlines(keepends=True))
    return proc.returncode
|
769d5aab23b416894933fcd59dbba7559fdbe2d6
| 17,886
|
def mul(a, b):
    """Return the product ``a * b`` (works for any types supporting ``*``).

    >>> mul(2, 3)
    6
    >>> mul('a', 2)
    'aa'
    """
    product = a * b
    return product
|
990465cb677ea9b0f848b0a9bfc6bfdc5d8b373f
| 17,887
|
import numpy
def gsim_imt_dt(sorted_gsims, sorted_imts):
    """
    Build a numpy dtype as a nested record keyed by (gsim, imt).

    :param sorted_gsims: a list of GSIM instances, sorted lexicographically
    :param sorted_imts: a list of intensity measure type strings
    :return: dtype with one float32 field per IMT, nested under each GSIM
    """
    imt_dt = numpy.dtype([(imt, numpy.float32) for imt in sorted_imts])
    gsim_fields = [(str(gsim), imt_dt) for gsim in sorted_gsims]
    return numpy.dtype(gsim_fields)
|
e5a885efc5f404ef19f931ebe194141c88980c46
| 17,888
|
def prepare(raw_input):
    """
    Split the raw puzzle input into its three blank-line-separated chunks.

    Input is organized in three chunks: (1) rules, (2) my ticket, (3) tickets nearby.
    (1) label ':' lower 'or' upper ',' lower 'or' upper
    (2) header line "your ticket:" followed by one line of comma separated integers
    (3) same as (2), but many lines of comma separated integers
    The CSV lines have always as many fields as there are rules in (1).

    :raises ValueError: if the input does not contain exactly three chunks.
    """
    # Unpack the split directly — the previous identity comprehension
    # ([line for line in ...]) added nothing.
    rules, my_ticket, tickets_nearby = raw_input.split('\n\n')
    return rules, my_ticket, tickets_nearby
|
a784da2ad5ef864d3114187766acd8af2b69b868
| 17,889
|
import codecs
def rot_13(char):
    """ROT13 *char* — the special case of shifting the alphabet by half."""
    encoded = codecs.encode(char, "rot_13")
    return encoded
|
7a08262ece3c2151cfdf7ac66361802aa93c6ee1
| 17,890
|
import os
def list_files(camera, path="/"):
    """Recursively list every file path in a camera directory tree.

    Files of *path* come first, then the files of each subfolder in turn.
    """
    found = [os.path.join(path, fname)
             for fname, _ in camera.folder_list_files(path)]
    # descend into every subfolder after the local files
    for sub_name, _ in camera.folder_list_folders(path):
        found.extend(list_files(camera, os.path.join(path, sub_name)))
    return found
|
42060038d3645badb5bc9a7121331024deb813e0
| 17,892
|
import os
def __get_files(root_dir, file_types, filters):
    """Yield (path, filename, extension) for matching files under *root_dir*.

    file_types = ['.css', '.js', '.htm']   # extensions to accept
    filters = ['', '.min.', 'sweet-alert'] # path substrings that exclude
    """
    def _accepted(filename):
        # an empty/None file_types list accepts nothing
        return any(filename.endswith(ext) for ext in file_types or ())
    def _excluded(file_path):
        # note: find() > 0 — a match at position 0 does NOT exclude,
        # and the empty string never excludes
        return any(file_path.find(token) > 0 for token in filters or ())
    for parent, _, filenames in os.walk(root_dir):
        for fname in filenames:
            fpath = os.path.join(parent, fname)
            if _accepted(fname) and not _excluded(fpath):
                yield (fpath, fname, fname[fname.rfind('.'):])
|
95b7c8c4057b724d46e15d4a73b7a213c5f9dd40
| 17,894
|
def string_empty(string: str) -> bool:
    """Return True if the input string is None, empty, or only whitespace.

    The previous expression (``not (bool(string) or ...)``) returned False
    for whitespace-only strings such as " ", contradicting this contract.
    """
    return string is None or not string.strip()
|
cca19df1eda7039f7299c49fcb9832ce11847524
| 17,895
|
import sys
def src_mul_edge(src, edge, out):
    """Builtin message function computing the message as the binary
    operation mul between a src feature and an edge feature.

    Notes
    -----
    This function is deprecated. Please use :func:`~dgl.function.u_mul_e`
    instead; this wrapper simply forwards to it.

    Parameters
    ----------
    src : str
        The source feature field.
    edge : str
        The edge feature field.
    out : str
        The output message field.

    Examples
    --------
    >>> import dgl
    >>> message_func = dgl.function.src_mul_edge('h', 'e', 'm')
    """
    # look the replacement up on this very module at call time
    u_mul_e = getattr(sys.modules[__name__], "u_mul_e")
    return u_mul_e(src, edge, out)
|
4a703a09b0ebce8fb9cdf24739d8449873bf91b6
| 17,896
|
def get_attr_flows(results, key='variable_costs'):
    """
    Return all flows of an EnergySystem whose *key* attribute is non-zero.

    Parameters
    ----------
    results : dict
        Results dictionary of the oemof.solph optimisation including the
        parameters under key 'param'.
    key : str
        Attribute name looked up in 'scalars' and 'sequences'.

    Returns
    -------
    list : Flows where a non-zero attribute value is given either at the
        'scalars' or 'sequences' (scalar hits first, then sequence hits).
    """
    param = results['param']
    scalar_hits = []
    for flow in param:
        scalars = param[flow]['scalars']
        if key in scalars and abs(scalars[key]) > 0:
            scalar_hits.append(flow)
    sequence_hits = []
    for flow in param:
        sequences = param[flow]['sequences']
        if key in sequences and abs(sequences[key].sum()) > 0:
            sequence_hits.append(flow)
    return scalar_hits + sequence_hits
|
756ec66cdf7b01cdbc6b872b4f30cf27e4ff524f
| 17,897
|
from typing import Callable
import hmac
import hashlib
def generate_hash_key(chain_url: str, privkey: bytes, strategy: Callable):
    """Generate a hash key to use as `client_id`, using the :mod:`hmac` library.

    The HMAC-SHA256 message is *chain_url* concatenated with the
    ``__name__`` of *strategy*, signed with *privkey*; the hex digest is
    returned.
    """
    message = (chain_url + strategy.__name__).encode("UTF-8")
    digest = hmac.new(privkey, message, hashlib.sha256)
    return digest.hexdigest()
|
1bb6b69f06c6ecb245b8ea8b4c772ede872c460c
| 17,898
|
import hashlib
def hash(filepath):
    """
    Generate the SHA-256 hash of a given file.

    The file is read in 1 MiB chunks so arbitrarily large files do not need
    to fit in memory (previously the whole file was read at once).

    :param filepath: directory/path where the file is located.
    :return: hex-encoded SHA-256 digest of the file contents.
    """
    # NOTE: the function name shadows the builtin ``hash``; kept unchanged
    # for backward compatibility with existing callers.
    digest = hashlib.sha256()
    with open(filepath, 'rb') as stream:
        for chunk in iter(lambda: stream.read(1 << 20), b''):
            digest.update(chunk)
    return digest.hexdigest()
|
320bcb258a5eb13e29c1f53c0caa4d3ed0b33a5e
| 17,899
|
def _rename_artifact(ctx, tpl_string, src_file, packaging_type):
    """Rename the artifact to match maven naming conventions.

    Bazel (Skylark) rule helper: declares an output file named by filling
    ``tpl_string`` with (artifact_id, version, packaging_type) from the rule
    attributes, then copies *src_file* into it with a shell action.

    Args:
      ctx: rule context; must provide ``new_file``/``action`` and the
        ``artifact_id`` and ``version`` attributes.
      tpl_string: ``%``-template with three ``%s`` slots.
      src_file: the file to copy/rename.
      packaging_type: maven packaging type (e.g. "jar").

    Returns:
      The declared, renamed artifact file.
    """
    # NOTE(review): ctx.new_file / ctx.action are legacy Bazel APIs
    # (superseded by ctx.actions.declare_file / ctx.actions.run_shell) —
    # presumably this targets an old Bazel version; confirm before upgrading.
    artifact = ctx.new_file(ctx.bin_dir, tpl_string % (ctx.attr.artifact_id, ctx.attr.version, packaging_type))
    ctx.action(
        inputs = [src_file],
        outputs = [artifact],
        command = "cp %s %s" % (src_file.path, artifact.path),
    )
    return artifact
|
6c5bf6928e79bfaf20f222bea9815643aaff3dc5
| 17,901
|
def get_config_data(config_data):
    """
    Extract experiment and robot parameters from a config mapping.

    Parameters
    ----------
    config_data : dict-like config with 'RobotSettings' and
        'ExperimentSettings' sections.

    Returns
    -------
    tuple : (exp_type, reward_dur, x_p, y_p, z_p, x0, y0, z0, r, t1, t2)
        command file, reward window duration, command positions, origin
        coordinates and the x/y/z command values.
    """
    robot = config_data['RobotSettings']
    experiment = config_data['ExperimentSettings']
    return (
        robot['commandFile'],
        experiment['rewardWinDur'],
        robot['xCommandPos'],
        robot['yCommandPos'],
        robot['zCommandPos'],
        robot['x0'],
        robot['y0'],
        robot['z0'],
        robot['x'],
        robot['y'],
        robot['z'],
    )
|
b6f8199547d8177660336c8c0aab09deddf96118
| 17,902
|
import socket
import struct
import random
def randip():
    """Return a random IPv4 address as a dotted-quad string.

    Returns:
        str -- IP address drawn from 0.0.0.1 through 255.255.255.255
    """
    packed = struct.pack('>I', random.randint(1, 0xffffffff))
    return socket.inet_ntoa(packed)
|
e1baec43b7616a7f7345d5bf83c79f868bf95c80
| 17,903
|
def dumb_portfolio_evaluate(assets, weights):
    """
    Evaluate a portfolio as the weighted sum of asset reward and risk.

    Risk contributions are weighted quadratically (weight squared).

    :param assets: sequence of objects exposing ``reward`` and ``risk``.
    :param weights: vector of weights per asset (should sum to 1).
    :return: dict with portfolio 'reward', 'risk' and the 'weights' vector.
    """
    reward = sum(assets[i].reward * weights[i] for i in range(len(assets)))
    risk = sum(assets[i].risk * weights[i] ** 2 for i in range(len(assets)))
    return {'reward': reward, 'risk': risk, 'weights': weights}
|
6945974d40247d585a5595a5eb7b0e5ff4625e41
| 17,904
|
import six
def is_list_of_strings(vals):
    """Return True if *vals* is an iterable of strings, False otherwise.

    Non-iterable inputs and iterables containing any non-string element
    yield False.
    """
    try:
        # check if everything is a string; TypeError is what iterating a
        # non-iterable raises — the previous bare ``except`` hid unrelated
        # errors. six.string_types is just (str,) on Python 3, so plain
        # isinstance(..., str) drops the third-party dependency.
        for val in vals:
            if not isinstance(val, str):
                return False
    except TypeError:
        # vals is not enumerable
        return False
    # everything is a string
    return True
|
61c48b7b43acc1ab8f86c47273606a830ccb0bab
| 17,905
|
def clean_url(raw_url):
    """
    Extract a clean https URL from surrounding garbage.

    Keeps everything after the first ' https' and drops any '#' fragment.
    Assumes the input actually contains ' https'; raises IndexError
    otherwise (matching the original behavior).
    """
    after_scheme = raw_url.split(' https')[1]
    without_fragment = after_scheme.split('#')[0]
    return 'https' + without_fragment
|
f34581e230182c026ff26d2f8e8da91137417930
| 17,907
|
def removeSearchIndice(doc):
    """Remove search indices for isotopes Uranium and Thorium.

    Deletes the 'sort_indices' key if present and returns the (mutated) doc.
    """
    # dict.has_key() was removed in Python 3; the ``in`` operator is the
    # portable equivalent.
    if "sort_indices" in doc:
        del doc["sort_indices"]
    return doc
|
cd0239b9e96d3eed70fbf41b5d0aec9bd7589c35
| 17,908
|
def getPruning(table, index):
    """Extract a 4-bit pruning value packed two-per-byte in *table*.

    Even indices live in the low nibble of their byte, odd indices in the
    high nibble.
    """
    packed = table[index // 2]
    if index & 1:
        return (packed & 0xf0) >> 4
    return packed & 0x0f
|
45e9e81bef153edfa7003f5256a3e9b169a6a2c0
| 17,909
|
import torch
def append_homog(tensor: torch.Tensor, homog_value: float = 1.) -> torch.Tensor:
"""Appends a homogeneous coordinate to the last dimension of a Tensor.
Args:
tensor: A Tensor.
homog_value: Value to append as homogeneous coordinate to the last dimension
of `tensor`. (Default: 1.0)
Returns:
A Tensor identical to the input but one larger in the last dimension. The
new entries are filled with ones.
"""
shape = list(tensor.shape)
shape[-1] = 1
appendage = torch.ones(shape, dtype=tensor.dtype, device=tensor.device) * homog_value
return torch.cat([tensor, appendage], -1)
|
d07361ded84608d0aa2d4587b5418be0a6f5c395
| 17,910
|
def get_fitness(solution):
    """Return the fitness value of the passed solution.

    A falsy/absent solution has fitness 0; a valid solution's fitness
    coincides with its ``value``.
    """
    return solution.value if solution else 0
|
bd3abcb2600ee074ce33b543c7a19b58924e3415
| 17,911
|
def sortByHeight(a):
    """Sort people's heights while trees (-1) keep their positions.

    >>> sortByHeight([-1, 150, 190, 170, -1, -1, 160, 180])
    [-1, 150, 160, 170, -1, -1, 180, 190]
    """
    tree_positions = [i for i, value in enumerate(a) if value == -1]
    # sorting puts all the -1s first; slice them away to get just the people
    heights = sorted(a)[len(tree_positions):]
    # re-plant the trees at their original spots (positions are ascending)
    for pos in tree_positions:
        heights.insert(pos, -1)
    return heights
|
a98de199fc5a9580986b91548693fe05f60f2340
| 17,912
|
def white_spaces(x):
    """Return a run of ``x - 2`` blank spaces ('' when x <= 2)."""
    return ' ' * (x - 2)
|
a9a7af435add1400429116b04884cac3241d9c8b
| 17,913
|
def pretty_size(size, unit=1024):
    """
    Return a pretty, human-readable representation of a size value.

    :param int|long|float size: the number to prettify
    :param int unit: 1000 or 1024 (the default)
    :rtype: str
    """
    base_suffix = {1000: "B", 1024: "iB"}[unit]
    suffixes = ["B"] + [prefix + base_suffix for prefix in "KMGTPEZY"]
    if unit == 1000:
        suffixes[1] = 'kB'  # special case kB instead of KB
    value = float(size)  # cast to float to avoid losing decimals
    for suffix in suffixes:
        if abs(value) < unit or suffix == suffixes[-1]:
            # plain bytes print without decimals; everything else with one
            if suffix == suffixes[0]:
                return "%d %s" % (value, suffix)
            return "%.1f %s" % (value, suffix)
        value /= unit
|
abeafa97d45212c0170b981e9448e63acc1a54d2
| 17,914
|
import copy
def json_row_to_dict(row):
    """Convert a sqlalchemy row object into a plain python dictionary.

    Assumes the row exposes at least:
      - id (int64)
      - data (JSON): the remaining fields as a single JSON column

    The JSON payload is deep-copied so callers may mutate the result freely;
    a truthy id is folded into the returned dict.

    Args:
        row (BaseModel): The db row object
    """
    result = copy.deepcopy(row.data) if row.data else {}
    if row.id:
        result['id'] = row.id
    return result
|
902d45a9beb717e56ad8d8468eb0815b9171013c
| 17,915
|
import logging
import requests
def request_image(image_url):
    """Download the file at *image_url* and return the raw response object.

    Returns None when the URL is missing or the server does not answer 200.
    """
    if image_url is None:
        logging.error("Image URL is None")
        return None
    logging.info(f"Downloading roll image {image_url}")
    response = requests.get(image_url, stream=True)
    if response.status_code != 200:
        logging.error(f"Unable to download {image_url} - {response}")
        return None
    # make the raw stream transparently decode gzip/deflate content
    response.raw.decode_content = True
    return response
|
c5d0847859b9e036ebf0121f1c8a71492d0923f6
| 17,916
|
import binascii
import os
import requests
import base64
def test_kvstore(put_host_addr, get_host_addr):
    """Round-trip a random value through the key/value store running on the
    given Tendermint node.

    Returns 0 on success, or a non-zero step code identifying the failing
    stage (2 = write, 3 = query, 4 = value mismatch).
    """
    # random hex payload so each run writes a distinct value
    test_value = binascii.b2a_hex(os.urandom(15)).decode('utf-8')
    write_url = "http://%s" % put_host_addr
    payload = {'tx': '"test_value=%s"' % test_value}
    resp = requests.get(write_url + "/broadcast_tx_commit", params=payload)
    if resp.status_code >= 400:
        print("Failed to send broadcast_tx_commit request. Got status: %d" % resp.status_code)
        return 2
    # now try to fetch the value back through the query endpoint
    read_url = "http://%s" % get_host_addr
    resp = requests.get(read_url + '/abci_query', params={'data': '"test_value"'})
    if resp.status_code >= 400:
        print("Failed to send abci_query request. Got status: %d" % resp.status_code)
        return 3
    # the stored value comes back base64-encoded
    stored_value = base64.b64decode(resp.json()['result']['response']['value']).decode('utf-8')
    if stored_value != test_value:
        print("Expected %s, but got %s" % (test_value, stored_value))
        return 4
    print("kvstore test completed successfully!")
    return 0
|
5386e80439490f3180cbf6dcbee673b2ba2d6ba8
| 17,917
|
def phrase_feature(sentences, phrase_list):
    """Per-sentence ratio of matched phrases to sentence length.

    For each sentence, counts how many phrases from *phrase_list* occur in
    ``sentence.original`` and divides by ``len(sentence.bag_of_words)``.

    :param sentences: objects exposing ``original`` (str) and
        ``bag_of_words`` (sized collection of words).
    :param phrase_list: phrases to search for.
    :return: list of floats in [0, len(phrase_list)/len(words)], one per
        sentence.
    """
    # (removed an unused ``total_number_words`` accumulator that was never
    # read anywhere)
    phrase_frequency = []
    for sentence in sentences:
        matches = sum(1 for phrase in phrase_list if phrase in sentence.original)
        phrase_frequency.append(matches / len(sentence.bag_of_words))
    return phrase_frequency
|
36d21a6dfd0c7beefa1739e522783d2ecc6241cb
| 17,918
|
def set_title(title=None, property_name=None, channel=None):
    """Set title for map.

    Parameters:
        title (str):
            If ``None``, built automatically from property (and channel)
            name(s) with underscores turned into spaces. For no title,
            set to ''. Default is ``None``.
        property_name (str):
            Map property name. Default is ``None``.
        channel (str):
            Map channel name. Default is ``None``.

    Returns:
        str
    """
    if title is not None:
        return title
    parts = (property_name if property_name is not None else '',
             channel if channel is not None else '')
    joined = ' '.join(parts)
    # underscores act as word separators in the generated title
    return ' '.join(joined.split('_')).strip()
|
6483c8df9fafaa7e46ef667801a24a921ae871da
| 17,919
|
from pathlib import Path
def ready_for_postinstall(chroot):
    """True if the chroot path exists and is empty (nothing mounted inside),
    False otherwise."""
    root = Path(chroot)
    if not root.exists():
        return False
    # empty directory <=> its iterator yields nothing
    return next(root.iterdir(), None) is None
|
c684cf9f4940f4f6e102752a2456737b5de56cff
| 17,920
|
import sys
def modules(modulePath):
    """Load a (possibly dotted) module path and return the module object."""
    # __import__ returns the top-level package for dotted names, so the
    # actual leaf module is fetched from sys.modules afterwards
    __import__(modulePath)
    return sys.modules[modulePath]
|
f25fe4f7ce16086949583e69bc7d60537b2cdaab
| 17,921
|
def null_action(env, ob):
    """Return the 'do nothing' action for *env*'s action space.

    Discrete spaces (those with ``n``) map to action 0; box spaces (those
    with ``low``/``high``) map to the midpoint action. Anything else is
    unsupported.
    """
    space = env.action_space
    if hasattr(space, 'n'):  # discrete
        return 0
    if hasattr(space, 'low') and hasattr(space, 'high'):  # box
        return (space.low + space.high) / 2.0  # the most average action
    raise NotImplementedError()
|
1ed066f6719af853a539ee077a47320d84bf8bfd
| 17,922
|
def transition_model(corpus, page, damping_factor):
    """
    Return a probability distribution over which page to visit next,
    given a current page.

    With probability `damping_factor` a link of `page` is followed at
    random; with probability `1 - damping_factor` any page of the corpus is
    chosen at random. A page without outgoing links is treated as linking
    to every page (uniform distribution).
    """
    uniform = 1 / len(corpus)
    links = corpus[page]
    if not links:
        # dangling page: every page is equally likely
        return {candidate: uniform for candidate in corpus}
    linked_prob = 1 / len(links)
    distribution = {candidate: uniform * (1 - damping_factor)
                    for candidate in corpus}
    for linked in links:
        distribution[linked] += damping_factor * linked_prob
    return distribution
|
c7ea20809acce0f9290f3b5890122c7d34acd416
| 17,923
|
import uuid
def new_uuid():
    """Return a fresh random UUID (uuid4) as a 32-char hex string."""
    fresh = uuid.uuid4()
    return fresh.hex
|
cdbe6cb239d719b0a6a31be60bcb8585971a6e1c
| 17,924
|
import torch
def generate_spixel(logits, img_in):
"""
generate superpixels (Not used)
Args:
logits: torch.Tensor
A Tensor of shape (b, nSpixel, h, w)
that could be regarded as a soft assignment matrix
img_in: torch.Tensor
A Tensor of shape (b, c, h*w)
whose prototype is normalized rgbxy image in default
Return:
avg_spixel_feat: tensor.Tensor
A Tensor of shape (b, c, nSpixel)
"""
# prob is a row-normalized (b, h*w, nSpixel) Tensor
prob = logits.view(*logits.shape[:2], -1).permute(0, 2, 1)
# avoid calculating invalid spixels
spixel_cardinality = prob.sum(1, keepdim=True)
spixel_mask = spixel_cardinality > 1e-5
avg_spixel_feat = torch.bmm(img_in, prob) / (spixel_cardinality + 1e-5)
avg_spixel_feat *= spixel_mask.float()
return avg_spixel_feat
|
6f8c24dde059203bb52028747191161fa90b0d9f
| 17,925
|
def is_developer_certificate_getter(self):
    """Return whether this certificate is a developer certificate or not.

    :param self: Instance of the entity for which this is a custom method.
    :type self: mbed_cloud.foundation.TrustedCertificate
    :return: True if a developer certificate, False otherwise.
    :rtype: bool
    """
    # Device execution mode 1 denotes a developer certificate.
    developer_mode = 1
    return self._device_execution_mode.value == developer_mode
|
cebb9ba74150c0a9ae5d3fb2b99e504977525b1f
| 17,927
|
from typing import OrderedDict
def dismantle_dict_values_to_deep_list(dict_):
    """Return the values of *dict_* as a list ordered by key.

    Values that are themselves dicts are recursively converted, yielding a
    nested list.  Keys must be mutually comparable (sortable).
    """
    # Plain dicts preserve insertion order (Python 3.7+), so sorting the
    # items is enough; instantiating typing.OrderedDict (a deprecated
    # runtime alias) as the original did is unnecessary.
    return [
        dismantle_dict_values_to_deep_list(value) if isinstance(value, dict) else value
        for _, value in sorted(dict_.items())
    ]
|
045268625aa63266ef988fb56ae9eaeb329ac412
| 17,928
|
import os
def get_whence_string(whence):
    """Retrieves a human readable string representation of the whence."""
    names = {
        os.SEEK_CUR: "SEEK_CUR",
        os.SEEK_END: "SEEK_END",
        os.SEEK_SET: "SEEK_SET",
    }
    return names.get(whence, "UNKNOWN")
|
b02cc04ddfb7b028d626f750ea3da612e908b542
| 17,929
|
def average_speed(s1: float, s0: float, t1: float, t0: float) -> float:
    """Return the average speed: delta space / delta time.

    Where delta space = s1 - s0 and delta time = t1 - t0.
    """
    delta_space = s1 - s0
    delta_time = t1 - t0
    return delta_space / delta_time
|
8125fb0454433288b506ffd277fa6b1bd21b06c9
| 17,930
|
from typing import Coroutine
from typing import Any
import asyncio
def await_async(task: Coroutine[Any, Any, Any]) -> Any:
    """Await async task in sync function.
    Parameters
    ----------
    task : Coroutine
        task to be awaited, eg f() where f is async function.
    Returns
    -------
    Any
        Result returned from awaited task.

    Notes
    -----
    NOTE(review): if a loop is already running in this thread,
    ``run_until_complete`` on that same loop raises RuntimeError
    ("This event loop is already running") — confirm this helper is only
    called from purely synchronous contexts.
    """
    try:
        # Reuse the thread's running loop if one exists.
        loop = asyncio.get_running_loop()
    except RuntimeError:
        # No loop in this thread: create one and install it so later
        # calls find it via get_running_loop()/get_event_loop().
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop.run_until_complete(task)
|
9dc6c3301dcc91036e05059b695b3edba9de61a1
| 17,931
|
def multiply2by2Matricies(A, B):
    """Multiply two 2x2 matrices using Strassen's scheme (7 multiplications
    instead of the naive 8)."""
    a11, a12, a21, a22 = A[0][0], A[0][1], A[1][0], A[1][1]
    b11, b12, b21, b22 = B[0][0], B[0][1], B[1][0], B[1][1]
    # Strassen's seven products.
    m1 = (a11 + a22) * (b11 + b22)
    m2 = (a21 + a22) * b11
    m3 = a11 * (b12 - b22)
    m4 = a22 * (b21 - b11)
    m5 = (a11 + a12) * b22
    m6 = (a21 - a11) * (b11 + b12)
    m7 = (a12 - a22) * (b21 + b22)
    # Recombine into the product matrix.
    return [[m1 + m4 - m5 + m7, m3 + m5],
            [m2 + m4, m1 - m2 + m3 + m6]]
|
cfdf29632265c1161ea7808aa9241c04c0cb47ed
| 17,932
|
from argparse import ArgumentParser
from pathlib import Path
def parse_args_berry(args):
    """Argument parser for `pwproc berry`: one or more input paths plus
    optional output locations for phase, vector and polarization data."""
    cli = ArgumentParser(prog='pwproc berry')
    cli.add_argument('in_file', nargs='+', type=Path)
    for flag in ('--phase', '--vec', '--pol'):
        cli.add_argument(flag, type=Path)
    cli.add_argument('--out', type=str)
    return cli.parse_args(args)
|
de7ca37b6882154c2da49820ef8c8f373775d11e
| 17,933
|
import json
def wrap_error_data(mes):
    """Wrap an error message as UTF-8 encoded JSON bytes: {"mes": ...}."""
    payload = {'mes': mes}
    return json.dumps(payload).encode()
|
dfc1274d38859fc15d06474b34336f9af132a694
| 17,936
|
def create_arrays_from_input(dir_arr):
    """
    This function gets:
    :param dir_arr: an array of 24 strings: matchsticks' directions: up('U')/down('D')/left('L')/right('R')
    :return: 3 arrays of 24 integers:
        Each index represents a matchstick
        The values represent rows, columns and diagonals: matchsticks' directions:
        row - must be 1 - 4
        col - must be 1 - 4
        di - must be 1 or 2 (0 - not pointing to any diagonal)
    """
    # NOTE(review): the index arithmetic below assumes the 24 sticks are laid
    # out in repeating bands of 7 (i % 7 selects position within a band) over
    # a 4x4 grid — confirm against the caller that produces dir_arr.
    match_rows = []
    match_cols = []
    match_dis = []
    # Fallback values in case an index/direction combination matches no branch.
    row = 0
    col = 0
    di = 0
    for i in range(0, 24):
        # --- Row assignment: fixed ranges for horizontal bands; sticks in the
        # slanted bands point 'U' into the row above or 'D' into the row below.
        if 0 <= i <= 2:
            row = 1
        if 3 <= i <= 6:
            if dir_arr[i] == 'U':
                row = 1
            elif dir_arr[i] == 'D':
                row = 2
        if 7 <= i <= 9:
            row = 2
        if 10 <= i <= 13:
            if dir_arr[i] == 'U':
                row = 2
            elif dir_arr[i] == 'D':
                row = 3
        if 14 <= i <= 16:
            row = 3
        if 17 <= i <= 20:
            if dir_arr[i] == 'U':
                row = 3
            elif dir_arr[i] == 'D':
                row = 4
        if 21 <= i <= 23:
            row = 4
        match_rows.append(row)
        # --- Column assignment keyed on position within the 7-stick band:
        # positions 3-6 have fixed columns; positions 0-2 lean 'L'/'R'.
        if i % 7 == 3:
            col = 1
        if i % 7 == 4:
            col = 2
        if i % 7 == 5:
            col = 3
        if i % 7 == 6:
            col = 4
        if i % 7 == 0:
            if dir_arr[i] == 'L':
                col = 1
            elif dir_arr[i] == 'R':
                col = 2
        if i % 7 == 1:
            if dir_arr[i] == 'L':
                col = 2
            elif dir_arr[i] == 'R':
                col = 3
        if i % 7 == 2:
            if dir_arr[i] == 'L':
                col = 3
            elif dir_arr[i] == 'R':
                col = 4
        match_cols.append(col)
        # --- Diagonal assignment: 1 = main diagonal, 2 = anti-diagonal,
        # 0 = neither.  A handful of sticks can never touch a diagonal.
        if i == 1 or i == 10 or i == 13 or i == 22:
            di = 0
        # Sticks 8/15 (horizontal) and 11/12 (vertical-ish) select a diagonal
        # purely by their direction.
        if i == 8:
            if dir_arr[i] == 'R':
                di = 2
            if dir_arr[i] == 'L':
                di = 1
        if i == 15:
            if dir_arr[i] == 'R':
                di = 1
            if dir_arr[i] == 'L':
                di = 2
        if i == 11:
            if dir_arr[i] == 'U':
                di = 1
            if dir_arr[i] == 'D':
                di = 2
        if i == 12:
            if dir_arr[i] == 'U':
                di = 2
            if dir_arr[i] == 'D':
                di = 1
        # Remaining sticks lie on a diagonal only when their (row, col) does:
        # col == row -> main diagonal; col == 5 - row -> anti-diagonal.
        if i in [0, 3, 4, 7, 16, 19, 20, 23]:
            if match_cols[i] == match_rows[i]:
                di = 1
            else:
                di = 0
        if i in [2, 5, 6, 9, 14, 17, 18, 21]:
            if match_cols[i] == 5 - match_rows[i]:
                di = 2
            else:
                di = 0
        match_dis.append(di)
    return match_rows, match_cols, match_dis
|
d16f1bd28d53d92dff675defff2e91a1bedf5f55
| 17,937
|
def multiline_rstrip(s: str) -> str:
    """
    Return a copy of the multiline string *s* with trailing whitespace
    removed from every line (lines are re-joined with "\\n").
    """
    cleaned = [line.rstrip() for line in s.splitlines()]
    return "\n".join(cleaned)
|
51ee39528918e1a5a35f840d1524a17941a7a170
| 17,938
|
import sys
def strip_indent(s):
    """
    Given a multiline string C{s}, find the minimum indentation for
    all non-blank lines, and return a new string formed by stripping
    that amount of indentation from all lines in C{s}.
    """
    lines = s.split('\n')
    # Indentation width of every non-blank line; blank/whitespace-only
    # lines do not constrain the minimum.
    widths = [len(line) - len(line.lstrip()) for line in lines if line.lstrip()]
    # With no non-blank lines, fall back to sys.maxsize so every line is
    # sliced to the empty string (matches historical behaviour).
    minindent = min(widths) if widths else sys.maxsize
    return '\n'.join(line[minindent:] for line in lines)
|
ca128a349bb9f0a1538e9fc8e3638b7f64806538
| 17,939
|
import socket
import struct
def send_packet(mac_str):
    """Broadcast a Wake-on-LAN magic packet for the given MAC address.

    :param mac_str: mac address string separated by :
    :return:
    """
    # Magic packet layout: 6 x 0xFF followed by the MAC repeated 16 times.
    octets = [int(part, 16) for part in mac_str.split(':')]
    payload = struct.pack('B' * 102, *([255] * 6 + octets * 16))
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    # Port 9 (discard) is the conventional WoL destination.
    sock.sendto(payload, ('<broadcast>', 9))
|
cd6a8fe49d43b1970995068302f611839b454187
| 17,940
|
import argparse
def parse_args(args):
    """
    Parse and return command-line arguments.
    """
    cli = argparse.ArgumentParser(
        description="Execute clang-format on your working copy changes."
    )
    cli.add_argument(
        "-d", "--diff",
        action="store_true",
        default=False,
        help="Determine whether running clang-format would produce changes",
    )
    cli.add_argument("--verbose", "-v", action="store_true", default=False)
    cli.add_argument(
        "--max-processes",
        type=int,
        default=50,
        help="Maximum number of subprocesses to create to format files in parallel",
    )
    return cli.parse_args(args)
|
a5663ab8c1df586b334d5a608a7ccc8c46d83a31
| 17,941
|
def gaussian2D_from_shape(shape = (100,100), amplitude = 3000, position = (100,100), sigma = (5,5), dtype = 'uint16'):
    """
    Return a 2D Gaussian of the given 'amplitude' centred at 'position'
    (row, col) on an image of the given 'shape', cast to 'dtype'.

    Notes:
    - 'position' may lie outside 'shape'; the Gaussian tail is still
      evaluated (the default position (100, 100) is outside the default
      100x100 image).
    - Vectorized with numpy broadcasting instead of the original
      per-pixel Python double loop; the per-element formula is unchanged.
    """
    from numpy import exp, arange
    r_mu, c_mu = position
    r_sigma, c_sigma = sigma
    # Column vector of row indices and row vector of column indices;
    # broadcasting their sum yields the full (rows, cols) grid.
    rows = arange(shape[0]).reshape(-1, 1)
    cols = arange(shape[1]).reshape(1, -1)
    gaussian = amplitude * exp(-((rows - r_mu)**2 / (2.0 * r_sigma**2))
                               - ((cols - c_mu)**2 / (2.0 * c_sigma**2)))
    # astype truncates toward zero for integer dtypes (as before).
    return gaussian.astype(dtype)
|
9557b6df7ae575740be97e21cca8065561051ed8
| 17,942
|
import hashlib
def hash_file(file_content):
    """Return the hex MD5 digest of the given text (encoded as UTF-8)."""
    digest = hashlib.md5()
    digest.update(file_content.encode('utf-8'))
    return digest.hexdigest()
|
ba3b84edb78ccb70e4fb37b8cc6c8eb73c0d108b
| 17,944
|
def visit(event, *args):
    """ This decorator is used to indicate which nodes the function should
    examine. The function should accept (self, node) and return the relevant
    node or None. """
    def mark(fn):
        # Stash the event name and node types on the function itself so a
        # dispatcher can discover them later; the function is unchanged.
        fn._visit_event = event
        fn._visit_nodes = args
        return fn
    return mark
|
a2ee19945446d100e3282f5cb5bc080ff4e568c2
| 17,945
|
from datetime import datetime
def unrets_date(rets_date):
    """
    Converts a RETS date (ISO 8601 format) into a Python datetime

    :param rets_date: a RETS/ISO 8601 date
    :type rets_date: str
    :rtype: datetime.datetime
    :return: rets_date as a Python datetime
    """
    # ISO 8601 with mandatory microseconds component.
    iso_format = '%Y-%m-%dT%H:%M:%S.%f'
    return datetime.strptime(rets_date, iso_format)
|
9f8cae6880ac4d2f285eff856db09da0a39ec4ee
| 17,946
|
def compute_mae(y_pred, y, center_to_border_dict=None):
    """
    Return the absolute distance of the predicted value to the ground truth.
    The unused `center_to_border_dict` argument is kept for signature
    compatibility with the quantification-error analysis code.
    """
    error = y_pred - y
    return abs(error)
|
a28f638779aa11dafdfb8d430eb0ad1a5515aefa
| 17,947
|
def add_nones(word):
    """Change word into a list and add None at its beginning, end, and between every other pair of elements. Works whether the word is a str or a list.
    """
    def interleave(seq):
        # None, first, (None, elem)*, None
        it = iter(seq)
        yield None
        yield next(it)
        for elem in it:
            yield None
            yield elem
        yield None
    # Strings are tokenised on single spaces first.
    tokens = word.split(' ') if isinstance(word, str) else word
    return list(interleave(tokens))
|
63c8437361f2f0cc200665d7cdf6a41aab7eaf67
| 17,948
|
import sys
def is_py3():
    """
    Return True when running under Python 3.  Used elsewhere in Bip for
    python2/python3 compatibility checks.
    """
    major_version = sys.version_info[0]
    return major_version == 3
|
5264206d68c4a77aa9243ce97f95819941146555
| 17,949
|
import six
def make_key(*args, **kwargs):
    """
    Given any number of lists and strings will join them in order as one
    string separated by the sep kwarg. sep defaults to u"_".
    Add exclude_last_string=True as a kwarg to exclude the last item in a
    given string after being split by sep. Note if you only have one word
    in your string you can end up getting an empty string.
    Example uses:
    >>> from mongonaut.forms.form_utils import make_key
    >>> make_key('hi', 'my', 'firend')
    >>> u'hi_my_firend'
    >>> make_key('hi', 'my', 'firend', sep='i')
    >>> 'hiimyifirend'
    >>> make_key('hi', 'my', 'firend',['this', 'be', 'what'], sep='i')
    >>> 'hiimyifirendithisibeiwhat'
    >>> make_key('hi', 'my', 'firend',['this', 'be', 'what'])
    >>> u'hi_my_firend_this_be_what'
    """
    sep = kwargs.get('sep', u"_")
    exclude_last_string = kwargs.get('exclude_last_string', False)
    string_array = []
    for arg in args:
        if isinstance(arg, list):
            string_array.append(six.text_type(sep.join(arg)))
        else:
            if exclude_last_string:
                new_key_array = arg.split(sep)[:-1]
                if len(new_key_array) > 0:
                    # Propagate the caller's separator: the recursive call
                    # previously dropped it, silently rejoining with the
                    # default "_" even when a custom sep was given.
                    string_array.append(make_key(new_key_array, sep=sep))
            else:
                string_array.append(six.text_type(arg))
    return sep.join(string_array)
|
b5c48386304ab248518a7330605fbcfb37ecad23
| 17,950
|
def has_restart_file(job):
    """Return True if *job* has a Fortran restart file (``fort.77``)."""
    restart_name = "fort.77"
    return job.isfile(restart_name)
|
d6d5bd748722ab5bdc83461728d5a02e539d25ed
| 17,951
|
from typing import List
def fixture_sample_tag_names(vcf_tag_name: str, sample_tag_name: str) -> List[str]:
    """Return the VCF tag name followed by the sample tag name as a list."""
    tag_names = [vcf_tag_name]
    tag_names.append(sample_tag_name)
    return tag_names
|
40f38b1c5e17254b1a0c9974216b98bdc8d27c43
| 17,952
|
import hashlib
def compute_file_checksum(path):
    """ Compute a SHA1 checksum for a file. """
    sha1 = hashlib.sha1()
    with open(path, 'rb') as handle:
        sha1.update(handle.read())
    return sha1.hexdigest()
|
d253d98039a916be19ee3ad1c3d050b2dd251c1e
| 17,955
|
def get_default_titles(combine, plot_data, nfunc_list, **kwargs):
    """Get some default titles for the plots.

    Optional kwargs: adfam (default False), adfam_nn (default False),
    true_signal (default True).  Any other kwarg raises TypeError.
    """
    adfam = kwargs.pop('adfam', False)
    adfam_nn = kwargs.pop('adfam_nn', False)
    true_signal = kwargs.pop('true_signal', True)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    titles = []
    if plot_data:
        titles.append('true signal' if true_signal else 'signal')
        titles.append('noisy data')
    if combine:
        titles.append('fit')
        return titles
    # When the nfunc entries are lists, only their last element is labelled.
    if any(isinstance(nf, list) for nf in nfunc_list):
        assert all(isinstance(nf, list) for nf in nfunc_list)
        nfuncs = [nf[-1] for nf in nfunc_list]
    else:
        nfuncs = nfunc_list
    if adfam:
        for family in (1, 2):
            titles.extend('$T={},N={}$'.format(family, nf) for nf in nfuncs)
    else:
        titles.extend('$N={}$'.format(nf) for nf in nfuncs)
    if adfam_nn:
        # Label the adfam parameter L rather than T.
        titles = [title.replace('T', 'L') for title in titles]
    return titles
|
236b29386f2ea4c38e803240a2a7fa167336c952
| 17,957
|
def FillAbstractObject(service, objectType, valueDict):
    """This method is useful for creating the "cartItems" and "payments" fields of
    the CheckoutShoppingCart method. It takes in the array currently being built,
    the name in the WSDL of the element to be added (e.g. DebitAccountInfo for payments
    or Package for the Item field of a CartItem."""
    instance = service.factory.create(objectType)
    # Copy every provided field onto the freshly created SOAP object.
    for field_name, field_value in valueDict.items():
        setattr(instance, field_name, field_value)
    return instance
|
d2a8d7625c05a70cad7e0e9a359e1326973ea3ff
| 17,958
|
import subprocess
def get_file_contents(branch, path):
    """Get the contents of a file on a particular branch.

    Raises subprocess.CalledProcessError when `git show` fails
    (unknown branch or path).
    """
    revision_spec = f'{branch}:{path}'
    completed = subprocess.run(
        ['git', 'show', revision_spec],
        check=True,
        capture_output=True,
    )
    return completed.stdout.decode('utf-8')
|
9e92f041cef6e82ec48f66aadf22d296321a4c8d
| 17,959
|
import win32api, win32process
from typing import Optional
def _set_win_process_priority() -> Optional[bool]:
    """
    Sets the process priority class to an elevated value.

    Microbenchmarks are typically very short in duration and therefore are prone
    to noise from other code running on the same machine. Setting the process priority
    to an elevated level while running the benchmarks helps mitigate that noise
    so the results are more accurate (and also more consistent between runs).

    Returns
    -------
    success : bool, optional
        Indication of whether (or not) setting the process priority class succeeded.
        If the priority did not need to be elevated (because it was already), None is returned.
    """
    # Psuedo-handle for the current process.
    # Because this is a psuedo-handle (i.e. isn't a real handle), it doesn't need to be cleaned up.
    curr_proc_hnd = win32api.GetCurrentProcess()

    # We use the 'ABOVE_NORMAL_PRIORITY_CLASS' here, as that should be good enough to reduce general noise;
    # if necessary, we can try the 'HIGH_PRIORITY_CLASS' but that class and higher can begin to cause the system
    # to become unresponsive so we'll avoid it unless needed; or we can control it with something like a
    # 'strong_hint' bool parameter which chooses between ABOVE_NORMAL_PRIORITY_CLASS and HIGH_PRIORITY.
    target_priority_class: int = win32process.ABOVE_NORMAL_PRIORITY_CLASS

    try:
        # Get the current process priority class. If it's already equal to or higher than the class
        # we were going to set to, don't bother -- we don't want to lower it.
        current_priority_class = win32process.GetPriorityClass(curr_proc_hnd)
        if current_priority_class >= target_priority_class:
            return None
        else:
            # Try to set the priority level for the current process.
            # It can fail if the user (or process) hasn't been granted the PROCESS_SET_INFORMATION right.
            # https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-setpriorityclass
            return win32process.SetPriorityClass(curr_proc_hnd, target_priority_class)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; pywin32 API failures derive from Exception.
        return False
|
c69e2ffcf1de40507f77113ea9d4a15730ed3f1a
| 17,960
|
import random
def find_junk(a, nodes, edge_list, cur_cci_junk):
    """Pick a random node ``c`` from ``nodes`` such that the pair ``[a, c]``
    appears in neither ``nodes`` nor ``cur_cci_junk``, and return ``(a, c)``.

    NOTE(review): ``edge_list`` is accepted but never used, and the loop
    condition tests ``[a, c] in nodes`` (a collection of nodes, not pairs) —
    possibly ``edge_list`` was intended there; confirm against the caller.
    Beware: loops forever if every candidate pair is excluded.
    """
    c = random.choice(nodes)
    # Re-draw until the candidate pair is not already present.
    while [a, c] in nodes or [a, c] in cur_cci_junk:
        c = random.choice(nodes)
    return a, c
|
258cea57cbff782a4d26863f46c31483e0263e6a
| 17,962
|
def linear_interp(x, in_min, in_max, out_min, out_max):
    """ linear interpolation function
        maps `x` between `in_min` -> `in_max`
        into  `out_min` -> `out_max`
    """
    in_span = in_max - in_min
    out_span = out_max - out_min
    return (x - in_min) * out_span / in_span + out_min
|
eff41353ee48e1c2b1372030fa5480ce8e3bb817
| 17,963
|
def get47Dfeatures():
    """
    Returns list containing the names of the 47 features found in the data accessible through
    `ttbarzp.import47Ddata()`
    """
    b_idx = range(1, 5)
    # Ordered b-quark index pairs: (1,2), (1,3), (1,4), (2,3), (2,4), (3,4).
    b_pairs = [(i, j) for i in b_idx for j in b_idx if i < j]
    feats = ['pT b%d' % i for i in b_idx]
    for prefix in ('sdEta', 'sdPhi', 'dR'):
        feats += ['%s b%d b%d' % (prefix, i, j) for i, j in b_pairs]
    feats += ['MET', 'pT l', 'MT l MET']
    feats += ['M b%d b%d' % (i, j) for i, j in b_pairs]
    feats += ['MT b%d l MET' % i for i in b_idx]
    feats += ['M j1 j2', 'pT j1', 'pT j2', 'dR j1 j2']
    feats += ['dR b%d l' % i for i in b_idx]
    feats += ['sdPhi b%d l' % i for i in b_idx]
    return feats
|
de433bf6e326978bf78205b7ed513a191a424c3b
| 17,964
|
def lrange(*args):
    """Eagerly materialise range(*args) as a list (Python 2 range compat)."""
    return [*range(*args)]
|
5b70d400cd9725284f9ba7b6d512386988f00b2e
| 17,965
|
def migrate_autoload_details(autoload_details, shell_name, shell_type):
    """ Migrate autoload details. Add namespace for attributes

    :param autoload_details:
    :param shell_name:
    :param shell_type:
    :return:
    """
    address_to_model = {}
    # Prefix every resource model with the shell name, remembering the
    # new model per relative address for the attribute pass below.
    for resource in autoload_details.resources:
        resource.model = "{shell_name}.{model}".format(shell_name=shell_name, model=resource.model)
        address_to_model[resource.relative_address] = resource.model
    for attribute in autoload_details.attributes:
        if attribute.relative_address:
            namespace = address_to_model[attribute.relative_address]
        else:
            # Empty relative address denotes the root element.
            namespace = shell_type
        attribute.attribute_name = "{model}.{attr_name}".format(model=namespace,
                                                                attr_name=attribute.attribute_name)
    return autoload_details
|
f316a0a63520a9b245d3000d23181c2fff3b2292
| 17,966
|
def new_list(a):
    """
    Converts lists into number format with minimal decimal places
    :param a: list
    :return: new list with floats
    """
    # format(..., ".2f") rounds to two decimals; float() re-parses it.
    return [float(format(value, ".2f")) for value in a]
|
f1f80ea44f58f0780f02e8df698dbeb81d7ac934
| 17,967
|
def z2g(r_geoid, g0, z):
    """Calculate gravitational acceleration at elevation

    Derived from atmlabs equivalent function
    https://www.sat.ltu.se/trac/rt/browser/atmlab/trunk/geophysics/pt2z.m

    :param r_geoid: surface radius at point [m]
    :param g0: surface gravitational acceleration at point [m/s^2]
    :param z: elevation [m]
    :returns: gravitational acceleration at point [m/s^2]
    """
    # Inverse-square falloff of gravity with distance from the centre.
    ratio = r_geoid / (r_geoid + z)
    return g0 * ratio ** 2
|
c04080156670e137f56ba4dfc872530bbada4d27
| 17,969
|
import re
def a_bytes_copied(plugin_ctx, fsm_ctx):
    """Log the 'N bytes copied in ... secs' line from the device output,
    falling back to logging the raw output when it is absent.  Always
    returns True so the FSM continues."""
    output = fsm_ctx.ctrl.before
    match = re.search(r"\d+ bytes copied in .* secs", output)
    message = match.group(0) if match else output
    plugin_ctx.info('{}'.format(message))
    return True
|
5b5f9135660a0ab7e634b2b0630f8af47ae02dd5
| 17,970
|
def get_crawled_date(df):
    """ Extract crawled date (first 8 characters of 'scrape_id') into a
    new 'crawled_date' column; mutates and returns *df*. """
    df['crawled_date'] = df['scrape_id'].astype(str).str[:8]
    return df
|
af5242fa98de713eaa0bbd8684f797b5db563033
| 17,971
|
import random
import math
def box_optimizer(f, starting_point, gs=[], x_range=[-1000, 1000], alpha=1.3, epsilon=1e-6, limit_iter=-1):
    """Finds minimum for function for given restrictions

    Args:
        f (TargetFunction): [function for which minimum needs to be found]
        starting_point ([list]): [starting point for optimizations]
        gs (list, optional): [implicit limitations, lambda expressions]. Defaults to [].
        x_range (list, optional): [explicit limitations, range]. Defaults to [-1000, 1000].
        alpha (float, optional): [alpha for calculating reflection]. Defaults to 1.3.
        epsilon ([float], optional): [threshold for stopping criteria]. Defaults to 1e-6.
        limit_iter (int, optional): [maximum iterations]. Defaults to -1.

    Raises:
        ValueError: [raised in case starting point doesn't match implicit or explicit restrictions]

    Returns:
        [list]: [point for which function is minimal]

    NOTE(review): `gs` and `x_range` are mutable default arguments; they are
    never mutated here so this is benign, but tuples would be safer.
    """
    # Validate the starting point against explicit (box) constraints...
    X0 = starting_point
    for x in X0:
        if x > x_range[1] or x < x_range[0]:
            raise ValueError(
                "Starting point doesn't meet x_range requirements")
    # ...and against implicit constraints g(x) >= 0.
    if len(gs) != 0:
        for g in gs:
            if g(*X0) < 0:
                raise ValueError("Starting point doesn't meet gs requirements")
    n = len(X0)
    Xc = X0
    Xes = [X0]
    # Build the remaining 2n-1 points of the complex (Box's method uses 2n
    # points), pulling infeasible candidates halfway toward the centroid.
    # NOTE(review): a single random draw r is reused for every coordinate,
    # so each generated point lies on the box diagonal — possibly r should
    # be drawn per coordinate; confirm intent.
    for _ in range(2 * len(Xc) - 1):
        r = random.random()
        X_new = [x_range[0] + r * (x_range[1] - x_range[0])] * len(Xc)
        while any([g(*X_new) < 0 for g in gs]):
            X_new = [(1 / 2) * (x + c) for x, c in zip(X_new, Xc)]
        Xes.append(X_new)
        Xc = [sum([x[i] for x in Xes]) / len(Xes) for i in range(n)]
    it = 0
    while True:
        # Identify the worst (h) and second-worst (h2) points.
        f_vals = [f(*x) for x in Xes]
        h = f_vals.index(sorted(f_vals, reverse=True)[0])
        h2 = f_vals.index(sorted(f_vals, reverse=True)[1])
        # Centroid of all points except the worst one.
        # NOTE(review): `Xes.index(x)` returns the first occurrence, so
        # duplicate points could exclude the wrong entry here.
        Xc = [sum([x[i] for x in Xes if Xes.index(x) != h]) /
              (len(Xes) - 1) for i in range(n)]
        # Reflect the worst point through the centroid by factor alpha.
        Xr = [(1 + alpha) * Xc[i] - alpha * Xes[h][i] for i in range(n)]
        # Clip the reflection back into the explicit box constraints.
        for i in range(n):
            if Xr[i] < x_range[0]:
                Xr[i] = x_range[0]
            elif Xr[i] > x_range[1]:
                Xr[i] = x_range[1]
        # Repair implicit-constraint violations by halving toward the centroid.
        while any([g(*Xr) < 0 for g in gs]):
            Xr = [(1 / 2) * (x + c) for x, c in zip(Xr, Xc)]
        # If still worse than the second-worst point, move halfway to centroid.
        if f(*Xr) > f(*Xes[h2]):
            Xr = [(1 / 2) * (x + c) for x, c in zip(Xr, Xc)]
        Xes[h] = Xr
        # Stop when the RMS deviation of f over the complex (relative to the
        # centroid value) falls below epsilon.
        if math.sqrt((1/len(Xes)) * sum([(f(*Xes[i]) - f(*Xc)) ** 2 for i in range(len(Xes))])) <= epsilon:
            f_vals = [f(*x) for x in Xes]
            return Xes[f_vals.index(min(f_vals))]
        if limit_iter > 0 and it >= limit_iter:
            print(
                '\033[93mMax iteration {} reached for box optimizer\033[0m'.format(limit_iter))
            f_vals = [f(*x) for x in Xes]
            return Xes[f_vals.index(min(f_vals))]
        it += 1
|
9ff50db7711d2d94025bd956278749d6c59547c6
| 17,972
|
async def add_future_callback(fut, success_callback, fail_callback=None, need_fut_as_success_cb_args=True):
    """
    Await *fut*, then invoke *success_callback* (with the awaited result by
    default, or with no arguments when need_fut_as_success_cb_args is False).
    If awaiting or the success callback raises, *fail_callback* (when given)
    is awaited and the exception is re-raised.  Returns the awaited result.
    """
    try:
        outcome = await fut
        # The success callback runs inside the try so its failures also
        # trigger the fail callback, matching the original contract.
        cb_args = (outcome,) if need_fut_as_success_cb_args else ()
        await success_callback(*cb_args)
    except Exception:
        if fail_callback:
            await fail_callback()
        raise
    return outcome
|
a42c8e2cc31d91da7333842710ed8677cabb1baa
| 17,973
|
from typing import Union
from typing import Callable
from typing import List
from typing import Tuple
def make_cmd(
    to_fn_call: Union[str, Callable[[List[str]], str]], arity: int
) -> Tuple[str, int]:
    """
    Returns a tuple with the transpiled command and its arity.

    :param to_fn_call
        If Callable, takes a list of variables that hold values popped from
        the stack (reversed) and returns a string representing the value
        created by running some function on them.
        If str, its format method will be called with that list of
        variables as arguments.
    :param arity The arity of the function
    """
    # Popped values are named x{arity}..x1 (reversed stack order).
    arg_vars = [f"x{i}" for i in range(arity, 0, -1)]
    if isinstance(to_fn_call, str):
        fn_call = to_fn_call.format(*arg_vars) if arity > 0 else to_fn_call
    else:
        fn_call = to_fn_call(arg_vars)
    pop_stmt = (
        f"{', '.join(arg_vars[::-1])} = pop(vy_globals.stack, {arity});"
        if arity > 0
        else ""
    )
    return pop_stmt + f"res = {fn_call}; vy_globals.stack.append(res);", arity
|
bf71c9cb40aee40df3cee1ee08e156bca62091ff
| 17,974
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.