content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def dict_raise_on_duplicates(ordered_pairs):
    """Build a dict from (key, value) pairs, rejecting duplicate keys.

    RFC4627 merely says that keys in a JSON file SHOULD be unique, so
    `json.loads()` silently lets later values overwrite earlier ones.
    In creating Rulesets we want duplicate keys to be an error instead.
    Algorithm from https://stackoverflow.com/a/14902564

    Args:
        ordered_pairs (list(tuple)): A list of (key, value) pairs.
    Raises:
        ValueError: When there are duplicate keys.
    Returns:
        dict: A dictionary constructed from `ordered_pairs`.
    """
    result = {}
    for key, value in ordered_pairs:
        if key in result:
            raise ValueError("duplicate key: %r" % (key,))
        result[key] = value
    return result
|
58003a27b60524fdbd8eaf3b6f4021b17a9dcd0f
| 412,937
|
def calculate_total_infections(new_infections):
    """Sum the new infections over time for every location.

    Args:
        new_infections: a xr.DataArray containing new_infections and dimension
            (locations, time)
    Returns:
        total_infections: a xr.DataArray containing the summed infections of
            dimension (locations)
    """
    total_infections = new_infections.sum('time')
    return total_infections
|
935d629d662605cd5c1714e3705a1037d0c4bf1e
| 259,381
|
def has_subscriptions(session, *, topic_arn):
    """
    Return True if the SNS topic `topic_arn` has at least one subscription
    (e.g. an SQS queue), False otherwise.
    """
    subscriptions = (
        session.client("sns")
        .list_subscriptions_by_topic(TopicArn=topic_arn)["Subscriptions"]
    )
    return len(subscriptions) > 0
|
89c2ff4f261fbc19cb118282e60ef5d32c7396db
| 237,780
|
import math
def can_be_written_as_sum_of_nth_power(number: int, exponent: int) -> bool:
    """Check if a given integer number `number` can be written
    as the sum of `exponent`th powers of its digits.

    Uses exact integer arithmetic (``**``) instead of ``math.pow`` so that
    large numbers are not subject to float rounding error and the comparison
    stays int == int.
    """
    return number == sum(int(digit) ** exponent for digit in str(number))
|
4144ae7ebb4e4ca4b19c0eadb94d75d02e0f1f39
| 260,792
|
def load_list_room_items(data, room_names):
    """
    Collect each room's respective list of items.

    Parameters:
        data(dict): Nested dictionaries containing
            all information of the game.
        room_names(list): List containg room names.
    Returns:
        items_list(list): Returns list of room items.
    """
    rooms = data['Rooms']
    return [rooms[name]['Items'] for name in room_names]
|
728c5903945cc992ed06505c60554557875aed8f
| 677,536
|
def convert_to_mixed_fraction(number, denominators):
    """
    Convert floats to components of a mixed fraction representation

    Returns the closest fractional representation using the
    provided denominators. For example, 4.500002 would become
    the whole number 4, the numerator 1 and the denominator 2

    Args:
        number (float): number for convert
        denominators (iter of ints): denominators to use, default [1 .. 20]
    Returns:
        whole, numerator, denominator (int): Integers of the mixed fraction
    """
    whole = int(number)
    if whole == number:
        # Exact whole number: no fractional part.
        return whole, 0, 1
    fractional = abs(number - whole)
    denominators = denominators or range(1, 21)
    for denom in denominators:
        numerator = abs(fractional) * denom
        # Accept the first denominator that lands within 0.01 of an integer.
        if abs(numerator - round(numerator)) < 0.01:
            return whole, int(round(numerator)), denom
    # No denominator matched within tolerance.
    return None
|
bc3f111ff5b9cf68b37234b42d9daea2824c2bf4
| 61,237
|
def adjacency_dict(graph):
    """
    Return the adjacency-list representation of `graph` as a dict mapping
    each node to the list of its neighbours.
    """
    neighbours = {node: [] for node in graph.nodes}
    for edge in graph.edges:
        src, dst = edge[0], edge[1]
        neighbours[src].append(dst)
        if not graph.is_directed:
            # Undirected graphs record the edge in both directions.
            neighbours[dst].append(src)
    return neighbours
|
6ee235e491bce03a9a3b632230faa4618f1c1192
| 411,798
|
def evaluate_g6( kappa, nu, eta, tau, sigma, l, mu, s6 ):
    """
    Evaluate the sixth constraint equation and also return the jacobians

    :param float kappa: The value of the modulus kappa
    :param float nu: The value of the modulus nu
    :param float eta: The value of the modulus eta
    :param float tau: The value of the modulus tau
    :param float sigma: The value of the modulus sigma
    :param float l: The value of the modulus lambda
    :param float mu: The value of the modulus mu
    :param float s6: The value of the constraint
    """
    # Common sub-expressions of the constraint and its derivatives.
    bulk = 3 * l + 2 * mu
    combo = kappa + nu + 2 * eta - 3 * tau - 2 * sigma
    shear = 3 * tau + 2 * sigma
    residual = combo * bulk - shear ** 2 - s6 ** 2
    jacobian = {
        'kappa': bulk,
        'nu': bulk,
        'eta': 2 * bulk,
        'tau': -3 * bulk - 6 * shear,
        'sigma': -2 * bulk - 4 * shear,
        'lambda': 3 * combo,
        'mu': 2 * combo,
        's6': -2 * s6,
    }
    return residual, jacobian
|
933bbed9d1b687570d82ea6091303e80476d06f6
| 198,982
|
import inspect
def _do_doc(name, obj, module):
    """Check if object should be documented"""
    # Private names are never documented.
    if name.startswith("_"):
        return False
    # Skip descriptor/module member kinds that should not be documented.
    undocumented_kinds = ("datadescriptor", "module", "memberdescriptor",
                          "methoddescriptor", "getsetdescriptor")
    if any(getattr(inspect, f"is{kind}")(obj) for kind in undocumented_kinds):
        return False
    # Only document objects defined in the current module, not ones that
    # were merely imported into it.
    if hasattr(obj, "__module__"):
        return obj.__module__ == module.__name__
    return True
|
e1bf0246044349f35e1b2d2876da54bdb7cd1427
| 164,051
|
def utilization_threshold_abstract(f, limit, utilization):
    """ The abstract utilization threshold algorithm.

    Decide whether a host is overloaded by comparing its latest utilization
    sample against a threshold computed by `f` from the full history.

    :param f: A function to calculate the utilization threshold.
    :type f: function
    :param limit: The minimum allowed length of the utilization history.
    :type limit: int
    :param utilization: The utilization history to analize.
    :type utilization: list(float)
    :return: A decision of whether the host is overloaded.
    :rtype: bool
    """
    # Not enough history collected yet: never report overload.
    if len(utilization) < limit:
        return False
    threshold = f(utilization)
    return threshold <= utilization[-1]
|
22f065055798dca56881ea6ebb6c6cf328f94fb7
| 360,972
|
def index(x, axis, index_spec):
    """
    Form indexing tuple for NumPy arrays.

    Given an array `x`, generates the indexing tuple that has :py:class:`slice`
    in each axis except `axis`, where `index_spec` is used instead.

    Parameters
    ----------
    x : :py:class:`~numpy.ndarray`
        Array to index.
    axis : int
        Dimension along which to apply `index_spec` (negative values allowed).
    index_spec : int or :py:class:`slice`
        Index/slice to use.

    Returns
    -------
    indexer : tuple
        Indexing tuple.
    """
    # Start from "take everything" on every axis, then override one entry.
    # List assignment keeps support for negative `axis` values.
    indexer = [slice(None)] * x.ndim
    indexer[axis] = index_spec
    return tuple(indexer)
|
8eb80e8284a8fd73a0ef0a52729b1317c012a3ef
| 193,096
|
def is_above_area(self, y):
    """ Report whether the point at height `y` lies above the markdown area.

    :param self: MarkdownRenderer
    :param y: y-coordinate
    :return: boolean
    """
    area_top = self.y
    return y < area_top
|
4fb8d8a697175b9678c6973e0cf72aa1f6a1ba74
| 392,635
|
import torch
def quaternions_to_rotation_matrices(quaternions):
    """
    Convert a batch of quaternions into rotation matrices.

    Arguments:
    ---------
        quaternions: Tensor with size Kx4, where K is the number of quaternions
                     we want to transform to rotation matrices
    Returns:
    -------
        rotation_matrices: Tensor with size Kx3x3, that contains the computed
                           rotation matrices
    """
    K = quaternions.shape[0]
    R = quaternions.new_zeros((K, 3, 3))
    # Quaternion layout is q = w + xi + yj + zk, i.e. column 0 is w.
    w, x, y, z = (quaternions[:, i] for i in range(4))
    ww, xx, yy, zz = w ** 2, x ** 2, y ** 2, z ** 2
    n = (ww + xx + yy + zz).unsqueeze(-1)
    # Scale factor 2/|q|^2; zero-norm quaternions keep s == 0, which yields
    # the identity matrix below.
    s = quaternions.new_zeros((K, 1))
    s[n != 0] = 2 / n[n != 0]
    sc = s[:, 0]
    xy, xz, yz = sc * x * y, sc * x * z, sc * y * z
    xw, yw, zw = sc * x * w, sc * y * w, sc * z * w
    sxx, syy, szz = sc * xx, sc * yy, sc * zz
    R[:, 0, 0] = 1 - syy - szz
    R[:, 0, 1] = xy - zw
    R[:, 0, 2] = xz + yw
    R[:, 1, 0] = xy + zw
    R[:, 1, 1] = 1 - sxx - szz
    R[:, 1, 2] = yz - xw
    R[:, 2, 0] = xz - yw
    R[:, 2, 1] = yz + xw
    R[:, 2, 2] = 1 - sxx - syy
    return R
|
0a2c35ee5ab2ff40997720f84384bd884e6c1dcc
| 503,252
|
def reassign_use(node):
    """Get the usage of the re-assigned test_var."""
    # Fourth top-level statement, second statement of its body.
    statement = node.body[3].body[1]
    return statement.value
|
79f4e941cad102f652480eecdcdbcd4c7915c727
| 334,496
|
def gp_predict(gpmodel, para_array_rescaled):
    """Returns a Gaussian Process prediction of PCA weights

    Parameters
    ----------
    gpmodel: GPflow object
        Trained GPflow model corresponding to a particular redshift
    para_array_rescaled: ndarray of shape (5, )
        Rescaled Cosmological parameters

    Returns
    _______
    W_predArray: float or ndarray of shape (nRankMax, )
        Mean GP prediction of the PCA weights
    W_varArray: float or ndarray of shape (nRankMax, )
        Variance in the GP prediction of the PCA weights
    """
    prediction = gpmodel.predict_f(para_array_rescaled)
    # predict_f returns the posterior mean at [0] and the variance at [1].
    return prediction[0], prediction[1]
|
c4df97f3e9bb26c762dfe6751753ef8037863b6a
| 619,117
|
from datetime import datetime
import traceback
def convert_datetime(date_string, date_format='%A %d. %B %Y, %H:%M'):
    """Parse a date string of format WEEKDAY DD. MONTH YYYY, HH:MM.

    Args:
        date_string: string to parse (e.g. "Sonntag 9. Juni 2019, 09:00");
            surrounding whitespace is ignored.
        date_format: strptime format used for parsing.

    Returns:
        datetime: the parsed value, or the sentinel datetime(1900, 1, 1)
        when parsing fails (the traceback is printed for diagnosis).
    """
    try:
        return datetime.strptime(date_string.strip(), date_format)
    except (AttributeError, ValueError, TypeError):
        # Narrowed from a bare `except`: only parsing-related failures are
        # swallowed, so KeyboardInterrupt/SystemExit now propagate.
        print(traceback.format_exc())
        return datetime(1900, 1, 1)
|
9cf7a294a0d80f53cd50a2306deb2e222a5c582e
| 142,349
|
import re
def is_slack_id(slack_id: str) -> bool:
    """
    Check if id given is a valid slack id.

    :param id: string of the object you want to check
    :return: true if object is a slack id, false otherwise
    """
    # Valid ids start with U or W followed by exactly 8 uppercase
    # alphanumerics.
    match = re.match("^[UW][A-Z0-9]{8}$", slack_id)
    return bool(match)
|
573dabea9a6e814d17cc91758d4a694f4737e7c0
| 599,221
|
def get_value(component, name):
    """ Read the current value of variable `name` from an FMU component,
    dispatching on the variable's declared type. """
    variable = component.variables[name]
    refs = [variable.valueReference]
    var_type = variable.type
    if var_type == 'Real':
        return component.fmu.getReal(refs)[0]
    if var_type in ('Integer', 'Enumeration'):
        return component.fmu.getInteger(refs)[0]
    if var_type == 'Boolean':
        # FMU booleans come back as ints; normalise to bool.
        return component.fmu.getBoolean(refs)[0] != 0
    raise Exception("Unsupported type: %s" % var_type)
|
5a36efdea0d36339a8b6f818742bb0bc9827e8ed
| 594,160
|
def scatter_geo(ds, prop="pressure", stat="count"):
    """
    Takes in xarray Dataset and creates a tidy and clean pandas DataFrame for plotting with plotly

    Parameters
    ----------
    ds : xarray Dataset
        A dataset of aggregated data with a specified format
    prop : str, default 'pressure'
        Atmospheric property of interest
    stat : str, default 'count'
        A statistic of interest to show on the plot. Options: count, mean, median, std, min, max
    Returns
    -------
    df : pandas DataFrame
        A 'tidy' dataframe suitable for plotting data
    """
    # "total count" (case-insensitive) is an alias: use the "count" statistic
    # and additionally sum it over the date dimension.
    if stat.lower()=="total count":
        stat = "count"
        da = ds.sel(stat=stat)[prop]
        da = da.sum(dim="date")
    else:
        da = ds.sel(stat=stat)[prop]
    # transform data to pandas DataFrame so it's easier to plot with plotly. And clean dataframe
    df = da.to_dataframe(name=stat)
    df = df.dropna().drop(["stat"], axis=1)
    # Keep only strictly positive values for the plot.
    df = df[df[stat]>0]
    if (df[stat].max() - df[stat].min()) > 100: # if values range is bigger than 2 orders of magnitude then scale column
        df = df.scale_column(col=stat)  # NOTE(review): `scale_column` is not a pandas DataFrame method — presumably patched in elsewhere in this project; confirm.
    df = df.reset_index()
    return df
|
8ad34b181df030d370a2014846f53a29b4b1496b
| 635,848
|
def day2deg(year_length, day):
    """Convert a calendar day into degrees that the planet has orbited."""
    fraction_of_year = day / year_length
    return fraction_of_year * 360.0
|
eb2ebfde45679bbfd5f369996b34a59536f4c1cb
| 364,698
|
def parseList(txt):
    """
    Parse a comma-separated line into a list.

    :param str txt: String from config file to parse.
    :return: List, based on the input string.
    :rtype: list
    """
    separator = ','
    return txt.split(separator)
|
411653b4c2b59ac1c39e458dbcc3b018816d2e98
| 492,576
|
def is_explicitly_rooted(path):
    """Return whether a relative path is explicitly rooted relative to the
    cwd, rather than starting off immediately with a file or folder name.

    It's nice to have paths start with "./" (or "../", "../../", etc.) so, if a
    user is that explicit, we still find the path in the suffix tree.
    """
    # Bare "." / ".." count as rooted, as does any "./" or "../" prefix.
    if path in ('..', '.'):
        return True
    return path.startswith(('../', './'))
|
bde26849889ac5c951160e441cdd0c3c60871ab1
| 11,181
|
def any(seq, pred=None):
    """Returns True if pred(x) is true for at least one element in the iterable"""
    # NOTE: intentionally shadows the builtin `any` (kept for compatibility).
    # A sentinel distinguishes "no matching element" from a falsy match.
    _missing = object()
    return next(filter(pred, seq), _missing) is not _missing
|
3f878aeceb4f924b5209738f6336e674feb39209
| 679,024
|
def __metal_to_SI(vol, T):
    """
    Converts LAMMPS metal units to SI units for thermal conductivity calculations.

    Args:
        vol (float):
            Volume in angstroms^3
        T (float):
            Temperature in K
    Returns:
        float: Converted value
    """
    boltzmann = 1.38064852e-23  # m3*kg/(s2*K)
    volume_m3 = vol / (1.0e10) ** 3  # angstrom^3 -> m^3
    # Unit conversion factor: eV2*ns/(ps2*A4) to J2/(s*m4)
    conversion = (1.602e-19) ** 2. * 1.0e12 * (1.0e10) ** 4.0 * 1000.
    return volume_m3 * conversion / (boltzmann * T ** 2)
|
a3a717e677749ecf1488b3aa7faf4a99b6f04810
| 472,742
|
from typing import Iterable
def lines_to_matrix(lines: Iterable) -> list:
    """Build the matrix from the sequencer output lines.

    Only lines that split into exactly 18 whitespace-separated fields are
    kept; all other lines are silently skipped.
    """
    return [fields
            for fields in (line.split() for line in lines)
            if len(fields) == 18]
|
efff292a8241170f8b624e8269b92ecbdb4309f5
| 470,788
|
import time
def monotoniclocaltime(seconds=None):
    """monotoniclocaltime([seconds]) -> (tm_year,tm_mon,tm_mday,tm_hour,tm_min,
                                  tm_sec,tm_wday,tm_yday,tm_isdst)

    Convert seconds since the Epoch to a time tuple expressing monotonic
    local time. Monotonicity is achieved by extending the day before the
    end of DST with some extra hours (24, 25 etc) until after the switch."""
    if seconds is None:
        seconds = time.time()
    res = time.localtime(seconds)
    # Seconds elapsed since local midnight of the current day.
    dayseconds = res.tm_sec + res.tm_min*60 + res.tm_hour*3600
    # Local time at the start of the *next* day (this midnight + 24h).
    nextmidnight = time.localtime(seconds - dayseconds + 86400)
    # DST ends before the next midnight: re-label this moment as yesterday's
    # date with an hour field >= 24, so successive timestamps keep increasing
    # across the switch instead of jumping backwards.
    if res.tm_isdst and not nextmidnight.tm_isdst:
        tl = list(time.localtime(seconds - 86400)) # same time yesterday
        tl[3] += 24 # plus 24h
        res = time.struct_time(tl)
    return res
|
cb78bd6f8e1603100b0fbc3cda2453162abfa944
| 548,518
|
import base64
def invoke_lambda_and_get_duration(lambda_client, payload, function_name):
    """
    Invokes Lambda and return the duration.

    :param lambda_client: Lambda client.
    :param payload: payload to send.
    :param function_name: function name.
    :return: duration.
    """
    response = lambda_client.invoke(
        FunctionName=function_name,
        InvocationType='RequestResponse',
        LogType='Tail',
        Payload=payload,
    )
    # The tail log comes back base64-encoded; the REPORT line carries the
    # invocation metrics.
    log_text = base64.b64decode(response['LogResult']).decode('utf-8')
    report_line = [line for line in log_text.split('\n')
                   if line.startswith('REPORT')][0]
    duration_col = [col for col in report_line.split('\t')
                    if col.startswith('Duration')][0]
    # Column looks like "Duration: 12.34 ms" -> take the numeric token.
    return float(duration_col.split()[1])
|
c8d2b77e4f7bc338efcdfd21db4f7297a625b05c
| 8,433
|
def sleep_interval(interval, now, wake_up_second):
    """
    Work out how long to sleep (so we don't drift over time)

    Only works properly when interval is at least one minute and the
    runtime between calls is less than interval (although it will
    return a reasonable value in both these situations).

    :param interval: number of seconds between cycles
    :param now: the current time
    :param wake_up_second: modula time to wake up at
    :return: number of seconds to sleep
    """
    target = float(now + interval)
    if wake_up_second > interval:
        # Cannot align to a modulus larger than the interval; just sleep
        # one full interval.
        return target - now
    # Snap the wake-up time back to the nearest earlier multiple of
    # wake_up_second so successive cycles stay aligned.
    target -= target % wake_up_second
    return target - now
|
4dd5e5bc0b45c374e28bc6b650283b1672077310
| 403,840
|
import math
def Rotate2D(ax, ay, ix, iy, angle):
    """
    Rotate the point (ix, iy) around the anchor point (ax, ay).

    Input
    -----
    ax, ay: coordinates of the anchor point
    ix, iy: coordinates of the point to rotate
    angle: angle in degree (counter-clockwise rotation is positive rotation)

    Output
    ------
    x, y : coordinates of a rotated point
    """
    theta = math.radians(angle)
    cos_t, sin_t = math.cos(theta), math.sin(theta)
    # Work in the anchor's frame, rotate, then translate back.
    dx, dy = ix - ax, iy - ay
    return (dx * cos_t - dy * sin_t + ax,
            dx * sin_t + dy * cos_t + ay)
|
1c7e0a3bd5acc6f7c85bdae4dc37e192f94535da
| 130,820
|
def _return_value(resource, key):
    """ Return the value stored under `key` in the resource mapping """
    value = resource[key]
    return value
|
80cc72be0955e1b931422288dc84ede1d2098334
| 29,339
|
def find_if(cond, seq):
    """
    Return the first x in seq such that cond(x) holds, if there is one.
    Otherwise return None.
    """
    return next((item for item in seq if cond(item)), None)
|
0978689c29bc06fb1783083cec7f5e7f87eeb07e
| 689,846
|
def n_states_of_vec(l, nval):
    """ Returns the amount of different states a vector of length 'l' can be
        in, given that each index can be in 'nval' different configurations.
    """
    # Exact type checks (not isinstance) so e.g. bools are rejected too.
    valid = type(l) is int and type(nval) is int and l >= 1 and nval >= 1
    if not valid:
        raise ValueError("Both arguments must be positive integers.")
    return nval ** l
|
98770fa5a5e62501bf365a4a5a40a932b2ba2450
| 1,415
|
def support_count(itemset, transactions):
    """
    Count support count for itemset

    :param itemset: items to measure support count for
    :param transactions: list of sets (all transactions)

    >>> simple_transactions = ['ABC', 'BC', 'BD', 'D']
    >>> [support_count(item, simple_transactions) for item in 'ABCDE']
    [1, 3, 2, 2, 0]
    >>> some_transactions = [set(['beer', 'bread', 'milk']), set(['beer']), set(['milk'])]
    >>> support_count(set(['beer']), some_transactions)
    2
    """
    # Hoist the itemset conversion out of the loop; a transaction supports
    # the itemset when the itemset is a subset of it.
    target = set(itemset)
    return sum(1 for transaction in transactions if target <= set(transaction))
|
2d533a81a646b1973980386c9b85998d8fb65be0
| 31,332
|
def _dropSynapseIndices(df):
    """
    Removes synapse schema class 'ROW_VERSION' and 'ROW_ID' columns.

    :param df: An entity-view data frame with the possible 'ROW_VERSION' and 'ROW_ID' columns
    :return: An entity-view data frame without the 'ROW_VERSION' and 'ROW_ID' columns
    """
    # Drop only when *both* columns are present; mutates df in place.
    synapse_cols = {'ROW_VERSION', 'ROW_ID'}
    if synapse_cols.issubset(df.columns):
        df.drop(['ROW_VERSION', 'ROW_ID'], axis=1, inplace=True)
    return df
|
fda11530fa63585eb49b7e9376da0095063275b5
| 470,978
|
import math
def distance(lat1, lng1, lat2, lng2):
    """
    Calculates distance in miles between two lat, long pairs

    Thanks to Philipp Jahoda of StackOverflow.com.
    """
    earth_radius_miles = 3958.75
    # Haversine formula.
    sin_half_dlat = math.sin(math.radians(lat2 - lat1) / 2)
    sin_half_dlng = math.sin(math.radians(lng2 - lng1) / 2)
    a = (sin_half_dlat ** 2
         + sin_half_dlng ** 2
         * math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)))
    c = 2 * math.atan2(a ** .5, (1 - a) ** .5)
    return earth_radius_miles * c
|
49e0181811b4f7680feb798e3016835a507917b7
| 356,473
|
def find_sum(inputs, target):
    """
    given a list of input integers, find the (first) two numbers
    which sum to the given target, and return them as a 2-tuple
    (smaller-or-equal element first). Return None if the sum could
    not be made.
    """
    for value in inputs:
        # Only consider the smaller half so the pair is reported once.
        if value < target // 2 + 1:
            complement = target - value
            if complement == value:
                # Bug fix: when the pair is (x, x) the list must contain
                # x at least twice; the old code matched a single x.
                if inputs.count(value) >= 2:
                    return (value, complement)
            elif complement in inputs:
                return (value, complement)
    return None
|
8961cb6dd02ef65afa14c1ac46280055960ed6a0
| 688,729
|
import ast
def valid_check(code):
    """Performs a check to ensure that the code provided by the user is syntactically correct.

    :param code: Source code string block.
    :type code: str
    :returns: True is successfully compiled, False otherwise.
    """
    try:
        ast.parse(code)
    except (SyntaxError, ValueError):
        # ValueError covers inputs ast.parse rejects outright (e.g. source
        # containing null bytes on some Python versions); previously this
        # escaped as an unhandled exception.
        return False
    return True
|
b4dad88c1be86eda28cbbc0a7eb60fcaed502f29
| 100,378
|
def strip_attributes(*names):
    """ Strips all attributes with the keys provided as ``names`` from the element. """
    def handler(element, previous_result):
        # pop with a default so missing attributes are silently ignored.
        for attr_name in names:
            element.attrib.pop(attr_name, None)
        return previous_result
    return handler
|
8f7b915659195d559585a0cd676366a1b1fbb9d5
| 187,236
|
def list_children_nested(self, **kwargs):
    """
    This method will return a list of all children (either ACCOUNT or ORGANIZATIONAL_UNIT) for the given ParentId. It
    includes children, grandchildren lower levels of nesting.

    :param self: organizations client
    :param kwargs: these are passed onto the list_children method call
    :return: list of children in the structure of [{'Id': "0123456789010"}, {'Id': "1009876543210"}]
    """
    child_type = kwargs.get('ChildType')
    parent_id = kwargs.get('ParentId')
    if child_type == 'ACCOUNT':
        # Accounts directly under this parent...
        response = self.list_children_single_page(ParentId=parent_id, ChildType='ACCOUNT')
        my_account_children = response.get('Children')
        # ...plus accounts found by recursing into every child OU.
        response = self.list_children_single_page(ParentId=parent_id, ChildType='ORGANIZATIONAL_UNIT')
        my_org_children = response.get('Children')
        for my_org_child in my_org_children:
            my_account_children += self.list_children_nested(ParentId=my_org_child.get('Id'), ChildType='ACCOUNT')
        return my_account_children
    elif child_type == 'ORGANIZATIONAL_UNIT':
        # NOTE(review): unlike the ACCOUNT branch (which accumulates dicts from
        # list_children_single_page), this branch seeds the result with the bare
        # ParentId string itself — callers get mixed shapes; confirm intended.
        my_account_children = [kwargs.get('ParentId')]
        response = self.list_children_single_page(ParentId=parent_id, ChildType='ORGANIZATIONAL_UNIT')
        my_org_children = response.get('Children')
        for my_org_child in my_org_children:
            my_account_children += self.list_children_nested(ParentId=my_org_child.get('Id'), ChildType='ORGANIZATIONAL_UNIT')
        return my_account_children
    else:
        raise Exception('Unsupported ChildType: {}'.format(child_type))
|
6b338482e1673d4bedf6a4931c5f35ec3406978c
| 134,449
|
def get_commit_message(local, master, revid):
    """Returns the commit message of a branch revision.

    Prefers the local branch when it is truthy, falling back to master.
    """
    branch = local if local else master
    return branch.repository.get_revision(revid).message
|
ab3cbd3a1c281fde84dbc4028a9e70db9f3e3c54
| 273,061
|
def d2t(d):
    """Dict into tuple of (key, value) pairs, sorted by key."""
    items = sorted(d.items())
    return tuple(items)
|
cdbea283c1618169f7c61136e966c03de9011ba1
| 422,066
|
def standardise_cell_values(pd_df, dict_of_nonstandard_standard):
    """
    Maps non-standard values e.g. "Males" to standard values like "M".
    Mapping is carried out on a column-specific basis.
    """
    replacements = dict_of_nonstandard_standard
    return pd_df.replace(to_replace=replacements, value=None)
|
47cd855552efde6ad1c9190940c45c2c11750ae1
| 127,308
|
import itertools
def get_possible_labels(Y):
    """Computes the set of unique labels from the dataset labels."""
    flattened = itertools.chain.from_iterable(Y)
    return list(set(flattened))
| 254,340
|
import ast
def eq(*arguments):  # pylint: disable=invalid-name
    """
    Equals function: build an ``ast.Eq`` comparison operator node.
    """
    node = ast.Eq(*arguments)
    return node
|
0f590adb54c5ec5374a620bbfefb8c8a7e452c4e
| 196,152
|
def to_bytes(bytes_or_string):
    """if string, return the encoded bytes. else return itself"""
    is_text = isinstance(bytes_or_string, str)
    return bytes_or_string.encode('utf-8') if is_text else bytes_or_string
|
20f1a7f9d045ee8e97a75ee8a7e43029314a65fe
| 309,923
|
def inject_string(string_input, index, injection):
"""
Inject one string into another at a given index
:param string_input: string to accept injections
:param index: index where injection will be applied
:param injection: string that will be injected
:return: string
"""
return string_input[:index] + str(injection) + string_input[index:]
|
e7e2cd0970029329cd3663831ef3ffebe2c87e81
| 548,842
|
def _format_lineno(session, line):
    """Helper function to format line numbers properly."""
    # Session 0 means the current session: show the bare line number;
    # otherwise qualify it as "session#line".
    return str(line) if session == 0 else "%s#%s" % (session, line)
|
f30e29eb016b5246ce94582f95ed0c4c4fe20432
| 62,819
|
from typing import Optional
import pathlib
def _log_figure_path(path: Optional[pathlib.Path]) -> Optional[pathlib.Path]:
    """Adds a suffix to a figure path to indicate the use of a logarithmic axis.

    If the path is None (since figure should not be saved), it will stay None.

    Args:
        path (Optional[pathlib.Path]): original path to figure, or None if figure should
            not be saved

    Returns:
        Optional[pathlib.Path]: new path to figure including _log suffix, or None if
            original path is None
    """
    if path is None:
        return None
    return path.with_name(f"{path.stem}_log{path.suffix}")
|
381bde67a89f0f61d7bdb9a9f2ea033664e6fab0
| 691,342
|
import functools
import asyncio
def task_handled_errors(func):
    """
    Decorator for custom tasks. Can *only* wrap coroutines. Any raised exceptions will call the
    KazTron general error handler.
    """
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        # noinspection PyBroadException
        try:
            return await func(*args, **kwargs)
        except (asyncio.CancelledError, KeyboardInterrupt, SystemExit):
            # Cancellation and shutdown signals must propagate untouched.
            raise
        except Exception:
            # assumes args[0] is the task's owning instance exposing
            # `.core.on_error` — TODO confirm against call sites.
            await args[0].core.on_error(func.__name__)
    return wrapper
|
cfddb6cbe99e565f7c50f4da64b77cbb362f1ad5
| 145,740
|
def define_tflite_tensors(interpreter):
    """
    Define input and output tensors (i.e. data) for the object detection classifier.

    Returns the input image tensor index and the list of output tensor
    indices in the order [boxes, classes, scores].
    """
    # Input tensor is the image.
    input_index = interpreter.get_input_details()[0]['index']
    outputs = interpreter.get_output_details()
    # Output 0: detection boxes — regions of the image where objects were found.
    boxes_index = outputs[0]['index']
    # Outputs 1 and 2: per-detection class labels and confidence scores,
    # shown on the result image together with the class label.
    classes_index = outputs[1]['index']
    scores_index = outputs[2]['index']
    return input_index, [boxes_index, classes_index, scores_index]
|
48ad28909f69c60a955eeae273969dd1b6c03327
| 84,183
|
def depth_sort(files):
    """Sort a list of files by folder depth, deepest paths first."""
    def depth(path):
        return path.count('/')
    return sorted(files, key=depth, reverse=True)
|
599a9b25d3720d9d19fe9dcf5807149d34f53ee4
| 452,183
|
def to_int(value: int):
    """
    Coerce `value` to an int:
    - If argument is of type int, simply returns argument
    - If argument is of type float, if its int representation equals the
      float, returns int(float); otherwise raises TypeError
    - If argument is a single-element list/set/tuple, converts the element
      recursively (bug fix: the old code used ``value[0]``, which crashes on
      sets since they do not support indexing)
    - If argument is of type str, parses it as a float and converts
      recursively
    - Raises TypeError otherwise

    :param value: int, float, str, or single-element container thereof
    :return: int
    :raises TypeError: when the value cannot be losslessly converted
    """
    if isinstance(value, int):
        return value
    if isinstance(value, float):
        if int(value) == value:
            return int(value)
        raise TypeError('value {} is not an int'.format(value))
    if isinstance(value, (list, set, tuple)):
        if len(value) == 1:
            # next(iter(...)) works for sets too, unlike value[0].
            return to_int(next(iter(value)))
        raise TypeError('value {} is not an int'.format(value))
    if isinstance(value, str):
        try:
            return to_int(float(value))
        except (TypeError, ValueError):
            raise TypeError('value {} is not an int'.format(value))
    raise TypeError('value {} is not an int'.format(value))
|
eac3527ee8f768dd7ed7fa7d0a236287120de9b0
| 502,385
|
from pathlib import Path
def find_bigfiles(data_folder, gte_size=100, verbose=False):
    """
    Return a list of file paths (truncated from the
    data_folder.name part) for all file exceeding
    gte_size MB.

    gte_size: multiple of 1 MB => 100 MB default.
    :: github file size limit = 100 * 2^20

    NOTE(review): when gte_size == 0 this returns None (not a list) after
    printing a message — confirm callers handle that.
    """
    if gte_size == 0:
        print('No file size limit: gte_size=0.')
        return
    mb = 2**20 #kb = 2**10 #gb = 2**30
    size_limit = gte_size * mb
    out = []
    p = Path(data_folder)
    # output only parts from data_folder name, i.e.: data/file.csv
    for f in p.glob('**/*.*'):
        if f.stat().st_size >= size_limit:
            # Truncate the path so it starts at the data folder's own name.
            parent_idx = f.parts.index(p.name)
            # NOTE(review): the comprehension variable shadows the outer `f`.
            fout = '/'.join(f for f in f.parts[parent_idx:])
            out.append(fout)
            if verbose: print(f, '\t', f.stat().st_size)
    return out
|
d33a74fc3a8c0e26b5c2ac4a3cf2833a0b74f14e
| 244,581
|
from typing import List
from typing import Counter
def count_four_sum(A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
    """https://leetcode.com/problems/4sum-ii/

    Count tuples (i, j, k, l) with A[i] + B[j] + C[k] + D[l] == 0 by pairing
    AB-sums against negated CD-sums (meet in the middle).
    """
    ab_sums = Counter(a + b for a in A for b in B)
    total = 0
    for c in C:
        for d in D:
            total += ab_sums[-(c + d)]
    return total
|
581dbd5f35e72a073908fa012839a62302dd2b6f
| 262,811
|
def pretty_time(dt):
    """
    Returns a human readable string for a time duration in seconds.

    :param dt: duration in seconds
    """
    if dt > 3600:
        hours = dt / 3600.
        if hours > 9:
            # Long durations: hours only.
            return '%dh' % int(hours)
        minutes = 60 * (hours - int(hours))
        return '%dh%2dm' % (int(hours), int(minutes))
    if dt > 60:
        minutes = dt / 60.
        if minutes > 9.5:
            return '%dm' % int(minutes)
        seconds = 60 * (minutes - int(minutes))
        return '%dm%2ds' % (int(minutes), int(seconds))
    if dt > 10:
        return '%ds' % int(dt)
    if dt > 0.05:
        # Sub-minute, show one decimal.
        return '%4.1fs' % dt
    # Effectively zero duration.
    return '-'
|
ce6b719527579161ef0ed1b39d4503862098dbe5
| 141,049
|
def jet_id(jets, options):
    """
    Loose jet ID taken from flashgg: https://github.com/cms-analysis/flashgg/blob/dd6661a55448c403b46d1155510c67a313cd44a8/DataFormats/src/Jet.cc#L140-L155
    """
    # Each entry is an element-wise boolean cut; AND them all together.
    cuts = (
        jets.neEmEF < 0.99,
        jets.neHEF < 0.99,
        jets.chHEF > 0,
        jets.chEmEF < 0.99,
        jets.nConstituents > 1,
    )
    passed = cuts[0]
    for cut in cuts[1:]:
        passed = passed & cut
    return passed
|
d522e56625a93f75460d83673bf89af5968c12eb
| 661,144
|
import torch
def create_mid_split_binary_mask(features):
    """
    Creates a binary mask of a given dimension which splits its masking at the midpoint.

    :param features: Dimension of mask.
    :return: Binary mask split at midpoint of type torch.Tensor
    """
    # Odd lengths put the extra element in the masked (leading) half.
    midpoint = (features + 1) // 2
    mask = torch.zeros(features).byte()
    mask[:midpoint] += 1
    return mask
|
ea3389ada50048b0d411e1716d70652dda31ec89
| 452,045
|
import six
def body_to_json(response):
    """
    Returns the JSON-encoded contents of `response`, raising a detailed error on failure.

    Parameters
    ----------
    response : :class:`requests.Response`
        HTTP response.

    Returns
    -------
    contents : dict
        JSON-encoded contents of `response`.

    Raises
    ------
    ValueError
        If `response`'s contents are not JSON-encoded.
    """
    try:
        return response.json()
    except ValueError:  # not JSON response
        lines = [
            "expected JSON response from {}, but instead got:".format(response.url),
            response.text or "<empty response>",
            "",
            "Please notify the Verta development team.",
        ]
        msg = six.ensure_str('\n'.join(lines))
        # Suppress the original exception context for a cleaner error.
        six.raise_from(ValueError(msg), None)
|
5735e60454397fd9400cdb36760ac5cac8c2eeee
| 610,535
|
def _comment(line: str) -> bool:
    """Returns True if line is a comment line (starts with '#')."""
    comment_prefix = "#"
    return line.startswith(comment_prefix)
|
1419e2785af617a815136f31b01a2103700f122d
| 346,414
|
import logging
def int_wlog(s, name):
    """
    Convert a string to integer, wrapped so that it issues error to the logging module, with the variable name

    :param s: string to convert
    :param name: variable name, to indicate in the error message
    :return: integer obtained as conversion of s
    """
    try:
        return int(s)
    except ValueError as err:
        # Log before re-raising so the failure is visible in the logs with
        # the variable's name attached.
        error_msg = "{} value \"{}\" is not numeric, conversion attempt returned \"{}\""\
            .format(name, s, str(err.args[0]))
        logging.error(error_msg)
        raise ValueError(error_msg)
|
48c44757fa6fc66f581b7315f39d88477870e6a3
| 275,046
|
def model_to_dictionary(db_model_obj, exclude_keys=None, relationships_to_include=None):
    """ Converts the given SQLAlchemy model object into a dictionary.

    NOTE(review): `relationships_to_include` is accepted but currently
    unused — confirm whether relationship handling was ever implemented.
    """
    exclude = set(exclude_keys or [])
    column_names = db_model_obj.__table__.columns.keys()
    # Guard against typos: every excluded key must be a real column.
    assert exclude.issubset(set(column_names))
    return {
        name: getattr(db_model_obj, name)
        for name in column_names
        if name not in exclude
    }
|
240a3d4584dff4f2d515921a1913305e149613a8
| 218,428
|
def get_output_objective_suffix(suffix=""):
    """
    Returns dictionary of dictionarys of suffixes to output files produced.

    Dict contains:
        objective - initial(s) of corresponding objective(s)
        filesuffix - output file suffix

    Args (optional) :
        suffix - appends the user defined suffix to one used by the output file
    """
    spec = (
        ("dret_index", "EPNFG", "downslope_retention_index"),
        ("upslope_source", "EPNFG", "upslope_source"),
        ("riparian_index", "EPNFG", "riparian_index"),
        ("slope_index", "FG", "slope_index"),
    )
    result = {}
    for key, objective, base_name in spec:
        # Join base name and optional suffix, trimming the trailing "_"
        # left behind when no suffix was supplied.
        file_name = '_'.join([base_name, suffix]).rstrip('_') + '.tif'
        result[key] = {'objective': objective, 'filesuffix': file_name}
    return result
|
fb6aac0814e2e950165133677e7332d59b546feb
| 426,233
|
def get_directory(path: str) -> str:
    """Extract directory path (everything up to and including the last '/')."""
    head = path.rsplit(sep="/", maxsplit=1)[0]
    return head + "/"
|
811913178ff74c4b7f7701cc43f988887266147d
| 522,166
|
def semi_loss_func(ac, full_ob, semi_dataset, is_relative_actions=False):
    """
    Compute the L2 loss between generated actions and semi-supervised actions.

    :param ac: generated actions (array-like supporting elementwise
        arithmetic and ``.mean()``, e.g. a numpy array or tensor)
    :param full_ob: the full observations of the semi-supervised dataset
    :param semi_dataset: the semi-supervised dataset (provides
        ``full_ob_2_acs``); may be None
    :param is_relative_actions: if True, `ac` is already the difference to
        penalize and the dataset lookup is skipped
    :return: the mean squared difference if semi_dataset exists, 0 otherwise
    """
    # Bug fix: return 0 BEFORE touching semi_dataset. The old code called
    # semi_dataset.full_ob_2_acs(...) unconditionally, so a None dataset
    # raised AttributeError instead of returning 0 as documented.
    if semi_dataset is None:
        return 0
    diff = ac if is_relative_actions else ac - semi_dataset.full_ob_2_acs(full_ob)
    return (diff ** 2).mean()
|
6718b42cbe86ea75d1593694704ac6a7ffd881f8
| 660,228
|
def metric_to_height(meters, centimeters):
    """Convert a height expressed as meters + centimeters into centimeters."""
    total_cm = meters * 100
    return total_cm + centimeters
|
a07d4c53fce5f361ea95f1d0a15ca75a954d7cba
| 241,318
|
from typing import Any
def is_iterable(something: Any) -> bool:
    """Return True iff *something* is a list, tuple or set.

    Note: despite the name, this does NOT test the general iterable
    protocol — only these three concrete container types (and subclasses).

    :param something: any object
    :return: bool
    """
    container_types = (list, tuple, set)
    return isinstance(something, container_types)
|
3670b2d7cf46c59d699b3704a657c372cb2e358f
| 449,372
|
import re
def to_snake_case(word: str) -> str:
    """Convert a CamelCase string to snake_case."""
    # Insert "_" before every uppercase letter (except at the start),
    # then lowercase the whole string.
    with_separators = re.sub(r"(?<!^)(?=[A-Z])", "_", word)
    return with_separators.lower()
|
9768fa74dff873991ce3e4284c7e7a5e2199e4f2
| 366,730
|
def category_table_build(osm_table_name, categorysql):
    """
    Build the SQL statement that populates an OSM category table.

    Args:
        osm_table_name (string): an OSM PostGIS table name
        categorysql (string): SQL expression computing the category
    Returns:
        sql (string): an INSERT ... SELECT statement

    NOTE(review): both arguments are interpolated directly into the SQL
    text, so they must come from trusted sources only.
    """
    template = ("INSERT INTO category_%s (osm_id, cat) "
                "SELECT DISTINCT osm_id, "
                "%s as cat "
                "FROM %s as osm")
    return template % (osm_table_name, categorysql, osm_table_name)
|
147812bb68845e3fd0dca7e805f610d26428eac3
| 11,601
|
import re
def split_id_strings(rawStrings):
    """Extract and normalize the "B" part of each "A=B" input string.

    Normalization lowercases the text and strips every character that is
    not alphanumeric or a comma.

    Args:
        rawStrings (list of strings): each of the form "A=B"
    Returns:
        (list of strings): normalized right-hand sides, in input order
    """
    def _normalize(raw):
        # Take the part after the first "=", lowercase it, drop special chars.
        right = raw.split("=")[1].lower()
        return re.sub("[^A-Za-z0-9,]+", "", right)

    return [_normalize(raw) for raw in rawStrings]
|
41bc0a2676e4ce1a4fd295e8abc058dfdb367e06
| 317,609
|
def childText(node, addTail=False):
    """Return the recursive text content of an lxml node.

    :param node: lxml element (anything with .text, .tail and child iteration)
    :param addTail: also append this node's tail text (set during recursion)
    """
    text = node.text or ""
    for child in node:
        # Recursion always yields a string, so plain concatenation is safe.
        text += childText(child, True)
    if addTail and node.tail is not None:
        text += node.tail
    return text
|
3d2580a0ae743057b14b07cb0648c646575fc258
| 393,799
|
import re
def items(s, func=str):
    """Split a comma- or whitespace-delimited string and map *func* over the parts."""
    parts = re.split(r"\s*,\s*|\s+", s)
    return [func(part) for part in parts]
|
71a1d046fabb5dfe2c60b7a5e734b13a71343a73
| 583,799
|
import hashlib
def support_hash_file(hash_type, data):
    """Hash *data* (bytes) with sha256 or md5 and return "algo:hexdigest".

    Returns None for any unrecognized hash_type.
    """
    if hash_type == "sha256":
        digest = hashlib.sha256(data).hexdigest()
        return "sha256:%s" % digest
    if hash_type == "md5":
        digest = hashlib.md5(data).hexdigest()
        return "md5:%s" % digest
    return None
|
6a06a717e7688b810c15f24c5600544f006f31b1
| 135,851
|
def get_class_name_from_filename(file_name):
    """Gets the class name from a file.

    Args:
        file_name: The file name to get the class name from.
            ie. "american_pit_bull_terrier_105.jpg"
    Returns:
        A string of the class name, e.g. "american_pit_bull_terrier".
    """
    # Bug fix: split('_')[0] returned only the first token ("american"),
    # contradicting the docstring example and the previously commented-out
    # regex (r'([A-Za-z0-9_]+)(_[0-9]+\.jpg)'). The class name is everything
    # before the trailing "_<index>.<ext>".
    stem = file_name.rsplit('.', 1)[0]  # drop the file extension
    return stem.rsplit('_', 1)[0]       # drop the trailing index token
|
b2117fc1f19bd78c15bc698e0c0bc0d9fe1ed5db
| 232,801
|
def namedtuple_to_dict(named_tuple):
    """Convert a namedtuple instance into a flat plain dict."""
    return dict(named_tuple._asdict())
|
37d1a38ddab12abeacf10da4c81b956ebe4efbd3
| 580,429
|
def aqi(concentration):
    """Convert a PM 2.5 concentration to AQI.

    Piecewise-linear mapping over concentration bands; concentrations
    above the last band return 999.
    """
    # (upper concentration bound, slope, concentration offset, AQI base)
    bands = (
        (12.0,   4.1667,   0.0,   0.0),
        (35.4,   2.1030,  12.1,  51.0),
        (55.4,   2.4623,  35.5, 101.0),
        (150.4,  0.5163,  55.5, 151.0),
        (250.4,  0.9910, 150.5, 201.0),
        (500.4,  0.7963, 250.5, 301.0),
    )
    for upper, slope, offset, base in bands:
        if concentration <= upper:
            return round(slope * (concentration - offset) + base)
    return 999.
|
4cebada496df49a46d12f3c15b7b9d460926cebe
| 539,474
|
def inherits_from(obj, a_class):
    """Return True iff obj's class is a strict (proper) subclass of a_class."""
    obj_type = type(obj)
    return obj_type != a_class and issubclass(obj_type, a_class)
|
2c5852ad42f6eb9a4614a29e0fae1c156e2a9295
| 340,454
|
def find_extremes(d):
    """Return (min, max) over the values of dict *d*.

    Raises ValueError on an empty dict (propagated from min/max).
    """
    vals = list(d.values())
    return min(vals), max(vals)
|
c41421498ef872bec61c23b10cef7c0626aeb903
| 354,347
|
def vel_final_dist_helper(initial_velocity, acceleration, dist):
    """
    Final velocity from the kinematics relation v^2 = u^2 + 2*a*d
    (constant acceleration); helper for vel_final_dist().

    :param initial_velocity: initial velocity u
    :param acceleration: constant acceleration a
    :param dist: distance traveled d
    :return: final velocity v (the non-negative root)
    """
    v_squared = initial_velocity ** 2 + 2 * acceleration * dist
    return v_squared ** 0.5
|
74cc521b37a58ffc766bb3fc1a9a7ad80fcfe35e
| 668,256
|
def drain_semaphore_filename(component: str) -> str:
    """Obtain the canonical drain semaphore filename for the specified component name."""
    return ".lta-{}-drain".format(component)
|
006f87e2edde04ad233ecdab90085fb4383b8144
| 282,260
|
def _unsigned24(data, littleEndian=False):
"""return a 24-bit unsigned integer with selectable Endian"""
assert len(data) >= 3
if littleEndian:
b0 = data[2]
b1 = data[1]
b2 = data[0]
else:
b0 = data[0]
b1 = data[1]
b2 = data[2]
val = (b0 << 16) + (b1 << 8) + b2
return val
|
3e31f440a604f0f2f6f7e3b2aab220e427ba88c6
| 292,018
|
def twit_interp(range_start: int, range_end: int, value_start: float, value_end: float, idx: int) -> float:
    """Linearly interpolate value_start..value_end over range_start..range_end at idx.

    Returns value_start when the range is degenerate (start == end).
    idx is not clamped, so points outside the range extrapolate beyond
    the value endpoints. Range and idx need not be integers, and either
    span may be negative.
    """
    span = range_end - range_start
    if span == 0:
        return value_start
    fraction = (idx - range_start) / span
    return value_start + (value_end - value_start) * fraction
|
e90d5c2763996cc93cfd094b2ff5654104c49b76
| 411,226
|
def is_conflict(a, b):
    """
    Check whether appointments a and b overlap in time.

    Touching endpoints (one's end equal to the other's start) count as
    a conflict. Requires Appointment instances to be orderable.

    Parameters
    ----------
    a : Appointment
    b : Appointment

    Returns
    -------
    bool
        True if they overlap, otherwise False.
    """
    # Order the pair so `first` is the earlier appointment; then a single
    # comparison decides overlap.
    first, second = sorted((a, b))
    return first.end >= second.start
|
584c84294c5da5c804f5aaf9e6cb8ae6a6708db2
| 510,061
|
def sext_11(value):
    """Sign-extend an 11-bit number to 32 bits.

    When bit 10 (the 11-bit sign bit) is set, the upper 21 bits are
    filled with ones; otherwise the value is returned unchanged.
    """
    sign_bit = value & 0x400
    return (0xfffff800 | value) if sign_bit else value
|
12f262a450e20bf8e5f6f57acf76ebb436260b45
| 554,709
|
def get_highest_fun_elem_exposing_fun_inter(fun_inter, fun_elem):
    """Return True if fun_elem is the highest functional element exposing fun_inter.

    That is the case when fun_elem has no parent, or when its parent does
    not itself expose the interface.
    """
    if not fun_elem.parent:
        return True
    parent_exposes = any(
        exposed_id == fun_inter.id
        for exposed_id in fun_elem.parent.exposed_interface_list
    )
    return not parent_exposes
|
b6d1be4be7a5dcdcbae8845b8cdc5df8265ef288
| 401,714
|
def validateInput(parserArgs, key, default=""):
    """Return the trimmed first value for *key* in parserArgs, or *default*.

    The value is only returned when the entry is present (not None) and its
    first element is non-empty after stripping surrounding whitespace.
    """
    raw = parserArgs[key]
    if raw is not None:
        trimmed = raw[0].strip(' \t\n\r')
        if trimmed:
            return trimmed
    return default
|
4692b63603131e9c40d6aa2daf6adf99ebe08a70
| 417,188
|
import json
def read_input(path):
    """Utility function that reads a file and returns its json content.

    Args:
        path (string): the path of the file.
    Returns:
        The content of the file, parsed as json.
    """
    # Bug fix: removed an unreachable `return []` that sat after the
    # return below (dead code — control never reached it).
    with open(path) as f:
        return json.load(f)
|
65d4bf6c145147991c8fad8fb44391dbc38ef476
| 520,441
|
from typing import Tuple
def utm_zone_from_epsg(epsg: int) -> Tuple[int, bool]:
    """Get `(utm_zone, is_northern_hemisphere)` from given EPSG code.

    Valid inputs are 32601-32660 (UTM north) and 32701-32760 (UTM south);
    anything else raises ValueError.
    """
    northern = 32601 <= epsg <= 32660
    southern = 32701 <= epsg <= 32760
    if not (northern or southern):
        raise ValueError("Can not convert EPSG {e} to UTM zone".format(e=epsg))
    # The last two digits are the zone number.
    return epsg % 100, northern
|
933eacad941649d60590efad1e4d7a2988eb5036
| 162,558
|
def header_name_of_wsgi_key(wsgi_key: str) -> str:
    """
    >>> header_name_of_wsgi_key('HTTP_ACCEPT_LANGUAGE')
    'Accept-Language'
    >>> header_name_of_wsgi_key('HTTP_AUTHORIZATION')
    'Authorization'
    """
    if not wsgi_key.startswith('HTTP_'):
        return ''
    # Acronyms kept fully uppercase instead of being capitalized.
    keep_upper = {'IM', 'HTTP2', 'MD5', 'TE', 'DNT', 'ATT', 'UIDH', 'XSS'}
    words = wsgi_key[5:].split('_')
    return '-'.join(w if w in keep_upper else w.capitalize() for w in words)
|
0f024732e21776fce127c7b3b4acd7a51173512f
| 439,965
|
import json
def _GenerateDefaultValue(value):
"""Converts a JSON object into a base::Value entry. Returns a tuple, the first
entry being a list of declaration statements to define the variable, the
second entry being a way to access the variable.
If no definition is needed, the first return value will be an empty list. If
any error occurs, the second return value will be None (ie, no way to fetch
the value).
|value|: The deserialized value to convert to base::Value."""
if type(value) == bool or type(value) == int:
return [], 'base::Value(%s)' % json.dumps(value)
elif type(value) == str:
return [], 'base::Value("%s")' % value
elif type(value) == list:
setup = ['base::Value default_value(base::Value::Type::LIST);']
for entry in value:
decl, fetch = _GenerateDefaultValue(entry)
# Nested lists are not supported.
if decl:
return [], None
setup.append('default_value.Append(%s);' % fetch)
return setup, 'std::move(default_value)'
return [], None
|
1334b235c439123822afba06e598f25c3d235275
| 358,343
|
import math
def sublist_split(items, parts=None, count_per_sublist=None):
    """
    Split *items* into several sub-lists, to ease parallel processing.

    Exactly one of *parts* / *count_per_sublist* must be given (and be
    truthy; passing 0 counts as "not given").

    Args:
        items (list): A list of items to split in parts.
        parts (int): Desired number of parts.
        count_per_sublist (int): Desired number of items per sub-list.
    Returns:
        (list): A list of non-empty sub-lists covering *items* in order.
    Raises:
        ValueError: when both or neither sizing parameter is given.
    """
    # XOR: exactly one of the two sizing modes must be selected.
    if not (
        (parts and not count_per_sublist) or (not parts and count_per_sublist)
    ):
        raise ValueError(u"You need to specify parts or count_per_sublist \
            parameters")
    output = []
    # Mode 1: a part count was given -> derive the per-sublist item count.
    if parts:
        count_per_sublist = int(math.ceil(len(items) / parts))
    # Mode 2: recompute the part count from the (given or derived)
    # per-sublist size, so both variables are consistent below.
    if count_per_sublist:
        parts = int(math.ceil(len(items) / count_per_sublist))
    # One extra index is scanned (parts + 1); empty tail slices are
    # filtered out by the truthiness check before appending.
    for index in range(0, parts + 1):
        sub_part = items[
            index * count_per_sublist:index * count_per_sublist +
            count_per_sublist]
        if sub_part:
            output.append(sub_part)
    return output
|
aaf25b6b13195a94a1aa533502d9ba081f9bc0e0
| 269,702
|
def _cleanUpAllPullRequests(results, all_pull_requests):
"""
Helper function for _getAllPullRequests(). Clean up pull requests and strip out cursor info.
GIVEN:
results (dict) -- initial query response with metadata
all_pull_requests (list) -- nested list of pull_requests with comments and metadata
RETURN:
results (dict) -- cleaned up results
"""
results["data"]["repository"]["pullRequests"]["edges"] = all_pull_requests
results["data"]["repository"]["pullRequests"].pop("pageInfo")
return results
|
ef9cbe33142d910a5a7b6db17e6fed04631df989
| 285,360
|
def qs_checks_ssl(qs):
    """
    Decide whether SSL certificate validity should be checked.

    We verify by default; an explicit ?x-sslVerify=false disables it.
    The option is prefixed with x- because it's non-standard in MongoDB
    connection strings.
    """
    values = qs.get("x-sslVerify", [])
    return "false" not in values
|
5e42a8b488150e057a277da26a51667e60dc2604
| 457,493
|
def rotate_clockwise(shape):
    """Rotate a rectangular 2-D matrix 90 degrees.

    result[r][c] == shape[c][W-1-r], where W is the original width.
    NOTE(review): in plain math coordinates this is a counter-clockwise
    rotation; with y pointing down (screen/grid coordinates) it reads as
    clockwise — confirm against the callers' convention.
    """
    width = len(shape[0])
    rotated = []
    for col in range(width - 1, -1, -1):
        rotated.append([row[col] for row in shape])
    return rotated
|
20d7048d8c4ddbe9e7efbc4443bd7ac481cafad1
| 395,841
|
def my_key(t):
    """Sort key for (song, artist) tuples: order by artist, then by song.

    Comment/uncomment the return statements below to experiment with
    different sorting logics; *t* is one (song, artist) tuple.
    """
    # return t[1]          # by artist name only
    return t[1], t[0]      # by artist name, then by song
    # return len(t[0])     # by length of the song title
    # return t[0][-1]      # by last character of the song title
|
ae416506de1820290d97d558ff899ba930d68705
| 48,937
|
def service_stats_deserialize(generated):
    """Deserialize a generated ServiceStats object into a plain dict."""
    geo = generated.geo_replication
    return {
        'geo_replication': {
            'status': geo.status,
            'last_sync_time': geo.last_sync_time,
        }
    }
|
95a1e7e61ad481e01a1566603c1e79e74ec05d02
| 542,818
|
def format_res(apriori_results):
    """Convert apriori results to the explanation format Mb Diff expects.

    Each rule's left-hand side is a collection of "column:value" terms;
    these are turned into one {column: value} dict per rule.

    :param apriori_results: iterable of rule objects exposing ``.lhs``
    :return: list of dicts, one per rule, in input order
    """
    dict_form_explanations = []
    for rule in apriori_results:
        explanation = {}
        for term in rule.lhs:
            # Robustness fix: split on the FIRST ":" only, so values that
            # themselves contain ":" no longer raise ValueError.
            col, value = term.split(":", 1)
            explanation[col] = value
        dict_form_explanations.append(explanation)
    return dict_form_explanations
|
deb191ffda087d1bbb87ff8c5952416afd66c18e
| 390,753
|
def L1(yhat, y):
    """
    Compute the L1 loss: the sum of absolute differences.

    Arguments:
    yhat -- vector of size m (predicted labels)
    y -- vector of size m (true labels)

    Returns:
    loss -- the value of the L1 loss
    """
    return sum(abs(yhat - y))
|
28a931c8cf3fbe94e19bb641b15653a17a9ef55c
| 592,533
|
def square_sector(r_out, r_in=0, L=1, L0=0, phi=360, angle=0, a=10):
    """Return the shape function and start coords of a wire
    with a square cross section.

    Parameters
    ----------
    r_out : int
        Outer radius in nm.
    r_in : int
        Inner radius in nm; r_in > 0 selects the slab r_in <= z < r_out.
    L : int
        Length of wire from L0 in nm, -1 if infinite in x-direction.
    L0 : int
        Start position in x.
    phi : ignored
        Ignored variable, to have same arguments as cylinder_sector.
    angle : ignored
        Ignored variable, to have same arguments as cylinder_sector.
    a : int
        Discretization constant in nm.

    Returns
    -------
    (shape_func, (start_coords))
    """
    def _coords(site):
        # Accept either a site object carrying .pos or a bare (x, y, z).
        try:
            return site.pos
        except AttributeError:
            return site

    if r_in > 0:
        def sector(site):
            x, y, z = _coords(site)
            in_cross_section = -r_in <= y < r_in and r_in <= z < r_out
            if L > 0:
                return in_cross_section and L0 <= x < L
            return in_cross_section
        start = (L - a, 0, r_in + a)
    else:
        def sector(site):
            x, y, z = _coords(site)
            in_cross_section = -r_out <= y < r_out and -r_out <= z < r_out
            if L > 0:
                return in_cross_section and L0 <= x < L
            return in_cross_section
        start = (L - a, 0, 0)
    return sector, start
|
4e2ec31d36d87dab52dcba28ced3a95a51d30d33
| 553,971
|
def _findBeginning(file_name, loc):
"""Scan given TRY dat file and find end of header (start of data)
Arguments:
file_name {str} -- Name of TRY data file
loc {str} -- Location of TRY data file
Returns:
(int, string) -- (lines before data starts,
column header)
"""
with open(loc + file_name, 'r') as dat_file:
last_line = dat_file.readline()
current_line = dat_file.readline()
dat_start = 2
while current_line[:3] != '***':
last_line = current_line # save column header
current_line = dat_file.readline()
dat_start += 1
if dat_start == 100:
break
# get header as list of string
last_line = last_line.split()
return (dat_start, last_line)
|
20be3ccefafc61bfa3f769b993e5f53c667b37ba
| 23,409
|
def open_file(filePath, mode="a"):
    """
    Open *filePath* with *mode* and return the file object.

    On failure an error message is printed and None is returned
    instead of raising.
    """
    try:
        return open(filePath, mode)
    except IOError:
        print("Error: could not open", filePath)
        return None
|
f1572d4e6ca14ad8e18ad01c91abcc48785b2059
| 462,327
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.