| content (string, 39-14.9k chars) | sha1 (string, 40 chars) | id (int64, 0-710k) |
|---|---|---|
def epsi_vapor(Fr):
"""
Calculates the vapor content of bubble layer
Parameters
----------
Fr : float
        The Froude number, [dimensionless]
Returns
-------
epsi_vapor : float
The vapor content of bubble layer, [dimensionless]
References
----------
    Дытнерский, page 207, formula 5.47
"""
return Fr**0.5 / (1 + Fr**0.5)
|
940d8d65b0ef914df3eda66e414af2c6a85f99b3
| 53,242
|
def to_typed(s):
"""Return a typed object from string `s` if possible."""
if not isinstance(s, str):
        raise ValueError("Input object has to be a string.")
if '.' not in s:
try:
return int(s)
except ValueError:
pass
try:
return float(s)
except ValueError:
return s
|
0e9219feffada78ea96d1a8f7d362e63b7cd44ab
| 53,251
|
def array_reshape_to_dims(arr, *dims, k=2):
"""
Change the array shape to k dimensions.
ARGS:
arr: Original array.
*dims: Optional argument with the leading
dimensions' shapes. If not provided,
            the original array's principal ones are used.
k: int, the total new dimensions of the array.
"""
if len(arr.shape) != k:
if len(list(dims)) > 0:
# # use the dimensions provided for the array.
if len(dims) == k:
return arr.reshape(tuple(dims))
return arr.reshape(tuple(dims) + (-1,))
# # keep the principal k-1 dims unchanged.
sh = arr.shape[:k-1]
return arr.reshape(sh + (-1,))
return arr
|
49a9b123c0bfa50ed09c6591096af616893d0f02
| 53,263
|
from dateutil import tz
from datetime import datetime
def utctolocaltime(strtime, localzone='Australia/Melbourne'):
""" covert input string time to local time
eg.
- utctolocaltime('2021-10-06T17:33:00Z', localzone='Australia/Melbourne')
- output: 2021-10-07 04:33:00+11:00
"""
from_zone = tz.gettz('UTC')
to_zone = tz.gettz(localzone)
outtime = datetime.strptime(strtime, '%Y-%m-%dT%H:%M:%SZ')
outtime = outtime.replace(tzinfo=from_zone)
outtime = outtime.astimezone(to_zone)
#print(type(outtime))
#return outtime.strftime("%c")
return outtime
|
7586db423be7f0403823bf698a6bdf2504c83e61
| 53,265
|
from pathlib import Path
import json
def conj(frm, alph):
"""
Retrieve appropriate conjugation dictionaries
Parameters
----------
frm : str
A string used to determine the formality
alph : str
A string used to determine the type of alphabet
Returns
-------
list of dicts
a list of dictionaries containing appropriate conjugations
"""
dir_path = Path(__file__).parents[0]
path = f'{dir_path}/data/conjugations.json'
with open(path, 'r', encoding='utf-8') as file:
conjugations = json.load(file)['subjective']
# retrieve conjugations
past = conjugations[frm][alph]['past']
present = conjugations[frm][alph]['present']
perfect = conjugations[frm][alph]['perfect']
imperative = conjugations[frm][alph]['imperative']
return [past, present, perfect, imperative]
|
f0b49af4c09f13825cdf129d68d782229dc0809a
| 53,268
|
def date_in(d, dates):
"""
compare if d in dates. dates should be a tuple or a list, for example:
date_in(d, [d1, d2])
and this function will execute:
d1 <= d <= d2
and if d is None, then return False
"""
if not d:
return False
return dates[0] <= d <= dates[1]
|
cda34739762953fbddd903da9ae7ca37a5d7c92d
| 53,277
|
import re
def extract_version(doc):
"""
Extracts the version from the docstring of an Example.
The Version is specified in the following format:
    LabOne Version >= <major>.<minor>[.<build>]
Returns:
(major, minor, build)
Raises:
Exception if the Version has the wrong format.
Exception if there is no or more than one LabOne Version specified.
"""
results = re.findall(r"LabOne Version >= ([0-9\.]*)", doc)
if len(results) == 0:
raise Exception("No LabOne Version is defined in the docstring")
if len(results) > 1:
raise Exception(
"more than one LabOne version is defined but only one is allowed"
)
version = results[0]
major_minor_format = bool(re.match(r"^\d\d\.\d\d$", version))
    major_minor_build_format = bool(re.match(r"^\d\d\.\d\d\.\d+$", version))
if major_minor_format:
min_major, min_minor = map(int, version.split("."))
min_build = 0
elif major_minor_build_format:
min_major, min_minor, min_build = map(int, version.split("."))
else:
raise Exception(
f"Wrong ziPython version format: {version}. Supported format: MAJOR.MINOR or \
MAJOR.MINOR.BUILD"
)
return (min_major, min_minor, min_build)
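# Usage sketch (illustrative only; the docstring text below is assumed, not
# taken from the original source):
# >>> extract_version("Requires LabOne Version >= 21.08.1234")
# (21, 8, 1234)
# >>> extract_version("Requires LabOne Version >= 21.02")
# (21, 2, 0)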
|
a1e4319db4af680daf54c0f0edd801f08cb8a2b2
| 53,283
|
def are_equal_graphs(G1, G2):
"""
Check graph equality (equal node names, and equal edges between them).
:param G1: first graph
:param G2: second graph
:return: are they equal
"""
if set(G1.nodes()) != set(G2.nodes()):
return False
return all(map(lambda x: G1.has_edge(*x), G2.edges())) and all(map(lambda x: G2.has_edge(*x), G1.edges()))
|
382350eeccdaae0a0c060f7dc0c94020a6c82827
| 53,286
|
def _sanitizeName(name):
"""
Sanitize name. Remove . - ' and lowercases.
"""
name = name.lower() # lower.
name = name.replace('.','') # remove periods.
name = name.replace('-','') # remove dashes.
name = name.replace("'",'') # remove apostrophies.
# return it.
return name
|
66806fd4bb9a06a2159107852252c19591f2cae9
| 53,287
|
def mod_5(x):
"""Return the remainder of x after dividing by 5"""
return x % 5
|
d4aedd1349379ceca5b5af7f1b54bf55b8389aaf
| 53,288
|
import torch
def reduce_ctxt(
ctxt: torch.Tensor, mask: torch.Tensor, reduction_type: str
) -> torch.Tensor:
"""
Reduce ctxt tensor to appropriate dimension for bi-encoder scoring.
Functionality copied from TransformerEncoder.reduce_output.
:param ctxt:
all transformer outputs.
:param mask:
context mask
:param reduction_type:
how to reduce the context
:return reduced:
return reduced context tensor
"""
if reduction_type == 'first':
return ctxt[:, 0, :]
elif reduction_type == 'max':
return ctxt.max(dim=1)[0]
elif reduction_type == 'mean':
divisor = mask.float().sum(dim=1).unsqueeze(-1).clamp(min=1).type_as(ctxt)
output = ctxt.sum(dim=1) / divisor
return output
else:
raise ValueError("Can't handle --reduction-type {}".format(reduction_type))
|
f229531d8a3ed8dd05329d781c823c41ca25386e
| 53,296
|
def result_color( result ):
"""
Translate the result string into a color character. These colors must
    be known to the preamble "style" list.
"""
if result == 'pass': return 'g'
if result == 'ran': return 'g'
if result == 'start': return 'c'
if result == 'notdone': return 'c'
if result == 'diff': return 'y'
if result == 'fail': return 'r'
if result == 'notrun': return 'h'
if result == 'timeout': return 'm'
if result == 'MIA': return 'y'
return 'w'
|
3b10f528833134fc0753aaf58e20bc9719904f64
| 53,297
|
from typing import Union
from typing import List
from typing import Any
from typing import Dict
def get_sample_from_batched_tensors(tensors: Union[List[Any], Dict[str, Any]],
idx: int) -> Dict[str, Any]:
"""Collect sample at index idx of a given dictionary or list.
Args:
tensors: Rowified or columnarized format of a batch of inputs.
idx: Index to fetch the batch from
Returns:
Same dictionary where idx is chosen from values.
"""
if isinstance(tensors, list):
return tensors[idx] if idx < len(tensors) else {}
return {key: val[idx] for key, val in tensors.items()}
|
ecab6e7e51f0d956f19de1697263e58e4259f2e4
| 53,304
|
def gassmann(K0, Kin, Kfin, Kfout, phi):
"""
    Use Gassmann's equation to perform fluid substitution. Use the bulk modulus
    of a rock saturated with one fluid (or the dry frame, Kfin=0) to predict the
    bulk modulus of the same rock saturated with a second fluid.
    :param K0: Frame mineral modulus (GPa)
    :param Kin: Input rock modulus (can be fluid saturated or dry)
    :param Kfin: Bulk modulus of the pore-filling fluid of the initial rock
(0 if input is the dry-rock modulus)
:param Kfout: Bulk modulus of the pore-filling fluid of the output
(0 if output is dry-rock modulus)
:param phi: Porosity of the rock
"""
A = Kfout / (phi*(K0 - Kfout))
B = Kin / (K0 - Kin)
C = Kfin / (phi*(K0 - Kfin))
D = A + B - C
Kout = K0*D / (1 + D)
return(Kout)
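# Illustrative use (values assumed, not from the original snippet): substitute
# brine (Kf ~ 2.8 GPa) into a dry rock frame with mineral modulus 37 GPa and
# 25% porosity; the result is the predicted brine-saturated bulk modulus
# (roughly 16.5 GPa for these assumed inputs).
# >>> gassmann(K0=37.0, Kin=12.0, Kfin=0.0, Kfout=2.8, phi=0.25)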
|
5421459125533a73d13de1d2063b8332dc8dac31
| 53,307
|
from typing import Iterable
def flatten(nestedlist: Iterable) -> list:
"""Flatten a nested list."""
newlist = []
for item in nestedlist:
if isinstance(item, list):
newlist.extend(flatten(item))
else:
newlist.append(item)
return newlist
|
c6c3f3fdf5ecf019c8add6e0037fe7020041657a
| 53,308
|
def _line_to_list(line):
"""Converts a pla line to a list of ints.
Raises a ValueError if it encounters an unexpected character."""
l = []
for c in line:
if c == '0':
l.append(0)
elif c == '1':
l.append(1)
elif c == '-' or c == '~':
l.append(2)
else:
            raise ValueError(f"unexpected character: {c!r}")
return l
|
c7bed118f3134ba6299a95f43ece4986d83b1f16
| 53,319
|
def user_upload_directory(instance, filename):
"""Return the upload directory for a given File (should have an owner)."""
return 'user_{0}/{1}'.format(instance.owner.id, filename)
|
3357bd3fa6e55af0519c77759c9c661b72a08247
| 53,320
|
def sos_scalar_criterion(params):
"""Calculate the sum of squares."""
return (params["value"].to_numpy() ** 2).sum()
|
31ab98ce12459ab8e6bc4d20e8e70e4c97dbeeca
| 53,321
|
def substrings_present(data, snippets, exclude=False):
"""Return True if all the strings in `snippets` are found in `data`.
If `exclude` is True, instead return True if none of the strings
in `snippets` are found.
"""
results = (snip.encode("utf-8") in data for snip in snippets)
if exclude:
results = (not val for val in results)
return all(results)
|
e9893a9bc981e754d747999b2ed8eb95df64e4eb
| 53,329
|
def parse_and_append(cls, string, index, match):
"""Utility function which wraps a call to parse_from.
Call parse_from on the specified class, append the new instance to
the match list, and return the original return value of parse_from.
This can be used in complex parse_from implementations to avoid
writing repetitive list append statements for each parsed child.
"""
instance, index = cls.parse_from(string, index)
match.append(instance)
return instance, index
|
930ee53f024b9c98656a5f4bf968c110b7262354
| 53,334
|
def sort_by_x(df, x):
"""Sort a dataframe by a column """
return df.sort_values(by=x)
|
2b6ba21774394978e4fa52eea3efd855c4e54dc1
| 53,339
|
def team_corners(df, date, team):
"""
Returns a dict with average number of corners a team scored by date.
Corners are calculated separately as it is considered a team statistic.
"""
team_df = df[(df.team == team) & (df.kickoff < date)]
corners = team_df["COR"].sum()
matches = team_df.kickoff.unique().shape[0]
try:
corners_avg = round(corners/matches ,4)
except ZeroDivisionError:
corners_avg = round(corners, 4)
corners_dict = {"COR" : corners_avg}
return corners_dict
|
cc5096e379da6ac06d7febece0e69f34f67826f9
| 53,342
|
def get_nearest_coords(cube, latitude, longitude, iname, jname):
"""
Uses the iris cube method nearest_neighbour_index to find the nearest grid
points to a given latitude-longitude position.
Args:
cube (iris.cube.Cube):
Cube containing a representative grid.
latitude (float):
Latitude coordinates of spot data site of interest.
longitude (float):
Longitude coordinates of spot data site of interest.
iname (str):
String giving the name of the y coordinates to be searched.
jname (str):
String giving the names of the x coordinates to be searched.
Returns:
Tuple[int, int]: Grid coordinates of the nearest grid point to the
spot data site.
"""
i_latitude = cube.coord(iname).nearest_neighbour_index(latitude)
j_longitude = cube.coord(jname).nearest_neighbour_index(longitude)
return i_latitude, j_longitude
|
711cb2a7bd1dd85fc69992df87c972b5557e79ce
| 53,350
|
def sentence_error(source, target):
"""
Evaluate whether the target is identical to the source.
Args:
source (str): Source string.
target (str): Target string.
Returns:
        int: 0 if the target is equal to the source, 1 otherwise.
"""
return 0 if target == source else 1
|
02cfc350763b176d0296007ef44321674ef65d27
| 53,351
|
def solution() -> int:
"""
For this solution we have the generalized equation
X (X-1) = 1
____ * _____ ___
N N - 1 2
we can turn it into
2x^2 - 2x - N^2 + N = 0
That's a quadratic diophantine equation
we can use this site to generate a recursive solution for our equation
https://www.alpertron.com.ar/QUAD.HTM
the input should be
2
0
-1
-2
1
0
it return the functions to retrieve X and N
Xn+1 = 3 X + 2 N - 2
Nn+1 = 4 X + 3 N - 3
so lets code that.
>>> solution()
756872327473
"""
"""
the number of the first arrange of blue disks that taken 2 in a row return 50%
of prob to happen
"""
blue_disks = 85
# the number of the first total disks to this probability happen
total_disks = 120
"""
the maximum length of total_disks to find the blue and Total
that has 0.5 probability to be picked in a row
"""
total_length = 1 * (10 ** 12)
while total_disks <= total_length:
"""
x = blue_disks
n = total_disks
Xn+1 = 3 X + 2 N - 2
Nn+1 = 4 X + 3 N - 3
apply this functions to get the correct values
"""
"""
We have to set this variables separated by commas so in total_disks attribution
the value in blue_disks
will be the value that was set before the current while iteration
"""
blue_disks, total_disks = (3 * blue_disks) + (2 * total_disks) - 2, (
4 * blue_disks
) + (3 * total_disks) - 3
return blue_disks
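# Sanity check (illustrative, not from the original): the recurrence maps
# (85, 120) -> (493, 697) -> (2871, 4060) -> ..., and the loop stops at the
# first arrangement whose total exceeds 10**12.
# >>> solution()
# 756872327473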
|
b72d2991864de5a220babe0c39b4b5c7f42c5378
| 53,352
|
def solSize(self, puzzle):
"""
This method returns the number of words used in the solution
"""
return puzzle.solSize
|
d98c2c8f6a855cdddd7cffa682d0ae048cf5213e
| 53,356
|
def CheckChangeWasUploaded(input_api, output_api):
"""Checks that the issue was uploaded before committing."""
if input_api.is_committing and not input_api.change.issue:
return [output_api.PresubmitError(
'Issue wasn\'t uploaded. Please upload first.')]
return []
|
39aef498aba2ebefb8da625a0bbff1a16b2c07cc
| 53,357
|
def width(image):
"""The width of the image"""
return image.shape[1]
|
46cce4ba43d8b56dc627c192f5ec83d1767fafad
| 53,364
|
import json
def load_mock(file_path):
"""Return the content of the mock file trying to validate JSON
Raises:
json.JSONDecodeError
IOError
"""
with open(file_path, 'rb') as f:
try:
json_decoded = json.load(f)
return json_decoded
except json.JSONDecodeError as e:
raise json.JSONDecodeError(e.msg, e.doc, e.pos)
|
febfa9f838270e4fbadcffc279b6fa30e858c7db
| 53,365
|
from unittest.mock import Mock
def _mock_soco_new(ip_address):
"""Helper function that replaces the SoCo constructor. Returns Mock objects for
Sonos devices at two specific IP addresses."""
if ip_address in ["192.168.0.1", "192.168.0.2"]:
return Mock(
visible_zones=["192.168.0.1"], all_zones=["192.168.0.1", "192.168.0.2"]
)
else:
raise ValueError
|
4604a9e6a20b248bc574478a5372b525e0ccc8b0
| 53,367
|
import json
def json_to_dict(string):
"""Evaluate the string as json
If string is empty, give an empty dict
>>> assert json_to_dict('') == {}
>>> assert json_to_dict('{"1": "2"}') == {"1": "2"}
"""
return json.loads(string or "{}")
|
65baee91e3c27a94b9a9eec533ade279a20468c2
| 53,373
|
def rgb2hex(rgb):
"""
Convert RGB(A) tuple to hex.
"""
if len(rgb) > 3:
rgb = rgb[:-1]
return "#{0:02x}{1:02x}{2:02x}".format(*(int(v*255) for v in rgb))
|
66810dbd90ef7f6b85c3c7a80ebff561aeaacc35
| 53,379
|
def division(first_term, second_term):
"""Division"""
return first_term / second_term
|
9e5fb56673ecdce512f5844d3f21e744f9041756
| 53,383
|
def calculate_issue_importance(num_trackers, user, light_user):
"""Calculates issue's importance, based on the changelog's popularity
and if it was reported by human or a robot.
"""
importance = 1 + num_trackers
if user:
importance *= 10
return importance
|
ea638d18f6df4b9138e41a42c6b2df01893dcd87
| 53,384
|
def compose(fun_f, fun_g):
"""
Composition of two functions.
:param fun_f: callable
:param fun_g: callable
:return: callable
"""
return lambda x: fun_f(fun_g(x))
|
8a40332d610fd4ae8bbe89b2843c95d7493822ff
| 53,388
|
def is_enz (node):
"""Tells if node is an enzyme (EC) or not"""
split = node.split(".")
if len(split) == 4 :#and np.all(np.array([sp.isdigit() for sp in split]) == True) :
return True
else:
return False
|
2ae0bac2fd53ef0df82e8415bb1e5c37cd1c9d12
| 53,392
|
def is_power_of_two(num):
"""Check whether a number is power of 2 or not."""
return (num & (num - 1) == 0) and num != 0
|
66b6264e4df7f74326cc8927f1cd1027fc8a8196
| 53,398
|
def _image_name(image):
"""Convert long image name to short one.
    e.g. 'dockerregistry-v2.my.domain.com.foobar/isilon-data-insights:latest@sha256:HASH' -> 'dockerregi.../isilon-data-insights:latest'
"""
try:
# remove @sha256:....
image = image.split("@")[0]
except IndexError:
pass
try:
# reduce username or registry fqdn
reg, img = image.split("/")
image = reg[:10] + (reg[10:] and '...') + "/" + img
except ValueError:
pass
return image
|
28472f13659f3cfb011230d9ddc24480c48b03de
| 53,399
|
def undecorated_component(component):
"""Returns the given component string without leading/trailing whitespace
and quotation marks."""
return component.strip(" \t\r\n\"'")
|
31db62c005604bf1e46668d6a925fb24834d5b43
| 53,401
|
def incidental_reference_pressure(p_d, γ_inc):
"""Calculate DNVGL-ST-F101 «incidental reference pressure».
:param p_d: design pressure :math:`(p_d)`
:type p_d: float
:param γ_inc: incidental to design pressure ratio :math:`(\gamma_{inc})`
:type γ_inc: float
:returns: p_inc incidental reference pressure :math:`(p_{inc})`
:rtype: float
Reference:
DNVGL-ST-F101 (2017-12)
eq:4.3 sec:4.2.2.2 p:67 :math:`(p_{inc})`
.. doctest::
    >>> incidental_reference_pressure(100e5, 1.1)
11000000.0
"""
p_inc = p_d * γ_inc
return p_inc
|
dd9e5c48324d08ce8a581d102114f99d12a3cf29
| 53,403
|
def rh_dwyer(raw_value):
"""Returns Dwyer sensor relative humidity (RH) from a raw register value.
Range is 0-100%.
"""
# Humidity linear calibration = 100 / (2^15 - 1)
RH0 = 0.0
RHs = 100.0 / (2 ** 15 - 1)
return (RH0 + RHs * float(raw_value), "percent")
|
4d588b6d5a8b54a899b019e1cd04f992d5da34dc
| 53,404
|
from typing import Sequence
def convert_bitstring_to_int(bitstring: Sequence[int]) -> int:
"""Convert a bitstring to an integer.
Args:
bitstring (list): A list of integers.
Returns:
        int: The value of the bitstring, where the first bit is the least
        significant (little endian).
"""
return int("".join(str(bit) for bit in bitstring[::-1]), 2)
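# Illustrative example (assumed, not from the original snippet): the first
# element is the least significant bit, so [1, 0, 1, 1] reads as binary 1101.
# >>> convert_bitstring_to_int([1, 0, 1, 1])
# 13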
|
a94fe0db9e89678e276ae7a426124cbeb5c495bf
| 53,408
|
def hkl_type(h, k, l):
"""
Returns the general hkl type (e.g. 'hh0') for a given set of miller
indices. Valid for all Bravais lattices.
"""
zero_count = sum(1 if i == 0 else 0 for i in [h, k, l])
diff_count = len(set([h, k, l]))
if diff_count == 3:
if zero_count == 0:
return 'hkl'
else:
return 'hk0' if not l else 'h0l' if not k else '0kl'
elif diff_count == 2:
if zero_count == 0:
return 'hhl'
elif zero_count == 1:
return 'hh0' if h == k else '0kk'
else:
return 'h00' if h else '0k0' if k else '00l'
else:
return 'hhh'
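# Illustrative examples (assumed, not from the original snippet):
# >>> hkl_type(1, 2, 3)
# 'hkl'
# >>> hkl_type(1, 1, 0)
# 'hh0'
# >>> hkl_type(2, 0, 0)
# 'h00'
# >>> hkl_type(1, 1, 1)
# 'hhh'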
|
a2ddfcc6b133182a39ca48fe00c4e5e1cad43c5e
| 53,409
|
def requests_length(requests):
"""Total number of pageviews across all sessions."""
return len([r for r in requests if 'is_pageview' in r and r['is_pageview'] == 'true'])
|
8980ebdf8fe71aa5ffcf3bcc54920d1b0c83cf65
| 53,410
|
def deduped_list(arr):
"""Remove duplicate values from a list."""
return list(set(arr))
|
89b7b87a86471db6a76bc6342b7c6b554c434686
| 53,412
|
def remove_single_chars(text: str) -> str:
"""
Returns the given text with the words that are simple chars (have length 1) removed.
:param text: The text to remove single chars. (String)
:return: The given text with all the single chars removed. (String)
"""
return ' '.join(word for word in text.split()
if len(word) > 1)
|
72299add5a04ae24619d1d20cb3689fe7c7e5742
| 53,415
|
def compute_range_bits(bits_per_sample):
"""
Compute list of values for unpacking samples.
[Used by decode_samples_red()]
Notes
-----
|
| **Example:**
|
| >>bits_per_sample=2
| >>list(compute_range_bits(bits_per_sample))
| [6, 4, 2, 0]
"""
return(reversed(range(0,8,bits_per_sample)))
|
46b45d62ea21c4f658c265d0a6b6a73f4bdf4115
| 53,421
|
from typing import Dict
from typing import Any
def geometries_match(geometry_a: Dict[str, Any],
geometry_b: Dict[str, Any],
likeness_tol: float = 0.001
) -> bool:
"""Determine whether two site geometries match.
Geometry data should be formatted the same as produced by
:meth:`robocrys.site.SiteAnalyzer.get_site_geometry`.
Args:
geometry_a: The first set of geometry data.
geometry_b: The second set of geometry data.
likeness_tol: The tolerance used to determine if two likeness parameters
are the same.
Returns:
Whether the two geometries are the same.
"""
return (geometry_a['type'] == geometry_b['type'] and
abs(geometry_a['likeness'] - geometry_b['likeness']) < likeness_tol)
|
9cc882e19a6530e02dfc52829e3bf9d2474b498c
| 53,423
|
def plot_poly(ploty, poly):
"""
Taken from the materials and modified.
    Returns a set of plotx points calculated from the polynomial and input ploty data.
"""
fit_success = False
try:
plotx = poly[0]*ploty**2 + poly[1]*ploty + poly[2]
fit_success = True
except TypeError:
# Avoids an error if poly is still none or incorrect
print('The function failed to fit a line!')
plotx = 1*ploty**2 + 1*ploty
return plotx, fit_success
|
2d2314454d1ef08d728917849da4328587f0c6ca
| 53,426
|
def cubic_objective(x, a, b, c, d):
"""Cubic objective function."""
return a*x**3 + b*x**2 + c*x + d
|
ddd43f7cac4511649b46a68777f34d3c18679f94
| 53,428
|
def get_partition_lengths(partitions):
"""
    Takes an array of partitions and returns an array with their lengths.
Parameters
----------
partitions: array
Array containing the partitions.
Returns
-------
array
Array containing partition lengths.
"""
return [len(p) for p in partitions]
|
94f5c2097bf779636c68d3fa73550ffe26d61882
| 53,432
|
def in_close(x,l, epsilon=0.0001):
"""
returns true if x lies within epsilon of some element in the list l, otherwise false
"""
for i in l:
if abs(x-i)<epsilon:
return True
return False
|
23447d926725a4f7b25d960d82a4fe8bb72c5a4c
| 53,434
|
def luminance(rgb):
"""Calculates the brightness of an rgb 255 color. See https://en.wikipedia.org/wiki/Relative_luminance
Args:
rgb(:obj:`tuple`): 255 (red, green, blue) tuple
Returns:
luminance(:obj:`scalar`): relative luminance
Example:
.. code-block:: python
>>> rgb = (255,127,0)
>>> luminance(rgb)
0.5687976470588235
>>> luminance((0,50,255))
0.21243529411764706
"""
luminance = (0.2126*rgb[0] + 0.7152*rgb[1] + 0.0722*rgb[2])/255
return luminance
|
1921fabda06f277ba072720936e88aa2d5c6bc63
| 53,436
|
def only_keep_regions(df):
""" Only keeps geographical areas at the smalles aggregation levels
Args:
df (pd.DataFrame): pandas dataframe with digits smaller than 4 in the column "geo"
Returns:
df (pd.DataFrame): pandas dataframe with digits equal to 4 in the columns "geo"
"""
df = df[df['geo'].apply(lambda x: len(str(x)) == 4)]
return df
|
fd62d655df12a1823ca0e0622b93f581e5c55f8a
| 53,438
|
from datetime import datetime
def _day_or_night(timestamp):
"""
    Determine whether the given timestamp falls in the daytime or at night.
    Daytime: 6:00-22:00; night: 0:00-6:00 and 22:00-24:00.
    Parameters
    -------
    timestamp: int, Unix timestamp
    Return
    -------
    'day': daytime
    'night': night
"""
h = datetime.fromtimestamp(timestamp).hour
return 'day' if h >= 6 and h < 22 else 'night'
|
7cd39a8694681394acc7b9f622f00e8c4731b6d0
| 53,447
|
import imghdr
def is_image(path):
"""Check if the path is an image file"""
path = str(path)
try:
return imghdr.what(path) is not None
except IsADirectoryError:
return False
|
12f34353974972b62951cd3c84649ffbcb8fc9b5
| 53,454
|
def convert_mug_to_cup(value):
"""Helper function to convert a string from mug to cup"""
if isinstance(value, str) and value.lower() == 'mug':
return 'cup'
else:
return value
|
9bc2831702e6a8223c3597f581d58f1c4adaf5cf
| 53,455
|
def as_str(raw):
"""Return a string for the given raw value."""
if not raw:
return ''
return str(raw)
|
b40756bb3fb0f0208e48391800d65c0417ab7cab
| 53,456
|
def get_obj_property_value(obj, field):
"""
Given an object and a field which may have nested properties
return value of nested property
>>> class demo: pass
>>> demo.ls = []
>>> get_obj_property_value(demo, 'ls.__class__')
    <class 'list'>
"""
_attr_list = field.split('.')
if len(_attr_list) > 1:
tmp_obj = getattr(obj, _attr_list[0])
return get_obj_property_value(tmp_obj, '.'.join(_attr_list[1:]))
return getattr(obj, field)
|
f04a600b69affe6a121f0c8814c3ee09d2c44e6f
| 53,457
|
def _normalize_paths(input_json):
"""Ensure that all paths are lower case (to prevent potential mis-matches,
since Windows is case-insensitive.
"""
normalized_json = input_json.copy()
for i in range(len(normalized_json['sources'])):
normalized_json['sources'][i] = normalized_json['sources'][i].lower()
return normalized_json
|
1c6bbe075e1409630a5b3778e6c2e86f4fe862ad
| 53,461
|
def getRecentDetections(dbManager, timestamp):
"""Return all recent (last 15 minutes) detections
Args:
dbManager (DbManager):
timestamp (int): time.time() value when image was taken
Returns:
List of alerts
"""
sqlTemplate = """SELECT * FROM detections where timestamp > %s order by timestamp desc"""
sqlStr = sqlTemplate % (timestamp - 15*60)
dbResult = dbManager.query(sqlStr)
return dbResult
|
85a28e7c79e56b37b9fb04d04a856376d8799042
| 53,466
|
def make_table(data, num_cols):
"""
Create two dimensional list
Inputs:
data - 1-dimensional sequence
num_cols - number of columns in the table to be created
"""
table = []
while data:
table.append([])
cols = num_cols if len(data) >= num_cols else len(data)
for col in range(cols):
table[-1].append(': '.join(data.pop(0)))
return table
|
76572682234151a742771f30b7bef86ef13e276e
| 53,468
|
def _extract_cdr3_nucseq_from_adaptive(adapt):
"""
    Extracts the nucleotide sequence coding the CDR3, defined as starting at the
    canonical V-gene cysteine residue
Parameters
----------
adapt : collections.namedtuple
instance of named tuple of fields expected in an adaptive file
Returns
-------
cdr3_b_nucseq : str
Notes
-----
    ADAPTIVE's output does not provide explicit indices for the cdr3 nucleotide
sequence. Thus, we use the length of the amino acid sequence
and the start index of cys_residue (see page 18 of their
manual for 'v_index' defining the start of Cys residue, 'jIndex' is
    start of the J gene).
"""
length_cdr3_nucseq = len(adapt.aminoAcid)*3
cys_residue = int(adapt.vIndex)
start_J_gene = int(adapt.jIndex)
length_partial_nuc = len(adapt.nucleotide[cys_residue:start_J_gene])
remaining_length_nuc = length_cdr3_nucseq - length_partial_nuc
end_index = start_J_gene + remaining_length_nuc
cdr3_nucseq = adapt.nucleotide[cys_residue:end_index]
return cdr3_nucseq
|
c42d93dd7e23d59c27629ff686cde2d75091b6da
| 53,474
|
import socket
def is_port_available(port):
"""
Check if the provided port is free in the host machine.
Params
------
port : int
Port number of the host.
Returns
--------
boolean
Return True is the port is free otherwise False.
"""
port = int(port)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(("localhost", port)) != 0
|
23f57b8d942e3ad4e84e6c7d667af9b61348311c
| 53,478
|
from typing import Any
def _parse_value(value_raw: Any) -> str:
"""Parse a value to string
Notes
-----
Here are the conditions:
1. If the value is constrained by double quotes, it is a string;
2. If it is a `null`, return an empty string;
3. Otherwise it is a number. Return it as given.
"""
value = value_raw.strip()
# import ipdb; ipdb.set_trace()
if value[0] == '"' and value[-1] == '"':
return value[1:-1]
if value == "null":
return ""
return value
|
a3eaa083038d8b40fe4fa590deb4f18a3ad5492a
| 53,479
|
import math
def min_interacoes(a0,b0,E):
"""
    min_interacoes uses the previously given interval and tolerance to return
    the minimum number of iterations needed to approximate the root,
    according to the formula:
        n >= (log(b0 - a0) - log(E)) / log(2)
    Arguments:
        a0: Start of the interval
        b0: End of the interval
        E: Tolerated error
    Returns:
        The minimum number n of iterations required.
"""
conta = (math.log10(b0-a0)-math.log10(E))/math.log10(2)
res = int(math.ceil(conta))
return res
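# Illustrative example (assumed, not from the original snippet): bisecting the
# interval [0, 1] down to an error of 1e-3 needs
# ceil((log10(1) - log10(1e-3)) / log10(2)) = 10 steps.
# >>> min_interacoes(0, 1, 0.001)
# 10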
|
f946271c3b03c11968c3cf8d71559838036e534c
| 53,480
|
def __convert_string_to_numeric_array(string):
""" Converts a numeric string to an array of integers. """
return [int(char) for char in string]
|
e5e10b6fedcf9f29a1fc8801d14ec6acada908e2
| 53,482
|
def binary_search(c,b):
"""
Returns index of first occurrence of c in b; -1 if not
found.
Parameter b: The sequence to search
Precondition: b is a SORTED sequence
Parameter c: The value to search for
Precondition: NONE (c can be any value)
"""
# Quick way to check if a sequence; CANNOT easily check sorted
    assert len(b) >= 0, repr(b)+' is not a sequence (list, string, or tuple)'
# Store in i the value BEFORE beginning of range to search
i = 0
# Store in j the end of the range to search (element after)
j = len(b)
# The middle position of the range
mid = (i+j)//2
while j > i:
if b[mid] < c:
i = mid+1
else: # b[mid] >= c
j = mid
# Compute a new middle.
mid = (i+j)//2
if i < len(b) and b[i] == c:
return i
# NOT FOUND
return -1
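# Illustrative usage (assumed, not from the original snippet):
# >>> binary_search(5, [1, 3, 5, 5, 7])
# 2
# >>> binary_search(4, [1, 3, 5, 5, 7])
# -1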
|
05bafb01621b54ca74720f3aaefb6687d83eb953
| 53,483
|
from datetime import datetime
def text_to_date(date_string):
""" Given a string that represents a date in YMD format, return an
equivalent Date object. """
return datetime.strptime(str(date_string), '%Y%m%d').date()
|
e1a33bafcdad6456f7f8de0af70667dc5eeff598
| 53,484
|
def pipeline_none(model):
"""No transforms pipeline"""
return model
|
acadfcaa775ecd161911a6fab07d245bd28cf581
| 53,486
|
def get(lst, index, default=None):
"""
    Return the element of `lst` located at `index`.
    If no element exists at `index`,
    return the default value.
"""
try:
return lst[index]
except IndexError:
return default
|
273345d2acf1c4aecbf584d600f67ff095a710ea
| 53,490
|
def generate_sql_II_statement(ID):
"""Generate an sql statement for a given id."""
return "select tmp_path from dataset where person_stable_id = '{}';".format(ID)
|
7a998967b926c2af6daebb77ca516a74cc39ddf7
| 53,498
|
def remove_postseason(clean_data):
"""Remove post season games."""
clean_data = clean_data[clean_data['game_postseason'] == False]
return clean_data
|
40934932ac3bce33c31531911a8612bbfac50bdd
| 53,502
|
def encode_revbytes(array: bytes) -> bytes:
""" Return bytes in reverse order.
"""
return bytes(reversed(array))
|
4f395994ec0de0276aaae8671dfc51ca081393d4
| 53,503
|
def ascendingRange(rangeStart, rangeEnd):
"""Get start and end into ascending order by switching them if necessary.
@param rangeStart: the start of the range
@type rangeStart: C{int}
@param rangeEnd: the end of the range
@type rangeEnd: C{int}
@return: range in ascending order
@rtype: C{tuple} of length 2
"""
if rangeStart > rangeEnd:
return rangeEnd, rangeStart
else:
return rangeStart, rangeEnd
|
afde68d3a9011418959c148a3c15ab36fd8279af
| 53,505
|
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
|
0b66ee181af104796cc0e35ff8f2b83d6594a216
| 53,510
|
def escape_strings(escapist: str) -> str:
"""Escapes strings as required for ultisnips snippets
    Escapes instances of \\, `, {, }, $ and replaces double quotes with single quotes
Parameters
----------
escapist: str
A string to apply string replacement on
Returns
-------
str
The input string with all defined replacements applied
"""
return (
escapist.replace("\\", "\\\\")
.replace("`", "\`")
.replace("{", "\{")
.replace("}", "\}")
.replace("$", "\$")
.replace("\"", "'")
)
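# Illustrative example (assumed, not from the original snippet): braces and
# dollar signs gain a backslash; double quotes would become single quotes.
# >>> escape_strings('a{b}$c')
# 'a\\{b\\}\\$c'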
|
48941d6bf43033aa3d8f29a1be91224dcacff3f4
| 53,511
|
import requests
def fetch_github_releases(author: str, reponame: str) -> list:
"""
Fetch github releases
:param author
:param reponame
    :returns list
:raises requests.RequestException
"""
response = requests.get("https://api.github.com/repos/%s/%s/releases" % (author, reponame))
return response.json()
|
faa5bba6147464ae1311829dd5dd417f5a34f59a
| 53,512
|
def get_event(app_type):
"""Helper function to get a dict that is reflective of a valid input event for an App"""
return {
'app_type': app_type,
'schedule_expression': 'rate(10 minutes)',
'destination_function_name':
'unit_test_prefix_unit_test_cluster_streamalert_classifier'
}
|
778f397e5b938dad007c2fb308d6744d0bb781b3
| 53,514
|
def parse_flags(line):
"""
Parse a line from a config file containing compile flags.
Parameters
----------
line : str
A single line containing one or more compile flags.
Returns
-------
d : dict
Dictionary of parsed flags, split into relevant categories.
These categories are the keys of `d`:
* 'include_dirs'
* 'library_dirs'
* 'libraries'
* 'macros'
* 'ignored'
"""
d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
'macros': [], 'ignored': []}
flags = (' ' + line).split(' -')
for flag in flags:
flag = '-' + flag
if len(flag) > 0:
if flag.startswith('-I'):
d['include_dirs'].append(flag[2:].strip())
elif flag.startswith('-L'):
d['library_dirs'].append(flag[2:].strip())
elif flag.startswith('-l'):
d['libraries'].append(flag[2:].strip())
elif flag.startswith('-D'):
d['macros'].append(flag[2:].strip())
else:
d['ignored'].append(flag)
return d
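# Illustrative usage (flag values assumed, not from the original snippet):
# >>> d = parse_flags("-I/usr/include -L/usr/lib -lfftw3 -DNDEBUG")
# >>> d['include_dirs'], d['libraries'], d['macros']
# (['/usr/include'], ['fftw3'], ['NDEBUG'])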
|
696be268aaa54977a7e11a3dc44390c22fa35e31
| 53,520
|
def is_non_decreasing_sequence(vals):
""" Returns true if vals is nondecreasing. """
for i in range(len(vals)-1):
if vals[i] > vals[i+1]:
return False
return True
|
e3a462f49aec77ce28cfa96a7286c25098ab9de4
| 53,523
|
def calc_max_min_marks(marks):
""" Function which returns the minimum(excluding 0) and maximum score of the class in the form of a list. """
result = []
marks.sort()
min_max = []
min_max[:] = (value for value in marks if value != 0)
least_score = min_max[0]
highest_score = min_max[-1]
result.append(least_score)
result.append(highest_score)
return result
|
92c6581ea2d967772cda043adee5c5d5ce893335
| 53,528
|
def point_is_in(bbox, point):
"""
Check whether EPSG:4326 point is in bbox
"""
# bbox = normalize(bbox)[0]
return (
point[0] >= bbox[0]
and point[0] <= bbox[2]
and point[1] >= bbox[1]
and point[1] <= bbox[3]
)
|
ebec8166789aba006089bbd03092de4d9e35ae86
| 53,530
|
import codecs
def read_file_line(path):
"""
Read all lines from file
:param path: file path
:return: list of lines
"""
    with codecs.open(path, 'r', 'utf8') as f:
        lines = f.readlines()
    return lines
|
83a34e889874cec5d32fde596855a1bb8724eeba
| 53,533
|
def absolute_max(array):
"""
    Returns the element of the array with the largest absolute value.
    :param array: the array.
    :return: the element with the largest absolute value (sign preserved)
>>> absolute_max([1, -2, 5, -8, 7])
-8
>>> absolute_max([1, -2, 3, -4, 5])
5
"""
return max(array, key=abs)
|
e01246883d83becadcb55f15917be02b8d8a9876
| 53,534
|
def divisible_by(n):
"""
Function that returns a lambda function that checks if a given number is divisible by n
"""
return lambda x: x % n == 0
|
72a37d5145b44d8322347a077ac1fe239c5087d6
| 53,535
|
def cat2axis(cat):
"""
Axis is the dimension to sum (the pythonic way). Cat is the dimension that
remains at the end (the Keops way).
:param cat: 0 or 1
:return: axis: 1 or 0
"""
if cat in [0, 1]:
return (cat + 1) % 2
else:
raise ValueError("Category should be Vi or Vj.")
|
ad33bf7a70d468c5eb9a98529b751ab8254ae24c
| 53,536
|
def steps(image_shape, filter_shape, step_shape):
"""
Generates feature map coordinates that filters visit
Parameters
----------
image_shape: tuple of ints
Image height / width
filter_shape: tuple of ints
Filter height / width
step_shape: tuple of ints
Step height / width
Returns
-------
ys: Map coordinates along y axis
xs: Map coordinates along x axis
"""
h, w = image_shape
fh, fw = filter_shape
sh, sw = step_shape
ys = range(0, h-fh+1, sh)
xs = range(0, w-fw+1, sw)
return ys, xs
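# Illustrative example (assumed, not from the original snippet): a 3x3 filter
# with stride 1 over a 5x5 image visits rows and columns 0..2.
# >>> ys, xs = steps((5, 5), (3, 3), (1, 1))
# >>> list(ys), list(xs)
# ([0, 1, 2], [0, 1, 2])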
|
27c21d8699f33b592e133ee59aca05f470be04e9
| 53,538
|
import functools
def _define_aliases(alias_d, cls=None):
"""Class decorator for defining property aliases.
Use as ::
@cbook._define_aliases({"property": ["alias", ...], ...})
class C: ...
For each property, if the corresponding ``get_property`` is defined in the
class so far, an alias named ``get_alias`` will be defined; the same will
be done for setters. If neither the getter nor the setter exists, an
exception will be raised.
The alias map is stored as the ``_alias_map`` attribute on the class and
can be used by `~.normalize_kwargs` (which assumes that higher priority
aliases come last).
"""
if cls is None: # Return the actual class decorator.
return functools.partial(_define_aliases, alias_d)
def make_alias(name): # Enforce a closure over *name*.
@functools.wraps(getattr(cls, name))
def method(self, *args, **kwargs):
return getattr(self, name)(*args, **kwargs)
return method
for prop, aliases in alias_d.items():
exists = False
for prefix in ["get_", "set_"]:
if prefix + prop in vars(cls):
exists = True
for alias in aliases:
method = make_alias(prefix + prop)
method.__name__ = prefix + alias
method.__doc__ = "Alias for `{}`.".format(prefix + prop)
setattr(cls, prefix + alias, method)
if not exists:
raise ValueError(
"Neither getter nor setter exists for {!r}".format(prop))
if hasattr(cls, "_alias_map"):
# Need to decide on conflict resolution policy.
raise NotImplementedError("Parent class already defines aliases")
cls._alias_map = alias_d
return cls
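# Minimal usage sketch (assumed; the class and property names below are
# hypothetical, chosen only to show the alias mechanism):
# >>> @_define_aliases({"color": ["c"]})
# ... class Artist:
# ...     def get_color(self): return self._color
# ...     def set_color(self, color): self._color = color
# >>> hasattr(Artist, "get_c") and hasattr(Artist, "set_c")
# True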
|
3f3c5f1a5408a182310b715f5d7eaca12bddfe4b
| 53,542
|
import statistics
import math
def pooled_sample_variance(sample1, sample2):
"""Find the pooled sample variance for two samples.
Args:
sample1: one sample.
sample2: the other sample.
Returns:
Pooled sample variance, as a float.
"""
deg_freedom = len(sample1) + len(sample2) - 2
mean1 = statistics.mean(sample1)
squares1 = ((x - mean1) ** 2 for x in sample1)
mean2 = statistics.mean(sample2)
squares2 = ((x - mean2) ** 2 for x in sample2)
return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)
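# Illustrative example (assumed, not from the original snippet):
# >>> pooled_sample_variance([1, 2, 3], [2, 4, 6])
# 2.5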
|
cb2ebc885f7cff0169ec6f0b9e5b15efb6a66ecd
| 53,543
|
from typing import Any
def to_json_value(v: Any) -> Any:
"""To JSON value.
Args:
v (Any): Any value.
Returns:
Any: JSON like value.
"""
if isinstance(v, dict):
res = {}
for k, val in v.items():
if not any(isinstance(k, t) for t in [str, int]):
k = str(k)
res[k] = to_json_value(val)
return res
if any(isinstance(v, t) for t in [str, int, float, bool]):
return v
elif any(isinstance(v, t) for t in [list, tuple, set]):
return [to_json_value(val) for val in v]
elif v is None:
return v
else:
return str(v)
|
2141ec5df91969b307c91514ca118d76105977f3
| 53,551
|
import click
def failure_style(failure_string: str) -> str:
"""
Styling function to emphasise bullet names.
:param failure_string: The string to style
:type failure_string: str
:return: Styled failure string
:rtype: str
"""
return click.style(failure_string, fg="red")
|
6909b223062ce58dcc04fa0933947049bc7d0034
| 53,554
|
def _has_message_field(message, field_key):
"""
Returns whether the message has the given field.
Parameters
----------
message : ``Message``
The message to check whether it has the field.
field_key : `int`
Message field key to check.
Returns
-------
has_field : `bool`
"""
fields = message._fields
if (fields is None):
has_field = False
else:
has_field = (field_key in fields)
return has_field
|
0e9a5837380f08c5ed1e1c3124cae01cd0407010
| 53,555
|
from typing import Any
def to_float_if_float(x: Any) -> Any:
"""Return input as float if possible, otherwise return as is
Arguments:
x: Anything
Returns:
``x`` as float if possible, otherwise ``x``
"""
try:
return float(x)
except (ValueError, TypeError):
return x
|
45941958e65d50e5bf218c28e483fe2e717806c0
| 53,557
|
def _general_fuel_checker(mass: int) -> int:
"""Given the mass of a module, calculate the fuel requirement
Args:
- mass (int): the mass of the module
Returns:
int: the fuel requirement
"""
return (mass // 3) - 2
|
df962587b44316a277cbac0883cc4f6a784a737d
| 53,560
|
def get_free_residents(resident_prefs, matching):
""" Return a list of all residents who are currently unmatched but have a
non-empty preference list. """
return [
resident
for resident in resident_prefs
if resident_prefs[resident]
and not any([resident in match for match in matching.values()])
]
|
ff50bc4feae5dc5830a7d30ba32c359f8471495e
| 53,561
|
def get_atom_property_dicts(identifier):
"""
Get a Dictionary for properties by identifier.
Args:
identifier (str): Which property to get.
Returns:
dict: Dictionary of Properties.
"""
    global_proton_dict = {'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8, 'F': 9, 'Ne': 10, 'Na': 11,
'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15, 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20,
'Sc': 21, 'Ti': 22, 'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29,
'Zn': 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36, 'Rb': 37, 'Sr': 38,
'Y': 39, 'Zr': 40, 'Nb': 41, 'Mo': 42, 'Tc': 43, 'Ru': 44, 'Rh': 45, 'Pd': 46, 'Ag': 47,
'Cd': 48, 'In': 49, 'Sn': 50, 'Sb': 51, 'Te': 52, 'I': 53, 'Xe': 54, 'Cs': 55, 'Ba': 56,
'La': 57, 'Ce': 58, 'Pr': 59, 'Nd': 60, 'Pm': 61, 'Sm': 62, 'Eu': 63, 'Gd': 64, 'Tb': 65,
'Dy': 66, 'Ho': 67, 'Er': 68, 'Tm': 69, 'Yb': 70, 'Lu': 71, 'Hf': 72, 'Ta': 73, 'W': 74,
'Re': 75, 'Os': 76, 'Ir': 77, 'Pt': 78, 'Au': 79, 'Hg': 80, 'Tl': 81, 'Pb': 82, 'Bi': 83,
'Po': 84, 'At': 85, 'Rn': 86, 'Fr': 87, 'Ra': 88, 'Ac': 89, 'Th': 90, 'Pa': 91, 'U': 92,
'Np': 93, 'Pu': 94, 'Am': 95, 'Cm': 96, 'Bk': 97, 'Cf': 98, 'Es': 99, 'Fm': 100, 'Md': 101,
'No': 102, 'Lr': 103, 'Rf': 104, 'Db': 105, 'Sg': 106, 'Bh': 107, 'Hs': 108, 'Mt': 109,
'Ds': 110, 'Rg': 111, 'Cn': 112, 'Nh': 113, 'Fl': 114, 'Mc': 115, 'Lv': 116, 'Ts': 117,
'Og': 118, 'Uue': 119}
inverse_global_proton_dict = {value: key for key, value in global_proton_dict.items()}
if identifier == "ToProton":
return global_proton_dict
if identifier == "FromProton":
return inverse_global_proton_dict
|
78a275776c9c9b6db3ad1b01c7c95c7a103f5541
| 53,567
|
def triple2string(triple:list) -> str:
"""
Write a triple to string with tab separator between the slots
Args
----
triple: list
List of strings [subj, rel, obj]
Returns
-------
triple_str: str
String "subj \t rel \t obj" (without spaces)
"""
return triple[0] + "\t" + triple[1] + "\t" + triple[2]
|
71a5dee0f8e99545f0ba391f789eaf2727f3480f
| 53,571
|
from datetime import datetime
def strptime(timestring):
"""
Parse a datetime from an XPT format string.
All text in an XPT document are ASCII-encoded. This function
expects a bytes string in the "ddMMMyy:hh:mm:ss" format. For
example, ``b'16FEB11:10:07:55'``. Note that XPT supports only
2-digit years, which are expected to be either 1900s or 2000s.
"""
text = timestring.decode('ascii')
return datetime.strptime(text, '%d%b%y:%H:%M:%S')
|
365436c5cc45d64157f0e34fcf228da9cfa88936
| 53,572
|
def remove_directories(list_of_keys):
"""
Removes directories from a list of S3 keys.
"""
    return [path for path in list_of_keys if path[-1] != '/']
|
9b9bd4a441f778fa014a22c2bdad408420233456
| 53,581
|