content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import decimal
def dround(decimal_number, decimal_places):
    """Quantize ``decimal_number`` to ``decimal_places`` decimal places.
    :type decimal_number: decimal.Decimal
    :param decimal_number: Decimal number.
    :type decimal_places: integer
    :param decimal_places: Number of decimal places to keep.
    :rtype: decimal.Decimal
    :returns: Rounded decimal number.
    """
    # 10 ** -decimal_places gives the quantum, e.g. Decimal('0.01') for 2.
    quantum = decimal.Decimal(10) ** -decimal_places
    return decimal_number.quantize(quantum)
|
be35ff43dbc051cf51a71849312ed80a3ece25a7
| 74,700
|
def owns_post(self, post):
    """
    Whether the user owns a post.
    :param (User) self: The acting user
    :param (Post) post: the post
    :return (bool): whether the user owns the post
    """
    author = post.author
    return author.id == self.id
|
a411860cfa2b53026714584ee36d8aa671fdef31
| 74,701
|
def _convert_snake_to_kebab(snake_case_string: str) -> str:
"""
Convert a string provided in snake_case to kebab-case
"""
return snake_case_string.replace('_', '-')
|
4539e049ead6a307bb4cea3cc5164b0925a4b600
| 74,706
|
def select_order(axtypes):
    """Returns indices of the correct data order axis priority given a list of WCS CTYPEs.
    For example, given ['HPLN-TAN', 'TIME', 'WAVE'] it will return
    [1, 2, 0] because index 1 (time) has the lowest priority, followed by
    wavelength and finally solar-x.
    Parameters
    ----------
    axtypes: str list
        The list of CTYPEs to be modified.
    """
    # Fixed priorities; anything else ranks after them by its list position.
    priority = {'TIME': 0, 'UTC': 0, 'WAVE': 1, 'HPLT-TAN': 2}
    ranked = sorted(
        (priority.get(ctype, axtypes.index(ctype) + 3), ctype)
        for ctype in axtypes
    )
    return [axtypes.index(ctype) for _, ctype in ranked]
|
dd5baf190a7fcf6c5b59e0b3a1877c3ac32d93eb
| 74,707
|
def createFactorialTrialList(factors):
    """Create a trialList from a dictionary of factor names (keys) and
    levels (values); all factors are factorially combined (so for example
    two factors with 3 and 5 levels yield a trialList of 3*5 = 15 trials,
    each specifying the values for that trial).
    Usage::
        trialList = createFactorialTrialList(factors)
    :Parameters:
        factors : a dictionary with names (keys) and levels (values) of the
            factors
    Example::
        factors={"text": ["red", "green", "blue"],
            "letterColor": ["red", "green"],
            "size": [0, 1]}
        mytrials = createFactorialTrialList(factors)
    """
    # Build all combinations as lists of (name, level) pairs; each new
    # factor multiplies the set of existing partial combinations.
    combos = [[]]
    for name, levels in factors.items():
        combos = [partial + [(name, level)]
                  for level in levels
                  for partial in combos]
    # Convert each combination of pairs into a per-trial dict.
    return [dict(pairs) for pairs in combos]
|
e6019c97e6cf863df52575226b98d9324c31554b
| 74,713
|
import json
def _from_json(filepath):
"""Load a json file to a dictionary."""
if not filepath:
return dict()
with open(str(filepath)) as lic_json_file:
return json.loads(lic_json_file.read())
|
3d1a28922116782b3867cab64c573111e5f39fa9
| 74,714
|
def comprobar_colision(p_activo, area, evento):
    """
    Check for a collision and return the resulting active flag.
    (Docstring translated from Spanish.)
    :param p_activo: current boolean flag, toggled on a hit
    :param area: rect-like object whose ``collidepoint`` is tested
    :param evento: event carrying the clicked position in ``evento.pos``
    :return: the new boolean flag (toggled on hit, False on miss)
    """
    if not area.collidepoint(evento.pos):
        return False
    # A hit toggles the current flag.
    return not p_activo
|
cc918eacf1d975bdd37d4f0535c46b361ff9da93
| 74,725
|
def group_member_ids(ppl_coll, grpname):
    """Get a list of all group member ids
    Parameters
    ----------
    ppl_coll: collection (list of dicts)
        The people collection that should contain the group members
    grpname: string
        The id of the group in groups.yml
    Returns
    -------
    set:
        The set of ids of the people in the group
    Notes
    -----
    - Groups that are being tracked are listed in the groups.yml collection
      with a name and an id.
    - People are in a group during an educational or employment period.
    - To assign a person to a tracked group during one such period, add
      a "group" key to that education/employment item with a value
      that is the group id.
    - This function takes the group id that is passed and searches
      the people collection for all people that have been
      assigned to that group in some period of time and returns the set of
      their ids.
    """
    grpmembers = set()
    for person in ppl_coll:
        for k in ["education", "employment"]:
            # Default to an empty *list*: each entry is expected to hold a
            # list of period dicts (the previous `{}` default was misleading).
            for position in person.get(k, []):
                if position.get("group", None) == grpname:
                    grpmembers.add(person["_id"])
    return grpmembers
|
1ffc8c04ad05986bd681c7775b72a688ec8e984f
| 74,726
|
from datetime import datetime, timezone
def age_check(agent_behavior, observation, scale):
    """
    :param agent_behavior: Metrics to be used by the agent.
    :type agent_behavior: dict
    :param observation: Content and metadata of message received and on which the trust is calculated.
    :type observation: Observation
    :param scale: The trust scale used by the agent
    :type scale: Scale
    :return: An age punishment value that is equal to the scale maximum value for recent publications and falls within
    [default, max) if it exceeded the allowed lifetime.
    :rtype: float or int
    """
    # Bug fix: datetime.utcnow() returns a *naive* datetime and .timestamp()
    # interprets naive datetimes as local time, skewing the comparison by the
    # local UTC offset. Use an aware UTC datetime instead.
    now = datetime.now(timezone.utc).timestamp()
    expiry = observation.details['content_trust.publication_date'] + \
        agent_behavior['content_trust.max_lifetime_seconds']
    if now < expiry:  # within allowed lifetime
        return scale.maximum_value()
    # exceeded lifetime: optionally decay linearly over a grace period
    if 'content_trust.age_grace_period_seconds' in agent_behavior:
        grace_value = (
            now - expiry) / agent_behavior['content_trust.age_grace_period_seconds']
        if grace_value < 1.0:  # within grace period
            return (1 - grace_value) * scale.maximum_value()
    return scale.default_value()
|
86e15ac1d1e250a0fc9422887b04ce90148fea5c
| 74,729
|
def flatten_filter(value):
    """Combine the incoming sequences into one flat list."""
    return [element for sequence in value for element in sequence]
|
927a7f7e23154cd28f8b89e5867847abe17ef4c4
| 74,735
|
def EncodeFromLinkArray(m, N, L):
    """
    EncodeFromLinkArray(m,N,L):
    m: A list of 0s and 1s representing 'message bits'.
    N: Length of the code.
    L: A link array representing rows in a binary generator matrix.
       L[i] lists the positions in row i of the generator matrix that are 1.
    For example, consider the generator matrix
    G = [1 1 1 0 1 0 0]
        [0 1 1 1 0 1 0]
        [0 0 1 1 1 0 1]
    To encode the message vector [1,0,1], you would do
    >>> EncodeFromLinkArray([1,0,1],7,[[0,1,2,4],[1,2,3,5],[2,3,4,6]])
    [1, 1, 0, 1, 0, 0, 1]
    """
    codeword = [0] * N
    for row, bit in enumerate(m):
        assert bit == 0 or bit == 1
        if not bit:
            continue
        # XOR this message bit into every position set in generator row.
        for col in L[row]:
            codeword[col] ^= bit
    return codeword
|
74ff2dfd31596a7979cf48c5d491b049615a27b3
| 74,747
|
def get_answers(question):
    """Extract the set of unique (argument, entity-name) answer pairs from
    all parses of a question."""
    return {
        (answer["AnswerArgument"], answer["EntityName"])
        for parse in question["Parses"]
        for answer in parse["Answers"]
    }
|
29331ad07a37a1c4124e8769e7bcd9e09a1bed63
| 74,749
|
def classify_turn_direction(relative_azimuth, straight_angle=20):
    """Classify a turn direction based on a relative azimuth (degrees).

    :param relative_azimuth: heading change; normalized into [0, 360) so
        negative or >= 360 inputs are classified instead of returning None
        (the previous version silently fell off the end for those).
    :param straight_angle: half-width of the 'straight' and 'U' sectors.
    :return: one of 'straight', 'left', 'U', 'right'.
    """
    relative_azimuth = relative_azimuth % 360
    if relative_azimuth < straight_angle or relative_azimuth > 360 - straight_angle:
        return 'straight'
    if straight_angle <= relative_azimuth <= 180 - straight_angle:
        return 'left'
    if 180 - straight_angle < relative_azimuth < 180 + straight_angle:
        return 'U'
    # remaining sector: [180 + straight_angle, 360 - straight_angle]
    return 'right'
|
72150dffe470b2a70d4f0eff2875442506e6878a
| 74,758
|
import torch
def TorchComplexExp(phase):
    """
    Compute e^(i*phase) = cos(phase) + i*sin(phase).
    :param phase: tensor of phase angles (radians)
    :return: tensor with a trailing dimension of size 2 holding the
        (real, imaginary) parts
    """
    parts = (torch.cos(phase), torch.sin(phase))
    return torch.stack(parts, dim=-1)
|
c845260b00a1ad15a8d18a51ba8100190d0f7627
| 74,759
|
def munge_ram32m_init(init):
    """ RAM32M INIT is interleaved, while the underlying data is not.
    INIT[::2] = INIT[:32]
    INIT[1::2] = INIT[32:]
    """
    # Drop the Verilog "64'b" prefix and reverse into bit order.
    bits = init.replace("64'b", "")[::-1]
    assert len(bits) == 64
    # Interleave the low and high 32-bit halves bit by bit.
    interleaved = []
    for low, high in zip(bits[:32], bits[32:]):
        interleaved.append(low)
        interleaved.append(high)
    return "64'b{}".format(''.join(interleaved)[::-1])
|
04a310c594fdbbe003da68244af26519941824e0
| 74,761
|
import random
def generate_n_ints(num: int = 6) -> list:
    """Generate a list of `num` pseudo-random integers in [0, 256).
    Args:
        num (int, optional): N integers to generate. Defaults to 6.
    Returns:
        list: list of pseudo-random integers
    """
    values = []
    for _ in range(num):
        values.append(random.randrange(256))
    return values
|
162077cdcbc30465237379471f3797a2ed0c0ba9
| 74,762
|
def is_str(x):
    """Return whether ``x`` is a ``str`` instance.
    Args:
        x: object to test
    Returns:
        bool: True when ``x`` is a string, False otherwise
    """
    return isinstance(x, str)
|
24da09fa762e550d7f543845a475dfb990f8cfcc
| 74,770
|
import re
def to_camel_case(s):
    """Convert `s` to CamelCase: runs of '_' or '-' become word breaks,
    each word is capitalized, and the separators are removed.

    Note: the result is UpperCamelCase, e.g. "foo_bar" -> "FooBar".
    """
    # The previous ''.join(s) on an already-joined string was a no-op.
    return re.sub(r"(_|-)+", " ", s).title().replace(" ", "")
|
fbc83a160239a79ca19a222728f0a25d56696ef7
| 74,772
|
import re
def extract_uuids_from_string(source_string):
    """
    Extract UUIDs out of a given source string.
    Args:
        source_string (str): string in which to locate UUIDs.
    Returns:
        list: UUIDs found in the source string (possibly empty).
    """
    # RFC 4122 layout: version nibble 1-5, variant nibble 8/9/a/b.
    uuid_regex = ("[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-"
                  "[89ab][0-9a-f]{3}-[0-9a-f]{12}")
    pattern = re.compile(uuid_regex, re.IGNORECASE)
    return pattern.findall(source_string)
|
81b22220ae361360812a2e2ff98d82512c003e99
| 74,773
|
def my_languages(results: dict) -> list:
    """Returns all languages with score at least 60, best score first.
    Args:
        results (dict): mapped languages with scores
    Examples:
        >>> assert my_languages({"Java": 10, "Ruby": 80, "Python": 65}) == ["Ruby", "Python"]
    """
    ranked = sorted(results.items(), key=lambda pair: pair[1], reverse=True)
    qualified = []
    for language, score in ranked:
        if score > 59:
            qualified.append(language)
    return qualified
|
135d798d83099be71a0bc07b9cf3907cf76d5945
| 74,778
|
def cursor_query_response_to_dict(cursor):
    """
    Turn a cursor's list-of-tuples response into a list of dicts keyed by
    column name; exhausts the cursor.
    """
    columns = cursor.column_names
    rows = cursor.fetchall()
    return [dict(zip(columns, row)) for row in rows]
|
d146835035724226cfbb76088e806bb814532408
| 74,782
|
def format32BitHexStr(hexStr):
    """
    Format a string representing a valid 32-bit hexadecimal number:
    any "0x"/"0X" prefix is removed, letters are upper-cased, and the
    digits are zero-padded to 8 characters before prepending "0x".
    Example usage:
        input: 0Xff -> output: 0x000000FF
        input: Ab  -> output: 0x000000AB
        input: 0xAf -> output: 0x000000AF
    :param hexStr: a valid string representing a 32-bit hexadecimal number
    :return: formatted "0x"-prefixed, 8-digit, upper-case string
    """
    digits = hexStr.replace("0x", "").replace("0X", "")
    # keep at most 8 hex digits, left-pad with zeros, upper-case
    digits = digits[:8].zfill(8).upper()
    return "0x" + digits
|
8005b20e34a29cd1a839c828dfcfa2c169ba0d66
| 74,790
|
def MODULE_PATH(analysis_module):
    """Returns the "module_path" used as a key to look up analysis in ACE."""
    # Accept either a class or an instance; resolve the class name either way.
    if isinstance(analysis_module, type):
        cls = analysis_module
    else:
        cls = type(analysis_module)
    return '{}:{}'.format(analysis_module.__module__, cls.__name__)
|
fe0efb189899364d4141fc0d13ae1669bf950394
| 74,797
|
def _get_scale(image, md_path_or_scale):
"""Get a valid scale from an image and a metadata path or scale.
Parameters
----------
image : np.ndarray
The input image.
md_path_or_scale : float or image filename
The path to the file containing the metadata, or the scale.
Returns
-------
scale : float
"""
scale = None
try:
scale = float(md_path_or_scale)
except ValueError:
pass
if md_path_or_scale is not None and scale is None:
md_path = md_path_or_scale.split(sep='/')
meta = image.meta
for key in md_path:
meta = meta[key]
scale = float(meta)
else:
if scale is None:
scale = 1 # measurements will be in pixel units
return scale
|
6552040bac03649d2493b3bb1dc4b7980c3f1a5e
| 74,801
|
from typing import Dict
from typing import Any
def purge_none(d: Dict[Any, Any]) -> Dict[Any, Any]:
    """Return a copy of ``d`` without the entries whose value is None."""
    cleaned = {}
    for key, value in d.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
|
2f23c0b43863dae7f9fe5cc5c43650a6fc1b99e1
| 74,804
|
import math
def get_angle(point1, point2, point3):
    """
    Compute the internal angle of a triangle given three points, i.e. the
    angle between the line through points 1-2 and the line through points
    1-3. (Docstring translated from Portuguese.)
    :param point1: tuple with the x and y coordinates of the first point
    :param point2: tuple with the x and y coordinates of the second point
    :param point3: tuple with the x and y coordinates of the third point
    :return: angle in degrees, rounded to two decimal places
    :raises ZeroDivisionError: when x3 == x1 (diff_x is zero)
    """
    # Unpack the three points
    x1, y1 = point1
    x2, y2 = point2
    x3, y3 = point3
    # Deltas along the y and x axes.
    # NOTE(review): diff_y is taken between points 3 and 2 while diff_x is
    # taken between points 3 and 1 — asymmetric; confirm this matches the
    # intended geometry before relying on the result.
    diff_y = y3 - y2
    diff_x = x3 - x1
    # atan of the slope gives the angle in radians
    angle_radians = math.atan(diff_y / diff_x)
    # Convert radians to degrees
    angle_degrees = round(math.degrees(angle_radians), 2)
    return angle_degrees
|
2d75c405ad786df711c6a509d575121b6e7563f7
| 74,810
|
def get_missing_coordinate(x1: float, y1: float, x2: float, angular_coefficient: float = -1.0) -> float:
    """Return the y-coordinate at ``x2`` of the line with slope
    ``angular_coefficient`` that passes through ``(x1, y1)``."""
    # intercept b of y = a*x + b through (x1, y1)
    intercept = y1 - (angular_coefficient * x1)
    return intercept + (x2 * angular_coefficient)
|
75c7bc35b76af10c7ca315f268bff13ead3361b4
| 74,812
|
def _point_mass(x, threshold=0.1):
"""
Find point masses in pandas.Series with frequency exceeding
specified value
Parameters
----------
x : pandas.Series
threshold : float
If value frequency exceeds threshold, consider value to have
point mass
Returns
-------
1-D numpy array that contains the point masses
"""
cnts = x.value_counts(normalize=True)
v = cnts[cnts > threshold].index.values
v.sort()
return v
|
82ee24d1aaf9a3eb6497d5ef09c84bb6ef4c5323
| 74,819
|
def _generateSpecial_MeshOpts_flatShading(
overlayList, displayCtx, source, longArg):
"""Returns no argument - the :attr:`.MeshOpts.flatShading` option is
deprecated.
"""
return []
|
7389e7a33e8b000750c14923a267e35006f45c98
| 74,823
|
def remove(i_list: list) -> list:
    """
    Return a copy of the input list without the negative numbers.
    :param i_list: The source list
    :return: A list with every non-negative member of i_list (zero is kept)
    """
    return [element for element in i_list if element >= 0]
|
14d8cb43711ca724a8161f748d59f790303c8c91
| 74,825
|
def calc_centers(edges):
    """
    Return bin centers from an array of bin edges: the midpoint between
    each pair of consecutive edges.
    Example: plt.plot(calc_centers(edges), counts, '.k')
    Parameters
    ----------
    edges : ndarray
        Array of bin edges
    Returns
    -------
    centers : ndarray
        Array of bin centers (length len(edges) - 1)
    """
    left = edges[:-1]
    right = edges[1:]
    return (left + right) / 2
|
6c25fa0c5b771dbf2e80675d7a01ba8c8dd8aa19
| 74,831
|
def FRZRAIN(conf, value):
    """Get the freezing-rain color code from config: color 1 when value
    is 1, color 2 otherwise."""
    key = "color_frrain1" if value == 1 else "color_frrain2"
    return conf.get_color("colors", key)
|
71dcb4f5d0d8f03d5764c4e2ff8acdfdb57a66d4
| 74,833
|
def function_task_2(params):
    """ Evaluate the objective function given by the task (Beale function).
    :param params: [x, y]
    :return: function value
    """
    x, y = params
    term1 = (1.5 - x + x * y) ** 2
    term2 = (2.25 - x + x * y ** 2) ** 2
    term3 = (2.625 - x + x * y ** 3) ** 2
    return term1 + term2 + term3
|
d289296aab94116e552b85f6dfb42f5ecf9a9096
| 74,836
|
import re
def process_column_labels(list_of_labels):
    """Removes undesired spaces from string labels.
    Parameters:
        list_of_labels: list
            list with column labels (non-strings are passed through as-is)
    Returns:
        list_of_cleaned_labels: list
            A list with cleaned labels
    """
    # Bug fix: the original tested `lbl is str` (identity against the *type*
    # str, always False), so no label was ever cleaned. isinstance is the
    # correct check.
    return [
        re.sub(" +", " ", lbl.strip()) if isinstance(lbl, str) else lbl
        for lbl in list_of_labels
    ]
|
893cbce75c9212da747d640630fe9f025df3a19f
| 74,838
|
from typing import Dict
def get_github_config(config: Dict[str, str]) -> Dict[str, str]:
    """
    Get configuration keys for the GitHub API.
    :param config: app config
    :type config: Config Class
    :return: key-value dictionary of required GitHub keys
    :rtype: dict
    """
    # Map of output key -> config key; missing entries default to ''.
    required = {
        'deploy_key': 'GITHUB_DEPLOY_KEY',
        'ci_key': 'GITHUB_CI_KEY',
        'bot_token': 'GITHUB_TOKEN',
        'bot_name': 'GITHUB_BOT',
        'repository_owner': 'GITHUB_OWNER',
        'repository': 'GITHUB_REPOSITORY',
    }
    return {name: config.get(cfg_key, '') for name, cfg_key in required.items()}
|
a426aee55dec84c1e89b91738e3c67cf60a91288
| 74,840
|
import logging
async def _download_text(url, session):
    """Asynchronously fetch a URL and return the decoded text body.
    Parameters
    ----------
    url : `str`
        URL to download.
    session : `aiohttp.ClientSession`
        An open aiohttp session.
    Returns
    -------
    content : `str`
        Content downloaded from the URL.
    """
    log = logging.getLogger(__name__)
    async with session.get(url) as response:
        # aiohttp decodes the body to a Python string for us
        log.info('Downloading %r', url)
        body = await response.text()
        return body
|
a198948876ddaa79a1e6bcdb0ce29471e55c02f2
| 74,842
|
def is_default(method):
    """Check whether a method was decorated with the `default` wrapper,
    i.e. carries a truthy ``_is_default`` attribute."""
    return getattr(method, "_is_default", False)
|
933d9fd4a5b7eb3436e68b93f514cd4ce2acb5ca
| 74,845
|
def clean_visits(data, test, n_visits=100):
    """
    Drops rows of `data` with too few entries for given `test`
    Parameters
    ----------
    data : pandas.DataFrame
        Must have at least columns 'visit' and `test`. Also requires a
        'participant' column (used for the per-visit count) and expects
        the 'visit' column to be categorical (the ``.cat`` accessor is
        used below).
    test : str
        Column of `data` used to determine which rows to drop
    n_visits : int, optional
        Minimum number of participants with data for `test` at a given visit
        required to retain that visit
    Returns
    -------
    data : pandas.DataFrame
        Cleaned input `data`, with 'visit' converted to numeric category
        codes
    """
    if test not in data.columns:
        raise ValueError('Provide test "{}" not in data'.format(test))
    # determine which visits have sufficient datapoints
    data = data.dropna(subset=[test])
    visits = data.groupby('visit').count()
    # NOTE(review): strictly greater-than, so a visit with exactly n_visits
    # participants is dropped — confirm the off-by-one is intended.
    visits = list(visits[visits['participant'] > n_visits].index)
    # drop "bad" visits and remove those as categories; also, convert the
    # visits column to a numerical value
    data = data.query(f'visit in {visits}')
    visit_codes = data['visit'].cat.remove_unused_categories().cat.codes
    data = data.assign(visit=visit_codes)
    return data
|
021ed80298e9e5df7da79a169dac00a3d351656a
| 74,854
|
def less(compare_item):
    """
    Build a predicate that checks whether an item is strictly less than
    ``compare_item`` (result < compare_item).
    :return: Callback to be used by the query search.
    """
    def callback(item):
        # strict less-than comparison against the captured bound
        return item < compare_item
    return callback
|
3b19cb2ae52845b6b5726b817e440e5a2dc2065b
| 74,858
|
def method_cache_counter(space, name):
    """Return a tuple (method_cache_hits, method_cache_misses) for calls to
    methods with the name."""
    assert space.config.objspace.std.withmethodcachecounter
    ec = space.getexecutioncontext()
    hits = ec.method_cache_hits.get(name, 0)
    misses = ec.method_cache_misses.get(name, 0)
    return space.newtuple([space.newint(hits), space.newint(misses)])
|
b2a43132521a1ca255387036d878ce6b7b13ac7c
| 74,862
|
from typing import Union
from pathlib import Path
from typing import Any
import pickle
def load_pickle(filepath: Union[str, Path]) -> Any:
    """Load pickled data from disk.
    Args:
        filepath: Path to pickle file
    Returns:
        Contents of pickle.
    """
    with open(filepath, "rb") as handle:
        contents = pickle.load(handle)
    return contents
|
983b0879385685f942a89b61ea64c9a347f961c6
| 74,868
|
from typing import Union
def get_cell_sizes(cell_size: Union[int, list, tuple]):
    """Handle the multiple accepted types of `cell_size`.
    To keep the old API of downstream functions while supporting
    non-square grids, a single int is used for both directions and a
    two-element tuple/list is split into the two directions.
    Args:
        cell_size: integer, or tuple/list of length two giving the cell
            size in the vertical and horizontal direction (in that order).
    Returns:
        Horizontal and vertical cell size.
    Raises:
        TypeError: for any other input shape.
    """
    if isinstance(cell_size, int):
        return cell_size, cell_size
    if isinstance(cell_size, (tuple, list)) and len(cell_size) == 2:
        # First coordinate corresponds with height (= vertical direction).
        vertical, horizontal = cell_size
        return horizontal, vertical
    raise TypeError("`cell_size` must be integer, tuple or list with length two.")
|
b372c2a960f59d861f08eaecbbc2f7499226272e
| 74,874
|
def get_severity(severity_int):
    """
    Helper method to map severities.
    :param severity_int: integer value for severity (1, 2 or 4)
    :return: collectd string for severity
    :raises KeyError: for any other integer
    """
    severities = {1: "FAILURE", 2: "WARNING", 4: "OKAY"}
    return severities[severity_int]
|
801479225422222e7339d54c2391893624fb58f4
| 74,875
|
def get_trf_command(command, transformation=""):
    """
    Return the last command in the full payload command string.
    Note: this function returns the last command in job.command which is only set for containers.
    :param command: full payload command (string).
    :param transformation: optional name of transformation, e.g. Sim_tf.py (string).
    :return: trf command (string).
    """
    if not command:
        return ""
    if transformation:
        if transformation not in command:
            return ""
        payload_command = command[command.find(transformation):]
    else:
        # second-to-last ';'-separated segment holds the trf invocation
        payload_command = command.split(';')[-2]
    # clean-up: strip whitespace, drop '-signs and any trailing ';'
    payload_command = payload_command.strip().replace("'", "").rstrip(";")
    return payload_command
|
8beedf188fcb3c24fd2bc855f0e2549484c18e3a
| 74,881
|
def flip_edge(graph, edge):
    """
    Toggle an edge in a networkx graph: remove it when present, add it
    when absent.
    :param graph: a target graph
    :param edge: edge to flip
    :return: None
    """
    present = graph.has_edge(*edge)
    if present:
        graph.remove_edge(*edge)
    else:
        graph.add_edge(*edge)
    return None
|
3f5bf91118137e12cc0367d9cc03b5befbf3e56a
| 74,882
|
def props_boot(props):
    """Return the "boot" section of a properties mapping."""
    return props["boot"]
|
aed52522fac4349ec88414edda227e7743194143
| 74,884
|
def wp_sortkey(string):
    """Help function to sort WPs. Use as
    wps = ['Medium','Tight','Loose','VTight','VLoose','VVTight']
    sorted(wps,key=wp_sortkey)

    Keys: Medium -> 0; [V...]Loose -> -(v-count+1); [V...]Tight -> +(v-count+1);
    anything else falls back to 100+len(string).
    """
    lowstr = string.lower()
    if lowstr.startswith('medium'):
        return 0
    elif lowstr.lstrip('v').startswith('loose'):
        return -1*(lowstr.count('v')+1)
    elif lowstr.lstrip('v').startswith('tight'):
        # Bug fix: mirror the 'loose' branch by stripping leading 'v's, so
        # 'VTight'/'VVTight' rank 2/3 instead of hitting the fallback value.
        return 1*(lowstr.count('v')+1)
    return 100+len(string)
|
220117582b9992e9a4db5487bb320fcbf3b21cf6
| 74,885
|
def merge_dictionaries(dict1: dict, dict2: dict) -> dict:
    """
    Return a new merged dictionary; on key collisions the value from
    ``dict2`` wins.
    Parameters:
        dict1 (dict): A dictionary.
        dict2 (dict): A dictionary.
    Returns:
        result (dict): The merged dictionary.
    """
    merged = dict(dict1)
    merged.update(dict2)
    return merged
|
aca9c885ec6102a4913e13fd1528a25bdc036b33
| 74,886
|
def P_trans(N, M, DeltaJ, DeltaM):
    """
    Calculates transition probability.
    Parameters
    ----------
    N : int
        Total rotational angular momentum quantum number
    M : int
        Magnetic quantum number
    DeltaJ : int
        Change in rotational quantum number
    DeltaM : int
        Change in magnetic quantum number
    Returns
    -------
    float
        Transition probability. Returns None (implicitly) when DeltaJ is
        not +1 or -1, or when DeltaM is not in {-1, 0, +1}.
    Notes
    -----
    The DeltaJ == -1 branch divides by N, so it raises ZeroDivisionError
    for N == 0.
    """
    # fmt: off
    if DeltaJ == 1:  # Liebe 1981
        if DeltaM == 1:
            return 3 * (N + M + 1) * (N + M + 2) / (4 * (N + 1) * (2*N + 1) * (2*N + 3))
        elif DeltaM == 0:
            return 3 * ((N + 1)**2 - M**2) / ((N + 1) * (2 * N + 1) * (2 * N + 3))
        elif DeltaM == -1:
            return 3 * (N - M + 1) * (N - M + 2) / (4 * (N + 1) * (2*N + 1) * (2*N + 3))
    elif DeltaJ == -1:
        if DeltaM == 1:
            return 3 * (N + 1) * (N - M) * (N - M - 1) / (4 * N * (2 * N + 1) * (2 * N**2 + N - 1))
        elif DeltaM == 0:
            return 3 * (N + 1) * (N**2 - M**2) / (N * (2 * N + 1) * (2 * N**2 + N - 1))
        elif DeltaM == -1:
            return 3 * (N + 1) * (N + M) * (N + M - 1) / (4 * N * (2 * N + 1) * (2 * N**2 + N - 1))
    # fmt: on
|
9913ea43117a9bb322aec610c435501a2fe3d2b3
| 74,892
|
from typing import Optional
from typing import List
def parse_skip(line) -> Optional[List[int]]:
    """Return None if the line is not an exdown-skip statement.
    Otherwise return the lines to skip (numbering starts at 1).
    The empty list indicates that the whole block must be skipped."""
    stripped = line.lstrip()
    marker = "exdown-skip"
    pos = stripped.find(marker)
    if pos < 0:
        return None
    rest = stripped[pos + len(marker):]
    skipped = []
    # Collect integer arguments to the right of the marker; stop at the
    # first token that is not an integer.
    for token in rest.lstrip().split(" "):
        try:
            number = int(token)
        except ValueError:
            break
        if number < 1:
            raise Exception(
                f"Line numbers in {marker} start at 1, but received {number}"
            )
        skipped.append(number)
    return skipped
|
41d3d7d8252fa263b6befd654ef92bf866dde7fa
| 74,894
|
import requests
import json
def post(url, data=None):
    """
    Submit an http post to a URL and parse the JSON that's returned.
    :param url: The URL to post to
    :param data: Map defining the post params to submit; keys and values
        should be strings. Defaults to an empty map.
    :return: The JSON response
    """
    # Avoid the shared mutable default argument; build a fresh dict per call.
    if data is None:
        data = {}
    r = requests.post(url, data)
    return json.loads(r.text)
|
28b744294f9a49bd74a7ffe87478cd7762dc10fd
| 74,897
|
def _median(sorted_list):
"""Returns median value for specified sorted list.
Parameters
----------
arr: List[float]
Returns
-------
float
"""
assert sorted_list, "List is empty"
n_items = len(sorted_list)
return 0.5 * (sorted_list[(n_items - 1) // 2] + sorted_list[n_items // 2])
|
5e31d6390a4f490e48e6d172fd5f2f3dcdbf5742
| 74,899
|
def data(prod, data):
    """Parse `data` using production `prod` and return the unboxed value
    from the (value, remainder) pair yielded by read()."""
    value, _rest = prod.read(data)
    return value
|
870a28d5a09e92b9642ec16465be2f6441da69ca
| 74,901
|
from datetime import datetime
def get_model_name(params):
    """
    Return the model name built from the hyperparameters and creation time.
    """
    creation_time = datetime.now().strftime('%Y%m%d_%H%M%S')
    # Fields in the exact order expected by the name template.
    fields = (
        params['nn_type'],
        params['n_classes'],
        params['batch_size'],
        params['num_epochs'],
        params['learning_rate'],
        params['lr_decay'],
        params['doppler_filtering'],
        params['undersampling_filtering'],
        params['bound_undersampling_to'],
        params['crop_strategy'],
        params['batch_normalization'],
        creation_time,
    )
    template = "{}_nc{}_bs{}_ep{}_lr{}_ld{}_df{}_uf{}_bt{}_cr{}_bn{}_ts{}"
    return template.format(*fields)
|
985aea874332ad43ca35ffc5b493ecaefe3e3291
| 74,905
|
def bezout(a, b):
    """Bézout coefficients (u, v) with a*u + b*v == gcd(a, b)
    :param a,b: non-negative integers
    :complexity: O(log a + log b)
    """
    if b == 0:
        return (1, 0)
    quotient, remainder = divmod(a, b)
    x, y = bezout(b, remainder)
    return (y, x - quotient * y)
|
bbfcd5e4f57d34291aced2356b021047f9bbd4c3
| 74,910
|
def isOnBoard(x, y):
    """Returns True if (x, y) is on the 60x15 board, otherwise False."""
    return 0 <= x <= 59 and 0 <= y <= 14
|
f37d4b0c95403d4549946fe49543238209576db2
| 74,918
|
import re
def clean_text(text: str):
    """Returns string:
    1. Stripped of all whitespaces at start and end
    2. With every internal run of spaces replaced by an underscore "_"
    3. All characters lowercased
    """
    # Renamed local: the original shadowed the function name.
    normalized = re.sub(' +', '_', text.strip())
    return normalized.lower()
|
f61c6c4ed16ba0efce441c4c6398b4414dfb68cf
| 74,920
|
def _override_license_types_spec_to_json(override_license_types_spec):
"""
Given an override license types spec, returns the json serialization of the object.
"""
license_type_strings = []
for license_type in override_license_types_spec:
license_type_strings.append("\"" + license_type + "\"")
return ("[" + ", ".join(license_type_strings) + "]")
|
57ab635a35c44e9deddeb015f197a1b071f8d4ff
| 74,922
|
def channel_form(channels):
    """Construct HTML for the channel selection form."""
    checkbox_tpl = "<input type=\"checkbox\" name=\"channel\" value=\"{0}\">{0}"
    # One checkbox row per channel.
    checkboxes = "".join(
        checkbox_tpl.format(channel) + "</br>" for channel in channels)
    eps = ("Episodes per page (between 1 and 100):"
           "<input type=\"number\" name=\"episodes\" "
           "min=\"1\" max=\"100\" value=\"20\"></br>")
    pages = ("Pages (between 1 and 3):"
             "<input type=\"number\" name=\"pages\" "
             "min=\"1\" max=\"3\" value=\"1\"></br>")
    form_tpl = ("<form action=\"/rt-recently-added\" method=\"post\">{0}{1}{2}"
                "<input type=\"submit\"></form>")
    return form_tpl.format(checkboxes, eps, pages)
|
6632e24c23639e7f328c3260627591ae61e994fc
| 74,925
|
def add_sample_dimension(F, array):
    """
    Prepend a length-one sample dimension (axis 0) to an array.
    :param F: the execution mode of MXNet.
    :type F: mxnet.ndarray or mxnet.symbol
    :param array: the array that the extra dimension is added to.
    :type array: MXNet NDArray or MXNet Symbol
    :returns: the array with the extra leading dimension.
    :rtypes: the same type as the input array
    """
    return F.expand_dims(array, axis=0)
|
a4377bdf3295cd37cdcb21e1a54c23bb04799209
| 74,928
|
def _is_sorted(arr):
""" Returns `True` if the array is sorted ascendingly and `False if it isn't."""
i = 0
while (i+1 < arr.shape[0]):
if arr[i] > arr[i+1]:
return False
i += 1
return True
|
20f6c3e29cffee88a88b95286f82f04053bd0eec
| 74,939
|
def join_if_not_empty(items, sep=" "):
    """
    Joins a list of items with a provided separator, skipping any empty
    item.
    """
    non_empty = [item for item in items if item and len(item) > 0]
    return sep.join(non_empty)
|
f67d6ba33714dae9dce0c3a85172e8ae83562e24
| 74,941
|
def load_data(file_name: str = 'sample1') -> list:
    """
    Loads example data from a whitespace-separated text file.
    The first line is treated as a header and skipped.
    :param file_name: file name of example
    :return: list of rows, each a list of string tokens
    """
    rows = ''
    # `with` guarantees the handle is closed even on failure; the original
    # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    with open(file_name) as file:
        try:
            rows = file.readlines()
        except Exception:
            print("Zły format pliku !")
    matrix = []
    for line in rows[1:]:
        matrix.append(line.strip().split())
    return matrix
|
a4cce21023bc30a60fc47319d2d22919bf0facb8
| 74,945
|
def lorentzian(x, dc, a, gamma, centre):
    """Single Lorentzian function.
    Parameters:
    x: array
        input parameter
    dc: float
        baseline
    a: float
        amplitude
    gamma: float
        linewidth
    centre: float
        peak centre
    Returns the Lorentzian evaluated at x.
    """
    # Renamed local: the original shadowed the function name.
    peak = a * gamma**2 / ((x - centre)**2 + gamma**2)
    return dc + peak
|
371eb2a95a0381490099ff77d39560da093addf4
| 74,948
|
def _flatten(params):
"""Flatten the list objects in a list of DB-API parameters,
e.g. `[1, [2, 3]]` becomes `[1, 2, 3]`.
"""
params_flat = []
for param in params:
if isinstance(param, list):
for item in param:
params_flat.append(item)
else:
params_flat.append(param)
return params_flat
|
d9c87149e06c410014e70132c14b7143cbde8f67
| 74,951
|
import glob
import random
def get_images_paths(pattern='../data/*.jpg', size='full', test_percentage=0.2,
                     fast_set_size=300, verbose=False):
    """Return shuffled train/test splits of the image paths matching a glob
    pattern.
    Args:
        pattern (str): glob pattern.
        size (str): 'fast' for reduced dataset size, 'full' for entire data.
        test_percentage (float): test set size percentage.
        fast_set_size (int): size of the reduced set.
        verbose (bool): unused; kept for interface compatibility.
    Returns:
        training_paths (list): contains all training set image paths.
        test_paths (list): contains all test set image paths.
    """
    paths = glob.glob(pattern)
    random.shuffle(paths)
    if size == 'fast':
        del paths[fast_set_size:]
    n_test = int(len(paths) * test_percentage)
    # first n_test shuffled paths form the test set, the remainder trains
    return paths[n_test:], paths[:n_test]
|
ddf50d311b1be8fc6f3ad443634a524286f8f949
| 74,952
|
def get_elements(tree, tag_name):
    """
    returns a list of all elements of an XML tree that have a certain tag name,
    e.g. layers, edges etc.
    Parameters
    ----------
    tree : lxml.etree._ElementTree
        an ElementTree that represents a complete SaltXML document
    tag_name : str
        the name of an XML tag, e.g. 'nodes', 'edges', 'labels'
    Returns
    -------
    list
        all matching elements, in document order
    Notes
    -----
    NOTE(review): the "//tag" ElementPath expression relies on lxml; the
    stdlib xml.etree rejects paths starting with '//' — confirm lxml is
    always the parser used here.
    """
    return tree.findall("//{0}".format(tag_name))
|
b51ee50855d7eb0c05c6b2ef9b4a7ab36f356c27
| 74,953
|
def float_or_int(v):
    """Cast `v` to int when its value is integral, otherwise to float.

    :param v: anything accepted by float() (number or numeric string)
    :return: int when the float value is integral, otherwise float
        (nan/inf are returned as float)
    """
    vf = float(v)
    try:
        vi = int(vf)
    except (ValueError, OverflowError):
        # nan raises ValueError, +/-inf raises OverflowError
        return vf
    # Bug fix: the original fell off the end (returning None) for
    # non-integral values such as "1.5".
    return vi if vi == vf else vf
|
a85929972b5458bc83f67dbeebc998f8af1a2b5b
| 74,956
|
def number_of_tokens(sentences, category=None):
    """
    Count the number of words in sentences.
    If category is given, only count words of this category, otherwise count all.

    Args:
        sentences (list) : the list of Sentence objects (each exposing a
            ``words`` list whose items have a ``upos`` attribute)
        category (str, default None) : the category to count
    Return:
        int: the count of words in sentences of category
    """
    # Original bug: the `category is None` branch did `somme =+1`
    # (i.e. `somme = +1`), resetting the counter instead of incrementing.
    return sum(
        1
        for sentence in sentences
        for word in sentence.words
        if category is None or word.upos == category
    )
|
4d3261355417d8c474f9861f1750a89092852e63
| 74,960
|
import re
def _filter_tre_measure_columns(df_experiments):
""" get columns related to TRE measures
:param DF df_experiments: experiment table
:return tuple(list(str),list(str)):
"""
# copy the initial to final for missing
cols_init = [col for col in df_experiments.columns if re.match(r'(r)?IRE', col)]
cols_final = [col.replace('IRE', 'TRE') for col in cols_init]
assert len(cols_final) == len(cols_init), 'columns do not match for future zip'
return cols_final, cols_init
|
f215a3474362d97066770112c01ccc1e4f7e532c
| 74,963
|
def get_accessor(identifier: str) -> str:
    """
    Given a SeqRecord identifier string, return the access number as a string.
    e.g. "ENSG00000004776|ENSG00000004776.13|ENST00000004982|ENST00000004982.6" -> "ENST00000004982.6"
    """
    fields = identifier.split('|')
    assert len(fields) == 4
    # The versioned transcript accession is the last of the four fields.
    return fields[-1]
|
d0018b103145f805c52c13e5f1d95a0baa575c7f
| 74,965
|
def rectifier(Iload, fswitch, dVout):
    """
    rectifier Function

    Compute the capacitance (in Farads) needed for the smoothing capacitor
    in a rectifier configuration, given the switching frequency (in Hz),
    the load current (in amps) and the acceptable output voltage ripple.

    Parameters
    ----------
    Iload: float
        The load current that must be met.
    fswitch: float
        The switching frequency of the system.
    dVout: float
        Desired delta-V on the output.

    Returns
    -------
    C: float
        Required capacitance (in Farads) to meet arguments.
    """
    # C = I / (f * dV): charge supplied per switching period over the
    # allowed ripple voltage.
    return Iload / (fswitch * dVout)
|
2b0ec7973d6d9a80df4f79cd6139245eeb7ababe
| 74,966
|
import csv
def read_csv(csv_path, id_column=0, delimiter=","):
    """
    Reads a delimited file's content into a dict. Key is the id column value and the value is list of row values.

    Args:
        csv_path: Path of the CSV file
        id_column: Id column becomes the key of the dict. This column should be unique. Default is the first column.
        delimiter: Value delimiter. Default is comma.
    Returns:
        CSV content dict. Key is the id column value and the value is list of row values.
    """
    records = dict()
    # newline='' is required by the csv module so newlines embedded in
    # quoted fields are parsed correctly.
    with open(csv_path, newline='') as fd:
        rd = csv.reader(fd, delimiter=delimiter, quotechar='"')
        for row in rd:
            _id = row[id_column]
            records[_id] = row
    return records
|
cd0831e26082648e385fa9e345339d869daf9bec
| 74,968
|
def tan2tantwo(tan: float) -> float:
    """Return Tan[2*ArcTan[x]] via the double-angle identity, assuming
    -pi/2 < x < pi/2 (undefined at x = +/-1)."""
    numerator = 2 * tan
    return numerator / (1 + tan) / (1 - tan)
|
6f6317e0594c6965445d9e2588218af85671c198
| 74,972
|
def fao56_penman_monteith(net_radiation, temperature_mean, ws, latent_ht, sat_vp, avp,
                          delta_sat_vp, psy, sol_rad, shf=0.0, time_period="15min"):
    """
    Estimate reference evapotranspiration (ETo) from a hypothetical
    short grass reference surface using the FAO-56 Penman-Monteith equation
    (equation 6 in Allen et al, 1998).

    :param net_radiation: Net radiation at crop surface [MJ m-2 day-1].
    :param temperature_mean: Air temperature at 2 m height [deg Celsius].
    :param ws: Wind speed at 2 m height [m s-1].
    :param latent_ht: Latent heat, e.g. from ``latent_heat(temperature_mean)``.
    :param sat_vp: Saturation vapour pressure [kPa].
    :param avp: Actual vapour pressure [kPa].
    :param delta_sat_vp: Slope of saturation vapour pressure curve [kPa degC-1].
    :param psy: Psychrometric constant [kPa deg C].
    :param sol_rad: Solar radiation, used to decide day vs night constants.
    :param shf: Soil heat flux (G) [MJ m-2 day-1] (default 0.0, reasonable
        for daily or 10-day time steps).
    :param time_period: Period the numeric constant is scaled for.
        Supported values: "daily", "hourly", "half_hourly", "15min".
    :return: Reference evapotranspiration (ETo) from a hypothetical
        grass reference surface [mm day-1].
    :rtype: float
    """
    conversion_by_period = {
        "daily": 900,
        "hourly": 37.5,
        "half_hourly": 18.75,
    }
    # Any unrecognised period falls back to the 15-minute constant.
    time_period_conversion = conversion_by_period.get(time_period, 9.375)
    # Day (sol_rad > 1) and night use different denominator constants.
    cd = 0.24 if sol_rad > 1 else 0.96
    inv_latent = 1 / latent_ht
    radiation_term = (net_radiation - shf) * delta_sat_vp
    aero_term = (time_period_conversion / (temperature_mean + 273)) * psy * ws * (sat_vp - avp)
    denominator = delta_sat_vp + (psy * (1 + cd * ws))
    return (inv_latent * radiation_term + aero_term) / denominator
|
3f3501e3753b63f35237cf595ec026a51bb51640
| 74,975
|
def date_filter_okay(df, start, end):
    """
    Boolean check of whether DataFrame has data between 2 dates (inclusive).

    Parameters
    ----------
    df : pandas.DataFrame
        data to check
    start : datetime-like
        start time
    end : datetime-like
        end time

    Returns
    -------
    bool
    """
    # No need to materialise (and .copy()) the filtered frame just to test
    # for emptiness -- evaluate the boolean index mask directly.
    mask = (df.index >= start) & (df.index <= end)
    return bool(mask.any())
|
3abcea68df64f248b8eda1f7b491e98981835d6d
| 74,977
|
def data_reader(file_path, col_sep='\t'):
    """
    Load a two-column dataset from a delimited text file.

    :param file_path: path of the input file
    :param col_sep: column separator (default: tab)
    :return: list, list: contents, labels
    """
    contents, labels = [], []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.rstrip().split(col_sep)
            # Skip malformed lines that do not have exactly two columns.
            if len(parts) == 2:
                contents.append(parts[0])
                labels.append(parts[1])
    return contents, labels
|
30f93637eb3047f6d146942787e9377c78598c9b
| 74,978
|
import hashlib
def md5(filename):
    """
    Return the hex MD5 digest of a file.

    The file is read in fixed-size chunks so large files never need to fit
    in memory at once.

    :param filename: the name of the file, a string
    """
    block_size = 65536  # 64 KB per read.
    digest = hashlib.md5()
    with open(filename, 'rb') as f:
        # iter() with a b'' sentinel stops at end-of-file.
        for chunk in iter(lambda: f.read(block_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
|
22caf4a1c00a15adb4bf1bab9f44b93f5538dd68
| 74,979
|
from typing import List
def check_version(framework_version: str, supported_versions: List[str]) -> bool:
    """Check if the framework version starts with any supported version prefix."""
    # str.startswith accepts a tuple of candidate prefixes.
    return framework_version.startswith(tuple(supported_versions))
|
34ed46c9d65cefac77ecdcd9daab9ac1ae0acf44
| 74,983
|
def median(numbers):
    """
    Return the median of *numbers*, rounded to one decimal place.

    The input list is not modified.

    Parameters
    ----------
    numbers : list
        a non-empty list of numbers

    Returns
    -------
    median : double
        the median of the list of numbers passed
    """
    # sorted() avoids mutating the caller's list (original sorted in place).
    ordered = sorted(numbers)
    n = len(ordered)
    if n % 2 == 1:
        # Original bug: `numbers[-round(-n / 2)]` relies on round(), whose
        # banker's rounding picks the wrong element for n = 3, 7, 11, ...;
        # the middle index is simply n // 2.
        mid = ordered[n // 2]
    else:
        mid = (ordered[n // 2] + ordered[n // 2 - 1]) / 2
    return round(mid, 1)
|
717256a6f7b959bf80262006e512fc40c0d89bd0
| 74,988
|
def cumulative_sum(points):
    """
    Return the running (cumulative) sum of *points*, starting from 0.
    """
    totals = [0]
    for value in points:
        totals.append(totals[-1] + value)
    return totals
|
18407079c0bcf2f4f5e2c0b2d3659afb3b537c08
| 74,989
|
def parse_int_value(value: str) -> int:
    """ Parses string that is expected to be an integer.

    :param value: text holding a base-10 integer literal, e.g. "42".
    :return: the parsed integer.
    :raises ValueError: if the string is not a valid integer literal.
    """
    return int(value)
|
9888e129031a83f269c067d2697606fd17a6eb17
| 74,991
|
from typing import Any
from typing import get_args
def get_inner(hint: Any, *indexes: int) -> Any:
    """Return an inner type hint by walking *indexes* into the type arguments."""
    for index in indexes:
        hint = get_args(hint)[index]
    return hint
|
b3fcd0d183cb231b13d27f2d4d1f8d53cd9b684e
| 74,992
|
import math
def calculate_heat(score, seconds, score_base, decay_time):
    """
    calculate_heat: The special sauce

    Return `seconds` plus a score-dependent adjustment so that "hotter"
    objects sort above older or lower-scored ones.  An object scoring
    exactly `score_base` is devoid of heat at exactly `decay_time` (the
    adjustment then equals `decay_time`).  Higher scores retain heat past
    `decay_time`; lower scores lose it sooner.  Negative scores make the
    adjustment negative, always yielding less than `seconds`.  Because of
    the logarithm, scores of 1, 0 and -1 are effectively the same.
    """
    sign = math.copysign(1, score)
    # Logarithm base `score_base` dampens very high scores so that nothing
    # stays hot forever; clamping at 1 keeps log() away from 0/negatives.
    magnitude = math.log(max(abs(score), 1), score_base)
    # Scale by decay_time so heat fades as the object approaches that age,
    # then shift the timestamp by the resulting adjustment.
    return seconds + sign * magnitude * decay_time
|
c477c67994481c0076b4b675166bd5327d42fe03
| 75,000
|
import functools
def cached_property(fx):
    """Mimic the @property decorator but cache the result per instance."""
    @functools.wraps(fx)
    def getter(self):
        # Lazily create the per-instance cache; object.__setattr__ is used
        # so an overridden/frozen __setattr__ cannot interfere.
        if not hasattr(self, '_cached_values'):
            object.__setattr__(self, '_cached_values', {})
        cache = self._cached_values
        # Compute the value only on first access.
        if fx.__name__ not in cache:
            cache[fx.__name__] = fx(self)
        return cache[fx.__name__]
    return property(getter)
|
619b08a6a8bbe0e01d08f550a25f021a7b8ad262
| 75,001
|
def generate_column_names_per_user(number_options=3):
    """
    Generate the per-option column names for a forecaster answer table.

    Arguments:
        number_options -- number of options a forecaster has to fulfill
            (default=3); must be between 1 and 26 (one letter suffix each)
    Returns:
        column_names -- a list with three columns per option:
            'answer_option_<x>', 'value_<x>', 'fcast_date_<x>'
    Raises:
        ValueError -- when number_options is outside the range 1..26
    Example of return list (number_options=3):
        'answer_option_a', 'value_a', 'fcast_date_a',
        'answer_option_b', 'value_b', 'fcast_date_b',
        'answer_option_c', 'value_c', 'fcast_date_c'
    """
    if number_options < 1 or number_options > 26:
        # Fixed typo in the original message ("sould" -> "should").
        raise ValueError('Number of options out of range, should be between 1 and 26')
    column_names = []
    for n_opt in range(number_options):
        # Options are suffixed 'a', 'b', 'c', ... in order.
        suffix = chr(n_opt + ord('a'))
        column_names.append("answer_option_{}".format(suffix))
        column_names.append("value_{}".format(suffix))
        column_names.append("fcast_date_{}".format(suffix))
    return column_names
|
542dd5b9749b98785716fba5298fcd5dee47caa3
| 75,009
|
def _JoinChildNodes(tag):
"""Join child nodes into a single text.
Applicable to leafs like 'summary' and 'detail'.
Args:
tag: parent node
Returns:
a string with concatenated nodes' text representation.
"""
return ''.join(c.toxml() for c in tag.childNodes).strip()
|
2572160e9addb9a92aa015621468ab3ff5ed1893
| 75,011
|
import binascii
def decode_base64(_base64):
    """Decode a base64 string into its raw binary form and return it."""
    decoded = binascii.a2b_base64(_base64)
    return decoded
|
e2a395a44f98ac82a33374d86ce10a80bcbea1e1
| 75,013
|
import re
def abbreviate(words):
    """Return an acronym for the words passed.

    Words may be separated by spaces or hyphens, e.g.
    "Complementary metal-oxide semiconductor" -> "CMOS".
    """
    # Filtering out empty segments fixes the IndexError the original raised
    # on consecutive or trailing delimiters (e.g. "a--b"); joining a
    # generator also avoids the quadratic `acronym +=` string build.
    return ''.join(word[0] for word in re.split(' |-', words.upper()) if word)
|
65ffb7a4a7874e5a4011634db6965e5da955ec5a
| 75,014
|
def filter_by_name(cases, names):
    """Select the Simulations whose ``name`` is contained in *names*,
    ordered to match the order of *names*.  A single name may be passed
    as a plain string.
    """
    if isinstance(names, str):
        names = [names]
    selected = [case for case in cases if case.name in names]
    # Stable sort keeps original ordering among cases sharing a name.
    selected.sort(key=lambda case: names.index(case.name))
    return selected
|
64c3f4b0b77ba8106b276b74e6a01bd3f6c91ce4
| 75,015
|
def _setup_animation_range_from_times(stage, times):
"""Set the start/end times for a USD stage using a provided start/end time.
Args:
stage (`pxr.Usd.Stage`):
The composed stage that will be modified by this function.
times (tuple[float or int, float or int]):
The start and end time that will be set onto `stage`.
Returns:
tuple[float or int, float or int]: Return the original start/end times.
"""
start, end = times
stage.SetStartTimeCode(start)
stage.SetEndTimeCode(end)
stage.SetMetadata(
"comment",
"This Layer's start/end time codes were set using an explicit start/end time.",
)
return (start, end)
|
b7be16aebc26d66b4595cbec0e83d5292f229850
| 75,025
|
def file_metrics(file_path):
    """Retrieve the file stats.

    :param file_path: a ``pathlib.Path``-like object exposing ``.stat()``.
    :return: the ``os.stat_result`` for the path (size, mtimes, mode, ...).
    """
    return file_path.stat()
|
3a2765a2880659429a3eed9a714d57cc615835c6
| 75,026
|
import json
def get_prefix_counts(filename):
    """Builds a dictionary mapping key prefixes to their (weighted) counts.

    Keys ending in 'Tooltip' weigh 0.2, keys ending in 'HelpUrl' weigh 0.1,
    all others weigh 1.  The '@metadata' prefix is skipped.  The grand total
    is stored under the special key 'ALL'.

    Args:
        filename: The name of a JSON file containing an object.
    Returns:
        A dictionary mapping each key prefix to its count, plus 'ALL'.
    """
    prefixes = {}
    total = 0
    # `with` guarantees the file is closed even if json.load raises
    # (the original leaked the handle on a parse error).
    with open(filename) as f:
        keys = json.load(f)
    for key in keys:
        prefix = key.split('.')[0]
        if prefix != '@metadata':
            weight = 1
            if key.endswith('Tooltip'):
                weight = .2
            elif key.endswith('HelpUrl'):
                weight = .1
            prefixes[prefix] = prefixes.get(prefix, 0) + weight
            total += weight
    prefixes['ALL'] = total
    return prefixes
|
6b50160cc6aca551ae9189d4a66978f0bb955a07
| 75,029
|
def get_pixel(image, i, j):
    """
    Return the pixel at (i, j) of *image*, or None when out of bounds.

    :param image: object exposing PIL's ``size`` and ``getpixel`` interface.
    :param i: x-coordinate of the pixel in the image.
    :param j: y-coordinate of the pixel in the image.
    :return: the extracted pixel value, or None if (i, j) is outside the image.
    """
    width, height = image.size
    # Valid indices run 0..width-1 / 0..height-1; the original `>` check
    # let i == width (or j == height) through to getpixel, which raises.
    if i >= width or j >= height:
        return None
    return image.getpixel((i, j))
|
36fe35a8b3cf46b10d199c26f165885e37f9c0e4
| 75,031
|
from typing import Any
def to_int(element: Any) -> int:
    """Convert given element into `int` data type.

    :param element: any value accepted by ``int()`` (number or numeric string).
    :return: the converted integer.
    :raises ValueError: (or TypeError) when the element cannot be converted.
    """
    return int(element)
|
7420d22f5991cbedbcf15467b804bb6769af4f44
| 75,035
|
def CTL_CODE(DeviceType, Function, Method, Access):
    """Calculate a DeviceIoControl code just like in the driver's C code.

    Windows ioctl layout: device type in bits 16-31, access in bits 14-15,
    function in bits 2-13 and transfer method in bits 0-1.
    """
    code = DeviceType << 16
    code |= Access << 14
    code |= Function << 2
    code |= Method
    return code
|
e135c96b94ab1d9ed083f19702129855a56f60e0
| 75,036
|
def SegmentContains(main_l, main_r, l, r):
    """Return True if [l, r) is contained inside [main_l, main_r).

    Args:
      main_l: int. Left border of the first segment.
      main_r: int. Right border (exclusive) of the first segment.
      l: int. Left border of the second segment.
      r: int. Right border (exclusive) of the second segment.
    """
    # The two bounds are checked independently; l <= r is not required.
    lower_ok = main_l <= l
    upper_ok = r <= main_r
    return lower_ok and upper_ok
|
761b064131b7885327e59818f7838573ee292264
| 75,037
|
def transformToRGB(lst):
    """
    Convert a list of RGB triples from the 0-255 range to the 0-1 range.
    """
    def scaled(channel):
        # Same float(x)/255. arithmetic as the classic 0-255 -> 0-1 mapping.
        return float(channel) / 255.
    return [(scaled(r), scaled(g), scaled(b)) for r, g, b in lst]
|
ec83893155bfaca7cbbec8281bd84ccb8401a48f
| 75,050
|
import zlib
def crc32(filename, chunk_size=1024):
    """Compute the CRC32 of a file and return it as an 8-digit uppercase
    hexadecimal string.  Reading in chunks keeps memory usage constant,
    so even large files can be processed."""
    crc = 0
    with open(filename, "rb") as f:
        # iter() with a b'' sentinel stops when read() signals EOF.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            crc = zlib.crc32(chunk, crc) & 0xffffffff
    return '{:08X}'.format(crc)
|
239642f03f8315f07dcce6cb95f9410000d51b1a
| 75,052
|
import six
def _bytes(*vals):
    """
    Private helper that packs a list of byte values into a binary string
    appropriate for the running Python version: a `str` on Python 2, a
    `bytes` value on Python 3.  Values should be in byte range (0x00-0xff)::

        _bytes(1, 2, 3) == six.b('\\x01\\x02\\x03')

    :param vals: arguments are a list of unsigned bytes.
    :return: a `str` in Python 2, or `bytes` in Python 3
    """
    pieces = [six.int2byte(v) for v in vals]
    return six.b('').join(pieces)
|
49d3648ed733b1d80824d97695b32486d0e6d8b3
| 75,057
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.