content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def knapsack_dp(val, wt, wt_cap):
    """Solve the 0-1 knapsack problem with bottom-up dynamic programming.

    Time complexity: O(nC) and space complexity O(nC), where n is the
    number of items and C is the weight capacity.

    Returns the full DP table T, where T[i][j] is the best value
    achievable with items 0..i and capacity j.
    """
    n = len(wt)
    # T[i][j]: best value using items 0..i with capacity j.
    # Column 0 (zero capacity) stays 0 for every row.
    T = [[0] * (wt_cap + 1) for _ in range(n)]
    # Base row: only item 0 is available.
    for cap in range(1, wt_cap + 1):
        T[0][cap] = val[0] if wt[0] <= cap else 0
    for i in range(1, n):
        for cap in range(1, wt_cap + 1):
            skip = T[i - 1][cap]
            if wt[i] <= cap:
                # Item fits: choose the better of taking or skipping it.
                take = val[i] + T[i - 1][cap - wt[i]]
                T[i][cap] = take if take > skip else skip
            else:
                # Item does not fit.
                T[i][cap] = skip
    return T
def split_s3_path(s3_path):
    """Get a bucket name and object path from an 's3 path'.

    An S3 path is one in which the object path is appended to the bucket name.
    For example: my-music-bucket/path/to/file.mp3
    S3 paths may also begin with the character sequence "S3://" to disambiguate
    it from a local path in arguments that might accept both.

    Returns a tuple (bucket, key); key is None when the path has no object
    component.  (Fixed: the split branch previously returned a list even
    though the docstring promised a tuple.)
    """
    # If the s3_path begins with an "S3://" or "s3://", strip it off.
    if s3_path.startswith(('S3://', 's3://')):
        s3_path = s3_path[5:]
    # Split after the *first* slash: the bucket name can't contain a slash,
    # so everything after it is the S3 object name.
    if '/' in s3_path:
        bucket, key = s3_path.split('/', 1)
        return (bucket, key)
    return (s3_path, None)
def get_files_from_response(response):
    """Return the list of files from a Slack API response.

    :param response: decoded Slack API JSON response (dict)
    :return: the value stored under ``'files'``, or ``[]`` when the key
        is absent
    """
    if 'files' in response:
        return response['files']
    return []
import torch
from typing import Optional
def sentence_pattern(sent: torch.Tensor, pad_mask: Optional[torch.Tensor] = None):
    """Build a token-equality pattern for each sentence.

    Entry [b, i, j] of the result is True iff sent[b, i] == sent[b, j].
    When *pad_mask* is given, columns at padded positions are zeroed
    (filled with False) for every row.

    :param sent: (batch, max_len) token-id tensor
    :param pad_mask: (batch, max_len) boolean padding mask
    :return: (batch, max_len, max_len) boolean pattern
    """
    seq_len = sent.size(1)
    # rows[b, i, j] == sent[b, j]; cols[b, i, 0] == sent[b, i] broadcasts
    # over j, so the comparison yields sent[b, i] == sent[b, j].
    rows = sent.unsqueeze(1).expand(-1, seq_len, -1)
    cols = sent.unsqueeze(-1)
    pattern = rows == cols
    if pad_mask is not None:
        # Zero out columns belonging to padded tokens in every row.
        pattern = pattern.masked_fill(pad_mask.unsqueeze(1), 0)
    return pattern
def read_file(filename: str):
    """Return the full contents of *filename* as a string.

    :param filename: path of the file to read
    :returns: file contents (string)
    """
    with open(filename, "r") as handle:
        return handle.read()
def mouse_on_button(scaled_mouse, button_pos, button_size):
    """Tell whether the mouse pointer is over the button (bounds inclusive).

    Parameters
    ----------
    scaled_mouse : int * int
        Scaled position of the mouse pointer
    button_pos : int * int
        Position of the button (top-left corner)
    button_size : int * int
        Width and height of the button

    Returns
    -------
    bool
        True if the pointer is over the button
    """
    mouse_x, mouse_y = scaled_mouse[0], scaled_mouse[1]
    left, top = button_pos[0], button_pos[1]
    width, height = button_size[0], button_size[1]
    inside_x = left <= mouse_x <= left + width
    inside_y = top <= mouse_y <= top + height
    return inside_x and inside_y
import pathlib
def get_basename(path: str) -> str:
    """Extract basename from given path ``path``.

    Strips any ``.py`` suffix and ``test_`` prefix occurrences from the
    final path component.
    """
    name = pathlib.Path(path).name
    for token in ('.py', 'test_'):
        name = name.replace(token, '')
    return name
from typing import Tuple
def get_tuples(val01: int, val02: int) -> Tuple[int, int]:
    """ Constructs a tuple out of integers. """
    return (val01, val02)
def getWords(filename, length=None):
    """
    Return all words from the dictionary file *filename*.

    Each line is stripped and lower-cased.  When *length* is given, only
    words of exactly that length are returned.

    :param filename: path to a dictionary file, one word per line
    :param length: optional exact word length to filter on
    :return: list of (filtered) lower-cased words
    """
    words = []
    with open(filename) as f:
        for line in f:
            word = line.strip().lower()
            # `length is None` accepts every word; the original also tested
            # `length is not None` redundantly in the second clause.
            if length is None or len(word) == length:
                words.append(word)
    return words
from typing import List
from typing import Optional
from typing import Dict
def convert_sequences_to_polygons(sequences: List, height: Optional[int] = None, width: Optional[int] = None) -> Dict:
    """
    Convert flat coordinate sequences into lists of point dictionaries.

    Parameters
    ----------
    sequences: list
        Either a single flat list [x1, y1, ..., xn, yn] or a list of such
        lists.
    height: int
        Maximum height for a polygon coordinate (clip bound), optional.
    width: int
        Maximum width for a polygon coordinate (clip bound), optional.

    Returns
    -------
    dict
        {"path": [[{"x": ..., "y": ...}, ...], ...]}

    Raises
    ------
    ValueError
        If *sequences* is empty or its elements are not numeric lists.
    """
    if not sequences:
        raise ValueError("No sequences provided")
    # A single flat sequence is wrapped so the rest of the code can treat
    # the input uniformly as a list of sequences.
    if not isinstance(sequences[0], list):
        sequences = [sequences]
    if not isinstance(sequences[0][0], (int, float)):
        raise ValueError("Unknown input format")
    polygons = []
    for sequence in sequences:
        points = []
        # Walk the flat sequence two values at a time (x, y); a trailing
        # unpaired value is ignored, matching zip-based grouping.
        for idx in range(0, len(sequence) - 1, 2):
            px, py = sequence[idx], sequence[idx + 1]
            # Clip coordinates to the image size when bounds are given.
            if width:
                px = min(px, width - 1)
            if height:
                py = min(py, height - 1)
            points.append({"x": max(px, 0), "y": max(py, 0)})
        polygons.append(points)
    return {"path": polygons}
def broadcast(op, xs, ys):
    """Apply binary *op* elementwise over `xs` and `ys` with broadcasting.

    If one sequence has length 1, it is repeated to match the other.

    Args:
        op (function): Binary operation.
        xs (sequence): First sequence.
        ys (sequence): Second sequence.

    Returns:
        tuple: `op` applied to each pair of `zip(xs, ys)` after broadcasting.

    Raises:
        ValueError: if the lengths are incompatible after broadcasting.
    """
    if len(xs) == 1 and len(ys) > 1:
        # Repeat the singleton `xs` to match `ys`.
        xs = xs * len(ys)
    elif len(ys) == 1 and len(xs) > 1:
        # Repeat the singleton `ys` to match `xs`.
        ys = ys * len(xs)
    if len(xs) != len(ys):
        raise ValueError(f'Inputs "{xs}" and "{ys}" could not be broadcasted.')
    return tuple(map(op, xs, ys))
def bellman_ford(g, src):
    """
    Compute shortest-path distances from ``src`` to vertices of ``g``.

    Graph ``g`` may be directed or undirected, with numeric (possibly
    negative) edge weights obtained via ``e.element()``.

    Returns a dictionary mapping each vertex whose distance was relaxed
    at least once to its shortest distance from ``src``, or the string
    "There is a negative-weight cycle" when one is detected.
    """
    # Initial tentative distances: 0 at the source, infinity elsewhere.
    dist = {v: (0 if v is src else float("inf")) for v in g.vertices()}
    relaxed = {}
    # Standard Bellman-Ford: relax every edge |V| - 1 times.
    for _ in range(g.vertex_count() - 1):
        for edge in g.edges():
            u, v = edge.endpoints()
            weight = edge.element()
            if dist[u] + weight < dist[v]:
                dist[v] = dist[u] + weight
                relaxed[v] = dist[v]
    # One more pass: any further improvement implies a negative cycle.
    for edge in g.edges():
        u, v = edge.endpoints()
        if dist[u] + edge.element() < dist[v]:
            return "There is a negative-weight cycle"
    return relaxed
def get_high_score(empty_squares, scores):
    """
    Return the highest score among the given empty squares.

    ``empty_squares`` holds (row, col) coordinates into the 2-D ``scores``
    grid; an empty coordinate list yields -inf.
    """
    return max(
        (scores[coord[0]][coord[1]] for coord in empty_squares),
        default=float("-inf"),
    )
from datetime import datetime
def get_years(months=0, refer=None):
    """
    Return the year-month string offset by *months* from a reference time.

    :param months: (int) month delta; positive moves forward in time,
        negative moves backward
    :param refer: (datetime or any object with ``year`` and ``month``
        attributes) reference time; defaults to the current time
    :return: (string) year-month in 'YYYYMM' form

    Example::

        get_years(-5, datetime(2018, 6, 1))  # -> '201801'
        get_years(7, datetime(2018, 6, 1))   # -> '201901'
    """
    if refer is None:
        refer = datetime.now()
    # Flatten the reference date into a total month count.
    try:
        total_months = refer.year * 12 + refer.month
    except Exception:
        raise TypeError('refer except {}, got an {}'.format(type(datetime.now()), type(refer)))
    total_months += months
    year, month = divmod(total_months, 12)
    # divmod yields month 0 for December; normalise to the 1..12 range.
    if month == 0:
        year -= 1
        month = 12
    return '%04d%02d' % (year, month)
def get_types_distribution(articles):
    """
    Count articles per document type.

    :param articles: PyMongo collection of articles
    :return: dict mapping each ``doc_type`` value to the number of
        articles of that type
    """
    pipeline = [{"$group": {"_id": "$doc_type", "count": {"$sum": 1}}}]
    return {rec["_id"]: rec["count"] for rec in articles.aggregate(pipeline)}
from typing import Iterator
import warnings
def _read_next_tag(fp: Iterator[str]) -> bytes:
"""Read the next tag from the tag file.
Attempts to read the next item from the iterator (usually a text file) `fp`
for use as the printable final 18 bytes of a block tag. bootloader_hd
prints the last 18 bytes of all but the final block's tag to the screen,
making these tags useful progress indicators. Tags can be up to eighteen
bytes long and may only contain the characters A..Z, 0..9, and "./-?" (quotes
not included), which are all that's defined in the ROM. Any tag with fewer
than eighteen characters will be padded with spaces.
Args:
fp: iterator to read tags from.
Returns: 18-byte string for use as the printable part of a block tag.
Raises:
IOError: if end of file is encountered.
RuntimeError: if a tag uses characters not found in the Lisa Boot ROM.
"""
# Read "raw" line and clip away CR/CRLF/LF.
try:
tag = next(fp)
except StopIteration:
raise IOError('ran out of tags in the tag file.')
tag = tag.rstrip('\r\n')
# Scan line for chars not in the ROM.
if any(c not in '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ ./-?' for c in tag):
raise RuntimeError('tag {} has chars not found in the ROM'.format(tag))
# Warn if the tag is too long and truncate to 20 bytes.
if len(tag) > 18:
warnings.warn('tag {} will be clipped to 18 bytes'.format(tag), UserWarning)
tag = tag[:18]
# Space-pad, convert to bytes, and return.
return bytes(tag + (' ' * (18 - len(tag))), 'ascii') | c57ebd60de8baa17b0615c9b6c31300433bf5118 | 101,459 |
import math
import random
def get_learning_rate(low, high):
    """ Return LogUniform(low, high) learning rate. """
    # Sample uniformly in log space, then map back to linear space.
    log_lr = random.uniform(math.log(low), math.log(high))
    return math.exp(log_lr)
import re
def normalize_font_style_declaration(value):
    """Lowercase the leading, case-insensitive part of a font declaration.

    Only the font name (at the end of the declaration, preceded by a size
    or line height) is case sensitive; there may be several names.  The
    case-insensitive prefix therefore runs up to and including the last
    token starting with a digit (a size or line height) that follows a run
    of valid tokens (sizes/lengths or identifiers).

    See http://www.w3.org/TR/css-fonts-3/#font-prop
    """
    pattern = re.compile(r"""
    ^(
        (\d[^\s,]*|\w[^\s,]*) # <size>, <length> or <identifier>
        (\s+|\s*,\s*) # <whitespace> and/or comma
    )* # Repeat until last
    \d[^\s,]* # <size> or <line-height>
    """, re.VERBOSE)
    def lowercase_match(match):
        return match.group().lower()
    return pattern.sub(lowercase_match, value)
def tomatrix(coeffs):
    """Form the quadratic form matrix (see equations 11 and 12 in paper).

    ``coeffs`` holds 9 spherical-harmonic lighting coefficients, each with
    3 colour channels; the result is a nested dict ``matrix[a][b][col]``
    for a, b in 0..3 and col in 0..2.
    """
    c1, c2, c3, c4, c5 = 0.429043, 0.511664, 0.743125, 0.886227, 0.247708
    matrix = {a: {b: {} for b in range(4)} for a in range(4)}
    for col in range(3):
        # Channel slice of the 9 SH coefficients.
        L = [coeffs[i][col] for i in range(9)]
        # Equation 12: symmetric 4x4 form per colour channel.
        matrix[0][0][col] = c1 * L[8]    # c1 L_{22}
        matrix[0][1][col] = c1 * L[4]    # c1 L_{2-2}
        matrix[0][2][col] = c1 * L[7]    # c1 L_{21}
        matrix[0][3][col] = c2 * L[3]    # c2 L_{11}
        matrix[1][0][col] = c1 * L[4]    # c1 L_{2-2}
        matrix[1][1][col] = -c1 * L[8]   # -c1 L_{22}
        matrix[1][2][col] = c1 * L[5]    # c1 L_{2-1}
        matrix[1][3][col] = c2 * L[1]    # c2 L_{1-1}
        matrix[2][0][col] = c1 * L[7]    # c1 L_{21}
        matrix[2][1][col] = c1 * L[5]    # c1 L_{2-1}
        matrix[2][2][col] = c3 * L[6]    # c3 L_{20}
        matrix[2][3][col] = c2 * L[2]    # c2 L_{10}
        matrix[3][0][col] = c2 * L[3]    # c2 L_{11}
        matrix[3][1][col] = c2 * L[1]    # c2 L_{1-1}
        matrix[3][2][col] = c2 * L[2]    # c2 L_{10}
        # c4 L_{00} - c5 L_{20}
        matrix[3][3][col] = c4 * L[0] - c5 * L[6]
    return matrix
def submit_gcp_connector_sync_action(api, configuration, api_version, api_exception, gcp_connector_id):
    """ Submits a synchronize action of a GCP connector.

    :param api The Deep Security API exports.
    :param configuration The configuration object to pass to the API client.
    :param api_version The API version to use.
    :param api_exception The Deep Security API exception module.
    :param gcp_connector_id The GCP connector ID of the target GCP connector.
    :return The created Action object which contains the ID and status of the
        action, or None when the API call raised an exception.
    """
    # Create the GCPConnectorActionsApi instance and an Action object to synchronize the GCP connector
    api_instance = api.GCPConnectorActionsApi(api.ApiClient(configuration))
    gcp_connector_action = api.Action()
    gcp_connector_action.type = "synchronize"
    try:
        # Call the create_gcp_connector_action API to create a synchronize action for the target GCP connector
        api_response = api_instance.create_gcp_connector_action(gcp_connector_id, gcp_connector_action, api_version)
        return api_response
    except api_exception as e:
        # Fixed: the message previously named a non-existent
        # "create_google_connector_action" method.
        print("An exception occurred when calling GCPConnectorActionsApi.create_gcp_connector_action: %s\n" % e)
def GetFullyQualifiedScopePrefix(scope):
    """Gets the fully qualified scope prefix.

    Args:
        scope: the Definition for the scope from which the type must be accessed.

    Returns:
        the fully qualified scope prefix string (dot-separated names of all
        scopes below the root, with a trailing dot; empty for the root).
    """
    stack = scope.GetParentScopeStack() + [scope]
    # Skip the root scope's name, then join with a trailing '.' separator.
    names = [entry.name for entry in stack[1:]]
    names.append('')
    return '.'.join(names)
def _toVersionTuple(versionString):
"""Split a version string like "10.5.0.2345" into a tuple (10, 5, 0, 2345).
"""
return tuple((int(part) if part.isdigit() else part)
for part in versionString.split(".")) | 6eba8bcca4dfeca386c6e4b65eb15d9fc7f2a054 | 101,473 |
import torch
def stack_as_batch(tensor: torch.Tensor, n_repeats=1, dim=0) -> torch.Tensor:
    """Insert a new dimension at *dim* and stack *tensor* n_repeats times along it."""
    # Stacking n copies along a fresh dimension is equivalent to
    # unsqueeze followed by repeat along that dimension.
    return torch.stack([tensor] * n_repeats, dim=dim)
def split_path(path):
    """
    Normalise GCSFS path string into bucket and key.

    Strips an optional 'gs://' prefix and surrounding slashes; when the
    path has no key part, the key is an empty string.
    """
    if path.startswith('gs://'):
        path = path[5:]
    # Drop leading and trailing slashes in one pass.
    path = path.strip('/')
    if '/' not in path:
        return path, ""
    return path.split('/', 1)
from datetime import datetime
def day_of_year(year: int,
                month: int,
                day: int) -> int:
    """
    Return the 1-based ordinal day of the year for the given date.
    """
    return datetime(year, month, day).timetuple().tm_yday
import torch
def neigh_diff_standard(bmus: torch.Tensor, positions: torch.Tensor) -> torch.Tensor:
    """
    Positional difference function.

    Computes the elementwise index difference between Best Matching Units
    (`bmus`) and `positions`, where `positions` are indices of elements
    inside the SOM grid.

    Parameters
    ----------
    bmus : torch.Tensor
        The list of Best Matching Units (2D indices)
    positions : torch.Tensor
        The 2D tensor of grid element indices
    """
    return torch.sub(bmus, positions)
def underscore_filter(s):
    """Change spaces to underscores and make lowercase.

    Each individual space becomes one underscore, so consecutive spaces
    yield consecutive underscores.
    """
    return s.lower().replace(' ', '_')
def get_pr_list(script, project_key, repo_slug, state="OPEN", target_branch=None):
    """
    Fetch the list of pull requests for a repository.

    :param script: a TestScript instance used to issue the HTTP request
    :type script: TestScript
    :param project_key: The project key
    :type project_key: str
    :param repo_slug: The repo slug
    :type repo_slug: str
    :param state: The state filter ("OPEN", "MERGED", "DECLINED", "ALL")
    :type state: str
    :param target_branch: Optional target branch to filter on
    :type target_branch: str
    :return: The page payload
    :rtype: str
    """
    url = "/projects/%s/repos/%s/pull-requests" % (project_key, repo_slug)
    params = dict(avatarSize="64", order="newest", start="0", state=state)
    if target_branch:
        params["at"] = target_branch
    # Possible extra filters (unused):
    # "role.1": "AUTHOR/REVIEWER",
    # "username.1": "admin",
    return script.http("GET", url, params)
def file_matches(s, l):
    """
    :param s: filename (suffix to match)
    :param l: list of filepaths
    :return: list of filepaths ending with *s*
    """
    return [path for path in l if path.endswith(s)]
from typing import Any
def check_is_editing_something(match: Any) -> bool:
    """Checks if an editing match is actually going to do editing.

    An edit regex can match without doing any editing because every editing
    field is optional — e.g. 'edit issue "BOTS-13"' matches but performs no
    action.  Returns True only if at least one editable field was captured.

    Parameters:
        - match: The regex match object.
    """
    fields = (
        "summary",
        "project_key",
        "type_name",
        "description",
        "assignee",
        "priority_name",
        "labels",
        "due_date",
    )
    return any(match.group(name) for name in fields)
def calibrate_2band(instr1, instr2, airmass1, airmass2, coeff1, coeff2,
                    zero_key='zero', color_key='color', extinct_key='extinction'):
    """
    Jointly calibrate two instrumental magnitudes.

    Solves the coupled photometric equations::

        i_0 = i + A_i + C_i (i - z) + k_i X
        z_0 = z + A_z + C_z (z - i) + k_z X

    where i_0/z_0 are instrumental magnitudes, A the zero points, C the
    color terms, k the atmospheric coefficients, and X the airmass.
    With b_i = i_0 - A_i - k_i X and b_z = z_0 - A_z - k_z X the solution is::

        i = (C_i b_z + C_z b_i + b_i) / d
        z = (C_z b_i + C_i b_z + b_z) / d,   d = 1 + C_i + C_z

    Parameters
    ----------
    instr1, instr2: array-like
        Instrumental magnitudes of filters 1 and 2
    airmass1, airmass2: array-like
        Airmass for each observation in filters 1 and 2
    coeff1, coeff2: mapping
        Calibration coefficients for each filter, keyed by *zero_key*
        (zero point), *color_key* (color term) and *extinct_key*
        (extinction coefficient)

    Returns
    -------
    (mag1, mag2): tuple of array-like
        Calibrated magnitudes for both filters
    """
    # b terms: instrumental magnitude corrected for zero point and extinction.
    resid1 = instr1 - coeff1[zero_key] - coeff1[extinct_key] * airmass1
    resid2 = instr2 - coeff2[zero_key] - coeff2[extinct_key] * airmass2
    # Shared denominator d = 1 + C_1 + C_2.
    denom = 1 + coeff1[color_key] + coeff2[color_key]
    mag1 = (coeff1[color_key] * resid2 + resid1 * (1 + coeff2[color_key])) / denom
    mag2 = (coeff2[color_key] * resid1 + resid2 * (1 + coeff1[color_key])) / denom
    return (mag1, mag2)
import json
def json_to_jsonlines(json_file):
    """Naive implementation of json to jsonlines conversion.

    Args:
        json_file: A string, the file name of the json file (must contain
            a JSON array of records).

    Returns:
        The input json file's records as newline-delimited json.
    """
    with open(json_file) as handle:
        records = json.load(handle)
    return '\n'.join(json.dumps(record) for record in records)
def feuler(f, t, y, dt):
    """
    One step of forward Euler, the simplest possible ODE solver.

    :param f: function defining ODE dy/dt = f(t, y), two arguments
    :param t: time
    :param y: solution at t
    :param dt: step size
    :return: approximation of y at t + dt
    """
    slope = f(t, y)
    return y + dt * slope
def _query_set_limit(query: str, limit: int) -> str:
"""
Add limit to given query. If the query has limit, changes it.
Args:
query: the original query
limit: new limit value, if the value is negative return the original query.
Returns: query with limit parameters
"""
if limit < 0:
return query
# the query has the structure of "section | section | section ..."
query_list = query.split('|')
# split the query to sections and find limit sections
changed = False
for i, section in enumerate(query_list):
section_list = section.split()
# 'take' and 'limit' are synonyms.
if section_list and section_list[0] == 'limit' or section_list[0] == 'take':
query_list[i] = f" limit {limit} "
changed = True
# if the query have not been changed than limit is added to the query
if not changed:
query_list.append(f" limit {limit} ")
fixed_query = '|'.join(query_list)
return fixed_query | e91bf2e69ea849c1da16f69b5afb5db9272e1f77 | 101,529 |
import hashlib
def get_hash_string(enc_model):
    """
    Encode model name using md5 hashing.

    Parameters
    ----------
    enc_model: str or bytes
        Model name to hash.

    Returns
    -------
    hashlib md5 object
        Call ``.hexdigest()`` on it to obtain the hex string.
    """
    # hashlib requires bytes; the original crashed with TypeError when
    # given the str input the docstring describes.
    if isinstance(enc_model, str):
        enc_model = enc_model.encode('utf-8')
    return hashlib.md5(enc_model)
def unique(dataset, n_threads, return_counts=False):
    """ Find unique values in dataset.

    Args:
        dataset (z5py.Dataset)
        n_threads (int): number of threads
        return_counts (bool): return counts of unique values (default: False)
    """
    dtype = dataset.dtype
    # Look up the dtype-specialised implementation by attribute name;
    # getattr is safer and clearer than building a string for eval().
    prefix = 'unique_with_counts_' if return_counts else 'unique_'
    function = getattr(z5_impl, '%s%s' % (prefix, dtype))
    return function(dataset._impl, n_threads)
import random
def rabinMiller(num: int) -> bool:
    """Rabin-Miller primality test.

    Uses the `Rabin-Miller`_ probabilistic primality test (5 random bases)
    to check if a given number is prime.

    Args:
        num: Number to check if prime.

    Returns:
        True if num is prime, False otherwise.

    Note:
        * The test is probabilistic and can return false positives for
          pseudoprimes (never false negatives for primes).

    .. _Rabin-Miller:
        https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
    """
    # Handle the small primes first; the original wrongly reported 2 as
    # composite because the even-number check ran before this case.
    if num in (2, 3):
        return True
    if num < 2 or num % 2 == 0:
        return False  # Rabin-Miller doesn't work on even integers.
    # Write num - 1 as 2^t * s with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(5):  # Try to falsify num's primality 5 times.
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:  # This test does not apply if v is 1.
            i = 0
            while v != num - 1:
                if i == t - 1:
                    return False
                i += 1
                v = (v * v) % num
    return True
def flatten(xs):
    """
    Flatten any recursive list or tuple into a single list.

    For instance:
    - `flatten(x) => [x]`
    - `flatten([x]) => [x]`
    - `flatten([x, [y], [[z]]]) => `[x, y, z]`
    """
    if not isinstance(xs, (list, tuple)):
        return [xs]
    flat = []
    for item in xs:
        flat.extend(flatten(item))
    return flat
def getOffsetLength(filename, line_number, line_count):
    """
    Calculate a field's byte offset and length from a line number and count.

    The offset is the total length of the first ``line_number - 1`` lines;
    the length is the total length of the next ``line_count`` lines.
    """
    offset = 0
    length = 0
    with open(filename, 'r') as handle:
        lines_to_skip = line_number
        lines_to_count = line_count
        for line in handle:
            if lines_to_skip > 1:
                # Still before the field: accumulate the offset.
                offset += len(line)
                lines_to_skip -= 1
            elif lines_to_count > 0:
                # Inside the field: accumulate its length.
                length += len(line)
                lines_to_count -= 1
            else:
                break
    return offset, length
import json
def update_error_message(chart_data_json_str, acq_state, hat_selection,
                         active_channels):
    """
    Build the error message to display for the current application state.

    Args:
        chart_data_json_str (str): JSON string with the current chart data -
            triggers the callback.
        acq_state (str): The application state of "idle", "configured",
            "running" or "error" - triggers the callback.
        hat_selection (str): JSON string describing the selected MCC 134
            DAQ HAT.
        active_channels ([int]): User-selected active channel indices.

    Returns:
        str: The error message to display ('' when there is no error).
    """
    messages = []
    if acq_state == 'running':
        chart_data = json.loads(chart_data_json_str)
        # .get() returns None (falsy) for missing keys, matching the
        # original "key present and truthy" checks.
        if chart_data.get('open_tc_error'):
            messages.append('Open thermocouple; ')
        if chart_data.get('over_range_error'):
            messages.append('Temp outside valid range; ')
        if chart_data.get('common_mode_range_error'):
            messages.append('Temp outside common-mode range; ')
    elif acq_state == 'error':
        if not hat_selection:
            messages.append('Invalid HAT selection; ')
        if len(active_channels) <= 0:
            messages.append('Invalid channel selection (min 1); ')
    return ''.join(messages)
def select_points_in_frustum(points_2d, x1, y1, x2, y2):
    """
    Select points inside a 2D frustum given in image coordinates.

    :param points_2d: point cloud projected into 2D, shape (N, 2)
    :param x1: left bound (inclusive)
    :param y1: upper bound (inclusive)
    :param x2: right bound (exclusive)
    :param y2: lower bound (exclusive)
    :return: boolean mask of points inside the frustum
    """
    xs = points_2d[:, 0]
    ys = points_2d[:, 1]
    # Elementwise product of boolean masks acts as a logical AND.
    keep_ind = (xs >= x1) * (ys >= y1) * (xs < x2) * (ys < y2)
    return keep_ind
import functools
import traceback
def print_exc(function):
    """
    Decorator that prints a traceback for any exception raised by
    *function*, then re-raises it unchanged.
    """
    @functools.wraps(function)
    def inner(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception:
            # Show the traceback, but let the caller still see the error.
            traceback.print_exc()
            raise
    return inner
def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is a perfect square using binary search.

    Time complexity : O(Log(n))
    Space complexity: O(1)

    >>> perfect_square_binary_search(9)
    True
    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(1)
    True
    >>> perfect_square_binary_search(0)
    True
    >>> perfect_square_binary_search(10)
    False
    >>> perfect_square_binary_search(-1)
    False
    >>> perfect_square_binary_search(1.1)
    False
    >>> perfect_square_binary_search("a")
    Traceback (most recent call last):
        ...
    TypeError: '<=' not supported between instances of 'int' and 'str'
    """
    lo, hi = 0, n
    while lo <= hi:
        mid = (lo + hi) // 2
        square = mid ** 2
        if square == n:
            return True
        if square > n:
            hi = mid - 1
        else:
            lo = mid + 1
    return False
def cleanup_list(l):
    """Fixes a list so that comma separated items are put as individual items.

    So that "--reviewers joe@c,john@c --reviewers joa@c" results in
    options.reviewers == sorted(['joe@c', 'john@c', 'joa@c']).
    Empty entries are dropped; the result is sorted.
    """
    items = []
    for entry in l:
        for piece in entry.split(','):
            piece = piece.strip()
            if piece:
                items.append(piece)
    return sorted(items)
from typing import List
def find_all(substring: str, string: str) -> List[int]:
    """
    Find all non-overlapping occurrences of *substring* in *string*.

    Returns the start indices in increasing order.
    """
    occurrences = []
    start = 0
    # str.find with a start offset avoids the original implementation's
    # repeated tail-slicing, which copied the remainder on every match.
    idx = string.find(substring, start)
    while idx != -1:
        occurrences.append(idx)
        start = idx + len(substring)
        idx = string.find(substring, start)
    return occurrences
def from_maybe(default, maybe_val):
    """Return ``maybe_val.value`` if present, else *default*.

    A "nothing" value is treated as one whose ``.value`` access fails —
    either because the attribute is missing (AttributeError) or because
    the accessor raises ValueError.  The original caught only ValueError,
    so a value-less object crashed with AttributeError.
    """
    try:
        return maybe_val.value
    except (AttributeError, ValueError):
        return default
def find_biomass_reaction(model, biomass_string=None):
    """
    Identifies the biomass reaction(s) in a metabolic model.

    Parameters
    ----------
    model : cobra.Model
        Metabolic model.
    biomass_string : str or list, optional
        String denoting at least a part of the name of the biomass
        function, or a list of candidate spellings.  Defaults to
        ``["Biomass", "BIOMASS", "biomass"]``.

    Returns
    -------
    biomass_reaction_ids : list
        IDs of reactions whose id contains any of the given strings
        (each reaction id is listed at most once).
    """
    # `None` default avoids a mutable default argument while keeping the
    # documented behaviour.
    if biomass_string is None:
        biomass_string = ["Biomass", "BIOMASS", "biomass"]
    if isinstance(biomass_string, list):
        spellings = biomass_string
    else:
        # Wrap a single string in a list; the original `list(str)` split
        # it into individual characters and matched far too broadly.
        spellings = [biomass_string]
    biomass_reaction_ids = []
    for reaction in model.reactions:
        # any() also prevents duplicate appends when several spellings
        # match the same reaction id.
        if any(spelling in reaction.id for spelling in spellings):
            biomass_reaction_ids.append(reaction.id)
    return biomass_reaction_ids
def find_carriers(rules, inner_color):
    """Return a list of bag colors that contain the bag of the given inner color"""
    carriers = []
    for outer_color, contents in rules.items():
        # Each content entry is (count, color); collect just the colors.
        colors_inside = {entry[1] for entry in contents}
        if inner_color in colors_inside:
            carriers.append(outer_color)
    return carriers
def syntax_error_transformer(lines):
    """Transformer that throws SyntaxError if 'syntaxerror' is in the code."""
    for line in lines:
        pos = line.find('syntaxerror')
        if pos < 0:
            continue
        error = SyntaxError('input contains "syntaxerror"')
        # Attach the offending line and a 1-based column offset.
        error.text = line
        error.offset = pos + 1
        raise error
    return lines
import hashlib
def hasher(string, size=8):
    """Return the first *size* hex characters of the SHA-256 hash of *string*.

    Parameters:
    - string : any
        Value to be hashed; it is converted with ``str()`` first.
    - size : int
        Length of the returned hash prefix.

    Returns:
    - h : string
        Hex digest truncated to *size* characters.
    """
    digest = hashlib.sha256(str(string).encode()).hexdigest()
    return digest[:size]
def residuals(X, y, beta):
    """Calculate the residuals of a linear regression.

    Arguments
    ---------
    X: np.array
        Design matrix of shape (N, 2), as created by ``~ctapipe.fitting.design_matrix``
    y: np.array
        y values
    beta: np.array
        Parameter vector of the linear regression
    """
    predicted = X[:, 0] * beta[0] + beta[1]
    return y - predicted
def searchForm(
        buttonText="",
        span=2,
        inlineHelpText=False,
        blockHelpText=False,
        focusedInputText=False,
        htmlId=False):
    """
    *Generate a search-form - TBS style*

    **Key Arguments:**
        - ``buttonText`` -- the button text
        - ``span`` -- column span (rendered as the Bootstrap class ``span<n>``)
        - ``inlineHelpText`` -- inline and block level support for help text that appears around form controls
        - ``blockHelpText`` -- a longer block of help text that breaks onto a new line and may extend beyond one line
        - ``focusedInputText`` -- make the input focused by providing some initial editable input text
        - ``htmlId`` -- htmlId

    **Return:**
        - ``searchForm`` -- the search-form HTML string
    """
    # Translate each argument into the HTML fragment the template expects;
    # falsy arguments collapse to empty strings so `% locals()` below can
    # substitute them unconditionally.
    if span:
        span = "span%(span)s" % locals()
    else:
        span = ""
    if not focusedInputText:
        focusedInputText = ""
        focusId = ""
    else:
        # Focused inputs get the well-known "focusedInput" element id.
        focusId = "focusedInput"
    if inlineHelpText:
        inlineHelpText = """<span class="help-inline">%(inlineHelpText)s</span>""" % locals(
        )
    else:
        inlineHelpText = ""
    if blockHelpText:
        blockHelpText = """<span class="help-block">%(blockHelpText)s</span>""" % locals(
        )
    else:
        blockHelpText = ""
    if not htmlId:
        htmlId = ""
    # NOTE(review): the <input> below emits two `id` attributes (htmlId and
    # focusId), which is invalid HTML - browsers honour only the first.
    # Presumably only one of the two was intended; confirm before changing.
    searchForm = """
    <form class="form-search">
        <div class="input-append">
            <input type="text" class="search-query %(span)s" id="%(htmlId)s" id="%(focusId)s" value="%(focusedInputText)s">
            <button type="submit" class="btn">%(buttonText)s</button>
            %(inlineHelpText)s%(blockHelpText)s
        </div>
    </form>""" % locals()
    return searchForm
def exec_script(scriptpath, stdout=None, stderr=None, mock=False):
    """Return a command line (as a string) that executes the given script.

    Parameters
    ----------
    scriptpath: str
        Path to the script to be executed
    stdout: str
        Path to file to which stdout is to be piped
    stderr: str
        Path to file to which stderr is to be piped
    mock: Bool
        When called with mock=True, the command to be executed will be echoed
        to the stdout file instead of run.
    """
    command = f"/bin/bash {scriptpath}"
    if mock:
        return f'''echo "{command}" '''
    return command
def _2str(s):
    """Convert s to a string. Return '.' if s is None or an empty string."""
    if s is None or str(s) == '':
        return '.'
    return str(s)
def read_file_to_string(filename):
    """
    Given a filename, opens the file and returns the contents as a string.

    Uses a context manager so the handle is closed even if reading raises
    (the original leaked the handle on error), and reads in a single call
    instead of quadratic per-line string concatenation.
    """
    with open(filename, 'r') as f:
        return f.read()
from functools import reduce
def count_leaf_nodes(node):
    """Count the number of leaf nodes in a tree of nested dictionaries.

    Any non-dict value counts as one leaf; an empty dict contributes 0.
    Replaces the needless ``reduce`` with ``sum`` and stops shadowing
    ``node`` inside the comprehension.
    """
    if not isinstance(node, dict):
        return 1
    return sum(count_leaf_nodes(child) for child in node.values())
def count_idf(questions):
    """
    Count inverse document frequencies over a corpus.

    :param questions: list, input corpus; char-level example:
        [['我', '爱', '你'], ['爱', '护', '士']]
    :return: dict, document-frequency counts of the form {'我': 1, '爱': 2},
        plus a sentinel entry '[LENS]' holding the total number of sentences.
    """
    idf_char = {}
    for question in questions:
        # Deduplicate within a sentence: each char counts at most once here.
        for char in set(question):
            if not char.strip():  # skip empty/whitespace-only tokens
                continue
            idf_char[char] = idf_char.get(char, 0) + 1
    idf_char['[LENS]'] = len(questions)
    return idf_char
import torch
def compute_accuracy(output, target):
    """
    Calculate classification accuracy.

    :param output: Tensor of model scores, shape [batch_size, num_classes]
    :param target: Tensor of correct class labels, shape [batch_size]
    :return: prediction accuracy as a scalar float tensor
    """
    predictions = torch.argmax(output, dim=1)
    correct = torch.sum(target == predictions)
    return correct.float() / target.size(0)
def words_to_substitute(match):
    """Prompt the user for one replacement word per placeholder in *match*."""
    def _ask(name):
        # Pick the correct indefinite article for the prompt.
        article = 'an' if name[0].lower() in 'aeiou' else 'a'
        return input(f'Give me {article} {name}: ')

    return [_ask(name) for _, name in match]
def _join_package_name(ns, name):
    """
    Return an app-name in the 'namespace/name' format.
    """
    return f"{ns}/{name}"
def get_first_major_location(data):
    """Return the best article location to start at.

    Walks the text segments accumulating text length and returns the
    location of the first segment that pushes the total past 1000
    characters; falls back to 1000 when the article is shorter than that.
    """
    running_total = 0
    for segment in data['text_segments']:
        running_total += len(segment[2])
        if running_total > 1000:
            return segment[1]
    return 1000
def check_if_variable_used(methodNode, name):
    """Return True if a variable with the given name is assigned in the method."""
    for assignment in methodNode.find_all('assignment'):
        target = assignment.target
        # Plain assignment: compare the target's name directly.
        if target.type == 'name' and target.value == name:
            return True
        # Tuple unpacking: look for a matching name node inside the tuple.
        if target.type == 'tuple' and target.find('name', value=name):
            return True
    return False
def diff_set(before, after, create_replaced=True):
    """Compare two sets to get additions, removals and replacements.

    Return 3 sets:
    added    -- objects present in *after* but not in *before*
    removed  -- objects present in *before* but not in *after*
    replaced -- (old, new) pairs that share the same ``name`` but differ;
                only populated when *create_replaced* is True, in which case
                the paired elements are dropped from added/removed.
    """
    removed = before - after
    added = after - before
    replaced = set()
    if create_replaced:
        by_name = {element.name: element for element in removed}
        for new_element in added:
            old_element = by_name.get(new_element.name)
            if old_element:
                replaced.add((old_element, new_element))
        for old_element, new_element in replaced:
            removed.discard(old_element)
            added.discard(new_element)
    return (added, removed, replaced)
def file_name(n: int, koncnica: str) -> str:
    """Return the file name for the n-th EGMO with the given extension."""
    return "egmo{}.{}".format(n, koncnica)
import math
def error_probability_to_qv(error_probability, cap=93):
    """
    Convert an error probability to a phred-scaled QV, capped at *cap*.

    A probability of 0 maps directly to the cap (its phred score is infinite).
    """
    if error_probability == 0:
        return cap
    qv = int(round(-10 * math.log10(error_probability)))
    return min(cap, qv)
def make_fuel_mat_str(values):
    """Make a string for fuel material cards in MCNP format.

    Takes burnup material inventory data from the main 'values' dictionary
    and emits fuel material cards (last burnup step only) as one string.

    Arguments:
        values (dict)[-]: Main dictionary storing processed values.

    Returns:
        mat_card (str)[-]: A string of MCNP material cards.
    """
    # Only materials from the final burnup step are written out.
    final_step = 'step_{0}'.format(values['burnup_summary']['step'][-1])
    ordered_keys = sorted(values['inventory'],
                          key=lambda key: (str(key[0]), key[1]))
    cards = []
    for mat, step in ordered_keys:
        # Skip the aggregate 'sum' entry and all earlier steps.
        if mat == 'sum' or step != final_step:
            continue
        mat_obj = values['inventory'][(mat, step)]
        mat_obj.metadata['mat_number'] = mat
        cards.append(mat_obj.mcnp())
    return ''.join(cards)
def multiple_help_text(item):
    """Return help text for an option with multiple=True."""
    return " Use the option multiple times to specify more than one {} [multiple]".format(item)
def get_categorized_testlist(alltests, ucat):
    """Sort the master test list into per-category buckets.

    Each category in *ucat* maps to the list of tests whose 'category'
    field contains that category.
    """
    return {category: [test for test in alltests if category in test['category']]
            for category in ucat}
import pickle
def unpickle(file):
    """
    Load and return the object pickled in *file* (bytes-encoded payload).
    """
    with open(file, 'rb') as handle:
        return pickle.load(handle, encoding="bytes")
def _safe_recv(sock, count):
    """
    Wraps sock.recv() handling short reads, iteratively.

    :param sock: a socket to recv() from
    :param count: the number of bytes to ask for
    :return: a byte string with those bytes (shorter on EOF)
    """
    chunks = []
    remaining = count
    while True:
        chunk = sock.recv(remaining)
        chunks.append(chunk)
        # Stop on EOF (empty read) or once the request is satisfied.
        if not chunk or len(chunk) >= remaining:
            break
        remaining -= len(chunk)
    return b''.join(chunks)
def remove_typedefs(signature: str) -> str:
    """
    Strip typedef info from a function signature.

    :param signature:
        function signature
    :return:
        string that can be used to construct function calls with the same
        variable names and ordering as in the function signature
    """
    # Order matters: pointer variants must be stripped before value
    # variants (otherwise values would be inadvertently dereferenced),
    # and const-qualified forms before their plain counterparts. Each
    # pattern keeps a trailing space for cosmetic reasons.
    for typedef in ('const realtype *',
                    'const double *',
                    'const realtype ',
                    'double *',
                    'realtype *',
                    'const int ',
                    'int ',
                    'SUNMatrixContent_Sparse ',
                    'gsl::span<const int>'):
        signature = signature.replace(typedef, '')
    return signature
def interpolateRecallPoints(recallPoints):
    """
    Get a list of interpolated precision and recall. I.e., transforms the list
    of recall points in a list of recall points with 11 points.
    Does't change the recallPoints list.

    param recallPoints: a list of recall points in pairs (precision, recall),
        the values should belong in the interval [0, 1].
    return: a list with 11 pairs (precision, recall) of interpolated recall
        points, in the interval [0, 1].
    """
    nRecallPoints = []
    if recallPoints:
        # Sample recall at 0.0, 0.1, ..., 1.0 (11 points).
        for rPoint in range(0, 101, 10):
            nRecall = rPoint / 100
            try:
                # Interpolated precision: maximum precision among all input
                # points whose recall is at least the sampled recall level.
                nPrecision = max([p for p, r in recallPoints if nRecall <= r])
            except ValueError:
                # No input point reaches this recall level: pad the
                # remaining samples with precision 0.0, then stop sampling.
                # NOTE(review): the dedent level of this break relative to
                # the inner loop is ambiguous in the original source —
                # confirm it exits the OUTER loop as read here.
                for rPoint in range(rPoint, 101,10):
                    nRecall = rPoint / 100
                    nRecallPoints.append((0.0, nRecall))
                break
            nRecallPoints.append((nPrecision, nRecall))
    return nRecallPoints
def to_user_facing_code(code):
    """Returns a user-facing code given a raw code (e.g., abcdefghij)."""
    return f'{code[:3]}-{code[3:6]}-{code[6:]}'
def digit_sum(num: int) -> int:
    """Calculate the sum of a number's digits.

    Operates on the absolute value: with a negative ``num`` the original
    loop never terminated, because ``num // 10`` floors towards -1 rather
    than 0. Behaviour for non-negative inputs is unchanged.
    """
    num = abs(num)
    total = 0
    while num:
        num, digit = divmod(num, 10)
        total += digit
    return total
import hashlib
def valid_proof(last_proof, proof, last_hash, difficulty):
    """
    Validates the Proof: the SHA-256 digest of the concatenated values must
    begin with ``difficulty`` zero *bits*.

    :param last_proof: <int> Previous Proof
    :param proof: <int> Current Proof
    :param last_hash: <str> The hash of the Previous Block
    :param difficulty: <int> Required number of leading zero bits
    :return: <bool> True if correct, False if not.
    """
    guess = f'{last_proof}{proof}{last_hash}'.encode()
    digest = hashlib.sha256(guess).digest()
    bit_string = ''.join(format(byte, '08b') for byte in digest)
    return bit_string.startswith('0' * difficulty)
def convert_bc_stage_text(bc_stage: str) -> str:
    """
    Convert a business-case stage name from the long form kept in the master
    to its standard/shorter abbreviation. Unknown names pass through as-is.

    :param bc_stage: the string name for business cases kept in the master
    :return: standard/shorter string name
    """
    abbreviations = {
        "Strategic Outline Case": "SOBC",
        "Outline Business Case": "OBC",
        "Full Business Case": "FBC",
        "pre-Strategic Outline Case": "pre-SOBC",
    }
    return abbreviations.get(bc_stage, bc_stage)
def ref_tuple_to_str(key, val):
    """Tuple like ('a', 'b') to string like 'a:b'."""
    return f'{key}:{val}'
def has_token_in_position(node, token, pos):
    """Return True if the node's token at position *pos* (0-indexed)
    spells *token*.
    """
    token_at_pos = list(node.get_tokens())[pos]
    return token_at_pos.spelling == token
import random
def sample_gamma(alpha, beta):
    """
    Sample gamma variate for small alpha, using
    transformation from Marsaglia&Tsang method
    http://www.hongliangjie.com/2012/12/19/how-to-generate-gamma-random-variables/
    """
    # Draw Gamma(alpha + 1, beta), then scale by U^(1/alpha) to obtain
    # a Gamma(alpha, beta) variate (boost transformation).
    boosted = random.gammavariate(alpha + 1.0, beta)
    return boosted * random.random() ** (1.0 / alpha)
def update_path(dct, path, update):
    """
    Update a dict structure (e.g. JSON), using a path to specify what value to update.

    The `path` specifies a hierarchy of (string) dictionary keys to traverse, separated by periods.
    Suffixing a key with `[]` indicates the value of that key is a list, and should be iterated.
    e.g. `foo.bar[].baz`

    The `update` can be simply the new value, or a function that takes the old value and returns the
    new value.

    Dicts and lists along the path are copied before being updated, and the updated dict structure
    is returned; inputs that cannot be traversed are returned unchanged.
    """
    if not isinstance(dct, dict):
        return dct
    # Normalize the update into a callable.
    update_fn = update if callable(update) else (lambda _old: update)
    field, _, sub_path = path.partition('.')
    is_array = field.endswith('[]')
    if is_array:
        field = field[:-2]
    if field not in dct:
        return dct
    value = dct[field]
    if is_array and not isinstance(value, list):
        return dct

    def _apply(item):
        # Recurse while path segments remain; otherwise apply the update.
        return update_path(item, sub_path, update_fn) if sub_path else update_fn(item)

    new_value = [_apply(item) for item in value] if is_array else _apply(value)
    updated = dct.copy()
    updated[field] = new_value
    return updated
def format_path_nodes(urls):
    """
    Takes the content response from a neo4j REST API paths call (URLs to paths)
    and returns a list of just the node ID's (the last path segment of each URL).
    """
    return [url.split("/")[-1] for url in urls]
import torch
def load_to_cpu(filename):
    """Load a torch-serialized object, mapping all storages onto the CPU."""
    return torch.load(filename, map_location="cpu")
from typing import List
def get_object_structure(obj: dict) -> List[str]:
    """
    Traverse an object structure and compose a list of property key paths,
    including nested ones.

    The result reflects the object's structure as the list of all property
    key paths. Objects nested inside an array are assumed to share the
    structure of the array's first element.

    :param obj: data object to get its structure
    :returns: list of object property key paths
    """
    paths = []

    def _walk(node, prefix=""):
        if prefix:
            paths.append(prefix)
        if isinstance(node, dict):
            for key, child in node.items():
                _walk(child, prefix + "/" + key)
        elif isinstance(node, list) and node:
            # Only the first element is inspected; '[]' marks the array level.
            _walk(node[0], prefix + "/[]")

    _walk(obj)
    return paths
import collections
def calculate_hints(guess: str, secret: str) -> tuple[int, ...]:
    """Score *guess* against *secret* and report per-position closeness.

    The hint will be comprised of the following:

    - 2 - the guess <char> is in the secret word and in the correct spot.
    - 1 - the guess <char> is in the secret word but in the wrong spot.
    - 0 - the guess <char> is not in the secret word at all.

    Args:
        guess (str): The guess word
        secret (str): The secret word

    Returns:
        tuple[int, ...]: The hint on how close the guess word is to the secret word.

    Examples:
        >>> calculate_hints("chess", "swiss")
        (0, 0, 0, 2, 2)

        >>> calculate_hints("orate", "oater")
        (2, 1, 1, 1, 1)
    """
    # Budget of secret characters not matched exactly; caps how many
    # "wrong spot" (1) hints each character may earn.
    leftover = collections.Counter(
        s_char for s_char, g_char in zip(secret, guess) if s_char != g_char
    )
    hints: list[int] = []
    for s_char, g_char in zip(secret, guess):
        if s_char == g_char:
            score = 2  # green
        elif g_char in secret and leftover[g_char] > 0:
            score = 1  # yellow
            leftover[g_char] -= 1
        else:
            score = 0  # grey
        hints.append(score)
    return tuple(hints)
def has_data(group, dataset_name):
    """Check whether HDF5 group contains a non-empty dataset with given name.

    Parameters
    ----------
    group : :class:`h5py.Group` object
        HDF5 group to query
    dataset_name : string
        Name of HDF5 dataset to query

    Returns
    -------
    has_data : {True, False}
        True if dataset exists in group and its shape is not the empty tuple
    """
    if dataset_name not in group:
        return False
    return group[dataset_name].shape != ()
from typing import Tuple
def _convert_twos_complement(obj: int) -> Tuple[int, str]:
    """ Converts a negative integer to a twos complement binary string to the nearest octet

    Args:
        obj: Negative integer

    Returns:
        The required length in octets of the binary two's complement representation and
        a string of octal length of the two's complement representation of the integer
    """
    # Hard lower bound: values below -32768 would need more than the widths
    # probed below.
    if obj < -32768:
        raise OverflowError('[PyASN1PEREncoder][ERROR]: Value too small to encode, hard limit '
                            'set in per_encoder.py')
    x = 0
    # Find the smallest probed bit-width (8/16/24/32 bits, i.e. x = 7/15/23/31
    # value bits) whose positive range covers |obj|.
    for x in range(7, 32, 8):
        if -1*obj < pow(2, x):
            break
    # Two's complement identity: for width x+1, the unsigned magnitude of the
    # low x bits is 2^x + obj (obj is negative).
    total_remainder = pow(2, x) + obj
    bin_str = ''
    # Greedy binary expansion of total_remainder over x+1 bit positions,
    # most-significant first.
    for y in range(x, -1, -1):
        if pow(2, y) <= total_remainder:
            bin_str += '1'
            total_remainder -= pow(2, y)
        else:
            bin_str += '0'
    # Force the sign bit: the top bit of a negative two's complement value is 1.
    bin_str = '1' + bin_str[1:]
    return int((x + 1)/8), bin_str
def removeGame(gtitle: str) -> str:
    """Return a query to remove a given game from the database.

    NOTE(review): building SQL by string interpolation is vulnerable to SQL
    injection; the execution site should switch to a parameterized query
    (e.g. ``DELETE FROM game WHERE title = ?``). As a stop-gap, single
    quotes in the title are escaped by doubling (standard SQL), so
    legitimate titles like "O'Brien" no longer break the statement.
    """
    escaped = gtitle.replace("'", "''")
    return (f"DELETE FROM game "
            f"WHERE title='{escaped}';"
            )
import torch
import math
def geometric_transform(pose_tensor,
                        similarity=False,
                        nonlinear=True,
                        as_matrix=False):
    """Converts pose tensor into an affine or similarity transform.

    The last-dimension layout of `pose_tensor` is
    (scale_x, scale_y, theta, shear, trans_x, trans_y).

    Args:
        pose_tensor: [..., 6] tensor.
        similarity (bool): Builds a similarity transform (single scale,
            rotation, translation; no shear) if True.
        nonlinear (bool): Applies non-linearities to pose params if True.
        as_matrix (bool): Converts the transform to a matrix if True.

    Returns:
        [..., 3, 3] tensor if `as_matrix` else [..., 6] tensor.
    """
    scale_x, scale_y, theta, shear, trans_x, trans_y = torch.split(pose_tensor,
                                                                   1,
                                                                   dim=-1)
    if nonlinear:
        # Squash raw params into usable ranges: scales to (1e-2, ~1.01),
        # translations/shear to (-1, 1), angle to (0, 2*pi).
        scale_x, scale_y = (torch.sigmoid(t) + 1e-2 for t in (scale_x, scale_y))
        trans_x, trans_y, shear = (torch.tanh(t * 5.)
                                   for t in (trans_x, trans_y, shear))
        theta *= 2. * math.pi
    else:
        # Without non-linearities, only force scales to be positive.
        scale_x, scale_y = (abs(t) + 1e-2 for t in (scale_x, scale_y))

    c, s = torch.cos(theta), torch.sin(theta)

    if similarity:
        # Similarity: uniform scale (scale_x), rotation, translation.
        scale = scale_x
        pose = [scale * c, -scale * s, trans_x,
                scale * s, scale * c, trans_y]

    else:
        # Full affine: per-axis scale, rotation and shear folded into the
        # first row; linearized 2x3 matrix in row-major order.
        pose = [
            scale_x * c + shear * scale_y * s,
            -scale_x * s + shear * scale_y * c,
            trans_x,
            scale_y * s,
            scale_y * c,
            trans_y
        ]

    pose = torch.cat(pose, -1)

    # convert to a matrix
    if as_matrix:
        # Reshape the 6 params into [..., 2, 3] and append the homogeneous
        # row [0, 0, 1] to obtain a [..., 3, 3] transform.
        shape = list(pose.shape[:-1])
        shape += [2, 3]
        pose = pose.view(*shape)
        zeros = torch.zeros_like(pose[..., :1, 0])
        last = torch.stack([zeros, zeros, zeros + 1], -1)
        pose = torch.cat([pose, last], -2)

    return pose
def noop(value, param=None):
    """A noop filter: returns its first argument unchanged and ignores the
    optional second one.

    Useful for testing out whitespace in filter arguments (see #19882)."""
    _ = param  # intentionally unused
    return value
def _theta(a, b):
    """Step function: returns 1 if a >= b, 0 otherwise."""
    return 1 if a >= b else 0
import math
def saturation_vapour_pressure(t):
    """
    Tetens-type exponential formula for saturation vapour pressure.

    :param t: temperature [C]
    :return: saturation vapour pressure
    """
    exponent = (17.27 * t) / (t + 237.3)
    return 0.6108 * math.exp(exponent)
def calcOutputLen(outputFormat, article_len, wrd):
    """Return the summary length.

    ``wrd`` is interpreted as an absolute word count when *outputFormat*
    is "word_count", otherwise as a ratio of *article_len*.
    """
    if outputFormat == "word_count":
        return int(wrd)
    return article_len * float(wrd)
def convertToDateAndTime(seconds):
    """
    Returns a string with hours, minutes and seconds
    """
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours} hrs, {minutes} min, {secs} seconds"
def GetForwardingArgs(local_port, remote_port, host_ip, port_forward):
    """Prepare the forwarding arguments to execute for devices that connect with
    the host via ssh.

    Args:
        local_port: Port on the host.
        remote_port: Port on the remote device.
        host_ip: ip of the host.
        port_forward: Direction of the connection. True if forwarding from the host.

    Returns:
        List of strings, the command arguments for handling port forwarding.
    """
    if port_forward:
        template = '-R{remote_port}:{host_ip}:{local_port}'
    else:
        template = '-L{local_port}:{host_ip}:{remote_port}'
    # A falsy remote_port (e.g. None) is substituted by 0.
    return [template.format(host_ip=host_ip,
                            local_port=local_port,
                            remote_port=remote_port or 0)]
def constant_features(X, frac_constant_values = 0.90):
    """
    Identifies features that have a large fraction of constant values.

    Parameters
    ----------
    X : pandas dataframe
        A data set where each row is an observation and each column a feature.

    frac_constant_values: float, optional (default = 0.90)
        The threshold used to identify features with a large fraction of
        constant values.

    Returns
    -------
    labels: list
        A list with the labels identifying the features that contain a
        large fraction of constant values.
    """
    num_rows = X.shape[0]
    flagged = []
    for label in X.columns.tolist():
        # Fraction of rows taken up by the single most frequent value.
        top_fraction = X[label].value_counts().iloc[0] / num_rows
        if top_fraction > frac_constant_values:
            flagged.append(label)
    return flagged
def check_nans(df, verbose=True):
    """ checks for NANs in a dataframe

    Prints how many rows contain at least one NaN (and, when *verbose*,
    the first few such rows) and returns those rows.
    """
    nan_rows = df[df.isnull().any(axis=1)]
    print('{} nan rows'.format(nan_rows.shape[0]))
    if verbose:
        print('nan values are:')
        print(nan_rows.head())
    return nan_rows
from typing import Any
def create_mapping(key: str, value: Any):
    """
    Creates a mapping of a given key and value.

    Parameters
    ----------
    key : str
        The key for the mapping.
    value : Any
        The value for the mapping.

    Returns
    -------
    dict
        A dictionary of the form ``{"mapping": {key: value}}`` containing
        the mapping created.
    """
    mapping = {key: value}
    return {"mapping": mapping}
from typing import List
from typing import Dict
def _gen_confirm(msg: str) -> List[Dict]:
    """ Build a single confirmation-prompt spec for the given message. """
    prompt = {
        'type': 'confirm',
        'message': msg,
        'name': 'confirmation',
    }
    return [prompt]
def polynomial(x, coef, b=0):
    """Evaluate aX² + bX + c (+ offset ``b``) with coef = [a, b, c]."""
    quad, lin, const = coef[0], coef[1], coef[2]
    return quad * x ** 2 + lin * x + const + b
def getEndJoints(jnt):
    """
    Recurse through children and return all joints that have no joint children.

    Args:
        jnt (PyNode): A joint node
    """
    children = jnt.listRelatives(children=True, typ='joint')
    if not children:
        # No joint children: this is a leaf joint.
        return [jnt]
    leaves = []
    for child in children:
        leaves.extend(getEndJoints(child))
    return leaves
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.