content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def plugins_by_name(pm):
    """
    Organize plugins by plugin name.

    Returns a dict where the key is the plugin name and the value is a list
    of all plugins registered under that name (names may collide).
    """
    plugins = {}
    for plugin in pm.plugins:
        # setdefault replaces the explicit membership check + empty-list init.
        plugins.setdefault(plugin.name, []).append(plugin)
    return plugins
def rivers_with_station(stations):
    """Given an iterable of monitoring stations, return the set of river
    names that have at least one station on them."""
    return {station.river for station in stations}
import random
def random_start_goal(width=10, start_bounds=None, goal_bounds=None):
    """Return a random start position in start_bounds and a distinct random
    goal position in goal_bounds.

    :param width: width of the grid; used as the default bound
    :param start_bounds: a tuple of tuples ((x0, x1), (y0, y1))
    :param goal_bounds: a tuple of tuples ((x0, x1), (y0, y1))
    :return: random start and goal coordinates (never equal)"""
    default_bounds = ((0, width), (0, width))
    start_x_bounds, start_y_bounds = (
        start_bounds if start_bounds is not None else default_bounds)
    goal_x_bounds, goal_y_bounds = (
        goal_bounds if goal_bounds is not None else default_bounds)

    def pick(x_bounds, y_bounds):
        # One random draw per axis, half-open ranges as with randrange.
        return random.randrange(*x_bounds), random.randrange(*y_bounds)

    start = pick(start_x_bounds, start_y_bounds)
    goal = pick(goal_x_bounds, goal_y_bounds)
    # Re-draw the goal until it differs from the start.
    while goal == start:
        goal = pick(goal_x_bounds, goal_y_bounds)
    return start, goal
def round_(i: int, size: int) -> int:
    """
    Round ``i`` up to the nearest greater-or-equal multiple of ``size``.
    """
    remainder = i % size
    return i if remainder == 0 else i + size - remainder
def find_from_iterable(it) -> set:
    """
    Find all unique conditions in the given iterable object.

    :param it: Iterable object to traverse.
    :return: Set of all unique available conditions.
    """
    # The manual "if i not in cond" loop was a hand-rolled set(); the
    # constructor performs exactly the same deduplication.
    return set(it)
from typing import Callable
def map_to_user_params(*args_to_params: str) -> Callable[[dict], dict]:
    """Make a function that reads plugin arguments from user_params.

    The returned function can usually be directly assigned to the
    args_from_user_params attribute of plugin classes.

    :param args_to_params: each item is either <arg_name> or
        <arg_name>:<param_name>.
        With <arg_name>, the argument becomes arg_name=user_params[arg_name];
        with <arg_name>:<param_name>, arg_name=user_params[param_name].
    :return: function mapping a user_params dict to a dict of keyword args
    """
    def get_args(user_params: dict) -> dict:
        kwargs = {}
        for spec in args_to_params:
            arg_name, _, param_name = spec.partition(":")
            # Fall back to the argument name when no explicit param given.
            lookup_key = param_name if param_name else arg_name
            value = user_params.get(lookup_key)
            # Missing/None params are simply omitted from the kwargs.
            if value is not None:
                kwargs[arg_name] = value
        return kwargs

    return get_args
import math
def categorize(distance: float) -> int:
    """Distance binning method shared across data analysis files and
    classifiers.

    Args:
        distance (float): The premeasured distance, in meters.

    Returns:
        The floor of the given distance amount, as an int bin index.
    """
    # Floor division by 1 floors toward negative infinity, matching math.floor.
    return int(distance // 1)
def _sort_key(name):
"""Sorting helper for members of a directory."""
return name.lower().lstrip("_") | ecd81276f49f6a10ea7de2920ff45ddd2df469c3 | 42,607 |
import os
def pages(request):
    """Return a callable building file:// URLs for files under the local
    "pages" test-data directory."""
    base_dir = os.path.join(os.path.dirname(__file__), "pages")

    def build_url(*path):
        return "file://" + os.path.abspath(os.path.join(base_dir, *path))

    return build_url
def sum_of_diffs(*args):
    """Sum the element-wise differences args[1][k] - args[0][k], truncated
    to the shorter of the two sequences."""
    return sum(second - first for first, second in zip(args[0], args[1]))
import torch
def concat_feat_var(src, tgt):
    """Concatenate feature tensors along dim 0; when src is None, simply
    pass tgt through unchanged."""
    if src is None:
        return tgt
    return torch.cat([src, tgt])
def isOnlySpecialCharacters(word):
    """
    Check whether the string contains no letters and no digits, i.e. is
    comprised entirely of special characters typically allowed in passwords.
    An empty string vacuously returns True (same as the original loop).
    """
    # Iterate characters directly instead of indexing by range(len(word)).
    return not any(ch.isalpha() or ch.isdigit() for ch in word)
def evaluate_predictions(test_y, predictions):
    """Evaluate on the test set.

    Returns a pair of accuracies: fraction of rows whose first component
    (cycle) matches, and fraction whose second component (menstruation)
    matches."""
    assert len(test_y) == len(predictions)
    total = len(test_y)
    cycle_hits = sum(
        1 for actual, pred in zip(test_y, predictions) if actual[0] == pred[0])
    menstr_hits = sum(
        1 for actual, pred in zip(test_y, predictions) if actual[1] == pred[1])
    return cycle_hits / total, menstr_hits / total
def view_to_world(pos, renderer):
    """
    Convert a point from VTK view coordinates to world coordinates.

    :param pos: list
        Point in view coordinates, passed to vtkRenderer.SetViewPoint.
    :param renderer: vtk.vtkRenderer
    :return: list
        The renderer's world point after the ViewToWorld() conversion.

    NOTE(review): this mutates the renderer's current view point as a side
    effect of performing the conversion.
    """
    renderer.SetViewPoint(pos)
    renderer.ViewToWorld()
    return renderer.GetWorldPoint()
def PGetTable (inUV, access, tabType, tabVer, err, \
               numOrb=0, numPCal=3, numIF=1, numPol=1, \
               numTerm=0, numChan=1, numTones=1, numBand=1, \
               numTabs=1, npoly=1, numCoef=5,
               maxis1=2, maxis2=1, maxis3=1, maxis4=1, maxis5=1):
    """ Return (create) the specified associated table.

    Specific table types are recognized and the appropriate constructor
    called; these may have additional parameters. This allows creating
    new tables of the appropriate type.
    Returns a Python Obit Table.

    inUV     = Python UV object
    access   = access code 1=READONLY, 2=WRITEONLY, 3=READWRITE
    tabType  = Table type, e.g. "AIPS AN"
    tabVer   = table version; if > 0 on input that table is returned,
               if 0 on input, the highest version is used.
    err      = Python Obit Error/message stack

    Optional parameters, values only used if the table is created:
    numOrb   = Number of orbital parameters (AN)
    numPCal  = Number of polarization parameters (AN)
    numIF    = Number of IFs (FQ, SN, CL, BP, BL, TY, CQ)
    numPol   = Number of Stokes' (SN, CL, BP, BL, PC, TY, GC, MC, IM)
    numTerm  = Number of terms in model polynomial (CL)
    numChan  = Number of spectral channels (BP)
    numTones = Number of phase cal tones (PC)
    numTabs  = Number of ??? (GC)
    numCoef  = Number of polynomial coefficients (NI)
    numBand  = Number of bands(?) (IM, GC)
    npoly    = Number of polynomial terms (IM)
    maxis1-5 = Dimension of axes of IDI data matrix

    NOTE(review): maxis1-5 are accepted but never forwarded to NewTable
    below — confirm against the Obit UV.NewTable signature whether they
    should be passed through.
    """
    ################################################################
    # AIPSUVData objects use their own table accessor.
    if inUV.myClass=='AIPSUVData':
        return inUV.table(tabType,tabVer)
    return inUV.NewTable(access, tabType, tabVer, err,\
                         numOrb=numOrb, numPCal=numPCal, numIF=numIF, \
                         numPol=numPol, numTerm=numTerm, numChan=numChan,\
                         numTones=numTones, numBand=numBand, \
                         numTabs=numTabs, npoly=npoly, numCoef=numCoef)
    # end PGetTable
import numpy
def compute_iss_diff(e, x, incentive):
    """Compute the difference of the LHS and RHS of the ISS condition.

    Args:
        e: array-like of reference-state components e[j].
        x: numpy array of state components x[j] (assumed nonzero).
        incentive: callable mapping x to an incentive vector.

    Returns:
        float: sum_j(e[j]/x[j] * incentive(x)[j]) - sum(incentive(x)).
    """
    i = incentive(x)
    # Reuse the already-computed incentive vector; the original called
    # incentive(x) a second time just to take its sum.
    s = numpy.sum(i)
    lhs = sum(e[j] / x[j] * i[j] for j in range(x.size))
    return lhs - s
import time
def get_elapsed_time_ms(start_time_in_seconds):
    """
    Return the elapsed wall-clock time, in whole milliseconds, since the
    given start time (a time.time() value in seconds).
    """
    elapsed_seconds = time.time() - start_time_in_seconds
    return int(elapsed_seconds * 1000)
import os
def join_basedir(basedir):
    """
    Return a shortcut function that joins the given basedir with a
    relative path.
    """
    def joined(path):
        return os.path.join(basedir, path)

    return joined
def EPSILON_0():
    """
    Return the obliquity of the ecliptic at J2000.0, in arcseconds,
    according to the IERS numerical standards (2010).
    """
    return 84381.406
def get_urls(link, nb):
    """
    Generate the list of all stats URLs: one per (season, team) pair, for
    seasons 2000-01 through 2019-20 and team ids 1..nb.

    Args:
        link [str]: Base HTML link
        nb [int]: Number of teams (pages) per season

    Returns:
        url [list of str]: List containing all URLs
    """
    return [
        link + str(season) + "-" + str(season + 1) + "&teamId=" + str(team)
        for season in range(2000, 2020)
        for team in range(1, nb + 1)
    ]
from typing import Optional
import re
def email_is_valid(email: Optional[str]) -> bool:
    """
    Validate that a plausible email address was provided.

    None is treated as valid.

    Parameters
    ----------
    email: Optional[str]
        The email to validate.

    Returns
    -------
    status: bool
        The validation status.
    """
    if email is None:
        return True
    pattern = r"^[a-zA-Z0-9]+[\.]?[a-zA-Z0-9]+[@]\w+[.]\w{2,3}$"
    return re.match(pattern, email) is not None
def sanitize_for_shell(string):
    """
    Escape every double quote in ``string`` with a backslash, for use in a
    Windows shell command.
    """
    return string.replace('"', '\\"')
import sys
import unicodedata
def remove_punctuation_dict() -> dict:
    """Build a str.translate table that deletes punctuation (spaces kept).

    Maps every Unicode code point whose category starts with 'P'
    (punctuation) to None.

    :return: dict usable with str.translate

    >>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate(
    ...     remove_punctuation_dict()).lstrip())
    Im ok Oh Fine
    """
    return {
        codepoint: None
        for codepoint in range(sys.maxunicode)
        if unicodedata.category(chr(codepoint)).startswith('P')
    }
def tanimoto(list1, list2):
    """Tanimoto coefficient of two lists.

    Uses the size of the intersection of the two value sets over the total
    number of list entries minus the overlap (list lengths, not set sizes,
    are used in the denominator).

    In [2]: list2=['39229', '31995', '32015']
    In [3]: list1=['31936', '35989', '27489', '39229', '15468', '31993', '26478']
    In [4]: tanimoto(list1,list2)
    Out[4]: 0.1111111111111111
    """
    shared = set(list1) & set(list2)
    denominator = len(list1) + len(list2) - len(shared)
    return float(len(shared)) / denominator
import struct
import base64
def _serialize_linear_biases(linear, nodelist):
"""Serializes the linear biases.
Args:
linear: a interable object where linear[v] is the bias
associated with v.
nodelist (list): an ordered iterable containing the nodes.
Returns:
str: base 64 encoded string of little endian 8 byte floats,
one for each of the biases in linear. Ordered according
to nodelist.
Examples:
>>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [1, 2, 3])
'AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA'
>>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [3, 2, 1])
'AAAAAAAAAAAAAAAAAADwPwAAAAAAAPC/'
"""
linear_bytes = struct.pack('<' + 'd' * len(linear), *[linear[i] for i in nodelist])
return base64.b64encode(linear_bytes).decode('utf-8') | bab0881a6adf7387a89885413b12b3e57e515527 | 42,632 |
from typing import IO
from typing import Tuple
from typing import Optional
def solve_task(input_io: IO, slope: Tuple[int, int]) -> int:
    """
    Count trees ('#') hit while descending the map at the given slope.

    Parameters
    ----------
    input_io: IO
        Map stream (one row of '.'/'#' per line); rewound to 0 on return.
    slope: (slope_right: int, slope_down: int)
        Slope to use from top to bottom.

    Returns
    -------
    int
        Number of trees '#' in the path from top to bottom.
    """
    def go_down(amount: int) -> Optional[str]:
        """Try to advance `amount` lines 'down' in the map stream.

        Returns the last line read, or None/'' when the stream is exhausted.
        """
        line = None
        while (amount > 0) and (line := input_io.readline()):
            amount -= 1
        return line

    trees = 0
    slope_right, slope_down = slope
    # The first row is consumed here and is not counted (start position is
    # assumed open — enforced by the assert below).
    fline = input_io.readline().strip()
    assert fline[0] == "."
    columns = len(fline)
    current_column = 0
    while line := go_down(slope_down):
        line = line.strip()
        current_column += slope_right
        # The map repeats horizontally, hence the wrap-around modulo.
        current_column %= columns
        trees += line[current_column] == "#"
    # Rewind so the same stream can be reused for another slope.
    input_io.seek(0)
    return trees
def _pos_round(position):
"""
Returns the rounded `position`.
**Don't require Pygame.**
**(Not available in SimpleGUI of CodeSkulptor.)**
:param position: (int or float, int or float)
or [int or float, int or float]
:return: (int, int)
"""
assert isinstance(position, tuple) or isinstance(position, list), \
type(position)
assert len(position) == 2, len(position)
assert isinstance(position[0], int) or isinstance(position[0], float), \
type(position[0])
assert isinstance(position[1], int) or isinstance(position[1], float), \
type(position[1])
return (int(round(position[0])), int(round(position[1]))) | e0e130e17cb39c1c9ca62db03556f9410c417f67 | 42,634 |
def lucas(n):
    """Return (and print) the nth value of the Lucas numbers.

    Out-of-range inputs print and return an error message string instead.
    """
    if n < 0:
        message = 'The number cannot be negative.'
        print(message)
        return message
    if n > 999:
        message = 'The number is too big. Please enter a number between 0 and 999.'
        print(message)
        return message
    # Iterate with a rolling pair instead of building the whole list.
    previous, current = 2, 1
    if n == 0:
        print(previous)
        return previous
    for _ in range(n - 1):
        previous, current = current, previous + current
    print(current)
    return current
def compute_open_lead(df, shift=-1):
    """Compute a leading indicator for the 'open' column.

    Shifts 'open' within each store; positions left empty by the shift are
    filled with a weekday-based default (1.0 unless day_of_week == 6)."""
    shifted_open = df.groupby("store")["open"].shift(shift)
    fallback = df["day_of_week"].ne(6).astype("double")
    return shifted_open.combine_first(fallback)
def get_sampling_error(mu, xbar):
    """
    Compute the sampling error: the difference between the population mean
    and the sample mean.

    Parameters
    ----------
    mu: mean of the population parameter
    xbar: mean of the sample

    Returns
    -------
    The sampling error mu - xbar.
    """
    sampling_error = mu - xbar
    return sampling_error
def expected_base_paths(configuration):
    """API base paths, e.g. ['/api/indexd', '/api/leo', '/api/sheepdog',
    '/api/peregrine', '/api/dockstore']."""
    base_paths = []
    for api in configuration['apis']:
        base_paths.append(api['paths']['base'])
    return base_paths
def custom_formatwarning(message, category, filename, lineno, line=""):
    """Warning formatter that keeps only the message, dropping the
    category/filename/line information."""
    return "Warning: {}\n".format(message)
def is_valid_ohlc(ohlc_row):
    """Validate an OHLC row of the form (open, high, low, close, volume).

    A row is valid when high >= low, both open and close fall within
    [low, high], and volume is non-negative.
    """
    _open, _high, _low, _close, _volume = ohlc_row
    # Chained comparisons replace the assigned-lambda bounds check (E731).
    return (
        _high >= _low
        and _low <= _open <= _high
        and _low <= _close <= _high
        and _volume >= 0
    )
def _sdiff(s1, s2):
"""return symmetric diff of strings s1 and s2, as a tuple of (letters
unique to s1, letters common to both, letters unique to s2.
s1 and s2 must be strings with letters in sorted order
"""
if s1 == s2:
return ('', s1, '')
i1 = i2 = 0
u1 = u2 = i = ""
while i1 < len(s1) and i2 < len(s2):
if s1[i1] == s2[i2]:
i += s1[i1]
i1 += 1
i2 += 1
elif s1[i1] <= s2[i2]:
u1 += s1[i1]
i1 += 1
elif s2[i2] <= s1[i1]:
u2 += s2[i2]
i2 += 1
else:
assert False, "Shouldn't be here"
u1 += s1[i1:]
u2 += s2[i2:]
return (u1, i, u2) | b1cdcac3cfb32c7445cdbbeac0b6d2d3b1e494a6 | 42,644 |
import logging
def get_logger():
    """Return the root logger, for callers preferring a procedural
    interface over module-level loggers."""
    return logging.getLogger()
def income2(households):
    """
    Dummy (0/1 indicator) column for income group 2.
    """
    is_group2 = households['income_category'] == 'income group 2'
    return is_group2.astype(int)
import configparser
def load_config(config_file):
    """Load an INI-style config file and return the ConfigParser
    (keys without values are allowed)."""
    parser = configparser.ConfigParser(allow_no_value=True)
    parser.read(config_file)
    return parser
def read_list_to_annotate(filename):
    """
    Read image paths, one per line, from a text file.

    Prints the number of *unique* paths loaded, then returns all paths
    (duplicates included, in file order).

    :param filename: path to the text file listing image paths
    :return: list of path strings with trailing newlines removed
    """
    with open(filename, "r") as f:
        # lines are already str; the original's str() wrapper was redundant.
        img_paths = [line.rstrip('\n') for line in f]
    print("Loaded names of {} images.".format(len(set(img_paths))))
    return img_paths
def get_face_id(row):
    """
    Extract the face id from an annotation row.

    :param row: indexable annotation record; the face id is assumed to be
        stored at position 7 — TODO confirm against the annotation format.
    :return: the value at column 7
    """
    return row[7]
import operator
def sort(word_freq):
    """
    Take a dictionary of words and their frequencies and return a list
    of (word, frequency) pairs sorted by descending frequency.
    """
    # dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; items() works on both (as a view on Python 3).
    return sorted(word_freq.items(), key=operator.itemgetter(1), reverse=True)
def getScreenWidth():
    """A stub implementation of the xbmcgui getScreenWidth() function.

    Always reports a fixed 1080p width (1920 pixels) for tests."""
    return 1920
def gather_points(points, index):
    """Gather per-point features for the selected centroids.

    Args:
        points: (batch_size, channels, num_points)
        index: (batch_size, num_centroids) integer indices into num_points

    Returns:
        torch.Tensor: (batch_size, channels, num_centroids)
    """
    batch_size, channels = points.shape[0], points.shape[1]
    num_centroids = index.shape[1]
    # Broadcast the index over the channel dimension so gather can pick the
    # same point index for every channel.
    expanded_index = index.unsqueeze(1).expand(batch_size, channels, num_centroids)
    return points.gather(2, expanded_index)
def diagonal_path(size):
    """
    Create a set of diagonal paths moving from the top left to the bottom
    right of the image. Successive diagonals start at the bottom-left corner
    and move up until the top-right corner.

    :param size: The dimensions of the image (width, height)
    :return: A generator yielding paths; each path is a generator of (x, y)
        pixel coordinates.
    """
    width, height = size
    # offset = y - x for each diagonal, from bottom-left to top-right.
    for offset in range(height - 1, -width, -1):
        start = max(0, -offset)
        stop = min(width, height - offset)
        yield ((x, x + offset) for x in range(start, stop))
def worldregions():
    """
    Return the (label, value) choices for a world-region drop-down menu,
    used to select which shapefile to average over.

    The first entry ('All World Regions', '') selects everything; the final
    ('None', 'none') entry disables regional averaging.
    """
    return (
        ('All World Regions', ''),
        ('Antarctica', 'Antarctica'),
        ('Asiatic Russia', 'Asiatic Russia'),
        ('Australia/New Zealand', 'Australia/New Zealand'),
        ('Caribbean', 'Caribbean'),
        ('Central America', 'Central America'),
        ('Central Asia', 'Central Asia'),
        ('Eastern Africa', 'Eastern Africa'),
        ('Eastern Asia', 'Eastern Asia'),
        ('Eastern Europe', 'Eastern Europe'),
        ('European Russia', 'European Russia'),
        ('Melanesia', 'Melanesia'),
        ('Micronesia', 'Micronesia'),
        ('Middle Africa', 'Middle Africa'),
        ('Northern Africa', 'Northern Africa'),
        ('Northern America', 'Northern America'),
        ('Northern Europe', 'Northern Europe'),
        ('Polynesia', 'Polynesia'),
        ('South America', 'South America'),
        ('Southeastern Asia', 'Southeastern Asia'),
        ('Southern Africa', 'Southern Africa'),
        ('Southern Asia', 'Southern Asia'),
        ('Southern Europe', 'Southern Europe'),
        ('Western Africa', 'Western Africa'),
        ('Western Asia', 'Western Asia'),
        ('Western Europe', 'Western Europe'),
        ('None', 'none')
    )
def pluralize(text, n, suffix="s"):
    """Return TEXT with SUFFIX appended unless N == 1."""
    if n == 1:
        return text
    return "%s%s" % (text, suffix)
import json
def parse_response_content(response):
    """Decode the response body and try to parse it as JSON.

    Falls back to returning the decoded string when it is not valid JSON.
    """
    text = response.content.decode()
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        return text
def steps_per_quarter_to_steps_per_second(steps_per_quarter, qpm):
    """Convert a steps-per-quarter-note resolution to steps per second.

    qpm is quarter notes per minute, so qpm / 60 gives quarters per second.
    """
    return steps_per_quarter * qpm / 60.0
def get_paragraph(doc):
    """Convert raw text to a list of paragraphs, splitting on newlines.

    Note: uses split('\n'), so consecutive newlines produce empty strings
    in the result."""
    return doc.split('\n')
def map2matrix(matrix_size, index):
    """Map an index in a time series to the corresponding (row, col) index
    in the matrix.

    :param matrix_size: (num_rows, num_cols) of the matrix
    :param index: flat index into the row-major series
    :return: (row_index, col_index) tuple
    """
    # divmod performs the // and % pair in a single builtin call.
    return divmod(index, matrix_size[1])
import csv
def validate(args, limit_to=None):
    """Validate an input dictionary for Fisheries HST.

    Parameters:
        args (dict): The args dictionary.
        limit_to=None (str or None): If a string key, only this args
            parameter will be validated. If ``None``, all args parameters
            will be validated.

    Returns:
        A list of tuples where tuple[0] is an iterable of keys that the
        error message applies to and tuple[1] is the string validation
        warning.

    Raises:
        KeyError: if any required keys are missing from args.
    """
    warnings = []
    keys_with_empty_values = set()
    missing_keys = set()
    for key in ('workspace_dir',
                'results_suffix',
                'population_csv_path',
                'sexsp',
                'habitat_dep_csv_path',
                'habitat_chg_csv_path',
                'gamma'):
        # BUG FIX: the original tested ``key in (None, limit_to)``, which is
        # never true when limit_to is None, so the validate-all case skipped
        # every required key. The correct test (matching the loops below) is
        # ``limit_to in (key, None)``.
        if limit_to in (key, None):
            try:
                if args[key] in ('', None):
                    keys_with_empty_values.add(key)
            except KeyError:
                missing_keys.add(key)

    if len(missing_keys) > 0:
        raise KeyError(
            'Args is missing required keys: %s' % ', '.join(
                sorted(missing_keys)))

    if len(keys_with_empty_values) > 0:
        warnings.append((keys_with_empty_values,
                         'Argument must have a value'))

    for csv_key in ('population_csv_path',
                    'habitat_dep_csv_path',
                    'habitat_chg_csv_path'):
        if limit_to in (csv_key, None):
            try:
                # `with` closes the handle; the original leaked it.
                with open(args[csv_key], 'r') as csv_file:
                    csv.reader(csv_file)
            except (csv.Error, IOError):
                warnings.append(([csv_key],
                                 'Parameter must be a valid CSV file'))

    if limit_to in ('sexsp', None):
        if args['sexsp'] not in ('Yes', 'No'):
            warnings.append((['sexsp'],
                             'Parameter must be either "Yes" or "No"'))

    if limit_to in ('gamma', None):
        try:
            float(args['gamma'])
        except ValueError:
            warnings.append((['gamma'],
                             'Parameter must be a number'))

    return warnings
def split_lhs_rhs(expr):
    """Split the equation into left and right hand side.

    >>> split_lhs_rhs(" 12 + a ")
    (None, '12 + a')
    >>> split_lhs_rhs(" c = 12 + a ")
    ('c', '12 + a')
    """
    parts = [part.strip() for part in expr.split("=")]
    if len(parts) == 1:
        # No '=' present: the whole expression is the right-hand side.
        return None, parts[0]
    lhs, rhs = parts
    return lhs, rhs
def make_predictions(model, test_x):
    """
    Run the model's predict() on the test features and return the result.

    :param model: object exposing a scikit-learn-style predict() method
    :param test_x: feature matrix to predict on
    :return: whatever model.predict returns (typically an array of labels)
    """
    predictions = model.predict(test_x)
    return predictions
from typing import Counter
def count_triplets(arr, r):
    """
    Count index-ordered triplets (i < j < k) with arr[j] == arr[i] * r and
    arr[k] == arr[j] * r.

    BUG FIX: the original counted value multiplicities over the whole array
    for every element, ignoring index order — and, as its own docstring
    admitted, over-counted badly for r == 1 (each element matched itself).
    This version keeps separate left/right counters around each candidate
    middle element, which handles r == 1 and index ordering correctly.
    """
    left = Counter()        # values strictly before the current middle
    right = Counter(arr)    # values at or after the current middle
    triplets = 0
    for middle in arr:
        right[middle] -= 1  # exclude the middle element itself
        # middle / r may be a non-integer float; Counter then reports 0.
        triplets += left[middle / r] * right[middle * r]
        left[middle] += 1
    return triplets
def whisper(text):
    """Create an ephemeral (sender-only) Slack-style response payload."""
    response = {"response_type": "ephemeral"}
    response["text"] = text
    return response
def get_test_json():
    """Fixture returning a sample JSON response string: an HVAC-style status
    payload with SYST (system config/state) and CGOM (zone/group) sections."""
    return '[{"SYST": {"CFG": {"MTSP": "N", "NC": "00", "DF": "N", "TU": "C", "CF": "1", "VR": "0183", "CV": "0010", "CC": "043", "ZA": " ", "ZB": " ", "ZC": " ", "ZD": " " }, "AVM": {"HG": "Y", "EC": "N", "CG": "Y", "RA": "N", "RH": "N", "RC": "N" }, "OSS": {"DY": "TUE", "TM": "16:45", "BP": "Y", "RG": "Y", "ST": "N", "MD": "C", "DE": "N", "DU": "N", "AT": "999", "LO": "N" }, "FLT": {"AV": "N", "C3": "000" } } },{"CGOM": {"CFG": {"ZUIS": "N", "ZAIS": "Y", "ZBIS": "Y", "ZCIS": "N", "ZDIS": "N", "CF": "N", "PS": "Y", "DG": "W" }, "OOP": {"ST": "F", "CF": "N", "FL": "00", "SN": "Y" }, "GSS": {"CC": "N", "FS": "N", "CP": "N" }, "APS": {"AV": "N" }, "ZUS": {"AE": "N", "MT": "999" }, "ZAS": {"AE": "N", "MT": "999" }, "ZBS": {"AE": "N", "MT": "999" }, "ZCS": {"AE": "N", "MT": "999" }, "ZDS": {"AE": "N", "MT": "999" } } }]'
import re
def find_profile_links(soup, search_string):
    """
    Find links in html based on the class and string content of a div.

    Locates the first <div class="head"> whose text matches search_string,
    then collects every <a> inside the next sibling <div>.

    Required Parameters:
        soup: a BeautifulSoup object
        search_string: the type of link to search for; used as a regex
            against the div text, so it must match the html

    Returns:
        dict mapping link text to its href (empty when no match found)
    """
    profile_links = {}
    div_prefix = soup.find('div', class_='head', string=re.compile(search_string))
    if div_prefix:
        for s in div_prefix.next_siblings:
            if s.name == 'div':  # First sibling div holds the links
                for a in s.find_all('a'):
                    profile_links[a.text] = a.get('href')
                # Only the first matching sibling div is consumed.
                break
    return profile_links
def is_community_role(role):
    """Test if a given role belongs to a community.

    A community role is named ``com:<suffix>`` with a non-empty suffix.

    Returns:
        (bool) True if the role belongs to a community
    """
    # startswith replaces the manual slice; len > 4 requires a non-empty
    # suffix after the 'com:' prefix, as in the original.
    return role.name.startswith('com:') and len(role.name) > 4
import json
def json_write_data(json_data, filename):
    """Write json data into a file (pretty-printed, sorted keys,
    non-ASCII preserved).

    :param json_data: JSON-serializable object
    :param filename: destination file path
    :return: True on success (exceptions propagate on failure)
    """
    with open(filename, 'w') as fp:
        json.dump(json_data, fp, indent=4, sort_keys=True, ensure_ascii=False)
        return True
    # The original had an unreachable `return False` after this point;
    # failures surface as exceptions, not a False return.
def load_rubik():
    """
    Load a rubik's cube (and its move counter) from custom_files/save.rubik.

    Returns
    -------
    (rubik, counter) tuple on success, where rubik is the loaded cube list;
    None when the save file is missing or malformed.
    """
    try:
        with open("custom_files/save.rubik", "r") as savefile:  # Open the save file
            # SECURITY(review): eval() executes arbitrary Python expressions
            # from the save file; the empty globals dict limits but does not
            # remove the risk. Prefer ast.literal_eval or json for save data.
            rubik, counter = eval(str(savefile.readlines()[0]), {})  # Read the line as an evaluation (convert it to a list without recreating one)
            if isinstance(rubik, list):  # If the rubik is a list, it's considered as a valid cube
                return rubik, counter
            else:  # If the rubik is not a list, it's considered as an invalid cube
                print("[ERROR] Invalid save format")
                return None
    except:  # Bare except: any failure (missing file, bad eval, bad unpack) is treated as "no save"
        print("[ERROR] No save file found")
        return None
def decodeDegreesStr(valStr):
    """
    Return a signed latitude/longitude value from a string such as '10S'.
    Only copes with the integer values used in grid cell names; S and W
    suffixes produce negative values.
    """
    magnitude = int(valStr[:-1])
    hemisphere = valStr[-1]
    return -magnitude if hemisphere in ("S", "W") else magnitude
def get_results(combinations, config_dict):
    """
    Find, for each mission, its most resilient configuration and probability.

    For every configuration index the best (maximum-sum) per-host result is
    taken, and across configurations the one whose best result has the
    *lowest* sum is kept as the mission's answer.

    :param combinations: dictionary which contains for each mission its
        configurations, indexed the same way as config_dict
    :param config_dict: dictionary which contains for each configuration and
        goal host the inferred probabilities (C, I, A) under key 'result'
    :return: dictionary which contains for each mission only its selected
        configuration and probability triple
    """
    result_dict = {}
    for mission in combinations:
        result_dict[mission] = {}
        for index in config_dict[mission]:
            partial_result = {}
            # Within one configuration, keep the host with the highest
            # probability sum.
            for host in config_dict[mission][index]:
                if "probability" not in partial_result or \
                        sum(config_dict[mission][index][host]['result']) > \
                        sum(partial_result["probability"]):
                    partial_result["configuration"] = combinations[mission][index]
                    partial_result["probability"] = config_dict[mission][index][host]['result']
            # Across configurations, keep the one whose best-host sum is
            # lowest (most resilient worst case).
            if "probability" not in result_dict[mission] or \
                    sum(partial_result["probability"]) < sum(result_dict[mission]["probability"]):
                result_dict[mission]['configuration'] = partial_result["configuration"]
                result_dict[mission]["probability"] = partial_result["probability"]
    return result_dict
def parse_description(description: str) -> str:
    """Make an effort to humanize a description by stripping a leading
    'Alcoholic beverage(s), ' tag when present."""
    for prefix in ('Alcoholic beverage, ', 'Alcoholic beverages, '):
        # Containment check (not startswith) matches the original behavior.
        if prefix in description:
            return description[len(prefix):]
    return description
def bookmarklet() -> str:
    """Return the full bookmarklet.

    The embedded JavaScript either copies the current Salesforce domain and
    session id ('sid' cookie) to the clipboard as a JSON pair (when already
    on /services/data/), or opens that API path in a new tab.

    Returns:
        The contents of the bookmarklet, ready to be installed
    """
    snippet = """(() => {
  let apipath = '/services/data/';
  if (location.pathname === apipath) {
    let sessid = (';' + document.cookie).split('; sid=')[1].split('; ')[0];
    let domain = location.host;
    let output = JSON.stringify([domain, sessid]);
    navigator.clipboard.writeText(output);
  } else {
    window.open(location.origin + apipath, '_blank');
  }
})();"""
    return f"javascript:{snippet}"
def ones_like(x):
    """Return an array of the same shape as `x` containing only ones."""
    # Multiplying by 0. then adding 1. preserves x's shape (and, for array
    # inputs, dtype-promotion semantics) without importing any array library.
    return x * 0. + 1.
def convert_schemas(mapping, schemas):
    """Convert schemas to be compatible with storage schemas.

    Rewrites foreign-key references in place: each non-'self' resource name
    is replaced with the '<table>' placeholder plus the mapped table name.

    Parameters
    ----------
    mapping: dict
        Mapping between resource name and table name.
    schemas: list
        Schemas to convert; NOTE: mutated in place.

    Returns
    -------
    schemas: list
        The same list object, with foreign keys rewritten.

    Raises
    ------
    ValueError
        If a foreign key references a resource missing from `mapping`.
    """
    for schema in schemas:
        for fk in schema.get('foreignKeys', []):
            resource = fk['reference']['resource']
            # 'self' references stay untouched.
            if resource != 'self':
                if resource not in mapping:
                    message = (
                        'Resource "%s" for foreign key "%s" '
                        'doesn\'t exist.' % (resource, fk))
                    raise ValueError(message)
                fk['reference']['resource'] = '<table>'
                fk['reference']['table'] = mapping[resource]
    return schemas
import torch
def th_matrixcorr(x, y):
    """
    Return a correlation matrix between columns of x and columns of y.

    So, if X.size() == (1000,4) and Y.size() == (1000,5), then the result
    will be of size (4,5) with the (i,j) value equal to the pearsonr
    correlation coeff between column i in X and column j in Y.

    NOTE(review): torch.norm(xm, 2, 0) returns a 1-D tensor in current
    PyTorch, where .t() is a no-op and .mm() requires 2-D inputs — this
    code appears to target an older PyTorch API; confirm before reuse.
    """
    # Center each column of both matrices.
    mean_x = torch.mean(x, 0)
    mean_y = torch.mean(y, 0)
    xm = x.sub(mean_x.expand_as(x))
    ym = y.sub(mean_y.expand_as(y))
    # Numerator: cross-products of centered columns.
    r_num = xm.t().mm(ym)
    # Denominator: outer product of the column L2 norms.
    r_den1 = torch.norm(xm, 2, 0)
    r_den2 = torch.norm(ym, 2, 0)
    r_den = r_den1.t().mm(r_den2)
    r_mat = r_num.div(r_den)
    return r_mat
import os
import re
def _CheckFilterFileFormat(input_api, output_api):
    """This ensures all modified filter files are free of common syntax errors.

    See the following for the correct syntax of these files:
    https://chromium.googlesource.com/chromium/src/+/master/testing/buildbot/filters/README.md#file-syntax

    As well as:
    https://bit.ly/chromium-test-list-format

    Returns a list of presubmit results: errors for syntax violations and a
    prompt-warning when a file mixes inclusion and exclusion lines.
    """
    errors = []
    warnings = []
    for f in input_api.AffectedFiles():
        filename = os.path.basename(f.LocalPath())
        if not filename.endswith('.filter'):
            # Non-filter files. Ignore these.
            continue
        inclusions = 0
        exclusions = 0
        for line_num, line in enumerate(f.NewContents()):
            # Implicitly allow for trailing (but not leading) whitespace.
            line = line.rstrip()
            if not line:
                # Empty line. Ignore these.
                continue
            if line.startswith('#'):
                # A comment. Ignore these.
                continue
            if line.startswith('//') or line.startswith('/*'):
                errors.append(
                    '%s:%d Not a valid comment syntax. Use "#" instead: "%s"' % (
                        filename, line_num, line))
                continue
            if not re.match(r'^\S+$', line):
                errors.append(
                    '%s:%d Line must not contain whitespace: "%s"' % (
                        filename, line_num, line))
                continue
            # A leading '-' marks an exclusion pattern; anything else includes.
            if line[0] == '-':
                exclusions += 1
            else:
                inclusions += 1
        # If we have a mix of exclusions and inclusions, print a warning with a
        # Y/N prompt to the author. Though this is a valid syntax, it's possible
        # that such a combination will lead to a situation where zero tests are run.
        if exclusions and inclusions:
            warnings.append(
                '%s: Contains both inclusions (%d) and exclusions (%d). This may '
                'result in no tests running. Are you sure this is correct?' % (
                    filename, inclusions, exclusions))
    res = []
    if errors:
        res.append(output_api.PresubmitError(
            'Filter files do not follow the correct format:',
            long_text='\n'.join(errors)))
    if warnings:
        res.append(output_api.PresubmitPromptWarning(
            'Filter files may be incorrect:\n%s' % '\n'.join(warnings)))
    return res
def estimate_phones(x):
    """
    Allocate a consumption category given a specific luminosity
    ('mean_luminosity_km2' field of x).
    """
    luminosity = x['mean_luminosity_km2']
    if luminosity > 5:
        return 10
    if luminosity > 1:
        return 5
    return 1
def get_analysis_csv(path, line):
    """Read the analysis CSV header plus up to 5 data rows starting at the
    given (non-negative) data-row index.

    Returns (rows, row_count, header), each row/header split on commas with
    the trailing newline character removed."""
    with open(path, 'r') as analysis_file:
        table_header = analysis_file.readline()[:-1].split(',')
        data_lines = analysis_file.readlines()
    selected = data_lines[line:line + 5]
    res = [row[:-1].split(',') for row in selected]
    return res, len(res), table_header
def repetition_a_eviter(serie, a_eviter=False):
    """
    A repetition to avoid.

    .. exref::
        :title: Avoid making the same call twice
        :tag: Base -

        This function computes the variance of a series of observations.

        ::

            def moyenne(serie):
                return sum(serie) / len(serie)

            def variance_a_eviter(serie):
                s = 0
                for obs in serie :
                    s += (obs-moyenne(serie))**2
                return s / len(serie)

        ``variance_a_eviter`` calls ``moyenne`` on every pass through the
        loop even though nothing changes between passes.  It is better to
        store the result in a variable:

        ::

            def moyenne(serie):
                return sum(serie) / len(serie)

            def variance(serie):
                s = 0
                moy = moyenne(serie)
                for obs in serie :
                    s += (obs-moy)**2
                return s / len(serie)

        The cost of the variance then drops from :math:`O(n^2)` to
        :math:`O(n)`.  This is not the only place the mistake shows up.
        The following code calls ``major`` twice with the same argument,
        which should also be avoided:

        ::

            meilleur = major(data)[0]  # returns ("something", True)
            if major(data)[1]:
                return {"leaf":guess}
    """
    def moyenne(serie):
        "arithmetic mean"
        return sum(serie) / len(serie)

    def variance_a_eviter(serie):
        "slow variance: O(n^2), moyenne() is re-evaluated on every iteration"
        s = 0
        for obs in serie:
            s += (obs - moyenne(serie)) ** 2
        return s / len(serie)

    def variance(serie):
        "fast variance: the mean is computed once, O(n)"
        s = 0
        moy = moyenne(serie)
        for obs in serie:
            s += (obs - moy) ** 2
        return s / len(serie)

    # `a_eviter` selects the deliberately inefficient variant for
    # demonstration purposes; both return the same value.
    if a_eviter:
        return variance_a_eviter(serie)
    else:
        return variance(serie)
import tempfile


def get_temporary_directory_path(prefix, suffix):
    """Create a named temporary file for artifacts.

    Despite the historical name, this returns a ``NamedTemporaryFile``
    object (deleted when closed), not a directory path.

    :param prefix: prefix for the temporary file name
    :param suffix: file extension such as ``.csv``, ``.txt`` or ``.png``
    :return: the open ``tempfile.NamedTemporaryFile`` object
    """
    return tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix)
def mask_status(data, column, mask_dict):
    """Encode the NYT "reopen" status strings of ``data[column]`` as ints.

    Values listed in ``mask_dict[1]`` map to 1, values in ``mask_dict[2]``
    map to 2, and anything else maps to 3.

    :param data: DataFrame containing the status column
    :param column: name of the column to encode
    :param mask_dict: mapping of integer code -> collection of raw values
    :return: Series of integer codes
    """
    def encode(value):
        # Explicit branches replace the original nested-lambda ternary.
        if value in mask_dict[1]:
            return 1
        if value in mask_dict[2]:
            return 2
        return 3

    return data[column].apply(encode)
def predict_cluster_by_precomputed_distances(precomputed_distances):
    """Assign each object to its nearest cluster.

    Parameters
    ----------
    precomputed_distances : np.array
        Array of shape (n_topics, n_objects): distance from each cluster
        (row) to each object (column).

    Returns
    -------
    np.array
        Array of length n_objects; element i is the index of the cluster
        closest to object i.
    """
    # Transpose so each row holds one object's distances to every cluster,
    # then take the per-row minimum position.
    distances_per_object = precomputed_distances.T
    return distances_per_object.argmin(axis=1).ravel()
from typing import Any
from typing import List


def fill_array(variable: Any, default: Any, final_size: int) -> List[Any]:
    """Pad a per-plot parameter out to ``final_size`` entries.

    Parameters
    ----------
    variable
        The parameter to fill values for (scalar or list).
    default
        The default value for the argument on a single plot.
    final_size
        The number of keys (plots) in the figure.

    Returns
    -------
    A list of length ``final_size``.  A non-list input is wrapped first.
    When the list is shorter than ``final_size``: a single-element list is
    repeated (that one value applies to every plot); otherwise ``default``
    fills the remaining slots.  Note: a list passed in is extended in
    place, matching the original behavior.
    """
    if not isinstance(variable, list):
        variable = [variable]
    original_length = len(variable)
    while len(variable) < final_size:
        variable.append(variable[0] if original_length == 1 else default)
    return variable
import os


def aws_credentials():
    """Populate fake AWS credentials in the environment for testing.

    Returns
    -------
    dict
        Keyword arguments suitable for constructing a boto3 session/client.
    """
    fake = 'testing'
    for key in (
        'AWS_ACCESS_KEY_ID',
        'AWS_SECRET_ACCESS_KEY',
        'AWS_SECURITY_TOKEN',
        'AWS_SESSION_TOKEN',
        'SQS_POLLER_AWS_ACCESS_KEY_ID',
        'SQS_POLLER_AWS_SECRET_ACCESS_KEY',
    ):
        os.environ[key] = fake
    return {
        'aws_access_key_id': fake,
        'aws_secret_access_key': fake,
        'region_name': 'eu-west-1',
    }
def g_coordinates(parameters, actual_pos):
    """Parse the target coordinates of a G-code move.

    :param parameters: parameter string such as ``'x1.0 y2.5 z0.3'``; each
        space-separated token is an axis letter followed by its value
    :param actual_pos: current ``[x, y, z]`` position, used as the default
        for any axis not present in ``parameters``
    :return: the target ``[x, y, z]`` position
    """
    # Default each axis to the current position.  Bug fix: y and z
    # previously both defaulted to actual_pos[0] instead of their own
    # components.
    x = actual_pos[0]
    y = actual_pos[1]
    z = actual_pos[2]
    for param in parameters.split(' '):
        axis = param[0]
        value = float(param[1:])
        if axis == 'x':
            x = value
        elif axis == 'y':
            y = value
        elif axis == 'z':
            z = value
    return [x, y, z]
def import_component(name):
    """Import a dotted path and return its final component.

    Works around ``__import__`` returning the top-level package: the
    containing module is imported, then attributes are walked down to the
    requested component.

    >>> import_component('os').__name__
    'os'
    >>> import types
    >>> type(import_component('os.path.join')) == types.FunctionType
    True
    """
    parts = name.split('.')
    assert parts and len(parts) > 0
    # A bare module name needs no attribute traversal.
    if len(parts) == 1:
        return __import__(parts[0])
    try:
        # __import__('a.b') returns package 'a'; walk down to 'a.b'.
        module = __import__('.'.join(parts[:-1]))
        for attr in parts[1:-1]:
            module = getattr(module, attr)
    except ImportError:
        raise
    else:
        # Extract the desired component from the resolved module.
        return getattr(module, parts[-1])
def split_rand_pick(df, splitting_column1, splitting_column2, splitting_column3):
    """Collect row indices for the pick/random/PCA train-val-test splits.

    :param df: Target dataframe
    :param splitting_column1: column with "pick" split labels (1=train, 0=val, 2=test)
    :param splitting_column2: column with "random" split labels (0=train, 1=test)
    :param splitting_column3: column with "PCA" split labels (0=train, 1=val)
    :return: (train_pick, val_pick, test_pick, train_rand, test_rand,
              train_pca, val_pca) index objects
    """
    def rows_where(column, label):
        # Index of the rows whose split label equals `label`.
        return df[df[column] == label].index

    return (
        rows_where(splitting_column1, 1),
        rows_where(splitting_column1, 0),
        rows_where(splitting_column1, 2),
        rows_where(splitting_column2, 0),
        rows_where(splitting_column2, 1),
        rows_where(splitting_column3, 0),
        rows_where(splitting_column3, 1),
    )
def load_gimp_palette(filename):
    """Load a GIMP palette file into a list of RGB triples.

    Only lines containing a ``#`` are considered; the six characters after
    the ``#`` are parsed as hexadecimal ``RRGGBB``.  Malformed entries are
    skipped silently, as before.

    For simplicity's sake, a palette is just an array of RGB values::

        palette = [
            [r, g, b],
            [r, g, b],
            ...
        ]
    """
    palette = []
    # `with` guarantees the handle is closed (the original leaked it), and
    # the bare `except` is narrowed to the parse error actually expected.
    with open(filename, 'r') as handle:
        for line in handle:
            if '#' not in line:
                continue
            hex_part = line.split('#')[1]
            try:
                rgb = [int(hex_part[i:i + 2], 16) for i in (0, 2, 4)]
            except ValueError:
                # Not a valid RRGGBB entry (e.g. an ordinary comment).
                continue
            palette.append(rgb)
    return palette
def hamming_distance(pattern1, pattern2):
    """Return the Hamming distance between two equal-length patterns.

    Raises ``Exception`` when the patterns differ in length.
    """
    if len(pattern1) != len(pattern2):
        raise Exception('Length of both reads do not match')
    # Count positions where the two patterns disagree.
    return sum(a != b for a, b in zip(pattern1, pattern2))
def _make_properties(prop_names):
    """
    Decorator function for adding public properties to classes

    This creates each property with getter and setter methods that proxy
    to an underscore-prefixed backing attribute (property ``x`` reads and
    writes ``self._x``).

    Dean comments: "_make_properties is a _very esoteric function, and I
    remember wondering if I should remove it because it decreases readability
    while saving only a few lines of code. In general, I sort of think class
    decorators should be used sparingly, as they often obfuscate more than they
    clarify, (I would argue that the Pyro4.expose decorator is an example of a
    good class decorator.)"

    @param prop_names : properties to be defined for the decorated class
    @type  prop_names : list of str
    """
    def property_factory(prop_name):
        """
        local function which returns a getter and setter

        The factory exists so each property closes over its own
        ``prop_name``; defining the getter/setter directly inside the loop
        in ``wrapper`` would hit the late-binding-closure pitfall.
        """
        def prop_get(self):
            """
            this defines a method for getting the property's value
            """
            # Read the backing attribute, e.g. property "x" -> "_x".
            prop_val = getattr(self, "_{}".format(prop_name))
            return prop_val

        def prop_set(self, new_val):
            """
            this defines a method for setting the property's value
            """
            setattr(self, "_{}".format(prop_name), new_val)

        # return the methods for the named property
        return prop_get, prop_set

    def wrapper(cls):
        """
        Enhances a class by setting the attributes (property_names) passed to
        the decorator function

        @param cls : class to be decorated
        @type cls : class
        """
        for prop_name in prop_names:
            prop = property(*property_factory(prop_name))
            setattr(cls, prop_name, prop)
        return cls

    return wrapper
def to_pecha_id_link(pecha_id):
    """Return a markdown link to the OpenPecha repository for `pecha_id`."""
    url = "https://github.com/OpenPecha/" + pecha_id
    return "[{}]({})".format(pecha_id, url)
from typing import List


def generate_suqares_using_list_comprehensions(number: int) -> List[int]:
    """Return the integers ``0 .. number - 1`` as a list.

    (Note: despite the name, the values are not squared; the doctest below
    pins the historical behavior.)

    >>> number = 5
    >>> generate_suqares_using_list_comprehensions(number)
    [0, 1, 2, 3, 4]
    """
    return [value for value in range(number)]
import locale
def get_locale_language():
    """Get the language code for the current locale setting.

    Falls back to ``locale.getdefaultlocale()`` when the current locale
    reports no language component.

    NOTE(review): ``locale.getdefaultlocale`` is deprecated since Python
    3.11 and slated for removal; this fallback will need replacing when
    targeting newer interpreters.
    """
    return locale.getlocale()[0] or locale.getdefaultlocale()[0]
import torch


def d_change(prior, ground_truth):
    """Compute a change-based metric between two sets of boxes.

    Each corner displacement is squared and normalised by the ground-truth
    box width (for x) or height (for y); the metric is the square root of
    the summed normalised terms.

    Args:
        prior (tensor): Prior boxes, Shape: [num_priors, 4]
        ground_truth (tensor): Ground truth bounding boxes, Shape: [num_objects, 4]
    """
    # Absolute per-corner displacements, all four columns at once.
    corner_delta = torch.abs(prior - ground_truth)
    xtl = corner_delta[:, 0]
    ytl = corner_delta[:, 1]
    xbr = corner_delta[:, 2]
    ybr = corner_delta[:, 3]
    # Ground-truth box width/height used to normalise the deltas.
    width = ground_truth[:, 2] - ground_truth[:, 0]
    height = ground_truth[:, 3] - ground_truth[:, 1]
    normalised = (torch.pow(ytl, 2) / height) + (torch.pow(xtl, 2) / width) \
        + (torch.pow(ybr, 2) / height) + (torch.pow(xbr, 2) / width)
    return torch.sqrt(normalised)
def harmonic_series_tcr(n):
    """Return the n-th harmonic number 1 + 1/2 + ... + 1/n (tail-call recursive).

    :param n: number of terms; must be a positive int
    :raises TypeError: if ``n`` is not an integer
    :raises ValueError: if ``n`` is not positive
    """
    # Validate once up front instead of re-checking on every recursive
    # call, as the original did.
    if not isinstance(n, int):
        raise TypeError("n must be an integer")
    if n < 1:
        raise ValueError("n must be positive")

    def aux(n, acc):
        # Tail-recursive accumulator: adds terms from n down to 2
        # (acc starts at 1, the 1/1 term).
        if n == 1:
            return acc
        return aux(n - 1, 1 / n + acc)

    return aux(n, 1)
def getTweetText(session, ids):
    """Get all tweets with all fields.

    :param session: database session exposing ``execute``
    :param ids: iterable of tweet-id strings to look up

    WARNING(security): the ids are joined directly into the SQL text; if
    they can originate from untrusted input this is an SQL-injection
    vector -- switch to the driver's parameter binding.
    """
    params = ','.join(ids)
    qryText = "SELECT * FROM tweets WHERE tweet_id IN ({})".format(params)
    results = session.execute(qryText)
    return results
def load_stop(pathes):
    """Load and deduplicate stopwords from several files.

    pathes: list of stopword file paths (utf-8, one word per line)

    Returns a list of unique stripped words (order unspecified, as before).
    """
    stopwords = set()
    for path in pathes:
        with open(path, 'r', encoding='utf-8') as handle:
            stopwords.update(word.strip() for word in handle.readlines())
    return list(stopwords)
def assignment_display_name(assignment):
    """Best available display name for a schedule assignment.

    Regular sessions with a historic group use the group's name; otherwise
    fall back to the session name, then to the timeslot name.
    """
    session = assignment.session
    if session.type.slug == 'regular' and session.historic_group:
        return session.historic_group.name
    return session.name or assignment.timeslot.name
import argparse
import os


def build_parser() -> argparse.ArgumentParser:
    """Build CLI parser"""
    p = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Data / output options.
    p.add_argument("--downsample", type=float, default=1.0,
                   help="Downsample training to given proportion")
    p.add_argument("-o", "--outdir", default=os.getcwd(), type=str,
                   help="Output directory")
    p.add_argument("--onehot", action="store_true",
                   help="Use one-hot instead of embedding")
    # Optimisation options.
    p.add_argument("--batchsize", type=int, default=512, help="Batch size")
    p.add_argument("--lr", default=1e-3, type=float, help="Learning rate")
    p.add_argument("--maxepochs", type=int, default=500,
                   help="Maximum number of epochs")
    p.add_argument("--earlystop", type=int, default=25,
                   help="Early stopping patience")
    p.add_argument("--monitor", choices=["loss", "auroc", "auprc"],
                   default="auprc",
                   help="Metric to monitor for checkpointing and early stopping")
    p.add_argument("--min-edit", dest="min_edit", type=int, default=3,
                   help="Minimum (inclusive) edit distance between each item in training TRA/B pairs and a test TRA/B pair")
    # Miscellaneous.
    p.add_argument("--noneptune", action="store_true",
                   help="Disable neptune logging")
    p.add_argument("--device", type=int, default=0, help="GPU to train on")
    p.add_argument("--seed", type=int, default=2394, help="Random seed")
    return p
def is_df(df):
    """is_df

    Duck-typed check that ``df`` behaves like a ``pandas.DataFrame``,
    i.e. it exposes a ``to_json`` attribute.

    :param df: candidate ``pandas.DataFrame``
    """
    return hasattr(df, 'to_json')
import re


def parse_variant(variant):
    """Parse a genomic variant string into its components.

    The input is chromosome, position, reference allele, and alternative
    allele separated by any combination of ``-``, ``:`` and ``>``
    (e.g. ``'22-42127941-G-A'``).  One or both alleles may be omitted, in
    which case empty strings are returned in their place.

    Parameters
    ----------
    variant : str
        Genomic variant.

    Returns
    -------
    tuple
        ``(chrom, pos, ref, alt)`` with types ``(str, int, str, str)``.

    Examples
    --------
    >>> from fuc import common
    >>> common.parse_variant('22-42127941-G-A')
    ('22', 42127941, 'G', 'A')
    >>> common.parse_variant('22:42127941-G>A')
    ('22', 42127941, 'G', 'A')
    >>> common.parse_variant('22-42127941')
    ('22', 42127941, '', '')
    >>> common.parse_variant('22-42127941-G')
    ('22', 42127941, 'G', '')
    """
    fields = re.split('-|:|>', variant)
    chrom = fields[0]
    pos = int(fields[1])
    # Missing ref/alt fields become empty strings.
    ref = fields[2] if len(fields) > 2 else ''
    alt = fields[3] if len(fields) > 3 else ''
    return (chrom, pos, ref, alt)
def get_move_from_to(data):
    """
    Extract the move origin and destination squares from a dataset sample.

    The last 64 entries of ``data[0][0]`` encode an 8x8 board where -1
    marks the square moved from and 1 the square moved to.

    :param data: dataset
    :return: move_from, move_to (each a ``[row, col]`` pair)
    :raises Exception: if either marker is missing from the board
    """
    board = data[0][0][-64:]
    move_from = [9, 9]
    move_to = [9, 9]
    for index, cell in enumerate(board):
        if cell == -1:
            move_from = [index // 8, index % 8]
        elif cell == 1:
            move_to = [index // 8, index % 8]
    # [9, 9] is an impossible square, so it doubles as a "not found" flag.
    if move_from == [9, 9] or move_to == [9, 9]:
        raise Exception("Couldn't find moves")
    return move_from, move_to
def get_point(msg):
    """(str) -> tuple

    Print the message ``msg`` and interactively read the (x, y, z)
    coordinates of a point from standard input.

    Returns the point as a tuple of floats.
    """
    print(msg)
    # The generator is consumed left-to-right, so prompts appear in
    # x, y, z order exactly as before.
    return tuple(
        float(input('Enter %s coordinate: ' % axis)) for axis in ('x', 'y', 'z')
    )
def traverse_to_end(bm, forwards = True):
    """ Traverses a Sterna Helix and returns the index and vertex of the final vertex.

    Starting from vertex 0, repeatedly steps across edges that are smooth
    and not marked as seams, never revisiting a vertex, until no edge
    advances the walk.

    Args:
        bm -- bmesh, Sterna object

    KWArgs:
        forwards -- bool, direction of the traversal (controls the order
            in which a vertex's linked edges are scanned)

    Returns:
        tuple<bmesh.Vertex, int>
            NOTE(review): the second element is the number of loop
            iterations performed, not the vertex index the summary above
            suggests -- confirm against callers.
    """
    visited = set()
    c_v = bm.verts[0]
    i = 0
    while True:
        i += 1
        v = c_v
        edges = v.link_edges if forwards else reversed(v.link_edges)
        for e in edges:
            # The edge's other endpoint: sum of both endpoint indices
            # minus the current vertex's index.
            e_verts = set([vx.index for vx in e.verts])
            index_other = sum(e_verts) - v.index
            other = bm.verts[index_other]
            # Only follow edges that are smooth and not seams, and only
            # move to vertices we have not visited yet.
            if not e.seam and e.smooth:
                if not index_other in visited:
                    c_v = other
                    visited.add(v.index)
        # No neighbour advanced the walk -> the end was reached.
        if v == c_v: break
    return c_v, i
import re


def maybe_get_next_page_path(response):
    """Return the URL of the next page from a paginated response, if any.

    Parses the ``Link`` header (RFC 5988 style) and returns the
    ``rel="next"`` target, or ``None`` when the response is not paginated.
    """
    link_headers = response.headers.get("Link")
    if not link_headers:
        return None
    # Map each rel name to its URL; later duplicates win, as before.
    rel_to_url = {
        rel: url
        for url, rel in re.findall(r'<([^>]*)>;\s*rel="([\w]*)\"', link_headers)
    }
    return rel_to_url.get("next", None)
def get_measurement_from_rule(rule):
    """
    Return the measurement name encoded in an Alert_Rule check_field.

    A check_field of the form ``model#measurement`` yields ``measurement``;
    a field without ``#`` is returned unchanged.  Any other shape falls
    through and yields ``None``, matching the original behavior.
    """
    # Stripping cannot change the number of '#'-separated parts, so one
    # split suffices for both the length test and the extraction.
    parts = rule.check_field.strip().split('#')
    if len(parts) == 2:
        return parts[1]
    if len(parts) == 1:
        return rule.check_field
def remove(*types):
    """Build a payload selecting a (list of) to-be-removed object(s).

    >>> remove("registrationid1", "registrationid2")
    {'remove': ['registrationid1', 'registrationid2']}
    >>> remove("tag1", "tag2")
    {'remove': ['tag1', 'tag2']}
    >>> remove("alias1", "alias2")
    {'remove': ['alias1', 'alias2']}
    """
    return {"remove": list(types)}
from typing import SupportsRound


def iround(x: SupportsRound) -> int:
    """Round ``x`` to the nearest integer and return it as a plain ``int``.

    Exists because e.g. ``round(np.float32)`` returns ``np.float32``
    rather than ``int``.
    """
    rounded = round(x)
    return int(rounded)
import re


def is_module(content):
    """Heuristically check whether Lua source ``content`` is a module.

    A module is assumed to end with a returned value: ``return`` followed
    by whitespace and an expression, optionally trailed by blank space at
    the end of the file.  This catches both returned variables
    (``return module``) and returned call results
    (``return setmetatable(t1, t2)``).

    content : str
        The Lua source code to check.

    Returns the ``re.Match`` object (truthy) or ``None``.
    """
    # `return` + whitespace + anything, anchored at end-of-file while
    # tolerating trailing whitespace (e.g. a final newline).
    trailing_return = r"return\s+.*(\s+)?$"
    return re.search(trailing_return, content)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.