| content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
def allVowelsA(word):
"""
allVowelsA is a function that changes all vowels to A.
This function is a building block for a function that will count
syllables by counting vowels. For that purpose, all vowels are equivalent.
But, we want to remove consecutive duplicate vowels. Converting all
vowels to the same vowel is a first step.
word should be a string---otherwise return boolean value False
when word is a string, return word with all vowels changed to the letter a
examples:
allVowelsA("by") produces "ba"
allVowelsA("alien") produces "alaan"
allVowelsA("boot") produces "baat"
allVowelsA("fruition") produces "fraataan"
Note: empty string is legal, and should return empty string
"""
    if type(word) != str:
        return False
    vowels = set('aeiouyAEIOUY')
    # Replace every vowel (including 'y'/'Y') with 'a' in a single pass
    return ''.join('a' if ch in vowels else ch for ch in word)
|
f010afea28c6c10efb862d2beed52a6e1e17933e
| 16,626
|
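The docstring above frames allVowelsA as a building block for a vowel-group syllable heuristic. A minimal sketch of that next step, assuming the hypothetical helper name countVowelGroups (not part of this dataset):

def countVowelGroups(word):
    """Heuristic syllable count: number of runs of consecutive vowels."""
    normalised = allVowelsA(word)
    if normalised is False:
        return False
    count = 0
    previous = ''
    for ch in normalised:
        if ch == 'a' and previous != 'a':
            count += 1
        previous = ch
    return count

# countVowelGroups("fruition") -> 2, since "fraataan" contains two vowel runs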
def clang_find_attributes(node):
"""Finds attributes one level below the Clang node."""
return [n for n in node.get_children() if n.kind.is_attribute()]
|
a18483d8f5b19ce1b66abdbef76af5a98e420e21
| 16,627
|
import six
def rows_from_table(table):
"""Given a tinyquery.Table, build an API-compatible rows object."""
result_rows = []
for i in six.moves.xrange(table.num_rows):
field_values = [{'v': str(col.values[i])}
for col in table.columns.values()]
result_rows.append({
'f': field_values
})
return result_rows
|
e8aef81702eb4ce5a89a3c9bc4b0d3389c17ad54
| 16,628
|
import socket
import struct
def inet_aton(s):
"""Like `socket.inet_aton()` but returns an int."""
packed = socket.inet_aton(s)
return struct.unpack('!I', packed)[0]
|
965388ca49c9f8472fc00f08af810e81bc7f0ff1
| 16,629
|
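For example, the dotted quad 192.168.0.1 packs to 0xC0A80001:

print(inet_aton('192.168.0.1'))  # 3232235521 == 0xC0A80001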
def regenerate_response(db_entry):
"""Unique message generator.
Args:
        db_entry (dict): Stored response from the database that has already been created.
    Returns:
        dict containing the message response data, ready to be serialized to JSON
"""
# Init a blank json response
response_data = {'wallet_address': db_entry['wallet_address'], 'contract_address': db_entry['contract_address'],
'tokenId': db_entry['tokenId'], 'random_str': db_entry['random_str'],
'message': db_entry['message']}
# Form JSON response
return response_data
|
fc60078fc09022811858554c50c5375bfd753f3e
| 16,630
|
from typing import Dict
from typing import List
import json
def _pluck_listen_ids(aws_event: Dict) -> List[str]:
"""Pluck the listen ids from a batched sqs event.
>>> _pluck_listen_ids({'Records': [{'body': '{"listen_id": "5"}'}]})
['5']
"""
message_bodies = [json.loads(record['body']) for record in aws_event['Records']]
return [message_body['listen_id'] for message_body in message_bodies]
|
e7440915ab23207ae82d3cc3878037df7c8a00d1
| 16,632
|
def _unpack_index(i):
"""Unpack index and return exactly four elements.
If index is more shallow than 4, return None for trailing
dimensions. If index is deeper than 4, raise a KeyError.
"""
if len(i) > 4:
raise KeyError(
"Tried to index history with {} indices but only "
"4 indices are possible.".format(len(i)))
# fill trailing indices with None
i_e, k_e, i_b, k_b = i + tuple([None] * (4 - len(i)))
return i_e, k_e, i_b, k_b
|
323a819107ac7c53c2b2abdabb46e573b620c7e9
| 16,633
|
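For example, a two-element index is padded with None for the trailing dimensions, while a full four-element index passes through unchanged:

print(_unpack_index((0, 'loss')))           # (0, 'loss', None, None)
print(_unpack_index((0, 'loss', 1, 'x')))   # (0, 'loss', 1, 'x')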
import sys
import torch
def matches(modules, node, pattern, max_uses=sys.maxsize):
""" Matches a node in fx against a pattern
"""
if isinstance(pattern, tuple):
self_match, *arg_matches = pattern
if self_match is getattr:
assert len(pattern) == 2, 'Expecting getattr pattern to have two elements'
arg_matches = []
else:
self_match = pattern
arg_matches = []
if node.uses > max_uses:
return False
if isinstance(self_match, type) and issubclass(self_match, torch.nn.Module):
if node.op != 'call_module':
return False
if not type(modules[node.target]) == self_match:
return False
elif callable(self_match):
if node.op != 'call_function' or node.target is not self_match:
return False
elif node.target is getattr:
if node.args[1] != pattern[1]:
return False
elif node.target != self_match:
return False
if not arg_matches:
return True
if len(arg_matches) != len(node.args):
return False
return all(matches(modules, node, arg_match, max_uses=1) for node, arg_match in zip(node.args, arg_matches))
|
47b64ccdd3627793a12329d25829970c037f0573
| 16,635
|
def does_intersect_rect(p, particles, padding, rect, is_3d = False):
"""
Returns true if particle p is sufficiently close or outside the rectangle (in 2d) or cuboid (in 3d)
Parameters
----------
p : list
Coordinates of center and radius of particle [x,y,z,r]
particles : list
List of center + radius of multiple particles. E.g. particles[0] is a list containing coordinates of center and radius.
    padding: float
        Minimum required clearance between the particle boundary and the rectangle boundary
    rect: list
        Coordinates of left-bottom and right-top corner points of rectangle (2d) or cuboid (3d). E.g. [x1, y1, z1, x2, y2, z2]
is_3d: bool
True if we are dealing with cuboid
Returns
-------
bool
True if particle intersects or is near enough to the rectangle
"""
    if len(p) < 4: raise Exception('p = {} must have at least 4 elements'.format(p))
if len(particles) == 0: raise Exception('particles = {} can not be empty'.format(particles))
if padding < 0.: raise Exception('padding = {} can not be negative'.format(padding))
if len(rect) < 6: raise Exception('rect = {} must have 6 elements'.format(rect))
pr = [p[0] - p[3], p[1] - p[3], p[2], p[0] + p[3], p[1] + p[3], p[2]]
if is_3d:
pr[2] -= p[3]
pr[5] += p[3]
if pr[0] < rect[0] + padding or pr[1] < rect[1] + padding or pr[3] > rect[3] - padding or pr[4] > rect[4] - padding:
if is_3d:
if pr[2] < rect[2] + padding or pr[5] > rect[5] - padding:
return True
else:
return True
return False
|
a3300c17c6f9bf3d8f47efac0c94a222f6ee34ef
| 16,638
|
def alphanumericp(c):
"""Returns true if character is an alphabetic character or a numeric
character; otherwise, returns false. """
    return type(c) is str and (c.isalpha() or c.isnumeric())
|
f6870d55471e8a96237c56e916df55c0f81266d9
| 16,639
|
import warnings
def get_link_to_assembly(species_list):
"""
Get the path for assembly download from NCBI
Select the most up-to-date Complete Genome of the each organisms
and returns the FTP path for that one.
-----------------------------------------------------------------
Argument:
species_list--list of dictionary obtained from
get_entries_for_species function
Return:
link_assembly: str, FTP path to the genome assembly page of the
selected model:
e.g ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/010/448/615/GCA_010448615.1_ASM1044861v1
"""
genomes = []
link_assembly = ''
for i in species_list:
if i['status'] == 'Complete Genome' and i['assembly'] not in ('', '-'):
genomes.append(i)
date_list = []
if len(genomes) != 0:
for i in genomes:
if len(genomes) == 1:
link_assembly += i['assembly']
elif len(genomes) >1:
date_list.append(i['date'])
else:
# consider eliminating the options below: if there is no
# genomic information, there isn't any cds_from_genomic file
warnings.warn('There is no complete genome for this species')
if len(date_list) != 0:
latest = max(date_list)
for i in species_list:
if i['date'] == latest:
link_assembly += i['assembly']
break # to pick the first match in case more than one entry have the same date
return link_assembly
|
50c086d714786c67335715f4005ecd3eb1338e55
| 16,640
|
def session(request):
"""Load the database session for a given aiohttp request
Internally, it just returns the value that was given as cleanup context by
    :func:`krake.api.app.db_session`.
Args:
request (aiohttp.web.Request): HTTP request
Returns:
krake.database.Session: Database session for the given request
"""
return request.app["db"]
|
897a3fc517d4f4a0773617769c60b9dc8fd29056
| 16,641
|
def get_labels(gdf):
"""
    Returns geodataframe with 'labels' (and 'paired') set according to most dense neighbours. Loops through rows and completes once the number of units has been reduced by ~50%.
Parameters:
-----------
gdf : (gpd.GeoDataFrame)
Dataframe in which units will be matched with dense neighbours
Returns:
---------
gdf : (gpd.GeoDataFrame)
Dataframe with units matched with their neighbours.
"""
required_number_of_units = round(len(gdf.labels.unique()) - (len(gdf.labels.unique()) * 0.5))
probs = 0
gdf.loc[gdf.labels == 0, 'labels'] = 0
gdf.loc[gdf.labels == 0, 'paired'] = True
for index, row in gdf.iterrows():
if len(gdf.labels.unique()) <= required_number_of_units:
print(f'{len(gdf.labels.unique())} admin units made. Finished')
break
if not gdf.loc[index, 'labels'] == 0:
if gdf.loc[index, 'paired'] == False:
paired = False
neighbour_df = gdf[gdf.geometry.touches(row['geometry'])]
            # display(neighbour_df)
for i, neighbour in neighbour_df.iterrows():
#Join up polygon with neighbour if not paired before
if gdf.at[i, 'paired'] == False:
gdf.at[index, 'paired'] = True
gdf.at[i, 'paired'] = True
gdf.at[index, 'labels'] = index
gdf.at[i, 'labels'] = index
paired = True
break
return gdf
|
88ac73b24be6fb68a169ff4cb9e66b4c99770b62
| 16,642
|
def upload_logs():
""" This is to get the webpage. """
return {
'page': 'upload_logs',
'raw_logs': '',
}
|
f0238b73acb6a0f4716ea4daf0db11e0cd69e33d
| 16,643
|
def get_play_both(basketball: list, football: list):
"""given two lists return players that appear in both in O(N+M)"""
basketball_map = {f"{player['first_name']} {player['last_name']}":True\
for player in basketball}
overlapping = []
for player in football:
key = f"{player['first_name']} {player['last_name']}"
if key in basketball_map:
overlapping.append(key)
return overlapping
|
3154b91e7e83a7bdc499d75f7ebe3bb610cf30aa
| 16,645
|
from datetime import datetime
def utc_time():
"""Make an utc_time with appended 'Z'"""
return str(datetime.utcnow()) + 'Z'
|
0f718b656b337d3cd14cc023944221ed16fb84d2
| 16,647
|
from typing import Optional
def to_safe_str_or_none(value: Optional[str]) -> Optional[str]:
"""Convert input to cleaned string or None."""
if value is None:
return None
v = str(value.strip()).replace('\r', '').replace('\n', '')
return v or None
|
2946183f58aa51deb4deeb450296af95ca41f72e
| 16,648
|
def check_equal(param1, param2, msg="{},{}"):
"""Checks whether the two parameters are equal or not."""
if param1 != param2:
raise ValueError(msg.format(param1, param2))
return param1
|
40f0058b41b937e6aecef1c035f934b05c9ac818
| 16,649
|
def hdr_str(s, x):
""" Return header description strings
"""
if isinstance(x, str):
return s
r = []
try:
for i, v in enumerate(x):
r += [s + '(' + chr(ord('x') + i) + ')']
except Exception:
r = s
return r
|
06a3135953fd26fec196423efaf22c9390e035c1
| 16,650
|
def calculate_gc(seq):
"""
Returns percentage of G and C nucleotides in a DNA sequence.
"""
    gc = 0
    for base in seq:
        if base in "GCgc":
            gc += 1
    return gc / len(seq) * 100
|
51048f70febf7309da1d2b4b8946315bdc31e939
| 16,651
|
def rename_coords_to_lon_and_lat(ds):
""" Rename Dataset spatial coord names to:
lat, lon
"""
if 'latitude' in ds.coords:
ds = ds.rename({'latitude': 'lat'})
if 'longitude' in ds.coords:
ds = ds.rename({'longitude': 'lon'})
elif 'long' in ds.coords:
ds = ds.rename({'long': 'lon'})
if 'z' in ds.coords:
ds = ds.drop('z').squeeze()
return ds
|
8ba286e441f2a32a96fbbddc5c1112a6ed890f84
| 16,652
|
def mean(values):
    """
    Given a list or tuple, will return the mean.
    Usage: mean(values)
    """
    total = 0
    for item in values:
        total += item
    return total / len(values)
|
77d88f7386b53ab79eddfa459fa9c8f907961a9e
| 16,653
|
def read_pos_data(filename):
"""
Read data from file which looks like (sec, nsec, x, y, z)
0 8000000 -14 0 0.149843
1 12000000 -13.9997 -1.6e-05 0.117777
2 16000000 -13.9997 -1.9e-05 0.117841
"""
arr = []
for line in open(filename):
sec, nsec, x, y, z = [float(a) for a in line.split()]
arr.append((sec + nsec/1_000_000_000, (x, y, z)))
return arr
|
8f1a148e44033184cbd92cbb1f00de7e7d21e73f
| 16,654
|
def decode_response_version_from_config(confbytes: bytes) -> str:
"""Decode the string version from the bytearray response from Ledger device"""
return "{}.{}.{}".format(
confbytes[1],
confbytes[2],
confbytes[3],
)
|
66dc0b71b2c9a22ca8198fb2a5ecbe69a7a0871b
| 16,656
|
import re
import json
def read_json(filename):
    """
    Lines whose first non-blank character is # (comment lines)
    are stripped from the json before parsing
    use like this::
        import pprint
        pprint.pprint(read_json(sys.argv[1]))
    """
    with open(filename) as f:
        content = ''
        # weed out comment lines
        for line in f.readlines():
            content += re.sub(r'^\s*#.*', '', line)
    return json.loads(content)
|
8bd89325e8f3b486bb13790fc4c827427a435515
| 16,657
|
def cut_clockwise(positive_offset: bool, spindle_clockwise: bool, climb: bool):
"""
If all 3 are true, then cut must be done clockwise.
Changing one to false, the cut must be done counter-clockwise.
Changing two to false, the cut must be done clockwise.
Changing all three to false, the cut must be done counter-clockwise.
You get the idea..
:param positive_offset: Positive offset = outside cut, negative offset = inside cut
:param spindle_clockwise: Spindle spinning clockwise (top->down)
:param climb: climb milling (vs conventional milling)
:return: cut clockwise (or counter-clockwise)
"""
return bool((positive_offset + spindle_clockwise + climb) % 2)
|
62a5c70ce723e8fc26e1cc9d8c0e7b167c031dc5
| 16,658
|
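A quick truth-table check of the parity rule the docstring describes (uses only the function above):

from itertools import product

for offset, spindle, climb in product([True, False], repeat=3):
    print(offset, spindle, climb, '->', cut_clockwise(offset, spindle, climb))
# All three True -> True (clockwise); flipping any single flag toggles the result.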
import pickle
def read_binary_file(file_name):
"""reads binary file and returns the content"""
with open(str(file_name), "rb") as bin_file:
obj = pickle.load(bin_file)
return obj
|
4d350edb97310df963fcd2bf3af67deb184f1cdb
| 16,659
|
def filter_selector(query):
"""
Selects filter
:param query:
:return: query including selected filter
"""
selected_filter = input("Available Filters: \n \
1 = Clinical Trial \n \
2 = Meta-Analysis \n \
3 = Personal Narrative \n \
4 = Preprint \n \
5 = Randomized Controlled Trial \n \
6 = Review \n \
7 = Systematic Review \n Enter the filter number: ")
if selected_filter == "1":
query = query + " AND Clinical Trial[pt]"
elif selected_filter == "2":
query = query + " AND Meta-Analysis[pt]"
elif selected_filter == "3":
query = query + " AND Personal Narrative[pt]"
elif selected_filter == "4":
query = query + " AND Preprint[pt]"
elif selected_filter == "5":
query = query + " AND Randomized Controlled Trial[pt]"
elif selected_filter == "6":
query = query + " AND Review[pt]"
elif selected_filter == "7":
query = query + " AND Systematic Review[pt]"
return query
|
0533c2fad16d1768e907ca3065fb3de8271784c4
| 16,660
|
from typing import Sequence
from typing import Dict
def create_fills_query(start_datetime: str, end_datetime: str, uuids: Sequence[str]) -> Dict:
"""Create a GetFills dictionary request.
Args:
        start_datetime: UTC datetime as '%Y-%m-%dTHH:MM:SS'
end_datetime: UTC datetime as '%Y-%m-%dTHH:MM:SS'
uuids: List of user uuids to get fills associated with
Returns: A dictionary representation of a blpapi.Request
"""
return {"GetFills": {"FromDateTime": start_datetime, "ToDateTime": end_datetime, "Scope": {"Uuids": uuids}}}
|
beb383badbe60d39f72185a8633155f797f98f4f
| 16,661
|
def get_request_id(request_json):
"""Get the request_id from the request
"""
request_id = request_json['requestInfo'].get('requestId')
if not request_id:
request_id = request_json['requestInfo'].get('requestID')
return request_id
|
68b3e9e8a15d84d1042173b8fc8c480f996d616a
| 16,664
|
def parse_playlist_uri(uri):
"""
Takes a playlist uri and splits it to (user_id, playlist_id)
"""
playlistparts = uri.split(':')
# Sample: spotify:user:lmljoe:playlist:0DXoY83tBvgWkd8QH49yAI
if len(playlistparts) != 5:
print('Invalid playlist id')
exit()
user_id = playlistparts[2]
playlist_id = playlistparts[4]
return user_id, playlist_id
|
23e7f36d4008f0e361a93e81289f302bf15cee81
| 16,665
|
def _ahull_sort_points(hull, points, p0, p1):
"""Compute the sub-hull using a sorted chain-hull algorithm"""
dx, dy = p1 - p0
p0_x, p0_y = p0
def line_order(pt):
return dx * (pt[0] - p0_x) + dy * (pt[1] - p0_y)
points.sort(key=line_order)
points.append(p1)
stack = [p0]
push = stack.append
pop = stack.pop
for p in points:
while len(stack) >= 2:
v0 = stack[-2]
v1 = stack[-1]
if ((v1[0] - v0[0])*(p[1] - v0[1])
- (p[0] - v0[0])*(v1[1] - v0[1]) >= 0.0):
pop()
else:
break
push(p)
pop()
hull.extend(stack)
|
ca8e16a10db76146d63b1e431982d39f11235ec3
| 16,666
|
import inspect
import logging
def verify_parameters(code, kwargs, err_message):
"""
Used to verify the that the parameters in kwarg match the signature of the code.
:param code: The code fragment's that has the signature to check.
:param kwargs: The kwargs to look for
:param err_message: An error message to show if the signature doesn't match.
:return:
"""
params = inspect.signature(code).parameters
verified = True
# The kwargs should all be in the function. Anything left should have a default
param_keys = set(params.keys())
param_keys.discard('self')
# Make sure they have all the ones we asked for.
missing = set(kwargs.keys()) - set(param_keys)
for param_name in missing:
logging.error(f"Missing param '{param_name}' on function {err_message}")
verified = False
remaining = set(param_keys) - set(kwargs.keys())
for param_name in remaining:
if params[param_name].default == inspect.Parameter.empty:
logging.error(f"Param '{param_name}' not passed for {err_message}")
verified = False
return verified
|
cd3c3542c41bb7ba0d3f8d7f250f44d743acb0a9
| 16,667
|
import socket
def get_open_ports(number):
"""Get the number of open ports from the system.
Args:
number: number of ports
Returns: list of open_ports
"""
ports = []
for i in range(number):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
if port > 0:
ports.append(port)
if len(ports) != number:
raise RuntimeError(
"Could not get enough open ports from the system. Needed {} but got {}.".format(number, len(ports))
)
return ports
|
972787bd26e7430ebbc6ad1f45dbf29b9e4aba00
| 16,671
|
def merge_dfs_by_index(df1, df2):
"""
Merge two pandas dataframe index-by-index.
The dataframes have to share the same index name. Shared indexes will be
merged without data loss. In case of conflicting entries a ValueError is
raised. The merge operation is symmetric and does not depend on the
order of df1 and df2.
Parameters
----------
df1: dataframe
Pandas dataframe to be extended
df2: dataframe
Pandas dataframe with used for extension
Returns
-------
dataframe:
The merged dataframe
Raises
----------
ValueError
in case of incompatible index names or values
"""
if df1.index.name != df2.index.name:
raise ValueError('Dataframes have incompatible indexes: '
f'{df1.index.name} != {df2.index.name}.')
# check for contradicting values by comparing A+B with B+A
left_combine = df1.combine_first(df2)
right_combine = df2.combine_first(df1)
# ignoring dtypes when checking equality
if not left_combine.astype(object).equals(right_combine.astype(object)):
raise ValueError('Dataframes have incompatible values: '
f'{left_combine.compare(right_combine)}')
return right_combine
|
fe1558403ab2ee7a01b034788edd6ac413f77eaf
| 16,674
|
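A small usage sketch with hypothetical data (pandas assumed available as pd):

import pandas as pd

df1 = pd.DataFrame({'x': [1, 2]}, index=pd.Index(['a', 'b'], name='key'))
df2 = pd.DataFrame({'y': [3]}, index=pd.Index(['a'], name='key'))
merged = merge_dfs_by_index(df1, df2)
# merged has columns x and y; row 'b' gets NaN for y.
# A df2 whose index is named anything other than 'key' would raise ValueError.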
def bytes_to_little_int(data: bytearray) -> int:
"""Convert bytes to little int."""
return int.from_bytes(data, byteorder="little", signed=False)
|
bdca6a59b4036cce1bd9bc59a9096556d87b257b
| 16,675
|
def resolve_translation(instance, info, language_code):
"""Gets translation object from instance based on language code."""
return instance.translations.filter(language_code=language_code).first()
|
79737c123e09760fb1514bbfad7d73c385a4309a
| 16,676
|
def ris_ignoreValue_check(fieldsTuple, itemType_value):
"""
params:
fieldsTuple, ()
itemType_value, str.
return: fieldValue_dict
"""
#
ris_element = fieldsTuple[0]
ignore_value_list = fieldsTuple[2]
fieldValue_dict = {}
#
    # True if the item type appears in any of the ignore values
    # (the original per-value loop let the last entry overwrite earlier matches)
    fieldValue_dict[ris_element] = any(
        itemType_value in ignore_value for ignore_value in ignore_value_list
    )
#
#
return fieldValue_dict
|
be9bfdf2ae761e8309e1d9b9db33fee99c466cf0
| 16,678
|
import re
def normalise_collection_url(url: str) -> str:
"""Remove anything after ID (e.g. `cp43213`) and change 'sciencemuseum.org.uk' to 'sciencemuseumgroup.org.uk'"""
url = url.replace(
"collection.sciencemuseum.org.uk", "collection.sciencemuseumgroup.org.uk"
)
if "collection.sciencemuseumgroup.org.uk" in url:
url = re.findall(r"https://(?:\w.+)/(?:co|cp|ap|aa)(?:\d+)", url)[0]
return url
|
c58d095b446cdca3ff0292f5905166acb8b863b6
| 16,679
|
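For example, with a hypothetical collection URL:

url = "https://collection.sciencemuseum.org.uk/objects/co8084947/apple-macintosh"
print(normalise_collection_url(url))
# https://collection.sciencemuseumgroup.org.uk/objects/co8084947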
def init_parameters(parameter):
"""Auxiliary function to set the parameter dictionary
Parameters
----------
parameter: dict
See the above function NMFconv for further information
Returns
-------
parameter: dict
"""
parameter = dict() if not parameter else parameter
parameter['numIter'] = 30 if 'numIter' not in parameter else parameter['numIter']
parameter['numComp'] = 3 if 'numComp' not in parameter else parameter['numComp']
    parameter['numTemplateFrames'] = 8 if 'numTemplateFrames' not in parameter else parameter['numTemplateFrames']
parameter['beta'] = 0 if 'beta' not in parameter else parameter['beta']
parameter['sparsityWeight'] = 0 if 'sparsityWeight' not in parameter else parameter['sparsityWeight']
parameter['uncorrWeight'] = 0 if 'uncorrWeight' not in parameter else parameter['uncorrWeight']
return parameter
|
397a648b2ed799c5236fd8456874d739e7744974
| 16,681
|
def tonumber(v):
"""
Convert a value to int if its an int otherwise a float.
"""
try:
v = int(v)
    except ValueError:
v = float(v)
return v
|
8b52ac3385b3ffc721af523799ef3a6da4e29060
| 16,682
|
def _param(param, value):
"""
create 'parameter=value'
"""
return "{0}={1}".format(param, value)
|
a64cedf88c20b774ffef23bb7caded4ed7975143
| 16,683
|
def array_madness(arr1: list, arr2: list) -> bool:
""" This function returns True if the sum of the squares of each element in arr1 is strictly greater than the sum of the cubes of each element in arr2. """
    if len(arr1) >= 1 and len(arr2) >= 1:
        return sum(i**2 for i in arr1) > sum(i**3 for i in arr2)
    return False
|
67f4d2ca7bb51d0134336993c88928eca29b8383
| 16,684
|
def get_bcrypt_salt(hashed):
"""
Get the salt from on bcrypt hashed string
"""
return hashed[:29]
|
ce7f632ddb832548841b0daead4e8176899ac21d
| 16,685
|
def split_str_avoiding_square_brackets(s: str) -> list:
"""
Splits a string by comma, but skips commas inside square brackets.
:param s: string to split
:return: list of strings split by comma
"""
res = list()
skipping = 0
last_idx = 0
for i, c in enumerate(s):
if c == '[':
skipping += 1
elif c == ']':
skipping -= 1
elif c == ',' and skipping == 0:
res.append(s[last_idx:i])
last_idx = i + 1
res.append(s[last_idx:])
return res
|
29dad952d9b8151bb2bad0f5c7338436251792b7
| 16,686
|
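For example, commas inside brackets are preserved:

print(split_str_avoiding_square_brackets("a,[b,c],d"))
# ['a', '[b,c]', 'd']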
def _write_categories(categories_list):
"""
Parameters
----------
categories_list : list of str
the list of category's names
Returns
-------
the categories' dictionary
"""
# create an empty categories' dictionary
categories_dict = {"categories": []}
    for idx, category in enumerate(categories_list):
        category_dict = {"id": idx + 1, "name": category, "supercategory": category}
        categories_dict["categories"].append(category_dict)
return categories_dict
|
5af597f584c3b1a06a271cc6115398d9ef617668
| 16,687
|
import os
import fnmatch
def ls_r(path, fileext):
"""
Works like a shells `ls -r` syscall, but searches with a given file
extensions (`fileext`) in mind
**Positional Arguments:**
path:
- The base path from where we traverse the directory structure
fileext:
        - The file extensions we care about when we traverse
"""
matches = []
for ext in fileext:
for root, dirnames, fns in os.walk(path):
for fn in fnmatch.filter(fns, "*"+ext):
matches.append(os.path.join(root, fn))
return matches
|
dce5a8f40bcf76336b33e5ab327bd985dbcd20a1
| 16,688
|
def sizeToTeam(size):
"""Given a size in kilobytes, returns the 512kb.club team (green/orange/blue),
or "N/A" if size is too big for 512kb.club"""
if size<100:
return "green"
elif size<250:
return "orange"
elif size<=512:
return "blue"
else:
return "N/A"
|
a61f6a28f8f00cd05271684f715345b7fac4ed54
| 16,689
|
import time
def time_seconds():
"""
    Get the current time as a seconds-level timestamp
    :return: float value, e.g. 1498381468.38095
"""
return time.time()
|
90fee8f18bf6afc5dd9c6b5b6438051ec5d4ca3b
| 16,691
|
def product_consumption_rate(total_items, total_orders):
"""Returns the average number of units per order.
Args:
total_items (int): Total number of items of a SKU sold during a period.
total_orders (int): Total number of orders during a period.
Returns:
Average number of units per order.
"""
return (total_items / total_orders) * 100
|
e5645a4703d0d9335abc6832da24b8a6b53c0c17
| 16,693
|
import json
def read_cjson(path):
""" Read a json file with #-comment lines """
with open(path) as f:
lines = [line for line in f if not line.strip().startswith('#')]
data = json.loads('\n'.join(lines))
return data
|
f0585820e56c5fa8ccbf9894a70b780b4ce00018
| 16,694
|
import logging
def _get_pathway_scores_old(
pathway_dict: dict
) -> dict:
"""Return pathway scores as a dictionary of scores
:param pathway_dict: pathway dictionary as output by toDict
:type: dict
:return: dictionary of scores
:rtype: dict
"""
__SCORE_KEYS = [
'norm_fba_obj_biomass',
'norm_fba_obj_fraction',
'norm_rule_score',
'norm_steps',
'global_score'
]
scores = {}
for score_type in __SCORE_KEYS:
try:
scores[score_type] = pathway_dict['brsynth'][score_type]
except KeyError:
logging.warning(f'Cannot retrieve pathway score "{score_type}" in rpSBML. Set to None')
scores[score_type] = None
return scores
|
0a547552288dda6b4c4be4836389fe54cb2066f8
| 16,695
|
def evaluate_indiv(indiv, objective_dict):
"""Wrapper function for calculating the individual's fitness. See :class:`~hawks.objective.Objective` for implementation details.
Args:
indiv (:class:`~hawks.genotype.Genotype`): A single individual (i.e. a dataset).
objective_dict (dict): Dictionary with the objective function and its arguments.
Returns:
tuple: Objective/fitness values.
"""
obj_values = []
# Loop over the objectives
for objective in objective_dict.values():
# Calculate this objective
res = objective['class'].eval_objective(indiv)#, **objective['args'])
# Append the result
obj_values.append(res)
return tuple(obj_values)
|
b4d3ff64ace7c5fc9f67ef7d6860022d83e72457
| 16,696
|
def getSelectRedirect(params):
"""Returns the pick redirect for the specified entity.
"""
if params.get('args'):
return '/%(url_name)s/pick?%(args)s' % params
else:
return '/%(url_name)s/pick' % params
|
1eae1150f180986b74ac2ec9bc31c3d3d1f566e1
| 16,698
|
import os
def makeSubjIds(rdfname, mapfile=None, types=None):
"""Map subjects to ids
rdfname - input RDF N-Tripples file name
mapname - output id mapping file object or None
types - resulting set of the loaded types if not None
return
subjstp - name: id mapping for the typed subjects
idend - end of the original (base) ids
"""
tprop = '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>' # Type property
subjstp = {} # Typed subjects to be returned
sbis = {} # Original subject ids
with open(rdfname) as frdf:
sid = 0 # Subject id
for ln in frdf:
            # Skip comments (leading #) and empty lines (at least the trailing one)
if not ln or ln[0] == '#':
continue
if ln.startswith(' '):
raise ValueError('N-Tripple format is invalid: ' + ln)
name, pred, obj = ln.split(' ', 2)
            # Update typed subjects
if pred == tprop:
subjstp[name] = sbis.get(name, sid)
if types is not None:
types.add(obj.rstrip('\n. \t'))
# Note: initially unique subjects should be added to retain original ids
if sbis.setdefault(name, sid) == sid:
if mapfile:
mapfile.write('{}\t{}\n'.format(sid, name))
sid += 1 # This item has just been added
print('The number of typed subjects in {}: {} / {} ({:.2%})'.format(os.path.split(rdfname)[1]
, len(subjstp), sid, len(subjstp) / sid))
return subjstp, sid
|
b19c55a5ea4f868d5b674abc5c8f4a36592be337
| 16,700
|
def wrangle(X):
"""Wrangle train, validate, and test sets in the same way"""
# Prevent SettingWithCopyWarning
X = X.copy()
    # Fill NaNs in these columns with their most frequent value (the mode, most likely 1)
cols3 = ['Bedrooms', 'Beds', 'Bathrooms']
for col in cols3:
X[col] = X[col].fillna(X[col].mode()[0])
# return the wrangled dataframe
return X
|
47d40e613c6321d862b5c91f3bb99ef767e1907a
| 16,703
|
def feed(attribute, kwargs, dictionary, tab='\t'):
"""
Args:
attribute (str): Attribute to be operated.
kwargs (dict): generic parameters.
dictionary (dict): default parameters dictionary.
        tab (str): '\t' or '\t\t', depending on the order of the attribute.
    Return:
        (tuple of str): (name, value, comment)
"""
tmp = kwargs.get(attribute) if kwargs.get(attribute) \
else dictionary.get(attribute).get('value')
return tab + dictionary.get(attribute).get('name'), str(tmp), dictionary.get(attribute).get('comment')
|
c8e0324c4081c0a2852e0eb1f218ebe99b81ccac
| 16,704
|
import glob
import gzip
import json
def scrub_chunks():
"""Collate a list of chunk paths into a single dictionary
Keyword arguments:
files -- list of paths to g-zipped chunks from `tmdb.py`
"""
files = glob.glob('chunks/*')
if len(files) == 0 :
raise SystemExit("No chunks found in `chunks/`. Did you run `tmdb.py` already?")
keep = dict()
for f in files:
with gzip.open(f, "r") as zip_ref:
movies = json.load(zip_ref)
for m in movies.keys():
dat = movies[m]
if ( not dat["adult"] and
dat["vote_count"] > 30
# dat["budget"] > 1 and
# # dat["original_language"] == "en" and
# # dat["poster_path"] is not None and
):
k = dat["id"]
keep.update({k : dat})
return keep
|
64b1052829b9e515aa358b4a7f31fd7580ab13f6
| 16,705
|
from datetime import datetime
def complaint_list_query(self, begin_date=None, end_date=None, limit=10, offset=0, complainted_mchid=None):
"""查询投诉单列表
:param begin_date: 开始日期,投诉发生的开始日期,格式为YYYY-MM-DD。注意,查询日期跨度不超过30天,当前查询为实时查询。示例值:'2019-01-01'
:param end_date: 结束日期,投诉发生的结束日期,格式为YYYY-MM-DD。注意,查询日期跨度不超过30天,当前查询为实时查询。示例值:'2019-01-01'
:param limit: 分页大小,设置该次请求返回的最大投诉条数,范围【1,50】,商户自定义字段,不传默认为10。示例值:5
:param offset: 分页开始位置,该次请求的分页开始位置,从0开始计数,例如offset=10,表示从第11条记录开始返回,不传默认为0 。示例值:10
:param complainted_mchid: 被诉商户号,投诉单对应的被诉商户号。示例值:'1900012181'
"""
if not begin_date:
begin_date = datetime.now().strftime("%Y-%m-%d")
if not end_date:
end_date = begin_date
if not complainted_mchid:
complainted_mchid = self._mchid
path = '/v3/merchant-service/complaints-v2?limit=%s&offset=%s&begin_date=%s&end_date=%s&complainted_mchid=%s'
path = path % (limit, offset, begin_date, end_date, complainted_mchid)
return self._core.request(path)
|
21c598b1de662a2eb0f07c85020062a2bd611d8d
| 16,707
|
def wrap(char, wrapper):
"""Wrap a sequence in a custom string."""
return wrapper.format(char=char)
|
a86bbfb5f1b4ea373eb0a23b365c63ceed106159
| 16,708
|
import pickle
import codecs
def decode_df(str_encode):
"""
"""
p1 = pickle.loads(codecs.decode(str_encode.encode(), "base64"))
return p1
|
14f52ae2dc6dff4a53996a42657ee247d6f9ba33
| 16,710
|
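A matching encoder sketch that inverts decode_df; the name encode_df is an assumption, not part of the dataset:

def encode_df(obj):
    # pickle the object, base64-encode the bytes, return an ascii string
    return codecs.encode(pickle.dumps(obj), "base64").decode()

# decode_df(encode_df(some_df)) round-trips the object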
import os
def dirCheck(dirPath):
"""
Check whether the directory exists and if not make it.
Parameters
----------
dirPath : str
The path to the directory being checked.
Returns
-------
dirPath : str
The path to the checked directory.
"""
if not os.path.exists(dirPath):
os.mkdir(dirPath)
return dirPath
|
ec0ad381961fae1f5660b89ae91b53559e6c4a86
| 16,711
|
def form_entity_types(ontotypes):
"""A method to form the numerical identifiers appropriately for further processing"""
onto_types_l = set(ontotypes)
onto_types_l = list(onto_types_l)
# Sort the numerical identifiers #
onto_types_l.sort()
# Insert the '+' symbol between the identifiers so that they can be used from the tagger #
if(onto_types_l[0] == 'all'):
ent_type_plus = '0'
for ent_type in ['-1','-2','-21','-22','-23','-25','-26','-27']:
ent_type_plus = ent_type_plus + '+'.strip() + ent_type.strip()
else:
ent_type_plus = onto_types_l[0]
for index, ent_type in enumerate(onto_types_l):
if(index > 0):
ent_type_plus = ent_type_plus + '+'.strip() + ent_type.strip()
return ent_type_plus
|
e69fec7ca1cf78d20ad3cb1f995a26655ec94204
| 16,712
|
def combine_lists_recursive(list_of_lists):
"""Recursive combination function. This makes no attempt to avoid temporary copies"""
if(len(list_of_lists) < 2):
#Nothing to combine
return list_of_lists
elif(len(list_of_lists) == 2):
list1 = list_of_lists[0]
else:
list1 = combine_lists_recursive(list_of_lists[0:-1])
list2 = list_of_lists[-1]
output = []
for element2 in list2:
for element1 in list1:
output.append(element1 + element2)
return output
|
ff77852cbea2a6b4176dc65a3640daa4f2f2bb9b
| 16,713
|
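For example, combining two lists of strings concatenates one element from each:

print(combine_lists_recursive([['a', 'b'], ['1', '2']]))
# ['a1', 'b1', 'a2', 'b2']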
def is_string(func):
"""This decorator prevents the method from executing normally if the input
is not a string."""
def wrapper(self, text):
if type(text) is not str:
return "'{}' is invalid. Input should be a string.".format(text)
return func(self, text)
return wrapper
|
9ac6c9a12c6f4dde7239af2c2ed05c81b3f1861c
| 16,715
|
def isIrrelevantManualRules(s, cdoc):
"""
Hand-crafted rules to remove paragraphs or entire documents from job advertisements
:param s: String
:param cdoc: List of parts of document (String)
:returns: Boolean (ie. True if removal is needed), list of parts of documents (String), length of irrelevant parts
"""
if len(s) < 4: return True, cdoc, len(s) #too short
if "Monster" in s[:15]: return True, cdoc, len(s) #Paragraph with monster
if "Newspaper logo is conditionally" in s:
return True, [], sum([0]+[len(s) for s in cdoc]) # reset doc, everything above is not relevant for monster jobs, this also includes "Skip to main content" in s: continue
if "About the Job" in s: return True, cdoc, len(s)
if '="' in s: return True, cdoc, len(s) # often html gets not removed properly, what remains is sth like < link rel="stylesheet" href="https:, , coda. newjobs. com,
return False, cdoc, 0
|
772bc6f1f317d28119e16cc992eb668dfece6143
| 16,716
|
def _namedtuple_to_query(query, nt):
"""Return a filled-out query and arguments.
The return value can be exploded and passed directly into execute.
>>> query = 'INSERT INTO jobs ({}) VALUES ({});'
    >>> _namedtuple_to_query(query, job)
('INSERT INTO jobs (`user`, `pages`) VALUES (%s, %s)', ('ckuehl', 42))
"""
return (
query.format(
', '.join('`{}`'.format(column) for column in nt._fields),
', '.join('%s' for _ in nt._fields),
),
tuple(getattr(nt, column) for column in nt._fields),
)
|
1db1dc9c90fe81dc262f1c1459ca7f0a0b725d9d
| 16,717
|
def x2bool(s):
"""Helper function to convert strings from the config to bool"""
if isinstance(s, bool):
return s
elif isinstance(s, str):
return s.lower() in ["1", "true"]
raise ValueError()
|
2850c3ab0421619a087d88181f3b6e2c6ffa9e9a
| 16,718
|
def round_down(rounded, divider):
"""Round down an integer to a multiple of divider."""
return int(rounded) // divider * divider
|
cf9ea0a437d3246776bd80e03ed19f06827f68ce
| 16,719
|
import json
import base64
import binascii
def validate(arg):
"""
Validate input parameters for `left` and `right`
:param arg: input from request
:return: dictionary of {'data': base64encoded_stirng} if valid or
False if not valid
"""
if not arg:
return False
if isinstance(arg, str) or isinstance(arg, bytes):
try:
arg = json.loads(arg)
except Exception as e:
print(e)
return False
if not arg.get("data"):
return False
try:
base64.b64decode(arg["data"])
except binascii.Error:
return False
return arg
|
c10634020e402ffd7b39f657c405e6bcc7283031
| 16,721
|
def check_digit_luhn_mod_10(digits) -> int:
"""Luhn check verifies if 16 digits are a valid credit card or not.
Args:
digits ([str]): Credit card number
Returns:
[int]: Check digit
"""
reversed_digits = digits[::-1]
calibrated_digits = []
for i, d in enumerate(reversed_digits):
if i % 2 == 0:
m = int(d) * 2
if m > 9:
calibrated_digits.append(m-9)
else:
calibrated_digits.append(m)
else:
calibrated_digits.append(int(d))
sum_of_digs = sum(calibrated_digits)
return (sum_of_digs * 9) % 10
|
77691a752b4db89141e9a791a1c596156dca4be3
| 16,722
|
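A worked example with the classic Luhn payload 7992739871, whose check digit is 3:

payload = "7992739871"
check = check_digit_luhn_mod_10(payload)
print(check)                  # 3
print(payload + str(check))   # 79927398713, a Luhn-valid number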
import json
def read_json_config(cfg):
"""Read a JSON configuration. First attempt to read as a JSON
string. If that fails, assume that it is a JSON file and attempt
to read contents from the file.
Args:
res: a config string or file path
Returns:
dict of config options
"""
try:
cfg = json.loads(cfg)
except ValueError:
cfg_file = open(cfg, 'r')
cfg = json.load(cfg_file)
return cfg
|
2268297273dbfb468e0a8391981b4795702ba0b7
| 16,723
|
def priceGetDeal(soup):
""" Auxiliary price extraction function. Gets price of 'Deal of the Day'. """
price = soup.find('td', id='priceblock_dealprice', class_='a-color-price a-size-medium')
price = price.text
priceList = price.split()
price = priceList[0]
price = price.strip()
price = price.lstrip('$')
price = float(price)
return price
|
61bbe9d74628e3745e518cd8b74bceca9ae625f3
| 16,725
|
import string
def idx_to_label(n: int) -> str:
"""Convert a number to a corresponding letter in the alphabet.
In case the number is higher than the number of letters in the english alphabet, then
a second character is appended.
    For instance:
    >>> idx_to_label(0)
    'a'
    >>> idx_to_label(25)
    'z'
    >>> idx_to_label(26)
    'aa'
This function was inspired after:
https://stackoverflow.com/questions/2267362/how-to-convert-an-integer-to-a-string-in-any-base
:param n: the input number
:return: the corresponding string
"""
    alphabet_size = 26
    digits = []
    n += 1
    while n:
        # bijective base-26: shift by one so that 1..26 map to a..z
        n, remainder = divmod(n - 1, alphabet_size)
        digits.append(remainder)
    digits.reverse()
    return ''.join([string.ascii_lowercase[i] for i in digits])
|
67e4bba437016dddb6d90f286e1d65f2bfec8caf
| 16,728
|
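A hedged inverse for round-trip checks; label_to_idx is a hypothetical helper, not part of the dataset:

def label_to_idx(label: str) -> int:
    """Inverse of idx_to_label: 'a' -> 0, 'z' -> 25, 'aa' -> 26."""
    n = 0
    for ch in label:
        n = n * 26 + (ord(ch) - ord('a') + 1)
    return n - 1

assert all(label_to_idx(idx_to_label(i)) == i for i in range(1000))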
def dict_contains(subdict, maindict):
"""
return True if the subdict is present with the sames values in dict.
can be recursive. if maindict contains some key not in subdict it's ok.
but if subdict has a key not in maindict or the value are not the same, it's a failure.
>>> dict_contains(dict(a=1, b=2), dict(a=1, c=2, b=2))
True
>>> dict_contains(dict(a=dict(aa=1)), dict(a=dict(aa=1, bb=2), b=2))
True
>>> dict_contains(dict(a=dict(aa=1, bb=2)), dict(a=dict(aa=2)))
False
>>> dict_contains(dict(a=dict(aa=1)), dict(a=[]))
False
>>> dict_contains(dict(a=1), dict())
False
>>> dict_contains(dict(a=[1, 2, 3]), dict(a=[1, 2, 3]))
True
>>> dict_contains(dict(a=[1, 3, 2]), dict(a=[1, 2, 3]))
False
>>> dict_contains(dict(a=[1, 3]), dict(a=[1, 2, 3]))
False
>>> dict_contains(dict(a=[1, 3, 2]), dict(a={1, 2, 3}))
True
:param subdict: the smaller dict that should be present in the big one
:param maindict: the dict
:return: True if subdict is included in maindict
:rtype: bool
"""
try:
for k, v in subdict.items():
mainv = maindict[k]
if isinstance(mainv, dict) and isinstance(v, dict):
if not dict_contains(v, mainv):
return False
            elif isinstance(mainv, (set, frozenset)):
                if set(v) != mainv:
                    return False
elif mainv != v:
return False
except KeyError:
return False
return True
|
3c02145c7572f4c2d815213527effbfd7df93496
| 16,729
|
from typing import Any
def cast_numeric_greater_than_zero(
    value: Any, value_name: str, required_type: type
) -> Any:
    """
    Checks that `value` is greater than zero and casts
    it to `required_type`.
    Raises an exception if `value` is not greater than zero.
Args:
value: numeric value to check
value_name: name to be included in the error message
required_type: target type of the value
Returns:
value as required type
"""
if not isinstance(value, required_type):
value = required_type(value)
if value <= 0:
raise ValueError(f"Value {value_name} must be greater than zero.")
return value
|
27f3b26d824863f1a94d4efe8c902cb84bc26c59
| 16,730
|
from typing import Iterable
def filter_duplicate_key(
line: bytes,
line_number: int,
marked_line_numbers: Iterable[int],
) -> bytes:
"""Return '' if first occurrence of the key otherwise return `line`."""
if marked_line_numbers and line_number == sorted(marked_line_numbers)[0]:
return b""
return line
|
d592e718832f6d0c4718989d1a0fb96783c1508c
| 16,732
|
import copy
import io
import json
def convert(data):
"""Convert a data to a graph structure"""
    if isinstance(data, dict):
return copy.deepcopy(data)
elif isinstance(data, str): # it's a json file path
with open(data) as f:
return convert(f)
elif isinstance(data, io.TextIOBase): # it's a json file
return json.load(data)
else:
raise TypeError('Unsupported data type.')
|
6e34bcdc5aa72289c3bb899c30ce92a272b13055
| 16,735
|
def get_people_values(people):
"""List person/observatory/site using explict retrieval of relevant values"""
return [
(name, observatory, site)
for name, site, observatory in people.values_list(
"name", "site__name", "site__observatory__name"
)
]
|
6642515d80daa63346e043df05248121c0874ea7
| 16,736
|
def separate_elements(array):
"""splits the strings, delimited by whitespace in the provided list and adds each newly formed string
to the returned list"""
list_a = []
for element in array:
list_a.extend(element.split(" "))
return list_a
|
1b0d8a4ea9d8255bbc70d90cdf963b98f61e38ea
| 16,740
|
import re
def clean_text(text, *, replace='_'):
"""
Ensure input contains ONLY ASCII characters valid in filenames.
    Any other character will be replaced with 'replace'.
    Args:
        text: The text to clean.
        replace: The replacement character to use.
Returns:
The cleaned text.
"""
text = re.sub(r'[^a-zA-Z0-9]', replace, text)
text = re.sub(r'{r}{r}+'.format(r=replace), replace, text)
return text
|
92d305e78d4883cea2e0a1c3da218284ad783b72
| 16,741
|
def greedy(circ, v, weight, options):
"""Defines a heuristic that results in greedy search, which focuses soley on minimizing the eval_func, and behaves somewhat similarly to depth first sarch."""
return options.eval_func(options.target, circ.matrix(v))
|
a9deb26009ec877cee2bf81c8c9f73114d268899
| 16,742
|
import sys
def can_trace():
"""Returns True if the user is an administrator on Windows.
It is required for tracing to work.
"""
if sys.platform != 'win32':
return True
    from ctypes import windll  # windll exists only on Windows, so import lazily here
    return bool(windll.shell32.IsUserAnAdmin())
|
6fbb54a8bed7af8483ced0d178cdfcbe2ce9ae0c
| 16,746
|
def compute_variances(grid):
"""Compute variance for each hyperparameters"""
return grid[:, :, -1].var(axis=1)
|
a618e21b2d6290b60b1f9448acff941ad811fec6
| 16,747
|
def loadData():
"""
    Load test data
"""
return [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]
|
f143b1537198825a4fb85d656ca0772f2171fc48
| 16,748
|
from pathlib import Path
def label_malformed(path: Path) -> Path:
"""
    Renames the file at the given location to <original_filename>_MALFORMED_CONTENTS.
If such a file already exists, an incremented number is appended to the
name until it can be created. The new file name is returned.
Raises: `OSError`
"""
assert(path.is_file())
malformed_file_path = list(path.parts)
malformed_file_path[-1] += "_MALFORMED_CONTENTS"
malformed_file = Path(*malformed_file_path)
    # Avoid naming collision: append an increasing counter to the base name
    base_name = malformed_file_path[-1]
    i = 1
    while malformed_file.is_file():
        malformed_file_path[-1] = base_name + str(i)
        malformed_file = Path(*malformed_file_path)
        i += 1
path.rename(malformed_file)
return malformed_file
|
12ca5ec240803127dd8aed7d13502c71a12f08ae
| 16,749
|
def get_child_parents(edges):
"""Puts each non-parent node together with its parents
Parameters
----------
edges : list
A list of tuples corresponding to the Bayesian network structure as described in the input file
Returns
-------
child_parents
A dictionary with non-parent nodes as keys and their parents as values
"""
child_parents = {}
for e in edges:
if e[1] in child_parents.keys():
child_parents[e[1]].append(e[0])
else:
child_parents[e[1]] = [e[0]]
return child_parents
|
4b01a264ee1e2498c37f1fa0695f9430c207f04d
| 16,750
|
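For example, for edges where A and B both point to C, and C points to D:

edges = [("A", "C"), ("B", "C"), ("C", "D")]
print(get_child_parents(edges))
# {'C': ['A', 'B'], 'D': ['C']}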
import random
def get_random_from_distribution(minimum_value, distribution, increment=1):
"""Returns an integer from minimum_value to len(distribution)*increment,
where the probability of any specific integer is determined by the
probability distribution.
"""
x = random.random()
result = minimum_value - increment
for limits in distribution:
if x > limits[1]:
result = result + increment
else:
break
return result
|
0d3cde30c86ea8e230e3740123810d1e2d73d7ee
| 16,751
|
import numpy
def gaussian2d(x, y, sigma_x, sigma_y, x0=0, y0=0, angle=0.0):
"""
Non-normalized 2D Gaussian function
Parameters:
* x, y : float or arrays
Coordinates at which to calculate the Gaussian function
* sigma_x, sigma_y : float
Standard deviation in the x and y directions
* x0, y0 : float
Coordinates of the center of the distribution
* angle : float
Rotation angle of the gaussian measure from the x axis (north) growing
positive to the east (positive y axis)
Returns:
* gauss : array
Gaussian function evaluated at *x*, *y*
"""
theta = -1 * angle * numpy.pi / 180.
tmpx = 1. / sigma_x ** 2
tmpy = 1. / sigma_y ** 2
sintheta = numpy.sin(theta)
costheta = numpy.cos(theta)
    a = tmpx * costheta ** 2 + tmpy * sintheta ** 2
b = (tmpy - tmpx) * costheta * sintheta
c = tmpx * sintheta ** 2 + tmpy * costheta ** 2
xhat = x - x0
yhat = y - y0
return numpy.exp(-(a * xhat ** 2 + 2. * b * xhat * yhat + c * yhat ** 2))
|
41a13e3799f611b9db02827113f537ae97d3cef8
| 16,752
|
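A small evaluation sketch on a numpy grid; the parameter values are arbitrary illustrations:

x, y = numpy.meshgrid(numpy.linspace(-5, 5, 101), numpy.linspace(-5, 5, 101))
g = gaussian2d(x, y, sigma_x=1.0, sigma_y=2.0, x0=1.0, y0=-1.0, angle=30.0)
print(g.shape, g.max())  # (101, 101), with a peak of 1.0 at the center (1, -1)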
def what_is_up():
"""Initial message"""
return 'Turing235 Project'
|
2ab9ed74f15f12b4f1e7291218c22b126690413a
| 16,753
|
def get_divisable(row):
"""Get numbers from row where one divides another without rest."""
for index, num in enumerate(row[:-1]):
for other_num in row[index + 1:]:
if num % other_num == 0 or other_num % num == 0:
return sorted([num, other_num], reverse=True)
|
e7e6cb9936cd54df7cd0594168e53b8821cfbae6
| 16,755
|
def dms(degrees):
"""
Calculate degrees, minutes, seconds representation from decimal degrees.
Parameters
----------
degrees : float
Returns
-------
(int, int, float)
"""
degrees_int = int(abs(degrees)) # integer degrees
degrees_frac = abs(degrees) - degrees_int # fractional degrees, used to compute minutes
minutes_int = float(int(degrees_frac * 60)) # integer minutes
minutes_frac = degrees_frac - minutes_int / 60 # fractional minutes, used to compute seconds
seconds = minutes_frac * 3600 # decimal seconds
# Handle sign. Degrees portion will contain the sign of the coordinate.
# Minutes and seconds will always be positive.
# sign function returns -1, 0, +1 for x < 0, x == 0, x > 0, respectively
if degrees < 0:
degrees_int *= -1
return degrees_int, minutes_int, seconds
|
ef062ddc4d313c0e8376096952d0011c01d27825
| 16,756
|
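For example (seconds shown rounded):

print(dms(-123.4567))
# (-123, 27.0, 24.12)  i.e. 123 degrees, 27 minutes, 24.12 seconds, negative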
def number_bigger_than_zero(s):
"""
Returns the Number representation of s if it is bigger than zero, else an error occurs
:param s: Input string
    :return: Integer
"""
i = int(s)
if i <= 0:
raise ValueError("Value must be larger than 0")
return i
|
a24d62a07964dea08d79392964073ac3ed84fbda
| 16,759
|
def is_genetic_effect(effect):
"""
Is this effect a genetic effect?
:rtype: bool
"""
return effect in set(['additive', 'dominance', 'mitochondrial'])
|
9d79307f3f012b358b1b5c8347f7ba977be65aa1
| 16,760
|
def parenthesise(s, parens = True):
"""
Put parentheses around a string if requested.
"""
if parens:
return '(%s)' % s
else:
return s
|
37a1abbb4a511eff9b9c63a79fafcc60331bee67
| 16,762
|
def delta_sr(processor_sr, aeronet_sr):
"""
Convention in ACIX I paper
:param processor_sr: surface reflectance of the processor
:param aeronet_sr: surface reflectance of the reference (aeronet based)
:return:
"""
return processor_sr - aeronet_sr
|
60b3a69fd986044126cf95b8d87425334e472abe
| 16,763
|
from typing import Tuple
import math
def uniform_divide(total: float, mpp: float) -> Tuple[int, float]:
"""Return the minimum number of partitions and the quantity per partition that uniformly divide a given quantity
:param total: The total quantity to divide
:param mpp: Maximum quantity per partition
:returns: The minimum number of partitions and the quantity in each partition"""
n = int(math.ceil(total / mpp))
p = total / n
return n, p
|
2cccebd710975b34ab7f3538516d8f4c60b18c87
| 16,764
|
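For example, dividing a quantity of 10 with at most 3 per partition:

n, p = uniform_divide(10, 3)
print(n, p)  # 4 partitions of 2.5 each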
import subprocess
def _get_minor_gpu_device_numbers():
    """Query nvidia-smi for the PCI bus ids of the available GPUs.
    Returns:
        list of PCI bus id strings (empty if nvidia-smi is unavailable)
    """
    try:
        gpu_info = subprocess.check_output(
            ["nvidia-smi", "--format=csv,noheader,nounits", "--query-gpu=pci.bus_id"]
        ).decode()
    except Exception:
        return []
    device_id_list = []
    for line in gpu_info.split('\n'):
        if len(line) > 0:
            # one pci.bus_id per line, since only one column is queried
            device_id_list.append(line.strip())
    return device_id_list
|
06eac15f9f15934083db4f75cd7b2161a4c594b4
| 16,765
|
import base64
def printable_encrypted_string(string):
"""
    The encrypted message is not readable as-is because it contains many
    non-printable characters such as tabs and newlines,
    so we convert the message into a base64-encoded string
"""
message_bytes = string.encode("ascii")
base64_bytes = base64.b64encode(message_bytes)
return base64_bytes.decode("ascii")
|
0451a2670408da7ab50227a09d7ff8358891fd49
| 16,768
|
def add_trailing_zero(df_vendor_gt, anchor_key):
"""
This function will amend the single temporary ground truth data frame record at runtime
:param df_vendor_gt: The single temporary ground truth data frame record
:param anchor_key: The field we are currently processing
:return: The amended dataframe
"""
idx = str(df_vendor_gt[anchor_key].iloc[0]).find(".")
if len(str(df_vendor_gt[anchor_key].iloc[0])[idx + 1:]) == 1:
df_vendor_gt[anchor_key].iloc[0] = str(df_vendor_gt[anchor_key].iloc[0]) + '0'
return df_vendor_gt
|
7191c17f1f2795ec04218b9e1611c194569df8ff
| 16,769
|