content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def make_diamond(length):
    """
    Make a diamond of the given size.

    Replaces the original hardcoded placeholder ("a \\nb"), which ignored
    `length` entirely, with an actual centered ASCII diamond.

    :param length: Positive odd integer width/height of the diamond.
    :return: String of the diamond rows joined by newlines.
    :raises ValueError: if length is not a positive odd integer.
    """
    if length < 1 or length % 2 == 0:
        raise ValueError("length must be a positive odd integer")
    rows = []
    # Top half (including the middle row): 1, 3, ..., length stars.
    for stars in range(1, length + 1, 2):
        rows.append(("*" * stars).center(length))
    # Bottom half mirrors the top, excluding the middle row.
    for stars in range(length - 2, 0, -2):
        rows.append(("*" * stars).center(length))
    return "\n".join(rows)
def pts_in_cell_numpy(pts, cell):
    """ Get the point indices strictly inside a given cell (vectorized numpy).

    The original looped over rows in Python despite the "numpy" name;
    this version builds a boolean mask in one vectorized pass.

    Input:
        pts, an (N, 3) numpy array of points
        cell, a list of 6 numbers {x1, y1, z1, x2, y2, z2}
    Output:
        inds, a list of indices for points strictly inside the cell
        (strict inequalities on every axis, matching the original loop)
    """
    mask = ((pts[:, 0] > cell[0]) & (pts[:, 0] < cell[3])
            & (pts[:, 1] > cell[1]) & (pts[:, 1] < cell[4])
            & (pts[:, 2] > cell[2]) & (pts[:, 2] < cell[5]))
    # nonzero() gives the True positions; tolist() returns plain Python ints
    # like the original list comprehension did.
    return mask.nonzero()[0].tolist()
def extract_job_specs(replica_specs):
    """Build a mapping of TF job type to replica count from tfReplicaSpecs.

    Args:
        replica_specs: A dictionary holding tfReplicaSpecs from a manifest.
    Returns:
        Dictionary keyed by the ASCII-encoded, lower-cased job type (bytes),
        valued by the integer replica count (0 when "replicas" is missing).
    """
    return {
        job_type.encode("ascii").lower(): int(spec.get("replicas", 0))
        for job_type, spec in replica_specs.items()
    }
def is_valid_exit(exits, chosen_exit):
    """Report whether the player's chosen exit exists.

    Given the "exits" dictionary (see map.py) and the player's choice
    "chosen_exit", return True when the exit is valid and False otherwise.
    Assume the exit name has already been normalised by normalise_input().
    """
    valid = chosen_exit in exits
    return valid
import json
def ordereddict_to_dict(d):
    """
    Convert an OrderedDict (possibly nested) into plain dicts.

    Round-trips the value through JSON, so the input must be
    JSON-serialisable. Probably not necessary but slightly cleaner.
    """
    serialised = json.dumps(d)
    return json.loads(serialised)
def cur_buf_detail(vim):
    """ Get the current buffer's number and name.

    Args:
        vim (obj): nvim socket handler
    Returns:
        tuple of (buffer number, buffer name) as returned by the
        `bufnr()` / `bufname()` vimscript calls.
    """
    buf_num = vim.command_output("echo bufnr()")
    buf_name = vim.command_output("echo bufname()")
    return buf_num, buf_name
def __example(wb):
    """win32com excel example found online.

    Writes sample values into the "Overview" worksheet of *wb* through the
    Cells/Range COM accessors, then returns the workbook.

    :param wb: an open win32com Excel Workbook COM object.
    :return: the same workbook, after the cells have been written.
    """
    # excel = win32com.client.gencache.EnsureDispatch('Excel.Application')
    # excel.Visible = True
    # wb = excel.Workbooks.Open(OUTPUT)
    ws = wb.Worksheets("Overview")
    # Cells(row, col) is 1-based; Offset is relative (also 1-based in COM).
    ws.Cells(1, 1).Value = "Cell A1"
    ws.Cells(1, 1).Offset(2, 4).Value = "Cell D2"
    ws.Range("A2").Value = "Cell A2"
    # Assigning one value to a multi-cell range fills every cell with it.
    ws.Range("A3:B4").Value = "A3:B4"
    ws.Range("A6:B7,A9:B10").Value = "A6:B7,A9:B10"
    # wb.SaveAs(OUTPUT)
    # excel.Application.Quit()
    return wb
def vector_name_iterator(data):
    """
    Group vector-component field names by their common prefix.

    Scans the keys of *data* for names of the form '<prefix>_x',
    '<prefix>_y', '<prefix>_z' and returns (via dict .items()) 2-tuples
    whose first element is the shared prefix and whose second element is a
    3-list of the matching field names ordered x, y, z. Components that
    were never seen are left as None.
    """
    axis_index = {'x': 0, 'y': 1, 'z': 2}
    vectors = {}
    for name in data.keys():
        parts = name.split('_')
        if len(parts) != 2:
            continue
        prefix, axis = parts
        if axis not in axis_index:
            continue
        slots = vectors.setdefault(prefix, [None, None, None])
        slots[axis_index[axis]] = name
    return vectors.items()
def GetOwnerIds(hotlist):
    """Returns the list of ids for the given hotlist's owners."""
    # Thin accessor: returns the hotlist's own owner_ids attribute
    # directly (not a copy), so mutations are visible to the hotlist.
    return hotlist.owner_ids
def get_filenames_from_tsv(tsv_file):
    """Extract the filenames from the tsv file
    and return them in a list. If something went wrong,
    an empty list is returned.

    :param tsv_file: path to a tab-separated file whose header row contains
        a column named "filename" (case-insensitive).
    :return: list of the values from that column; [] when the file is empty
        or the column is missing.
    """
    file_names = []
    with open(tsv_file, "r") as fh:
        all_lines = fh.read().splitlines()
    if not all_lines:
        # Previously an empty file crashed on all_lines[0].
        return file_names
    # Locate the "filename" column in the header row (split, not rsplit,
    # for consistency with the data rows below — same result here).
    header_index = -1
    for index, name in enumerate(all_lines[0].split("\t")):
        if name.lower() == "filename":
            header_index = index
            break
    if header_index == -1:
        return file_names
    for line in all_lines[1:]:
        # Removed leftover debug print(line) from the original.
        columns = line.split("\t")
        if header_index < len(columns):  # skip malformed/short rows
            file_names.append(columns[header_index])
    return file_names
def cached(__cache: dict):
    """
    Decorator factory that memoises results in the caller-supplied __cache.

    Note that the decorated function can *ONLY* take positional, hashable
    args — the args tuple is used directly as the cache key.

    :param __cache: dict used to store results, keyed by the args tuple.
    """
    from functools import wraps  # local import keeps the block self-contained

    def _decorator(decoratee):
        @wraps(decoratee)  # preserve __name__/__doc__ of the wrapped function
        def _inner(*args):
            try:
                return __cache[args]
            except KeyError:
                result = decoratee(*args)
                __cache[args] = result
                return result
        return _inner
    return _decorator
def time_diff(t0, t1):
    """
    Format the elapsed time between two timestamps.

    Args:
        :t0: start time in seconds
        :t1: end time in seconds
    Returns: string with time difference (i.e. t1-t0)
    """
    elapsed = t1 - t0
    mins, secs = divmod(elapsed, 60)
    hrs, mins = divmod(mins, 60)
    return "%d hours, %d minutes, %d seconds" % (hrs, mins, secs)
import json
def offers(request):
    """
    Create Json response with offers menu.

    Loads the base Actions-on-Google response template from
    'api/response.json' and fills in language-specific prompts,
    a listSelect system intent, and suggestion chips.

    :param request: POST request from "Offers" dialogflow intent; its
        queryResult.languageCode selects the Polish or English variant.
    :return: Json response that contains spoken and display prompt and also
        list as Dialogflow conversation item
    """
    # Polish prompt / list / suggestion-chip payloads.
    speech_text_pl = "Która opcja Cię interesuje?"
    display_text_pl = "Która opcja Cię interesuje?"
    list_pl = {
        "intent": "actions.intent.OPTION",
        "data": {
            "@type": "type.googleapis.com/google.actions.v2.OptionValueSpec",
            "listSelect": {
                "items": [
                    {
                        "optionInfo": {
                            "key": "Przeglądaj oferty",
                            "synonyms": [
                                "Przejrzyj oferty"
                            ]
                        },
                        "title": "Przeglądaj oferty"
                    },
                    {
                        "optionInfo": {
                            "key": "Wyszukaj oferty",
                            "synonyms": [
                                "Znajdź oferty",
                                "Znajdź ofertę"
                            ]
                        },
                        "title": "Wyszukaj oferty"
                    },
                    {
                        "optionInfo": {
                            "key": "Wyszukaj ofertę po id",
                            "synonyms": [
                                "Znajdź ofertę po id"
                            ]
                        },
                        "title": "Wyszukaj ofertę po id"
                    },
                    {
                        "optionInfo": {
                            "key": "Do kiedy jest ważna oferta",
                            "synonyms": [
                                "Ważnosć oferty",
                                "Do kiedy oferta będzie aktualna",
                            ]
                        },
                        "title": "Do kiedy jest ważna oferta"
                    }
                ]
            }
        }
    }
    suggestions_pl = [{"title": "Oferty"}, {"title": "Zlecenia"}, {"title": "Zapytania"}, {"title": "Konto"},
                      {"title": "Inne"}]
    # English prompt / list / suggestion-chip payloads.
    speech_text_en = "Which option are you interested in?"
    display_text_en = "Which option are you interested in?"
    list_en = {
        "intent": "actions.intent.OPTION",
        "data": {
            "@type": "type.googleapis.com/google.actions.v2.OptionValueSpec",
            "listSelect": {
                "items": [
                    {
                        "optionInfo": {
                            "key": "Browse offers",
                            "synonyms": [
                                "View offers",
                                "Display offers"
                            ]
                        },
                        "title": "Browse offers"
                    },
                    {
                        "optionInfo": {
                            "key": "Search offers",
                            "synonyms": [
                                "Search active offers"
                            ]
                        },
                        "title": "Search offers"
                    },
                    {
                        "optionInfo": {
                            "key": "Search offer after id",
                            "synonyms": [
                                "Search offer according to id"
                            ]
                        },
                        "title": "Search offer after id"
                    },
                    {
                        "optionInfo": {
                            "key": "Until when is the offer valid",
                            "synonyms": [
                                "Offer valid",
                                "Until when is the offer valid?",
                            ]
                        },
                        "title": "Until when is the offer valid"
                    }
                ]
            }
        }
    }
    suggestions_en = [{"title": "Offers"}, {"title": "Orders"}, {"title": "Inquiries"}, {"title": "Account"},
                      {"title": "Others"}]
    # Load the shared response template, then patch the Google payload
    # in place for the request's language.
    with open('api/response.json') as json_file:
        offers = json.load(json_file)
    part_to_modify = offers['payload']['google']
    if request.data['queryResult']['languageCode'] == 'pl':
        part_to_modify['richResponse']['items'][0]['simpleResponse']['textToSpeech'] = speech_text_pl
        part_to_modify['richResponse']['items'][0]['simpleResponse']['displayText'] = display_text_pl
        part_to_modify['systemIntent'] = list_pl
        part_to_modify['richResponse']['suggestions'] = suggestions_pl
    elif request.data['queryResult']['languageCode'] == 'en':
        part_to_modify['richResponse']['items'][0]['simpleResponse']['textToSpeech'] = speech_text_en
        part_to_modify['richResponse']['items'][0]['simpleResponse']['displayText'] = display_text_en
        part_to_modify['systemIntent'] = list_en
        part_to_modify['richResponse']['suggestions'] = suggestions_en
    # NOTE(review): other language codes return the template unmodified.
    offers['payload']['google'] = part_to_modify
    return offers
def normalize_path(path: str) -> str:
    """
    Normalize a '$'-prefixed reference path.

    Converts
        $#foo  -> root.foo
        $^foo  -> parent.foo
        $^^foo -> parent.parent.foo
    Paths without the leading '$' are returned untouched.
    """
    if not path.startswith('$'):
        return path
    body = path[1:]
    if body.startswith('#'):
        return 'root.' + body[1:]
    carets = len(body) - len(body.lstrip('^'))
    if carets == len(body):
        # Nothing but carets (or empty): drop the trailing dot.
        return ('parent.' * carets)[:-1]
    return 'parent.' * carets + body[carets:]
def get(text, word_phones, punc="!?,.;:#-_'\"()[]\n"):
    """Convert block of text into ARPAbet.

    :param text: whitespace-separated text to convert.
    :param word_phones: sequence of (word, phones) pairs, consumed in order,
        one per word of *text*; pairs with an empty word are discarded first.
    :param punc: characters treated as punctuation to peel off word edges.
    :return: the text with each word replaced by its "{PH ON ES}" spelling,
        with edge punctuation preserved in place.
    """
    # Drop entries with an empty word before consuming the list in order.
    word_phones = [x for x in word_phones if len(x[0])]
    out = []
    for word in text.split(" "):
        end_chars = ''; start_chars = ''
        # Peel punctuation off both ends, remembering it for re-attachment.
        while any(elem in word for elem in punc) and len(word) > 1:
            if word[-1] in punc:
                end_chars = word[-1] + end_chars
                word = word[:-1]
            elif word[0] in punc:
                start_chars = start_chars + word[0]
                word = word[1:]
            else:
                break  # punctuation only occurs inside the word: leave it
        try:
            # Replace the word with the next phone spelling in sequence.
            word = "{" + ' '.join(word_phones[0][1]) + "}"
            word_phones = word_phones[1:]
        except IndexError:
            pass  # ran out of phone entries: keep the raw word
        out.append((start_chars + (word or '') + end_chars).rstrip())
    return ' '.join(out)
def dummyfun():
    """
    This function guarantees that at least one pytest test is run.
    >>> dummyfun()
    'dummy'
    """
    result = "dummy"
    return result
def csp_property(key):
    """Return a new property object for a content security policy header.

    Useful when adding support for a CSP extension in a subclass: the
    property delegates get/set/delete for *key* to the instance's
    _get_value / _set_value / _del_value methods.
    """
    def _get(self):
        return self._get_value(key)

    def _set(self, value):
        self._set_value(key, value)

    def _del(self):
        self._del_value(key)

    return property(_get, _set, _del, "accessor for %r" % key)
def convert_event_name(name: str) -> str:
    """Strip and title-case an event name, with a fallback for blanks.

    Args:
        name: Any string of any length.

    Returns:
        str: The name stripped of leading/trailing whitespace and
        title-cased, or "Generic Event" when the input is empty or
        whitespace-only.
    """
    cleaned = name.strip()
    if not cleaned:
        # The original only checked len(name) == 0, so whitespace-only
        # names produced an empty string instead of the generic label.
        return "Generic Event"
    return cleaned.title()
import os
import warnings
import shutil
def get_attachment_filename(_id, unique_id, backup_dir, thread_dir):
    """ Resolve an attachment's path and copy it next to the thread.

    :param _id: attachment id used in the backup file name.
    :param unique_id: unique id used in the backup file name.
    :param backup_dir: directory containing the Attachment_*.bin files.
    :param thread_dir: thread directory; the file is copied into its
        "attachments" subdirectory (created on demand).
    :return: absolute path of the copied file, or None when the source
        attachment does not exist (a warning is emitted in that case).
    """
    fname = f"Attachment_{_id}_{unique_id}.bin"
    source = os.path.abspath(os.path.join(backup_dir, fname))
    if not os.path.exists(source):
        warnings.warn(
            f"Warning: couldn't find attachment {source}. Maybe it was deleted?"
        )
        return None
    # Copying here is a bit of a side-effect of this "get" function.
    destination_dir = os.path.abspath(os.path.join(thread_dir, "attachments"))
    os.makedirs(destination_dir, exist_ok=True)
    destination = os.path.join(destination_dir, fname)
    shutil.copy(source, destination)
    return destination
import argparse
def getArgumentParser():
    """ @method: getArgumentParser
    @description returns command line parser containing base args
    @returns argparse.ArgumentParser {Object}
    """
    parser = argparse.ArgumentParser(description="Command line client for PyRemixer")
    # (flag, default, type, help) rows for the shared database options.
    db_options = [
        ("-dbhost", "127.0.0.1", str, "database host"),
        ("-dbport", 5432, int, "database post"),
        ("-dbname", "remixerdb", str, "database name"),
        ("-dbuser", "postgres", str, "database user"),
        ("-dbpass", "dev", str, "database password"),
    ]
    for flag, default, type_, help_ in db_options:
        parser.add_argument(flag, default=default, type=type_, help=help_)
    return parser
def func2() -> list:
    """
    This function has no parameters and a return value.
    """
    values = [1, 2, 3]
    return values
def select_window(variances, acceptable_variances, window_sizes):
    """Select window sizes according to the acceptable variances.

    For every (i, j) pair, walk the candidate window sizes in order and
    keep the last size of the prefix whose variance stays within the
    acceptable bound; the first size is the fallback when even it fails.

    :param variances: The variances, indexed as [i][j][window_size].
    :param acceptable_variances: The acceptable variances, same indexing.
    :param window_sizes: The available window sizes, in preference order.
    :return: n x n nested list of the selected window sizes.
    """
    n = len(variances)
    selected_windows = []
    for i in range(n):
        row = []
        for j in range(n):
            chosen = window_sizes[0]
            for size in window_sizes:
                if variances[i][j][size] > acceptable_variances[i][j][size]:
                    break
                chosen = size
            row.append(chosen)
        selected_windows.append(row)
    return selected_windows
def probe_normalize(sample_normalized, settings):
    """Probe-wise normalize a (sample normalized) count table.

    Scales each probe (column) so that the value at the configured
    normalization percentiles corresponds to `averageCopyCount` copies.
    For human samples, for example, this assumes the average sample
    carries 2 copies of each target.

    :param sample_normalized: pandas DataFrame of sample-normalized counts.
    :param settings: mapping with "averageCopyCount" (int-like) and
        "normalizationPercentiles" (iterable of floats in [0, 1]).
    :return: DataFrame of estimated copy counts.
    """
    target_copies = int(settings["averageCopyCount"])
    percentiles = [float(p) for p in settings["normalizationPercentiles"]]

    def _scale(column):
        baseline = column.quantile(percentiles).mean()
        return target_copies * column / baseline

    return sample_normalized.transform(_scale)
import fnmatch
def filename_matches(file_name, patterns):
    """
    Check whether the given file name matches any pattern in *patterns*.

    file_name - the file name being tested
    patterns - list of strings - glob patterns to test against
    Returns True if the name matches at least one pattern, False otherwise.
    """
    return any(fnmatch.fnmatch(file_name, pattern) for pattern in patterns)
def interpc(coef, lat):
    """ Linearly interpolate columns of *coef* over latitude (15-degree step).

    Columns correspond to successive 15-degree latitude bands; results are
    clamped to the first column below band 1 and to the last column above
    band 4.
    """
    band = int(lat / 15.0)
    if band < 1:
        return coef[:, 0]
    if band > 4:
        return coef[:, 4]
    frac = lat / 15.0 - band
    return coef[:, band - 1] * (1.0 - frac) + coef[:, band] * frac
import math
def xywha_to_quad(bbox):
    """
    Convert a rotated box (cx, cy, w, h, angle in degrees) to its corners.

    Returns a list of (x, y) tuples for the rotated rectangle.
    Axes follow image convention: x grows left->right, y grows top->down.
    """
    cnt_x, cnt_y, w, h, angle = bbox
    theta = angle * math.pi / 180.0
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    # Corner offsets in box-local coordinates.
    corners = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
    return [
        (sin_t * yy + cos_t * xx + cnt_x, cos_t * yy - sin_t * xx + cnt_y)
        for (xx, yy) in corners
    ]
import zlib
import base64
def decode_base64_and_inflate(string):
    """ Base64-decode, then inflate according to RFC1951.

    :param string: a deflated and encoded string
    :return: the bytes after decoding and inflating
    """
    compressed = base64.b64decode(string)
    # wbits=-15 -> raw deflate stream (no zlib header/checksum).
    return zlib.decompress(compressed, -15)
def sanitize_parley(parley):
    """Separate memories from context and unwrap the response.

    When the context contains newlines, the last line becomes the context
    and the preceding lines become the memories list. The response is
    unwrapped from its single-element list. Candidates and memories are
    asserted to be lists before the mutated parley is returned.
    """
    if '\n' in parley.context:
        *memories, context_text = parley.context.split('\n')
        parley.context = context_text
        parley.memories = memories
    parley.response = parley.response[0]
    assert(isinstance(parley.candidates, list))
    assert(isinstance(parley.memories, list))
    return parley
def is_empty(node):
    """Checks whether the :code:`node` is empty.

    Note: "empty" here means equal to the empty list specifically —
    an empty tuple, string, or dict does NOT count.
    """
    return node == []
import argparse
def parse_args(input_args=None):
    """
    Parse command-line arguments.

    Parameters
    ----------
    input_args : list, optional
        Input arguments. If not provided, defaults to sys.argv[1:].

    Returns
    -------
    argparse.Namespace
        Parsed arguments: dirs, config, map, summary, with_aid,
        with_target, phenotype, prefix, format.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('dirs', nargs='+',
                        help='Directories containing PCBA JSON files.')
    parser.add_argument('-c', '--config', required=True,
                        help='Configuration file containing assay annotations.')
    parser.add_argument('-m', '--map',
                        help='SID->CID map filename.')
    parser.add_argument('-s', '--summary',
                        help='Filename for summary information (.csv.gz).')
    # store_false flags: presence of --no-aid / --no-target turns the
    # corresponding with_* attribute OFF (both default to True).
    parser.add_argument('--no-aid', action='store_false', dest='with_aid',
                        help='Do not include AID with each data point.')
    parser.add_argument('--no-target', action='store_false', dest='with_target',
                        help='Do not include target with each data point.')
    parser.add_argument('--phenotype', action='store_true',
                        help='Require compound-level phenotype data.')
    parser.add_argument('--prefix', default='CID',
                        help='Prefix for molecule IDs.')
    parser.add_argument('-f', '--format',
                        choices=['csv', 'csv.gz', 'pkl', 'pkl.gz'],
                        default='pkl.gz',
                        help='Output file format.')
    return parser.parse_args(input_args)
import ast
def msg_decode(s):
    """Decode a UTF-8 bytes payload holding a Python dict literal into (t, v).

    :param s: bytes containing the repr of a dict with 't' and 'v' keys.
    :return: tuple of the 't' and 'v' values.
    """
    decoded = ast.literal_eval(s.decode("UTF-8"))
    return decoded['t'], decoded['v']
import math
def var_y(obliquity_correction):
    """Returns Var Y for the given Obliquity Correction.

    Var Y = tan^2(obliquity_correction / 2), with the angle in degrees.

    :param obliquity_correction: obliquity correction in degrees.
    :return: the Var Y value (float).
    """
    # Compute tan(eps/2) once instead of twice, then square it.
    half_tan = math.tan(math.radians(obliquity_correction / 2))
    return half_tan * half_tan
def mean(items):
    """Calculate the arithmetic mean of the items.

    :param items: Values the mean is calculated from.
    :type items: list
    :returns: Mean value.
    :rtype: float
    """
    total = float(sum(items))
    return total / len(items)
def load_players(player_file):
    """Load players that were stored into a json file.

    Implements the previously-stubbed body (it always returned None).

    :param player_file: path to the JSON file the players were saved to.
    :return: the deserialized player data.
    """
    import json  # local import: this file keeps imports next to each function
    with open(player_file, "r") as fh:
        return json.load(fh)
def trim_n_and_return_leading_offset(s):
    """
    Strip leading and trailing 'N' characters from *s*.

    Only the leading Ns are counted; trailing Ns are removed but not
    counted. ** Inline Ns are left untouched. **

    :return: (stripped string, number of leading Ns removed)
    """
    leading_offset = len(s) - len(s.lstrip('N'))
    trimmed = s.strip('N')
    return trimmed, leading_offset
def describe_call(name, args):
    """ Return a human-friendly description of this call.

    Dict args render as keyword expansion f(**{...}), lists as positional
    expansion f(*[...]); anything else (or empty/None) as a bare f().
    """
    if args and isinstance(args, dict):
        return '%s(**%s)' % (name, args)
    if args and isinstance(args, list):
        return '%s(*%s)' % (name, args)
    return '%s()' % name
def assemble(
    client,
    file_,
    dirname=None,
    generic=None,
    into_asm=None,
    path=None,
    ref_model=None,
    transform=None,
    constraints=None,
    package_assembly=None,
    walk_children=None,
    assemble_to_root=None,
    suppress=None,
):
    """Assemble a component into an assembly.

    Args:
        client (obj):
            creopyson Client.
        `file_` (str):
            File name component.
        dirname (str, optional):
            Directory name. Defaults is Creo's current working directory.
        generic (str, optional):
            Generic model name (if file name represents an instance).
        into_asm (str, optional):
            Target assembly. Defaults is currently active model.
        path (list:int, optional):
            Path to a component that the new part will be constrained to.
        ref_model (str, optional):
            Reference model that the new part will be constrained to;
            only used if path is not given. If there are multiple of this
            model in the assembly, the component will be assembled multiple
            times, once to each occurrence.
        transform (obj:JLTransform, optional):
            Transform structure for the initial position and orientation of
            the new component; only used if there are no constraints, or for
            certain constraint types.
        constraints (obj_array:JLConstraint, optional):
            Assembly constraints.
        package_assembly (bool, optional):
            Whether to package the component to the assembly; only used if
            there are no constraints specified. Default is that the user
            will be prompted to constrain the component through the Creo UI.
        walk_children (bool, optional):
            Whether to walk into subassemblies to find reference models to
            constrain to.
        assemble_to_root (bool, optional):
            Whether to always assemble to the root assembly, or assemble to
            the subassembly containing the reference path/model.
        suppress (bool, optional):
            Whether to suppress the components immediately after assembling
            them.

    Returns:
        (dict):
            dirname (str): Directory name of component.
            files (list:str): File name of component.
            revision (int): Revision of file that was opened; if more than
                one file was opened, this field is not returned.
            featureid (int): Last Feature ID of component after assembly.
    """
    data = {"file": file_}
    # Forward each optional parameter only when it was explicitly provided.
    # Replaces the original 11-branch copy-paste if-chain; insertion order
    # of the keys is preserved.
    optional = {
        "dirname": dirname,
        "generic": generic,
        "into_asm": into_asm,
        "path": path,
        "ref_model": ref_model,
        "transform": transform,
        "constraints": constraints,
        "package_assembly": package_assembly,
        "walk_children": walk_children,
        "assemble_to_root": assemble_to_root,
        "suppress": suppress,
    }
    data.update({key: value for key, value in optional.items() if value is not None})
    return client._creoson_post("file", "assemble", data)
def _safe_call(method):
    """
    Internal decorator to defer a method to the underlying NEURON object,
    unpacking all args and returning the result to the decorated method.

    The wrapper first forwards the call (by method name) to the instance's
    _safe_call, then invokes the decorated method with that result inserted
    as the first argument after self, followed by the original args.
    """
    def caller(self, *args, **kwargs):
        # Dispatch on the decorated method's name via the instance's
        # own _safe_call mechanism.
        call_result = self._safe_call(method.__name__, *args, **kwargs)
        return method(self, call_result, *args, **kwargs)
    return caller
def valid_map(file_name):
    """
    Check whether the magic number of *file_name* matches the
    Warcraft III map signature ("HM3W" in the first four bytes).

    Returns True on a match; False otherwise, including when the leading
    bytes are not valid UTF-8.
    """
    with open(file_name, "rb") as handle:
        magic = handle.read(4)
    try:
        decoded = str(magic.decode('utf-8'))
    except UnicodeDecodeError:
        return False
    return decoded == "HM3W"
def __get_package_from_build(build, package_name):
    """
    Find a package in a build by package name.

    :param build: build dict containing a "packages" list.
    :param package_name: value of the "package" key to look for.
    :return: the first matching package dict, or None when absent.
    """
    return next(
        (entry for entry in build["packages"] if entry["package"] == package_name),
        None,
    )
def student_average(grades: list) -> float:
    """Return the weighted average of a student's grades.

    The three grades are sorted ascending and weighted 0.25 / 0.35 / 0.4,
    so the highest grade carries the most weight.

    You may ASSUME that:
    - grades consists of exactly three float values
    """
    weights = (0.25, 0.35, 0.4)
    return sum(w * g for w, g in zip(weights, sorted(grades)))
def add_ri(data):
    """
    Add real and imaginary data to real channel.

    :param data: complex array-like exposing .real and .imag
        (e.g. a numpy array of complex dtype).
    :return: elementwise sum of the real and imaginary parts.
    """
    return data.real+data.imag
import re
def is_valid_attr_name(s: str) -> bool:
    """
    Ensure the given string can be used as attribute on an object instance.

    Accepts ASCII identifiers only: a letter or underscore followed by
    letters, digits, or underscores. Non-string input returns False.
    """
    # re.fullmatch instead of re.search with ^...$: with search, '$' also
    # matches just before a trailing newline, so e.g. "name\n" was accepted.
    return bool(
        isinstance(s, str) and re.fullmatch(r"[a-zA-Z_][a-zA-Z0-9_]*", s)
    )
import requests
def modem_url_request(url='http://192.168.100.1'):
    """
    Makes http request to Arris modem web page.

    :param url: modem web page URL.
    :return: raw page content (bytes) on success, or the string 'failed'
        on any request error (sentinel kept for backward compatibility).
    """
    try:
        # timeout prevents an unresponsive modem from hanging forever;
        # RequestException replaces the original bare except, which also
        # hid unrelated programming errors behind 'failed'.
        return requests.get(url, timeout=10).content
    except requests.RequestException:
        return 'failed'
def is_git_path(path):
    """Whether the path is to a git sub-directory.

    Matches only a path component named exactly ".git", so paths such as
    "/repo/.github/workflow" are no longer false positives (the original
    substring test '/.git' matched those too).

    >>> is_git_path('/path/to/.git/file')
    True
    >>> is_git_path('/path/to/.github/file')
    False
    """
    return '/.git/' in path or path.endswith('/.git')
def count_gene_co_occurrence(core_gene_dict, two_gene_segment):
    """
    Count genomes in which two genes co-occur, and each gene's occurrences.

    :param core_gene_dict: Dict mapping genome -> {locus_tag: pan-genome cluster}
    :param two_gene_segment: List of two genes forming a segment
    :return: tuple of (number of genomes where both genes occur together,
        dict mapping each gene to its individual occurrence count)
    """
    gene_a, gene_b = two_gene_segment[0], two_gene_segment[1]
    co_occurrence = 0
    gene_occurrence = dict.fromkeys(two_gene_segment, 0)
    segment_set = set(two_gene_segment)
    # Walk every genome's pan-genome clusters once.
    for core_genes in core_gene_dict.values():
        clusters = list(core_genes.values())
        if segment_set.issubset(clusters):
            co_occurrence += 1
        if gene_a in clusters:
            gene_occurrence[gene_a] += 1
        if gene_b in clusters:
            gene_occurrence[gene_b] += 1
    return co_occurrence, gene_occurrence
import os
def main():
    """ Main cli.

    Generates the tchess man page via help2man, then post-processes the
    generated roff text in man/tchess.1.

    :return: 1 when help2man is unavailable; None on success.
    """
    # generate the man page using help2man tool
    result = os.system('help2man "python3 tchess --verbose" --output=man/tchess.1')
    if result != 0:
        print("error: help2man is not installed")
        return 1
    # read the generated file
    f = open('man/tchess.1', 'r')
    content = f.read()
    f.close()
    # make the changes
    content = content.replace('python3 tchess --verbose', 'tchess')
    content = content.replace('PYTHON3 TCHESS --VERBOSE', 'TCHESS')
    content = content.replace('.PP', '.SH')
    content = content.replace('.IP', '')
    # Swap the NAME/DESCRIPTION section headers: the first replacement drops
    # the original DESCRIPTION, then NAME becomes DESCRIPTION and the first
    # (new) DESCRIPTION becomes NAME again.
    content = content.replace('.SH DESCRIPTION', '', 1)
    content = content.replace('.SH NAME', '.SH DESCRIPTION')
    content = content.replace('.SH DESCRIPTION', '.SH NAME', 1)
    # Drop the boilerplate "SEE ALSO" section help2man appends.
    content = content.replace('''.SH "SEE ALSO"
The full documentation for
.B tchess
is maintained as a Texinfo manual. If the
.B info
and
.B tchess
programs are properly installed at your site, the command
.B info tchess
.SH
should give you access to the complete manual.''', '')
    # Remove the auto-generated one-line summary help2man inserts.
    content = '\n'.join([line for line in content.splitlines() if not line.strip().startswith('tchess \\- manual page for tchess ')])
    # write the file again
    f = open('man/tchess.1', 'w')
    f.write(content)
    f.close()
    print('Manual page was generated in `man/tchess.1`.')
    print('Run `man -l man/tchess.1` to see the generated manpage.')
def get_object_from_destination(input_object, destination, location_id):
    """
    Filter *input_object* down to rows attached to a given destination.

    To stop the repeat code of finding specific objects using destination
    and location_id - we will import the object filter for it here - before
    returning it. Every supported destination filters on the field
    "<destination>_id", so the original 11-branch if/elif chain (with its
    inconsistent if/if/elif structure) is replaced by one keyword-built
    filter. Unknown destinations leave the object untouched, as before.

    :param input_object: The object we want to filter
    :param destination: The destination we are interested in
    :param location_id: The location_id
    :return: the (possibly filtered) input_object
    """
    known_destinations = {
        "kanban_board",
        "kanban_card",
        "opportunity",
        "organisation",
        "project",
        "quote",
        "requirement",
        "request_for_change",
        "requirement_item",
        "task",
        "whiteboard",
    }
    if destination in known_destinations:
        input_object = input_object.filter(**{f"{destination}_id": location_id})
    # Just send back the array
    return input_object
import warnings
import torch
import math
def get_ood_score(entropy_in, entropy_out, n_classes):
    """Deprecated! Use ROCAUC instead.

    Combines in- and out-of-distribution entropies into a single score,
    normalized by the maximum entropy.

    :param entropy_in: tensor of entropies for in-distribution samples.
    :param entropy_out: tensor of entropies for out-of-distribution samples.
    :param n_classes: NOTE(review): currently unused — max_entropy is
        hard-coded to log(10) (see the commented-out uniform computation),
        so the result is only meaningful for 10 classes; confirm callers.
    :return: float, mean of the two normalized score components.
    """
    warnings.warn('Deprecated use `get_AUROC_ood` instead.')
    with torch.no_grad():
        # p_uniform = torch.full((n_classes,), 1 / n_classes)
        # max_entropy = -torch.sum(p_uniform * p_uniform.log())
        max_entropy = math.log(10)
        in_score = entropy_in.mean() / max_entropy
        out_score = (max_entropy - entropy_out.mean()) / max_entropy
        return ((in_score + out_score) / 2).item()
def u_backward_rnvp(params, xs, vs, initd_forward_rnvp, u_f_fn, ke_fn, kT):
    """Compute the energy of the log-push-backward probability B'.

    Pushes (xs, vs) through the forward RNVP flow, evaluates the reduced
    potential of the pushed coordinates, and subtracts the flow's
    log |det J|.
    """
    pushed_xs, pushed_vs, log_det_jacobian = initd_forward_rnvp.apply(params, xs, vs)
    reduced_potential = u_f_fn(pushed_xs) / kT
    ke_fn(pushed_vs)  # kinetic energy is evaluated (as before) but unused
    return reduced_potential - log_det_jacobian
import sys
def get_size(python_object: object) -> int:
    """Return the size of an object.

    Note: sys.getsizeof is shallow — it does not include the size of
    objects referenced by containers.
    """
    return sys.getsizeof(python_object)
import math
def euclidian(p1, p2):
    """Return euclidian distance between 2 points.

    Generalized: math.dist accepts coordinate sequences of any equal
    length, so 2-D calls behave as before and n-D now works too.
    """
    return math.dist(p1, p2)
def extract_column(X, col_name):
    """Extract specified column from dataframe.

    :param X: pandas DataFrame.
    :param col_name: column to extract; may be None or absent from X.
    :return: (X without the column, extracted column copy), or
        (X unchanged, None) when col_name is None or not present.
    """
    if col_name is None or col_name not in list(X.columns):
        return X, None
    extracted = X[col_name].copy()
    remaining = X.drop(col_name, axis=1)
    return remaining, extracted
def get_roi(img, top_left, bot_right):
    """ Returns region of interest of an img given bounding box points.

    Points are (x, y); coordinates are clamped to the image bounds
    (upper bounds clamp to shape-1, matching the original behaviour).
    """
    y0 = max(top_left[1], 0)
    y1 = min(bot_right[1], img.shape[0] - 1)
    x0 = max(top_left[0], 0)
    x1 = min(bot_right[0], img.shape[1] - 1)
    return img[y0:y1, x0:x1]
import configparser
def readConfig(filePath):
    """Converts data from an ini file to dict.

    :param filePath: path to the ini file.
    :return: nested dict {section: {option: value}} for every section.
    """
    config = configparser.ConfigParser()
    config.read(filePath)
    return {
        section: {option: config.get(section, option)
                  for option in config.options(section)}
        for section in config.sections()
    }
from typing import List
def midi_program_change(program: int, channel: int = 0) -> List[int]:
    """MIDI CnH message - program change.

    Builds the two-byte message [status, program], where the status byte
    is 0xC0 plus the channel number.

    >>> midi_program_change(80, 1)
    [193, 80]
    """
    return [0xC0 + channel, program]
def corpus2sentences(corpus):
    """Split corpus into a list of sentences (one per '\\n'-separated line).

    Leading/trailing whitespace on the corpus is removed before splitting.
    """
    stripped = corpus.strip()
    return stripped.split('\n')
def get_color(matrix):
    """Returns the color of the matrix (excluding black).

    Scans row by row and returns the first non-zero cell; returns None
    when the matrix only contains black (0) cells.
    """
    return next(
        (cell for row in matrix for cell in row if cell != 0),
        None,
    )
def select_metrics_by_snr(cur_nodes: list, prev_node: dict, metric_names: list, tol_error: dict,
                          compute_all_at_once: bool, alowed_metrics: list, cur_metr: str) -> str:
    """SNR approach to a metric selection.
    Metrics that had the highest SNR ratio (metric distance from the prev point)/(ambient noise) is selected next
    However, this approach does not always work and while you may a high SNR with contacts, there may be no real decrease in the rmsd.
    It is affected by the previous point performance.
    Args:
        :param list cur_nodes: recent nodes
        :param dict prev_node: previous node
        :param list metric_names: list of metrics implemented (I want to know whole statistics, not only allowed metrics)
        :param dict tol_error: dict with noise data
        :param bool compute_all_at_once: toggle left as a reminder to not implement all at once
        :param list alowed_metrics: list of metrics that we allow to be used during the current run
        :param str cur_metr: name of the current metric
    Returns:
        :return: metric name with the highest SNR
    """
    if not compute_all_at_once:
        # easy to implement, but I do not have plans to use it since 'all at once' is very fast
        # just take last node and compute all metrics
        raise Exception('Not implemented')
    # The SNR branch is currently hard-disabled; only the round-robin branch runs.
    snr = False
    if snr:  # SNR approach may be biased. Additionally, prev_node should be computed here as prev point in name: s_1 is prev to s_1_3
        signal = dict()
        best_metr = metric_names[0]
        best_val = -1
        for metr in metric_names:
            # Each node stores its distance-to-goal under the key '<metric>_to_goal'.
            cur_name = '{}_to_goal'.format(metr)
            signal[metr] = 0
            for i in range(len(cur_nodes)):
                # Accumulated progress relative to the previous node, scaled by the metric's noise floor.
                signal[metr] += (cur_nodes[i][cur_name] - prev_node[cur_name]) / tol_error[metr]
            # NOTE(review): `metric_names != metric_names[0]` compares a list to its first
            # element and is therefore always True — presumably `metr != metric_names[0]`
            # was intended; confirm before enabling this branch.
            if metric_names != metric_names[0] and signal[metr] > best_val and metr in alowed_metrics:
                best_val = signal[metr]
                best_metr = metr
        if best_metr == cur_metr:
            print('New metric is the same as previous. Switching to next metric')
            # Advance cyclically until we leave cur_metr and land on an allowed metric.
            while len(metric_names) > 1 and (best_metr == cur_metr or best_metr not in alowed_metrics):
                best_metr = metric_names[(metric_names.index(best_metr) + 1) % len(metric_names)]
        print('SNR for metrics:')
        for metr in metric_names:
            if metr == best_metr:
                print(' >*{}: {}'.format(metr, signal[metr]))
            elif best_val == signal[metr]:
                print(' +{}: {}'.format(metr, signal[metr]))
            elif metr not in alowed_metrics:
                print(' {}: {} # ignored'.format(metr, signal[metr]))
            else:
                print(' {}: {}'.format(metr, signal[metr]))
    else:  # use round-robin
        best_metr = metric_names[(metric_names.index(cur_metr) + 1) % len(metric_names)]
        # NOTE(review): this loop recomputes the successor of cur_metr (not of best_metr)
        # on every iteration, so it never advances further — it loops forever if the
        # immediate successor is not allowed. Verify whether `cur_metr` should be `best_metr`.
        while best_metr not in alowed_metrics:
            print('Skipping {} since it is not in allowed list'.format(best_metr))
            best_metr = metric_names[(metric_names.index(cur_metr) + 1) % len(metric_names)]
        print('Switching to {}'.format(best_metr))
    return best_metr
def get_rnd_param():
    """Assemble the request parameters for get_validate_image.

    :return: parameter dict with the fixed 'act' code.
    """
    return {'act': "13"}
import pprint
import asyncio


def test_add_batch_to_queue_1(pq, batch_params, queue_seq, batch_seq, expected_seq):
    """
    Basic test for the function ``PlanQueueOperations.add_batch_to_queue()``.

    :param pq: PlanQueueOperations fixture under test.
    :param batch_params: kwargs forwarded to ``add_batch_to_queue`` (may contain
        ``before_uid``/``after_uid`` placement keys).
    :param queue_seq: iterable of single-character plan names used to pre-fill the queue.
    :param batch_seq: iterable of single-character plan names forming the batch.
    :param expected_seq: expected concatenation of plan names in final queue order.
    """
    async def add_plan(plan, n, **kwargs):
        # Insert one plan and check both the echoed plan and the resulting queue size.
        plan_added, qsize = await pq.add_item_to_queue(plan, **kwargs)
        assert plan_added["name"] == plan["name"], f"plan: {plan}"
        assert qsize == n, f"plan: {plan}"

    async def testing():
        # Create the queue with plans
        for n, p_name in enumerate(queue_seq):
            await add_plan({"name": p_name, "item_uid": f"{p_name}{p_name}"}, n + 1)

        def fix_uid(uid):
            # Item uids in this test are the doubled plan name (e.g. "a" -> "aa").
            return f"{uid}{uid}"

        # Placement keys in batch_params reference plan names; map them to real uids.
        if "before_uid" in batch_params:
            batch_params["before_uid"] = fix_uid(batch_params["before_uid"])
        if "after_uid" in batch_params:
            batch_params["after_uid"] = fix_uid(batch_params["after_uid"])

        items = []
        for p_name in batch_seq:
            items.append({"name": p_name, "item_uid": p_name + p_name})

        items_added, results, qsize, success = await pq.add_batch_to_queue(items, **batch_params)
        assert success is True, pprint.pformat(results)
        assert qsize == len(queue_seq) + len(batch_seq)
        assert await pq.get_queue_size() == len(queue_seq) + len(batch_seq)
        assert len(items_added) == len(items)
        assert len(results) == len(items)
        # Verify that the results are set correctly (success)
        for res in results:
            assert res["success"] is True, pprint.pformat(results)
            assert res["msg"] == "", pprint.pformat(results)
        # Verify the sequence of items in the queue
        queue, _ = await pq.get_queue()
        queue_sequence = [_["name"] for _ in queue]
        queue_sequence = "".join(queue_sequence)
        assert queue_sequence == expected_seq
        await pq.clear_queue()

    asyncio.run(testing())
def draw_truth_table(boolean_fn):
    """Print a truth table for a three-argument boolean function.

    Columns a, b, c enumerate all input combinations (True first); the
    final column shows boolean_fn(a, b, c) coerced to bool.

    ((bool, bool, bool) -> bool) -> None
    """
    row_fmt = "%-6s%-6s%-6s%s"
    # Header and separator
    print(row_fmt % ("a", "b", "c", "res"))
    print("-" * 23)
    # Eight rows: inputs passed as 1/0, displayed as True/False.
    for a in (1, 0):
        for b in (1, 0):
            for c in (1, 0):
                row = (a, b, c, boolean_fn(a, b, c))
                print(row_fmt % tuple(bool(v) for v in row))
    return None
import jinja2


def render_j2_template(templatefile, searchpath, obj):
    """Render the named Jinja2 template from searchpath against obj and
    return the rendered string."""
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath=searchpath),
        trim_blocks=False,
        lstrip_blocks=False,
    )
    template = env.get_template(templatefile)
    return template.render(obj)
def unpack_string(value):
    """Decode a UTF-16 byte sequence back into a Python string."""
    decoded = value.decode('utf-16')
    return decoded
def tokenize_function(examples, tokenizer, block_size):
    """Tokenize a text dataset and regroup it into fixed-size blocks.

    Steps:
    1. tokenize every text in ``examples["text"]``;
    2. concatenate each tokenized field's list-of-lists into one long list;
    3. chop each field into consecutive blocks of ``block_size`` tokens,
       dropping the ragged tail;
    4. copy ``input_ids`` into ``labels`` (causal-LM style).

    @params:
        examples: mapping with a "text" entry to tokenize
        tokenizer: callable producing a dict of per-example token lists
        block_size: number of tokens per output block
    @returns:
        dict of equal-length block lists, including 'labels'
    """
    # 1. Tokenize the whole dataset at once.
    encoded = tokenizer(examples["text"])
    # 2. Flatten every field from 2-D (per example) to a single 1-D list.
    flattened = {name: sum(encoded[name], []) for name in encoded.keys()}
    # 3. Number of tokens that fit into whole blocks (tail is discarded).
    usable_length = (len(flattened['input_ids']) // block_size) * block_size
    chunked = {}
    for name, sequence in flattened.items():
        chunked[name] = [sequence[start:start + block_size]
                         for start in range(0, usable_length, block_size)]
    # 4. Labels are the inputs themselves for language modelling.
    chunked['labels'] = chunked['input_ids'].copy()
    return chunked
def key_callback(x):
    """Collect the class_name of every instance detected on an image."""
    return [instance['class_name'] for instance in x['pred_inst']]
import re


def clean_training_text(txt, lower=False, total_clean=False):
    """Strip non-alphanumerics from txt, optionally lowercasing and
    collapsing repeated spaces.

    Competition's evaluation: `lower=True` and `total_clean=False`.
    """
    text = str(txt)
    if lower:
        text = text.lower()
    text = re.sub('[^A-Za-z0-9]+', ' ', text).strip()
    if total_clean:
        text = re.sub(' +', ' ', text)
    return text
import random


def d6() -> int:
    """Simulate one roll of a six-sided die, returning an int in 1..6."""
    roll = random.randint(1, 6)
    return roll
def is_listing_owner(listing, user_object):
    """True when user_object is the owner of the given listing."""
    return listing.owner_id == user_object.id
def check_arguments(args):
    """Validate parsed CLI arguments for the selected input mode.

    Exactly one of ``args.files`` / ``args.cli`` / ``args.hardcoded`` selects
    where keys, messages and signatures come from; this checks that the
    arguments required by that mode are present.

    :param args: argparse.Namespace-like object with the parsed options.
    :return: True when the arguments are consistent.
    :raises AssertionError: when a required argument for the mode is missing.
    """
    def _build_missing_arg_error(command, arg_name):
        return "With {} command, {} argument needs to be defined.".format(command, arg_name)

    if args.verbosity > 0:
        # Propagate the requested verbosity to the module-level setting.
        global verbosity
        verbosity = args.verbosity
    if args.files:
        assert args.pubkey, _build_missing_arg_error("-files", "--pubkey")
        assert args.message1, _build_missing_arg_error("-files", "--message1")
        assert args.message2, _build_missing_arg_error("-files", "--message2")
        assert args.signature1, _build_missing_arg_error("-files", "--signature1")
        assert args.signature2, _build_missing_arg_error("-files", "--signature2")
    elif args.cli:
        # Fixed: these messages previously named "-files" although they
        # validate the -cli mode.
        assert args.pk, _build_missing_arg_error("-cli", "-pk")
        assert args.m1, _build_missing_arg_error("-cli", "-m1")
        assert args.m2, _build_missing_arg_error("-cli", "-m2")
        if args.sig1:
            assert args.sig2, "With -sig1, -sig2 must also be defined."
        elif args.sig2:
            assert args.sig1, "With -sig2, -sig1 must also be defined."
        else:
            if args.r:
                assert args.s1, "With -r s1 and s2 must both be defined."
                assert args.s2, "With -r s1 and s2 must both be defined."
            else:
                # Fixed: the original `assert "<message>"` asserted a non-empty
                # string, which is always truthy, so this error path could
                # never fire. Raise explicitly instead.
                raise AssertionError(
                    "If signatures are not given through -sig1 and -sig2, it is possible to give the common r and "
                    "both remaining halfs of the signatures with -s1 and -s2."
                )
    elif args.hardcoded:
        return True
    return True
import os
def _default_settings(model, n_instances, query_strategy, balance_strategy,
mode, data_fp):
""" Create settings dictionary with values. """
data_name = os.path.basename(data_fp)
settings = {
"data_file": data_name,
"model": model.lower(),
"query_strategy": query_strategy,
"balance_strategy": balance_strategy,
"n_instances": n_instances,
"mode": mode,
"model_param": {},
"fit_param": {},
"query_param": {},
"balance_param": {},
}
return settings | d43f97c9a3aa2db79313c1941906d5d85cca60b1 | 50,574 |
def merge_bbox(bbox1, bbox2):
    """Return the smallest (x0, y0, x1, y1) box covering both pdf blocks."""
    x0 = min(bbox1[0], bbox2[0])
    y0 = min(bbox1[1], bbox2[1])
    x1 = max(bbox1[2], bbox2[2])
    y1 = max(bbox1[3], bbox2[3])
    return (x0, y0, x1, y1)
def is_abbreviation(nm: str):
    """Heuristically decide whether a name is an abbreviation.

    The rule is simply: a name ending in "." is considered abbreviated.
    Examples:
        "Ala."   -> True
        "Ala"    -> False
        "S. Bob" -> False (more like a contraction)
        "S. B."  -> True

    :param nm: textual name
    :return: True if nm is inferred to be an abbreviation
    """
    return nm.endswith(".")
import re


def find_timestamp(text_list):
    """Find a timestamp in a row's 'speech' field and record it in place.

    Sets ``start_timestamp`` to the first match (or None) and ``digit`` to
    1/0 indicating whether a timestamp was found; ``digit`` later helps
    filling start/end timestamps.

    :param text_list: mutable mapping (e.g. a dataframe row) with a
        'speech' entry.
    :return: the same mapping, mutated.
    """
    # NOTE(review): the "." after the seconds is an unescaped regex dot, so it
    # matches any character (e.g. "." or ","); kept as-is to preserve behavior.
    pat = re.compile('(\d\d:\d\d:\d\d. *\d\d)')
    matches = pat.search(text_list['speech'])
    if matches is not None:
        # Fixed: the original re-tested `matches is not None` here via a
        # conditional expression whose else-branch was unreachable.
        text_list['start_timestamp'] = matches.group(1)
        text_list['digit'] = 1
    else:
        text_list['digit'] = 0
        text_list['start_timestamp'] = None
    return text_list
def align_by_root(joints):
    """Translate a joint array so the root joint sits at the origin.

    Assumes joints is 24 x 3 in SMPL order; row 0 is the root, and its
    coordinates are subtracted from every joint.
    """
    return joints - joints[0, :]
def simple_bytecode() -> str:
    """Return LLVM IR text for two tiny functions, used as a test fixture.

    From C code:
        int B() {
            return 10;
        }
        int A() {
            int x = B();
            if (x == 5) {
                x += 1;
            }
            return x;
        }
    """
    # The IR below is returned verbatim; do not reformat it.
    return """
    ; Function Attrs: noinline nounwind optnone ssp uwtable
    define i32 @B() #0 {
    ret i32 10
    }
    ; Function Attrs: noinline nounwind optnone ssp uwtable
    define i32 @A() #0 {
    %1 = alloca i32, align 4
    %2 = call i32 @B()
    store i32 %2, i32* %1, align 4
    %3 = load i32, i32* %1, align 4
    %4 = icmp eq i32 %3, 5
    br i1 %4, label %5, label %8
    ; <label>:5: ; preds = %0
    %6 = load i32, i32* %1, align 4
    %7 = add nsw i32 %6, 1
    store i32 %7, i32* %1, align 4
    br label %8
    ; <label>:8: ; preds = %5, %0
    %9 = load i32, i32* %1, align 4
    ret i32 %9
    }
    """
import __main__ as main


def is_interactive():
    """Check whether we are running interactively (no __main__.__file__)."""
    running_from_script = hasattr(main, '__file__')
    return not running_from_script
def exception_view__upgrade(exc, request):
    """Force a JSON Accept header when the request path ends with .json,
    then pass the exception through unchanged."""
    suffix = request.path[-5:]
    if suffix.lower() == ".json":
        request.environ["HTTP_ACCEPT"] = "application/json"
    return exc
def get_property(obj, name):
    """Get an object property value by a property name.

    Fixed: the original called ``getattr(name, obj)`` with the arguments
    swapped — getattr takes the object first and the attribute name second,
    so every call raised TypeError or looked up the wrong attribute.
    """
    return getattr(obj, name)
import statistics
import math


def asymptotic_S_1(t_S, nu, N, flag = False, t_start = 1):
    """Mean of the first S component over the tail of a (t, S) time series.

    When flag is False, the start of the tail is chosen automatically as
    int(0.6 * exp(-nu*N/10) * t); otherwise the supplied t_start is used.
    """
    final_t = t_S[-1][0]
    if flag == False:
        damping = math.exp(-nu * N / 10)
        t_start = int(0.6 * damping * final_t)
    tail = [t_S[i][1][0] for i in range(t_start, final_t)]
    return statistics.mean(tail)
from operator import add


def multiply(x, y):
    """
    Question 5.5: Compute X x Y without using the multiplication operator
    (shift-and-add).

    Fixed: the original looped `while x` with `x >>= 1`, which never
    terminates for negative x (Python's arithmetic right shift preserves
    the sign, so x stays -1 forever). Negative x is now handled by
    multiplying |x| with -y instead.
    """
    if x < 0:
        # Move the sign onto y so the shift loop runs over a non-negative x.
        x, y = -x, -y
    cur_sum = 0
    while x:
        if x & 1:
            cur_sum = add(cur_sum, y)
        x >>= 1
        y <<= 1
    return cur_sum
import random


def shuffle_dataset(sorted_dataset):
    """
    Shuffle the items inside each length bucket of a len-sorted dataset and
    concatenate all buckets into a single flat list (bucket order preserved).
    """
    flattened = []
    for length in sorted_dataset:
        bucket = list(sorted_dataset[length])
        random.shuffle(bucket)
        flattened.extend(bucket)
    return flattened
def set0(strlist, pos):
    """Truncate strlist in place at index pos; the removed tail is returned
    (in original order) alongside the shortened list."""
    removed = []
    while len(strlist) > pos:
        removed.append(strlist.pop())
    removed.reverse()
    return strlist, removed
def print_dice(dice: list):
    """
    Build a display list of dice faces, replacing every 6 with "vers".
    """
    shown = []
    # Distinct loop variable: the original shadowed the `dice` parameter.
    for face in dice:
        shown.append("vers" if face == 6 else face)
    return shown
from typing import Counter


def detect_straightlining(df, adjust=True):
    """Detect straightlining (identical answers across a respondent's row).

    :param df pandas.DataFrame.
    :param adjust bool. If True, return the fraction of each row taken by its
        most frequent answer; if False, return the raw count of that answer.
    """
    def _max_run(row):
        # Stringify values so mixed types compare consistently.
        counts = Counter(str(value) for value in row.values)
        return max(counts.values())

    scores = df.apply(_max_run, axis=1)
    return scores / df.shape[1] if adjust else scores
def predictors_validate(predictors, data=None):
    """Validate predictors and coerce them to list(str).

    When ``data`` is given, additionally checks that every predictor is a
    column of that dataframe.

    Parameters
    ----------
    predictors: list(str) or str
        the predictor(s) to validate
    data : pd.DataFrame or None, optional
        the data set to validate the predictors are in

    Returns
    -------
    list(str)
        validated predictors

    Raises
    ------
    ValueError
        if a predictor is named 'all' or 'none'
        if a predictor is not a column in the data set

    Examples
    --------
    >>> predictors_validate('famhistory')
    ['famhistory']
    >>> predictors_validate(['famhistory', 'marker'])
    ['famhistory', 'marker']
    >>> predictors_validate('all')
    Traceback (most recent call last):
    ...
    ValueError: predictor cannot be named 'all' or 'none'
    """
    # A bare string means a single predictor; wrap it.
    if isinstance(predictors, str):
        predictors = [predictors]
    # 'all' and 'none' are reserved column names downstream.
    for predictor in predictors:
        if predictor in ('all', 'none'):
            raise ValueError("predictor cannot be named 'all' or 'none'")
    # Optional membership check against the dataframe's columns.
    if data is not None:
        for predictor in predictors:
            if predictor not in data.columns:
                raise ValueError("predictor must be a column in the dataframe")
    return predictors
def correctinput(string):
    """Normalise a path-like string: escape-encode it, then convert every
    backslash (and any resulting double slash) to a single forward slash."""
    escaped = string.encode('unicode-escape').decode()
    slashed = escaped.replace('\\', '/')
    return slashed.replace('//', '/')
def anyendswith(value, ends):
    """ Check if `value` ends with one of the possible `ends`.

    :param value: string to test.
    :param ends: iterable of candidate suffixes.
    :return: True if any suffix matches, else False (False for empty ends).
    """
    # str.endswith accepts a tuple of suffixes and tests them all in one
    # C-level call, replacing the manual loop.
    return value.endswith(tuple(ends))
def capfile(name, ext):
    """Append the extension to the filename unless it is already there."""
    if ext[0] != '.':
        ext = '.' + ext
    if not name.endswith(ext):
        name = name + ext
    return name
import torch


def quat_to_d6(quats:torch.Tensor) -> torch.Tensor:  # take (...,4) --> (...,6)
    """Convert quaternions (r, i, j, k) to a 6-value rotation encoding.

    This code is adapted from https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/transforms/rotation_conversions.py
    """
    r, i, j, k = torch.unbind(quats, -1)
    # 2 / |q|^2 — normalisation factor, so non-unit quaternions still yield
    # a proper rotation matrix.
    two_s = 2.0 / (quats * quats).sum(-1)
    # Standard quaternion -> 3x3 rotation matrix expansion, row-major.
    o = torch.stack(
        (
            1 - two_s * (j * j + k * k),
            two_s * (i * j - k * r),
            two_s * (i * k + j * r),
            two_s * (i * j + k * r),
            1 - two_s * (i * i + k * k),
            two_s * (j * k - i * r),
            two_s * (i * k - j * r),
            two_s * (j * k + i * r),
            1 - two_s * (i * i + j * j),
        ),
        -1,
    )
    matrix = o.reshape(quats.shape[:-1] + (3, 3))
    # Keep the first two rows of each 3x3 matrix and flatten them to 6 values
    # — presumably the continuous 6D rotation representation; confirm against
    # the consumers of this encoding.
    return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6)
def get_level(data, bits):
    """
    Returns sensor level value from data using sensor bit mask in micro volts (uV).

    Builds a 14-bit value by collecting, from most to least significant, the
    bit at position ``bits[i]`` within ``data`` (offset by one byte).

    Fixed for Python 3:
    * ``bits[i] / 8`` is float division in Python 3 and cannot index a
      sequence — integer division ``//`` is required;
    * indexing ``bytes`` already yields an int, so ``ord()`` must only be
      applied to ``str`` input (Python-2 style byte strings).
    """
    level = 0
    for i in range(13, -1, -1):
        level <<= 1
        # The +1 skips a leading byte — presumably protocol framing; confirm
        # against the sensor packet layout.
        b, o = bits[i] // 8 + 1, bits[i] % 8
        byte = data[b]
        if isinstance(byte, str):
            byte = ord(byte)
        level |= (byte >> o) & 1
    return level
def get_column(length, count):
    """Map an item's 1-based position within an ordered list of `length`
    items to one of four layout classes col1..col4."""
    per_column = length // 4
    for column, upper in (('col1', per_column),
                          ('col2', 2 * per_column),
                          ('col3', 3 * per_column)):
        if count <= upper:
            return column
    return 'col4'
def parse_data_line(line: str):
    """
    Parse one line of the official image listing.
    :param line: raw CSV line "id,url,type-part[,type-part...]"
    :return: (image id, URL, remaining fields joined with underscores)
    """
    fields = line.strip().split(",")
    image_id, url = fields[0], fields[1]
    category = "_".join(fields[2:])
    return image_id, url, category
def try_anafas_float(floatstr):
    """
    Convert an Anafas fixed-column field to float. Surrounding blanks are
    ignored; when no decimal separator is present the value is assumed to
    carry two implied decimals (Anafas' default) and is divided by 100.
    Unparseable input yields 0.0.
    """
    try:
        number = float(floatstr.strip())
    except ValueError:
        return 0.0
    # An omitted "." means the last two digits are the decimals.
    if "." not in floatstr:
        number = number / 100.0
    return number
def ace_rule_cmd(safe_classifier, acl_data):
    """
    Fixture - pull the first ACE's matches out of the ACL data and let the
    classifier translate them into an ip(6)tables command.
    :param safe_classifier: safe_classifier fixture
    :type safe_classifier: `:class:common.classifier.NfqClassifier`
    :param acl_data: acl_data fixture
    :type acl_data: dict
    :returns list
    """
    first_acl = acl_data['access-lists']['access-list'][0]
    ace_matches = first_acl['access-list-entries'][0]['matches']
    return safe_classifier.parse_ace(ace_matches)
import random


def byte_sample(b, size, n):
    """Draw n random windows of up to `size` bytes from b and join them.

    Overlapping windows are clipped at the start of the following window.
    """
    starts = [random.randint(0, len(b) - size) for _ in range(n)]
    ends = [min(start + size, nxt) for start, nxt in zip(starts, starts[1:])]
    ends.append(starts[-1] + size)
    return b''.join(b[start:end] for start, end in zip(starts, ends))
def all_pairs(elements):
    """
    Helper function, giving all pairs of a list of elements.

    Parameter
    --------
    elements: List[Any]
        list of elements

    Returns
    -------
    List[Tuple[Any, Any]]
        Unique pairings of the elements, in the same (i < j) order the
        original nested index loops produced.
    """
    # itertools.combinations yields exactly the (elements[i], elements[j])
    # pairs with i < j, replacing the hand-rolled double loop.
    from itertools import combinations
    return list(combinations(elements, 2))
def simplify_symmetric_case(p_dict, q_dict):
    """
    Simplify "symmetric" number expressions.

    Example: numbers p = [1, x, 1] and q = [1, 1-x, 1] are symmetric — their
    product is the same regardless of x. Whenever a pair of non-integer
    entries sums to 1, the bits are fixed so that p gets 1 and q gets 0,
    making p the greater number.

    Args:
        p_dict, q_dict: See module documentation at the top.
    Returns:
        p_dict, q_dict: See module documentation at the top (mutated in place).
    """
    if len(p_dict) != len(q_dict):
        return p_dict, q_dict
    for key in p_dict:
        p_val, q_val = p_dict[key], q_dict[key]
        # Exact-type check on purpose: plain ints are already decided bits.
        if type(p_val) is int and type(q_val) is int:
            continue
        if p_val + q_val == 1:
            p_dict[key] = 1
            q_dict[key] = 0
    return p_dict, q_dict
def nicenum(value, spacer='\u202F', nonchar='␀'):
    """Format the given number with spacer as thousands delimiter,
    e.g. `1 234 456`; None renders as nonchar.

    Default spacer is NARROW NO-BREAK SPACE U+202F.
    Probably `style="white-space:nowrap; word-spacing:0.5em;"` would be an
    CSS based alternative.
    """
    if value is None:
        return nonchar
    digits = '%d' % int(value)
    backwards = digits[::-1]
    groups = [backwards[i:i + 3][::-1] for i in range(0, len(backwards), 3)]
    return spacer.join(reversed(groups))
def linear_assym(t, t0, A, s1, s2):
    """Asymmetric piecewise-linear profile around t0: slope 1/s1 on the
    rising side (t <= t0), slope -1/s2 on the decay side (t > t0).
    Vectorised over a numpy array t."""
    rising = A + (t - t0) / s1
    falling = A - (t - t0) / s2
    after_peak = t > t0
    rising[after_peak] = falling[after_peak]
    return rising
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.