content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def safe(string):
    """Quote *string* so it fits the YAML standard (hopefully). Counterpart
    to :func:`acrylamid.readers.unsafe`."""
    # An empty value must be serialized as an explicit empty quoted string.
    if not string:
        return '""'
    # Single characters pass through untouched.
    if len(string) < 2:
        return string
    # Strings containing YAML-special characters are quoted; prefer double
    # quotes unless the string already contains one.
    if any(special in string for special in ':%#*?{}[]'):
        quote = '\'' if '"' in string else '\"'
        return quote + string + quote
    # Strings already wrapped in one quote style get wrapped in the other.
    for inner, outer in (("'", '"'), ('"', "'")):
        if string.startswith(inner) and string.endswith(inner):
            return outer + string + outer
    return string
|
7f3945a22bf9f5f51b524a7dbc0a6dcf5347f911
| 24,707
|
def collapse_list(items):
    """Collapse an ordered list of numbers into range notation.

    For example, [1, 2, 3, 5, 6, 7, 10] yields '1-3, 5-7, 10'.

    Fix over the original: an empty input list now returns '' instead of
    raising IndexError on ``items[0]``.

    :param items: ordered list of integers (may be empty)
    :return: comma-separated string of single values and 'a-b' ranges
    """
    if not items:
        return ""
    result = ""
    previtem = items[0]
    sequence = False  # are we currently inside a consecutive run?
    for item in items:
        if sequence:
            if item != previtem + 1:
                # Run ended: close it and start a new entry.
                sequence = False
                result += "-" + str(previtem) + ", " + str(item)
        else:
            if item == previtem + 1:
                sequence = True
            else:
                result += ", " + str(item)
        previtem = item
    if sequence:
        # Close a run that extended to the end of the list.
        result += "-" + str(previtem)
    # Drop the leading ", " separator.
    return result[2:]
|
88d06adc903a511dfc53ea4c4e3f0c782482b2ca
| 24,708
|
def timestamp_to_float(t):
    """Convert a timestamp into floating seconds since 1970-01-01 UTC.

    For precise serialization see :func:`timestamp_to_bytes` /
    :func:`timestamp_from_bytes`.

    Args:
        t (Timestamp): timestamp exposing integer seconds ``s`` and
            microseconds ``us``
    Returns:
        float: seconds, with the microseconds as fractional part
    """
    fractional_seconds = t.us * 1E-6
    return t.s + fractional_seconds
|
5eb7551217a33c325ec5080a735718ae3a62e06a
| 24,711
|
def hash2(key: str, tablesize: int) -> int:
    """Position-weighted hash of a string's first three characters.

    Treats the first three characters as digits of a base-27 number
    (26 lower-case letters plus space give 27 symbols per position) and
    reduces the weighted sum modulo *tablesize*.

    Note from the original (translated): although 26**3 = 17576
    three-letter combinations exist, dictionaries show only about 2851
    distinct leading trigrams in practice, so collisions are more common
    than the range suggests.

    Precondition: the key is at least 3 characters long.
    """
    assert(len(key) >= 3)
    weighted = ord(key[0]) + 27 * ord(key[1]) + 27 ** 2 * ord(key[2])
    return weighted % tablesize
|
9ef93edf3ef754af43ed73383c8f801486bd64bf
| 24,713
|
import requests
import yaml
def bioconductor_versions():
    """Scrape the available Bioconductor versions from the Bioconductor site.

    Downloads Bioconductor's public ``config.yaml`` and returns the
    Bioconductor version numbers it declares, sorted newest first using
    semantic ordering (so '3.10' sorts above '3.9').
    """
    config_url = "https://bioconductor.org/config.yaml"
    raw_text = requests.get(config_url).text
    bioc_config = yaml.safe_load(raw_text)

    def _semantic_key(version):
        # '3.10' -> [3, 10] so numeric comparison is used per component.
        return [int(part) for part in version.split('.')]

    return sorted(bioc_config["r_ver_for_bioc_ver"], key=_semantic_key, reverse=True)
|
2c1d3b27f6977afe12ad0e1b506c051253402420
| 24,714
|
def package_is_pinned(name):
    """Quick check that a package spec carries a version pin.

    A spec counts as pinned when it contains '>', '<', or '=='.
    """
    return any(pin in name for pin in ('>', '<', '=='))
|
b5f08d704ab9c3427236c1828eda43abe88d7616
| 24,716
|
import logging
import pathlib
def collector(name, fields, filepath, append=False, format_types=None, delimiter='|'):
    """
    Returns a function for collecting rows with fields :fields: (along with
    datetime information) in a CSV log file located at :filepath:.
    We often want to collect some data about choices we are making
    while processing and transforming data. This collector function provides
    a way to do that using python's logging standard library module.
    :name: the name to be given to the logger
    :fields: list of fields that you want to collect
    :filepath: target for the logfile
    :append: (default False) if True, will append to the given filepath. Default
        behavior is to overwrite it, with column headings in the first line.
    :format_types: optional dictionary from fields to format-string types
        (like 's' or '.6f') describing how fields should be formatted in the
        CSV. Any fields not included will default to 'f'.
    :delimiter: the delimiter in the CSV. Defaults to '|' to avoid collisions.
    """
    if not format_types:
        format_types = dict()
    # Always lead with a timestamp column.
    if 'asctime' not in fields:
        fields = ['asctime'] + fields
    # NOTE(review): Logger is instantiated directly rather than via
    # logging.getLogger, so it is not registered in the global logger tree.
    logger = logging.Logger(name)
    pathlib.Path(filepath).parent.mkdir(parents=True, exist_ok=True)
    if not append:
        # Overwrite the file and emit the header row up front.
        with open(filepath, 'w') as f:
            f.write(delimiter.join(fields) + '\n')
    handler = logging.FileHandler(filepath, mode='a')
    # Default every field to plain 's' formatting; caller overrides win.
    default_types = {field: 's' for field in fields}
    types = {**default_types, **format_types}
    # Build a "%(field)s|%(other).6f|..." style logging format string.
    formatted_fields = [f"%({field}){types[field]}" for field in fields]
    formatter = logging.Formatter(delimiter.join(
        formatted_fields), "%Y-%m-%d %H:%M:%S")
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)
    def collect(**kwargs):
        # The literal message is never rendered: the formatter only uses the
        # per-field values supplied via ``extra``.
        logger.info("Collected data point for {vtd_splits}", extra=kwargs)
    return collect
|
0f6f9fd0df1e262d2b0d0a8d4965f8038722e649
| 24,717
|
import os
def get_filename(in_path):
    """Return the base file name of *in_path* without its extension.

    :param in_path: path to extract the name from
    :type in_path: str or os.PathLike
    :rtype: str
    """
    base_name = os.path.basename(in_path)
    stem, _extension = os.path.splitext(base_name)
    return stem
|
072bf079c72f47ed48d270e0ae619a6ad0473c48
| 24,718
|
import os
import six
import json
import re
def parse_sm_config():
    """Attempts to parse SageMaker configuration returning False if it can't find it"""
    # Well-known paths inside a SageMaker training container; both must
    # exist for this to be treated as a SageMaker environment.
    sagemaker_config = "/opt/ml/input/config/hyperparameters.json"
    resource_config = "/opt/ml/input/config/resourceconfig.json"
    if os.path.exists(sagemaker_config) and os.path.exists(resource_config):
        conf = {}
        conf["sagemaker_training_job_name"] = os.getenv('TRAINING_JOB_NAME')
        # Hyper-parameter searchs quote configs...
        for k, v in six.iteritems(json.load(open(sagemaker_config))):
            # Strip the surrounding quotes that hyper-parameter search adds.
            cast = v.strip('"')
            if os.getenv("WANDB_API_KEY") is None and k == "wandb_api_key":
                # Promote the API key into the environment; it is deliberately
                # kept out of the returned config dict.
                os.environ["WANDB_API_KEY"] = cast
            else:
                # Best-effort numeric coercion. NOTE(review): r'^[-\d]+$' also
                # matches strings like '1-2', for which int() would raise
                # ValueError — confirm SageMaker never emits such values.
                if re.match(r'^[-\d]+$', cast):
                    cast = int(cast)
                elif re.match(r'^[-.\d]+$', cast):
                    cast = float(cast)
                conf[k] = cast
        return conf
    else:
        return False
|
30595db0193fabec83276d3a092f09a23f11f71a
| 24,719
|
def print_network(net, out_f=None):
    """
    Count (and optionally report) the network's learnable parameters.

    :param net: the network; must expose ``parameters()`` yielding objects
        with a ``numel()`` method (e.g. torch tensors).
    :param out_f: optional writable file; receives the network repr and the
        parameter count, then is flushed.
    :return: total number of learnable parameters.
    :rtype: int
    """
    total = sum(param.numel() for param in net.parameters())
    if out_f is not None:
        out_f.write(net.__repr__() + "\n")
        out_f.write('Total number of parameters: %d\n' % total)
        out_f.flush()
    return total
|
9dcfd7da51eab385dacb90d35bfeb139ed785be1
| 24,720
|
def get_lowest_pair(pairs):
    """Find the lowest and furthest-left (x, y) pair.

    Fix over the original: among all rows sharing the minimum y value, the
    one with the smallest x is returned. The original used a single
    ``argmin`` over y, which always produced a 1-D row, so its tie-breaking
    branch on ``ndim`` was unreachable and ties were resolved arbitrarily
    (first occurrence) rather than by smallest x.

    :param pairs: numpy array of shape (n, 2) holding (x, y) rows
    :return: 1-D numpy array [x, y] of the selected vertex
    """
    y_values = pairs[:, 1]
    # All rows attaining the minimal y, then the leftmost of those.
    candidates = pairs[y_values == y_values.min()]
    return candidates[candidates[:, 0].argmin()]
|
fbfe309550c0d40f5d67c4a5efdf19af4b931251
| 24,721
|
import numpy
def _get_remaining_batch_size_by_class(
        class_to_batch_size_dict, target_values_in_memory):
    """Return the number of remaining examples needed for each class.

    :param class_to_batch_size_dict: Dictionary created by
        `_get_batch_size_by_class`, mapping class label -> desired count.
    :param target_values_in_memory: 1-D numpy array of target values
        (integer class labels) already loaded, or None if nothing is loaded.
    :return: dict with the same keys whose values are the remaining,
        non-negative counts.
    """
    # Nothing in memory yet: every class still needs its full batch.
    if target_values_in_memory is None:
        return class_to_batch_size_dict
    return {
        class_label: max(
            desired - numpy.sum(target_values_in_memory == class_label), 0
        )
        for class_label, desired in class_to_batch_size_dict.items()
    }
|
2ae5c922b37cfb2b9f388b78d9c6a172c67c0add
| 24,722
|
def _skip_first_max_pooling(inputs):
    """Whether to skip the first max pooling layer in ResNet.

    For 128x128 inputs, the first 3x3 2-stride max pooling layer is skipped.

    Args:
        inputs: The inputs passed to ResNet, laid out as [B, H, W, C].
    Returns:
        Whether to skip the first max pooling layer in ResNet.
    """
    # The height dimension (index 1 of [B, H, W, C]) decides.
    spatial_size = inputs.shape.dims[1]
    return spatial_size == 128
|
45425522c23191e014c9cb687baf1a5d123fc443
| 24,723
|
def is_hom(G, H, hom):
    """
    Check whether *hom* is a graph homomorphism from G to H.

    Works for both directed and undirected graphs. The mapping must be
    defined on exactly the vertices of G and map into vertices of H;
    every edge of G must land on an edge of H.
    """
    assert set(G.vertices()) == set(hom.keys())
    assert set(H.vertices()).issuperset(set(hom.values()))
    return all(H.has_edge(hom[e[0]], hom[e[1]]) for e in G.edges())
|
256ed3b2b496ed72bcd4e23f6f28bcc742049e15
| 24,725
|
from os import urandom
from time import time
def _gen_seed():
    """Generate a random seed byte string.

    The length is not fixed, so apply a hash function as needed.

    @return: byte string combining OS randomness with the current time
    """
    os_entropy = urandom(64)
    time_entropy = str(time()).encode()
    return os_entropy + time_entropy
|
6634c9cb8a4723491d899a28e1fe687b675e7a2f
| 24,727
|
from typing import List
from typing import Any
def lazy_content(__list: List, /, nvals: int, ellipsis: Any = ...) -> List:
"""Return list at most {nvals} items from object
Extra values are replaced with mid_value
Examples:
>>> lazy_content([1, 2, 3, 4, 5], 3)
... [1, ..., 5]
"""
if len(__list) <= nvals:
return __list
return (
__list[: nvals // 2] + [ellipsis] + __list[len(__list) - abs(nvals - 1) // 2:]
)
|
7af6fe6766c677b0bd9261c305e1e5301567045f
| 24,728
|
def clear_form(n_clicks):
    """Reset the input textarea to an empty string (click-count is ignored)."""
    return ""
|
a1d72b827f61d14d898170129d8dee75adf2a88a
| 24,729
|
def line_pre(line):
    """Pre-process one tab-separated data line.

    Collapses any columns beyond the 7th back into a single trailing text
    column, then replaces emoticon/placeholder tokens with plain words.
    """
    line_ = line.split("\t")
    if len(line_) > 7:
        # Keep the first 6 columns and merge everything after into one.
        line_pred = line_[:6]
        text = " ".join(line_[6:])
        line_pred.append(text)
        line = '\t'.join(line_pred)
    # Token -> replacement table: emote codes become words ("微笑" = smile,
    # "愤怒" = anger), placeholder percentages become "比例" (ratio), and
    # stray '%' / '#' characters are blanked out.
    str_tf = {
        "#E-s[数字x]": "微笑",
        "#E-j[数字x]": "愤怒",
        " ": " ",
        "[数字x]%": "比例",
        "[金额x]%": "比例",
        "%": " ",
        "#": " ",
    }
    for k, v in str_tf.items():
        line = line.replace(k, v)
    return line
|
edf7772ecbb4a1708d51a8ff64457f11cf9eab44
| 24,730
|
from typing import Dict
def celkove_skore(zaznamy: str) -> Dict[str, int]:
"""
>>> pprint(celkove_skore(PRIKLAD))
{'Gryffindor': 580, 'Hufflepuff': 360, 'Ravenclaw': 500, 'Slytherin': 570}
>>> pprint(celkove_skore(''))
{}
"""
vysledok = {}
casti = zaznamy.split(" ")
for i in range(len(casti)):
zapas = casti[i].strip().split(':')
if len(zapas) != 4:
continue
vysledok[zapas[0]] = vysledok.get(zapas[0], 0) + int(zapas[2])
vysledok[zapas[1]] = vysledok.get(zapas[1], 0) + int(zapas[3])
"""
if vysledok.get(zapas[0], -1) == -1:
vysledok[zapas[0]] = 0
if vysledok.get(zapas[1], -1) == -1:
vysledok[zapas[1]] = 0
vysledok[zapas[0]] += int(zapas[2])
vysledok[zapas[1]] += int(zapas[3])
"""
return vysledok
|
810910cbbc548170411c602481b2c42694720f30
| 24,731
|
def load_json(stat_arr, options):
    """
    Loads runtime data from STAT objects.

    Applies clustering, instance filtering, VBS/ratio construction, series
    selection/renaming, and ordering as directed by *options*, returning a
    list of per-series data tuples.
    """
    # preparing data
    if options['join_key']:
        stat_arr.cluster(use_key=options['join_key'])
    if options['filter']:
        stat_arr.filterinsts(options['filter'])
    data = []
    # choosing the minimal value
    min_value = 0.000000001
    if options['plot_type'] == 'scatter':
        if options['x_min']:
            min_value = max(options['x_min'], options['y_min'])
        else:
            min_value = options['y_min']  # options['y_min'] is always defined
    # Non-scatter (cactus-style) plots allow values up to 10x the timeout.
    max_value = float(options['timeout']) if options['plot_type'] == 'scatter' else 10 * float(options['timeout'])
    # make VBSes (virtual best solvers) out of the named tool groups
    if options['vbs']:
        for vbs_name, tools in options['vbs'].items():
            stat_arr.make_vbs(vbs_name, tools, options['key'])
    # processing (normal) separate data
    for stat_obj in stat_arr:
        data.append(stat_obj.get_data(options, min_value, max_value))
    if options['ratio']:
        # Recompute the cap: ratios may honor y_max when it is set.
        max_value = float(options['timeout']) if options['plot_type'] == 'scatter' else 10 * float(options['y_max'] if options['y_max'] else options['timeout'])
        for ratio_name, tools in options['ratio'].items():
            data.append(stat_arr.create_ratio(ratio_name, tools, options['key'], options['timeout'], max_value))
    if options['only']:
        # Keep only explicitly requested series (matched by first element).
        data = [d for d in data if d[0] in options['only']]
    if options['repls']:
        # Rename series per the replacement map; each entry unpacks as
        # (n, v, s, l) — presumably (name, values, stat, label); confirm
        # against get_data's return shape.
        data = [(options['repls'][n], v, s, l) if n in options['repls'] else (n, v, s, l) for n, v, s, l in data]
    # use given order, do not sort
    if (options['ordering'] == "fixed"):
        return data
    return sorted(data, key=lambda x: x[2] + len(x[1]) / sum(x[1]), reverse=not (options['ordering'] == "reverse"))
|
493c12016c09375e383dcc77c66b64b5841ec1ec
| 24,732
|
def main1(nums, k):
    """Check whether any pair of numbers in *nums* sums to *k*.

    For each element, looks for its complement in the remainder of
    the list.
    Time: O(n^2)
    Space: O(1)
    """
    return any(
        k - value in nums[index + 1:]
        for index, value in enumerate(nums)
    )
|
d57a848f658abdeb9c9f794ae403add414eb0d47
| 24,733
|
def address_case(address):
    """
    Title-cases a mailing address while keeping compass directions
    (N, S, E, W, NW, NE, SW, SE) fully upper-cased.

    Fixes over the original: direction tokens are now matched
    case-insensitively ('nw' -> 'NW'; previously only an exact
    upper-case token matched, so lower-case two-letter directions came
    out as 'Nw'), and the result is assembled with ' '.join instead of
    manual concatenation (the old trailing ``result.strip()`` discarded
    its return value and was a no-op).

    NOTE(review): the original docstring promised to leave ordinals
    ('st', 'nd', 'rd', 'th') un-cased, but the implementation never did;
    that behavior is preserved unchanged here.

    :param address: A standard physical address.
    :return: A properly title-cased address.
    """
    directions = {'N', 'S', 'E', 'W', 'NW', 'NE', 'SW', 'SE'}
    words = []
    for word in address.split():
        upper = word.upper()
        words.append(upper if upper in directions else word.capitalize())
    return ' '.join(words)
|
11bfac1f9d8b8ebf8d721f6a6609aff90d88ae09
| 24,734
|
def wrap_to_pmh(x, to2):
    """Wrap *x* into the half-open interval [-to2/2, to2/2)."""
    half = to2 / 2
    return (x + half) % to2 - half
|
29a1ce74e903fb23c316ec841097bf571f561a40
| 24,735
|
def return_index(lbound, ubound, cells, position):
    """
    Map a node position on a uniform 1-D mesh to its array index.

    lbound: Lower bound of mesh domain.
    ubound: Upper bound of mesh domain.
    cells: Number of cells along axis in mesh domain.
    position: Position of mesh node to find corresponding index for.
    returns
    Integer (fractional part truncated toward zero).
    """
    # Keep the original multiply-then-divide order so float truncation
    # behaves identically.
    scaled = (position - lbound) * cells
    return int(scaled / (ubound - lbound))
|
dcb0fb35f6ec8b5d7d948b951ce6677d27023a95
| 24,736
|
def in_parenthetical(match):
    """
    Report whether *match* sits inside an open parenthetical.

    Scans the rest of the sentence after the match; if a ')' appears
    that closes no '(' opened after the match, the match itself must be
    inside parentheses. Used by the protected-grounds logic to drop
    matches wrapped in parentheses and improve accuracy.
    """
    depth = 0
    # Walk tokens from the end of the match to the end of its sentence.
    for token_index in range(match.end, match.sent.end):
        token_text = match.doc[token_index].text
        if token_text == '(':
            depth += 1
        elif token_text == ')':
            if depth == 0:
                # Unmatched close: the match was inside a parenthetical.
                return True
            depth -= 1
    return False
|
8b27f4820004b1a9213f6342501b882cd7b017c5
| 24,737
|
import math
def cockroach_speed(s):
"""
Function which takes its speed in km per hour and returns it in cm per second, rounded down to the integer (= floored).
:param s: an float value.
:return: speed in cm per second.
"""
return math.floor((s / 3600) * 100000)
|
8071152a165c44d474e45f3537a2d01999bb385e
| 24,738
|
import ast
def unwrap(node):
"""
It a node cleanly translates to a python literal, return it instead.
"""
if isinstance(node, ast.Name):
return node.id
if isinstance(node, ast.Num):
return node.n
raise TypeError("Only handle primitive like nodes")
|
74ffeb8b93312773d997ac169f0ee4f6f6ed2bee
| 24,739
|
def validate_cpf():
    """
    Validate a CPF (Brazilian taxpayer number) read from stdin.

    A CPF is an 11-digit string validated as follows:
    1 - Multiply the first 9 digits by the descending sequence 10..2 and
        sum the results. Multiply that sum by 10 and take the remainder
        mod 11 (a remainder of 10 counts as 0). It must equal the first
        check digit (the first digit after the '-').
    2 - Take the first 10 digits (including the first check digit),
        multiply them by the descending sequence 11..2, multiply the sum
        by 10 and take the remainder mod 11; it must equal the second
        check digit.

    Prints the outcome (messages are in Portuguese) and returns False on
    invalid input; falls through (returning None) when the CPF is valid.
    """
    print('Digite um CPF para validar')
    value = str(input())
    if not value.isdigit():
        print('CPF deve conter apenas números.')
        return False
    if len(value) != 11:
        print('CPF deve ter 11 números.')
        return False
    # Reject CPFs composed of a single repeated digit (11111111111, ...)
    for n in range(0,10):
        invalid_cpf = str(n) * 11
        if value == invalid_cpf:
            print('Digite um CPF válido.')
            return False
    # First check digit
    v1 = sum( [ int(value[i]) * w for i, w in enumerate(range(10,1, -1))])
    mod1 = (v1 * 10) % 11
    dv1 = 0 if mod1 == 10 else mod1
    if dv1 != int(value[-2]):
        print('CPF inválido.', 'invalid')
        return False
    # Second check digit
    v2 = sum( [ int(value[i]) * w for i, w in enumerate(range(11,1, -1))])
    mod2 = (v2 * 10) % 11
    dv2 = 0 if mod2 == 10 else mod2
    if dv2 != int(value[-1]):
        print('CPF inválido.', 'invalid')
        return False
    print('CPF Valido')
|
915fbd67fc22110bc4776f807ad15aad501fc030
| 24,740
|
def _ean_digit2(arg):
    """Alternate implementation of EAN check digit calculation used for sanity checks.

    Digits are weighted alternately (3,1,... for odd-length input,
    1,3,... for even) and the check digit brings the weighted sum up to a
    multiple of 10.
    """
    modulus = 10
    pattern = [3, 1] if len(arg) % 2 == 1 else [1, 3]
    # Deliberately oversized so indexing can never run out of weights.
    weights = pattern * ((len(arg) * 2) + 1)
    total = sum(int(digit) * weights[pos] for pos, digit in enumerate(arg))
    check = (modulus - (total % modulus)) % modulus
    if check < 0 or check >= modulus:
        raise RuntimeError("EAN checkDigit: something wrong.")
    return str(check)
|
85d8ae6871223ef7f7458af0a44626afcbbce6bc
| 24,741
|
import huggingface_hub
def list_metrics(with_community_metrics=True, with_details=False):
"""List all the metrics script available on the Hugging Face Hub.
Args:
with_community_metrics (:obj:`bool`, optional, default ``True``): Include the community provided metrics.
with_details (:obj:`bool`, optional, default ``False``): Return the full details on the metrics instead of only the short name.
Example:
```py
>>> from datasets import list_metrics
>>> list_metrics()
['accuracy',
'bertscore',
'bleu',
'bleurt',
'cer',
'chrf',
...
]
```
"""
metrics = huggingface_hub.list_metrics()
if not with_community_metrics:
metrics = [metric for metric in metrics if "/" not in metric.id]
if not with_details:
metrics = [metric.id for metric in metrics]
return metrics
|
db833a683cb3a549ba040cd5ef3c020c7286624e
| 24,742
|
async def perform_get_light_state(client, entity_id, expected_status):
    """Fetch a light's state via the test client and verify the HTTP status.

    Returns the decoded JSON body on HTTP 200, otherwise None.
    """
    response = await client.get("/api/username/lights/{}".format(entity_id))
    assert response.status == expected_status
    if expected_status != 200:
        return None
    assert "application/json" in response.headers["content-type"]
    return await response.json()
|
5e40bd81edb7ac554793ac1d61d8694c6d030e90
| 24,743
|
def pathCount(stairs: int):
    """Number of unique ways to climb *stairs* stairs using 1 or 2 steps.

    Replaces the original exponential double recursion with an O(n)
    Fibonacci-style iteration; results are identical for all
    non-negative inputs (0 stairs counts as one way: doing nothing).

    :param stairs: non-negative number of stairs
    :return: count of distinct step sequences
    """
    # Invariant: (ways_prev, ways) = counts for (k-1, k) stairs.
    ways_prev, ways = 1, 1  # counts for 0 and 1 stairs
    for _ in range(stairs - 1):
        ways_prev, ways = ways, ways_prev + ways
    return ways
|
69992b4f1a0043935a34ae15a3370f78d0bbf274
| 24,744
|
def besties_coverage(individuals, friend_dict, relationship_list):
    """
    Calculate the proportion of the friend network reachable from
    *individuals*.

    Builds a set containing each listed individual that appears in
    *friend_dict*, plus everyone connected to each individual via the
    relationship functions in *relationship_list*, then divides by the
    total number of people in *friend_dict*.
    """
    reachable = set()
    for person in individuals:
        # Individuals themselves are counted only when known to the network.
        if person in friend_dict:
            reachable.add(person)
        # NOTE: relationships are expanded for every individual, even those
        # missing from friend_dict (matches the original behavior).
        for relationship in relationship_list:
            reachable |= relationship(person, friend_dict)
    return len(reachable) / len(friend_dict)
|
125a198347c8f252758869c8d4c4379ba928689d
| 24,747
|
def worst_case_height():
    """Solution to exercise R-11.24.

    Consider a tree T storing 100,000 entries. What is the worst-case
    height of T in the following cases?
    a. T is a binary search tree.
    b. T is an AVL tree.
    c. T is a splay tree.
    d. T is a (2, 4) tree.
    e. T is a red-black tree.
    ---------------------------------------------------------------------------
    Solution:
    ---------------------------------------------------------------------------
    a. A BST degenerates to a chain, so worst-case height is n = 100,000.
    b. An AVL tree's worst-case height is 2*logn + 2, giving 36.
    c. A splay tree can also degenerate to a chain: height 100,000.
    d. A (2, 4) tree's worst-case height is log(n+1), giving 17.
    e. A red-black tree's worst-case height is 2*log(n+1)-2, giving 32.
    """
    return True
|
ed303d77dfbfcf335ca7194064ff39d00a2103bf
| 24,749
|
def _split_comment(lineno, comment):
    """Split the multiline comment at *lineno* into a list of
    (line_number, comment_line) pairs."""
    return list(enumerate(comment.splitlines(), start=lineno))
|
07a4f160ce47d9391b90646cf8b43b18eb32ed14
| 24,751
|
def ts_path(path):
    """Make a URL path with a database and test suite embedded in them."""
    prefix = "/api/db_<string:db>/v4/<string:ts>/"
    return prefix + path
|
c007887e752af8aa4be6936f1fceb3df873d9d61
| 24,752
|
def display_formatter(input_other_type):
    """
    Coerce any value to its string form.

    Used by a QAbstractItemModel ``data`` method for Qt.DisplayRole.
    :param input_other_type: value of any type
    :return: str
    """
    return str(input_other_type)
|
8f5e619d98e666f3d6820ec119dd01b11d357555
| 24,753
|
def craft_keys():
    """Return the standard keys used for craft entries."""
    return ['market_price', 'resources']
|
e3875f3aa06c7e363fabc035a3c4e5c4a4e52932
| 24,754
|
def first_name(s):
    """
    Returns the first name in *s*.

    Examples:
        first_name('Walker White') returns 'Walker'

    Fixes over the original: ``s.find(' ')`` returned -1 when no blank was
    present, so ``s[:end_first]`` silently dropped the last character;
    ``str.partition`` returns the whole string in that case. The docstring
    examples also referred to a different function (``last_name_first``).

    Parameter s: a name 'first-name last-name'
    Precondition: s is a string 'first-name last-name' with one or more
    blanks between the two names.
    """
    first, _sep, _rest = s.partition(' ')
    return first
|
ca37032ac865981ff83d8f076b3c827334e2fc58
| 24,757
|
def isEulerian(self):
    """
    Check whether the graph is Eulerian.

    By Euler's theorem the graph is Eulerian iff it is connected and
    every vertex has an even degree.

    Args:
        None
    Returns:
        Boolean: True when the graph is Eulerian; False otherwise
    Raises:
        None
    """
    if not self.isConnected():
        return False
    return all(
        self.degree(vertex) % 2 == 0
        for vertex in range(self.nodesAmount)
    )
|
e87d4731e7c1de47f3bc770bb3646fdc84c4f792
| 24,758
|
import os
def get_filepaths(directory, file_type = "*"):
"""
utils return file paths under directory
Modify filtering file type
:param directory:
:return:
"""
file_paths = []
for root, directories, files in os.walk(directory):
for filename in files:
if file_type == '*':
filepath = os.path.join(root, filename)
file_paths.append(filepath)
else:
if os.path.splitext(filename)[1].lower() == '.' + file_type:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
return file_paths
|
f68f55caabf63bb2796135f8563b6091cc45c82d
| 24,760
|
def replace_keys(d, old, new):
    """Return a copy of *d* with *old* replaced by *new* in every key."""
    renamed = {}
    for key, value in d.items():
        renamed[key.replace(old, new)] = value
    return renamed
|
44e44308741e37a3b499aac4518cff74f087a589
| 24,761
|
def __discount_PA(i, n):
    """
    Present worth of an annuity factor.

    Factor: (P/A, i, N)
    :param i: interest rate per period
    :param n: number of periods
    :return:
    Cash Flow discount factor ((1+i)^n - 1) / (i * (1+i)^n)
    """
    growth = (1 + i) ** n
    return (growth - 1) / (i * growth)
|
3920c98bf73c57398c8e62f92c33ca0a97ac6555
| 24,762
|
def unrequitted_friendships_v2(G):
    """Alternative answer to unrequitted friendships problem. By @schwanne.

    Counts directed edges (u, v) that have no reciprocal edge (v, u).
    """
    return sum(1 for u, v in G.edges() if not G.has_edge(v, u))
|
a038dbac4caf8360dfb7497a4f828d13c1fb5128
| 24,765
|
from typing import Union
import io
def _hash(fn, buffer: Union[io.StringIO, io.BytesIO]):
    """Compute the hex checksum of *buffer* using hash factory *fn*.

    The buffer is rewound and consumed in 4 KiB chunks.
    """
    buffer.seek(0)
    digest = fn()
    while True:
        chunk = buffer.read(4096)
        if chunk == b'':
            break
        digest.update(chunk)
    return digest.hexdigest()
|
c2b7b3ba1487273b7759d396d29e0aece15b5efa
| 24,766
|
import inspect
def _num_required_args(func):
""" Number of args for func
>>> def foo(a, b, c=None):
... return a + b + c
>>> _num_required_args(foo)
2
>>> def bar(*args):
... return sum(args)
>>> print(_num_required_args(bar))
None
borrowed from: https://github.com/pytoolz/toolz
"""
try:
spec = inspect.getfullargspec(func)
if spec.varargs:
return None
num_defaults = len(spec.defaults) if spec.defaults else 0
return len(spec.args) - num_defaults
except TypeError:
return None
|
dacc2ed0165ea8bc1e4be45bf2a9477778d2fe45
| 24,768
|
def expand(values, index, padding=128):
    """ Grow the list *values* in place so that values[index] is valid.

    Appends zeros (plus *padding* extra slack) when needed, then returns
    the same list. An AssertionError is raised for a negative index.
    """
    assert index >= 0, f"Oops: negative index in expand(values, index={index})"
    shortfall = index - len(values) + 1
    if shortfall > 0:
        values += [0] * (shortfall + padding)
    return values
|
6f4d09244972e7af28b6119706a8095bedbc629d
| 24,769
|
from typing import Dict
from typing import Any
def _augment_page_post(page_post: Dict[str, Any]) -> Dict[str, Any]:
"""
Augment page posts to reflect changes in version 3.3
Once we confirm that all page posts are downloaded using the newest version, we can drop this augmentation
and perform it in platform views
Source: https://developers.facebook.com/docs/graph-api/changelog/version3.3
"""
if 'attachments' in page_post and 'data' in page_post['attachments'] and len(page_post['attachments']['data']) == 1:
data = page_post['attachments']['data'][0]
if 'title' in data:
page_post['caption'] = data['title']
page_post['name'] = data['title']
if 'description' in data:
page_post['description'] = data['description']
if 'url_unshimmed' in data:
page_post['link'] = data['url_unshimmed']
if 'target' in data and 'id' in data['target']:
page_post['object_id'] = data['target']['id']
if 'media' in data and 'source' in data['media']:
page_post['source'] = data['media']['source']
if 'type' in data:
page_post['type'] = data['type']
return page_post
|
fb455249c4302446a6ac9776f04cd0c4a0d4baeb
| 24,771
|
import typing
import dataclasses
def dataclass_from_dict(cls: type, src: typing.Dict[str, typing.Any]) -> typing.Any:
"""
Utility function to construct a dataclass object from dict
"""
field_types_lookup = {field.name: field.type for field in dataclasses.fields(cls)}
constructor_inputs = {}
for field_name, value in src.items():
if dataclasses.is_dataclass(field_types_lookup[field_name]):
constructor_inputs[field_name] = dataclass_from_dict(field_types_lookup[field_name], value)
else:
constructor_inputs[field_name] = value
return cls(**constructor_inputs)
|
f91ddb784d3a0ef4a2c5d78205f4a7908b79a1b3
| 24,772
|
import base64
def fromProtobufString(protobuf_string, protoClass):
"""
Deserialise base-64 encoded native protobuf string
into an instance of protobuf class
"""
msg = protoClass()
msg.ParseFromString(base64.b64decode(protobuf_string))
return msg
|
4241e10d891e8001f517b2cca5ab6a4cb820ca49
| 24,773
|
from typing import Any
def pm_assert(
condition: Any,
exc: Any=Exception,
context: Any=None,
msg: str="",
) -> Any:
""" Generic assertion that can be used anywhere
@condition: A condition to assert is true
@exc: Raise if @condition is False
@context: The relevant data structures
@msg: Any additional text to include
"""
if not condition:
raise exc(f"{msg}\n{context}")
return condition
|
a99f0ed3f460f95c84a1391664cc2bc169f17201
| 24,775
|
from typing import OrderedDict
def node_setup_files_list_table_format(result):
"""Format list of node setup task files"""
table = []
for item in result:
row = OrderedDict()
row['Name'] = item['name']
row['Is directory'] = 'yes' if item['is_directory'] else 'no'
row['Size'] = '' if item['size'] is None else (item['size'] or '0')
table.append(row)
return table
|
2bc7c2f8099364bab162d03791dd417e8cb46e21
| 24,776
|
import argparse
def handle_arguments():
"""
Function used to parse script arguments
:return args: commandline arguments for script
"""
channel_choices = [
'Fp1', 'AF3', 'F7', 'F3', 'FC1', 'FC5', 'T7', 'C3', 'CP1', 'CP5', 'P7', 'P3',
'Pz', 'PO3', 'O1', 'Oz', 'O2', 'PO4', 'P4', 'P8', 'CP6', 'CP2', 'C4', 'T8',
'FC6', 'FC2', 'F4', 'F8', 'AF4', 'Fp2', 'Fz', 'Cz'
]
parser = argparse.ArgumentParser(description='Train a model to classify spectrograms')
parser.add_argument('-c', '--channel', dest='channel', required=True, choices=channel_choices,
help='Flag used to determine what channel we want to create a model for')
parser.add_argument('-s', '--set', dest='size', required=True,
help='Flag used to determine the amount of experiments to import for train/test data')
parser.add_argument('-i', '--image-size', dest='image_size', required=True,
help='Flag used to determine the length and width to resize the data spectrogram images')
parser.add_argument('-e', '--epochs', dest='epochs', required=True,
help='Flag used to determine the number of epochs for training')
args = parser.parse_args()
return args
|
bb5e47f4e51d0f074e3925898e7793bf222697b4
| 24,777
|
def validate_time_course(simulation):
    """ Validate a time course
    Args:
        simulation (:obj:`UniformTimeCourseSimulation`): simulation
    Returns:
        :obj:`tuple`:
            * nested :obj:`list` of :obj:`str`: nested list of errors (e.g., required ids missing or ids not unique)
            * nested :obj:`list` of :obj:`str`: nested list of warnings (currently always empty)
    """
    errors = []
    warnings = []
    # The simulation must start at t = 0.
    if simulation.initial_time != 0:
        errors.append(['Initial time must be 0, not `{}`.'.format(simulation.initial_time)])
    # Output window endpoints must be whole numbers.
    if simulation.output_start_time != int(simulation.output_start_time):
        errors.append(['Output start time must be an integer, not `{}`.'.format(simulation.output_start_time)])
    if simulation.output_end_time != int(simulation.output_end_time):
        errors.append(['Output end time must be an integer, not `{}`.'.format(simulation.output_end_time)])
    # The implied per-step interval must itself be an integer (within
    # floating-point tolerance).
    step_size = (simulation.output_end_time - simulation.output_start_time) / simulation.number_of_steps
    if abs(step_size - round(step_size)) > 1e-8:
        msg = (
            'The interval between the output start and time time '
            'must be an integer multiple of the number of steps, not `{}`:'
            '\n  Output start time: {}'
            '\n  Output end time: {}'
            '\n  Number of steps: {}'
        ).format(step_size, simulation.output_start_time, simulation.output_end_time, simulation.number_of_steps)
        errors.append([msg])
    return (errors, warnings)
|
4b89308ce29c0e41e301ef81dab6551b504c53b0
| 24,778
|
def add_toc(source: str) -> str:
    """Prepend the Confluence table-of-contents macro to the page source."""
    toc_macro = '<ac:structured-macro ac:macro-id="1" '\
        'ac:name="toc" ac:schema-version="1"/>\n'
    return toc_macro + source
|
87617ef78c3240f6e8f2518075351f421e2fdb4a
| 24,779
|
def resize_halo_datasets(halos_dset, new_size, write_halo_props_cont, dtype):
    """
    Resize the halo dataset(s) to *new_size* rows.

    Parameters
    -----------
    halos_dset: dict of per-property datasets (SOA layout) or a single
        compound dataset, required
    new_size: scalar integer, required
    write_halo_props_cont: boolean, required
        Controls if the individual halo properties are written as distinct
        datasets such that any given property for ALL halos is written
        contiguously (structure of arrays, SOA).
    dtype: numpy datatype; its ``names`` enumerate the per-property datasets

    Returns
    -------
    Returns ``True`` on successful completion
    """
    if not write_halo_props_cont:
        # Single compound dataset: one resize call.
        halos_dset.resize((new_size, ))
        return True
    # SOA layout: resize every per-property dataset.
    for property_name in dtype.names:
        halos_dset[property_name].resize((new_size, ))
    return True
|
a5461a776a0991eda04fc5d0e1d2a2a14e6e1f5f
| 24,780
|
import yaml
import sys
def load_class(mod, name):
"""
Load the class from mod and name specification.
"""
toks = name.split("?")
params = {}
if len(toks) == 2:
for p in toks[-1].split(","):
ptoks = p.split("=")
params[ptoks[0]] = yaml.safe_load(ptoks[1])
elif len(toks) > 2:
print("Bad handler specification")
sys.exit(-1)
mod = __import__(mod, globals(), locals(), [toks[0]], 0)
return getattr(mod, toks[0])(**params)
|
59bc3f8082879ee4728ef0fc9fdae2693670f9b8
| 24,782
|
def checksum(sentence):
    """Calculate and return the checksum for the given NMEA sentence.

    XORs the code points of every character, truncated to one byte.
    """
    crc = 0
    for character in sentence:
        crc ^= ord(character)
    # Masking once at the end is equivalent to masking per iteration,
    # since AND distributes over XOR.
    return crc & 0xFF
|
841c35f11c5f4a46cfb62efc04379fa54772e739
| 24,784
|
def calculate_hash_value(string):
    """Helper that derives a hash value from a string's first two
    characters: ord(string[0]) * 100 + ord(string[1])."""
    first, second = string[0], string[1]
    return ord(first) * 100 + ord(second)
|
f64a53bfc049b5b3a6c14e1957f19a4f5ebe68a1
| 24,785
|
import calendar
import time
def seconds_after_epoch():
    """
    Current UTC time as whole seconds since the Unix epoch.
    """
    utc_now = time.gmtime()
    return int(calendar.timegm(utc_now))
|
97c8955b39eefd3f8eae09bda56c14470d6fd64e
| 24,787
|
import re
import unicodedata
def clean_name(col_name: str) -> str:
"""Clean column name. Adapted from ericmjl/pyjanitor."""
col_name = str(col_name).lower()
fixes = [(r"[ /:,?()\.-]", "_"), (r"['’]", "")]
for search, replace in fixes:
col_name = re.sub(search, replace, col_name)
col_name = "".join(c for c in col_name if c.isalnum() or "_" in c)
col_name = "".join(
letter
for letter in unicodedata.normalize("NFD", col_name)
if not unicodedata.combining(letter)
)
col_name = col_name.strip("_")
return col_name
|
15e53da036c0dd972bc579f58266ebbd208c7640
| 24,788
|
def pa_test(a, b, c, d, e = None, f = None):
    """ ============ DOCTEST FOOD ============
    >>> pa_test(1, 2, 3, 4, 5)
    (1, 2, 3, 4, 5, None)
    >>> pa = pa_test(_1, 2, 3, 4, 5)
    >>> pa(1)
    (1, 2, 3, 4, 5, None)
    """
    # Echo all six arguments back as a tuple.  This exists purely as
    # doctest fodder for a partial-application helper: the `_1` in the
    # doctest is presumably a placeholder object from that library that
    # turns the call into a deferred partial -- TODO confirm its source.
    return (a, b, c, d, e, f)
|
e4dc81aa8a65a52df78284225fb7d0a2c8d10f53
| 24,790
|
def _apply_affine_scalar(i, j, k, affine_matrix):
"""
Applies an affine matrix to the given coordinates. The 3 values i, j and k must be scalars. The affine matrix consists of a 3x3 rotation matrix and a 3x1 transposition matrix (plus the last row).
Parameters
----------
i, j, k: numeric scalars
The source coordinates.
affine_matrix: numpy 2D float array with shape (4, 4)
The affine matrix
Returns
-------
The coordinate vector after applying the matrix.
"""
rotation = affine_matrix[:3, :3]
translation = affine_matrix[:3, 3]
return rotation.dot([i, j, k]) + translation
|
a548ac37cad242549e7f40ab799a2361dc2733ff
| 24,791
|
import argparse
def parse_args(args):
    """
    Parse command-line arguments for the release script.

    The user must supply the path to the PIVT Splunk app directory; an
    optional ``--name`` overrides the output archive filename.

    :param args: list of command-line tokens, e.g. ['C:/Users/alfano/pivt/app']
    :return: argparse.Namespace with ``root`` and ``name`` attributes
    """
    arg_parser = argparse.ArgumentParser(description='Update and Compress Pivt app for release')
    arg_parser.add_argument('root', help='path to PIVT Splunk app directory')
    arg_parser.add_argument('--name', default="pivt-splunk-app.tar.gz", help='name of the .tar.gz file')
    return arg_parser.parse_args(args)
|
1e5473bdcdaf92430ff6d635ba95a52ae6204527
| 24,793
|
import subprocess
def execute_stdout(command):
    """Execute *command* through the shell and return (exit_code, output).

    :param command: shell command line to run (stderr is folded into stdout)
    :return: tuple of (int exit code, str decoded output)

    Fix: the failure branch previously returned ``excp.output`` as raw
    bytes while the success branch returned decoded text; both branches
    now consistently return ``str``.
    """
    try:
        output = subprocess.check_output([command], stderr=subprocess.STDOUT,
                                         shell=True)
        return 0, output.decode()
    except subprocess.CalledProcessError as excp:
        # Decode here too so callers get str in both branches.
        return excp.returncode, excp.output.decode()
|
7a9d03eccf9f4c04b78328ba90054914ad26dfd4
| 24,796
|
import re
def get_dict(post, key):
    """ Extract from POST PHP-like arrays as dictionary.
    Example usage::
        <input type="text" name="option[key1]" value="Val 1">
        <input type="text" name="option[key2]" value="Val 2">
        options = get_dict(request.POST, 'option')
        options['key1'] is 'Val 1'
        options['key2'] is 'Val 2'
    """
    extracted = {}
    if not post:
        return extracted
    pattern = re.compile('^([a-zA-Z_]\w+)\[([a-zA-Z_\-][\w\-]*)\]$')
    for field_name, field_value in post.items():
        matched = pattern.match(field_name)
        # Skip names that are not "name[subkey]" and empty values.
        if matched is None or not field_value:
            continue
        if matched.group(1) == key:
            extracted[matched.group(2)] = field_value
    return extracted
|
35649820407161432f5e3d01816376e67b19ea82
| 24,799
|
def _create_64bit_mask(vals):
""" create a 64bit int value that corresponds to the bytes of the mask """
if len(vals) != 8:
# simple error check for number of vals to convert
return 0
bmask = 0
for i, val in enumerate(vals):
bmask += (val << i*8)
return bmask
|
11ecf5e1210a9f9c2b9bb52662aa8b37e804248e
| 24,800
|
def get_ranking19():
    """
    Return the ranking with ID 19: alternatives a5 down to a1 with
    evenly spaced scores from 1.0 to 0.0 in steps of 0.25.
    """
    # 0.25 steps are exact binary fractions, so the generated floats are
    # bit-identical to the literal table this replaces.
    return [("a%d" % (5 - idx), 1.0 - idx * 0.25) for idx in range(5)]
|
56b473592e586a37bc47863e322447bb5c0ca0e4
| 24,802
|
import json
def json_load(file):
    """Read and deserialize JSON from the file at path *file* (UTF-8)."""
    with open(file, 'r', encoding='utf8') as handle:
        return json.load(handle)
|
9d2ef792a9d2b201a5608601057f4f76e7905662
| 24,804
|
from typing import Dict
from typing import Any
def severity(event: Dict[str, Any]) -> str:
    """Map the `score` of a Qualys event to a `severity` string.

    Score semantics: 0 known good, 1 remediated, 2-4 suspicious low,
    5-7 suspicious medium, 8-10 malicious (file/process/network).
    Events without a `score` key, or with an unrecognized score, yield
    'Unknown'.
    """
    if 'score' not in event:
        return 'Unknown'
    score_to_severity = {
        '0': 'None',
        '1': 'High',
        '2': 'Low',
        '3': 'Low',
        '4': 'Low',
        '5': 'Medium',
        '6': 'Medium',
        '7': 'Medium',
        '8': 'High',
        '9': 'High',
        '10': 'High'
    }
    return score_to_severity.get(event['score'], 'Unknown')
|
4952bbf5b76f7d5f7ab16d1ef9f475a9a5f54582
| 24,805
|
def variable_om_cost_rule(mod, prj, tmp):
    """
    Variable O&M cost expression for a hybrid variable/storage project.

    Cost is incurred on all power produced (including what's curtailed):
    capacity * availability derate * capacity factor * per-MWh cost.
    """
    # Operand order is preserved so the resulting expression tree
    # matches the original.
    capacity = mod.Capacity_MW[prj, mod.period[tmp]]
    derate = mod.Availability_Derate[prj, tmp]
    cap_factor = mod.gen_var_stor_hyb_cap_factor[prj, tmp]
    per_mwh_cost = mod.variable_om_cost_per_mwh[prj]
    return capacity * derate * cap_factor * per_mwh_cost
|
5036babb949ce093a993b02ca7d66d7119e85dfe
| 24,807
|
def get_fitting_size(db):
    """Return the flattened record size of each unlimited variable.

    Only variables whose leading dimension is unlimited are included
    ("we only fit unlimited data"); the value is the product of the
    sizes of the remaining dimensions.
    """
    sizes = {}
    for variable in db.get_keys():
        dims = db.get_dimension(variable)
        if not dims[0].isunlimited():
            continue
        flat_size = 1
        for dim in dims[1:]:
            flat_size *= dim.size
        sizes[variable] = flat_size
    return sizes
|
20ecb2f2297f958aed9dd1b457b594c9f87fa39f
| 24,809
|
def get_diff(df, column1, column2):
    """Compute the element-wise difference between two columns.
    Args:
        df: Pandas DataFrame.
        column1: First column.
        column2: Second column.
    Returns:
        Series of per-row values of ``column1 - column2``.
    Usage:
        df['item_quantity_vs_mean'] = get_diff(df, 'item_quantity', 'item_code_item_quantity_mean')
    """
    return df[column1].sub(df[column2])
|
1fc5ec361cfdd28775257980c28b4924fdab4eeb
| 24,810
|
def int_to_bytes(number: int) -> bytes:
    """Convert a non-negative integer to its big-endian byte string.

    Uses the minimal number of bytes, except that 0 encodes as
    ``b'\\x00'`` rather than the empty byte string (``0.bit_length()``
    is 0, so the original length formula produced ``b''`` for zero).

    Raises OverflowError for negative input (from ``int.to_bytes``).
    """
    length = (number.bit_length() + 7) // 8 or 1
    return number.to_bytes(length, byteorder='big')
|
1158b63b71774c6202aa4c96dae54eca2fae2c0a
| 24,811
|
def unix_sock_file(request):
    """Check that bound UNIX socket address is stored in server.

    Resolves the parametrized ``unix_<param>_sock`` fixture by name.
    """
    fixture_name = f'unix_{request.param}_sock'
    return request.getfixturevalue(fixture_name)
|
c476f91ca756e7246652b19a21a1ab3c67004c51
| 24,812
|
import os
def parse(text: str) -> dict:
    """
    Parse environment-listing *text* (e.g. obtained through the console
    interface) into a dictionary.

    Lines look like ``NAME=value``; a value ending in a backslash
    continues on the next line (joined with ``os.linesep``).  Blank
    lines, the ``Environment size:`` footer, and malformed lines are
    ignored.

    A :py:exc:`ValueError` is raised if no environment variables are found.
    """
    env = {}
    current_name = None
    continuing = False
    for line in text.splitlines():
        if continuing:
            # Append the continuation to the previous variable's value.
            env[current_name] += os.linesep + line
            continuing = line.endswith('\\')
            continue
        if not line or line.startswith('Environment size: '):
            continue
        name, sep, value = line.partition('=')
        if not sep:
            # Line without '=': resilient skip of malformed input.
            continue
        env[name] = value
        current_name = name
        continuing = value.endswith('\\')
    if not env:
        raise ValueError('No environment variables found')
    return env
|
ad1cc3fc20524703c4cdebe38cdc5204eb0a87aa
| 24,813
|
import random
def DEFAULTPOLICY(state):
    """Simulate from *state* with a uniformly random policy.

    :param state: starting state; must provide terminal(), next_state()
        and reward()
    :return: reward of the terminal state reached by the rollout
    """
    # Roll out by picking a random action in [0, 3] until terminal.
    while state.terminal() == False:
        action = random.randint(0, 3)
        state = state.next_state(action)
    return state.reward()
|
99a281406c5a293a9040b2c772ca60fdc7b6a2eb
| 24,814
|
import os
def get_ifname_by_pci_address(pci_addr, pf_interface=False):
"""Get the interface name based on a VF's pci address
The returned interface name is either the parent PF's or that of the VF
itself based on the argument of pf_interface.
"""
if pf_interface:
dev_path = "/sys/bus/pci/devices/%s/physfn/net" % pci_addr
else:
dev_path = "/sys/bus/pci/devices/%s/net" % pci_addr
try:
dev_info = os.listdir(dev_path)
return dev_info.pop()
except Exception:
return None
|
5149d32c1a3dbed034c07de17f1fea14ce16a82c
| 24,815
|
def assign_json_subfields(field, subfields, data):
    """Promote dict *subfields* of column *field* to their own columns.

    For each subfield name, a new column is added to *data* holding the
    subfield's value per row ('' when missing or when the cell is falsy).
    Returns the mutated DataFrame.
    """
    for name in subfields:
        def _extract(row, key=name):
            cell = row[field]
            return cell.get(key, '') if cell else ''
        data[name] = data.apply(_extract, axis=1)
    return data
|
600482fdeea52befebdc1c5a29cc3fc0d14e6c2d
| 24,816
|
import hashlib
from pathlib import Path
def label_generator(predictions, processor, filename):
    """Generate a unique image label: predicted class + hash + extension.
    Parameters
    ----------
    predictions : np.array
        Model output with per-target probabilities.
    processor : keras.util
        Model-specific prediction decoding utility.
    filename : str
        Path to image or image name.
    Returns
    -------
    str
        ``"<class>_<sha1-of-predictions><original suffixes>"``
    """
    # SHA-1 of the raw prediction buffer keeps filenames unique.
    digest = hashlib.sha1(predictions).hexdigest()
    # Top-1 decoded class name from the model's decoder.
    class_name = processor(predictions, top=1)[0][0][1]
    # Preserve compound extensions like ".tar.gz".
    extension = "".join(Path(filename).suffixes)
    return f"{class_name}_{digest}{extension}"
|
8baba1aad6ad9a7f4d69b499a76b1b4079e79cb5
| 24,817
|
import logging
import sys
def _get_stream_handler():
    """Create a console log handler writing to stdout."""
    stream_obj = logging.StreamHandler(sys.stdout)
    # NOTE(review): "TRACE" is not a built-in logging level; this assumes
    # the application registers a custom TRACE level (logging.addLevelName)
    # before this runs -- otherwise setLevel raises ValueError. Confirm.
    stream_obj.setLevel("TRACE")
    return stream_obj
|
6000747718b2a8f75eff2a00feaa6754c0958326
| 24,818
|
import subprocess
import pipes
def exists_remote(host, path, silent=False):
    """Test if a file exists at path on a host accessible with SSH.

    Prints a progress dot unless *silent*.  Returns True when the remote
    ``test -f`` succeeds, False on exit codes 1 (missing) or 255 (ssh
    error); any other exit status falls through and returns None.
    """
    ssh_command = [
        "ssh",
        "-oStrictHostKeyChecking=no",
        "-oIdentitiesOnly=yes",
        "-i~/.ssh/id_cah",
        host,
        "test -f {}".format(pipes.quote(path)),
    ]
    status = subprocess.call(
        ssh_command,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    if not silent:
        print(".", end = "", flush=True)
    if status == 0:
        return True
    if status in (1, 255):
        return False
|
8716ce609f249c9ed46afea1496c3161d55aee2c
| 24,819
|
import unicodedata
def unicode_to_ascii(s):
"""Transforms an ascii string into unicode."""
normalized = unicodedata.normalize('NFD', s)
return ''.join(c for c in normalized if unicodedata.category(c) != 'Mn')
|
0a286bd19b6492976eeb206f43231ec079f468c2
| 24,821
|
import re
def parse_csl_item_note(note):
"""
Return the dictionary of key-value pairs encoded in a CSL JSON note.
Extracts both forms (line-entry and braced-entry) of key-value pairs from "cheater syntax"
https://github.com/Juris-M/citeproc-js-docs/blob/93d7991d42b4a96b74b7281f38e168e365847e40/csl-json/markup.rst#cheater-syntax-for-odd-fields
"""
note = str(note)
line_matches = re.findall(
r'^(?P<key>[A-Z]+|[-_a-z]+): *(?P<value>.+?) *$', note, re.MULTILINE)
braced_matches = re.findall(
r'{:(?P<key>[A-Z]+|[-_a-z]+): *(?P<value>.+?) *}', note)
return dict(line_matches + braced_matches)
|
2a9076c646cd3efff12a4f2bbc3d639e2104a5b3
| 24,822
|
def cli(ctx, user_id, deleted=False):
    """Display information about a user.

    :param ctx: invocation context carrying the backing API client as
        ``ctx.gi`` (presumably a bioblend/Galaxy instance -- confirm)
    :param user_id: ID of the user to look up
    :param deleted: whether to include deleted users in the lookup

    Output:
        a dictionary containing information about the user
    """
    return ctx.gi.users.show_user(user_id, deleted=deleted)
|
9806cdb59f6797a4a08d775ecb555936956746e4
| 24,824
|
def create_path_env_var(new_entries, env=None, env_var='PATH', delimiter=':', prepend=False):
    """Join path entries, combining with an environment variable if specified.

    The entries in *new_entries* are appended to (or, with *prepend*,
    placed before) any existing value of *env_var* in *env*, and the
    result is returned as a single delimiter-joined string.
    """
    environment = {} if env is None else env
    existing = environment.get(env_var, None)
    current = [] if existing is None else list(existing.split(delimiter))
    additions = list(new_entries)
    combined = additions + current if prepend else current + additions
    return delimiter.join(combined)
|
2bcd3a70c31abf5fae4e93633d79d61d7d8185ad
| 24,825
|
def nested_contains(L, value):
    """Return True if *value* occurs anywhere in the arbitrarily nested
    list *L*, comparing non-list elements with ``==``.

    Improvement: uses :func:`any` over a generator so the search
    short-circuits on the first hit, instead of materializing a full
    list of boolean results at every nesting level (the previous
    ``True in [...]`` form -- and even its commented-out ``any([...])``
    alternative -- evaluated every element eagerly).
    """
    return any(
        nested_contains(item, value) if isinstance(item, list) else item == value
        for item in L
    )
|
72767582e451663329c64ec25052c8174daa4469
| 24,826
|
def omp_get_thread_limit():
    """
    The omp_get_thread_limit routine returns the maximum number
    of OpenMP threads available to participate in the current
    contention group.
    """
    # Serial (non-OpenMP) stub: only a single thread is ever available.
    return 1
|
dad77e1fa6e608652ce67e9c96e8b00a6b1e59b4
| 24,827
|
import os
def get_script_dir(fobj):
"""Determine the directory the script is in.
Parameters
----------
fobj : str
Path to file to check
Returns
-------
Directory the file is in : str
"""
return os.path.dirname(os.path.realpath(fobj))
|
b75bc38a665ccbb257167ed7b6091940cea4c5f1
| 24,829
|
import math
def log10(v):
"""
LOG10 num
outputs the common logarithm of the input.
"""
return math.log10(v)
|
fee9a6a45ff4402894b98a61cd09a13125b2f6b4
| 24,831
|
from functools import reduce
def singleNumber(nums):
"""
:type nums: List[int]
:rtype: int
"""
return reduce(lambda x, y: x ^ y, nums)
|
9387b456a5aa62e60bd55a100728e7fa39ae8faa
| 24,832
|
def field_is_required(field):
    """Return the field's `required` attribute, or False if it has none
    (e.g. the field is nullable or not a form field at all)."""
    try:
        return field.required
    except AttributeError:
        return False
|
15eaf261ef0316c06272b1c38e0ec65bd16e77e6
| 24,834
|
import os
def get_mtab_path(synphot_root):
"""
Get the full path to the synphot mtab directory.
"""
return os.path.join(synphot_root, "mtab")
|
e802376227cdd7a01191cb56cd1f72f39ff676cd
| 24,835
|
from datetime import datetime
def date_to_datetime(value):
"""Get datetime value converted from a date or datetime object
:param date/datetime value: a date or datetime value to convert
:return: datetime; input value converted to datetime
>>> from datetime import date, datetime
>>> from pyams_utils.date import date_to_datetime
>>> value = date(2016, 11, 15)
>>> date_to_datetime(value)
datetime.datetime(2016, 11, 15, 0, 0)
>>> value = datetime(2016, 11, 15, 10, 13, 12)
>>> value
datetime.datetime(2016, 11, 15, 10, 13, 12)
>>> date_to_datetime(value) is value
True
>>> date_to_datetime(None) is None
True
"""
if not value:
return None
if isinstance(value, datetime):
return value
return datetime(value.year, value.month, value.day)
|
5ba2d15d8169d923b87181545eb910f1715ac4c2
| 24,836
|
def filter_set_options(options):
    """Return a new dict containing only the truthy (set) options."""
    set_options = {}
    for option_name, option_value in options.items():
        if option_value:
            set_options[option_name] = option_value
    return set_options
|
e6605af08626189a501895973c59d6dd9956cb55
| 24,838
|
def _line_winner(cells):
    """Return (True, mark) if *cells* is non-empty and uniform, else None."""
    if cells and all(cell == cells[0] for cell in cells):
        return (True, cells[0])
    return None

def check_for_victory(board):
    """Check whether a player has completed a full row, column, or diagonal.

    :param board: square 2D list of marks (e.g. 'X' / 'O')
    :return: (True, winning_mark) on a win, otherwise (False, " ")

    Fix: the original anti-diagonal loop (``while min_val < col_len - 1``)
    stopped one cell short, so only the first n-1 anti-diagonal cells were
    compared and a partial match was declared a win.
    """
    n_rows = len(board)
    n_cols = len(board[0])
    # Rows
    for row in board:
        won = _line_winner(row)
        if won:
            return won
    # Columns
    for col in range(n_cols):
        won = _line_winner([board[row][col] for row in range(n_rows)])
        if won:
            return won
    # Diagonal from top-left to bottom-right
    won = _line_winner([board[i][i] for i in range(n_rows)])
    if won:
        return won
    # Diagonal from top-right to bottom-left (now includes the last cell)
    won = _line_winner([board[i][n_cols - 1 - i] for i in range(n_rows)])
    if won:
        return won
    # No one has won yet
    return (False, " ")
|
dc786d3b5a3790ab0c20fdeb9d8c677e2d262241
| 24,839
|
def not_in_dict_or_none(dict, key):
    """
    Check if a key exists in a map and if it's not None
    :param dict: map to look for key
    :param key: key to find
    :return: True if key is missing from dict or maps to None
    """
    # Single boolean expression replaces the redundant if/else that
    # returned True/False literals.  (Parameter name `dict` shadows the
    # builtin but is kept for interface compatibility.)
    return key not in dict or dict[key] is None
|
2bc3f2194b82e978ab8edb2ffaac7a88a58e9c9e
| 24,841
|
def sort_qparams(v):
    """Sort query params as subject, predicate, object, page, as the current ldf-client require about this particular order..."""
    # Sort-key lookup table; anything unrecognized sorts last (4).
    rank_by_name = {'subject': 0, 'predicate': 1, 'object': 2, 'page': 3}
    return rank_by_name.get(v[0], 4)
|
0c137781bd81e004cc44da27950dff3232041791
| 24,842
|
def to_digits_base10(n):
    """
    Return the base-10 digits of a non-negative integer, most
    significant first.

    Fix: n == 0 now returns [0]; the original loop never executed for
    zero and returned an empty list.
    """
    if n == 0:
        return [0]
    digits = []
    remaining = n
    while remaining > 0:
        # divmod yields the quotient and the current least-significant digit.
        remaining, digit = divmod(remaining, 10)
        digits.append(digit)
    return digits[::-1]
|
5acc6a2ef1e10bc3142371944232c7d1bcad3a32
| 24,843
|
def validateYear(year):
    """ Validate the year value: non-negative and at least four digits. """
    return year >= 0 and len(str(year)) >= 4
|
0d6d6edb0d1d3f96b3621416be532f8f21e32f91
| 24,844
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.