content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from datetime import datetime
def get_date(data):
    """Parse an ISO-style date string such as "2019-04-22".

    Returns a ``datetime`` on success, or ``None`` when the string does
    not match the ``%Y-%m-%d`` format.
    """
    try:
        return datetime.strptime(data, "%Y-%m-%d")
    except ValueError:
        # Unparseable input is reported as "no date" rather than an error.
        return None
import time
def calc_time(func, *args, **kwargs):
    """Calculate execution time of specified function.

    Args:
        func: Callable to time.
        *args, **kwargs: Arguments forwarded to ``func``.

    Returns:
        Elapsed time in seconds (float). The wrapped function's own
        return value is discarded.
    """
    # perf_counter is monotonic and higher-resolution than time.time, so
    # timings cannot go negative across system clock adjustments.
    start_time = time.perf_counter()
    func(*args, **kwargs)
    return time.perf_counter() - start_time
def _nus_uuid(short: int) -> str:
"""Get a 128-bit UUID from a ``short`` UUID.
Args:
short: The 16-bit UUID.
Returns:
The 128-bit UUID as a string.
"""
return f"6e40{short:04x}-b5a3-f393-e0a9-e50e24dcca9e" | 207163a7339808b65c0f3d1b0fcdfde87057eb57 | 42,464 |
import functools
def force_run(iterable):
    """
    Run every function in the provided list - left to right - regardless of
    whether any exceptions are thrown. Useful for unwinding.
    >>> a, b = 0, 0
    >>> a == 0 and b == 0
    True
    >>> def x():
    ... global a
    ... a = 1
    >>> def y():
    ... global b
    ... b = 2
    >>> force_run([x, y])
    >>> a == 1 and b == 2
    True
    >>> a, b = 0, 0
    >>> a == 0 and b == 0
    True
    >>> def x():
    ... global a
    ... a = 1
    >>> def y():
    ... global b
    ... b = 2
    >>> def z(): raise ValueError
    >>> force_run([x, z, y])
    Traceback (most recent call last):
    ...
    ValueError
    >>> a == 1 and b == 2
    True
    """
    def gen(func1, func2):
        "Generator to run both functions, regardless of exceptions."
        def _():
            # try/finally guarantees func2 runs even when func1 raises;
            # func1's exception then propagates after func2 completes.
            try:
                func1()
            finally:
                func2()
        return _
    # reduce folds the callables into one nested try/finally chain and the
    # trailing () invokes it.  NOTE: an empty iterable raises TypeError
    # (reduce with no initial value), and a single-element iterable is
    # returned by reduce un-wrapped and simply called.
    functools.reduce(gen, iterable)()
def form_model_meta_model_name(form):
    """
    :param form
    :return: form model name
    Usage: {% form_model_meta_model_name form %}
    """
    # NOTE(review): the [1:] slice drops the FIRST character of the model
    # name — presumably to strip a known one-character prefix used by this
    # project's models; confirm against the model naming convention.
    return form.Meta.model._meta.model_name[1:]
def whole_requirement(func):
    """A decorator for whole requirement functions. A whole requirement
    function should accept a *data* object and return values appropriate
    for instantiating a :exc:`ValidationError` (either an iterable of
    differences or a 2-tuple containing an iterable of differences and
    a description).
    """
    if getattr(func, '_whole_requirement', False):
        return func  # <- EXIT! Already decorated; don't wrap twice.

    # Bug fix: the original marked *func* and returned it immediately,
    # which made the wrapper below unreachable dead code.  The wrapper is
    # the intended behavior: normalize the function's result on each call.
    @wraps(func)
    def wrapper(data):
        result = func(data)
        return _normalize_requirement_result(result, func)
    wrapper._whole_requirement = True
    return wrapper
def address_in_db(cursor, address):
    """Returns true if address is in database, false otherwise."""
    cursor.execute('SELECT address FROM farmers WHERE address=?',
                   (str(address),))
    # fetchone() yields None when no row matched the address.
    return cursor.fetchone() is not None
def convert_units(cube, units):
    """Convert a cube's units in place and return the same cube.

    Arguments
    ---------
    cube: iris.cube.Cube
        input cube (mutated in place)
    units: str
        new units in udunits form

    Returns
    -------
    iris.cube.Cube
        the converted cube (identical object to the input).
    """
    cube.convert_units(units)
    return cube
def lemmatize_all_pos_pair(keyword_score_pair, lemmatizer):
    """Lemmatize each keyword across the verb/noun/adverb/adjective POS
    tags and map the lemmas back to their scores.

    :param keyword_score_pair: list of tuple, [(keyword, score), ...], where keyword is a string and score is a float
    :param lemmatizer: nltk WordNetLemmatizer
    :return: dictionary {keyword:score, ...}
    """
    tokens = [keyword for keyword, _ in keyword_score_pair]
    scores = [score for _, score in keyword_score_pair]
    # Apply one lemmatization pass per POS tag, in the same order as
    # before: Verb (v), Noun (n), Adverb (r), Adjective (a).
    for pos in ('v', 'n', 'r', 'a'):
        tokens = [lemmatizer.lemmatize(tok, pos=pos) for tok in tokens]
    # dict(zip(...)) keeps the LAST score when two keywords lemmatize to
    # the same token.
    return dict(zip(tokens, scores))
from pathlib import Path
def get_real_path_to_torchmeta_miniimagenet(dummy_datapath: Path) -> Path:
    """
    Since torchmeta expects the path to where the folder miniimagenet/ is (i.e. the folder with the data set)
    this function get the path to miniimagenet given the dummy path e.g.
    ~/data/miniimagenet -> ~/data/
    if miniimagenet is not in that path it will download it later on.
    Note:
    - this is a weird hack since the torchmeta helper functions doesn't let me just pass the actual path to the folder
    containing the dataset but instead wants the path to the folder containing the data set instead of the direct
    path to the data set.
    """
    # NOTE(review): comparing a Path object against the bare string
    # 'miniimagenet' is always False for Path inputs (Path('x') != 'x'),
    # so this branch only triggers if a plain string is passed in; the
    # intended check may have been dummy_datapath.name == 'miniimagenet'
    # — confirm with callers.
    if dummy_datapath == 'miniimagenet':
        # - this is the location torchmeta expects to be pointed to
        data_path: Path = Path('//').expanduser()
    else:
        # splits by folder removes the word miniimagenet and gives the real path to torchmeta's miniimagenet
        # -- ~/data/miniimagenet/ -> ~/data/
        data_path: Path = Path('/'.join(str(dummy_datapath.expanduser()).split('/')[:-1]))
    return data_path
import yaml
def gen_yaml_decoder(cls):
    """Build a custom YAML Loader class whose mapping nodes decode as ``cls``.

    Args:
        cls: Class used for mapping (instantiated from key/value pairs).
    """
    def struct_constructor(loader, node):
        """Construct a ``cls`` instance from a YAML mapping node."""
        return cls(loader.construct_pairs(node))

    # pylint: disable=too-many-ancestors
    class StructYAMLLoader(yaml.Loader):
        """YAML loader that routes every mapping through ``cls``."""

        def __init__(self, *args, **kwargs):
            yaml.Loader.__init__(self, *args, **kwargs)
            # Override the default dict mapping with the custom class.
            self.add_constructor(
                yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                struct_constructor)

    return StructYAMLLoader
def all_equal(arg1, arg2):
    """
    Shortcut function to compute element-wise equality between two iterables
    Parameters
    ----------
    arg1 : iterable
        Any iterable sequence
    arg2 : iterable
        Any iterable sequence that has the same length as arg1
    Returns
    -------
    bool
        True if each pair of elements are equal. Otherwise, False
    """
    # Generator expression lets all() short-circuit on the first mismatch
    # instead of materializing the whole comparison list first.
    # Note: zip() stops at the shorter input, so equal lengths are the
    # caller's responsibility (as documented above).
    return all(a == b for a, b in zip(arg1, arg2))
def extract_general_rxn_info(br_data):
    """
    Extract all activating molecules for the PR of this RS.
    It is possible that there is no information, or multiple entries.

    :param br_data: mapping holding 'RT' (reaction type) and 'RE'
        (reaction category) keys, each a list of strings.
    :return: tuple (reaction_type, reaction_cat) of lists of stripped strings.
    """
    def _stripped_entries(prop):
        # Both properties need identical trailing-whitespace cleanup;
        # share one helper instead of duplicating the loop.
        return [entry.rstrip() for entry in br_data[prop]]

    reaction_type = _stripped_entries('RT')
    reaction_cat = _stripped_entries('RE')
    return reaction_type, reaction_cat
def get_adjacent_face ( surface, edge_data, idx ):
    """ get_adjacent_face ( surface, edge_data, idx )
    Get the number (index) of the face that includes the edge that is the reverse
    direction of the passed (hashed) edge
    @param surface - working surface structure (dictionary)
    @param edge_data - pre generated object edge data
    @param idx - index of the edge to process (in surface [ 'edgehash' ])
    @return face number to add to the surface
    """
    # get the existing stored edge hash from the surface
    edge_hash = surface [ 'edgehash'][ idx ]
    # get the edge end point indexes back from the (searchable) hash
    # create a new hash for the reverse direction edge
    # An edge hash packs two 32-bit endpoint indices into one 64-bit int
    # as (start << 32) | end; swapping the two halves gives the hash of
    # the same edge traversed in the opposite direction.
    reverse_edge = [ edge_hash & 0xffffffff, edge_hash >> 32 ]
    reverse_hash = reverse_edge [ 0 ] << 32 | reverse_edge [ 1 ]
    if reverse_hash in surface [ 'edgehash' ]:
        return None # Face already on the surface: do not add again
    # return the adjacent face index
    # NOTE(review): assumes edge_data['byEdge'] lists 3 edge hashes per
    # face in face order, so floor-dividing the position by 3 yields the
    # face index; .index() raises ValueError if the reverse edge is
    # absent (e.g. a boundary edge) — confirm callers guarantee closure.
    return int ( edge_data [ 'byEdge' ].index ( reverse_hash ) / 3 )
def get_cluster_idx(_cluster):
    """Sort-key helper: return the ``cluster_idx`` attribute of a cluster."""
    return _cluster.cluster_idx
def bool_mapper(attribute):
    """
    Maps ``yes``, ``1`` and ``on`` to ``True`` and ``no``, ``0``
    and ``off`` to ``False``.
    """
    truthy = ("yes", "1", "on", "true")
    falsy = ("no", "0", "off", "false")

    def _fn(values):
        token = values["bool"].lower()
        if token in truthy:
            return {attribute: True}
        if token in falsy:
            return {attribute: False}
        # Unrecognized token: contribute nothing.
        return {}

    return _fn
import os
def get_cybershake_list(cybershake_root):
    """Return the path to the cybershake list file, which specifies the
    faults and number of realisations for a run."""
    return os.path.join(cybershake_root, "list.txt")
def ask_for_even_more_input():
    """
    asks for even more input needed for the simulation
    Returns:
        detection_probability (float): probability an infected human will get quarantined
    """
    prompt = ("Probability of detecting an infected human "
              "(between 0 and 100) (recommended value: 40): ")
    while True:
        try:
            detection_probability = float(input(prompt))
        except ValueError:
            # Non-numeric input: re-prompt.
            print("Please enter a number between 0 and 100. ")
            continue
        if 0 <= detection_probability <= 100:
            return detection_probability
        # Out-of-range values get the same message and another try.
        print("Please enter a number between 0 and 100. ")
def pkcs7pad(ciphertext, blocksize, value=b'\x04'):
    """
    ciphertext: bytes string, blocksize: int, value: bytes char
    Return ciphertext padded with value until it reach blocksize length.
    """
    # Always pads: a block-aligned input gets one full extra block.
    pad_len = blocksize - len(ciphertext) % blocksize
    return ciphertext + value * pad_len
def get_indeed_url(position, location):
    """
    Gives the indeed url of a search query with a job position and location given as parameters
    Parameters : Job Position
                 Job Location
    Return : Appropriate Indeed URL
    """
    # Caller is responsible for any URL-encoding of the two parameters.
    return f'https://fr.indeed.com/jobs?q={position}&l={location}'
def fix_list(x):
    """
    Turn a string of a list into a Python list.

    :param x: string containing a Python list literal, e.g. "[1, 2]"
    :return: the parsed list when non-empty, otherwise ``None``
    """
    import ast

    if len(x) > 0:
        # Security fix: ast.literal_eval only parses Python literals,
        # unlike eval() which would execute arbitrary code embedded in x.
        y = ast.literal_eval(x)
        if len(y) > 0:
            return y
    return None
def mean(array):
    """
    get mean of list, returns None if length is less than 1
    """
    if array:
        return float(sum(array)) / float(len(array))
    # Empty (or falsy) input has no mean.
    return None
import os
def is_network_namespace_exists(name):
    """
    Checks that a namespace exist or not
    Parameter
    ---------
    name : namespace name as str
    Returns
    -------
    True if exists False otherwise
    """
    # Named network namespaces appear as bind-mount targets here.
    return os.path.exists(f'/var/run/netns/{name}')
import math
def sigmoid(z):
    """Sigmoid activation function: f(z) = 1 / (1 + e^(-z)).

    :param z: neuron input
    :return: neuron output, a float in the open interval (0, 1)
    """
    # Numerical-stability fix: math.exp(-z) overflows (OverflowError) for
    # z below roughly -709.  Use the algebraically equivalent form whose
    # exponent is always non-positive.
    if z >= 0:
        return 1.0 / (1.0 + math.exp(-z))
    ez = math.exp(z)
    return ez / (1.0 + ez)
import os
import secrets
from pathlib import Path
def mktempfn(fn):
    """Return a path object temporary filename in the same directory as `fn`.
    For very long input file names, the resulting file name my exceed the
    maximum length allowed by the OS. This is considered to be unlikely here,
    where only names of character and block devices or FIFOs (named pipes)
    are used as input, which are (a) rare and (b) commonly very short."""
    parent, base = os.path.split(fn)
    # 36 random bits rendered as 8 URL-safe Base64 characters.
    suffix = secrets.token_urlsafe(6)
    return Path(parent, '.{}.{}'.format(base, suffix))
def copynode2(nod,dup):
    """
    copies only essential elements of nod into dup (faster)
    """
    # value: shallow-copy; a list value is sliced so dup owns its own
    # list object (the contained elements are still shared).
    x=nod.value
    dup.value=x if type(x)!=list else x[:]
    dup.lastchange=nod.lastchange
    # nature and occur are lists: slice-copy so dup can mutate them
    # without affecting nod.
    dup.nature=nod.nature[:]
    dup.isdelayed=nod.isdelayed
    dup.occur=nod.occur[:]
    # pointer and count are assigned directly (shared reference /
    # immutable value respectively).
    dup.pointer=nod.pointer
    dup.count=nod.count
    return dup
from typing import List
def subsequence(search: List, seq: List) -> bool:
    """Determine if seq is a subsequence of search, starting at index 0.
    """
    if len(search) < len(seq):
        return False
    # zip truncates to len(seq), so this compares exactly the prefix.
    return all(a == b for a, b in zip(search, seq))
import torch
def _align_and_update_loaded_state_dicts(model_state_dict, loaded_state_dict):
    """
    Strategy: suppose that the models that we will create will have
    prefixes appended to each of its keys, for example due to an extra
    level of nesting that the original pre-trained weights from ImageNet
    won't contain. For example, model.state_dict() might return
    backbone[0].body.res2.conv1.weight, while the pre-trained model contains
    res2.conv1.weight. We thus want to match both parameters together.
    For that, we look for each model weight, look among all loaded keys
    if there is one that is a suffix of the current weight name,
    and use it if that's the case. If multiple matches exist,
    take the one with longest size of the corresponding name. For example,
    for the same model as before, the pretrained weight file can contain
    both res2.conv1.weight, as well as conv1.weight. In this case,
    we want to match backbone[0].body.conv1.weight to conv1.weight, and
    backbone[0].body.res2.conv1.weight to res2.conv1.weight.
    """
    current_keys = sorted(list(model_state_dict.keys()))
    loaded_keys = sorted(list(loaded_state_dict.keys()))
    aligned_loaded_state_dict = loaded_state_dict.copy()
    # get a matrix of string matches, where each (i, j) entry
    # correspond to the size of the loaded_key string, if it matches
    match_matrix = [
        len(j) if i.endswith(j) else 0 for i in current_keys for j in
        loaded_keys]
    match_matrix = torch.as_tensor(match_matrix).view(
        len(current_keys), len(loaded_keys))
    # For each model key (row), pick the loaded key with the LONGEST
    # suffix match; -1 marks model keys with no matching loaded key.
    max_match_size, idxs = match_matrix.max(1)
    idxs[max_match_size == 0] = -1
    for idx_new, idx_old in enumerate(idxs.tolist()):
        if idx_old == -1:
            continue
        key = current_keys[idx_new]
        key_old = loaded_keys[idx_old]
        # Re-key the loaded weight under the (prefixed) model key name.
        aligned_loaded_state_dict[key] = \
            aligned_loaded_state_dict.pop(key_old)
    # Drop the local reference; callers should use the aligned copy.
    del loaded_state_dict
    return aligned_loaded_state_dict
def rangelist(numbers):
    """ Convert a list of integers to a range string in the form
    '1,2-5,7'.
    """
    if not numbers:
        return ''
    ordered = sorted(numbers)

    def emit(lo, hi):
        # A run of one renders as 'n'; longer runs render as 'lo-hi'.
        return str(lo) if lo == hi else '{}-{}'.format(lo, hi)

    parts = []
    run_start = run_end = ordered[0]
    for value in ordered[1:]:
        if value == run_end + 1:
            # Extend the current consecutive run.
            run_end = value
        else:
            # Close the run (duplicates start a fresh single-item run,
            # matching the original output, e.g. [1,1,2] -> '1,1-2').
            parts.append(emit(run_start, run_end))
            run_start = run_end = value
    parts.append(emit(run_start, run_end))
    return ','.join(parts)
def estimate_bias_randomized_response_bool(prior, p):
    """estimates the bias of randomized response
    when the probability of returning the true answer is `p`,
    and the likelihood that each answer is given is held in `priors`.
    For example, say you have a prior that your survey question will be answered "yes" 90% of the time.
    You run randomized response with p = 0.5.
    Then on average, the randomized responses will exhibit a bias of -0.2.
    As in, randomized responses will be False 2% more often than in the real data.
    :returns the bias of the randomized response"""
    assert 0 <= prior <= 1
    assert 0 <= p <= 1
    # E[response] = p * prior + (1 - p) * 1/2; the bias is the gap
    # between that expectation and the true prior.
    return p * prior + (1 - p) / 2 - prior
import os
def env_get_var_value(var_name):
    """Returns value for variable in os.environ
    Function throws AssertionError if variable is not defined.
    Unit-test based python tests require certain input params
    to be set in environment, otherwise they can't be run
    """
    # Membership tests work directly on os.environ; .keys() was redundant.
    # (Docstring typo also fixed: the error fires when the variable is
    # NOT defined.)
    assert var_name in os.environ, "Please supply %s in environment" % var_name
    return os.environ[var_name]
import sys
def get_os():
    """Get OS name.
    .. note::
        darwin: Mac.
        linux: Linux.
        Win32: Windows.
    Returns:
        str: OS name (the value of ``sys.platform``).
    Examples:
        >>> get_os() #doctest: +ELLIPSIS
        '...'
    """
    return sys.platform
import math
def euclidean_distance(point1, point2):
    """Return the euclidean distance between two points."""
    dx = point2.x - point1.x
    dy = point2.y - point1.y
    dz = point2.z - point1.z
    return math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
def select_subscription(subs_code, subscriptions):
    """
    Return the uwnetid.subscription object with the subs_code.
    """
    if not subs_code or not subscriptions:
        return None
    # First match wins; None when no subscription carries the code.
    return next((s for s in subscriptions
                 if s.subscription_code == subs_code), None)
def get_object_to_write(result):
    """
    Returns the constructed object containing the search results data, for later analysis.
    Args:
        result: The output from running the query
    Returns:
        object_to_write: the constructed object containing desired search result data
    """
    result_items = []
    urls = []
    # "items" is absent from the API response when the search returned
    # no results; in that case both lists stay empty.
    if "items" in result["response"].keys():
        se_items = result["response"]["items"]
        for item in se_items:
            title = item["title"]
            link = item["link"]
            result_items.append({
                "title": title,
                "link": link
            })
            urls.append(link)
    # Summarize each paging request that was issued for this query.
    request_data = []
    requests = result["response"]["queries"]["request"]
    for req in requests:
        request_data.append({
            "request_cx": req["cx"],
            "request_count": req["count"],
            "total_results": req["totalResults"],
            "start_index": req["startIndex"],
            "search_terms": req["searchTerms"]
        })
    # Flatten query metadata, response statistics and result items into a
    # single JSON-serializable record.
    object_to_write = {
        "segment_id": result["segment_id"],
        "query_string": result["query"],
        "api_info": {
            "api_key": result["api_key"],
            "search_engine_id": result["search_engine_id"]
        },
        "number_of_results_specified": result["number_of_results"],
        "response_info": {
            "search_info_total_results": result["response"]["searchInformation"]["totalResults"],
            "search_time": result["response"]["searchInformation"]["searchTime"],
            "url_template": result["response"]["url"]["template"],
            "requests": request_data
        },
        "results": result_items,
        "links": urls
    }
    return object_to_write
import torch
def cam_project(points, K):
    """
    :param points: torch.Tensor of shape [b, n, 3]
    :param K: torch.Tensor intrinsics matrix of shape [b, 3, 3]
    :return: torch.Tensor points projected to 2d using K, shape: [b, n, 2]
    """
    b = points.shape[0]
    n = points.shape[1]
    # Apply K to every point: expand K to [b, n, 3, 3] and treat each
    # point as a column vector [b, n, 3, 1].
    points_K = torch.matmul(
        K.reshape(b, 1, 3, 3).repeat(1, n, 1, 1),
        points.reshape(b, n, 3, 1)
    ) # shape: [b, n, 3, 1]
    # Perspective divide: (x, y) / z.  The [2] index keeps the z-slice's
    # dim so broadcasting works; no guard against z == 0.
    points_2d = points_K[:, :, :2, 0] / points_K[:, :, [2], 0] # shape: [b, n, 2]
    return points_2d
import re
def check_string(text, search=re.compile(r'[^A-Za-z0-9-_]').search):
    """Test that a string doesnt contain unwanted characters.
    :param text: Text that you want to verify is compliant.
    :type text: str
    :param search: Regex to use to check the string. Defaults to allowing
        [^a-z0-9-_].
    :return: bool
    """
    # search() returns a truthy match on the first disallowed character;
    # a clean string yields None.
    return search(text) is None
def get_bool_param(param):
    """Return bool param value.

    Accepts either a bool (returned unchanged) or a string, where any
    case/whitespace variant of 'true' maps to True and everything else
    to False.
    """
    if isinstance(param, bool):
        return param
    # 'True if cond else False' collapses to the comparison itself.
    return param.strip().lower() == 'true'
import sys
def is_venv():
    """Check to see if we are currently in a virtual environment
    Returns:
        bool: True when running inside a virtualenv/venv interpreter.
    """
    # Classic virtualenv sets sys.real_prefix; PEP 405 venvs are detected
    # by sys.base_prefix differing from sys.prefix.
    if hasattr(sys, "real_prefix"):
        return True
    return hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix
def stdist2(stddev, x):
    """
    I don't remember. Related to Cumulative distribution function...
    """
    if stddev == 0.0:
        return 0.0  # to avoid division by zero
    ea = abs(x / stddev / pow(2, 0.5))
    # Step lookup: (exclusive upper bound, value) pairs in rising order.
    for bound, value in ((0.5, 0.38), (1.5, 0.24), (2.5, 0.06), (3.5, 0.01)):
        if ea < bound:
            return value
    return 0.0
def _get_band_order(root):
"""Returns B0, B1, B2, B3 in some order"""
ee = root.findall('.//Band_Display_Order/*')
return [e.text for e in ee] | ff6837824a66cb3d8ef537df6eed575e1d8231f5 | 42,521 |
import re
def year_from_babyfile_string(babyfile_string):
    """
    Return the year following 'Popularity in ' as a string, or None when
    the marker is not found.
    """
    # \w\w\w\w matches the four year characters right after the marker.
    match = re.search(r'Popularity in \w\w\w\w', babyfile_string)
    if match is None:
        return None
    # The year is the last four characters of the matched text; it is
    # already a str, so the original str() conversion was redundant.
    # (Debug print statements removed.)
    return match.group()[-4:]
def modify_idx(*args, idx, dim):
    """
    Build an index tuple that slices one dimension while keeping the
    slices of all other dimensions unchanged.
    Parameters
    ----------
    *args : tuple of int or None
        constructor arguments for the slice object at target axis
    idx : tuple of slice
        tuple of slices in the original region of interest
    dim : int
        target axis
    Returns
    -------
    new_idx : tuple of slice
        Copy of ``idx`` with position ``dim`` replaced by ``slice(*args)``.
        Can be used to index np.ndarray and torch.Tensor.
    """
    replaced = list(idx)
    replaced[dim] = slice(*args)
    return tuple(replaced)
import argparse
def parse_args():
    """
    Returns parsed arguments from command line input.
    """
    parser = argparse.ArgumentParser(description="Determines type of capture.")
    # (flag, default, type, dest, help) for each supported option.
    options = (
        ("--type", "frame", str, "capture_type", "Type of capture."),
        ("--cameras", 1, int, "num_cameras", "Number of cameras."),
        ("--index", 0, int, "camera_index", "Index of camera."),
    )
    for flag, default, arg_type, dest, help_text in options:
        parser.add_argument(flag, action="store", default=default,
                            type=arg_type, dest=dest, help=help_text)
    return parser.parse_args()
def is_wanted_header(header):
    """Return True if the given HTTP header key is wanted.
    """
    unwanted = ('x-content-type-warning', 'x-powered-by')
    key, _value = header
    # Header names are case-insensitive, hence the lower() normalization.
    return key.lower() not in unwanted
from pathlib import Path
def _get_all_migrations_from_folder(migration_folder):
"""Simply checks folder for files which ends with up.sql
:param migration_folder: Path to migration folder
:type migration_folder: string
:returns: Path object globbed for a string *.up.sql
:rtype: pathlib.Path
"""
return Path(migration_folder).glob("*.up.sql") | 983c736cd2cf1ec2587ec67f33018c3368740c70 | 42,526 |
def matrix2vec(m, axis='x'):
    """Extract an axis (column) vector from a rotation matrix
    Parameters
    ----------
    m : numpy.ndarray
        rotation matrix
    axis : str, optional
        axis x, y, z, by default 'x'
    Returns
    -------
    vec : numpy.ndarray
    Raises
    ------
    ValueError
        axis shoule be x, y, z
    """
    # Column index lookup replaces the if/elif chain.
    columns = {'x': 0, 'y': 1, 'z': 2}
    if axis not in columns:
        raise ValueError("Valid axis are 'x', 'y', 'z'")
    return m[:, columns[axis]]
def split_text_by_maxlen(text, maxlen=512):
    """Split a long string into chunks of at most ``maxlen`` characters.

    :param text: str
    :param maxlen: int, maximum chunk length
    :return: list of (chunk, start_index) tuples
    """
    return [(text[start:start + maxlen], start)
            for start in range(0, len(text), maxlen)]
def time_in_range(start, end, x_time):
    """ See if a time falls within range """
    # Normalize so the comparison works whichever way the bounds are given.
    lo, hi = (start, end) if start <= end else (end, start)
    return lo <= x_time <= hi
import shlex
def split_cli_command_and_shell_commands(command_text):
    """Split the command text into cli commands and pipes
    e.g.::
    cat ID | grep text
    ``cat ID`` is a CLI command, which will be matched against the CLI grammar tree
    ``grep text`` is treated as a SHELL command
    Returns a tuple ``(a, b)`` where ``a`` is the CLI command, and ``b`` is a list of shell commands
    """
    groups = []
    current = []
    for token in shlex.split(command_text):
        if token == '|':
            # Pipe closes the current segment (even if it is empty).
            groups.append(current)
            current = []
        else:
            current.append(token)
    # A trailing empty segment (text ending in '|') is discarded.
    if current:
        groups.append(current)
    # First segment is the CLI command; the rest are shell commands.
    return ' '.join(groups[0]), groups[1:]
def make_category_query(category):
    """Creates a search query for the target audio category"""
    # Example result: mediatype:(audio) subject:radio
    return "mediatype:(audio) subject:{}".format(category)
import os
def get_framework_template(framework_name: str) -> str:
    """Reads the framework's template.
    Args:
        framework_name: String represents the name of the framework.
    Returns:
        The framework's template.
    """
    template_path = os.path.join('./src/frameworks', framework_name, 'template.py')
    # Resource-leak fix: the original never closed the file handle; the
    # context manager guarantees it is closed even on read errors.
    with open(template_path, 'r') as template_file:
        return template_file.read()
import os
import re
def get_future_s3_filename(filename, bucket_objects):
    r"""
    Allows us to get the filename to use when we upload the object in S3
    e.g if filename='temp.txt' does not exist in bucket_objects, we return temp.txt;
    if temp.txt exists but temp-(\d?).txt does not, we return temp-1.txt;
    if temp.txt and temp-1.txt exists but no other temp-(\d?).txt files exist,
    we return temp-2.txt; we do the same for temp-3.txt, temp-100.txt ...
    """
    if filename not in bucket_objects:
        return filename
    name_without_ext, file_extension = os.path.splitext(filename)
    # re.escape guards against regex metacharacters in the file name.
    pattern = rf"{re.escape(name_without_ext)}-(\d+){re.escape(file_extension)}"
    suffixes = [int(num) for num in re.findall(pattern, str(bucket_objects))]
    # Bug fix: the original used re.search and incremented only the FIRST
    # suffix found, which could return a name that already exists (e.g.
    # with temp-1.txt and temp-2.txt both present it could yield
    # temp-2.txt again).  Take max + 1 so the result never collides.
    next_number = max(suffixes, default=0) + 1
    return f"{name_without_ext}-{next_number}{file_extension}"
def new_position(floor_df, participant_df, mappings_df):
    """
    Create position.

    Joins the floor map to the participant->table mappings (on table ID),
    then joins in each participant's genre code (on participant ID).

    Expected inputs
    ---------------
    floor_df:       columns ID, X, Y, BLOCK   (one row per table position)
    participant_df: columns ID, GENRE_CODE    (one row per participant)
    mappings_df:    columns PARTICIPANT, TABLE

    Returns
    -------
    DataFrame with columns X, Y, BLOCK, PARTICIPANT, TABLE, GENRE_CODE.
    """
    # Attach the participant seated at each table.  Outer join keeps
    # unmatched tables and participants alike.
    position = floor_df.merge(mappings_df, left_on='ID',
                              right_on='TABLE', how='outer')
    # The floor's ID column is redundant once TABLE carries the join key.
    position = position.drop("ID", axis=1)
    # Attach each participant's genre code, again keeping unmatched rows.
    position = position.merge(participant_df, left_on='PARTICIPANT',
                              right_on='ID', how='outer')
    # Drop the participant table's ID column for the same reason.
    position = position.drop("ID", axis=1)
    return position
import os
def getFileNameWithoutExtension(fileName: str) -> str:
    """
    Return the base name of ``fileName`` with its extension removed.
    """
    base = os.path.basename(fileName)
    root, _ext = os.path.splitext(base)
    return root
def _exclusions(table_name, ignore_columns):
"""Generate a list of columns to exclude from serialisation for a given table name
Parameters
----------
table_name : str
The name of a data table within the app
ignore_columns : list, tuple, dict or str
A list or tuple of column names to ignore, a dict mapping
table names to such lists or tuples, or a string with a single column name
Returns
-------
list
of column names
"""
if isinstance(ignore_columns, (list, tuple)):
return ignore_columns
elif isinstance(ignore_columns, dict):
return ignore_columns[table_name]
elif isinstance(ignore_columns, str):
return [ignore_columns]
else:
return [] | 5590783c98e3e24317705751965ee48499611064 | 42,539 |
def plugins_string(secrets):
    """Provide a plugins_string property to the template if it exists"""
    # Delegates to the relation object on the secrets wrapper; raises
    # AttributeError when the relation carries no plugins_string.
    return secrets.relation.plugins_string
def get_multiple_model_method(model):
    """
    It returns the name of the Multiple Model Chain element of the model.
    Parameters
    ----------
    model :
        A Scikit-learn model instance
    Returns
    -------
    The multiple model method for a mining model (None for model classes
    not listed below, as before).
    """
    # Class-name -> multiple-model-method dispatch table.
    methods = {
        'GradientBoostingClassifier': 'modelChain',
        'GradientBoostingRegressor': 'sum',
        'RandomForestClassifier': 'majorityVote',
        'RandomForestRegressor': 'average',
        'IsolationForest': 'average',
    }
    return methods.get(model.__class__.__name__)
import pathlib
import os
def realpath(path: pathlib.Path) -> pathlib.Path:
    """Return the canonical path of the specified filename."""
    resolved = os.path.realpath(path)
    return pathlib.Path(resolved)
from datetime import datetime
def parse_date_string(date_string):
    """
    Converts the date strings created by the API (e.g. '2012-04-06T19:11:33.032') and
    returns an equivalent datetime instance.
    """
    api_format = "%Y-%m-%dT%H:%M:%S.%f"
    return datetime.strptime(date_string, api_format)
def create_pi_dict(file_path):
    """Reads Octamers file and creates a dictionary of P and I values for each octamer.
    :param file_path (string): Path of the file containing P and I values for all octamers
    :return: A dictionary containing a list of PI values for each octamer
    """
    pi_dict = dict()
    header = "Octamer \tP-index I-index\n"
    with open(file_path) as file:
        # Skip everything up to and including the header line.
        # Bug fix: the original looped forever when the header was
        # missing; readline() returns '' at EOF, so stop there.
        while True:
            current_line = file.readline()
            if current_line == header:
                break
            if current_line == '':
                print("Reached the end of the file.")
                return pi_dict
        # Parse the data lines that follow the header.
        for current_line in file:
            # Some lines are empty and cannot be unpacked into three
            # fields; catch only the expected failure instead of the
            # original bare except (which hid real bugs).
            try:
                (key, p_value, i_value) = current_line.split()
                pi_dict[key] = [float(p_value), float(i_value)]
            except ValueError:
                print('Warning: Encountered an empty line in the data.')
    print("Reached the end of the file.")
    return pi_dict
def make_plotting_func(function, ratios):
    """Return a lambda function ready for plotting in ternary."""
    # NOTE(review): the returned lambda ignores its ``inputs`` argument
    # and always evaluates function(ratios)/100 — presumably the ternary
    # library calls it per point and a constant surface is intended here;
    # confirm that ``inputs`` should not be forwarded to ``function``.
    return lambda inputs: function(ratios)/100
def _find_files(metadata):
"""
.. versionadded:: 3001
Looks for all the files in the Azure Blob container cache metadata.
:param metadata: The metadata for the container files.
"""
ret = {}
for container, data in metadata.items():
if container not in ret:
ret[container] = []
# grab the paths from the metadata
file_paths = [k["name"] for k in data]
# filter out the dirs
ret[container] += [k for k in file_paths if not k.endswith("/")]
return ret | 2c410afa58131466ec83675d98a23770680575be | 42,547 |
def normalize_title(title):
    """Normalize titles.
    """
    # Trim surrounding whitespace, then title-case the words.
    stripped = title.strip()
    return stripped.title()
def make_filter_params(db447x, n_downsample: int = 1, n_0: int = 4096, n_poles: int = 1, f_cutoff: int = 10000,
                       n_avg: int = 1, factor: int = 1):
    """
    Bundle filter parameters into the dictionary later read by the
    parse_filter function.
    :param db447x: dataframe of ni447x filter data
    :param n_downsample: downsampling factor
    :param n_0:
    :param n_poles:
    :param f_cutoff:
    :param n_avg:
    :param factor:
    :return: dict with all filter parameters keyed by name
    """
    params = dict(n_downsample=n_downsample, n_0=n_0, n_poles=n_poles,
                  f_cutoff=f_cutoff, n_avg=n_avg, factor=factor)
    params["db447x"] = db447x
    return params
def handle_dot(next, token):
    """
    /./
    """
    def identity(result):
        """The '.' selector passes the result through unchanged."""
        return result
    return identity
def state_in_addrlist(state, l):
    """
    Determine whether any instruction address of *state*'s current block
    appears in the provided list (False for an empty or None list).
    """
    if not l:
        # empty / None list: nothing can match
        return False
    return any(addr in l for addr in state.block().instruction_addrs)
def convert_mongo_document_to_api_dict(obj):
    """Converts a mongo document object to the api dict that can be used for json conversion.

    - Exposes the mongo ``_id`` as a string ``id`` field.
    - Strips keys starting with ``_`` or ``password``.
    - Fills declared fields missing from the document with ``None``.
    """
    dict_ = obj.to_mongo()
    dict_.update({'id': str(dict_['_id'])})
    # Filter out private/secret keys. The old code used `iteritems()`,
    # which is Python 2 only and raises AttributeError on Python 3.
    result = {
        k: v
        for k, v in dict_.items()
        if not (k.startswith('_') or k.startswith('password'))
    }
    # next make sure we have all the fields:
    missing_fields = set(obj._fields.keys()) - set(result.keys())
    for field in missing_fields:
        result[field] = None
    return result
def last(path):
    """Returns a last "part" of a path.
    Examples:
        last('abc/def/ghi') => 'ghi'
        last('abc') => 'abc'
        last('') => ''
    """
    # rsplit with maxsplit=1 yields [path] when no '/' is present,
    # so the no-separator case needs no special handling.
    return path.rsplit('/', 1)[-1]
def select_covid(conn, chatId):
    """Return the bodies of covid19-live-analytics messages for a chat.

    :param conn: open DB-API connection (qmark paramstyle, e.g. sqlite3)
    :param chatId: chat identifier to filter on
    :return: list of one-element row tuples containing the message body
    """
    cur = conn.cursor()
    # Bind chatId as a query parameter instead of interpolating it into the
    # SQL string with an f-string, which was vulnerable to SQL injection.
    sql = """
        SELECT Body
        FROM MessageInfo
        WHERE ChatID = ? AND Body LIKE '%covid19-live-analytics%' AND Timestamp > 0
    """
    cur.execute(sql, (chatId,))
    return cur.fetchall()
def _get_configs(mod):
"""Gets module variables and creates dictionary"""
if "__all__" in mod.__dict__: # noqa
configs = mod.__dict__["__all__"]
else:
configs = [x for x, v in mod.__dict__.items() if not x.startswith("__") and not callable(v)]
configs = {k: getattr(mod, k) for k in configs}
return configs | 0aaeab86d4aa16d581254f273d816b983506fa07 | 42,561 |
def _aligned_i(i, olen, alignment):
"""
A function that checks if a given foreign word is aligned
to any 'english' word in a given 'english' sentence
"""
# print "i:%s" % (i)
# print "olen:%s" % (olen)
# print "len(alignment):%s" % (len(alignment))
for o in range(olen):
if (i, o) in alignment:
return True
return False | 86bb647db43cca2df0f102dc725bac040dc142de | 42,562 |
import pathlib
import zipfile
def source_input_directory(user_filepath):
    """Examine program arguments to determine the location of an archive.
    Extract as needed.

    :param user_filepath: a directory path, a ``.zip`` archive path, or
        ``None`` (selects the default "export" directory).
    :return: path of a directory containing the input data.
    :raises RuntimeError: if the path does not exist, is a non-zip file,
        or is neither a file nor a directory.
    """
    if user_filepath is None:
        return "export"
    path = pathlib.Path(user_filepath)
    if not path.exists():
        raise RuntimeError("Specified path {} does not exist".format(user_filepath))
    if path.is_dir():
        return user_filepath
    if path.is_file():
        if user_filepath.endswith(".zip"):
            # Strip only the trailing ".zip": str.replace(".zip", "") would
            # also mangle ".zip" occurring elsewhere in the path.
            extracted_filepath = user_filepath[: -len(".zip")]
            with zipfile.ZipFile(user_filepath, 'r') as zip_file:
                zip_file.extractall(extracted_filepath)
            return extracted_filepath
        raise RuntimeError("Specified path {} is a file, but not an archive".format(user_filepath))
    # Previously exotic path types fell through and returned None
    # implicitly; fail loudly instead.
    raise RuntimeError("Specified path {} is neither a directory nor a file".format(user_filepath))
def n_target_classes():
    """
    Number of target classes (that have >= 1 prototype assigned to them).
    """
    num_classes = 10
    return num_classes
def fuzzy_sort_coarse(image, pano_threshold):
    """ A very fuzzy sort by aspect ratio - portrait then square then landscape then pano"""
    width, height = image.width, image.height
    if width > height * pano_threshold:
        return 2   # panorama
    if width > height:
        return 1   # landscape
    if width < height:
        return -1  # portrait
    return 0       # square
def print_para(opt):
    """
    Render the parameters of a model, one per line.
    :param opt: model exposing ``named_parameters()``
    :return: string with grad marker, name, and size for each parameter
    """
    lines = []
    for name, param in opt.named_parameters():
        marker = 'grad' if param.requires_grad else '    '
        lines.append("({}) {}: {}".format(marker, name, param.size()))
    return '\n'.join(lines)
def prettify_error(s):
    """Adds a blank and replaces regular spaces by non-breaking in the first 90 characters
    This function adds a big blank space and forces the first words to be a big block of
    unbreakable words. This enforces a newline in the DSS display and makes the error prettier.
    """
    padding = '\xa0' * 130
    unbreakable_head = s[:90].replace(' ', '\xa0')
    return padding + ' \n' + unbreakable_head + s[90:]
def increase(_):
    """Return offset adjust 1.

    The argument is accepted for interface compatibility and ignored.
    """
    return 1
import re
def onlyalphanum(value):
    """Filter `value`, keeping only ASCII alphanumeric characters.

    Parameters
    ----------
    value : str
        The string to filter.

    Returns
    -------
    str
        The string obtained after filtering.
    """
    # Collect the alphanumeric runs and glue them back together.
    return "".join(re.findall(r"[A-Za-z0-9]+", value))
def eval_acc(test_fn, all_examples):
    """
    Evaluate accuracy (in percent) on `all_examples`.
    """
    correct = 0
    total = 0
    for x1, mask1, x2, mask2, l, y in all_examples:
        correct += test_fn(x1, mask1, x2, mask2, l, y)
        total += len(x1)
    return correct * 100.0 / total
import os
def get_path_or_create(path):
    """
    Expand and absolutize *path*, creating the directory if needed.

    :param path: possibly relative path; may contain ``~``.
    :return: ``(absolute_path, created)`` where ``created`` is True when the
        directory did not exist before this call.
    """
    path = os.path.abspath(os.path.expanduser(path))
    if os.path.exists(path):
        return path, False
    # exist_ok guards against the race where another process creates the
    # directory between the exists() check above and makedirs().
    os.makedirs(path, exist_ok=True)
    return path, True
def create_worker(queue, worker_class):
    """
    Build a non-stop worker function that gathers tweets.

    Arguments:
    ----------
    queue: queue.Queue
        A queue to pull task argument tuples from
    worker_class: class or function returning an object
        The returned object must respond to `work(status, query)`

    Returns:
    --------
    worker: function
        A zero-argument function suitable as a thread target; it loops
        forever, unpacking each queued tuple into `work(...)` and marking
        the task done.
    """
    tweet_worker = worker_class()

    def worker():
        while True:
            task_args = queue.get(block=True)
            tweet_worker.work(*task_args)
            queue.task_done()

    return worker
def verify_files(dir, addon):
    """Verify if the files in the path can be compressed in a gma"""
    verified, disallowed = addon.verify_files()
    if not verified:
        print("Illegal files were found:")
        for name in disallowed:
            print('\t' + name)
        print("Please remove these files or add them to the ignore list of your addon.")
    else:
        print("No illegal files were found.")
    return verified, disallowed
def layer_info(layer, x, y, crs, params, identity):
    """Query layer and return a stub info result as a dict:
    {
        'features': [
            {
                'id': <feature ID>,
                'attributes': [{'name': ..., 'value': ...}, ...],
                'bbox': [<minx>, <miny>, <maxx>, <maxy>],
                'geometry': '<WKT geometry>'
            }
        ]
    }

    The single returned feature is synthetic: a fixed id, two attributes
    derived from the layer name, a point geometry at the rounded query
    coordinates, and a 100x100 bbox centred on that point.

    :param str layer: Layer name
    :param float x: X coordinate of query
    :param float y: Y coordinate of query
    :param str crs: CRS of query coordinates (unused by this stub)
    :param obj params: FeatureInfo service params (unused by this stub)
    :param str identity: User name or Identity dict (unused by this stub)
    """
    px, py = round(x), round(y)
    feature = {
        'id': 123,
        'attributes': [
            {'name': 'title', 'value': 'Feature for Layer %s' % layer},
            {'name': 'name', 'value': 'Feature Name'},
        ],
        'bbox': [px - 50, py - 50, px + 50, py + 50],
        'geometry': "POINT(%s %s)" % (px, py),
    }
    return {'features': [feature]}
def _is_yearbook_transaction(transaction_detail):
"""Returns True iff the paypal `transaction_detail` object contains an `item_name` of Yearbook.
"""
cart_info = transaction_detail.get('cart_info')
if cart_info:
item_details = cart_info.get('item_details')
if item_details and len(item_details) == 1:
item = item_details[0]
item_name = item.get('item_name', '').lower()
return item_name and item_name.find('yearbook') != -1
transaction_info = transaction_detail.get('transaction_info')
if transaction_info:
invoice_id = transaction_info.get('invoice_id', '').lower()
return invoice_id and invoice_id.find('yearbook-invoice') != -1
return False | 0540efb4d5d2fedc97ac5fe14ea29577d0adb358 | 42,577 |
def get_string_after_n_space(text, n):
    """
    Return the substring after the nth space, stripped.
    Input --> 'test1 test2 test3', 1
    Output --> test2 test3
    Input --> 'test1 test2 test3', 2
    Output --> test3
    :param text: source string
    :param n: number of spaces to skip
    :return: string after nth space
    """
    remainder = text.split(' ', n)[-1]
    return remainder.strip()
def hovertext_eth_type(eth_type):
    """
    Wrap an eth_type (decimal, not enumerated) in extra text
    to convey context.
    """
    return f"Ethernet Type: {eth_type} (decimal)"
def loadMushroomDataSet(path):
    """
    return the dataset (list) of mushroom from path file
    :param path: path of mushroom dataset
    :return: list of whitespace-split rows, or None if the file could not
        be read (the error is printed)
    """
    mushroomDataset = None
    try:
        # `with` guarantees the handle is closed; the old code leaked it.
        with open(path) as data_file:
            mushroomDataset = [line.split() for line in data_file]
    except Exception as e:
        print(e)
    return mushroomDataset
import os
import configparser
def read_config(section, key=None):
    """Read configure file.
    Read configure file to get configure information.
    :param: section: the section of ini file
    :param: key in section of ini file
    :return: the value for `key`, or the list of all values in `section`
        when `key` is None
    """
    root_path = os.path.abspath(os.path.dirname(__file__))
    config = configparser.ConfigParser()
    config.read(os.path.join(root_path, "..", "config", "config.ini"))
    if key is not None:
        return config.get(section, key)
    # No key given: collect every value in the section.
    return [value for _, value in config.items(section)]
def formatSearchQueryForCostco(query):
    """
    Format a search string into one that can be sent as a URL parameter,
    joining the space-separated terms with '+'.
    """
    return '+'.join(query.split(' '))
def get_bits(register, index, length=1):
    """
    Get selected bit(s) from register while masking out the rest.
    Returns as boolean if length==1
    :param register: Register value
    :type register: int
    :param index: Start index (from right)
    :type index: int
    :param length: Number of bits (default 1)
    :type length: int
    :return: Selected bit(s)
    :rtype: Union[int, bool]
    """
    mask = (1 << length) - 1
    bits = (register >> index) & mask
    return bits == 1 if length == 1 else bits
def get_last_chunk(buffer, info):
    """A sampler function to extract only the last chunk of the memory.

    Slices each array in `buffer` along its second axis to the window
    ending at `num_records` and spanning `num_steps` + 1 entries.
    """
    end = info["num_records"]
    start = end - info["num_steps"] - 1
    return {key: values[:, start:end] for key, values in buffer.items()}
def _fix_url(url: str) -> str:
""" Add 'https://' to the start of a URL if necessary """
corrected_url = url if url.startswith("http") else "https://" + url
return corrected_url | ad26693c9cf2958e31155f6666e5bffad9c13628 | 42,590 |
import calendar
def first_day(year, month, bday=True):
    """
    Return first day of month. Default to business days
    """
    # monthrange is evaluated unconditionally so invalid months raise
    # regardless of the bday flag.
    first_weekday, _ = calendar.monthrange(year, month)
    if not bday:
        return 1
    if first_weekday <= 4:  # Monday..Friday
        return 1
    # Weekend start: advance to the following Monday.
    return 7 - first_weekday + 1
def wiki_title(name):
    """Like title(), except it never lowercases a letter.

    Uppercase letters sort before lowercase in ASCII, so taking the
    min of each original/titled character pair keeps existing capitals.
    """
    titled = name.title()
    return ''.join(min(pair) for pair in zip(name, titled))
def extract_preference(prefer):
    """Extract the parameters from a Prefer header's value.

    Each ``;``-separated parameter is split on its first ``=``; the value
    is unquoted and split on spaces, so every value maps to a list.

    >>> extract_preference('return=representation;include="http://www.w3.org/ns/ldp#PreferMinimalContainer http://www.w3.org/ns/oa#PreferContainedIRIs"')
    {'return': ['representation'], 'include': ['http://www.w3.org/ns/ldp#PreferMinimalContainer', 'http://www.w3.org/ns/oa#PreferContainedIRIs']}
    """
    obj = {}
    if prefer:
        for param in prefer.split(';'):
            # partition splits on the first '=' only, so values containing
            # '=' survive and parameters without '=' don't raise ValueError.
            key, _, value = param.partition('=')
            obj[key] = value.strip('"').split(' ')
    return obj
def put_in_bucket(d, l, value):
    """
    If this helper function is called on every gene in our analysis, it will group all
    genes with the same z-score into the same list. Each of those lists of same-z-score
    genes is stored in l.

    Note: the very first call returns a flat list ``[value]`` (not a list of
    lists); later calls lazily wrap any such bare entries into sub-lists as
    they are encountered, so ``l`` converges to a list of buckets.
    ``l`` is mutated in place and also returned.

    Args:
        d: Dict, in the context of this file, a dict of genes to z-scores
        l: List of Lists, represents out buckets. Initiate with an empty list, the continually pass in
        l to build up your buckets
        value: String, the current gene we would like to put into a bucket
    Returns: the list now containing value in its proper bucket within l.
    """
    # dummy list to prevent ['string'] -> ['s','t','r','i','n','g']
    dummy = []
    # if list is empty, init it
    if len(l) == 0:
        dummy.append(value)
        return dummy
    else:
        # else search to see if the value fits into an existing bucket
        for i in range(len(l)):
            # along the way, make sure this is a list of lists
            # (wraps a bare string left over from the first call)
            if type(l[i]) != list:
                dummy.append(l[i])
                l[i] = dummy
                dummy = []
            # aka find a bucket with same z-score as value's
            # (buckets are homogeneous, so checking the first member suffices)
            if d[l[i][0]] == d[value]:
                l[i].append(value)
                return l
        # if our value (gene) doesn't have a bucket to go in, make a new one at the end of the list
        dummy.append(value)
        l.append(dummy)
        return l | 475a6edf846bc56f9b5ecc15a6c742f1a6ed377c | 42,595 |
def existed(fileobj):
    """
    Returns a boolean indicating whether a file opened by openReadWrite existed
    in the filesystem before it was opened (i.e. its mode contains 'r').
    """
    mode = getattr(fileobj, "mode", '')
    return 'r' in mode
def extractDEPDataFromSoup( soup ):
    """
    Takes in data structure from BeautifulSoup and parses for DEP Boiler Data.
    We assume that the soup has been prescreened to ensure that data exist.

    Returns a flat list: [address, boro, BIN, block, lot] followed by the
    text of every ':'-labelled cell in the application table. BIN/block/lot
    fall back to the string "NA" when they cannot be parsed as ints.

    NOTE(review): this parser relies on the page having at least three
    <table> elements in a fixed order (tables[1] = location, tables[2] =
    application data) — it will break silently if the page layout changes.
    """
    tables = soup.find_all( "table" )
    #get premise address, boro name, BIN, block #, and lot #
    #This part has the following format:
    #'\n\n\n\r\n PREMISES: [address] \xa0\xa0[boro name]\r\n...
    #\xa0\xa0 BIN: [BIN, last 6]\xa0\xa0BLOCK:\r\n [block #]...
    #\xa0\xa0LOT: [lot #]\r\n \n\n\n'
    locationData = tables[ 1 ].get_text()
    locationData = locationData.replace( '\n', '' )#removes '\n's
    locationData = locationData.replace( '\r' , '' )#removes '\r's
    # split on the ": " labels; fields are then separated by \xa0 runs
    locDataSplit = locationData.split( ": " )
    locDataSplit2 = locDataSplit[ 1 ].split( "\xa0" )
    appAddress = locDataSplit2[ 0 ][ 0:-1 ]  # drop the trailing space
    appBoro = locDataSplit2[2].partition( ' ')[0]
    #check for case where BIN, Block, Lot are missing
    appBIN = "NA"
    appBlock = "NA"
    appLot = "NA"
    # bare excepts keep the "NA" default on any parse failure
    try:
        appBIN = int( locDataSplit[2].partition( '\xa0' )[ 0 ] )
    except:
        pass
    try:
        appBlock = int( locDataSplit[3].partition( '\xa0' )[ 0 ] )
    except:
        pass
    try:
        appLot = int( locDataSplit[4].partition( '\xa0' )[ 0 ] )
    except:
        pass
    allLocationData = [ appAddress, appBoro, appBIN, appBlock, appLot ]
    #get DEP Application Data
    applicationData = tables[2].find_all("td") #Grab individual table entries.
    allDEPData = []
    for i in applicationData:
        txt = i.get_text() # Get the text,
        if ':' in txt:
            # keep only the value part after the "label:" prefix
            allDEPData.append(txt
                              .replace('\r', '') # then remove the '\r's,
                              .replace('\n', '') # then remove the '\n's,
                              .partition(':')[2] # then remove everything before ":",
                              .strip()           # strip it
                              )
    return allLocationData + allDEPData | 321b5b8dc00a4ac9773ba73ac171845f51ff4917 | 42,597 |
def choose_attribute(data, attributes, class_attr, fitness, method):
    """
    Cycles through all the attributes and returns the attribute with the
    highest information gain (or lowest entropy). Returns None when no
    candidate attribute exists. Ties break on the larger attribute value,
    matching tuple comparison order.
    """
    scored = (
        (fitness(data, attr, class_attr, method=method), attr)
        for attr in attributes
        if attr != class_attr
    )
    best = max(scored, default=(float("-inf"), None))
    return best[1]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.