| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def svn_wc_adm_probe_retrieve(*args):
"""svn_wc_adm_probe_retrieve(svn_wc_adm_access_t associated, char path, apr_pool_t pool) -> svn_error_t"""
return _wc.svn_wc_adm_probe_retrieve(*args)
|
2716094e31c596212b5ccd7833b9ae10ef52d44e
| 3,641,300
|
def flights_preclean(df):
"""
Input: Raw dataframe of Flights table.
Output: Cleaned flights table:
- Remove cancelled rows, made available in new dataframe "df_can"
- Drop columns ['Unnamed: 0', 'branded_code_share',
'mkt_carrier', 'cancelled', 'cancellation_code', 'flights', 'air_time',
'first_dep_time', 'total_add_gtime', 'longest_add_gtime', 'no_name']
- Fill null values in delay columns
- Drop remaining null values
"""
global df_can
df_can = df[df.cancelled == 1].copy()
print("Removed cancelled flights - now available in dataframe 'df_can'")
df = df[df.cancelled == 0]
df = df.drop(columns=['Unnamed: 0', 'branded_code_share',
'mkt_carrier', 'cancelled', 'cancellation_code', 'flights', 'air_time',
'first_dep_time', 'total_add_gtime', 'longest_add_gtime', 'no_name'])
for col in ['carrier_delay', 'weather_delay', 'nas_delay', 'security_delay', 'late_aircraft_delay']:
df[col] = df[col].fillna(value=0)
df = df.dropna()
return df
|
61dcfa6afd6ec7dd0abb5525187938d6ab978996
| 3,641,301
|
def convert_spectral_kernel_quint(sequences, list_seq_to_id):
""" Return a list seq of nb of time the seq in list_seq_to_id appear in sequence"""
final = []
for j in range(len(sequences)):
sequence = sequences[j]
dico_appear = {seq: 0 for seq in list_seq_to_id}
for i in range(len(sequence) - 4):
seq_to_add = sequence[i] + sequence[i+1] + sequence[i+2] + sequence[i+3] + sequence[i+4]
dico_appear[seq_to_add] += 1
final.append([dico_appear[k] for k in list_seq_to_id])
return final
|
49f727dd26822834bad2c9a448136288dc1c426c
| 3,641,302
|
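A small worked example of the 5-mer counting in convert_spectral_kernel_quint above (the toy sequences are invented for illustration):
vocab = ["ABCDE", "BCDEF", "CDEFG"]
counts = convert_spectral_kernel_quint(["ABCDEF"], vocab)
print(counts)  # [[1, 1, 0]] -- "ABCDE" and "BCDEF" each occur once, "CDEFG" not at all
Note that any 5-mer in the input that is not in list_seq_to_id raises a KeyError, so the vocabulary must cover every substring of the sequences.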
def grad_of_marginal_fit(c, h, tau, epsilon):
"""Computes grad of terms linked to marginals in objective.
Computes gradient w.r.t. f ( or g) of terms in
https://arxiv.org/pdf/1910.12958.pdf, left-hand-side of Eq. 15
(terms involving phi_star)
Args:
c: jnp.ndarray, first target marginal (either a or b in practice)
h: jnp.ndarray, potential (either f or g in practice)
tau: float, strength (in ]0,1]) of regularizer w.r.t. marginal
epsilon: regularization
Returns:
a vector of the same size as c or h
"""
if tau == 1.0:
return c
else:
rho = epsilon * tau / (1 - tau)
return jnp.where(c > 0, c * derivative_phi_star(-h, rho), 0.0)
|
38b6b57766c97f8eda72162b6919e48c235cd880
| 3,641,303
|
def SuggestField(**kwargs):
"""
Query 'foo' to get the TextField, or 'foo.raw' to get the KeywordField, or 'foo.suggest' to get the CompletionField.
"""
return fields.TextField(
fields={
'raw': fields.KeywordField(),
'suggest': fields.CompletionField(),
},
**kwargs
)
|
57f673bbc310a22432178ee078c8f5eec2355e12
| 3,641,304
|
import math
def distance_on_unit_sphere(FoLat, FoLng, ToLat, ToLng):
""" Convert latitude and longitude to spherical coordinates in radians."""
phi1 = math.radians(90.0 - FoLat)
phi2 = math.radians(90.0 - ToLat)
theta1 = math.radians(FoLng)
theta2 = math.radians(ToLng)
"""Compute spherical distance from spherical coordinates.
For two locations in spherical coordinates
(1, theta, phi) and (1, theta', phi')
cosine( arc length ) =
sin phi sin phi' cos(theta-theta') + cos phi cos phi'
distance = rho * arc length"""
cos = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) + math.cos(phi1) * math.cos(phi2))
arc = math.acos(cos)
"""Remember to multiply arc by the radius of the earth in your favorite set of units to get length."""
return arc
|
98c9294697e36c5b45cd165ba96529187f2750de
| 3,641,305
|
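distance_on_unit_sphere above returns the arc length on a unit sphere, so multiplying by an Earth radius gives a great-circle distance. A quick sanity check (6371 km is an assumed mean Earth radius):
# Quarter of a great circle: from (0 N, 0 E) to (0 N, 90 E).
arc = distance_on_unit_sphere(0.0, 0.0, 0.0, 90.0)
print(arc)            # ~1.5708 (pi/2)
print(arc * 6371.0)   # ~10007.5 km, a quarter of Earth's circumference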
import pandas # noqa
def check_pandas_support(caller_name):
"""Raise ImportError with detailed error message if pandsa is not
installed.
Plot utilities like :func:`fetch_openml` should lazily import
pandas and call this helper before any computation.
Parameters
----------
caller_name : str
The name of the caller that requires pandas.
"""
try:
return pandas
except ImportError as e:
raise ImportError(
"{} requires pandas.".format(caller_name)
) from e
|
f3d484bb3a5dbca43a81cca83b7343e1fcd7cbcf
| 3,641,306
|
import argparse
from datetime import datetime
def parse_cmdline():
"""Parse the command line arguments.
Returns:
argparse.Namespace. The parsed arguments or defaults.
"""
parser = argparse.ArgumentParser(
description="Dataplane Automated Testing System, version " + __version__,
epilog='Note: parameters on the command line will override values set in the configuration file.')
parser.add_argument('-d', '--tests-directory',
default='./tests', metavar='DIRECTORY', dest='tests_dir',
help='Directory containing test scripts, ./tests/ by default')
parser.add_argument('-f', '--config',
default='./dats.cfg',
help='Configuration file name, ./dats.cfg by default')
parser.add_argument('-l', '--list',
action='store_true',
help='List valid names of test cases and exit')
parser.add_argument('-r', '--report',
default=datetime.now().strftime('dats-report-%Y%m%d_%H%M%S/'),
metavar='DIRECTORY', dest='report_dir',
help='Where to save the report. A new directory with timestamp in its name is created by default.')
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose output - set log level of screen to VERBOSE instead of INFO')
parser.add_argument(
'test', nargs='*',
help='List of test names to execute. All tests are executed by default.')
args = parser.parse_args()
return args
|
a9c95d12fe3a30a3fb130310ea0b9cb57fb20e0b
| 3,641,307
|
def summarize_sequence(summary_type, hidden, d_model, n_head, d_head, dropout,
dropatt, input_mask, is_training, initializer,
scope=None, reuse=None):
"""Summarize hidden sequence into a vector."""
tf.logging.info("===== Sequence summary =====")
tf.logging.info(" - input_mask %s", input_mask)
tf.logging.info(" - summary_type %s", summary_type)
tf.logging.info("============================")
with tf.variable_scope(scope, "sequnece_summary", reuse=reuse):
if summary_type == "last":
summary = hidden[:, -1]
elif summary_type == "first":
summary = hidden[:, 0]
elif summary_type == "max":
if input_mask is None:
summary = tf.reduce_max(hidden, axis=1)
else:
neg_pad = -1e10 * input_mask[:, :, None]
summary = tf.reduce_max(hidden + neg_pad, axis=1)
elif summary_type == "mean":
if input_mask is None:
summary = tf.reduce_mean(hidden, axis=1)
else:
inp_mask = (1. - input_mask)[:, :, None]
summary = (tf.reduce_sum(hidden * inp_mask, axis=1) /
(1e-6 + tf.reduce_sum(inp_mask, axis=1)))
elif summary_type == "attn":
bsz = tf.shape(hidden)[1]
summary_bias = tf.get_variable("summary_bias", [d_model],
dtype=hidden.dtype,
initializer=initializer)
summary_bias = tf.tile(summary_bias[None, None], [bsz, 1, 1])
if input_mask is not None:
# [B X T] -> [B x N x F x T]
input_mask = input_mask[:, None, None, :]
summary, _ = multihead_attn(summary_bias, hidden, hidden, input_mask,
d_model, n_head, d_head, dropout, dropatt,
is_training, initializer, residual=False)
summary = summary[:, 0]
else:
raise ValueError("Unsupported summary type {}".format(summary_type))
# use another projection with `tanh` activation
summary = tf.layers.dense(
summary,
d_model,
activation=tf.tanh,
use_bias=True,
kernel_initializer=initializer,
name="summary")
return summary
|
50dd0e72c15adfa522847cb822e897c9892cd1cf
| 3,641,308
|
def encode_line(line, vocab):
"""Given a string and a vocab dict, encodes the given string"""
line = line.strip()
sequence = [vocab.get(char, vocab['<UNK>']) for char in line]
sequence_length = len(sequence)
return sequence, sequence_length
|
feb14d86dd6c219d57cffc4cd9d90d16c4e9c987
| 3,641,309
|
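A minimal illustration of the character-level encoding in encode_line above (the toy vocab is invented):
vocab = {'<UNK>': 0, 'a': 1, 'b': 2}
print(encode_line("ab?", vocab))     # ([1, 2, 0], 3) -- '?' is not in the vocab, so it maps to <UNK>
print(encode_line(" ab \n", vocab))  # ([1, 2], 2) -- leading/trailing whitespace is stripped first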
import math
def get_like_from_mats(ky_mat, l_mat, alpha, name):
""" compute the likelihood from the covariance matrix
:param ky_mat: the covariance matrix
:return: float, likelihood
"""
# catch linear algebra errors
labels = _global_training_labels[name]
# calculate likelihood
like = (-0.5 * np.matmul(labels, alpha) -
np.sum(np.log(np.diagonal(l_mat))) -
math.log(2 * np.pi) * ky_mat.shape[1] / 2)
return like
|
8fb7842547ecee25425bdaf920ff69d3386b920b
| 3,641,310
|
import os
import json
def cached(*cache_args, **cache_kwargs):
"""General-purpose.
Allows custom filename, with function fallback.
Load/save cached function data. Also handle data types gracefully.
Example:
>>> cached('myfile.json', directory=SOME_DIR)
>>> def my_thing():
>>> return {'foo': 'bar'}
"""
def outer(func, *args, **kwargs):
folder = cache_kwargs.get('directory')
if len(cache_args) > 0:
name = cache_args[0]
else:
# Guess a reasonable name.
name = func.__name__.replace('_', '-') + '.json'
def _inner(*args, **kwargs):
try:
# Allow users to specify non-existent subfolders
# but fail gracefully.
os.makedirs(folder)
except Exception:
pass
path = '{}/{}'.format(folder, name) if folder is not None else name
try:
with open(path, 'r') as _cached:
if '.json' in name:
return json.loads(_cached.read())
else:
return _cached.read()
except ValueError as exc:
if 'No JSON object could be decoded' in str(exc):
return func(*args, **kwargs)
except IOError:
res = func(*args, **kwargs)
if res is None:
return
with open(path, 'w') as _cached:
if '.json' in name:
try:
to_write = json.dumps(res, indent=4)
# The json was invalid, skip this.
except TypeError:
return res
else:
to_write = res
_cached.write(to_write)
_cached.write('\n')
return res
return _inner
return outer
|
c8f85b8f4ba80cbc991622c220e59bce80683863
| 3,641,311
|
def engulfing(data: pd.DataFrame):
"""
engulfing
Positive numbers are multi-side, negative numbers are short-side
0 is abnormal, meaning that the ratio of the absolute value of the current Candle up or down to the previous one is more than 10 times.
For machine learning convenience, a floating point number should be returned to indicate the strength of the engulfing,
in preparation for the final Machine Learning normalization.
"""
def cal(ser):
result = 0
if ser.raise_0 > 0 >= ser.raise_1 and ser.open <= ser.close_1 and ser.close >= ser.open_1:
# Current candle is going up, long
rr = abs(ser.raise_0) / abs(ser.raise_1) if 0 > ser.raise_1 else ser.raise_0/ser.avg_5_change_abs
result = rr if rr > 1 else 0
elif ser.raise_0 < 0 < ser.raise_1 and ser.open >= ser.close_1 and ser.close <= ser.open_1:
# Current candle is going down, short
rr = abs(ser.raise_0) / abs(ser.raise_1) if 0 < ser.raise_1 else ser.raise_0/ser.avg_5_change_abs
result = -rr if rr > 1 else 0
return result
data_copy = data.copy()
data_copy["raise_0"] = data_copy["close"] - data_copy["open"]
data_copy["raise_1"] = data_copy["raise_0"].shift(1)
data_copy["open_1"] = data_copy["open"].shift(1)
data_copy["close_1"] = data_copy["close"].shift(1)
# get recent 5 average price change, in order to calculate if prev day price is zero change, we still won't miss it
data_copy["avg_5_change_abs"] = data_copy.raise_0.rolling(window=5).apply(lambda ser: ser.abs().mean())
data_copy["engulfing"] = data_copy[["raise_0", "raise_1", "open", "open_1", "close", "close_1", "avg_5_change_abs"]].apply(cal, axis=1)
# print(data_copy.query("raise_1==0").tail(20))
data["engulfing"] = data_copy["engulfing"]
|
2974a62afa6b77ae2d8d02b35d7293362dd90927
| 3,641,312
|
import os
def create_dat_files(ipc_tests, test_results, dst_dir):
"""
Creates .dat files for all the test_results.
@param ipc_tests: list of ipc tests which was used to created the data.
@param test_results: dictionary containing all test results.
@param dst_dir: directory where the .dat-files shall be stored.
@return: a dict mapping each ipc method to the name of the .dat file that was created.
@rtype: dict of str to str, e.g. {"method": "method.dat", ...}
"""
dat_files = {}
for ipc_method in ipc_tests:
filename = f"{ipc_method}.dat"
filename = os.path.join(dst_dir, filename)
try:
with open(filename, 'w') as out_file:
line = "# message_size (in Bytes) throughput (in Mbit/s)\n"
out_file.write(line)
for message_size in sorted(test_results.keys()):
throughput = None
try:
throughput = test_results[message_size][ipc_method]["throughput"]
except KeyError:
throughput = "-"
line = f"{message_size}\t{throughput}\n"
out_file.write(line)
dat_files[ipc_method] = ipc_method + ".dat"
except IOError as ex:
raise IpcBenchError() from ex
return dat_files
|
363a37046b9ad8c495bed2d92c3a6b2d5adcc505
| 3,641,313
|
import numpy as np
def filter_matches(kp1, kp2, matches, ratio = 0.75):
"""
This function applies a ratio test
:param kp1: raw keypoint 1
:param kp2: raw keypoint 2
:param matches: raw matches
:param ratio: filtering ratio
:return: filtered keypoint 1, filtered keypoint 2, keypoint pairs
"""
mkp1, mkp2 = [], []
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
m = m[0]
mkp1.append( kp1[m.queryIdx] ) # keypoint with Index of the descriptor in query descriptors
mkp2.append( kp2[m.trainIdx] ) # keypoint with Index of the descriptor in train descriptors
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
kp_pairs = list(zip(mkp1, mkp2))
return p1, p2, kp_pairs
|
a54d96e092019b9629852b1bf57511f9994aba46
| 3,641,314
|
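filter_matches above implements Lowe's ratio test, so it expects k=2 nearest-neighbour matches. A sketch of typical OpenCV usage, assuming two overlapping grayscale images on disk (the file names are placeholders):
import cv2
import numpy as np

img1 = cv2.imread("scene_a.png", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("scene_b.png", cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create()
kp1, desc1 = orb.detectAndCompute(img1, None)
kp2, desc2 = orb.detectAndCompute(img2, None)

# k=2 gives the two nearest neighbours that the ratio test compares.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
raw_matches = matcher.knnMatch(desc1, desc2, k=2)

p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches, ratio=0.75)
if len(p1) >= 4:
    H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)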
import itertools
def add_derived_columns(
data: pd.DataFrame,
differences: bool = True,
second_differences: bool = True,
multiplications: bool = True,
rolling_means: int | None = 10,
rolling_stds: int | None = 10,
mean_distances: bool = True,
) -> pd.DataFrame:
"""This will create many columns that can be valuable for making predictions like difference, or rolling mean or
distance from average. Computed columns will be appened to original data. It will process all the columns,
so a lot of redundant data will be created. It is necessary do some feature extraction afterwards to remove
noncorrelated columns.
Args:
data (pd.DataFrame): Data that we want to extract more information from.
differences (bool, optional): Compute difference between n and n-1 sample. Defaults to True.
second_differences (bool, optional): Compute second difference. Defaults to True.
multiplications (bool, optional): Each pair of columns multiplied together. Defaults to True.
rolling_means (int | None, optional): Rolling mean with defined window. Defaults to 10.
rolling_stds (int | None, optional): Rolling std with defined window. Defaults to 10.
mean_distances (bool, optional): Distance from average. Defaults to True.
Returns:
pd.DataFrame: Data with more columns that can carry more information
than the original data. The number of rows can be slightly smaller. Data has the same type as input.
Example:
>>> import mydatapreprocessing as mdp
>>> data = pd.DataFrame(
... [mdp.generate_data.sin(n=100), mdp.generate_data.ramp(n=100)]
... ).T
...
>>> extended = add_derived_columns(data, differences=True, rolling_means=32)
"""
results = [data]
if differences:
results.append(
pd.DataFrame(np.diff(data.values, axis=0), columns=[f"{i} - Difference" for i in data.columns],)
)
if second_differences:
results.append(
pd.DataFrame(
np.diff(data.values, axis=0, n=2), columns=[f"{i} - Second difference" for i in data.columns],
)
)
if multiplications:
combinations = list(itertools.combinations(data.columns, 2))
combinations_names = [f"Multiplicated {i}" for i in combinations]
multiplicated = np.zeros((len(data), len(combinations)))
for i, j in enumerate(combinations):
multiplicated[:, i] = data[j[0]] * data[j[1]]
results.append(pd.DataFrame(multiplicated, columns=combinations_names))
if rolling_means:
results.append(
pd.DataFrame(
np.mean(rolling_windows(data.values.T, rolling_means), axis=2).T,
columns=[f"{i} - Rolling mean" for i in data.columns],
)
)
if rolling_stds:
results.append(
pd.DataFrame(
np.std(rolling_windows(data.values.T, rolling_stds), axis=2).T,
columns=[f"{i} - Rolling std" for i in data.columns],
)
)
if mean_distances:
mean_distanced = np.zeros(data.T.shape)
for i in range(data.shape[1]):
mean_distanced[i] = data.values.T[i] - data.values.T[i].mean()
results.append(pd.DataFrame(mean_distanced.T, columns=[f"{i} - Mean distance" for i in data.columns]))
min_length = min(len(i) for i in results)
return pd.concat([i.iloc[-min_length:].reset_index(drop=True) for i in results], axis=1)
|
080f3853f67ead678c55a3c95bf2a1c19614452c
| 3,641,315
|
from typing import List
def parse_query(
query: List[str],
format,
use_youtube,
generate_m3u,
lyrics_provider,
threads,
path_template,
) -> List[SongObject]:
"""
Parse query and return list containing song object
"""
songs_list = []
# Iterate over all search queries and add them to songs_list
for request in query:
if request.endswith(".spotdlTrackingFile"):
continue
songs_list.extend(
parse_request(
request,
format,
use_youtube,
generate_m3u,
lyrics_provider,
threads,
path_template,
)
)
# linefeed to visually separate output for each query
print()
# remove duplicates
seen_songs = set()
songs = []
for song in songs_list:
if song.file_name not in seen_songs:
songs.append(song)
seen_songs.add(song.file_name)
return songs
|
a58e5bd6acf2c12de7eb7fae7824aab924188c26
| 3,641,316
|
import os
import shutil
import time
def start_db_server():
"""Spin up a test database server"""
log.debug('start_db_server()')
try:
os.mkdir(DATA_DIR)
except FileExistsError:
shutil.rmtree(DATA_DIR)
sp.check_call(['initdb', '-D', DATA_DIR])
db_proc = sp.Popen(['postgres', '-D', DATA_DIR])
time.sleep(0.5) # Ensure the db has time to start
return db_proc
|
94bddfad0e926dd330672d55598686ae7c2fbb9f
| 3,641,317
|
def assert_address_book(address_book):
"""Fixture returning an object providing a custom address book asserts."""
return icemac.addressbook.testing.AddressBookAssertions(address_book)
|
fd9197472c86a59dd1c52dad14febe1a0b318c85
| 3,641,318
|
def make_shell_context():
"""Open shell."""
db = get_db()
return {"db": db, "Doi": Doi, "Url": Url, "FBRequest": FBRequest}
|
996988c06aa8039c7689126360ce0fea886ab392
| 3,641,319
|
import re
def get_version():
"""
Extracts the version number from the version.py file.
"""
VERSION_FILE = 'fleming/version.py'
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', open(VERSION_FILE, 'rt').read(), re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
|
b50df998254d83bd48d7a5c0863ba89b29ab529b
| 3,641,320
|
import os
def get_cache_path():
""" Return a path suitable to store cached files """
try:
os.mkdir(cache_path)
except FileExistsError:
pass
return cache_path
|
2370a3433d3a29603b625a6b5ea8b6afe23d024d
| 3,641,321
|
import math
def distance(s1, s2):
""" Euclidean distance between two sequences. Supports different lengths.
If the two series differ in length, compare the last element of the shortest series
to the remaining elements in the longer series. This is compatible with Euclidean
distance being used as an upper bound for DTW.
:param s1: Sequence of numbers
:param s2: Sequence of numbers
:return: Euclidean distance
"""
n = min(len(s1), len(s2))
ub = 0
for v1, v2 in zip(s1, s2):
ub += (v1 - v2)**2
# If the two series differ in length, compare the last element of the shortest series
# to the remaining elements in the longer series
if len(s1) > len(s2):
v2 = s2[n - 1]
for v1 in s1[n:]:
ub += (v1 - v2)**2
elif len(s1) < len(s2):
v1 = s1[n-1]
for v2 in s2[n:]:
ub += (v1 - v2)**2
return math.sqrt(ub)
|
61c308da89b98b4bbde1bba690c86559fd5e1400
| 3,641,322
|
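A short worked example of the unequal-length handling in distance above (values invented):
print(distance([0, 0, 0], [1, 1]))
# zip() pairs (0,1) and (0,1): ub = 1 + 1 = 2
# the extra element 0 in s1 is compared against s2's last value 1: ub = 3
# result: sqrt(3) ~= 1.732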
def remove_test_set_gender_and_age(nodes):
"""Remove the gender feature from a subset of the nodes for estimation"""
# todo: the 40k random can be adjusted if youre working with a subset
test_profiles = np.random.choice(nodes["user_id"].unique(), 40000, replace=False)
nodes["TRAIN_TEST"] = "TRAIN"
test_condition = nodes["user_id"].isin(test_profiles)
nodes.loc[test_condition, ["AGE", "gender"]] = np.nan
nodes.loc[test_condition, ["TRAIN_TEST"]] = "TEST"
return nodes
|
285ee3f99b49e52af52f5a03465021154c41c11b
| 3,641,323
|
import re
def valid_attribute(attr_filter_key, attr_filter_val, hit):
"""Validates the hit according to a filter attribute."""
if (attr_filter_key != "None") and (attr_filter_val != "None"):
try:
# If key for filtering is not correct or doesn't exist-> error
# should be ignored
hit_attrib_val = re.split(
"; " + attr_filter_key + " ", hit[8])[1].split(';')[0].strip('"\'').rstrip('\"')
except IndexError: # if key doesn't exist re.split will give error
hit_attrib_val = "not.found"
# If biotype of hit == attr_value from query-> continue annotation
return attr_filter_val == hit_attrib_val
else:
return True
|
c7480008f24e011f0803d82f1243a5d00c5a4030
| 3,641,324
|
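A worked example of valid_attribute above, assuming hit is a parsed GTF/GFF line (a list whose ninth field holds the attribute string; the values are invented):
hit = ["chr1", "ensembl", "gene", "100", "200", ".", "+", ".",
       'gene_id "ENSG1"; gene_biotype "protein_coding";']
print(valid_attribute("gene_biotype", "protein_coding", hit))  # True
print(valid_attribute("gene_biotype", "lincRNA", hit))         # False
print(valid_attribute("None", "None", hit))                    # True -- no filtering requested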
import calendar
def dek_to_greg(dek_day: int, dek_month: int, dek_year: int) -> tuple:
"""Returns a Gregorian date from a Dekatrian date.
Args:
dek_day (int): Day of the month.
dek_month (int): Month of the year.
dek_year (int): Year.
Return:
tuple: A tuple with the day, month and year.
"""
year_day = year_day_on_greg_date(dek_day, dek_month, dek_year)
JAN = 31
FEB = 28 + calendar.isleap(dek_year)
MAR = 31
APR = 30
MAY = 31
JUN = 30
JUL = 31
AUG = 31
SEP = 30
OCT = 31
NOV = 30
DEC = 31
gregorian_calendar_months = (
JAN,
FEB,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
) # TODO: change this to a dictionary
for month, days in enumerate(gregorian_calendar_months, start=1):
if year_day > days:
year_day -= days
else:
break
return (year_day, month, dek_year)
|
c190ff52fa104c4c522397eb46a82e263096dc84
| 3,641,325
|
def encrypt_message(kx, ky, message):
"""
Encrypts a message using ECC and AES-256
First generates a random AES key and IV with os.urandom()
Then encrypts the original message with that key
Then encrypts the AES key with the ECC key
NOTE:
This means that plaintext will not have the same ciphertext
when encrypted twice. Keep this in mind if you require reproducibility behavior
:param kx: Public key kx (int)
:param ky: Public key ky (int)
:param message: Message (bytes)
:return: Tuple (ephemeral public key point as ints (x, y),
encrypted message (bytes))
"""
ecies = ecc.ECEIS(ecc.CURVE)
r, s = ecies.exchange(ecc.ECPublicKey(ecc.AffineCurvePoint(kx, ky, ecc.CURVE)))
s = str(s).encode('utf-8')
key = sha.SHA3_512(s).digest()
message_encryptor = Encrypter(mode=AESModeOfOperationCBC(key[:32], iv=key[32:48]))
encrypted_blocks = message_encryptor.feed(oaep.oaep_pad(message))
encrypted_blocks += message_encryptor.feed()
encrypted_key = r.x, r.y
return encrypted_key, encrypted_blocks
|
e4bd462e8724a85ed0e183e048203ff23e349f34
| 3,641,326
|
from typing import List
import os
def _create_init_files(original_file_location: FilePath) -> List[str]:
"""Given the original file location of a handler, create the artifact paths for all the __init__.py files that
make the handle a valid python modules
Args:
original_file_location (str): original file path
Returns:
List[str]: all __init__.py files needed
"""
relative_to_ws_path_paths = paths.get_relative_to_workspace_path(
original_file_location
).split("/")[:-1]
base_path = paths.get_workspace_path()
rv = []
for path in relative_to_ws_path_paths:
rv.append(os.path.join(base_path, path, "__init__.py"))
base_path = os.path.join(base_path, path)
return rv
|
442f299e493bd25afd2798922b4ef0652dda0416
| 3,641,327
|
def mirror_notes(key_position: int) -> int:
"""
ๆๅฎใใใญใผใใธใทใงใณใๅ่ปขใใใๅคใ่ฟใใพใ
ๅผๆฐ
----
key_position : int
-> ใญใผใใธใทใงใณ
ๆปใๅค
------
int
-> ใญใผใใธใทใงใณใๅ่ปขใใใจใใฎใญใผใใธใทใงใณ
"""
return 512 - key_position
|
03ad894eca67405bb79cbf6ea1ecef12b19958ed
| 3,641,328
|
def import_odim_hdf5(filename, **kwargs):
"""Import a precipitation field (and optionally the quality field) from a
HDF5 file conforming to the ODIM specification.
Parameters
----------
filename : str
Name of the file to import.
Other Parameters
----------------
qty : {'RATE', 'ACRR', 'DBZH'}
The quantity to read from the file. The currently supported identifiers
are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall
accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value
is 'RATE'.
Returns
-------
out : tuple
A three-element tuple containing the OPERA product for the requested
quantity and the associated quality field and metadata. The quality
field is read from the file if it contains a dataset whose quantity
identifier is 'QIND'.
"""
if not h5py_imported:
raise MissingOptionalDependency(
"h5py package is required to import "
"radar reflectivity composites using ODIM HDF5 specification "
"but it is not installed"
)
qty = kwargs.get("qty", "RATE")
if qty not in ["ACRR", "DBZH", "RATE"]:
raise ValueError(
"unknown quantity %s: the available options are 'ACRR', 'DBZH' and 'RATE'" % qty
)
f = h5py.File(filename, "r")
R = None
Q = None
for dsg in f.items():
if dsg[0][0:7] == "dataset":
what_grp_found = False
# check if the "what" group is in the "dataset" group
if "what" in list(dsg[1].keys()):
qty_, gain, offset, nodata, undetect = _read_odim_hdf5_what_group(
dsg[1]["what"]
)
what_grp_found = True
for dg in dsg[1].items():
if dg[0][0:4] == "data":
# check if the "what" group is in the "data" group
if "what" in list(dg[1].keys()):
qty_, gain, offset, nodata, undetect = _read_odim_hdf5_what_group(
dg[1]["what"]
)
elif not what_grp_found:
raise DataModelError(
"Non ODIM compilant file: "
"no what group found from {} "
"or its subgroups".format(dg[0])
)
if qty_.decode() in [qty, "QIND"]:
ARR = dg[1]["data"][...]
MASK_N = ARR == nodata
MASK_U = ARR == undetect
MASK = np.logical_and(~MASK_U, ~MASK_N)
if qty_.decode() == qty:
R = np.empty(ARR.shape)
R[MASK] = ARR[MASK] * gain + offset
R[MASK_U] = 0.0
R[MASK_N] = np.nan
elif qty_.decode() == "QIND":
Q = np.empty(ARR.shape, dtype=float)
Q[MASK] = ARR[MASK]
Q[~MASK] = np.nan
if R is None:
raise IOError("requested quantity %s not found" % qty)
where = f["where"]
proj4str = where.attrs["projdef"].decode()
pr = pyproj.Proj(proj4str)
LL_lat = where.attrs["LL_lat"]
LL_lon = where.attrs["LL_lon"]
UR_lat = where.attrs["UR_lat"]
UR_lon = where.attrs["UR_lon"]
if (
"LR_lat" in where.attrs.keys()
and "LR_lon" in where.attrs.keys()
and "UL_lat" in where.attrs.keys()
and "UL_lon" in where.attrs.keys()
):
LR_lat = float(where.attrs["LR_lat"])
LR_lon = float(where.attrs["LR_lon"])
UL_lat = float(where.attrs["UL_lat"])
UL_lon = float(where.attrs["UL_lon"])
full_cornerpts = True
else:
full_cornerpts = False
LL_x, LL_y = pr(LL_lon, LL_lat)
UR_x, UR_y = pr(UR_lon, UR_lat)
if full_cornerpts:
LR_x, LR_y = pr(LR_lon, LR_lat)
UL_x, UL_y = pr(UL_lon, UL_lat)
x1 = min(LL_x, UL_x)
y1 = min(LL_y, LR_y)
x2 = max(LR_x, UR_x)
y2 = max(UL_y, UR_y)
else:
x1 = LL_x
y1 = LL_y
x2 = UR_x
y2 = UR_y
if "xscale" in where.attrs.keys() and "yscale" in where.attrs.keys():
xpixelsize = where.attrs["xscale"]
ypixelsize = where.attrs["yscale"]
else:
xpixelsize = None
ypixelsize = None
if qty == "ACRR":
unit = "mm"
transform = None
elif qty == "DBZH":
unit = "dBZ"
transform = "dB"
else:
unit = "mm/h"
transform = None
if np.any(np.isfinite(R)):
thr = np.nanmin(R[R > np.nanmin(R)])
else:
thr = np.nan
metadata = {
"projection": proj4str,
"ll_lon": LL_lon,
"ll_lat": LL_lat,
"ur_lon": UR_lon,
"ur_lat": UR_lat,
"x1": x1,
"y1": y1,
"x2": x2,
"y2": y2,
"xpixelsize": xpixelsize,
"ypixelsize": ypixelsize,
"yorigin": "upper",
"institution": "Odyssey datacentre",
"accutime": 15.0,
"unit": unit,
"transform": transform,
"zerovalue": np.nanmin(R),
"threshold": thr,
}
f.close()
return R, Q, metadata
|
650875bb3d04627f4570507892ee26b42912c39e
| 3,641,329
|
def sugerir(update: Update, _: CallbackContext) -> int:
"""Show new choice of buttons"""
query = update.callback_query
query.answer()
keyboard = [
[
InlineKeyboardButton("\U0001F519 Volver", callback_data=str(NINE)),
InlineKeyboardButton("\U0001F44B Salir", callback_data=str(TEN)),
]
]
reply_markup = InlineKeyboardMarkup(keyboard)
query.edit_message_text(
text="\U0001F91A Sugerir cuentos:\n\n Responde este mensaje para sugerir un personaje o para realizar el aporte de un cuento\n", reply_markup=reply_markup
)
return NINE
|
e278c6bdab82e4fdfc38c7a4bb58a5511a003515
| 3,641,330
|
def clone_subgraph(*, outputs, inputs, new_inputs, suffix="cloned"):
"""
Take all of the tensorflow nodes between `outputs` and `inputs` and clone
them but with `inputs` replaced with `new_inputs`.
Args:
outputs (List[tf.Tensor]): list of output tensors
inputs (List[tf.Tensor]): list of input tensors
new_inputs (List[tf.Tensor]): list of new input tensors
suffix (str, optional): suffix to the transformed operation names
Returns:
List[T]: list of transformed outputs
"""
return transform(outputs=outputs, inputs=inputs,
transformed_inputs=new_inputs, transformer=lambda op,
inputs: clone_op(op, inputs, suffix=suffix))
|
b61d73d79635551f8277cbc0c2da97d0c5c2908e
| 3,641,331
|
async def refresh_replacements(db, sample_id: str) -> list:
"""
Remove sample file `replacement` fields if the linked files have been deleted.
:param db: the application database client
:param sample_id: the id of the sample to refresh
:return: the updated files list
"""
files = await virtool.db.utils.get_one_field(db.samples, "files", sample_id)
for file in files:
replacement = file.get("replacement")
if replacement and not await db.files.count_documents({"_id": replacement["id"]}):
file["replacement"] = None
document = await db.samples.find_one_and_update({"_id": sample_id}, {
"$set": {
"files": files
}
})
return document["files"]
|
43667801bf6bb96edbeb59bf9d538b62c9bf9785
| 3,641,332
|
def torch_model(model_name, device, checkpoint_path=None):
""" Select an ImageNet model by name and optionally load weights from a checkpoint. """
if checkpoint_path:
pretrained = False
else:
pretrained = True
model = models.__dict__[model_name](pretrained)
if hasattr(model, 'classifier'):
if model_name == 'mobilenet_v2':
model.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(model.classifier[-1].in_features, 2))
else:
model.classifier = nn.Sequential(
nn.Linear(model.classifier.in_features, 2))
elif hasattr(model, 'fc'):
model.fc = nn.Linear(model.fc.in_features, 2)
model.to(device)
if checkpoint_path:
load_checkpoint(checkpoint_path, model, device)
return model
|
831cf1edd83b76049e7f6d60434961cbd44e4bd9
| 3,641,333
|
from typing import Tuple
from datetime import datetime
def get_timezone() -> Tuple[datetime.tzinfo, str]:
"""Discover the current time zone and it's standard string representation (for source{d})."""
dt = get_datetime_now().astimezone()
tzstr = dt.strftime("%z")
tzstr = tzstr[:-2] + ":" + tzstr[-2:]
return dt.tzinfo, tzstr
|
f73cedb8fb91c75a19104d4d8bef29f73bfb9b1a
| 3,641,334
|
from typing import List
from typing import Tuple
def get_model(input_shape: List[int], weight_array: np.array,
batches_per_step: int, replication_factor: int, batch_size: int,
channels: int, data_len: int, synthetic_data: bool,
buffer_streams: bool) -> Tuple:
"""Get a simple model for comparison with buffer streams on and off.
Adapted from prefetch_test.py as we require to test the validity of streams
here as well.
Args:
batches_per_step (int): Batches to run per step
replication_factor (int): Replicas to run
batch_size (int): Number of samples per model run
channels (int): Number of channels e.g. RGB = 3
data_len (int): Data size
synthetic_data (bool): Use synthetic data (zeros in this case)
buffer_streams (bool): The test option: whether to create ops
before the stream in order to schedule data loading as part of
graph scheduling. See T29603.
Returns:
Tuple: session, anchors, input_shape, label_shape required to run the model
"""
micro_batch_size = batch_size // (replication_factor)
builder = popart.Builder()
data_shape = popart.TensorInfo("FLOAT", input_shape)
lbl_shape = popart.TensorInfo("INT32", [micro_batch_size])
w = builder.addInitializedInputTensor(weight_array)
ip = builder.addInputTensor(data_shape, "main_input_123")
lb = builder.addInputTensor(lbl_shape, "label_input_456")
a = builder.aiOnnx.matmul([ip, w])
o = builder.reshape_const(
builder.aiOnnx, [a],
[micro_batch_size, channels * data_len * data_len])
relu = builder.aiOnnx.relu([o])
sm = builder.aiOnnx.softmax([relu], axis=0, debugContext="output")
builder.addOutputTensor(sm)
o = builder.aiGraphcore.nllloss([sm, lb],
reduction=popart.ReductionType.Mean)
art = popart.AnchorReturnType("All")
data_flow = popart.DataFlow(batches_per_step, {
ip: art,
lb: art,
o: art,
sm: art,
a: art,
relu: art
})
opts = popart.SessionOptions()
opts.useHostCopyOps = buffer_streams
# TODO: Fix outlining
opts.enableOutlining = False
ipus = 1
if replication_factor > 1:
opts.replicatedGraphCount = replication_factor
opts.enableReplicatedGraphs = True
ipus *= replication_factor
device = tu.create_test_device(ipus)
assert device
patterns = popart.Patterns(popart.PatternsLevel.Minimal).enablePattern(
"MatMulLhsGradOp", True).enablePattern("MatMulRhsGradOp", True)
patterns.InPlace = False
if synthetic_data:
opts.syntheticDataMode = popart.SyntheticDataMode.Zeros
session = popart.TrainingSession(fnModel=builder.getModelProto(),
dataFlow=data_flow,
loss=o,
optimizer=popart.ConstSGD(LR),
userOptions=opts,
deviceInfo=device,
patterns=patterns)
session.setRandomSeed(0)
session.prepareDevice()
label_shape = [micro_batch_size]
if replication_factor > 1:
input_shape = [replication_factor] + input_shape
label_shape = [replication_factor] + label_shape
if batches_per_step > 1:
input_shape = [batches_per_step] + input_shape
label_shape = [batches_per_step] + label_shape
anchors = session.initAnchorArrays()
return session, anchors, label_shape
|
ab71faae9faeb082c4139a52dddac6116ee0a194
| 3,641,335
|
from collections import OrderedDict
import ast
import json
def shell_safe_json_parse(json_or_dict_string, preserve_order=False):
""" Allows the passing of JSON or Python dictionary strings. This is needed because certain
JSON strings in CMD shell are not received in main's argv. This allows the user to specify
the alternative notation, which does not have this problem (but is technically not JSON). """
try:
if not preserve_order:
return json.loads(json_or_dict_string)
return json.loads(json_or_dict_string, object_pairs_hook=OrderedDict)
except ValueError as json_ex:
try:
return ast.literal_eval(json_or_dict_string)
except SyntaxError:
raise CLIError(json_ex)
except ValueError as ex:
# log the exception which could be a python dict parsing error.
logger.debug(ex)
# raise json_ex error which is more readable and likely.
raise CLIError(json_ex)
|
f812a3c13a4c460fac1569d7ca9fe143312e43b8
| 3,641,336
|
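The fallback behaviour of shell_safe_json_parse above in practice: strict JSON is tried first, then Python-literal syntax (the kind CMD tends to mangle JSON into). Both calls below return the same dict:
print(shell_safe_json_parse('{"colors": ["red", "blue"]}'))   # parsed by json.loads
print(shell_safe_json_parse("{'colors': ['red', 'blue']}"))   # json fails, ast.literal_eval succeeds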
def get_timed_roadmaps_grid_common(
ins: Instance, T: int, size: int,
) -> list[TimedRoadmap]:
"""[deprecated] get grid roadmap shared by all agents
Args:
ins (Instance): instance
T (int): assumed makespan
size (int): size x size grid will be constructed
Returns:
list[TimedRoadmap]: timed roadmaps shared by all agents
Note:
use get_timed_roadmaps_grid_common_2d_fast in 2d environment
"""
if ins.dim == 2:
return get_timed_roadmaps_grid_common_2d_fast(ins, T, size)
return get_common_roadmaps(ins, T, get_grid(size, ins.rads[0], ins))
|
9b8e283ad66db35132393b53af2bfa36fc4aaf83
| 3,641,337
|
def arithmetic_series(a: int, n: int, d: int = 1) -> int:
"""Returns the sum of the arithmetic sequence with parameters a, n, d.
a: The first term in the sequence
n: The total number of terms in the sequence
d: The difference between any two terms in the sequence
"""
return n * (2 * a + (n - 1) * d) // 2
|
168f0b07cbe6275ddb54c1a1390b41a0f340b0a6
| 3,641,338
|
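arithmetic_series above uses the standard sum formula S_n = n(2a + (n-1)d)/2; for example:
print(arithmetic_series(1, 100, 1))   # 100 * (2 + 99) // 2 = 5050, the sum 1 + 2 + ... + 100
print(arithmetic_series(3, 4, 5))     # 3 + 8 + 13 + 18 = 42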
import re
def get_arc_proxy_user(proxy_file=None):
"""
Returns the owner of the arc proxy. When *proxy_file* is *None*, it defaults to the result of
:py:func:`get_arc_proxy_file`. Otherwise, when it evaluates to *False*, ``arcproxy`` is queried
without a custom proxy file.
"""
out = _arc_proxy_info(args=["--infoitem=identity"], proxy_file=proxy_file)[1].strip()
try:
return re.match(r".*\/CN\=([^\/]+).*", out.strip()).group(1)
except:
raise Exception("no valid identity found in arc proxy: {}".format(out))
|
01f1040cd1217d7722a691a78b5884125865cf39
| 3,641,339
|
def pass_hot_potato(names, num):
"""Pass hot potato.
A hot potato is sequentially passed to ones in a queue line.
After a number of passes, the one who got the hot potato is out.
Then the passing hot potato game is launched again,
until the last person is remaining one.
"""
name_queue = Queue()
for name in names:
name_queue.enqueue(name)
while name_queue.size() > 1:
for i in xrange(num):
name_queue.enqueue(name_queue.dequeue())
name_queue.dequeue()
return name_queue.dequeue()
|
f78a635bdf3138809329ef8ad97934b125b9335a
| 3,641,340
|
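The Queue class used by pass_hot_potato above is not shown; a minimal, equivalent sketch with collections.deque (front of the line at the left end) behaves the same way:
from collections import deque

def pass_hot_potato_deque(names, num):
    q = deque(names)     # front of the queue is the left end
    while len(q) > 1:
        q.rotate(-num)   # pass the potato num times, front to back
        q.popleft()      # whoever holds it now is out
    return q.popleft()

print(pass_hot_potato_deque(["Bill", "David", "Susan", "Jane", "Kent", "Brad"], 7))  # Susan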
import copy
def convert_timeseries_dataframe_to_supervised(df: pd.DataFrame, namevars, target, n_in=1, n_out=0, dropT=True):
"""
Transform a time series in dataframe format into a supervised learning dataset while
keeping dataframe intact.
Returns the transformed pandas DataFrame, the name of the target column and the names of the predictor columns
Arguments:
df: A timeseries dataframe that you want to convert to Supervised dataset.
namevars: columns that you want to lag in the data frame. Other columns will be untouched.
target: this is the target variable you intend to use in supervised learning
n_in: Number of lag periods as input (X).
n_out: Number of future periods (optional) as output for the target variable (y).
dropT: Boolean - whether or not to drop columns at time 't'.
Returns:
df: This is the transformed data frame with the time series columns lagged.
Note that the original columns are dropped if you set the 'dropT' argument to True.
If not, they are preserved.
This Pandas DataFrame of lagged time series data is immediately available for supervised learning.
rtype: pd.DataFrame, str, List[str]
"""
target = copy.deepcopy(target)
df = copy.deepcopy(df)
int_vars = df.select_dtypes(include='integer').columns.tolist()
# Notice that we will create a sequence of columns from name vars with suffix (t-n,... t-1), etc.
drops = []
int_changes = []
for i in range(n_in, -1, -1):
if i == 0:
for var in namevars:
addname = var + '(t)'
df = df.rename(columns={var:addname})
drops.append(addname)
if var in int_vars:
int_changes.append(addname)
else:
for var in namevars:
addname = var + '(t-' + str(i) + ')'
df[addname] = df[var].shift(i)
if var in int_vars:
int_changes.append(addname)
## forecast sequence (t, t+1,... t+n)
if n_out == 0:
n_out = False
for i in range(1, n_out):
for var in namevars:
addname = var + '(t+' + str(i) + ')'
df[addname] = df[var].shift(-i)
# drop rows with NaN values
df = df.dropna()
### Make sure that whatever vars came in as integers return back as integers!
if int_changes:
### only do this if there are some changes to implement ###
df[int_changes] = df[int_changes].astype(np.int64)
# put it all together
for each_target in target:
df = df.rename(columns={each_target+'(t)':each_target})
if dropT:
### If dropT is true, all the "t" series of the target column (in case it is in the namevars)
### will be removed if you don't want the target to learn from its "t" values.
### Similarly, we will also drop all the "t" series of name_vars if you set dropT to True.
try:
drops.remove(target)
except:
pass
df.drop(drops, axis=1, inplace=True)
preds = [x for x in list(df) if x not in target]
return df, target, preds
|
b62296680f6a871f20078e55eefa20f09392b012
| 3,641,341
|
def build_graph(adj_mat):
"""build sparse diffusion graph. The adjacency matrix need to preserves divergence."""
# sources, targets = adj_mat.nonzero()
# edgelist = list(zip(sources.tolist(), targets.tolist()))
# g = Graph(edgelist, edge_attrs={"weight": adj_mat.data.tolist()}, directed=True)
g = Graph.Weighted_Adjacency(adj_mat)
return g
|
bdc8dc5d1c107086c4c548b500f6958bdbe48103
| 3,641,342
|
def retrieve_context_path_comp_service_end_point_end_point(uuid): # noqa: E501
"""Retrieve end-point
Retrieve operation of resource: end-point # noqa: E501
:param uuid: ID of uuid
:type uuid: str
:rtype: List[str]
"""
return 'do some magic!'
|
e3169e139b5992daf00411b694cf77436fb17fba
| 3,641,343
|
def get_external_repos(gh):
"""
Get all external repositories from the `repos.config` file
"""
external_repos = []
with open("repos.config") as f:
content = f.readlines()
content = [x.strip() for x in content]
for entry in content:
org_name, repo_name = entry.split('/')
external_repos.append(gh.get_organization(org_name).get_repo(repo_name))
return external_repos
|
a83515acd77c7ef9e30bf05d8d4478fa833ab5bc
| 3,641,344
|
def handle_standard_table(pgconn, table_name, columns, record):
"""
:param pgconn:
:param table_name:
:param columns:
:param record:
:return:
"""
data = dict(record)
log.debug("Standard handler: {}".format(data))
if 'id' in columns:
data_exists = pgconn.execute(
"SELECT 1 FROM {table_name} WHERE id = :id".format(table_name=table_name),
{
'id': data['id']
}
).fetchone()
if data_exists:
pgconn.execute(update_statement(table_name, columns, data), record)
else:
pgconn.execute(insert_statement(table_name, columns, data), record)
else:
pgconn.execute(insert_statement(table_name, columns, data), record)
return True
|
c6414b2f60cf90ed6a671c0b7affcb2d6b9e75c9
| 3,641,345
|
import json
def load_fit_profile():
"""
This method returns the FIT profile types based on the Profile.xlsx that is included in the Garmin FIT SDK (https://developer.garmin.com/fit/download/).
The returned profile can be used to translate e.g. Garmin product names to their corresponding integer product ids.
"""
fpath = _fit_profile_json_path()
with fpath.open("r") as fit_profile_file:
profile = json.load(fit_profile_file)
return profile
|
13108546c2d88d77d090b222c1b3ff2b59208310
| 3,641,346
|
def mmethod(path, *args, **kwargs):
"""
Returns a mapper function that runs the path method for each instance of
the iterable collection.
>>> mmethod('start')
is equivalent to
>>> lambda thread: thread.start()
>>> mmethod('book_set.filter', number_of_pages__gte=100)
is equivalent to
>>> lambda author: author.book_set.filter(number_of_pages__gte=100)
"""
return lambda x: mattr(path)(x)(*args, **kwargs)
|
6ded620d190d338d981c433514018a4182b7e207
| 3,641,347
|
def generate_test_demand_design_image() -> TestDataSet:
"""
Returns
-------
test_data : TestDataSet
2800 points of test data, uniformly sampled from (price, time, emotion). Emotion is transformed into img.
"""
org_test: TestDataSet = generate_test_demand_design(False)
treatment = org_test.treatment
covariate = org_test.covariate
target = org_test.structural
emotion_arr = covariate[:, 1].astype(int)
emotion_img = attach_image(emotion_arr, False, 42)
covariate_img = np.concatenate([covariate[:, 0:1], emotion_img], axis=1)
return TestDataSet(treatment=treatment,
covariate=covariate_img,
structural=target)
|
238cf11480e0d23f30b426ed19877126edc010fa
| 3,641,348
|
def value_iteration(game, depth_limit, threshold):
"""Solves for the optimal value function of a game.
For small games only! Solves the game using value iteration,
with the maximum error for the value function less than threshold.
This algorithm works for sequential 1-player games or 2-player zero-sum
games, with or without chance nodes.
Arguments:
game: The game to analyze, as returned by `load_game`.
depth_limit: How deeply to analyze the game tree. Negative means no limit, 0
means root-only, etc.
threshold: Maximum error for state values.
Returns:
A `dict` with string keys and float values, mapping string encoding of
states to the values of those states.
"""
if game.num_players() not in (1, 2):
raise ValueError("Game must be a 1-player or 2-player game")
if (game.num_players() == 2 and
game.get_type().utility != pyspiel.GameType.Utility.ZERO_SUM):
raise ValueError("2-player games must be zero sum games")
# We expect Value Iteration to be used with perfect information games, in
# which `str` is assumed to display the state of the game.
states = get_all_states.get_all_states(
game, depth_limit, True, False, to_string=str)
values = {}
transitions = {}
_initialize_maps(states, values, transitions)
error = threshold + 1 # A value larger than threshold
min_utility = game.min_utility()
while error > threshold:
error = 0
for key, state in states.items():
if state.is_terminal():
continue
player = state.current_player()
value = min_utility if player == 0 else -min_utility
for action in state.legal_actions():
next_states = transitions[(key, action)]
q_value = sum(p * values[next_state] for next_state, p in next_states)
if player == 0:
value = max(value, q_value)
else:
value = min(value, q_value)
error = max(abs(values[key] - value), error)
values[key] = value
return values
|
2a9ae3903666ee16e86fe30a0458707394fe4695
| 3,641,349
|
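A sketch of how value_iteration above is typically driven from OpenSpiel (assuming the open_spiel Python package is installed; tic-tac-toe is small enough to solve exactly, and optimal play is a draw):
import pyspiel

game = pyspiel.load_game("tic_tac_toe")
values = value_iteration(game, depth_limit=-1, threshold=0.01)
root = str(game.new_initial_state())
print(values[root])   # 0.0 -- optimal play ends in a draw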
import typing
def printable_dataframe(data: typing.List[typing.Mapping], ignore_phase: bool = True) -> pd.DataFrame:
""" Print performance results using pandas data frames.
TODO: Re-write me.
"""
columns = {'name': 'Model', 'input_shape': 'Input shape', 'num_parameters': '#Parameters',
'param_memory': 'Model size (MB) FP32', 'flops': 'GFLOPs (multiply-add)',
'activation_memory': 'Activation size (MB) FP32'}
column_order = ['name', 'input_shape', 'num_parameters', 'param_memory', 'flops', 'activation_memory']
if not ignore_phase:
column_order.insert(1, 'phase')
columns['phase'] = 'Phase'
df = pd.DataFrame(data, columns=column_order)
df.rename(columns=columns, inplace=True)
return df
|
c76841d06891da4019e32194347f8d87c11dbea1
| 3,641,350
|
def _import_and_infer(save_dir, inputs):
"""Import a SavedModel into a TF 1.x-style graph and run `signature_key`."""
graph = ops.Graph()
with graph.as_default(), session_lib.Session() as session:
model = loader.load(session, [tag_constants.SERVING], save_dir)
signature = model.signature_def[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
assert set(inputs.keys()) == set(signature.inputs.keys())
feed_dict = {}
for arg_name in inputs.keys():
feed_dict[graph.get_tensor_by_name(signature.inputs[arg_name].name)] = (
inputs[arg_name])
output_dict = {}
for output_name, output_tensor_info in signature.outputs.items():
output_dict[output_name] = graph.get_tensor_by_name(
output_tensor_info.name)
return session.run(output_dict, feed_dict=feed_dict)
|
1610c4d52fa8d18a770f1f347b9cd30b4652ab8b
| 3,641,351
|
import os
def new_module(fname, main=False, parser_vmx=None):
"""
`fname` is Python str (or None for internal Module)
`main` is Python True for main program (from command line)
`parser_vmx` is Python str for parser VMX file to use
returns (CModule, CClosure) if newly loaded module
the Closure is the (bootstrap) code to populate the Module
returns (CModule, None) if previously loaded (or internal Module)
"""
if fname:
sfname = mkstr(fname)
else:
sfname = [] # should not be used! illegal as dict key!
md = Module.getprop('modules') # Module Dict/directory (Class variable)
mdd = md.getvalue() # module directory dict
if fname and sfname in mdd: # previously loaded?
return mdd[sfname], None # yes; return it, no bootstrap needed
scope = scopes.Scope(root_scope) # create base scope for module
mod = CModule(scope)
scope.defvar(const.DOC, null_value)
scope.defvar('__xxl', CObject(class_by_name('XXLObject')))
if fname:
mdd[sfname] = mod # save as previously loaded
mi = new_modinfo(main=main, module=mod, fname=fname, parser_vmx=parser_vmx)
mod.modinfo = mi
scope.defvar(const.MODINFO, mi) # make ModInfo visible in module namespace
if fname is None: # internal module?
return mod, None
bootstrap_vmx = xxlobj.find_in_lib_path(os.environ.get('XXL_BOOTSTRAP',
'bootstrap.vmx'))
# XXX handle Exceptions for I/O, bad JSON, bad instructions
code = vmx.load_vm_json(bootstrap_vmx)
boot = CClosure(code, mod.scope) # CClosure with bootstrap_vmx code
return mod, boot
|
c5732841b213d41e8bb466fb755804dd3f0902f2
| 3,641,352
|
def url_by_properties(
config_properties,
db_type,
submit_dir=None,
top_dir=None,
rundir_properties=None,
cl_properties=None,
props=None,
):
""" Get URL from the property file """
# Validate parameters
if not db_type:
raise ConnectionError(
"A type should be provided with the property file.", db_type=db_type
)
# Parse, and process properties
if not props:
props = properties.Properties()
props.new(config_file=config_properties, rundir_propfile=rundir_properties)
_merge_properties(props, cl_properties)
if db_type.upper() == DBType.JDBCRC:
dburi = _get_jdbcrc_uri(props)
elif db_type.upper() == DBType.MASTER:
dburi = _get_master_uri(props)
elif db_type.upper() == DBType.WORKFLOW:
dburi = _get_workflow_uri(props, submit_dir, top_dir)
else:
raise ConnectionError("Invalid database type '%s'." % db_type, db_type=db_type)
if dburi:
log.debug("Using database: %s" % dburi)
return dburi
raise ConnectionError("Unable to find a database URI to connect.", db_type=db_type)
|
3f742af88320eccdacd81603ceeaf94116852798
| 3,641,353
|
def nth(seq, idx):
"""Return the nth item of a sequence. Constant time if list, tuple, or str;
linear time if a generator"""
return get(seq, idx)
|
cca44dca33d19a2e0db355be525009dce752445c
| 3,641,354
|
import psutil
def get_internal_plot_drive_to_use():
"""
Same as above but returns the next drive. This is the drive we will use for internal plots. We do
this to make sure we are not over saturating a single drive with multiple plot copies. When you run
out of drives, these scripts will fail.
"""
available_drives = []
try:
for part in psutil.disk_partitions(all=False):
if part.device.startswith('/dev/sd') \
and part.mountpoint.startswith('/mnt/enclosure') \
and get_drive_info('space_free_plots_by_mountpoint', part.mountpoint) >= 1 \
and get_drive_by_mountpoint(part.mountpoint) not in chianas.offlined_drives:
drive = get_drive_by_mountpoint(part.mountpoint)
available_drives.append((part.mountpoint, part.device, drive))
return (natsorted(available_drives)[1])
except IndexError:
log.debug("ERROR: No Additional Internal Drives Found, Please add drives, run auto_drive.py and try again!")
exit()
|
de86c7a6bb61ba9ebbf7555dae2d07576f8ccb3e
| 3,641,355
|
def _build_discretize_fn(value_type, stochastic, beta):
"""Builds a `tff.tf_computation` for discretization."""
@computations.tf_computation(value_type, tf.float32, tf.float32)
def discretize_fn(value, scale_factor, prior_norm_bound):
return _discretize_struct(value, scale_factor, stochastic, beta,
prior_norm_bound)
return discretize_fn
|
75f9f50ec376b1a10b5fcb629527a873b8768235
| 3,641,356
|
def expand_mapping_target(namespaces, val):
"""Expand a mapping target, expressed as a comma-separated list of
CURIE-like strings potentially prefixed with ^ to express inverse
properties, into a list of (uri, inverse) tuples, where uri is a URIRef
and inverse is a boolean."""
vals = [v.strip() for v in val.split(',')]
ret = []
for v in vals:
inverse = False
if v.startswith('^'):
inverse = True
v = v[1:]
ret.append((expand_curielike(namespaces, v), inverse))
return ret
|
b4a4f08d39728c8f61b7b373a521890f88d6f912
| 3,641,357
|
def home(request):
"""Handle the default request, for when no endpoint is specified."""
return Response('This is Michael\'s REST API!')
|
a37a2eaa68366de4d8542357c043c4e29ac7a9f9
| 3,641,358
|
def create_message(sender, to, subject, message_text, is_html=False):
"""Create a message for an email.
Args:
sender: Email address of the sender.
to: Email address of the receiver.
subject: The subject of the email message.
message_text: The text of the email message.
is_html: Whether message_text should be sent as HTML instead of plain text.
Returns:
An object containing a base64url encoded email object.
"""
if is_html:
message = MIMEText(message_text, "html")
else:
message = MIMEText(message_text)
message["to"] = to
message["from"] = sender
message["subject"] = subject
encoded_message = urlsafe_b64encode(message.as_bytes())
return {"raw": encoded_message.decode()}
|
2b5dc225df5786df9f2650631d209c53e3e8145b
| 3,641,359
|
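A hedged usage sketch for create_message above with the Gmail API client (it assumes an authorized `service` object, e.g. from googleapiclient.discovery.build("gmail", "v1", credentials=creds); addresses are placeholders):
msg = create_message(
    sender="me@example.com",
    to="you@example.com",
    subject="Hello",
    message_text="<p>Hi there!</p>",
    is_html=True,
)
# 'me' tells the Gmail API to use the authenticated user's mailbox.
sent = service.users().messages().send(userId="me", body=msg).execute()
print(sent["id"])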
def get_agent(runmode, name): # noqa: E501
"""get_agent
# noqa: E501
:param runmode:
:type runmode: str
:param name:
:type name: str
:rtype: None
"""
return 'do some magic!'
|
065302bb7793eff12973208db5f35f3494a83930
| 3,641,360
|
def find_splits(array1: list, array2: list) -> list:
"""Find the split points of the given array of events"""
keys = set()
for event in array1:
keys.add(event["temporalRange"][0])
keys.add(event["temporalRange"][1])
for event in array2:
keys.add(event["temporalRange"][0])
keys.add(event["temporalRange"][1])
return list(sorted(keys))
|
c52f696caddf35fa050621e7668eec06686cee14
| 3,641,361
|
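A small worked example of find_splits above (the event dicts are invented for illustration):
a = [{"temporalRange": [0, 5]}, {"temporalRange": [5, 9]}]
b = [{"temporalRange": [3, 8]}]
print(find_splits(a, b))   # [0, 3, 5, 8, 9] -- every unique start/end point, sorted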
def to_subtask_dict(subtask):
"""
:rtype: ``dict``
"""
result = {
'id': subtask.id,
'key': subtask.key,
'summary': subtask.fields.summary
}
return result
|
5171d055cc693b1aa00976c063188a907a7390dc
| 3,641,362
|
from typing import Tuple
from typing import Optional
def _partition_labeled_span(
contents: Text, labeled_span: substitution.LabeledSpan
) -> Tuple[substitution.LabeledSpan, Optional[substitution.LabeledSpan],
Optional[substitution.LabeledSpan]]:
"""Splits a labeled span into first line, intermediate, last line."""
start, end = labeled_span.span
first_newline = contents.find('\n', start, end)
if first_newline == -1:
return (labeled_span, None, None)
first, remainder = _split_labeled_span_after(labeled_span, first_newline)
last_newline = contents.rfind('\n', *remainder.span)
if last_newline == -1:
return (first, None, remainder)
between, last = _split_labeled_span_after(remainder, last_newline)
return (first, between, last)
|
6f22341d32c03ba0057fbfd6f08c88ac8736220f
| 3,641,363
|
def is_active(relation_id: RelationID) -> bool:
"""Retrieve an activation record from a relation ID."""
# query to DB
try:
sups = db.session.query(RelationDB) \
.filter(RelationDB.supercedes_or_suppresses == int(relation_id)) \
.first()
except Exception as e:
raise DBLookUpError from e
# return true if there is no superceder/suppressor
return bool(sups is None)
|
352f44e2f025ac0918519d0fe8e513b3871be7b9
| 3,641,364
|
def vectorize_with_similarities(text, vocab_tokens, vocab_token_to_index, vocab_matrix):
"""
Generate a vector representation of a text string based on a word similarity matrix. The resulting vector has
n positions, where n is the number of words or tokens in the full vocabulary. The value at each position indicates
the maximum similarity between that corresponding word in the vocabulary and any of the words or tokens in the
input text string, as given by the input similarity matrix. Therefore, this is similar to an n-grams approach but
uses the similarity between non-identical words or tokens to make the vector semantically meaningful.
Args:
text (str): Any arbitrary text string.
vocab_tokens (list of str): The words or tokens that make up the entire vocabulary.
vocab_token_to_index (dict of str:int): Mapping between words in the vocabulary and an index in rows and columns of the matrix.
vocab_matrix (numpy.array): A pairwise distance matrix holding the similarity values between all possible pairs of words in the vocabulary.
Returns:
numpy.Array: A numerical vector with length equal to the size of the vocabulary.
"""
doc_tokens = [token for token in text.split() if token in vocab_tokens]
vector = [max([vocab_matrix[vocab_token_to_index[vocab_token]][vocab_token_to_index[doc_token]] for doc_token in doc_tokens]) for vocab_token in vocab_tokens]
return(vector)
|
5b843ffbfdefbf691fb5766bbe6772459568cf78
| 3,641,365
|
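A tiny worked example of vectorize_with_similarities above, with an invented three-word vocabulary and similarity matrix:
import numpy as np

vocab_tokens = ["cat", "dog", "car"]
vocab_token_to_index = {"cat": 0, "dog": 1, "car": 2}
vocab_matrix = np.array([[1.0, 0.8, 0.1],
                         [0.8, 1.0, 0.1],
                         [0.1, 0.1, 1.0]])

print(vectorize_with_similarities("dog", vocab_tokens, vocab_token_to_index, vocab_matrix))
# [0.8, 1.0, 0.1] -- each position holds the best similarity between that vocab word and any word in the text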
def get_puppet_node_cert_from_server(node_name):
"""
Init environment to connect to Puppet Master and retrieve the certificate for that node in the server (if exists)
:param node_name: Name of target node
:return: Certificate for that node in Puppet Master or None if this information has not been found
"""
_init_puppet_master_connection()
return _execute_command(COMMAND_PUPPET_GET_CERT.format(node_name))
|
7f7fa2164bf7f289ce9dbc1b35f2d8aea546bb60
| 3,641,366
|
from typing import Optional
def get_notebook_workspace(account_name: Optional[str] = None,
notebook_workspace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNotebookWorkspaceResult:
"""
A notebook workspace resource
:param str account_name: Cosmos DB database account name.
:param str notebook_workspace_name: The name of the notebook workspace resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['notebookWorkspaceName'] = notebook_workspace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20190801:getNotebookWorkspace', __args__, opts=opts, typ=GetNotebookWorkspaceResult).value
return AwaitableGetNotebookWorkspaceResult(
id=__ret__.id,
name=__ret__.name,
notebook_server_endpoint=__ret__.notebook_server_endpoint,
status=__ret__.status,
type=__ret__.type)
|
d9020323c0ea520951730a31b2f457ab80fcc931
| 3,641,367
|
def get_current_player(player_one_turn: bool) -> str:
"""Return 'player one' iff player_one_turn is True; otherwise, return
'player two'.
>>> get_current_player(True)
'player one'
>>> get_current_player(False)
'player two'
"""
if player_one_turn:
return P1
else:
return P2
# Complete this function.
|
6bade089054513943aef7656972cadd2d242807c
| 3,641,368
|
import os
from glob import glob
def CityscapesGTFine(path: str) -> Dataset:
"""`CityscapesGTFine <https://www.cityscapes-dataset.com/>`_ dataset.
The file structure should be like::
<path>
leftImg8bit/
test/
berlin/
berlin_000000_000019_leftImg8bit.png
...
...
train/
aachen/
aachen_000000_000019_leftImg8bit.png
...
...
val/
frankfurt/
frankfurt_000000_000019_leftImg8bit.png
...
...
...
gtFine/
test/
berlin/
berlin_000000_000019_gtFine_instanceIds.png
berlin_000000_000019_gtFine_labelIds.png
berlin_000000_000019_gtFine_polygons.json
...
...
train/
aachen/
aachen_000000_000019_gtFine_instanceIds.png
aachen_000000_000019_gtFine_labelIds.png
aachen_000000_000019_gtFine_polygons.json
...
...
val/
frankfurt/
frankfurt_000000_000019_gtFine_instanceIds.png
frankfurt_000000_000019_gtFine_labelIds.png
frankfurt_000000_000019_gtFine_polygons.json
...
...
...
Arguments:
path: The root directory of the dataset.
Returns:
Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
"""
root_path = os.path.join(os.path.abspath(os.path.expanduser(path)))
dataset = Dataset(DATASET_NAME_GTFINE)
dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))
for segment_name in _SEGMENT_NAMES_GTFINE:
segment = dataset.create_segment(segment_name)
        for image_path in glob.glob(os.path.join(root_path, "leftImg8bit", segment_name, "*", "*.png")):
segment.append(_get_data(image_path, root_path, segment_name, "gtFine"))
return dataset
|
70e1adb939519fe6641f1a390ed6b011b27fc1ec
| 3,641,369
|
def is_word(s):
""" String `s` counts as a word if it has at least one letter. """
for c in s:
if c.isalpha(): return True
return False
|
524ed5cc506769bd8634a46d346617344485e5f7
| 3,641,370
|
def index_all_messages(empty_index):
"""
Expected index of `initial_data` fixture when model.narrow = []
"""
return dict(empty_index, **{'all_msg_ids': {537286, 537287, 537288}})
|
ea2c59a4de8e62d2293f87e26ead1b4c15f15a11
| 3,641,371
|
import numpy as np
def compute_affine_matrix(in_shape,
out_shape,
crop=None,
degrees=0.0,
translate=(0.0, 0.0),
flip_h=False,
flip_v=False,
resize=False,
keep_ratio=False):
"""
Similarity warp transformation of the image keeping center invariant.
Args:
in_shape (Sequence): the shape of the input image
out_shape (Sequence): the shape of the output image
crop (Sequence, optional): crop center location, width and height. The
center location is relative to the center of the image. If
:attr:`resize` is not ``True``, crop is simply a translation in the
:attr:`in_shape` space.
degrees (float or int, optional): degrees to rotate the crop.
(default: ``(0.0)``)
translate (Sequence, optional): horizontal and vertical translations.
(default: ``(0.0, 0.0)``)
flip_h (bool, optional): flip the image horizontally.
(default: ``False``)
flip_v (bool, optional): flip the image vertically.
(default: ``False``)
resize (bool, optional): resize the cropped image to fit the output's
size. (default: ``False``)
keep_ratio (bool, optional): match the smaller edge to the
corresponding output edge size, keeping the aspect ratio after
resize. Has no effect if :attr:`resize` is ``False``.
(default: ``False``)
"""
if crop is not None:
T_crop_x, T_crop_y, crop_w, crop_h = crop
else:
T_crop_x, T_crop_y = 0, 0
crop_w, crop_h = in_shape
r = np.deg2rad(degrees)
tx, ty = translate
fh = 1 - 2 * float(flip_h)
fv = 1 - 2 * float(flip_v)
#
# H = T_inshape*T_crop*R*S_resize*T_outshapeT
#
T_i_x = (in_shape[0] - 1) / 2
T_i_y = (in_shape[1] - 1) / 2
T_inshape = np.asarray([[fh, 0, T_i_x],
[0, fv, T_i_y],
[0, 0, 1]])
T_crop = np.asarray([[1, 0, T_crop_x],
[0, 1, T_crop_y],
[0, 0, 1]])
R = np.asarray([[+np.cos(r), -np.sin(r), 0],
[+np.sin(r), +np.cos(r), 0],
[0, 0, 1]])
S_r_x = 1
S_r_y = 1
if resize:
top_left, bot_right = R.dot([[-crop_w / 2, crop_w / 2],
[-crop_h / 2, crop_h / 2],
[1, 1]]).transpose()[:, 0:2]
crop_w, crop_h = np.absolute(bot_right - top_left)
S_r_x = crop_w / out_shape[0]
S_r_y = crop_h / out_shape[1]
if keep_ratio:
scale_ratio = min(S_r_x, S_r_y)
S_r_x = scale_ratio
S_r_y = scale_ratio
S_resize = np.asarray([[S_r_x, 0, 0],
[0, S_r_y, 0],
[0, 0, 1]])
T_o_x = tx - (out_shape[0] - 1) / 2
T_o_y = ty - (out_shape[1] - 1) / 2
T_outshapeT = np.asarray([[1, 0, T_o_x],
[0, 1, T_o_y],
[0, 0, 1]])
return T_inshape.dot(T_crop).dot(R).dot(S_resize).dot(T_outshapeT)
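A minimal usage sketch, assuming a 256x256 input warped into a 128x128 output; the concrete shapes and angle are illustrative only.
# Hypothetical usage sketch for compute_affine_matrix.
H = compute_affine_matrix(in_shape=(256, 256), out_shape=(128, 128),
                          degrees=30.0, resize=True)
print(H.shape)  # (3, 3) homogeneous matrix; it appears to map output pixel coords back into the input image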
|
0c3786c44d35341e5e85d3756e50eb59dd473d64
| 3,641,372
|
def Bern_to_Fierz_nunu(C,ddll):
"""From semileptonic Bern basis to Fierz semileptonic basis for Class V.
C should be the corresponding leptonic Fierz basis and
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
ind = ddll.replace('l_','').replace('nu_','')
return {
'F' + ind + 'nu': C['nu1' + ind],
'F' + ind + 'nup': C['nu1p' + ind],
}
|
4f08f79d6614c8929c3f42096fac71b04bfe7b4b
| 3,641,373
|
def enforce_boot_from_volume(client):
"""Add boot from volume args in create server method call
"""
class ServerManagerBFV(servers.ServerManager):
def __init__(self, client):
super(ServerManagerBFV, self).__init__(client)
self.bfv_image_client = images.ImageManager(client)
def create(self, name, image, flavor, **kwargs):
image_obj = self.bfv_image_client.get(image)
if "block_device_mapping" not in image_obj.metadata.keys() and \
not "block_device_mapping_v2" in kwargs.keys() and \
not "block_device_mapping" in kwargs.keys():
if 'volume_size' in kwargs:
vol_size = kwargs.pop('volume_size')
else:
vol_size = CONF.nova_server_volume_size
bv_map = [{
"source_type": "image",
"destination_type": "volume",
"delete_on_termination": "1",
"boot_index": 0,
"uuid": image,
"device_name": "vda",
"volume_size": str(vol_size)}]
bdm_args = {
'block_device_mapping_v2' : bv_map,
}
kwargs.update(bdm_args)
image = ''
return super(ServerManagerBFV, self).create(name, image,
flavor, **kwargs)
client.servers = ServerManagerBFV(client)
|
4ae4d2624f216c96722e811d9d44cb04caa46e1d
| 3,641,374
|
import numpy as np
from PIL import Image
def img_to_yuv(frame, mode, grayscale=False):
"""Change color space of `frame` from any supported `mode` to YUV
Args:
frame: 3-D tensor in either [H, W, C] or [C, H, W]
mode: A string, must be one of [YV12, YV21, NV12, NV21, RGB, BGR]
grayscale: discard uv planes
return:
3-D tensor of YUV in [H, W, C]
"""
_planar_mode = ('YV12', 'YV21', 'NV12', 'NV21')
_packed_mode = ('RGB', 'BGR')
_allowed_mode = (*_planar_mode, *_packed_mode)
if not isinstance(frame, list):
raise TypeError("frame must be a list of numpy array")
    if mode not in _allowed_mode:
raise ValueError("invalid mode: " + mode)
if mode in _planar_mode:
if mode in ('YV12', 'YV21'):
y, u, v = frame
elif mode in ('NV12', 'NV21'):
y, uv = frame
u = uv.flatten()[0::2].reshape([1, uv.shape[1] // 2, uv.shape[2]])
v = uv.flatten()[1::2].reshape([1, uv.shape[1] // 2, uv.shape[2]])
else:
y = u = v = None
y = np.transpose(y)
u = np.transpose(u)
v = np.transpose(v)
if '21' in mode:
u, v = v, u
if not grayscale:
up_u = np.zeros(shape=[u.shape[0] * 2, u.shape[1] * 2, u.shape[2]])
up_v = np.zeros(shape=[v.shape[0] * 2, v.shape[1] * 2, v.shape[2]])
up_u[0::2, 0::2, :] = up_u[0::2, 1::2, :] = u
up_u[1::2, ...] = up_u[0::2, ...]
up_v[0::2, 0::2, :] = up_v[0::2, 1::2, :] = v
up_v[1::2, ...] = up_v[0::2, ...]
yuv = np.concatenate([y, up_u, up_v], axis=-1)
yuv = np.transpose(yuv, [1, 0, 2]) # PIL needs [W, H, C]
img = Image.fromarray(yuv.astype('uint8'), mode='YCbCr')
else:
y = np.squeeze(y)
img = Image.fromarray(np.transpose(y).astype('uint8'), mode='L')
elif mode in _packed_mode:
        assert len(frame) == 1
rgb = np.asarray(frame[0])
if mode == 'BGR':
rgb = rgb[..., ::-1]
rgb = np.transpose(rgb, [1, 0, 2])
if not grayscale:
img = Image.fromarray(rgb, mode='RGB').convert('YCbCr')
else:
img = Image.fromarray(rgb, mode='RGB').convert('L')
else:
raise RuntimeError("unreachable!")
# return img_to_array(image1) if turn_array else image1
return img
|
002506b3a46fa6b601f4ca65255c8f06b990992d
| 3,641,375
|
def assemblenet_kinetics600() -> cfg.ExperimentConfig:
"""Video classification on Videonet with assemblenet."""
exp = video_classification.video_classification_kinetics600()
feature_shape = (32, 224, 224, 3)
exp.task.train_data.global_batch_size = 1024
exp.task.validation_data.global_batch_size = 32
exp.task.train_data.feature_shape = feature_shape
exp.task.validation_data.feature_shape = (120, 224, 224, 3)
exp.task.train_data.dtype = 'bfloat16'
exp.task.validation_data.dtype = 'bfloat16'
model = AssembleNetModel()
model.backbone.assemblenet.model_id = '50'
model.backbone.assemblenet.blocks = flat_lists_to_blocks(
asn50_structure, asn_structure_weights)
model.backbone.assemblenet.num_frames = feature_shape[0]
exp.task.model = model
assert exp.task.model.backbone.assemblenet.num_frames > 0, (
f'backbone num_frames '
f'{exp.task.model.backbone.assemblenet}')
return exp
|
3356b6ea758baf04cc98421d700f25e342884d5a
| 3,641,376
|
import math
import numpy as np
import torch
from scipy.spatial import distance
from sklearn.linear_model import Lasso
def channel_selection(inputs, module, sparsity=0.5, method='greedy'):
"""
    Selects the most important input channels of the current module.
    Finds the subset of input channels that best reconstructs the module's original output.
:param inputs: torch.Tensor, input features map
:param module: torch.nn.module, layer
:param sparsity: float, 0 ~ 1 how many prune channel of output of this layer
:param method: str, how to select the channel
:return:
list of int, indices of channel to be selected and pruned
"""
    num_channel = inputs.size(1)  # number of input channels
    num_pruned = int(math.ceil(num_channel * sparsity))  # channels to prune for the requested sparsity
num_stayed = num_channel - num_pruned
print('num_pruned', num_pruned)
if method == 'greedy':
indices_pruned = []
while len(indices_pruned) < num_pruned:
min_diff = 1e10
min_idx = 0
for idx in range(num_channel):
if idx in indices_pruned:
continue
indices_try = indices_pruned + [idx]
inputs_try = torch.zeros_like(inputs)
inputs_try[:, indices_try, ...] = inputs[:, indices_try, ...]
output_try = module(inputs_try)
output_try_norm = output_try.norm(2)
if output_try_norm < min_diff:
min_diff = output_try_norm
min_idx = idx
indices_pruned.append(min_idx)
print('indices_pruned !!! ', indices_pruned)
indices_stayed = list(set([i for i in range(num_channel)]) - set(indices_pruned))
elif method == 'greedy_GM':
indices_stayed = []
while len(indices_stayed) < num_stayed:
max_farthest_channel_norm = 1e-10
farthest_channel_idx = 0
for idx in range(num_channel):
if idx in indices_stayed:
continue
indices_try = indices_stayed + [idx]
inputs_try = torch.zeros_like(inputs)
inputs_try[:, indices_try, ...] = inputs[:, indices_try, ...]
output_try = module(inputs_try).view(num_channel,-1).cpu().detach().numpy()
similar_matrix = distance.cdist(output_try, output_try,'euclidean')
similar_sum = np.sum(np.abs(similar_matrix), axis=0)
similar_large_index = similar_sum.argsort()[-1]
farthest_channel_norm= np.linalg.norm(similar_sum[similar_large_index])
if max_farthest_channel_norm < farthest_channel_norm :
max_farthest_channel_norm = farthest_channel_norm
farthest_channel_idx = idx
print(farthest_channel_idx)
indices_stayed.append(farthest_channel_idx)
print('indices_stayed !!! ', indices_stayed)
indices_pruned = list(set([i for i in range(num_channel)]) - set(indices_stayed))
elif method == 'lasso':
y = module(inputs)
if module.bias is not None: # bias.shape = [N]
bias_size = [1] * y.dim() # bias_size: [1, 1, 1, 1]
bias_size[1] = -1 # [1, -1, 1, 1]
bias = module.bias.view(bias_size) # bias.view([1, -1, 1, 1] = [1, N, 1, 1])
            y -= bias  # subtract the bias from the output feature map (y - b)
else:
bias = 0.
y = y.view(-1).data.cpu().numpy() # flatten all of outputs
y_channel_spread = []
for i in range(num_channel):
x_channel_i = torch.zeros_like(inputs)
x_channel_i[:, i, ...] = inputs[:, i, ...]
y_channel_i = module(x_channel_i) - bias
y_channel_spread.append(y_channel_i.data.view(-1, 1))
y_channel_spread = torch.cat(y_channel_spread, dim=1).cpu()
alpha = 1e-7
solver = Lasso(alpha=alpha, warm_start=True, selection='random', random_state=0)
# choice_idx = np.random.choice(y_channel_spread.size()[0], 2000, replace=False)
# selected_y_channel_spread = y_channel_spread[choice_idx, :]
# new_output = y[choice_idx]
#
# del y_channel_spread, y
        # Gradually increase alpha until enough channels are pruned
alpha_l, alpha_r = 0, alpha
num_pruned_try = 0
while num_pruned_try < num_pruned:
alpha_r *= 2
solver.alpha = alpha_r
# solver.fit(selected_y_channel_spread, new_output)
solver.fit(y_channel_spread,y)
num_pruned_try = sum(solver.coef_ == 0)
        # Once an alpha that prunes enough channels is found, binary-search between alpha_l and alpha_r for a more precise value
num_pruned_max = int(num_pruned)
while True:
alpha = (alpha_l + alpha_r) / 2
solver.alpha = alpha
# solver.fit(selected_y_channel_spread, new_output)
solver.fit(y_channel_spread,y)
num_pruned_try = sum(solver.coef_ == 0)
if num_pruned_try > num_pruned_max:
alpha_r = alpha
elif num_pruned_try < num_pruned:
alpha_l = alpha
else:
break
        # Finally, convert the lasso coefficients into stayed/pruned channel indices
indices_stayed = np.where(solver.coef_ != 0)[0].tolist()
indices_pruned = np.where(solver.coef_ == 0)[0].tolist()
else:
raise NotImplementedError
inputs = inputs.cuda()
module = module.cuda()
return indices_stayed, indices_pruned
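A hypothetical usage sketch; the toy conv layer and random input below are assumptions, and a CUDA device is assumed because the function moves its arguments to the GPU before returning.
# Hypothetical usage sketch: prune half the input channels of a toy conv layer.
conv = torch.nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3, padding=1)
x = torch.randn(4, 8, 16, 16)  # (batch, channels, height, width)
stayed, pruned = channel_selection(x, conv, sparsity=0.5, method='greedy')
print("kept:", sorted(stayed), "pruned:", sorted(pruned))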
|
957cbcc799185fd6c2547662bfe79205389d44da
| 3,641,377
|
import six
def format_host(host_tuple):
"""
Format a host tuple to a string
"""
if isinstance(host_tuple, (list, tuple)):
if len(host_tuple) != 2:
            raise ValueError('host_tuple has unexpected length: %s' % host_tuple)
return ':'.join([six.text_type(s) for s in host_tuple])
elif isinstance(host_tuple, six.string_types):
return host_tuple
else:
raise ValueError('host_tuple unexpected type: (%s) %s' % (type(host_tuple), host_tuple))
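A couple of hypothetical calls for illustration:
print(format_host(("localhost", 8080)))  # -> "localhost:8080"
print(format_host("127.0.0.1:9000"))     # strings are returned unchanged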
|
f4822aec5143a99ccc52bb2657e1f42477c65400
| 3,641,378
|
import psutil
def get_cpu_stats():
"""
    Obtains the system's CPU statistics.
    :returns: System CPU statistics.
"""
return psutil.cpu_stats()
|
f538977db72083f42c710faa987a97511959c973
| 3,641,379
|
from typing import Dict
from typing import Union
import os
import re
def load_gene_metadata(gtf_file : str) -> Dict[str, Dict[str, Union[int, str]]]:
"""
Read gene metadata from a GTF file.
Args:
gtf_file (str): path to GTF file
Returns:
A Dict with each GeneId pointing to a Dict of metadata keys -> values
"""
if not os.path.exists(gtf_file):
raise ValueError(f"Gene metadata file '{gtf_file}' not found.")
regex_genetype = re.compile('gene_biotype "([^"]+)"')
regex_geneid = re.compile('gene_id "([^"]+)"')
regex_genename = re.compile('gene_name "([^"]+)"')
geneid2annots = {}
for line in open(gtf_file).readlines():
if line.startswith('#'):
continue
fields = line.rstrip().split('\t')
chrom, feature_class, feature_type, start_str, end_str, junk, strand, junk, tags = fields
if feature_type == "gene":
genename = geneid = regex_geneid.search(tags).group(1)
_genename_search = regex_genename.search(tags)
if _genename_search:
genename = _genename_search.group(1)
genetype = regex_genetype.search(tags).group(1)
chrid, start, end = fields[0], int(fields[3]), int(fields[4])
geneid2annots[geneid] = { "Gene:": genename, "Accession": geneid, "Biotype": genetype, \
"Chromosome": chrid, "Start": start, "End": end }
return geneid2annots
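A hypothetical usage sketch; the single GTF record below is made up for illustration.
import tempfile
gtf_line = ("1\thavana\tgene\t11869\t14409\t.\t+\t.\t"
            'gene_id "ENSG00000223972"; gene_name "DDX11L1"; '
            'gene_biotype "transcribed_unprocessed_pseudogene";\n')
with tempfile.NamedTemporaryFile("w", suffix=".gtf", delete=False) as fh:
    fh.write(gtf_line)
print(load_gene_metadata(fh.name))
# -> {'ENSG00000223972': {'Gene:': 'DDX11L1', 'Accession': 'ENSG00000223972', ...}}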
|
bc5f096f7a14579fcdee5a862f3f800d21012244
| 3,641,380
|
import sys
import glob
import os
import pathlib
import importlib
from importlib.metadata import entry_points, metadata
def get_plugins() -> dict[str, Plugin]:
"""
This function is really time consuming...
"""
# get entry point plugins
# Users can use Python's entry point system to create rich plugins, see
# example here: https://github.com/Pioreactor/pioreactor-air-bubbler
eps = entry_points()
pioreactor_plugins: tuple = eps.get("pioreactor.plugins", tuple())
plugins: dict[str, Plugin] = {}
for plugin in pioreactor_plugins:
try:
md = metadata(plugin.name)
plugins[md["Name"]] = Plugin(
plugin.load(),
md["Summary"],
md["Version"],
md["Home-page"],
md["Author"],
"entry_points",
)
except Exception as e:
print(f"{plugin.name} plugin load error: {e}")
# get file-based plugins.
# Users can put .py files into the MODULE_DIR folder below.
# The below code will load it into Python, and treat it like any other plugin.
# The authors can add metadata to their file with the following variables at the
# highest level in the file:
# __plugin_name__
# __plugin_author__
# __plugin_summary__
# __plugin_version__
# __plugin_homepage__
BLANK = "Unknown"
# The directory containing your modules needs to be on the search path.
if is_testing_env():
MODULE_DIR = "plugins_dev"
else:
MODULE_DIR = "/home/pioreactor/.pioreactor/plugins"
sys.path.append(MODULE_DIR)
# Get the stem names (file name, without directory and '.py') of any
# python files in your directory, load each module by name and run
# the required function.
py_files = glob.glob(os.path.join(MODULE_DIR, "*.py"))
for py_file in py_files:
module_name = pathlib.Path(py_file).stem
module = importlib.import_module(module_name)
plugins[getattr(module, "__plugin_name__", module_name)] = Plugin(
module,
getattr(module, "__plugin_summary__", BLANK),
getattr(module, "__plugin_version__", BLANK),
getattr(module, "__plugin_homepage__", BLANK),
getattr(module, "__plugin_author__", BLANK),
"plugins_folder",
)
return plugins
|
234a86b1ee142b9ccf1cfc39a6bc3947bd103488
| 3,641,381
|
import json
def _deserialise_list_of(collection_type, kind, owning_cls, field, value):
"""
Deserialise a list of items into a collection objects of class `kind`. Note
that if the value is None, we return None here so that we get a more
meaningful error message later on.
Args:
kind: Type to deserialise into
value: List of raw items
collection_type: The type of the container (list, set, etc)
Returns:
Collection of deserialized items, or None (if value was None)
"""
if value is None:
return None
# TODO(dan): Is this json stuff necessary?
if isinstance(value, (bytes, str)):
value = json.loads(value)
result = [
_deserialise_maybe_union(owning_cls, field, each) for each in value
]
return collection_type(result)
|
8dc7dfa88966d90bb29714c887a2457d9aeb1e8f
| 3,641,382
|
import numpy as np
def get_minmax_array(X):
"""Utility method that returns the boundaries for each feature of the input array.
Args:
X (np.float array of shape (num_instances, num_features)): The input array.
Returns:
min (np.float array of shape (num_features,)): Minimum values for each feature in array.
max (np.float array of shape (num_features,)): Maximum values for each feature in array.
"""
min = np.min(X, axis=0)
max = np.max(X, axis=0)
return min, max
|
5453371759af5bf6d876aa8fe5d2caf88ee6eb08
| 3,641,383
|
def getAllHeaders(includeText=False):
"""
Get a dictionary of dream numbers and headers. If includeText=true, also
add the text of the dream to the dictionary as 'text' (note that this key
is all lowercase so it will not conflict with the usual convention for
header names, even if "Text" would be an odd header name).
"""
dreams = {}
for f in allDreamfiles():
dream = {}
textLines = []
inHeaders = True
for line in f:
if not line.strip(): # end of headers
if includeText:
inHeaders = False
else:
break
if inHeaders:
header, value = (i.strip() for i in line.split(':\t'))
dream[header] = value
else:
textLines.append(line)
if includeText:
# omit the first blank separator line
dream['text'] = '\n'.join(i for i in textLines[1:])
dreams[dream['Id']] = dream
return dreams
|
2bbd78d9c9cbfaa50a62e99c25148844d7c5e330
| 3,641,384
|
import numpy as np
import pandas as pd
def zscore(arr, period):
"""
ZScore transformation of `arr` for rolling `period.` ZScore = (X - MEAN(X)) / STDEV(X)
:param arr:
:param period:
:return:
"""
if period <= 0:
raise YaUberAlgoArgumentError("'{}' must be positive number".format(period))
# Do quick sanity checks of arguments
_check_series_args(arr=arr)
try:
if isinstance(arr, pd.Series):
return pd.Series(_zscore(arr.values, period), index=arr.index)
elif isinstance(arr, np.ndarray):
return _zscore(arr, period)
except ValueError as exc:
raise YaUberAlgoInternalError(str(exc))
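The _zscore helper is not shown above; as a reference point, a minimal pandas-only sketch of the rolling ZScore definition from the docstring (an assumption, not the library's own implementation):
s = pd.Series(np.random.randn(100))
period = 20
rolling_z = (s - s.rolling(period).mean()) / s.rolling(period).std()
print(rolling_z.tail())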
|
8a49afe3ecefc326b3bd889279085cccd1d19a61
| 3,641,385
|
import glob
import pandas
def _load_event_data(prefix, name):
"""Load per-event data for one single type, e.g. hits, or particles.
"""
expr = '{!s}-{}.csv*'.format(prefix, name)
files = glob.glob(expr)
dtype = DTYPES[name]
if len(files) == 1:
return pandas.read_csv(files[0], header=0, index_col=False, dtype=dtype)
elif len(files) == 0:
raise Exception('No file matches \'{}\''.format(expr))
else:
raise Exception('More than one file matches \'{}\''.format(expr))
|
04b2e4a7483ba56fdd282dc6355e9acb2d6da7b1
| 3,641,386
|
from datetime import datetime
def check_file(file_id: str, upsert: bool = False) -> File:
"""Checks that the file with file_id exists in the DB
Args:
file_id: The id for the requested file.
upsert: If the file doesn't exist create a placeholder file
Returns:
The file object
Raises:
NotFoundError: File with the requested ID doesn't exist and is expected to
ModelValidationError: Incorrectly formatted ID is given
"""
try:
ObjectId(file_id)
except (InvalidId, TypeError):
raise ModelValidationError(
f"Cannot create a file id with the string {file_id}. "
"Requires 24-character hex string."
)
res = db.query_unique(File, id=file_id)
if res is None:
if upsert:
create_file("BG_placeholder", 0, 0, file_id)
res = db.query_unique(File, id=file_id)
else:
raise NotFoundError(f"Tried to fetch an unsaved file {file_id}")
db.modify(res, updated_at=datetime.utcnow())
return res
|
2f4e94a064d0bdfea8f001855eb39675f78ab6e5
| 3,641,387
|
def parse(volume_str):
"""Parse combined k8s volume string into a dict.
Args:
volume_str: The string representation for k8s volume,
e.g. "claim_name=c1,mount_path=/path1".
Return:
A Python dictionary parsed from the given volume string.
"""
kvs = volume_str.split(",")
volume_keys = []
parsed_volume_dict = {}
for kv in kvs:
k, v = kv.split("=")
if k not in volume_keys:
volume_keys.append(k)
else:
raise ValueError(
"The volume string contains duplicate volume key: %s" % k
)
if k not in _ALLOWED_VOLUME_KEYS:
raise ValueError(
"%s is not in the allowed list of volume keys: %s"
% (k, _ALLOWED_VOLUME_KEYS)
)
parsed_volume_dict[k] = v
return parsed_volume_dict
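A hypothetical usage sketch; _ALLOWED_VOLUME_KEYS is assumed to be a module-level constant similar to the one defined below (it is not shown in the snippet above).
_ALLOWED_VOLUME_KEYS = ["claim_name", "mount_path"]
print(parse("claim_name=c1,mount_path=/path1"))
# -> {'claim_name': 'c1', 'mount_path': '/path1'}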
|
f6984faf90081eb8ca3fbbb8ffaf636b040c7ffc
| 3,641,388
|
import numpy as np
def longest_common_substring(text1, text2):
    """Longest common substring (case-sensitive)."""
n = len(text1)
m = len(text2)
maxlen = 0
span1 = (0, 0)
span2 = (0, 0)
if n * m == 0:
return span1, span2, maxlen
dp = np.zeros((n+1, m+1), dtype=np.int32)
for i in range(1, n+1):
for j in range(1, m+1):
if text1[i-1] == text2[j-1]:
dp[i][j] = dp[i-1][j-1] + 1
if dp[i][j] > maxlen:
maxlen = dp[i][j]
span1 = (i - maxlen, i)
span2 = (j - maxlen, j)
return span1, span2, maxlen
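A minimal usage sketch:
span1, span2, length = longest_common_substring("playground", "background")
print(span1, span2, length)  # -> (4, 10) (4, 10) 6, i.e. both strings share "ground"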
|
ed892739d22ee0763a2fe5dd44b48b8d1902605e
| 3,641,389
|
def make_subclasses_dict(cls):
"""
Return a dictionary of the subclasses inheriting from the argument class.
Keys are String names of the classes, values the actual classes.
:param cls:
:return:
"""
the_dict = {x.__name__:x for x in get_all_subclasses(cls)}
the_dict[cls.__name__] = cls
return the_dict
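The get_all_subclasses helper is not shown above; a minimal sketch of what it is assumed to do (recursive subclass discovery), followed by a tiny usage example:
def get_all_subclasses(cls):
    """Assumed helper: recursively collect every subclass of cls."""
    subclasses = set()
    for subclass in cls.__subclasses__():
        subclasses.add(subclass)
        subclasses.update(get_all_subclasses(subclass))
    return subclasses
class Base: pass
class Child(Base): pass
print(make_subclasses_dict(Base))  # keys 'Child' and 'Base' mapped to the class objects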
|
36eb7c9242b83a84fcd6ee18b4ca9297038f9ee6
| 3,641,390
|
import time
from xml.dom import minidom
def _parse_realtime_data(xmlstr):
"""
Takes xml a string and returns a list of dicts containing realtime data.
"""
doc = minidom.parseString(xmlstr)
ret = []
elem_map = {"LineID": "id", "DirectionID": "direction",
"DestinationStop": "destination" }
ack = _single_element(doc, "Acknowledge")
    if ack is None or ack.attributes["Result"].nodeValue != "ok":
return None
curtime = time.mktime(time.strptime(
ack.attributes["TimeStamp"].nodeValue[:-10], "%Y-%m-%dT%H:%M:%S"))
for elem in doc.getElementsByTagName("DISDeviation"):
entry = {"is_realtime": False}
for name, value in [ (e.nodeName, _get_text(e.childNodes)) \
for e in elem.childNodes \
if e.nodeType == e.ELEMENT_NODE ]:
if name in elem_map:
entry[elem_map[name]] = unicode(value)
elif name == "TripStatus":
entry["is_realtime"] = value == "Real"
if entry["is_realtime"]:
timeele = _single_element(elem, "ExpectedDISDepartureTime")
else:
timeele = _single_element(elem, "ScheduledDISDepartureTime")
parsed_time = time.strptime(
_get_text(timeele.childNodes)[:-10], "%Y-%m-%dT%H:%M:%S")
entry["time"] = parsed_time
entry["wait_time"] = int(time.mktime(parsed_time) - curtime)
ret.append(entry)
return ret
|
90958c7f66072ecfd6c57b0da95293e35196354c
| 3,641,391
|
def tocopo_accuracy_fn(tocopo_logits: dt.BatchedTocopoLogits,
target_data: dt.BatchedTrainTocopoTargetData,
oov_token_id: int,
pad_token_id: int,
is_distributed: bool = True) -> AccuracyMetrics:
"""Computes accuracy metrics.
Args:
tocopo_logits: Predictions from model (unnormalized log scores).
target_data: target data to compare prediction against.
oov_token_id: Id of out of vocabulary token.
pad_token_id: Id of pad token.
is_distributed: Whether to perform cross-device aggregation.
Returns:
A `AccuracyMetrics` instance.
"""
vocab_size = tocopo_logits.token_logits.shape[2]
one_hot_target_tokens = jax.nn.one_hot(target_data.token_ids,
vocab_size) # (B, O, U)
# Don't give credit for OOV tokens.
one_hot_target_tokens = one_hot_target_tokens.at[:, :, oov_token_id].set(
jnp.zeros_like(target_data.token_ids))
# Disable predictions for all tokens when there is a pointer.
# Mask indicating absence of a pointer at target.
not_pointer_mask = target_data.is_target_pointer.sum(axis=2) == 0 # (B, O)
one_hot_target_tokens = one_hot_target_tokens * jnp.expand_dims(
not_pointer_mask, axis=2)
few_hot_targets = jnp.concatenate([
one_hot_target_tokens, target_data.is_target_copy,
target_data.is_target_pointer
],
axis=2) # (B, O, U+2V)
# Get the one hot predictions.
tocopo_logits_stacked = jnp.concatenate([
tocopo_logits.token_logits, tocopo_logits.copy_logits,
tocopo_logits.pointer_logits
],
axis=2) # (B, O, U+2V)
prediction_indices = jnp.argmax(tocopo_logits_stacked, axis=2) # (B, O)
one_hot_predictions = jax.nn.one_hot(
prediction_indices, tocopo_logits_stacked.shape[2]) # (B, O, U+2V)
# (B, O)
is_pad = (target_data.token_ids == pad_token_id)
# (B, O, U+2V) -> (B, O)
# If the target is a pad token, then we remove it from consideration when
# calculating accuracies. `element_correct_or_pad` array always assign a 1 to
# padded prediction (this property is used in the sequence accuracy
# computation).
element_correct = jnp.sum(one_hot_predictions * few_hot_targets, axis=-1)
element_correct_or_pad = jnp.where(is_pad, 1, element_correct)
per_element_correct = jnp.sum(element_correct_or_pad * (1 - is_pad))
per_element_attempts = jnp.sum(1 - is_pad)
per_sequence_correct = jnp.sum(jnp.prod(element_correct_or_pad, axis=-1))
per_sequence_attempts = element_correct_or_pad.shape[0]
pointer_mask = jnp.logical_and(
jnp.logical_not(not_pointer_mask), jnp.logical_not(is_pad))
pointer_correct = jnp.sum(element_correct * pointer_mask)
pointer_attempts = jnp.sum(pointer_mask)
# Pointer sequence accuracy: construct an array of 1s everywhere except where
# a pointer is incorrectly predicted. Note: this counts a sequence without
# pointers as accurately predicted.
pointer_correct_or_toco_or_pad = jnp.where(not_pointer_mask, 1,
element_correct_or_pad)
per_sequence_po_correct = jnp.sum(
jnp.prod(pointer_correct_or_toco_or_pad, axis=-1))
toco_mask = jnp.logical_and(not_pointer_mask, jnp.logical_not(is_pad))
toco_correct = jnp.sum(element_correct * toco_mask)
toco_attempts = jnp.sum(toco_mask)
# ToCo sequence accuracy: construct an array of 1s everywhere except where
# a To/Co is incorrectly predicted. Note: this counts a sequence without
# ToCo as accurately predicted.
toco_correct_or_po_or_pad = jnp.where(pointer_mask, 1, element_correct_or_pad)
per_sequence_toco_correct = jnp.sum(
jnp.prod(toco_correct_or_po_or_pad, axis=-1))
# Correct predictions using the To head.
is_prediction_token_mask = prediction_indices < vocab_size
token_correct = jnp.sum(
element_correct *
jnp.logical_and(is_prediction_token_mask, jnp.logical_not(is_pad)))
# Aggregate across devices.
if is_distributed:
per_element_correct = jax.lax.psum(per_element_correct, axis_name='i')
per_element_attempts = jax.lax.psum(per_element_attempts, axis_name='i')
per_sequence_correct = jax.lax.psum(per_sequence_correct, axis_name='i')
per_sequence_attempts = jax.lax.psum(per_sequence_attempts, axis_name='i')
pointer_correct = jax.lax.psum(pointer_correct, axis_name='i')
pointer_attempts = jax.lax.psum(pointer_attempts, axis_name='i')
toco_correct = jax.lax.psum(toco_correct, axis_name='i')
token_correct = jax.lax.psum(token_correct, axis_name='i')
toco_attempts = jax.lax.psum(toco_attempts, axis_name='i')
per_sequence_po_correct = jax.lax.psum(
per_sequence_po_correct, axis_name='i')
per_sequence_toco_correct = jax.lax.psum(
per_sequence_toco_correct, axis_name='i')
return AccuracyMetrics(
num_element_correct=per_element_correct,
num_element_attempts=per_element_attempts,
num_seq_correct=per_sequence_correct,
num_seq_attempts=per_sequence_attempts,
num_pointer_correct=pointer_correct,
num_pointer_attempts=pointer_attempts,
num_pointer_seq_correct=per_sequence_po_correct,
num_toco_correct=toco_correct,
num_token_correct=token_correct,
num_toco_attempts=toco_attempts,
num_toco_seq_correct=per_sequence_toco_correct)
|
828b7d3db40d488a7e05bbfe1f3d2d94f58d8efa
| 3,641,392
|
def cols_from_html_tbl(tbl):
""" Extracts columns from html-table tbl and puts columns in a list.
tbl must be a results-object from BeautifulSoup)"""
rows = tbl.tbody.find_all('tr')
if rows:
for row in rows:
cols = row.find_all('td')
for i,cell in enumerate(cols):
                if 'col_list' not in locals():
col_list=[[] for x in range(len(cols))]
col_list[i].append(cell.text)
else:
col_list=[]
return col_list
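A hypothetical usage sketch with a tiny hand-written table:
from bs4 import BeautifulSoup
html = ("<table><tbody>"
        "<tr><td>a1</td><td>b1</td></tr>"
        "<tr><td>a2</td><td>b2</td></tr>"
        "</tbody></table>")
tbl = BeautifulSoup(html, "html.parser").table
print(cols_from_html_tbl(tbl))  # -> [['a1', 'a2'], ['b1', 'b2']]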
|
94bef05b782073955738cf7b774af34d64520499
| 3,641,393
|
from typing import List
from typing import Tuple
def get_score_park(board: List[List[str]]) -> Tuple[int]:
"""
Calculate the score for the building - park (PRK).
Score 1: If ONLY 1 park.
Score 3: If the park size is 2.
Score 8: If the park size is 3.
Score 16: If the park size is 4.
Score 22: If the park size is 5.
Score 23: If the park size is 6.
Score 24: If the park size is 7.
Score 25: If the park size is 8.
Score 17 + x: For all park size > 8, where x = size of park
Parameters
----------
board: List[List[str]]
2D array containing all the game detail, including column header, row header and placed buildings.
Returns
-------
score: Tuple[int]
A list containing all the score for the specific building - park (PRK).
"""
type = 'PRK'
# @ Convert board into logical matrix, where 1 represent park and other type of building are represent by 0.
grid = [[1 if type == col else 0 for col in row] for row in board]
visited_location_set = set()
score_list = []
table = [
[1, 2, 3, 4, 5, 6, 7, 8],
[1, 3, 8, 16, 22, 23, 24, 25]
]
for idx_row in range(len(grid)):
for idx_col in range(len(grid[0])):
score = 0
size = get_island_size(idx_row, idx_col, grid, visited_location_set, direction=('up', 'down', 'left', 'right'))
if 0 == size:
continue
if 8 > size:
score_idx = table[0].index(size)
score = table[1][score_idx]
else:
score = 17 + size
score_list.append(score)
return *score_list,
|
2bf1629aeb9937dfd871aa118e675cd9358b65ef
| 3,641,394
|
import numpy as np
def kernel_epanechnikov(inst: np.ndarray) -> np.ndarray:
"""Epanechnikov kernel."""
if inst.ndim != 1:
raise ValueError("'inst' vector must be one-dimensional!")
return 0.75 * (1.0 - np.square(inst)) * (np.abs(inst) < 1.0)
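A minimal usage sketch:
u = np.linspace(-1.5, 1.5, 7)
print(kernel_epanechnikov(u))  # non-zero only where |u| < 1, peaking at 0.75 for u == 0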
|
7426e068c3a939595b77c129af4f8d30bbfc89fb
| 3,641,395
|
def submission_parser(reddit_submission_object):
"""Parses a submission and returns selected parameters"""
post_timestamp = reddit_submission_object.created_utc
post_id = reddit_submission_object.id
score = reddit_submission_object.score
ups = reddit_submission_object.ups
downs = reddit_submission_object.downs
# post_body = np.nan
thread_title = reddit_submission_object.title
thread_url = reddit_submission_object.url
subreddit = reddit_submission_object.subreddit.display_name
return post_timestamp, post_id, score, ups, downs, thread_title, thread_url, subreddit
|
d2b406f38e799230474e918df91d55e48d27f385
| 3,641,396
|
def dashboard():
"""Displays dashboard to logged in user"""
user_type = session.get('user_type')
user_id = session.get('user_id')
    if user_type is None:
return redirect ('/login')
if user_type == 'band':
band = crud.get_band_by_id(user_id)
display_name = band.display_name
age = band.age
gender = band.gender
influences = band.influences
location = band.location
description = band.description
seeking = band.skills
genres = band.genres
return render_template('dashboard.html',
user_type=user_type,
display_name=display_name,
age=age,
gender=gender,
influences=influences,
location=location,
description=description,
seeking=seeking,
genres=genres)
if user_type == 'musician':
musician = crud.get_musician_by_id(user_id)
display_name = musician.display_name
age = musician.age
gender = musician.gender
influences = musician.influences
location = musician.location
description = musician.description
skills = musician.skills
genres = musician.genres
return render_template('dashboard.html',
user_type=user_type,
display_name=display_name,
age=age,
gender=gender,
influences=influences,
location=location,
description=description,
skills=skills,
genres=genres)
|
1cec9fcd17a963921f23f03478a8c3195db9a18e
| 3,641,397
|
from bs4 import BeautifulSoup
def parse_site(site_content, gesture_id):
""" Parses the following attributes:
title, image, verbs and other_gesture_ids
:param site_content: a html string
:param gesture_id: the current id
:return: {
title: str,
img: str,
id: number,
compares: [
{
verb: [str],
other_gesture_id: number
}
]
}
"""
soup = BeautifulSoup(site_content, 'html.parser')
img = soup.body.img
img = img['src'] if img else False
title = soup.body.font.b.contents[0].lower().strip()
table = soup.body.table.tr
rows = table.find_all('td')
compares = []
for td in rows:
content = td.font.contents
current_verb = []
current_other = ''
for line in content:
if str(line) == '<br/>':
compares.append({
'verb': current_verb,
'other_gesture_id': current_other,
})
current_verb = []
current_other = ''
elif hasattr(line, 'name') and line.name == 'a':
current_other = line['href'].replace('.htm', '')
else:
current_verb.append(str(line).strip().replace('\\n', '').lower())
return {
'id': gesture_id,
'title': title,
'img': img,
'compares': compares,
}
|
b9719dbbd2ca7883257c53410423de5e3df3fe93
| 3,641,398
|
from multiprocessing import Pool
import multiprocessing
def test_multiprocessing_function () :
"""Test parallel processnig with multiprocessing
"""
logger = getLogger ("ostap.test_multiprocessing_function")
logger.info ('Test job submission with module %s' % multiprocessing )
ncpus = multiprocessing.cpu_count()
pool = Pool ( ncpus )
jobs = pool.imap_unordered ( make_histos , zip ( count () , inputs ) )
result = None
for h in progress_bar ( jobs , max_value = len ( inputs ) ) :
if not result : result = h
else : result.Add ( h )
pool.close ()
pool.join ()
logger.info ( "Histogram is %s" % result.dump ( 80 , 20 ) )
logger.info ( "Entries %s/%s" % ( result.GetEntries() , sum ( inputs ) ) )
with wait ( 5 ) , use_canvas ( 'test_multiprocessing_function' ) :
result.draw ( )
return result
|
a59635b844b4ff80a090a1ec8e3661e340903269
| 3,641,399
|