content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import pprint
def load_transform_data(human_dataset, bot_dataset, drop_features, bins, logger, **kwargs):
    """
    Load and preprocess human/bot account data, returning examples and labels as numpy.

    Args:
        human_dataset: path to a CSV of human accounts.
        bot_dataset: path to a CSV of bot accounts.
        drop_features: list of column names to drop outright.
        bins: number of quantile bins used to discretize numeric features.
        logger: logger used for progress/warning messages.
        **kwargs: ignored; accepted for interface compatibility.

    Returns:
        Tuple ``(X, y, columns)`` where ``X`` is the float feature matrix,
        ``y`` the float label vector (1 = bot, 0 = human) and ``columns``
        the final feature names.
    """
    # Load data for humans and label them 0.
    df1 = pd.read_csv(human_dataset)
    df1 = df1.drop("screen_name", axis=1)  # remove screen_name column
    df1 = df1.assign(is_bot=0)
    # Load data for bots and label them 1.
    df2 = pd.read_csv(bot_dataset)
    df2 = df2.drop("screen_name", axis=1)  # remove screen_name column
    df2 = df2.assign(is_bot=1)
    # Concatenate dataframes. (DataFrame.append was removed in pandas 2.0.)
    df = pd.concat([df1, df2], ignore_index=True)
    # Drop unwanted features.
    df = df.drop(drop_features, axis=1)
    for column in df:
        # Source identity and is_bot are not quantizable.
        if column in ("source_identity", "is_bot"):
            continue
        # Drop feature if there is only 1 distinct value.
        if np.unique(df[column]).size == 1:
            logger.warning("Dropping feature because only one unique value: %s" % column)
            df = df.drop(column, axis=1)
            continue
        # Discretize the numeric feature into quantile bins.
        df[column] = pd.qcut(df[column], bins, duplicates="drop")
    logger.info("Features:\n %s" % pprint.pformat(list(df.columns)))
    # Encode 'source_identity' field by setting '1's if source is present.
    transformed = _transform_source_identity(df.loc[:, "source_identity"])
    df = df.drop("source_identity", axis=1)
    # One present/absent column pair per source category, in the fixed
    # column order produced by _transform_source_identity.
    sources = ("other", "browser", "mobile", "osn", "automation", "marketing", "news")
    for idx, source in enumerate(sources):
        df["source_identity_%s_present" % source] = transformed[:, 2 * idx]
        df["source_identity_%s_absent" % source] = transformed[:, 2 * idx + 1]
    # Perform one-hot encoding.
    df = pd.get_dummies(df)
    # Separate features from targets.
    df_X = df.drop("is_bot", axis=1)
    df_y = df["is_bot"]
    # Convert to numpy.
    X = df_X.values.astype("float")
    y = df_y.values.astype("float")
    return X, y, df_X.columns
def latest():
    """Return the most recently performed searches as a JSON response."""
    searches = get_latest_searches()
    return jsonify(searches)
from typing import List
import zipfile
import os
def extract_zip(inzip: str, path: str) -> List[str]:
    """
    Extract a zip archive into a directory.

    Parameters:
    ===========
    inzip: str
        Input zip file.
    path: str
        Output directory.

    Returns:
    ========
    namelist: List
        Paths (joined onto ``path``) of the files extracted from the zip.
    """
    with zipfile.ZipFile(inzip) as archive:
        archive.extractall(path=path)
        members = archive.namelist()
    return [os.path.join(path, member) for member in members]
def actions(board):
    """
    Returns set of all possible actions (i, j) available on the board.
    """
    # Every empty cell of the 3x3 board is a legal move, scanned row-major.
    return [(row, col)
            for row in range(3)
            for col in range(3)
            if board[row][col] == EMPTY]
import re
import json
def getCity(html):
    """This function uses the ``html`` passed to it as a string to extract, parse and return a City object

    Parameters
    ----------
    html : str
        the html returned when a get request to view the city is made. This request can be made with the following statement: ``s.get(urlCiudad + id)``, where urlCiudad is a string defined in ``config.py`` and id is the id of the city.

    Returns
    -------
    city : dict
        this function returns a json parsed City object. For more information about this object refer to the github wiki page of Ikabot.
    """
    # The city payload is embedded in the page as the JSON argument of the
    # "updateBackgroundData" JS call; capture everything up to the following
    # "updateTemplateData" call.
    city = re.search(r'"updateBackgroundData",\s?([\s\S]*?)\],\["updateTemplateData"', html).group(1)
    city = json.loads(city, strict=False)
    # Rename server-side keys to the names used by the rest of the bot.
    city['Id'] = city.pop('ownerId')
    city['Name'] = city.pop('ownerName')
    city['x'] = city.pop('islandXCoord')
    city['y'] = city.pop('islandYCoord')
    city['cityName'] = city['name']
    # Annotate each building slot with its index and normalize its fields.
    i = 0
    for position in city['position']:
        position['position'] = i
        i += 1
        if 'level' in position:
            position['level'] = int(position['level'])
        position['isBusy'] = False
        if 'constructionSite' in position['building']:
            # A 'constructionSite' marker in the CSS class means the slot is
            # busy; the marker is a fixed 17-character suffix that is stripped.
            position['isBusy'] = True
            position['building'] = position['building'][:-17]
        elif 'buildingGround ' in position['building']:
            # Empty ground: the last CSS class token encodes the slot type.
            position['name'] = 'empty'
            position['type'] = position['building'].split(' ')[-1]
            position['building'] = 'empty'
    city['id'] = str(city['id'])
    city['propia'] = True  # "propia": this is one of the player's own cities
    # Aggregate resource/economy info scraped from the same page by helpers.
    city['recursos'] = getAvailableResources(html, num=True)
    city['storageCapacity'] = getWarehouseCapacity(html)
    city['ciudadanosDisp'] = getFreeCitizens(html)
    city['consumo'] = getWineConsumption(html)
    city['enventa'] = onSale(html)
    # Free storage per resource = capacity - stored - amount listed for sale
    # (assumes exactly 5 resource kinds, indexed 0..4).
    city['freeSpaceForResources'] = []
    for i in range(5):
        city['freeSpaceForResources'].append( city['storageCapacity'] - city['recursos'][i] - city['enventa'][i] )
    return city
def server_hostname(config):
    """
    Reads the ambari server name from the config or using the supplied script.

    The result is memoized in the module-level ``cached_server_hostname``.
    If the config defines server/hostname_script, that script is executed and
    its (non-empty) stdout is used; otherwise server/hostname from the config
    is returned.
    """
    global cached_server_hostname
    if cached_server_hostname is not None:
        return cached_server_hostname
    if config.has_option('server', 'hostname_script'):
        scriptname = config.get('server', 'hostname_script')
        try:
            osStat = subprocess.Popen([scriptname], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = osStat.communicate()
            # Popen pipes yield bytes on Python 3; decode so the cached value
            # is a str and concatenates cleanly into the log message below.
            if isinstance(out, bytes):
                out = out.decode('utf-8', 'replace')
            if 0 == osStat.returncode and 0 != len(out.strip()):
                cached_server_hostname = out.strip()
                logger.info("Read server hostname '" + cached_server_hostname + "' using server:hostname_script")
        except Exception as err:  # was Python-2-only "except Exception, err" syntax
            # Best-effort: fall through to the configured hostname on any failure.
            logger.info("Unable to execute hostname_script for server hostname. " + str(err))
    if cached_server_hostname is None:
        cached_server_hostname = config.get('server', 'hostname')
    return cached_server_hostname
from typing import Callable
import json
from pathlib import Path
import types
def cache_instance(get_instance_fn: Callable[..., data.TrainingInstance] = None, *, cache_dir, **instance_config):
    """Decorator to automatically cache training instances.

    Can be used bare (``@cache_instance``) or with configuration
    (``@cache_instance(cache_dir=..., key=value)``). Cached instances are
    stored as JSON under ``cache_dir/<config-hash>/<instance-id>.json``,
    alongside a ``config.json`` describing the instance configuration.
    """
    if get_instance_fn is None:
        # Called with configuration only: return a decorator.
        # BUG FIX: instance_config must be forwarded, otherwise all
        # configuration kwargs were silently dropped.
        return partial(cache_instance, cache_dir=cache_dir, **instance_config)
    # One cache subdirectory per distinct instance configuration.
    cache_data_id = create_hash_id(json.dumps(instance_config).encode())
    cache_dir = Path(cache_dir) / cache_data_id
    if not Path(cache_dir).exists():
        cache_dir.mkdir(parents=True, exist_ok=True)
    # Persist the configuration next to the cached instances for traceability.
    config_path = cache_dir / "config.json"
    if not config_path.exists():
        with config_path.open("w") as f:
            json.dump(instance_config, f, indent=4, default=str)

    @lru_cache
    def get_instance(instance_id, location_query, target_date):
        cache_path = cache_dir / f"{instance_id}.json"
        if cache_path.exists():
            with cache_path.open("r") as f:
                logger.info(f"getting training instance {instance_id} from cache dir {cache_dir}")
                return data.TrainingInstance(**json.load(f))
        instance = get_instance_fn(location_query, target_date, instance_id=instance_id, **instance_config)
        # Only cache complete instances with a defined target.
        if not pd.isna(instance.target) and instance.target_is_complete:
            with cache_path.open("w") as f:
                json.dump(asdict(instance), f, default=str)
        return instance

    @wraps(get_instance_fn)
    def wrapped_instance_fn(
        location_query: str,
        target_date: types.DateType,
    ):
        # The instance id hashes query, date and configuration so cache
        # entries are unique per (input, config) combination.
        return get_instance(
            create_id(
                target_date,
                location_query.encode(),
                target_date.isoformat().encode(),
                json.dumps(instance_config).encode(),
            ),
            location_query,
            target_date,
        )
    return wrapped_instance_fn
def get_definitions_query_filter(request_args):
    """ Get query_filter for alert_alarm_definition list route.
    """
    valid_args = ['array_name', 'platform_name', 'instrument_name', 'reference_designator']
    # Honor the 'retired' flag when its value is the string 'true'
    # (case-insensitive).
    display_retired = False
    if 'retired' in request_args:
        display_retired = request_args.get('retired').lower() == 'true'
    # Pick the first valid argument carrying a truthy value.
    key = None
    key_value = None
    for candidate in valid_args:
        if candidate in request_args:
            value = request_args.get(candidate)
            if value:
                key = candidate
                key_value = str(value)
                break
    # Build the filter only when there is something to filter on.
    query_filters = None
    if key_value is not None or display_retired:
        query_filters = {}
        if key_value is not None:
            query_filters[key] = key_value
        if display_retired:
            query_filters['retired'] = True
    return query_filters
import io
def _read_dictionary_page(file_obj, schema_helper, page_header, column_metadata):
    """Read a page containing dictionary data.

    Consumes the page payload using the plain encoding; if the column's
    schema element carries a converted_type, the values are converted once
    before being returned.
    """
    page_bytes = _read_page(file_obj, page_header, column_metadata)
    values = encoding.read_plain(
        io.BytesIO(page_bytes),
        column_metadata.type,
        page_header.dictionary_page_header.num_values,
    )
    schema_element = schema_helper.schema_element(column_metadata.path_in_schema[-1])
    if schema_element.converted_type is not None:
        return convert_column(values, schema_element)
    return values
def num_songs(t):
    """Return the number of songs in the pyTunes tree, t.

    >>> pytunes = make_pytunes('i_love_music')
    >>> num_songs(pytunes)
    3
    """
    # A leaf is a single song; an internal node holds the songs of its branches.
    if is_leaf(t):
        return 1
    return sum(num_songs(branch) for branch in branches(t))
def _get_parameter_value(potential: Potential, handler: str, parameter: str) -> float:
    """Returns the value of a parameter in its default units"""
    # Convert the quantity to the handler's default units before taking
    # the bare magnitude.
    default_units = _DEFAULT_UNITS[handler][parameter]
    quantity = potential.parameters[parameter]
    return quantity.to(default_units).magnitude
def triplets_in_range(mini, maxi):
    """
    Find all Pythagorean triplets (a ** 2 + b ** 2 = c ** 2) within a range.

    >>> triplets_in_range(2, 10)
    {(3, 4, 5), (6, 8, 10)}

    :param mini: The minimum in the range
    :param maxi: The maximum in the range
    :return: a set of 3-tuples of numbers that meet the given condition
    :rtype: set
    """
    triplets = set()
    for a in range(mini, maxi + 1):
        for b in range(a + 1, maxi + 1):
            # Round the hypotenuse to the nearest integer, then verify exactly.
            c = int(sqrt(a * a + b * b) + 0.5)
            if c * c == a * a + b * b and mini <= c <= maxi:
                triplets.add((a, b, c))
    return triplets
import re
def targetInCol(df, target):
    """
    Return meta information (Line or Area) from information in a column of DF.

    Arguments:
    df -- csv Promax geometry dataframe
    target -- meta information to get (Line or Area)
    """
    pattern = re.escape(target)
    # Case-insensitive search of each column header for the target word;
    # the first row's value of the first matching column is returned.
    for idx, name in enumerate(df.columns):
        if re.search(pattern, name, re.I):
            return df.iloc[0][idx]
    return None
def default_monitor(verbose=1):
    """Returns very simple monitor object to summarize training progress.

    Args:
        verbose: Level of verbosity of output.

    Returns:
        Default monitor object.
    """
    monitor = BaseMonitor(verbose=verbose)
    return monitor
def pca(X, k = 30, optim = "fastest"):
    """Use PCA to project X to k dimensions.

    Args:
        X: 2-D array, samples (rows) by features (columns).
        k: number of output dimensions.
        optim: optimization mode. "none" was meant to select a custom numpy
            eigendecomposition, but that path was never finished; it prints a
            warning and falls back to sklearn, like every other mode.

    Returns:
        Array of shape (n_samples, k) with the projected data.
    """
    # Center/scale the data; columns with zero variance are left unscaled.
    s = np.std(X, axis=0)
    s = np.where(s == 0, 1, s)
    X = (X - np.mean(X, axis=0)) / s
    if optim == "none":
        # The custom covariance-eigenvector implementation was removed as
        # broken; keep the user-visible warning and use sklearn instead.
        print("Custom PCA is broken; defaulting to sklearn.")
    # Run PCA with sklearn in every case (both former branches were identical).
    pca_ = PCA(n_components=k)
    return pca_.fit_transform(X)
def get_range(a_list):
    """
    =================================================================================================
    get_range(a_list)

    Find symmetric integer bounds covering every value in a (possibly nested) list.
    =================================================================================================
    Arguments:

    a_list -> A list (possibly of lists) of floats/ints. [1,2,-3]
    =================================================================================================
    Returns: a tuple (-bound, bound) where bound = int(max(|value|)) + 1, e.g. (-4, 4) for [1,2,-3].
    =================================================================================================
    """
    # Make sure the input list is correctly formatted.
    assert type(a_list) == list, "The input a_list should be of type list..."
    # Flatten the list of lists into one list with all values.
    unpacked = gh.unpack_list(a_list)
    # Float the items, silently dropping NaNs (NaN != NaN). This raises if a
    # non-floatable string is present, as before.
    unpacked = [float(item) for item in unpacked if float(item) == float(item)]
    # The bound is one more than the integer part of the largest magnitude.
    # (The original if/elif branches were symmetric duplicates and the final
    # else was unreachable; collapsed into a single expression.)
    bound = int(max(abs(max(unpacked)), abs(min(unpacked)))) + 1
    return (-bound, bound)
def create_config(solution, nodes, description_info):
    """Creates compact string representing input data file

    Parameters:
        solution (list) List of solutions
        nodes (list) List of node specification
        description_info (tuple) CSP description in form of tuple: (algorithm name, domains, constraints)

    Returns:
        ret_string (string) String representing input data file
    """
    ret_string = write_description(description_info)
    # Root node of the search tree; subsequent node orders start at 2.
    ret_string += "path:=R0;name:=none;order:=1\n"
    for i, node in enumerate(nodes, start=2):
        # Each node line starts with its path from the root R0.
        partial_string = "path:=R0"
        domains = node[0]
        node_name = None
        for domain in domains:
            variable = domain[0]
            value = domain[1]
            # The path concatenates every variable/value assignment; the node
            # is named after the value of its last assignment.
            partial_string += variable + str(value)
            node_name = value
        partial_string += ";name:=" + str(node_name)
        partial_string += ";order:=" + str(i)
        if node[1] == "d":
            # "d"-flagged nodes are drawn as red squares
            # (presumably dead ends — TODO confirm against the renderer).
            partial_string += ";shape:=square;color:=red"
        if any(node[0] == s for s in solution):
            # Nodes whose assignment matches a solution are drawn blank.
            partial_string += ";color:=blank"
        if node[2]:
            # Bottom label: comma-separated constraint references "(cN)",
            # with constraint indices shifted to be 1-based.
            partial_string += ";bottom_label:="
            for l in node[2]:
                if l != node[2][0]:
                    partial_string += ","
                partial_string += "(c" + str(l + 1) + ")"
        if node[3]:
            # Side label: "&&"-separated entries of the form
            # cI,cJ-><variable>:{pruned values} (or :{} when nothing pruned).
            partial_string += ";side_label:="
            for l in node[3]:
                if l != node[3][0]:
                    partial_string += "&&"
                for c in l[2]:
                    if c != l[2][0]:
                        partial_string += ","
                    partial_string += "c" + str(c + 1)
                if l[1]:
                    partial_string += "->" + l[0] + ":" + str(set(l[1]))
                else:
                    partial_string += "->" + l[0] + ":{}"
        ret_string += partial_string
        # Newline between node lines, but not after the last one.
        if node != nodes[len(nodes) - 1]:
            ret_string += "\n"
    return ret_string
def get_effective_option(metadata, settings, key):
    """
    Return option with highest priority:
    not-defined key < default < pelican config settings < file metadata
    """
    # Plugin settings provide the fallback; file metadata wins when present.
    plugin_settings = settings[DP_KEY]
    return metadata.get(key, plugin_settings.get(key))
def positions_sync_out_doc_view(request):
    """
    Show documentation about positionsSyncOut
    """
    # Build the template context for the API doc page and attach the
    # caller's device id.
    template_values = positions_sync_out_doc.positions_sync_out_doc_template_values(WE_VOTE_SERVER_ROOT_URL)
    template_values['voter_api_device_id'] = get_voter_api_device_id(request)
    return render(request, 'apis_v1/api_doc_page.html', template_values)
import gettext
def __build_caj_q_html_view__(data: object) -> any:
    """
    popup's table for Caju Quality Information

    Builds an HTML table summarizing average nut count, defective rate and
    KOR computed from the survey records in ``data.qars`` (assumes ``data``
    exposes a ``qars`` attribute — TODO confirm against callers).
    """
    # NOTE(review): satellite_est is translated but never used in the table
    # below — presumably a leftover from a removed column.
    satellite_est = gettext("Satellite Estimation")
    tns_survey = gettext("TNS Survey")
    nut_count_average = gettext("Nut Count Average")
    defective_rate_average = gettext("Defective Rate Average")
    kor_average = gettext("KOR Average")
    return f'''
    <h4>Caju Quality Informations</h4>
    <table>
    <tr>
    <th></th>
    <th>{tns_survey}</th>
    </tr>
    <tr>
    <td>{nut_count_average}</td>
    <td>{__get_average_nut_count__(data.qars)}</td>
    </tr>
    <tr>
    <td>{defective_rate_average}</td>
    <td>{__get_average_defective_rate__(data.qars)}</td>
    </tr>
    <tr>
    <td>{kor_average}</td>
    <td>{__get_average_kor__(data.qars)}</td>
    </tr>
    </table>
    '''
def code(email):
    """
    Returns the one-time password associated with the given user for the
    current time window. Returns empty string if user is not found.
    """
    print("route=/code/<email> : email:", email)
    user = User.get_user(email)
    if user is None:
        print("user not found, returning ''")
        return ''
    # Generate the TOTP for the current time window from the user's key.
    totp = pyotp.TOTP(user.key)
    result = str(totp.now())
    print("result:", result)
    return result
def blend_image_with_masks(image, masks, colors, alpha=0.5):
    """Add transparent colored mask to an image.

    Args:
        image: `np.ndarray`, uint8 image of shape (width, height, channel)
            or (width, height); grayscale input is promoted to 3 channels.
        masks: `np.ndarray`, masks of shape (n, width, height).
        colors: list, a list of RGB colors (components from 0 to 1).
        alpha: float, transparency to apply to masks.

    Returns:
        `np.ndarray` of dtype uint8 with the masks alpha-blended in.

    Raises:
        Exception: if `image` is not of dtype uint8.
    """
    if image.dtype != "uint8":
        raise Exception("The image needs to be of type uint8. "
                        f"Current type is: {image.dtype}.")
    # Work on a float copy scaled to [0, 1].
    image = image.copy()
    image = image / 255
    if image.ndim == 2:
        # Promote grayscale to 3 identical channels.
        image = np.stack([image, image, image], axis=-1)
    # (The original pre-loop "colored = np.zeros(...)" was dead code,
    # always overwritten inside the loop; removed.)
    for color, mask in zip(colors, masks):
        # Per-pixel blend weight: alpha wherever the mask is set.
        rgb_mask = np.stack([mask, mask, mask], axis=-1)
        rgb_mask = rgb_mask.astype('float32') * alpha
        # Solid image of the mask color, blended over the running result.
        colored = np.ones(image.shape, dtype='float32') * color[:3]
        image = colored * rgb_mask + image * (1 - rgb_mask)
    image = image * 255
    return image.astype("uint8")
import functools
def get_trainee_and_group(func):
    """Decorator to insert trainee and group as arguments to the given function.

    Creates new Trainee if did not exist in DB.
    Creates new Group if did not exist in DB.
    Adds the trainee to the group if it was not part of it.
    Appends the trainee and group as last argument of the function.

    Example:
        @get_trainee_and_group
        def run(bot, update, trainee, group):
            ....

    Notes:
        func has to be used in dispatcher as handler in order to receive the
        bot and the update arguments.
        The inner @get_group decorator is expected to append the group as the
        last positional argument (read back via args[-1] below).
    """
    @get_group
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        bot, update = get_bot_and_update_from_args(args)
        trainee_id = update.effective_user.id
        trainee = Trainee.objects.get(id=trainee_id)
        if trainee is None:  # new trainee.
            trainee = Trainee.objects.create(id=trainee_id,
                                             first_name=update.effective_user.first_name)
        # @get_group appended the group as the last positional argument.
        group = args[-1]
        if trainee not in group.trainees:
            group.add_trainee(new_trainee=trainee)
        # Replace the trailing group argument with (trainee, group).
        args_with_trainee_and_group = args[:-1] + (trainee, group)
        return func(*args_with_trainee_and_group, **kwargs)
    return wrapper
import subprocess
def get_bisect_info(good_commits, bad_commit):
    """Returns a dict with info about the current bisect run.

    Internally runs `git rev-list --bisect-vars`. Information includes:
    - bisect_rev: midpoint revision
    - bisect_nr: expected number to be tested after bisect_rev
    - bisect_good: bisect_nr if good
    - bisect_bad: bisect_nr if bad
    - bisect_all: commits we are bisecting right now
    - bisect_steps: estimated steps after bisect_rev
    """
    # Exclude the known-good commits from the bad commit's ancestry.
    rev_args = [bad_commit] + [f"^{commit}" for commit in good_commits]
    output = subprocess.check_output(
        ["git", "rev-list", "--bisect-vars"] + rev_args
    ).decode()
    # Each output line is "key=value".
    info = dict(line.split("=") for line in output.splitlines())
    # bisect_rev is emitted as a quoted string; strip the quotes.
    info["bisect_rev"] = info["bisect_rev"][1:-1]
    # The remaining vars are integer counts.
    for key in ("bisect_nr", "bisect_good", "bisect_bad", "bisect_all", "bisect_steps"):
        info[key] = int(info[key])
    return info
def generate_chromatogram(
    ms_data: dict,
    chromatogram: str,
    ms_level: int = 1
) -> list:
    """
    Generates either a Base Peak Chromatogram (BPC) or Total Ion Chromatogram
    (TIC) from ripper data.

    Args:
        ms_data (dict): mzml ripper data in standard ripper format.
        chromatogram (str): specifies type of chromatogram. Must be either
            "tic" or "bpc" for Total Ion Chromatogram or Base Peak
            Chromatogram, respectively (case-insensitive).
        ms_level (int, optional): specifies ms level. Defaults to 1.

    Raises:
        Exception: raised if chromatogram.lower() is not in ["tic", "bpc"]

    Returns:
        BPC (List[Tuple[float, float, float]]): list of base peaks in format:
            [(retention time, m/z, intensity), ...]
        TIC (List[Tuple[float, float]]): list of total ion currents in format:
            [(retention_time, total_intensity), ...]
    """
    # No spectra recorded at this MS level => empty chromatogram.
    if f"ms{ms_level}" not in ms_data:
        return []
    spectra = ms_data[f"ms{ms_level}"]
    chrom_type = chromatogram.lower()
    if chrom_type == "bpc":
        # Most intense peak of each spectrum.
        return [
            find_max_peak(spectrum=spectrum) for spectrum in spectra.values()
        ]
    if chrom_type == "tic":
        # Summed intensity of each spectrum.
        return [
            sum_intensity_peaks(spectrum) for spectrum in spectra.values()
        ]
    # Fixed typo in the user-facing message ("repspectively").
    raise Exception(
        f"{chromatogram} not valid chromatogram type. Please choose "
        "'tic' or 'bpc' for Total Ion Chromatogram or Base Peak "
        "Chromatogram, respectively"
    )
def get_tensor_batch_size(values):
    """Extracts batch size from tensor.

    Returns the leading dimension of `values` as a 1-element int tensor:
    tf.shape yields the dynamic shape vector and tf.gather with indices [0]
    keeps the result rank-1 rather than a scalar.
    """
    return tf.gather(params=tf.shape(input=values), indices=tf.constant([0]))
def generate_particle_timestamp(time_2000):
    """
    Calculate a timestamp in epoch 1900 from an ASCII hex time in epoch 2000.

    Parameter:
        time_2000 - hex string of seconds since Jan 1, 2000
    Returns:
        number of seconds since Jan 1, 1900
    """
    # Shift the epoch by adding the NTP time of 2000-01-01T00:00:00Z.
    seconds_since_2000 = int(time_2000, 16)
    epoch_offset = zulu_timestamp_to_ntp_time("2000-01-01T00:00:00.00Z")
    return seconds_since_2000 + epoch_offset
def clip_histogram(hist, clip_limit):
    """Perform clipping of the histogram and redistribution of bins.

    The histogram is clipped and the number of excess pixels is counted.
    Afterwards the excess pixels are equally redistributed across the
    whole histogram (providing the bin count is smaller than the cliplimit).

    Note: `hist` is modified in place and also returned.

    Parameters
    ----------
    hist : ndarray
        Histogram array (bin counts are assumed non-negative — see the
        `hist < 0` initializer below).
    clip_limit : int
        Maximum allowed bin count.

    Returns
    -------
    hist : ndarray
        Clipped histogram.
    """
    # calculate total number of excess pixels
    excess_mask = hist > clip_limit
    excess = hist[excess_mask]
    n_excess = excess.sum() - excess.size * clip_limit
    # Second part: clip histogram and redistribute excess pixels in each bin
    bin_incr = int(n_excess / hist.size)  # average binincrement
    upper = clip_limit - bin_incr  # Bins larger than upper set to cliplimit
    hist[excess_mask] = clip_limit
    # Bins well below the limit absorb the average increment outright.
    low_mask = hist < upper
    n_excess -= hist[low_mask].size * bin_incr
    hist[low_mask] += bin_incr
    # Bins between upper and the limit are topped up to the limit exactly.
    mid_mask = (hist >= upper) & (hist < clip_limit)
    mid = hist[mid_mask]
    n_excess -= mid.size * clip_limit - mid.sum()
    hist[mid_mask] = clip_limit
    prev_n_excess = n_excess
    while n_excess > 0:  # Redistribute remaining excess
        index = 0
        while n_excess > 0 and index < hist.size:
            # Start from an all-False mask (hist < 0 assuming non-negative
            # counts), then mark evenly spaced bins still below the limit.
            under_mask = hist < 0
            step_size = int(hist[hist < clip_limit].size / n_excess)
            step_size = max(step_size, 1)
            indices = np.arange(index, hist.size, step_size)
            under_mask[indices] = True
            under_mask = under_mask & (hist < clip_limit)
            hist[under_mask] += 1
            n_excess -= under_mask.sum()
            index += 1
        # bail if we have not distributed any excess
        if prev_n_excess == n_excess:
            break
        prev_n_excess = n_excess
    return hist
from typing import Iterable
from typing import Callable
from typing import Tuple
def aggregate_precision_recall(
    labels_pred_iterable: Iterable,
    precision_recall_fn: Callable = buffered_precision_recall,
) -> Tuple[float, float]:
    """
    Compute aggregate range-based precision/recall metrics over prediction labels.

    Parameters
    ----------
    labels_pred_iterable
        An iterable that gives 2-tuples of boolean lists corresponding to
        `true_labels` and `pred_labels` respectively.
    precision_recall_fn
        Function called to obtain the per-pair precision and recall.

    Returns
    -------
    A tuple containing average precision and recall in that order. Precision
    is weighted by the number of predicted ranges, recall by the number of
    true ranges; either average is 0 when its total weight is 0.
    """
    prec_sum = 0.0
    reca_sum = 0.0
    prec_weight = 0.0
    reca_weight = 0.0
    for true_labels, pred_labels in labels_pred_iterable:
        true_ranges = labels_to_ranges(true_labels)
        pred_ranges = labels_to_ranges(pred_labels)
        precision, recall = precision_recall_fn(true_ranges, pred_ranges)
        prec_sum += precision * len(pred_ranges)
        prec_weight += len(pred_ranges)
        reca_sum += recall * len(true_ranges)
        reca_weight += len(true_ranges)
    avg_precision = prec_sum / prec_weight if prec_weight > 0 else 0
    avg_recall = reca_sum / reca_weight if reca_weight > 0 else 0
    return avg_precision, avg_recall
def find_tag_for(t):
    """If transaction matches a rule, returns corresponding tuple
    (tag, ruler, match).

    Scans every ruler of every tag in the global TAGS mapping and collects
    all matches. Among matching rulers, prefers the one whose rule has the
    most fields; ties are broken by the largest total length of the captured
    match values. Returns (None, None, None) when nothing matches.
    """
    res = []
    for (tag, rulers) in list(TAGS.items()):
        for ruler in rulers:
            m, matches = match(ruler, t)
            if m:
                res.append((tag, ruler, matches))
    if res:
        # Return rule with the most fields.
        # If several, pick the one with the longer rules.
        return max(
            res,
            key=lambda tag_ruler_matches: (
                len(list(rulify(tag_ruler_matches[1]).keys())),
                sum([len(v) for v in list(tag_ruler_matches[2].values()) if v]),
            ),
        )
    return None, None, None
from typing import Union
from typing import Dict
from typing import Any
import typing
def DOMWidget(
    layout: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]] = {},
    on_layout: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]]], Any] = None,
) -> Element[ipywidgets.widgets.domwidget.DOMWidget]:
    """Widget that can be inserted into the DOM.

    Reactive wrapper around ipywidgets' DOMWidget: collects the arguments
    that differ from their defaults, promotes a plain dict ``layout`` to a
    Layout element, and returns a react Element bound to the widget class.
    """
    # without_default inspects locals() against this function's signature to
    # keep only explicitly-passed arguments.
    # NOTE(review): the mutable default {} is shared across calls; it appears
    # safe because it is never mutated here — worth confirming.
    kwargs: Dict[Any, Any] = without_default(DOMWidget, locals())
    if isinstance(kwargs.get("layout"), dict):
        # A bare dict layout is promoted to a Layout element.
        kwargs["layout"] = Layout(**kwargs["layout"])
    widget_cls = ipywidgets.widgets.domwidget.DOMWidget
    comp = react.core.ComponentWidget(widget=widget_cls)
    return Element(comp, **kwargs)
def load(path, element_spec=None, compression=None, reader_func=None):
    """Loads a previously saved dataset.

    Example usage:

    >>> import tempfile
    >>> path = os.path.join(tempfile.gettempdir(), "saved_data")
    >>> # Save a dataset
    >>> dataset = tf.data.Dataset.range(2)
    >>> tf.data.experimental.save(dataset, path)
    >>> new_dataset = tf.data.experimental.load(path)
    >>> for elem in new_dataset:
    ...   print(elem)
    tf.Tensor(0, shape=(), dtype=int64)
    tf.Tensor(1, shape=(), dtype=int64)

    Loading requires a type signature for the saved elements, which can be
    passed explicitly as `element_spec` (obtainable from
    `tf.data.Dataset.element_spec`) so shape inference needs no I/O; when
    omitted, the `tf.TypeSpec` structure stored with the dataset is used.
    With the default sharding option, element order is preserved on load.

    A custom `reader_func` controls the order in which shards are read. It
    receives a dataset of per-shard datasets and returns a dataset of
    elements; for example, to shuffle the shards:

    ```python
    def custom_reader_func(datasets):
      datasets = datasets.shuffle(NUM_SHARDS)
      return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)

    dataset = tf.data.experimental.load(
        path="/path/to/data", ..., reader_func=custom_reader_func)
    ```

    Args:
      path: Required. A path pointing to a previously saved dataset.
      element_spec: Optional. A nested structure of `tf.TypeSpec` objects
        matching the structure of an element of the saved dataset and
        specifying the type of individual element components.
      compression: Optional. Decompression algorithm: `GZIP` or `NONE`
        (default).
      reader_func: Optional. A function to control how to read data from
        shards; if present, it is traced and executed as graph computation.

    Returns:
      A `tf.data.Dataset` instance.

    Raises:
      FileNotFoundError: If `element_spec` is not specified and the saved
        nested structure of `tf.TypeSpec` cannot be located with the saved
        dataset.
    """
    return _LoadDataset(
        path=path, element_spec=element_spec,
        compression=compression, reader_func=reader_func)
def test_confusion_PRFAS():
    """
    Check confusion_PRFAS on a 3-class confusion matrix
    (rows = true class, columns = prediction):

        TR_B [[1585  109    4]
        TR_I  [ 126 1233   17]
        TR_O  [  20   12   82]]

    Expected (matching the classification report for this matrix):
    weighted precision 0.909, recall 0.910, f1 0.909,
    accuracy 90.97 % (trace 2900 / sum 3188), support 3188.
    """
    cm = np.array([[1585, 109, 4],
                   [126, 1233, 17],
                   [20, 12, 82]])
    p, r, f1, a, s = confusion_PRFAS(cm)

    def close(x, y):
        # Absolute tolerance matching the 3-decimal expectations above.
        return abs(x - y) < 0.001

    assert close(p, 0.909), ("p", p)
    assert close(r, 0.910), ("r", r)
    assert close(f1, 0.909), ("f", f1)
    assert close(a, 0.9097), ("a", a)
    assert close(s, 3188), ("s", s)
def wrap(x, m, M):
    """
    :param x: a scalar
    :param m: minimum possible value in range
    :param M: maximum possible value in range

    Wraps ``x`` so m <= x <= M; but unlike ``bound()`` which truncates,
    ``wrap()`` shifts x by whole interval widths until it lands inside the
    coordinate system defined by m, M.\n
    For example, m = -180, M = 180 (degrees), x = 360 --> returns 0.
    """
    width = M - m
    while x > M:
        x -= width
    while x < m:
        x += width
    return x
from typing import Iterable
from typing import List
from typing import Any
import click
from typing import cast
from typing import Callable
def execute_processors(processors: Iterable[ProcessorType], state: State) -> None:
    """Execute a sequence of processors to generate a Document structure. For block handling,
    we use a recursive approach. Only top-level blocks are extracted and processed by block
    processors, which, in turn, recursively call this function.

    Args:
        processors: iterable of processors
        state: state structure, mutated in place by the processors

    Returns:
        None (results are accumulated in ``state``)

    Raises:
        click.BadParameter: on a block command without 'begin', or an 'end'
            without a matching 'begin'.
        click.ClickException: when an 'end' command is missing at the end.
    """
    outer_processors: List[Any] = []  # gather commands outside of top-level blocks
    top_level_processors: List[Any] = []  # gather commands inside of top-level blocks
    block = None  # save the current top-level block's block layer_processor
    nested_count = 0  # block depth counter
    expect_block = False  # set to True by `begin` command
    for proc in processors:
        if getattr(proc, "__vpype_block_processor__", False):
            if not expect_block:
                # `begin` was omitted
                nested_count += 1
            else:
                expect_block = False
            # if we in a top level block, we save the block layer_processor
            # (nested block are ignored for the time being)
            if nested_count == 1:
                block = proc
            else:
                top_level_processors.append(proc)
        elif expect_block:
            raise click.BadParameter("A block command must always follow 'begin'")
        elif isinstance(proc, BeginBlock):
            # entering a block
            nested_count += 1
            expect_block = True
        elif isinstance(proc, EndBlock):
            if nested_count < 1:
                raise click.BadParameter(
                    "A 'end' command has no corresponding 'begin' command"
                )
            nested_count -= 1
            if nested_count == 0:
                # We're closing a top level block. The top-level sequence [BeginBlock,
                # block_processor, *top_level_processors, EndBlock] is now replaced by a
                # placeholder closure that will execute the corresponding block processor on
                # the top_level_processors sequence.
                #
                # Note: we use the default argument trick to copy the *current* value of
                # block and top_level_processor "inside" the placeholder function.
                # noinspection PyShadowingNames
                def block_processor_placeholder(
                    state: State, block=block, processors=tuple(top_level_processors)
                ) -> State:
                    return cast(Callable, block)(state, processors)

                outer_processors.append(block_processor_placeholder)
                # reset the top level layer_processor list
                top_level_processors = list()
            else:
                top_level_processors.append(proc)
        else:
            # this is a 'normal' layer_processor, we can just add it to the top of the stack
            if nested_count == 0:
                outer_processors.append(proc)
            else:
                top_level_processors.append(proc)
    # at this stage, the stack must have a single frame, otherwise we're missing end commands
    if nested_count > 0:
        raise click.ClickException("An 'end' command is missing")
    # the (only) frame's processors should now be flat and can be chain-called
    for proc in outer_processors:
        cast(Callable, proc)(state)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`."""
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    # keep only revisions whose commit timestamp (date()[0]) matches the spec
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))
def addAuthor(author):
    """Serialize an Author instance into the API's author dict representation.

    :param author: Author instance
    :return: dict with id/host/displayName/github/url plus optional profile fields
    """
    author_dict = dict()

    author_dict['id'] = "{}/api/author/{}".format(DOMAIN, author.id)
    author_dict['host'] = "{}/api/".format(author.host_url)
    author_dict['displayName'] = author.username
    author_dict['github'] = author.github_url
    author_dict['url'] = "{}/api/author/{}".format(DOMAIN, author.id)

    # Optional attributes: only included when set.
    # NOTE: 'github' is already assigned unconditionally above, so the former
    # conditional re-assignment of the same value was redundant and is removed.
    if author.user.first_name:
        author_dict['firstName'] = author.user.first_name
    if author.user.last_name:
        author_dict['lastName'] = author.user.last_name
    if author.user.email:
        author_dict['email'] = author.user.email
    if author.bio:
        author_dict['bio'] = author.bio

    return author_dict
def add_transformer_enc_hyperparams_args(parser):
    """Add transformer-encoder hyperparameter options to an argparse parser.

    Only applicable when args.model_name is 'transformer_enc'.

    :param parser: argparse.ArgumentParser to extend
    :return: the same parser, for chaining
    """
    parser.add_argument('--hid_dim', type=int, default=128)
    parser.add_argument('--num_enc_layers', type=int, default=3)
    parser.add_argument('--num_enc_heads', type=int, default=8)
    parser.add_argument('--enc_pf_dim', type=int, default=256)
    parser.add_argument('--enc_dropout', type=float, default=0.1)
    parser.add_argument('--fc_dim', type=int, default=64,
                        help='hidden size of the linear layer added on top')
    return parser
def make_length(value):
    """Make a kicad length measurement from an openjson measurement.

    The value is scaled by the module-level MULT factor and rounded to the
    nearest integer.
    """
    return int(round(float(value) * MULT))
import os
def get_data_path():
    """Return the path to the project's data folder.

    The project root is taken to be three directories above this file's
    directory (four ``dirname`` calls on ``__file__``).

    :return: the path to the ``data`` folder
    """
    project_folder = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
    return os.path.join(project_folder, "data")
def with_metaclass(meta, *bases):
    """Create a base class with metaclass *meta*, usable on both python 2 and 3.

    Copied from
    https://github.com/Byron/bcore/blob/master/src/python/butility/future.py#L15
    """
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, nbases, d):
            # nbases is None only for the temporary helper class created at the
            # bottom of with_metaclass; return a plain, base-less class then.
            if nbases is None:
                return type.__new__(cls, name, (), d)
            # There may be clients who rely on this attribute to be set to a
            # reasonable value, which is why we set the __metaclass__ attribute
            # explicitly.
            # BUG FIX: the membership test previously checked '___metaclass__'
            # (three leading underscores), a key that can never exist, so an
            # existing '__metaclass__' entry was always clobbered.
            if not PY3 and '__metaclass__' not in d:
                d['__metaclass__'] = meta
            # Use the real bases captured by with_metaclass, not nbases.
            return meta(name, bases, d)

    return metaclass(meta.__name__ + 'Helper', None, {})
import math
def calc_mupen_res(N, region_w, region_h):
    """Find the largest 4:3 resolution that fits N mupen instances in a region.

    Tries every row length from 1 to N; instance width is capped at 640 px and
    height follows the 640x480 aspect ratio. Layouts whose full grid fits
    inside (region_w, region_h) are kept and the largest is returned.

    :param N: number of instances to place
    :param region_w: region width in pixels
    :param region_h: region height in pixels
    :return: (width, height) of the largest fitting instance resolution
    :raises ValueError: if no candidate layout fits the region
    """
    results = []
    for row_length in range(1, N + 1):
        col_length = math.ceil(N / float(row_length))
        instance_width = int(math.floor(min(640, region_w / float(row_length))))
        instance_height = int(math.floor(instance_width * (480.0 / 640.0)))
        if instance_height * col_length <= region_h and instance_width * row_length <= region_w:
            results.append((instance_width, instance_height))
    return max(results)
from typing import List
def pos_tag_wordnet(text: List) -> List:
    """POS-tag tokens and map the NLTK tags onto wordnet POS constants.

    :param text: list of tokens to be nltk_pos_tagged
    :return: list of (word, wordnet_pos) tuples; tags that do not map to
        NOUN/VERB/ADJ/ADV default to wordnet.NOUN
    """
    pos_tagged_text = nltk.pos_tag(text)

    # map the first letter of the NLTK tag onto the wordnet POS constant
    wordnet_map = {
        "N": wordnet.NOUN,
        "V": wordnet.VERB,
        "J": wordnet.ADJ,
        "R": wordnet.ADV,
    }
    # dict.get with a default replaces the former explicit `in keys()` test
    tagged_text = [
        (word, wordnet_map.get(pos_tag[0], wordnet.NOUN))
        for (word, pos_tag) in pos_tagged_text
    ]
    return tagged_text
def create_app():
    """Create an app with config file.

    :return: Flask App
    """
    # init a flask app
    app = Flask(__name__)
    # Load configuration (from a yaml file) and initialise the app.
    _config_app(app)
    # Allow cross-origin requests when enabled in the config.
    if app.config.get('CORS_ENABLE'):
        CORS(app)
    # Register blueprints.
    configure_blueprints(app)
    # Register middleware.
    configure_middleware(app)
    return app
import json
def img_to_json(img, decimals=2, swap=False, save=None):
    """ Convert an image volume to web-ready JSON format suitable for import into
    the Neurosynth viewer.
    Args:
        img: An image filename.
        decimals: Optional integer giving number of decimals to round values to.
        swap: A temporary kludge to deal with some orientation problems. For some reason
            the switch from PyNifti to NiBabel seems to produce images that load in a
            different orientation given the same header. In practice this can be addressed
            by flipping the x and z axes (swap = True), but need to look into this and
            come up with a permanent solution.
        save: Optional filename; when given, the JSON is written to that file
            instead of being returned.
    Returns:
        a JSON-formatted string (or None when saving to file).
    """
    try:
        data = nb.load(img).get_data()
    except Exception as e:
        raise Exception("Error loading %s: %s" % (img, str(e)))

    dims = list(data.shape)

    # Convenience method to package and output the converted data;
    # also handles cases where image is blank.
    def package_json(contents=None):
        if contents is None:
            contents = {
                'thresh': 0.0,
                'max': 0.0,
                'min': 0.0,
                'dims': dims,
                'values': [],
                'indices': []
            }
        # Write to file or return string
        if save is None:
            return json.dumps(contents)
        else:
            json.dump(contents, open(save, 'w'))

    # Skip empty images
    data = np.nan_to_num(data)
    if np.sum(data) == 0:
        return package_json()

    # Round values to save space. Note that in practice the resulting JSON file will
    # typically be larger than the original nifti unless the image is relatively
    # dense (even when compressed).
    # NOTE: np.round replaces np.round_, which was removed in NumPy 2.0.
    data = np.round(data, decimals)

    # Temporary kludge to fix orientation issue
    if swap:
        data = np.swapaxes(data, 0, 2)

    # Identify threshold--minimum nonzero value
    thresh = np.min(np.abs(data[np.nonzero(data)]))

    # compress into 2 lists, one with values, the other with list of indices
    # for each value
    uniq = list(np.unique(data))
    uniq.remove(0)
    if len(uniq) == 0:
        return package_json()

    contents = {
        'thresh': round(thresh, decimals),
        'max': round(np.max(data), decimals),
        'min': round(np.min(data), decimals),
        'dims': dims,
        # BUG FIX: honor the `decimals` argument; the value list was previously
        # hard-coded to 2 decimal places via '%.2f' regardless of `decimals`.
        'values': [round(float(u), decimals) for u in uniq]
    }

    ds_flat = data.ravel()
    all_inds = []
    # zero was removed from uniq above, so no skip check is needed here
    for val in uniq:
        ind = [int(x) for x in list(np.where(ds_flat == val)[0])]
        all_inds.append(ind)
    contents['indices'] = all_inds
    return package_json(contents)
import ctypes
import typing
import array
def encode_float(
    encoder_state: ctypes.Structure,
    pcm_data: bytes,
    frame_size: int,
    max_data_bytes: int
) -> typing.Union[bytes, typing.Any]:
    """Encodes an Opus frame from floating point input.

    :param encoder_state: opaque Opus encoder state structure
    :param pcm_data: raw PCM samples, reinterpreted as a C float pointer
    :param frame_size: frame size passed through to libopus
    :param max_data_bytes: size of the output buffer in bytes
    :raises opuslib.OpusError: if libopus returns a negative result code
    :return: the encoded frame as bytes
    """
    pcm_pointer = ctypes.cast(pcm_data, opuslib.api.c_float_pointer)
    opus_data = (ctypes.c_char * max_data_bytes)()

    result = libopus_encode_float(
        encoder_state,
        pcm_pointer,
        frame_size,
        opus_data,
        max_data_bytes
    )
    if result < 0:
        raise opuslib.OpusError(result)
    # only the first `result` bytes of the buffer contain encoded data
    return array.array('b', opus_data[:result]).tobytes()
from datetime import datetime
def _login(use_cookie):
    """User login helper function.

    The request data should contain at least 'email' and 'password'.
    The cookie expiration duration is defined in flask app config.
    If user is not authenticated, it raises Unauthorized exception.
    """
    data = _get_request_data()
    if 'email' not in data or 'password' not in data:
        raise exception_handler.BadRequest(
            'missing email or password in data'
        )
    # BUG FIX: with `from datetime import datetime` in scope, the former
    # `datetime.datetime.now()` raises AttributeError; `datetime.now()` is correct.
    expire_timestamp = (
        datetime.now() + app.config['REMEMBER_COOKIE_DURATION']
    )
    data['expire_timestamp'] = expire_timestamp
    user = auth_handler.authenticate_user(**data)
    if not login_user(user, remember=data.get('remember', False)):
        raise exception_handler.UserDisabled('failed to login: %s' % user)
    user_log_api.log_user_action(user.id, request.path)
    response_data = user_api.record_user_token(
        user.token, user.expire_timestamp, user=user
    )
    return utils.make_json_response(200, response_data)
def get_days_to_complete(course_id, date_for):
    """Return a dict with a list of days to complete and errors.

    NOTE: This is a work in progress, as it has issues to resolve:
    * It returns the delta in days, so working in ints
    * This means if a learner starts at midnight and finished just before
      midnight, then 0 days will be given

    NOTE: This has limited scaling. We ought to test it with
    1k, 10k, 100k cert records

    TODO: change to use start_date, end_date with defaults that
    start_date is open and end_date is today
    TODO: Consider collecting the total seconds rather than days
    This will improve accuracy, but may actually not be that important
    TODO: Analyze the error based on number of completions
    When we have to support scale, we can look into optimization
    techniques.
    """
    certificates = GeneratedCertificate.objects.filter(
        course_id=as_course_key(course_id),
        created_date__lte=as_datetime(date_for))
    days = []
    errors = []
    for cert in certificates:
        ce = CourseEnrollment.objects.filter(
            course_id=as_course_key(course_id),
            user=cert.user)
        # Multiple enrollments are recorded as errors; the first one is still
        # used for the day计算 below? No — the first enrollment is used below.
        if ce.count() > 1:
            errors.append(
                dict(msg='Multiple CE records',
                     course_id=course_id,
                     user_id=cert.user.id,
                     ))
        # days from enrollment creation to certificate creation (int delta)
        days.append((cert.created_date - ce[0].created).days)
    return dict(days=days, errors=errors)
import requests
from bs4 import BeautifulSoup
import re
def get_course_data(level="graduate", department_code="CS"):
    """
    Retrieves the information as tuple of the form
    (course numbers, titles, pre-requisites, description)
    for a given program and department.

    :type level: string (Must be one of "undergraduate", "graduate" or "Any")
    :type department_code: string (Must be one of "CS", "DS", "IS", "GNSD", "HINF", "IA")
    """
    # Get appropriate regex for the provided data.
    course_code_regex = get_regexp(level, department_code)

    # CCIS Courses Page URL.
    url = "http://www.ccis.northeastern.edu/academics/courses/"
    request_time_out = 5

    # Add user-agent info to headers to identify this bot.
    custom_header = requests.utils.default_headers()
    custom_header.update({'User-Agent': "CourseBot/1.0"})

    # Make GET request and obtain page data.
    r = requests.get(url, headers=custom_header, timeout=request_time_out)
    # Make sure its not an erroneous response.
    r.raise_for_status()
    data = r.text

    # Soup-ify the data to make it easier to parse.
    soup = BeautifulSoup(data, "html.parser")

    # TODO: Build a graph of courses with course numbers as keys and rest of the data as values.
    course_numbers = []
    course_titles = []
    course_descriptions = []
    course_dependencies = []

    # Find all links which match the pattern.
    course_elements = soup.find_all('a', text=course_code_regex)

    # The pre-requisite courses could be undergraduate too, for graduate courses.
    all_courses_code_regex = get_regexp(None, "Any")

    for element in course_elements:
        course_numbers.append(element.text)
        # Ignore the " -" in the beginning.
        course_titles.append(element.next_sibling[3:])

        # Get the course data from the course link.
        course_link = element["href"]
        course_request = requests.get(course_link)
        course_data = course_request.text
        course_soup = BeautifulSoup(course_data, "lxml")

        # Course Description.
        course_description = course_soup.find('td', 'ntdefault')
        course_descriptions.append(course_description.contents[0].strip())

        # The course page contains an italicised prerequisites element.
        pre_req_elem = course_soup.find('i', text=re.compile(r'Prereq.*'))
        dep = []
        if pre_req_elem and pre_req_elem.text:
            # Ignore the "Prereq." in the beginning.
            s1 = pre_req_elem.text[7:]
            # The pattern appears to be (course1, course2, or course3) and
            # (course4, or course5), so split on "and"; the groups remaining
            # after the split are all of the OR type. Works in most cases.
            s2 = s1.split('and')
            for sub in s2:
                matches = re.findall(all_courses_code_regex, sub.strip())
                if matches:
                    dep.append(matches)
        course_dependencies.append(dep)
        # Moment of silence (be polite to the server between page fetches).
        sleep(1)

    # TODO: Need to json-ify the data and return that.
    return course_numbers, course_titles, course_dependencies, course_descriptions
def get_highlightjs_setting(setting, default=None):
    """Read a value from the module-level HIGHLIGHTJS settings dict.

    :param setting: key to look up
    :param default: value returned when the key is absent
    """
    return HIGHLIGHTJS.get(setting, default)
import numba
import os
def make_walks(T,
               walklen=10,
               epochs=3,
               return_weight=1.,
               neighbor_weight=1.,
               threads=0):
    """
    Create random walks from the transition matrix of a graph
    in CSR sparse format

    NOTE: scales linearly with threads but hyperthreads don't seem to
    accelerate this linearly

    Parameters
    ----------
    T : scipy.sparse.csr matrix
        Graph transition matrix in CSR sparse format
    walklen : int
        length of the random walks
    epochs : int
        number of times to start a walk from each node
    return_weight : float in (0, inf]
        Weight on the probability of returning to node coming from
        Having this higher tends the walks to be
        more like a Breadth-First Search.
        Having this very high (> 2) makes search very local.
        Equal to the inverse of p in the Node2Vec paper.
    neighbor_weight : float in (0, inf]
        Weight on the probability of visitng a neighbor node
        to the one we're coming from in the random walk
        Having this higher tends the walks to be
        more like a Depth-First Search.
        Having this very high makes search more outward.
        Having this very low makes search very local.
        Equal to the inverse of q in the Node2Vec paper.
    threads : int
        number of threads to use. 0 is full use

    Returns
    -------
    out : 2d np.array (n_walks, walklen)
        A matrix where each row is a random walk,
        and each entry is the ID of the node
    """
    n_rows = T.shape[0]
    # each node is a walk start, once per epoch
    sampling_nodes = np.arange(n_rows)
    sampling_nodes = np.tile(sampling_nodes, epochs)

    if type(threads) is not int:
        raise ValueError("Threads argument must be an int!")
    if threads == 0:
        threads = numba.config.NUMBA_DEFAULT_NUM_THREADS
    threads = str(threads)
    try:
        prev_numba_value = os.environ['NUMBA_NUM_THREADS']
    except KeyError:
        prev_numba_value = threads

    # If we change the number of threads, recompile the JIT kernels
    if threads != prev_numba_value:
        os.environ['NUMBA_NUM_THREADS'] = threads
        _csr_node2vec_walks.recompile()
        _csr_random_walk.recompile()

    if return_weight <= 0 or neighbor_weight <= 0:
        raise ValueError("Return and neighbor weights must be > 0")

    # use the (slower) node2vec kernel only when either weight differs from 1
    if (return_weight > 1. or return_weight < 1.
            or neighbor_weight < 1. or neighbor_weight > 1.):
        walks = _csr_node2vec_walks(T.data, T.indptr, T.indices,
                                    sampling_nodes=sampling_nodes,
                                    walklen=walklen,
                                    return_weight=return_weight,
                                    neighbor_weight=neighbor_weight)
    # much faster implementation for regular walks
    else:
        walks = _csr_random_walk(T.data, T.indptr, T.indices,
                                 sampling_nodes, walklen)

    # restore the thread-count environment variable
    os.environ['NUMBA_NUM_THREADS'] = prev_numba_value
    return walks
def package_dir_path(path):
    """Return the package path to the package install directory (``<path>/.pkg``)."""
    return path + '/.pkg'
def sort_cluster_data(cluster_data, cluster_accuracy):
    """
    sort cluster data based on GDT_mean values of cluster_accuracy.
        -> cluster 0 will have highest GDT_mean
        -> cluster <max> will have lowest GDT_mean

    .. Note :: if cluster_data has noise_label assigned, will move this label to the end of the sorted cluster data.

    Args:
        cluster_data (CLUSTER_DATA): output of apply_DBSCAN(), apply_KMeans(), or heat_KMeans()
        cluster_accuracy (CLUSTER_DATA_ACCURACY): output of map_cluster_accuracy()

    Returns:
        sorted_cluster_data (CLUSTER_DATA)
            sorted cluster_data

    Raises:
        TypeError: if either argument has the wrong data type
        ValueError: if labels of cluster_data and cluster_accuracy do not match
    """
    if not isinstance(cluster_data, CLUSTER_DATA):
        raise TypeError("cluster_data has wrong data type.")
    if not isinstance(cluster_accuracy, CLUSTER_DATA_ACCURACY):
        raise TypeError("cluster_accuracy has wrong data type.")

    ### rank and test if labels have same range
    ranked_array, ranked_ndx = _misc.get_ranked_array(cluster_accuracy.GDT_mean, verbose=False)
    if set(cluster_data.labels) != set(ranked_ndx):
        raise ValueError("labels of cluster_data and cluster_accuracy do not match.")
    if cluster_data.noise_label is not None:
        # move noise label to the very end
        noise_ndx = np.where(ranked_ndx == cluster_data.noise_label)[0]
        other_ndx = np.where(ranked_ndx != cluster_data.noise_label)[0]
        ranked_array = np.append(ranked_array[other_ndx], ranked_array[noise_ndx])
        ranked_ndx = np.append(ranked_ndx[other_ndx], ranked_ndx[noise_ndx])

    ### remap data: new label of each frame is the rank position of its old label
    sorted_labels = [ranked_ndx.tolist().index(i) for i in cluster_data.labels]

    ### create new object with all per-cluster arrays reordered by rank
    sorted_wss_data = WSS_DATA_obj(wss=cluster_data.wss_data.wss,               # float
                                   sse=cluster_data.wss_data.sse[ranked_ndx],   # sorted
                                   se_mean=cluster_data.wss_data.se_mean[ranked_ndx],  # sorted
                                   se_std=cluster_data.wss_data.se_std[ranked_ndx])    # sorted
    sorted_cluster_data = CLUSTER_DATA(centers=cluster_data.centers[ranked_ndx],   # sorted
                                       counts=cluster_data.counts[ranked_ndx],     # sorted
                                       labels=sorted_labels,                       # sorted
                                       noise_label=cluster_data.noise_label,       # reassign
                                       inertia=cluster_data.inertia,               # float
                                       wss_data=sorted_wss_data,                   # sorted
                                       compact_score=cluster_data.compact_score[ranked_ndx])  # sorted
    return sorted_cluster_data
import random
def reprintClean(pack):
    """
    Helper function specifically for reprint packs.

    Rolls for an ultra rare (1-in-13); otherwise rolls for a super rare
    (1-in-7) and always adds a rare. Cards are implanted into the slot
    matching their broad type (Monster/Spell/Trap).

    :param pack: List, contains the 12 cards in a pack (mutated in place)
    :return: the pack with the higher rarity cards implanted in
    """
    def _place(card, slots):
        # Put `card` into the slot matching its type.
        # slots = (monster_index, spell_index, trap_index)
        if "Monster" in card["type"]:
            pack[slots[0]] = card
        elif "Spell" in card["type"]:
            pack[slots[1]] = card
        elif "Trap" in card["type"]:
            pack[slots[2]] = card

    if random.randint(0, 12) == 0:
        # ultra-rare pack: ultra in the top slots, no extra rare
        _place(randCard(ultra), (5, 8, 11))
    else:
        if random.randint(0, 6) == 0:
            # super-rare pack: super in the top slots, plus a rare below
            _place(randCard(super), (5, 8, 11))
        # every non-ultra pack gets a rare in the rare slots
        _place(randCard(rare), (4, 7, 10))

    return pack
def logout():
    """Log the current user out and redirect to the home page."""
    del session['user_id']
    return redirect('/')
def main() -> int:
    """
    Main function. Executed if script is called standalone.

    :return: the search result code, or 1 on keyboard interrupt
    """
    args = _parse_cmd_args()
    try:
        return _search_symantec(args.keyword, args.limit)
    except KeyboardInterrupt:
        _warn("Keyboard interrupt detected\n", True)
        return 1
def _check_imgs_array(imgs):
    """Check input image if it is an array

    Parameters
    ----------
    imgs : array of str, shape=[n_subjects, n_sessions]
        Element i, j of the array is a path to the data of subject i
        collected during session j.
        Data are loaded with numpy.load and expected
        shape is [n_voxels, n_timeframes]
        n_timeframes and n_voxels are assumed to be the same across
        subjects
        n_timeframes can vary across sessions
        Each voxel's timecourse is assumed to have mean 0 and variance 1

    Returns
    -------
    shapes : array
        Shape of input images
    """
    assert_array_2axis(imgs, "imgs")
    n_subjects, n_sessions = imgs.shape

    shapes = np.zeros((n_subjects, n_sessions, 2))
    for i in range(n_subjects):
        for j in range(n_sessions):
            # BUG FIX: np.str and np.str_ checks replaced with a plain
            # isinstance(..., str). np.str was an alias of str removed in
            # NumPy 1.20+ (AttributeError on modern NumPy), and np.str_
            # subclasses str, so this single check is equivalent.
            if not isinstance(imgs[i, j], str):
                raise ValueError("imgs[%i, %i] is stored using "
                                 "type %s which is not a str" %
                                 (i, j, type(imgs[i, j])))
            shapes[i, j, :] = get_shape(imgs[i, j])
    return shapes
def smoothline(xs, ys=None, interpol=3, window=1, verbose=3):
    """Smoothing 1D vector.

    Description
    -----------
    Smoothing a 1d vector can be challanging if the number of data is low sampled.
    This smoothing function therefore contains two steps. First interpolation of the
    input line followed by a convolution.

    Parameters
    ----------
    xs : array-like
        Data points for the x-axis.
    ys : array-like
        Data points for the y-axis.
    interpol : int, (default : 3)
        The interpolation factor. The data is interpolation by a factor n before the smoothing step.
    window : int, (default : 1)
        Smoothing window that is used to create the convolution and gradually smoothen the line.
    verbose : int [1-5], default: 3
        Print information to screen. A higher number will print more.

    Returns
    -------
    xnew : array-like
        Data points for the x-axis.
    ynew : array-like
        Data points for the y-axis.
    """
    if window is not None:
        if verbose >= 3: print('[smoothline] >Smoothing by interpolation..')
        # Specify number of points to interpolate the data
        # Interpolate
        extpoints = np.linspace(0, len(xs), len(xs) * interpol)
        spl = make_interp_spline(range(0, len(xs)), xs, k=3)
        # Compute x-labels
        xnew = spl(extpoints)
        # NOTE(review): this bare slice expression has no effect (result is
        # discarded); presumably `xnew = xnew[window:-window]` was intended —
        # left unchanged to preserve current behavior. TODO confirm.
        xnew[window:-window]

        # First smoothing on the raw input data
        ynew = None
        if ys is not None:
            ys = _smooth(ys, window)
            # Interpolate ys line
            spl = make_interp_spline(range(0, len(ys)), ys, k=3)
            ynew = spl(extpoints)
            # NOTE(review): same no-op slice as above — left unchanged.
            ynew[window:-window]
    else:
        xnew, ynew = xs, ys
    return xnew, ynew
def prune_model(keras_model, prun_factor_dense=10, prun_factor_conv=10, metric='L1', comp=None, num_classes=None, label_one_hot=None):
    """
    A given keras model get pruned. The factor for dense and conv says how many percent
    of the dense and conv layers should be deleted.

    Args:
        keras_model: Model which should be pruned
        prun_factor_dense: Integer which says how many percent of the neurons should be deleted
        prun_factor_conv: Integer which says how many percent of the filters should be deleted
        metric: Metric which should be used to prune the model
        comp: Dictionary with compiler settings
        num_classes: Number of different classes of the model
        label_one_hot: Boolean value if labels are one hot encoded or not

    Return:
        pruned_model: New model after pruning
    """
    if callable(getattr(keras_model, "predict", None)):
        model = keras_model
    elif isinstance(keras_model, str) and ".h5" in keras_model:
        model = load_model(keras_model)
    else:
        print("No model given to prune")

    # Build a default compile config when none was provided.
    # BUG FIX: the original branches tested `num_classes <= 2` and
    # `num_classes > 3`, so num_classes == 3 fell through both and `comp`
    # stayed None; also `comp is None` is now checked first so a provided
    # comp skips the num_classes comparison (which would fail for None).
    if comp is None and num_classes <= 2:
        comp = {
            "optimizer": 'adam',
            "loss": tf.keras.losses.BinaryCrossentropy(),
            "metrics": 'accuracy'}
    elif comp is None and num_classes > 2:
        if label_one_hot == True:
            comp = {
                "optimizer": 'adam',
                "loss": tf.keras.losses.CategoricalCrossentropy(),
                "metrics": 'accuracy'}
        else:
            comp = {
                "optimizer": 'adam',
                "loss": tf.keras.losses.SparseCategoricalCrossentropy(),
                "metrics": 'accuracy'}

    layer_types, layer_params, layer_output_shape, layer_bias = load_model_param(model)
    num_new_neurons = np.zeros(shape=len(layer_params), dtype=np.int16)
    num_new_filters = np.zeros(shape=len(layer_params), dtype=np.int16)

    layer_params, num_new_neurons, num_new_filters, layer_output_shape = model_pruning(layer_types, layer_params, layer_output_shape, layer_bias, num_new_neurons, num_new_filters, prun_factor_dense, prun_factor_conv, metric)

    print("Finish with pruning")

    pruned_model = build_pruned_model(model, layer_params, layer_types, num_new_neurons, num_new_filters, comp)

    print("Model built")

    return pruned_model
def scale_values(tbl, columns):
    """Scale values in a Table using MinMax scaling.

    :param tbl: Table
    :param columns: iterable of column indices (looked up in ``tbl.labels``)
        identifying the columns to be scaled
    :returns: copy of the Table with the selected columns scaled
    """
    new_tbl = tbl.copy()
    for col in columns:
        name = new_tbl.labels[col]
        x_scaled = minmax_scale(new_tbl[name])
        new_tbl[name] = x_scaled
    return new_tbl
def setup_args(args):
    """ Setup the args based on the argparser obj

    Args:
        args(ArgParser): Parsed arguments

    Notes:
        If there is no core_root, or test location passed, create a default
        location using the build type and the arch.
    """
    host_os = None
    arch = args.arch
    build_type = args.build_type

    test_location = args.test_location
    core_root = args.core_root
    test_native_bin_location = args.test_native_bin_location

    coreclr_repo_location = args.coreclr_repo_location
    if os.path.basename(coreclr_repo_location) == "tests":
        coreclr_repo_location = os.path.dirname(coreclr_repo_location)

    if _platform == "linux" or _platform == "linux2":
        host_os = "Linux"
    elif _platform == "darwin":
        host_os = "OSX"
    elif _platform == "win32":
        host_os = "Windows_NT"
    else:
        # BUG FIX: host_os is still None here; report the actual platform string.
        # All print statements are parenthesized for python 3 compatibility.
        print("Unknown OS: %s" % _platform)
        sys.exit(1)

    assert os.path.isdir(coreclr_repo_location)

    if test_location is None:
        print("Using default test location.")
        test_location = os.path.join(coreclr_repo_location, "bin", "tests", "%s.%s.%s" % (host_os, arch, build_type))
        print("TestLocation: %s" % test_location)
        print("")

    if core_root is None:
        print("Using default location for core_root.")
        core_root = os.path.join(test_location, "Tests", "Core_Root")
        print("Core_Root: %s" % core_root)
        print("")

    if host_os != "Windows_NT":
        if test_native_bin_location is None:
            print("Using default location for test_native_bin_location.")
            test_native_bin_location = os.path.join(os.path.join(coreclr_repo_location, "bin", "obj", "%s.%s.%s" % (host_os, arch, build_type), "tests"))
            print("Native bin location: %s" % test_native_bin_location)
            print("")

    valid_arches = ["x64", "x86", "arm", "arm64"]
    if not arch in valid_arches:
        print("Unsupported architecture: %s." % arch)
        print("Supported architectures: %s" % "[%s]" % ", ".join(valid_arches))
        sys.exit(1)

    valid_build_types = ["Debug", "Checked", "Release"]
    if not build_type in valid_build_types:
        print("Unsupported configuration: %s." % build_type)
        print("Supported configurations: %s" % "[%s]" % ", ".join(valid_build_types))
        sys.exit(1)

    if not os.path.isdir(test_location):
        print("Error, test location: %s, does not exist." % test_location)
        sys.exit(1)

    if not os.path.isdir(core_root):
        print("Error, core_root: %s, does not exist." % core_root)
        sys.exit(1)

    if host_os != "Windows_NT":
        if not os.path.isdir(test_native_bin_location):
            print("Error, test_native_bin_location: %s, does not exist." % test_native_bin_location)
            sys.exit(1)

    return host_os, arch, build_type, coreclr_repo_location, core_root, test_location, test_native_bin_location
from typing import Dict
from typing import Optional
import socket
def discover_devices(timeout: int = 30, debug: bool = False) -> Dict[Optional[str], str]:
    """
    Discovers Nanoleaf devices on the network using SSDP

    :param timeout: The timeout on the search in seconds (default 30)
    :param debug: Prints each device string for the SSDP discovery
    :returns: Dictionary of found devices in format {name: ip}
    """
    # BUG FIX: the request was previously a triple-quoted literal whose
    # embedded newline + indentation corrupted the MAN header; build the
    # M-SEARCH request as one properly CRLF-delimited string instead.
    ssdp = (
        "M-SEARCH * HTTP/1.1\r\n"
        "HOST: 239.255.255.250:1900\r\n"
        'MAN: "ssdp:discover"\r\n'
        "MX: 1\r\n"
        "ST: nanoleaf:nl29\r\n\r\n"
    )
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    sock.sendto(ssdp.encode(), ("239.255.255.250", 1900))
    nanoleaves = []
    while True:
        try:
            data = sock.recv(1024).decode()
        except socket.error:
            # timeout (or other socket error) ends the collection phase
            break
        nanoleaves.append(data)
    # BUG FIX: the socket was never closed (resource leak)
    sock.close()

    nanoleaf_dict = {}
    for device in nanoleaves:
        if debug:
            print(device)
        headers = device.split('\r\n')
        ip = None
        name = None
        for header in headers:
            if "Location" in header:
                # NOTE(review): a missing "http://" would raise IndexError,
                # which this except clause does not catch — TODO confirm.
                try:
                    ip_string = header.split("http://")[1]
                    ip = ip_string.split(":")[0]
                except ValueError:
                    pass
            if "nl-devicename" in header:
                try:
                    name = header.split("nl-devicename: ")[1]
                except ValueError:
                    pass
        if ip is not None:
            nanoleaf_dict[name] = ip
    return nanoleaf_dict
def get_params_out_of_range(
    params: list, lower_params: list, upper_params: list
) -> list:
    """
    Check if any parameter specified by the user is out of the range that was defined

    :param params: List of parameters read from the .inp file
    :param lower_params: List of lower bounds provided by the user in the .inp file
    :param upper_params: List of upper bounds provided by the user in the .inp file
    :return: List of *indices* of the parameters out of the defined range
    """
    params_out = [
        i
        for i in range(len(lower_params))
        if params[i] < lower_params[i] or params[i] > upper_params[i]
    ]
    return params_out
import functools
def _filter_work_values(
    works: np.ndarray,
    max_value: float = 1e4,
    max_n_devs: float = 100,
    min_sample_size: int = 10,
) -> np.ndarray:
    """Remove pairs of works when either is determined to be an outlier.

    Parameters
    ----------
    works : ndarray
        Array of records containing fields "forward" and "reverse"
    max_value : float
        Remove work values with magnitudes greater than this
    max_n_devs : float
        Remove work values farther than this number of standard
        deviations from the mean
    min_sample_size : int
        Only apply the `max_n_devs` criterion when sample size is
        larger than this

    Returns
    -------
    out : ndarray
        1-D array of filtered works.
        ``out.shape == (works.size, 1)``
    """
    # bind the shared thresholds once; the same criteria apply to both fields
    mask_work_outliers = functools.partial(
        _mask_outliers,
        max_value=max_value,
        max_n_devs=max_n_devs,
        min_sample_size=min_sample_size,
    )

    f_good = mask_work_outliers(works["forward"])
    r_good = mask_work_outliers(works["reverse"])

    # keep a pair only when both its forward and reverse works pass
    both_good = f_good & r_good
    return works[both_good]
def pf_from_ssig(ssig, ncounts):
    """Estimate pulsed fraction for a sinusoid from a given Z or PDS power.

    See `a_from_ssig` and `pf_from_a` for more details.

    Examples
    --------
    >>> round(a_from_pf(pf_from_ssig(150, 30000)), 1)
    0.1
    """
    a = a_from_ssig(ssig, ncounts)
    return pf_from_a(a)
def get_aic(mse: float, n: int, p: int) -> float:
    """
    Calculate AIC score.
    Parameters
    ----------
    mse: float
        Mean-squared error.
    n: int
        Number of observations.
    p: int
        Number of parameters
    Returns
    -------
    float
        AIC value.
    """
    # AIC for a Gaussian likelihood up to a constant: n*ln(MSE) + 2p.
    # NOTE(review): `log` is presumably math.log (natural log) imported
    # elsewhere in the file -- confirm; raises ValueError if mse <= 0.
    return n * log(mse) + 2 * p | 033cb5ea7e9d06a2f630d3eb2718630904e4209f | 33,065
def triangle(a, b):
    """ Return triangle function:
          ^    .
          |   / \
          |____/___\____
              a    b
    """
    # Defer evaluation: returns a callable with a and b pre-bound into
    # primitives.tri via functools.partial.
    return partial(primitives.tri, a, b) | f28bbe0bacb260fb2fb30b9811b1d5d6e5b99750 | 33,066
import pickle
def get_max_trans_date() -> date:
    """Return the date of the last transaction in the database"""
    # Loads the pickled (data, date) pair at conf("etl_accts") and returns
    # element [1].
    # NOTE(review): the file handle from open() is never closed explicitly;
    # consider a `with` block. Unpickling also trusts the file's contents.
    return pickle.load(open(conf("etl_accts"), "rb"))[1] | 61db89cfbbdc9f7e2b86930f50db75dcc213205c | 33,067
def build_argparser():
    """
    Build the command line argument parser.
    :return: configured ArgumentParser (call .parse_args() to get arguments)
    """
    parser = ArgumentParser()
    # Required inputs: model definition and media source.
    parser.add_argument("-m", "--model", required=True, type=str,
                        help="Path to an xml file with a trained model.")
    parser.add_argument("-i", "--input", required=True, type=str,
                        help="Path to image or video file")
    # Optional: custom CPU kernel extension library.
    parser.add_argument("-l", "--cpu_extension", required=False, type=str,
                        default=None,
                        help="MKLDNN (CPU)-targeted custom layers."
                             "Absolute path to a shared library with the"
                             "kernels impl.")
    parser.add_argument("-d", "--device", type=str, default="CPU",
                        help="Specify the target device to infer on: "
                             "CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
                             "will look for a suitable plugin for device "
                             "specified (CPU by default)")
    parser.add_argument("-pt", "--prob_threshold", type=float, default=0.5,
                        help="Probability threshold for detections filtering"
                             "(0.5 by default)")
    return parser | 65c6e45e30b67ff879dbcf1a64cd8321192adfcf | 33,068
from pathlib import Path
def read_file(in_file: str):
    """Read input file.

    Returns a tuple of (full file contents as one string, number of lines).
    """
    file_path = Path(in_file)
    data = []
    count = 0
    with open(file_path) as fp:
        # Accumulate lines (newlines preserved) and count them as we go.
        for line in fp:
            data.append(line)
            count = count + 1
    return ''.join(data), count | 4fbae8f1af7800cb5f89784a0230680a1d6b139a | 33,069
def web_authorize():
    """OAuth 登录跳转 (OAuth login redirect)."""
    # TODO: (demo only, auto-login) -- remove and configure a real
    # authentication method (OAuth2 or username/password system).
    set_user_login({
        'job_number': 7777,
        'realname': 'Fufu'
    })
    return redirect(url_for('web.web_index'))
    # NOTE(review): everything below is unreachable dead code kept as the
    # intended production OAuth flow; restore it when removing the demo login.
    # OAuth authentication
    redirect_uri = url_for('web.web_authorized', _external=True)
    return oauth.OA.authorize_redirect(redirect_uri) | 469af59659032f46b862fd17f55c28e3a1853d9d | 33,070
def limit(value, limits):
    """
    Clamp a value to a closed interval.
    :param <float> value: value to limit
    :param <list>/<tuple> limits: (min, max) limits to which restrict the value
    :return <float>: value from within limits, if input value readily fits into the limits its left unchanged. If value exceeds limit on either boundary its set to that boundary.
    """
    # NOTE(review): assumes limits[0] <= limits[1]; not validated here.
    if value < limits[0]:
        value = limits[0]
    elif value > limits[1]:
        value = limits[1]
    else:
        # Already within [min, max]: leave unchanged.
        pass
    return value | 55fb603edb478a26b238d7c90084e9c17c3113b8 | 33,071
def _cdp_no_split_worker(work_queue, counts_by_ref, seq_1, seq_2, nt):
    """
    Worker process - get refseq from work queue, aligns reads from seq_1 and seq_2,
    and adds as (x,y) coords to counts_by_ref if there are alignments.
    :param work_queue: joinable queue with refseq header and seq tuples (JoinableQueue(header,ref_seq))
    :param counts_by_ref: Manager dict for counts for each reference result (mgr.dict)
    :param seq_1: seq file set 1 (SRNASeq)
    :param seq_2: seq file set 2 (SRNASeq)
    :param nt: read length to align (int)
    :return: True
    """
    try:
        # NOTE(review): empty()/get() is racy with multiple consumers --
        # another worker may drain the queue between the check and the get,
        # in which case get() blocks; confirm this is acceptable here.
        while not work_queue.empty():
            both_aligned = _cdp_no_split_single_ref_align(work_queue.get(), seq_1, seq_2, nt)
            if both_aligned is not None:
                # Store (count_1, count_2) keyed by the reference header.
                counts_by_ref[both_aligned[0]] = (both_aligned[1], both_aligned[2])
    except Exception as e:
        # Best-effort worker: report and fall through so the process exits cleanly.
        print(e)
    return True | 498e9da9c9f30adc1fa2564590cc7a99cbed9b94 | 33,072
def createIQSatelliteChannel(satellite_id):
    """Factory
    This method creates a satellite channel object that exchanges IQ data in between both ends.
    """
    # Fixed framing: the protobuf Framing enum value for 'IQ'.
    return SatelliteChannel(satellite_id, stellarstation_pb2.Framing.Value('IQ')) | e1fc08de59692ab308716abc8d137d0ab90336bc | 33,073
def scaled_herding_forward_pass(weights, scales, input_data, n_steps):
    """
    Do a forward pass with scaled units.
    :param weights: A length:L list of weight matrices
    :param scales: A length:L+1 list of scale vectors
    :param input_data: An (n_samples, n_dims) array of input data
    :param n_steps: Number of steps to run for
    :param rng: A random number generator or see d
    :return: A (n_samples, n_steps, n_outputs) integar array representing the output spike count in each time bin
    """
    # Shape/consistency checks: weights chain together, and each scale vector
    # matches the corresponding layer width.
    assert all(w_in.shape[1]==w_out.shape[0] for w_in, w_out in zip(weights[:-1], weights[1:]))
    assert len(scales) == len(weights)+1
    assert all(s.ndim==1 for s in scales)
    assert all(w.ndim==2 for w in weights)
    assert all(len(s_in)==w.shape[0] and len(s_out)==w.shape[1] for w, s_in, s_out in zip(weights, scales[:-1], scales[1:]))
    assert input_data.shape[1] == len(scales[0])
    # scaled_weights = [s_in[:, None]**-1 * w * s_out[None, :] for s_in, w, s_out in zip(scales[:-1], weights, scales[1:])]
    spikes = sequential_quantize(input_data*scales[0], n_steps=n_steps)
    # NOTE(review): the in-loop call omits n_steps, unlike the first call
    # above -- looks like a possible bug; confirm sequential_quantize's
    # default behaviour before relying on this.
    for s, w in zip(scales[1:], weights):
        spikes = sequential_quantize(spikes.dot(w)*s)
        # spikes = sequential_quantize(spikes.dot(w/s_pre[:, None])*s_post)
    # Undo the final layer's scaling before returning.
    return spikes/scales[-1] | e5af2c099b1296bdc1f584acaa6ce8f832fb29f6 | 33,074
from io import StringIO
def open_remote_factory(mocker):
    """Fixture providing open_remote function for ReferenceLoader construction."""
    # The mock returns an in-memory text stream with canned remote content.
    return mocker.Mock(return_value=StringIO(REMOTE_CONTENT)) | b3df151b021cfd3a07c5737b50d8856d3dc0a599 | 33,075
def get_displacement_bcs(domain, macro_strain):
    """Get the shift and fixed BCs.
    The shift BC has the the right top and bottom points in x, y and z
    fixed or displaced.
    The fixed BC has the left top and bottom points in x, y and z
    fixed.
    Args:
      domain: an Sfepy domain
      macro_strain: the macro strain
    Returns:
      the Sfepy boundary conditions
    """
    return Conditions(
        [
            # Shift BC: displace u.0 on the right face (max x) by
            # macro_strain * domain width.
            get_shift_or_fixed_bcs(
                domain,
                lambda min_, max_: {"u.0": macro_strain * (max_[0] - min_[0])},
                "shift",
                lambda min_, max_: (max_[0],),
            ),
            # Fixed BC: pin all displacement components on the left face
            # (min x); u.2 only exists in 3-D, hence the len(min_) check.
            get_shift_or_fixed_bcs(
                domain,
                lambda min_, max_: merge(
                    {"u.0": 0.0, "u.1": 0.0}, {"u.2": 0.0} if len(min_) == 3 else dict()
                ),
                "fix",
                lambda min_, max_: (min_[0],),
            ),
        ]
    ) | 02090e4d64f75b671597c4b916faf086c5bfe096 | 33,076
def public_rest_url(path_url: str = "",
                    domain: str = CONSTANTS.DEFAULT_DOMAIN,
                    only_hostname: bool = False,
                    domain_api_version: str = None,
                    endpoint_api_version: str = None) -> str:
    """
    Creates a full URL for provided public REST endpoint
    :param path_url: a public REST endpoint
    :param domain: the CoinFLEX domain to connect to ("live" or "test"); "test" selects the staging subdomain
    :param only_hostname: if True, return only the hostname (no scheme or path)
    :param domain_api_version: overrides CONSTANTS.PUBLIC_API_VERSION for the subdomain prefix
    :param endpoint_api_version: overrides CONSTANTS.PUBLIC_API_VERSION for the URL path
    :return: the full URL to the endpoint
    """
    local_domain_api_version = domain_api_version or CONSTANTS.PUBLIC_API_VERSION
    local_endpoint_api_version = endpoint_api_version or CONSTANTS.PUBLIC_API_VERSION
    # Test environment uses a "<version>stg" staging subdomain prefix.
    subdomain_prefix = f"{local_domain_api_version}stg" if domain == "test" else local_domain_api_version
    endpoint = "" if not len(path_url) else f"/{path_url}"
    if only_hostname:
        return CONSTANTS.REST_URL.format(subdomain_prefix)
    return "https://" + CONSTANTS.REST_URL.format(subdomain_prefix) + f"/{local_endpoint_api_version}{endpoint}" | 3c99f5a388c33d5c7aa1e018d2d38c7cbe82b112 | 33,077
import torch
def get_dir_cos(dist_vec):
    """ Calculates directional cosines from distance vectors.
    Calculate directional cosines with respect to the standard cartesian
    axes and avoid division by zero
    Args:
        dist_vec: distance vector between particles
    Returns: dir_cos, array of directional cosines of distances between particles
    """
    norm = torch.linalg.norm(dist_vec, axis=-1)
    # Where the norm is zero (particle paired with itself), substitute 0
    # instead of dividing; the reciprocal norm is broadcast over the last
    # (xyz) axis via repeat_interleave.
    # NOTE(review): the norm is recomputed inside torch.where although `norm`
    # already holds it -- redundant but harmless.
    dir_cos = dist_vec * torch.repeat_interleave(torch.unsqueeze(torch.where(
        torch.linalg.norm(dist_vec, axis=-1) == 0,
        torch.zeros(norm.shape, device=dist_vec.device),
        1 / norm), axis=-1), 3, dim=-1)
    return dir_cos | f325ca5535eaf9083082b147ff90f727214031ec | 33,078
def lambda_handler(event, context):
    """
    Entry point for the Get All Lambda function.

    Updates a single bundle item value for the authenticated player.
    Responds 401/400/414 on validation failures, 404 if the bundle item does
    not exist, 204 on success.
    """
    handler_request.log_event(event)
    # Get gk_user_id from requestContext
    player_id = handler_request.get_player_id(event)
    if player_id is None:
        return handler_response.return_response(401, 'Unauthorized.')
    # get bundle_name from path
    bundle_name = handler_request.get_path_param(event, 'bundle_name')
    if bundle_name is None:
        return handler_response.return_response(400, 'Invalid bundle name')
    if len(bundle_name) > user_game_play_constants.BUNDLE_NAME_MAX_LENGTH:
        return handler_response.return_response(414, 'Invalid bundle name')
    bundle_name = sanitizer.sanitize(bundle_name)
    # get bundle_item_key from path
    bundle_item_key = handler_request.get_path_param(event, 'bundle_item_key')
    if bundle_item_key is None:
        return handler_response.return_response(400, 'Invalid bundle item key')
    # NOTE(review): the item key is length-checked against the *bundle name*
    # limit -- confirm there is no separate item-key limit constant.
    if len(bundle_item_key) > user_game_play_constants.BUNDLE_NAME_MAX_LENGTH:
        return handler_response.return_response(414, 'Invalid bundle item key')
    bundle_item_key = sanitizer.sanitize(bundle_item_key)
    # get payload from body (an items value)
    item_data = handler_request.get_body_as_json(event)
    if item_data is None:
        return handler_response.return_response(400, 'Missing payload')
    if "bundle_item_value" not in item_data:
        return handler_response.return_response(400, 'Invalid payload')
    item_key = sanitizer.sanitize(item_data["bundle_item_value"])
    if not item_key:
        return handler_response.return_response(400, 'Invalid payload')
    # DynamoDB partition key combines player and bundle.
    player_id_bundle = f'{player_id}_{bundle_name}'
    try:
        ddb_client.update_item(**_build_bundle_item_update_request(player_id_bundle, bundle_item_key, item_key))
    except ddb_client.exceptions.ConditionalCheckFailedException:
        # Conditional update failed => the targeted item does not exist.
        return handler_response.return_response(404, 'Bundle and/or bundle item not found.')
    except botocore.exceptions.ClientError as err:
        logger.error(f'Error updating bundle item, please ensure bundle item exists. Error: {err}')
        raise err
    # Return operation result
    return handler_response.return_response(204, None) | 2cc1aeb6a451feb41cbf7a121c67ddfbd06e686f | 33,079
def reverse_complement_dna(seq):
    """
    Reverse complement of a DNA sequence
    Parameters
    ----------
    seq : str
    Returns str
    """
    # Complement first, then reverse via slice step -1.
    return complement_dna(seq)[::-1] | 680cf032c0a96fc254928bfa58eb25bee56e44dc | 33,080
def Wizard():
    """
    Creates a wizardcharacter
    :returns: fully initialised wizard
    :rtype: Character
    """
    # Wizard archetype: low HP/body, high spirit and mind.
    character = (CharacterBuilder()
                 .with_hit_points(5)
                 .with_max_hp(5)
                 .with_spirit(20)
                 .with_max_spirit(20)
                 .with_speed(4)
                 .with_body(4)
                 .with_mind(8)
                 .with_attack(1)
                 .with_name('Wizard')
                 .build())
    return character | 23019f41ba6bf51e049ffe16831a725dd3c20aa2 | 33,081
def tournament_communication(comm,
                             comm_fn=lambda x,y: None,
                             comm_kw={}):
    """
    This is useful for the development of parallelized O(N) duplicate check
    functions. The problem with such functions is that the operation of
    checking if a set of parameters/descriptor has been observed previously
    requires there be a master set of observed values to compare against and
    add to. This means that it is not naively parallel. In order to achieve
    decent scaling for O(N) duplicate checks, this method has been implemented.
    This method will combine the results from ranks in a tournament braket
    style such that in the end, the master list will be on rank 0. This
    achieves better scaling because in the beginning, all ranks compare their
    lists to another rank adding unique values to one of the ranks. This rank
    moves forward to another comparions with another rank that has completed
    its comparisons as well. This continues until all results are on the master
    rank.
    comm_fn API: comm_fn(comm, (rank1, rank2), **kwargs)
    """
    # NOTE(review): comm_kw={} is a mutable default argument; safe only as
    # long as callees never mutate it -- consider comm_kw=None upstream.
    ### Step 1 is to build the tournament braket
    size = comm.Get_size()
    rank = comm.Get_rank()
    rank_list = np.arange(0,size)
    tournament = []
    temp_tournament = []
    # First round: pair consecutive ranks (0,1), (2,3), ...
    for idx,value in enumerate(rank_list[::2]):
        value2 = value+1
        temp_tournament.append((value,value2))
    tournament.append(temp_tournament)
    if size <= 1:
        tournament = [(0,)]
    prev_tournament = tournament[0]
    # Subsequent rounds: the smaller rank of each pair advances; an odd
    # entry advances alone as a 1-tuple.
    while len(prev_tournament) != 1:
        temp_tournament = []
        for idx,entry in enumerate(prev_tournament[::2]):
            next_idx = idx*2+1
            keep_rank1 = min(entry)
            if (next_idx+1) > len(prev_tournament):
                temp_tournament.append((keep_rank1,))
            else:
                keep_rank2 = min(prev_tournament[next_idx])
                temp_tournament.append((keep_rank1, keep_rank2))
        tournament.append(temp_tournament)
        prev_tournament = tournament[-1]
    if len(tournament) > 1:
        tournament.append([(0,)])
    # Single-rank case: nothing to communicate.
    if tournament == [(0,)]:
        return
    idx = 0
    # Play each round: a rank participates only if it appears in the braket.
    for braket in tournament:
        if rank == 0:
            print("Round {} of {}".format(idx, len(tournament)), flush=True)
        idx += 1
        # ### Rank loop is here to emulate parallel execution
        # for rank in rank_list:
        found = False
        for entry in braket:
            if rank in entry:
                found = True
                break
        if found:
            comm_fn(comm, entry, **comm_kw)
        # if found:
        #     print("{}: {}".format(rank, entry))
    return tournament | 03827a02f3df099aa3eead3d8214f0f2f90e60b1 | 33,082
def create_permutation_feature(number, rate_pert=1., name=None):
    """Create permutation for features.

    Builds `number` perturbation specs, each seeded deterministically from a
    single random base seed (n, n+1, ...).
    """
    # Base seed drawn once; each spec offsets it so seeds are distinct.
    n = np.random.randint(0, 100000)
    if name is None:
        name = f_stringer_pert_rate(rate_pert)
    lista_permuts = []
    for i in range(number):
        lista_permuts.append((name, PartialPermutationPerturbationGeneration,
                              {'seed': n+i, 'rate_pert': rate_pert},
                              f_pert_partialfeatures_instantiation))
    return lista_permuts | 8c6931e2e2b1dcd9313fda5d8be63bfb0c549f5f | 33,083
import random
def DiceRoll():
    """A function to simulate rolling of one or more dice."""
    def Roll():
        # One fair six-sided die.
        return random.randint(1,6)
    print("\nRoll Dice: Simulates rolling of one or more dice.")
    num = 1
    # NOTE(review): bare except swallows *all* errors (including Ctrl-C)
    # and silently falls back to num=1; narrow to ValueError if revised.
    try: num = int(input("\nEnter the number of dice you wish to roll: "))
    except: print("Input should be a number.")
    if num > 0:
        out = [] # list to store roll output
        i = 1
        while i <= num:
            out.append(str(Roll()))
            i+=1
        print("\nRoll Result(s)")
        print("============")
        print(", ".join(out)) | 90e9587473fb06541ec9daa2ec223759940a5ecb | 33,084
def rook_move(self, game, src):
    """ Validates rook move.

    Walks outward from src along the rank and file, collecting reachable
    squares; a ray stops at the first occupied square (which is included
    only if it holds an enemy piece).
    :param game: game object exposing .board (8x8 grid of pieces or None)
    :param src: (x, y) position of the rook
    :return: list of [x, y] squares the rook may move to
    """
    x = src[0]
    y = src[1]
    result = []
    # Two sweep directions per axis; the color-dependent conditions are
    # mirror images so both colors cover 0..7 in both directions.
    loop_condition = (lambda i: i < 8) if self.color == 'white' else (lambda i: i >= 0)
    reverse_loop_condition = (lambda i: i < 8) if self.color == 'black' else (lambda i: i >= 0)
    counter_eval = +1 if self.color == 'white' else -1
    reverse_counter_eval = -counter_eval
    loops = [
        [loop_condition, counter_eval],
        [reverse_loop_condition, reverse_counter_eval]
    ]
    # Vertical sweeps (vary x, fixed y).
    for loop in loops:
        i = x
        while loop[0](i):
            if i != x:
                if game.board[i][y] is not None:
                    # Blocked: capture square allowed only for enemy piece.
                    if game.board[i][y].color != self.color:
                        result.append([i, y])
                    break
            result.append([i, y])
            i += loop[1]
    # Horizontal sweeps (fixed x, vary y).
    for loop in loops:
        i = y
        while loop[0](i):
            if i != y:
                if game.board[x][i] is not None:
                    if game.board[x][i].color != self.color:
                        result.append([x, i])
                    break
            result.append([x, i])
            i += loop[1]
    # NOTE(review): the src square itself is appended by each sweep's first
    # iteration, so it appears in the result multiple times -- confirm
    # callers tolerate/deduplicate this.
    return result | 76a782541c565d14a84c1845841338d99f23704d | 33,085
def figure_5a():
    """
    This creates the plot for figure 5A in the Montague paper. Figure 5A is
    a 'plot of ∂(t) over time for three trials during training (1, 30, and 50).'
    """
    # Create Processing Components
    sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             name=pnl.SAMPLE)
    action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             function=pnl.Linear(slope=1.0,
                                                                 intercept=0.01),
                                             name='Action Selection')
    # All-zero initial weights; TD learning will shape this projection.
    sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
                                                       receiver=action_selection,
                                                       matrix=np.zeros((60, 60)))
    # Create Composition
    composition_name = 'TD_Learning_Figure_5A'
    comp = pnl.Composition(name=composition_name)
    # Add Processing Components to the Composition
    pathway = [sample_mechanism, sample_to_action_selection, action_selection]
    # Add Learning Components to the Composition
    learning_related_components = comp.add_td_learning_pathway(pathway, learning_rate=0.3).learning_components
    # Unpack Relevant Learning Components
    prediction_error_mechanism = learning_related_components[pnl.OBJECTIVE_MECHANISM]
    target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]
    # Create Log so delta (prediction-error) values can be plotted afterwards.
    prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)
    # Create Stimulus Dictionary; reward is withheld on these trial indices.
    no_reward_trials = {14, 29, 44, 59, 74, 89}
    inputs = build_stimulus_dictionary(sample_mechanism, target_mechanism, no_reward_trials)
    # Run Composition
    comp.learn(inputs=inputs)
    if args.enable_plot:
        # Get Delta Values from Log
        delta_vals = prediction_error_mechanism.log.nparray_dictionary()[composition_name][pnl.VALUE]
        # Plot Delta Values form trials 1, 30, and 50
        with plt.style.context('seaborn'):
            plt.plot(delta_vals[0][0], "-o", label="Trial 1")
            plt.plot(delta_vals[29][0], "-s", label="Trial 30")
            plt.plot(delta_vals[49][0], "-o", label="Trial 50")
            plt.title("Montague et. al. (1996) -- Figure 5A")
            plt.xlabel("Timestep")
            plt.ylabel("∂")
            plt.legend()
            plt.xlim(xmin=35)
            plt.xticks()
            plt.show(block=not pnl._called_from_pytest)
    return comp | a8764f75cd9fc7cf0e0a9ddadc452ffdf05f099e | 33,086
import click
import sys
import os
import yaml
import io
def init():
    """Return top level command handler."""
    @click.group(cls=cli.make_commands(__name__))
    @click.option('--distro', required=True,
                  help='Path to treadmill distro.',
                  envvar='TREADMILL_DISTRO')
    @click.option('--install-dir', required=True,
                  help='Target installation directory.',
                  envvar='TREADMILL_APPROOT')
    @click.option('--cell', required=True,
                  envvar='TREADMILL_CELL',
                  callback=cli.handle_context_opt,
                  is_eager=True,
                  expose_value=False)
    @click.option('--config', required=False,
                  type=click.Path(exists=True, readable=True, allow_dash=True),
                  multiple=True)
    @click.option('--override', required=False, type=cli.DICT)
    @click.option('--profile', required=True,
                  envvar='TREADMILL_PROFILE',
                  callback=cli.handle_context_opt,
                  is_eager=True,
                  expose_value=False)
    @click.pass_context
    def install(ctx, distro, install_dir, config, override):
        """Installs Treadmill."""
        # '-' cell means "no cell"; cell/profile come from the CLI context
        # populated by the eager option callbacks above.
        cell = None if context.GLOBAL.cell == '-' else context.GLOBAL.cell
        profile = context.GLOBAL.get_profile_name()
        ctx.obj['PARAMS'] = {
            'cell': cell,
            'dns_domain': context.GLOBAL.dns_domain,
            'ldap_suffix': context.GLOBAL.ldap_suffix,
            'treadmill': distro,
            'dir': install_dir,
            'profile': profile,
            'python': sys.executable,
            'python_path': os.getenv('PYTHONPATH', ''),
            'init_hook': os.getenv('TREADMILL_INIT_HOOK', ''),
        }
        install_data = {}
        # Merge each --config file in order; '-' reads YAML from stdin.
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and deprecated in PyYAML >= 5.1 -- confirm these
        # config files are trusted.
        for conf in config:
            if conf == '-':
                conf_dict = yaml.load(stream=sys.stdin)
            else:
                with io.open(conf, 'r') as fd:
                    conf_dict = yaml.load(stream=fd)
            ctx.obj['PARAMS'].update(conf_dict)
            install_data.update(conf_dict.get('data', {}))
        # --override values win over everything from the config files.
        if override:
            ctx.obj['PARAMS'].update(override)
            install_data.update(override)
        # Store the intall data in the context.
        # TODO: This is a terrible terrible mess.
        ctx.obj['PARAMS'].update(install_data)
        ctx.obj['PARAMS']['data'] = install_data
        # XXX: hack - templates use treadmillid, but it is defined as
        #      "username" in cell object.
        ctx.obj['PARAMS']['treadmillid'] = ctx.obj['PARAMS'].get('username')
        os.environ['TREADMILL'] = distro
    return install | c2e93bf589137c8b6821b169628d7e1f816f9e4d | 33,087
import json
def lambda_handler(event, context):
    """ Transforms a binary payload by invoking "decode_{event.type}" function
    Parameters
    ----------
    DeviceId : str
        Device Id
    ApplicationId : int
        LoRaWAN Application Id / Port number
    PayloadData : str
        Base64 encoded input payload
    Returns
    -------
    This function returns a JSON object with the following keys:
    - status: 200 or 500
    - transformed_payload: result of calling "{PayloadDecoderName}.dict_from_payload" (only if status == 200)
    - lns_payload: a representation of payload as received from an LNS
    - error_type (only if status == 500)
    - error_message (only if status == 500)
    - stackTrace (only if status == 500)
    """
    logger.info("Received event: %s" % json.dumps(event))
    # NOTE(review): input_base64, device_id and metadata are extracted but
    # never used below -- the decoder is handed event["PayloadData"] directly.
    input_base64 = event.get("PayloadData")
    device_id = event.get("WirelessDeviceId")
    metadata = event.get("WirelessMetadata")["LoRaWAN"]
    try:
        # Invoke a payload conversion function
        decoded_payload = rfi_power_switch.dict_from_payload(
            event.get("PayloadData"))
        # Define the output of AWS Lambda function in case of successful decoding
        decoded_payload["status"] = 200
        result = decoded_payload
        logger.info(result)
        return result
    except Exception as exp:
        # Log and re-raise so the Lambda invocation is marked as failed.
        logger.error(f"Exception {exp} during binary decoding")
        raise exp | d645454656d85589652942b944e84863cb22a425 | 33,088
def _get_adi_snrs(psf, angle_list, fwhm, plsc, flux_dist_theta_all,
                  wavelengths=None, mode='median', ncomp=2):
    """ Get the mean S/N (at 3 equidistant positions) for a given flux and
    distance, on a median subtracted frame.
    """
    snrs = []
    # flux_dist_theta_all packs (flux, distance, theta).
    theta = flux_dist_theta_all[2]
    flux = flux_dist_theta_all[0]
    dist = flux_dist_theta_all[1]
    # 3 equidistant azimuthal positions (120 deg apart) at the same radius.
    for ang in [theta, theta + 120, theta + 240]:
        # Inject a synthetic companion, build the residual frame, measure S/N.
        cube_fc, posx, posy = create_synt_cube(GARRAY, psf, angle_list, plsc,
                                               flux=flux, dist=dist, theta=ang,
                                               verbose=False)
        fr_temp = _compute_residual_frame(cube_fc, angle_list, dist, fwhm,
                                          wavelengths, mode, ncomp,
                                          svd_mode='lapack', scaling=None,
                                          collapse='median', imlib='opencv',
                                          interpolation='lanczos4')
        res = frame_quick_report(fr_temp, fwhm, source_xy=(posx, posy),
                                 verbose=False)
        # mean S/N in circular aperture
        snrs.append(np.mean(res[-1]))
    # median of mean S/N at 3 equidistant positions
    median_snr = np.median(snrs)
    return flux, median_snr | 3d00ccb6163962dbfdcedda7aa565dfc549e1f2b | 33,089
def compute_nearest_neighbors(fit_embeddings_matrix, query_embeddings_matrix,
                              n_neighbors, metric='cosine'):
    """Compute nearest neighbors.
    Args:
        fit_embeddings_matrix: NxD matrix
        query_embeddings_matrix: MxD matrix of query points
        n_neighbors: number of neighbors to return per query
        metric: only 'cosine' is supported
    Returns:
        (distances, indices, sort_indices) from the cosine NN routine
    Raises:
        ValueError: if metric is not 'cosine'
    """
    # Detect fit == query so self-matches can be handled downstream.
    fit_eq_query = False
    if ((fit_embeddings_matrix.shape == query_embeddings_matrix.shape)
        and np.allclose(fit_embeddings_matrix, query_embeddings_matrix)):
        fit_eq_query = True
    if metric == 'cosine':
        distances, indices, sort_indices = compute_nearest_neighbors_cosine(fit_embeddings_matrix,
                                                                            query_embeddings_matrix,
                                                                            n_neighbors, fit_eq_query)
    else:
        raise ValueError('Use cosine distance.')
    return distances, indices, sort_indices | 1020827cbaab50d591b3741d301ebe88c4ac6d93 | 33,090
import re
def commodify_cdli_no( cdli_no ):
    """
    Given a CDLI number, fetch the text of the corresponding
    artifact from the database and pass it to commodify_text
    """
    # Ensure that we have a valid artifact number: accept "P######" or
    # a bare 6-digit number.
    if re.match(r'P[0-9]{6}', cdli_no) is not None:
        art_no = int(cdli_no[1:])
    elif re.match(r'[0-9]{6}', cdli_no) is not None:
        art_no = int(cdli_no)
    else:
        raise Exception("%s: not a well-formed artifact id"%(cdli_no))
    # For the moment, only accept texts in Sumerian:
    LANG_ID_SUMERIAN = 5
    # Connect to DB:
    conn = mariadb.connect(
        user=config['db']['user'],
        password=config['db']['password'],
        host=config['db']['host'],
        port=config['db']['port'],
        database=config['db']['database']
    )
    cur = conn.cursor()
    # DB query to get text content and language (parameterized -- safe):
    cur.execute("SELECT transliteration, language_id FROM inscriptions INNER JOIN artifacts_languages ON inscriptions.artifact_id = artifacts_languages.artifact_id WHERE inscriptions.artifact_id=%s", (art_no,))
    text = None
    # Take the first Sumerian inscription; split ATF into lines of tokens.
    for atf, lang_id in cur:
        if lang_id == LANG_ID_SUMERIAN:
            text = [line.strip().split(" ") for line in atf.split("\n")]
            break
    cur.close()
    conn.close()
    if text is not None:
        return commodify_text( text, cdli_no )
    # If no text found with specified id
    # and correct language, raise exception
    raise Exception("%s: artifact not found or language not supported"%(cdli_no)) | 5c194f40cbde371329671712d648019ac2e43a90 | 33,091
import sys
def do_verify(options, _fuse):
    """
    Verify stored blocks by re-hashing decompressed data and comparing
    against the stored hash values.

    @param options: Commandline options
    @type  options: object
    @param _fuse: FUSE wrapper
    @type  _fuse: dedupsqlfs.fuse.dedupfs.DedupFS
    @return: 0 if all blocks verify, 1 on mismatch or count inconsistency
    """
    tableOption = _fuse.operations.getTable("option")
    curHashFunc = tableOption.get("hash_function")
    tableHash = _fuse.operations.getTable("hash")
    tableHashCT = _fuse.operations.getTable("hash_compression_type")
    tableBlock = _fuse.operations.getTable("block")
    # Use the same hash function the filesystem was created with.
    _fuse.operations.hash_function = curHashFunc
    hashCount = tableHash.get_count()
    if _fuse.getOption("verbosity") > 0:
        print("Ready to verify %s blocks." % hashCount)
    cur = tableHash.getCursor(True)
    cur.execute("SELECT * FROM `%s`" % tableHash.getName())
    cnt = equal = 0
    lastPrc = ""
    for hashItem in iter(cur.fetchone, None):
        cnt += 1
        # Decompress the stored block and re-hash it.
        blockItem = tableBlock.get(hashItem["id"])
        hashCT = tableHashCT.get(hashItem["id"])
        blockData = _fuse.decompressData(_fuse.operations.getCompressionTypeName(hashCT["type_id"]), blockItem["data"])
        newHash = _fuse.operations.do_hash(blockData)
        if newHash == hashItem["value"]:
            equal += 1
        # Progress indicator: only redraw when the percentage text changes.
        prc = "%6.2f%%" % (cnt*100.0/hashCount)
        if prc != lastPrc:
            lastPrc = prc
            if _fuse.getOption("verbosity") > 0:
                sys.stdout.write("\r%s " % prc)
                sys.stdout.flush()
    if _fuse.getOption("verbosity") > 0:
        sys.stdout.write("\n")
        sys.stdout.flush()
    if _fuse.getOption("verbosity") > 0:
        print("Processed %s hashes, equal %s blocks." % (cnt, equal,))
    if hashCount != cnt:
        # Row count changed mid-scan (or table inconsistent).
        print("Something went wrong?")
        return 1
    if cnt != equal:
        print("Data corrupted?! %s block hashes not equals!" % (cnt - equal))
        return 1
    else:
        print("All data in good hash ;)")
        return 0 | cba45a78cf57422bcca2b01909666fe0fdcb72a1 | 33,092
def zvalues(r, N=1):
    """
    Generate random pairs for the CDF a normal distribution.
    The z-values are from the cumulative distribution function of the
    normal distribution.
    Args:
        r: correlation parameter of the pair (docstring originally said
           "radius of the CDF"; the formula below is the standard
           construction of correlated standard normals with correlation r)
        N: number of pairs to generate
    Returns:
        pairs of random numbers
    """
    # Start from two independent standard normals via Box-Muller, then mix
    # them so that corr(z1, z2) = r.
    y1, y2 = box_muller(0, 1, N)
    z1 = (np.sqrt(1 + r) * y1 - np.sqrt(1 - r) * y2) / np.sqrt(2)
    z2 = (np.sqrt(1 + r) * y1 + np.sqrt(1 - r) * y2) / np.sqrt(2)
    return z1, z2 | 146d363d7fbb92a9152c6b05a8f38562d4cfc107 | 33,093
def exp(fdatagrid):
    """Perform a element wise exponential operation.
    Args:
        fdatagrid (FDataGrid): Object to whose elements the exponential
            operation is going to be applied.
    Returns:
        FDataGrid: Object whose elements are the result of exponentiating
            the elements of the original.
    """
    # copy() preserves the grid metadata; only the data matrix changes.
    return fdatagrid.copy(data_matrix=np.exp(fdatagrid.data_matrix)) | aef02937bf0fac701e0ae2bac75911a5a2a8ee9e | 33,094
def ksvm(param, data):
    """ kernelized SVM

    Dispatches to the distributionally-robust and/or regularized solver
    depending on which kappa values are requested, and merges their results.
    """
    # If the kernel matrix is not PSD (negative smallest eigenvalue), shift
    # the diagonal to make it positive definite.
    certif = np.linalg.eigvalsh(data['K'])[0]
    if certif < 0:
        data['K'] = data['K'] - 2 * certif * np.eye(data['K'].shape[0])
    optimal = {}
    # Finite kappa values -> distributionally robust formulation.
    if len(param['kappa']) > 1 or float('inf') not in param['kappa']:
        optimal.update(dist_rob_ksvm(param, data))
    # kappa = inf -> classical regularized formulation.
    if float('Inf') in param['kappa']:
        optimal.update(regularized_ksvm(param, data))
    return optimal | e549411b0ac12926753e2eefa968e978414829fa | 33,095
def _ordered_unique(arr):
    """
    Get the unique elements of an array while preserving order.
    """
    arr = np.asarray(arr)
    # np.unique sorts its output; recover first-occurrence order by sorting
    # the returned first-occurrence indices.
    _, idx = np.unique(arr, return_index=True)
    return arr[np.sort(idx)] | c4e2578a41d7481b602c4251890276dc2a92dbe9 | 33,096
def is_byte_array(value, count):
    """Returns whether the given value is the Python equivalent of a
    byte array: a tuple of exactly `count` ints, each in [0, 255]."""
    return isinstance(value, tuple) and len(value) == count and all(map(lambda x: x >= 0 and x <= 255, value)) | 16793415885ea637aecbeeefe24162d6efe9eb39 | 33,097
def _FilterSubstructureMatchByAtomMapNumbers(Mol, PatternMol, AtomIndices, AtomMapIndices):
    """Filter substructure match atom indices by atom map indices corresponding to
    atom map numbers.

    Returns all match indices when AtomMapIndices is None; otherwise only the
    match indices at the mapped positions.
    """
    if AtomMapIndices is None:
        return list(AtomIndices)
    # Project the match onto the mapped atom positions, preserving map order.
    return [AtomIndices[Index] for Index in AtomMapIndices] | 3594a11452848c9ae11f770fa560fe29d68aa418 | 33,098
def questions_for_written_answer_tabled_in_range(start, end):
    """Returns a list of all Questions for Written Answer tabled in date range.

    start/end must be date-like objects (anything with .isoformat());
    returns an empty list if either argument is not date-like.
    """
    try:
        _start = start.isoformat()
    except AttributeError:
        return []
    try:
        _end = end.isoformat()
    except AttributeError:
        return []
    q_args = {'startDate': _start, 'endDate': _end}
    resp = _call_questions_service("GetQuestionsForWrittenAnswer_TabledInRange", **q_args)
    # Unwrap the service envelope: QuestionsList -> [Question, ...].
    return _parse_list_response(resp, "QuestionsList", "Question") | 5702005be754bb7485fb81e49ff9aab6fbc1d549 | 33,099
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.