| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import os
def cases():
"""
Loads all filenames of the pre-calculated test cases.
"""
case_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'cases'
)
cases = []
for dir_path, _, files in os.walk(case_dir):
cases = cases + [os.path.join(dir_path, f) for f in files]
return cases
|
1e8cbf1001cb52ab5875b38714f1edca664f867c
| 3,645,400
|
import math
def axisAligned(angle, tol=None, axis=None):
""" Determine if a line (represented by its angle) is aligned with an axis.
Parameters
----------
angle : float
The line's angle of inclination (in radians)
tol : float
Maximum distance from `axis` for which `angle` is still considered to
be aligned.
axis : {'horizontal', 'vertical'}
The reference axis.
Returns
-------
is_aligned : bool
True if `angle` is within `tol` radians of `axis`.
"""
if axis == 'horizontal':
target_angle = math.pi / 2
elif axis == 'vertical':
target_angle = 0.0
else:
raise ValueError("axis must be 'horizontal' or 'vertical'")
if tol is None:
raise ValueError("tol must be given as a positive float (radians)")
distance = abs(target_angle - abs(angle))
is_aligned = distance < tol
return is_aligned
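# A minimal usage sketch (not part of the original snippet); the angle and
# tolerance values below are purely illustrative.
print(axisAligned(1.55, tol=0.05, axis='horizontal'))  # True
print(axisAligned(0.01, tol=0.05, axis='vertical'))    # True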
|
9198f1d1e8b3755696f5ccf01b9df112d18bd363
| 3,645,401
|
def make_results_dict(
mesh_data,key_descriptor,
key_transformation=None,
verbose=False
):
"""Load mesh data into dictionary, using specified parameter tuple as
key.
Example key descriptor:
(("Nsigmamax",int),("Nmax",int),("hw",float))
Example:
>>> KEY_DESCRIPTOR_NMAX_HW = (("Nmax",int),("hw",float))
For now, in the event that the same mesh point arises multiple
times on input (i.e., a given value for the key tuple is
duplicated), the final occurrence overwrites any earlier
occurrences. In the future, a more sophisticated "merging"
process might be appropriate.
An optional key transformation is useful for, e.g., shifting the Nmax value
stored in the dictionary when results are to be used as reference results
for the space of opposite parity.
Arguments:
mesh_data (list of ResultsData): data for mesh points
key_descriptor (tuple of tuple): dtype descriptor for key
key_transformation (callable,optional): transformation function to apply to key tuple
verbose (bool,optional): verbose output
Returns:
(dict): mapping from key tuple to data object
"""
key_function = make_key_function(key_descriptor)
results_dict = dict()
for mesh_point in mesh_data:
# make key
key = key_function(mesh_point)
if (key_transformation is not None):
key = key_transformation(key)
if (verbose):
print(" make_results_dict: filename {} key {}".format(mesh_point.filename,key))
# store data point
if (key not in results_dict):
# save mesh point
results_dict[key] = mesh_point
else:
# TODO: do smart merge "update" on existing mesh point
# overwrite mesh point
results_dict[key] = mesh_point
return results_dict
|
cfc0e56751090fd3aea32ed42659652caf6c25ae
| 3,645,402
|
import numpy as np
import matplotlib.pyplot as plt
def plot_1d(x_test, mean, var):
"""
Description
----------
Function to plot one dimensional gaussian process regressor mean and
variance.
Parameters
----------
x_test: array_like
Array containing one dimensional inputs of the gaussian process
model.
mean: array_like
An array with the values of the mean function of the gaussian
process.
var: array_like
The variance around the values of the mean function of the
gaussian process.
Returns
----------
Matplotlib plot of mean function and variance of the gaussian process
model.
"""
x_test = exactly_1d(x_test)
mean = exactly_1d(mean)
var = exactly_1d(var)
plt.fill_between(x_test,
mean-.674*np.sqrt(var),
mean+.674*np.sqrt(var),
color='k', alpha=.4, label='50% Credible Interval')
plt.fill_between(x_test,
mean-1.150*np.sqrt(var),
mean+1.150*np.sqrt(var),
color='k', alpha=.3, label='75% Credible Interval')
plt.fill_between(x_test,
mean-1.96*np.sqrt(var),
mean+1.96*np.sqrt(var),
color='k', alpha=.2, label='95% Credible Interval')
plt.fill_between(x_test,
mean-2.576*np.sqrt(var),
mean+2.576*np.sqrt(var),
color='k', alpha=.1, label='99% Credible Interval')
plt.plot(x_test, mean, c='w')
return None
|
f53ca71b2546d6c849cdcb52c16ec77125a4c0a6
| 3,645,403
|
def sentence_to_windows(sentence, min_window, max_window):
"""
Create window size chunks from a sentence, always starting with a word
"""
windows = []
words = sentence.split(" ")
curr_window = ""
for idx, word in enumerate(words):
curr_window += (" " + word)
curr_window = curr_window.lstrip()
next_word_len = len(words[idx+1]) + 1 if idx+1 < len(words) else 0
if len(curr_window) + next_word_len > max_window:
curr_window = clean_sentence(curr_window)
if validate_sentence(curr_window, min_window):
windows.append(curr_window.strip())
curr_window = ""
if len(curr_window) >= min_window:
windows.append(curr_window)
return windows
|
867240f310c9e7bc3f887a2592485a02ab646870
| 3,645,404
|
def get_master_name(els):
"""Function: get_master_name
Description: Return name of the master node in a Elasticsearch cluster.
Arguments:
(input) els -> ElasticSearch instance.
(output) Name of master node in ElasticSearch cluster.
"""
return els.cat.master().strip().split(" ")[-1]
|
0371dac1fdf0fd6b906646e1882e9089d9dfa12c
| 3,645,405
|
from typing import Sequence
import random
def flop_turn_river(dead: Sequence[str]) -> Sequence[str]:
"""
Get flop turn and river cards.
Args:
dead: Dead cards.
Returns:
5 cards.
"""
dead_concat = "".join(dead)
deck = [card for card in DECK if card not in dead_concat]
return random.sample(deck, 5)
|
cea8289a5deb03dd74a9b20b99899d908e3f38e3
| 3,645,406
|
def smith_gassmann(kstar, k0, kfl2, phi):
"""
Applies the Gassmann equation.
Returns Ksat2.
"""
a = (1 - kstar/k0)**2.0
b = phi/kfl2 + (1-phi)/k0 - (kstar/k0**2.0)
ksat2 = kstar + (a/b)
return ksat2
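# A worked example with hypothetical values (the unit system is an assumption; any
# consistent set of moduli works since the Gassmann relation is unit-free).
ksat2_example = smith_gassmann(kstar=12e9, k0=37e9, kfl2=2.2e9, phi=0.2)
print(ksat2_example / 1e9)  # saturated bulk modulus, roughly 16.4 GPa for these inputs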
|
ae413d7ed55862927e5f8d06d4aff5bfc0e91167
| 3,645,407
|
import json
async def _preflight_cors(request):
"""Respond to preflight CORS requests and load parameters."""
if request.method == "OPTIONS":
return textify("ok", headers=generate_cors_headers(request))
request['args'] = {}
if request.form:
for key in request.form:
key_lower = key.lower()
if key_lower in _MUST_BE_GET_PARAM:
raise UserException(CANNOT_BE_POST_PARAM % key)
request['args'][key_lower] = request.form[key][0]
elif request.json:
for key in request.json:
key_lower = key.lower()
if key_lower in _MUST_BE_GET_PARAM:
raise UserException(CANNOT_BE_POST_PARAM % key)
# Make all url parameters strings
if isinstance(request.json[key], list):
request['args'][key_lower] = json.dumps(request.json[key])
else:
request['args'][key_lower] = str(request.json[key])
# Take all Get parameters
for key, value in list(request.raw_args.items()):
key_lower = key.lower()
if key_lower in _MUST_BE_POST_PARAM:
raise UserException(CANNOT_BE_GET_PARAM % key)
request['args'][key_lower] = value
|
91f6057fc4d624d576b7a8ae45cd202264fde7c1
| 3,645,408
|
def login_teacher():
""" Login User and redirect to index page. """
# forget any user
session.clear()
# if user reached via route POST
if request.method == "POST":
# check user credentials
email_id = request.form.get("email_id")
passw = request.form.get("password")
result = db.execute("SELECT * FROM registrants WHERE email_id = :email", email = email_id)
if len(result) != 1 or not pwd_context.verify(passw, result[0]['hash']):
return "INVALID USERNAME/PASSWORD"
else:
folder_id = db.execute('SELECT folder_id FROM shared_folder WHERE user_id = :user_id', user_id = result[0]['id'])
print(folder_id)
session["user_id"] = result[0]["id"]
session['folder_id'] = folder_id[0]['folder_id']
return redirect(url_for('index'))
else:
return render_template('login.html')
|
04982b664b18c3c10d1d5dadabe101de97f4383d
| 3,645,409
|
import os
import tempfile
import json
def upload_file():
"""Upload files"""
print("UPLOADED FILES", len(request.files))
if not os.path.exists(FILE_START_PATH):
os.makedirs(FILE_START_PATH)
# Set the upload folder for this user if it hasn't been set yet
# pylint: disable=consider-using-with
if 'upload_folder' not in session or session['upload_folder'] is None or not os.path.isdir(session['upload_folder']):
session['upload_folder'] = tempfile.mkdtemp(dir=FILE_START_PATH)
loaded_filenames = []
for file_id in request.files:
one_file = request.files[file_id]
save_path = os.path.join(session['upload_folder'], secure_filename(one_file.filename))
if os.path.exists(save_path):
os.unlink(save_path)
one_file.save(save_path)
loaded_filenames.append(one_file.filename)
return json.dumps(loaded_filenames)
|
26071b9b6e8c6915994a0ddc049002e9f2e2ad8e
| 3,645,410
|
import base64
def mult_to_bytes(obj: object) -> bytes:
"""Convert given {array of bits, bytes, int, str, b64} to bytes"""
if isinstance(obj, list):
i = int("".join(["{:01b}".format(x) for x in obj]), 2)
res = i.to_bytes(bytes_needed(i), byteorder="big")
elif isinstance(obj, int):
res = obj.to_bytes(bytes_needed(obj), "big")
elif isBase64(obj):
res = base64.b64decode(obj)
elif isinstance(obj, bytes):
res = obj
elif isinstance(obj, str):
alphabet = max([int(c) for c in obj]) + 1
res = int(obj, alphabet)
return mult_to_bytes(res)
else:
res = bytes(obj)
return res
|
7e86caf56f8187215c6ecbea63b259e627dde0ad
| 3,645,411
|
import six
def get_barrier(loopy_opts, local_memory=True, **loopy_kwds):
"""
Returns the correct barrier type depending on the vectorization type / presence
of atomics
Parameters
----------
loopy_opts: :class:`loopy_utils.loopy_opts`
The loopy options used to create this kernel.
local_memory: bool [True]
If true, this barrier will be used for memory in the "local" address spaces.
Only applicable to OpenCL
loopy_kwds: dict
Any other loopy keywords to put in the instruction options
Returns
-------
barrier: str
The built barrier instruction
"""
mem_kind = ''
barrier_kind = 'nop'
if use_atomics(loopy_opts):
mem_kind = 'local' if local_memory else 'global'
barrier_kind = 'lbarrier'
loopy_kwds['mem_kind'] = mem_kind
return '...' + barrier_kind + '{' + ', '.join([
'{}={}'.format(k, v) for k, v in six.iteritems(loopy_kwds)]) + '}'
|
6f45099827f93ebe41e399b6c75aa7a1b85779fb
| 3,645,412
|
import pandas as pd
import rioxarray as rxr
def monthly_rain(year, from_month, x_months, bound):
"""
This function downloads the embedded tif files from the SILO Longpaddock dataset
and creates a cumulative total over the selected months by stacking the xarrays.
This function is embedded in the get_rainfall function or can be used separately.
Parameters
----------
input :
year (integer) value of the year for the data to be pulled
month (integer) value of the first month for the data to be pulled
x_months (integer) number of months to be pulled
bound (shapefile) area of interest for the final calculated tif to be clipped to
Returns
------
output : rioxarray item representing each of the months pulled and
summed up for the months selected
"""
#create month string as pandas frame
mon_string = pd.DataFrame({'mon': ['01', '02', '03', '04', '05', '06',
'07', '08', '09', '10', '11', '12']})
#assign year column
mon_string['year'] = str(year)
#assign yearmon column
mon_string['yearmon'] = mon_string['year'] + mon_string['mon']
#filter to the selected x months, starting at from_month
mon_select = mon_string[from_month-1:from_month-1+x_months]
#set base url
base = 'https://s3-ap-southeast-2.amazonaws.com/silo-open-data/monthly/monthly_rain'
rain_stack = []
#loop to download tifs, reproject, stack, sum and clip
for index, i in mon_select.iterrows():
call = base + '/' + i['year'] + '/' + i['yearmon'] + '.monthly_rain.tif'
month_rain = rxr.open_rasterio(call, masked = True).squeeze()
rain_stack.append(month_rain)
bound_crs = bound.to_crs(rain_stack[0].rio.crs)
stacked_rain = sum(rain_stack).rio.clip(bound_crs.geometry)
return stacked_rain
|
951ac32a8afcc5b0fd6f0c1b6616f3cc4d162540
| 3,645,413
|
def organize_by_chromosome(genes, transcripts):
""" Iterate through genes and transcripts and group them by chromosome """
gene_dict = {}
transcript_dict = {}
for ID in genes:
gene = genes[ID]
chromosome = gene.chromosome
if chromosome not in gene_dict:
chrom_genes = {}
chrom_genes[ID] = gene
gene_dict[chromosome] = chrom_genes
gene_dict[chromosome][ID] = gene
for ID in transcripts:
transcript = transcripts[ID]
chromosome = transcript.chromosome
if chromosome not in transcript_dict:
chrom_transcripts = {}
chrom_transcripts[ID] = transcript
transcript_dict[chromosome] = chrom_transcripts
transcript_dict[chromosome][ID] = transcript
return gene_dict, transcript_dict
|
2f55d29a75f5c28fbf3c79882b8b2ac18590cdb2
| 3,645,414
|
from itertools import product
import numpy as np
import pandas as pd
def get_synth_stations(settings, wiggle=0):
""" Compute synthetic station locations.
Values for mode "grid" and "uniform" and currently for tests on global Earth geometry.
TODO: incorporate into settings.yml
:param settings: dict holding all info for project
:type settings: dict
:param wiggle: adds random variations in interval [-wiggle, wiggle] to locations, defaults to 0
:type wiggle: float, optional
:return: array containing station locations longitude/x and latitude/y coordinates. shape = (n, 2)
:rtype: numpy.ndarray
"""
mode = settings["synth_stations_mode"]
n = settings["synth_stations_n"]
if mode == "grid":
lons = np.linspace(-180, 180 - (360 / int(np.sqrt(n))), int(np.sqrt(n)))
lats = np.linspace(-75, 75, int(np.sqrt(n)))
station_locations = list(product(lons, lats))
elif mode == "uniform":
lons = np.random.uniform(low=-180, high=180, size=n)
lats = np.random.uniform(low=-75, high=75, size=n)
station_locations = list(zip(lons, lats))
elif mode == "partial_circle":
n_total = settings["synth_stations_circle_max"]
radius = settings["synth_stations_circle_radius"]
n_used = settings["synth_stations_circle_n"]
azimuths = np.linspace(0, 2 * np.pi, n_total)
azimuths_used = azimuths[:n_used]
lons = radius * np.cos(azimuths_used)
lats = radius * np.sin(azimuths_used)
station_locations = list(zip(lons, lats))
elif mode == "file":
df = pd.read_csv(settings["synth_stations_file"])
lons = df["x"].values
lats = df["y"].values
station_locations = list(zip(lons, lats))
if wiggle != 0:
station_locations = [
[
sta_lon + np.random.uniform(-wiggle, wiggle),
sta_lat + np.random.uniform(-wiggle, wiggle),
]
for sta_lon, sta_lat in station_locations
]
station_locations = np.array(station_locations)
return station_locations
|
962fa23773ebc297fedec6b79ac27718780a8699
| 3,645,415
|
def test_show_chromosome_labels(dash_threaded):
"""Test the display/hiding of chromosomes labels."""
prop_type = 'bool'
def assert_callback(prop_value, nclicks, input_value):
answer = ''
if nclicks is not None:
answer = FAIL
if PROP_TYPES[prop_type](input_value) == prop_value:
answer = PASS
return answer
template_test_component(
dash_threaded,
APP_NAME,
assert_callback,
ideogram_test_props_callback,
'showChromosomeLabels',
'True',
prop_type=prop_type,
component_base=COMPONENT_REACT_BASE,
**BASIC_PROPS
)
driver = dash_threaded.driver
# assert the absence of chromosomes' labels
labels = driver.find_elements_by_class_name('chrLabel')
assert len(labels) == 0
# trigger a change of the component prop
btn = wait_for_element_by_css_selector(driver, '#test-{}-btn'.format(APP_NAME))
btn.click()
# assert the presence of chromosomes' labels
labels = wait_for_elements_by_css_selector(driver, '.chrLabel')
assert len(labels) > 0
|
da3003e54c681b689703f7226b3a5f7a13756944
| 3,645,416
|
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
name = entry.data.get(CONF_NAME)
ha = get_ha(hass, name)
if ha is not None:
await ha.async_remove()
clear_ha(hass, name)
return True
|
1783c518e919eb60b2a40603322aa2a04dbc4000
| 3,645,417
|
def relay_state(pin):
"""Take in pin, return string state of the relay"""
logger.debug("relay_state() for pin %s", pin)
disabled = GPIO.digitalRead(pin)
logger.debug("Pin %s disabled: %s", pin, disabled)
state = "off"
if not disabled:
state = "on"
logger.debug("Relay state for pin %s is %s", pin, state)
return state
|
eae5ce94baa8ffe114ffeed811c7a8733dfb5cc5
| 3,645,418
|
import numpy as np
def calc_fn(grid, size, coefficients=(-0.005, 10)):
""" Apply the FitzHugh-Nagumo equations to a given grid"""
a, b, *_ = coefficients
out = np.zeros(size)
out[0] = grid[0] - grid[0] ** 3 - grid[1] + a
out[1] = b * (grid[0] - grid[1])
return out
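# A minimal explicit-Euler stepping sketch (illustrative only; the step size and the
# initial (v, w) state below are assumptions, not part of the original snippet).
state = np.array([0.1, 0.0])
dt = 0.01
state = state + dt * calc_fn(state, size=2)
state = state + dt * calc_fn(state, size=2)
print(state)  # state after two Euler steps of the FitzHugh-Nagumo system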
|
47a46f75a56ffb3d034a689034fa04f7593c485f
| 3,645,419
|
def destr(screenString):
"""
should return a valid screen object
as defined by input string
(think depickling)
"""
#print "making screen from this received string: %s" % screenString
rowList = []
curRow = []
curAsciiStr = ""
curStr = ""
for ch in screenString:
if ch == '\n':
# then we are done with the row and append it
# and start a new row
rowList.append(curRow)
curRow = []
elif ch == '|':
# then we're ready to make our current asciipixel
curAsciiPixel = AsciiPixel(int(curAsciiStr), int(curStr))
curAsciiStr = ""
curRow.append(curAsciiPixel)
curStr = ""
elif ch == ',':
# then we're now building the color string
curAsciiStr = curStr[:]
curStr = ""
else:
curStr += ch
ret = Screen(rowList)
return ret
|
38f540b3e8f6a16d2dbe7519ea5a43cbf2432b55
| 3,645,420
|
import numpy as np
def analytical_solution_with_penalty(train_X, train_Y, lam, poly_degree):
"""
Closed-form (analytical) solution with a penalty term.
:param poly_degree: degree of the polynomial
:param train_X: X matrix of the training set
:param train_Y: Y vector of the training set
:param lam: penalty coefficient
:return: solution as a polynomial (np.poly1d)
"""
X, Y = normalization(train_X, train_Y, poly_degree)
matrix = np.linalg.inv(X.T.dot(X) + lam * np.eye(X.shape[1])).dot(X.T).dot(Y)
w_result = np.poly1d(matrix[::-1].reshape(poly_degree + 1))
# print("w result analytical")
# print(w_result)
return w_result
|
30f81cd74622889df64d6e67f023f67b3149504a
| 3,645,421
|
from math import radians, sin, cos, atan2, sqrt
def formule_haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
"""
Description:
Computes the distance between two points using the haversine formula.
Parameters:
lat1: {float} -- Latitude of the first point.
lon1: {float} -- Longitude of the first point.
lat2: {float} -- Latitude of the second point.
lon2: {float} -- Longitude of the second point.
Returns:
{float} -- Distance between the two points, in kilometres.
Example:
>>> formule_haversine(0, 0, 1, 1)
157.24938127194397
"""
EARTH_RADIUS = 6371e3
dLat = radians(lat2 - lat1)
dLon = radians(lon2 - lon1)
lat1 = radians(lat1)
lat2 = radians(lat2)
a = sin(dLat/2)**2 + cos(lat1) * cos(lat2) * sin(dLon/2)**2
c = 2 * atan2(sqrt(a), sqrt(1-a))
return (EARTH_RADIUS * c) / 1000
|
03ac0c191aa17b9f20944a7de56febba77db1edc
| 3,645,422
|
def get_word_combinations(word):
"""
'one-two-three'
=>
['one', 'two', 'three', 'onetwo', 'twothree', 'onetwothree']
"""
permutations = []
parts = [part for part in word.split(u'-') if part]
for count in range(1, len(parts) + 1):
for index in range(len(parts) - count + 1):
permutations.append(u''.join(parts[index:index+count]))
return permutations
|
5a4c042cc0f3dedb297e2513bf638eac4278e0a6
| 3,645,423
|
import tempfile
def env_to_file(env_variables, destination_path=None, posix=True):
"""
Write environment variables to a file.
:param env_variables: environment variables
:param destination_path: destination path of a file where the
environment variables will be stored. the
stored variables will be a bash script you can
then source.
:param posix: false if the target of the generated file will be a
windows machine
"""
if not env_variables:
return None
if not destination_path:
destination_path = tempfile.mkstemp(suffix='env')[1]
if posix:
linesep = '\n'
else:
linesep = '\r\n'
with open(destination_path, 'w') as f:
if posix:
f.write('#!/bin/bash')
f.write(linesep)
f.write('# Environment file generated by Cloudify. Do not delete '
'unless you know exactly what you are doing.')
f.write(linesep)
f.write(linesep)
else:
f.write('rem Environment file generated by Cloudify. Do not '
'delete unless you know exactly what you are doing.')
f.write(linesep)
for key, value in env_variables.items():
if posix:
f.write('export {0}={1}'.format(key, value))
f.write(linesep)
else:
f.write('set {0}={1}'.format(key, value))
f.write(linesep)
f.write(linesep)
return destination_path
|
c242ff4d6956922b2ccceecaef5b95640116e75a
| 3,645,424
|
import numpy as np
def _phase_norm(signal, reference_channel=0):
"""Unit phase normalization.
Args:
signal: STFT signal with shape (..., T, D).
reference_channel: Channel whose phase is subtracted from all channels.
Returns:
Normalized STFT signal with same shape.
"""
angles = np.angle(signal[..., [reference_channel]])
return signal * np.exp(-1j * angles)
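# A small self-check (not from the source): after normalization the reference channel
# has (numerically) zero phase. The random STFT-like array is an assumption.
rng = np.random.default_rng(0)
stft = rng.standard_normal((100, 4)) + 1j * rng.standard_normal((100, 4))  # (T, D)
normalized = _phase_norm(stft, reference_channel=0)
print(np.allclose(np.angle(normalized[..., 0]), 0.0))  # True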
|
f4e9021f8942bebf97d35e529068792b7f956425
| 3,645,425
|
def maintenance_():
"""Render a maintenance page while on maintenance mode."""
return render_template("maintenance/maintenance.html")
|
61b95cdeb1a16f216a60330d7501e5270e1342ba
| 3,645,426
|
def CanEditHotlist(effective_ids, hotlist):
"""Return True if a user is editor(add/remove issues and change rankings)."""
return any([user_id in (hotlist.owner_ids + hotlist.editor_ids)
for user_id in effective_ids])
|
dc29c74e2628930faffb12b6772046564ffb8218
| 3,645,427
|
from desimodel.io import load_fiberpos, load_target_info
def model_density_of_sky_fibers(margin=1.5):
"""Use desihub products to find required density of sky fibers for DESI.
Parameters
----------
margin : :class:`float`, optional, defaults to 1.5
Factor of extra sky positions to generate. So, for margin=10, 10x as
many sky positions as the default requirements will be generated.
Returns
-------
:class:`float`
The density of sky fibers to generate in per sq. deg.
"""
fracsky = load_target_info()["frac_sky"]
nfibers = len(load_fiberpos())
nskies = margin*fracsky*nfibers
return nskies
|
a50111f51c2ce081c3379e2b5506912326fafb55
| 3,645,428
|
def dice_counts(dice):
"""Make a dictionary of how many of each value are in the dice """
return {x: dice.count(x) for x in range(1, 7)}
|
427703283b5c0cb621e25f16a1c1f2436642fa9f
| 3,645,429
|
def SynthesizeData(phase, total_gen):
""" Phase ranges from 0 to 24 with increments of 0.2. """
x_list = [phase]
y_list = []
while len(x_list) < total_gen or len(y_list) < total_gen:
x = x_list[-1]
y = sine_function(x=x, amp=amp, per=per, shift_h=shift_h, shift_v=shift_v)
x_list.append(y+x)
y_list.append(y)
x_list = x_list[:-1]
return x_list, y_list
|
e656767f7ebf13575571b5eb0592a0e11cbbfcf7
| 3,645,430
|
from pathlib import Path
import difflib
from datetime import datetime
def compare():
""" Eats two file names, returns a comparison of the two files.
Both files must be csv files containing
<a word>;<doc ID>;<pageNr>;<line ID>;<index of the word>
They may also contain lines with additional HTML code (if the
output format is html):
<h3>Document 1</h3>
"""
if request.method == 'GET':
return "html"
elif request.method == 'POST':
# Get the JSON payload of the request containing the two file names
payload = request.get_json()
if payload['format'] == "html":
# Read input data, i.e. both input files (CSV) from disk:
dumping_path = Path(current_app.config["CACHE_PATH"]) # \Dokumente\Synchronisation\Programmieren\Python\tutorial_flask_wsgi\instance\cache
filename1 = Path(dumping_path, payload['files'][0])
filename2 = Path(dumping_path, payload['files'][1])
o = openfile(filename1)
e = openfile(filename2)
balance_tokens(o, e)
data1 = prepare_for_diff(o)
data2 = prepare_for_diff(e)
# Use difflib to find the differences:
print("ANALYZER: searching for differences (with difflib) ...")
d = difflib.Differ()
delta = d.compare(data1, data2)
delta = [*delta] # convert generator to list
pairs = prepare_output(delta, o,e)
filtered = filter_false_positives(pairs)
html = export_to_html(filtered,
original_document=o[0]['document'],
censored_document=e[0]['document'])
dumping_path = Path(current_app.config["CACHE_PATH"])
timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
filename = f"differences,{o[0]['document']}_vs_{e[0]['document']},{timestamp}.html"
savename = Path(dumping_path, filename)
try:
with open(savename, "w", encoding="utf-8") as f:
f.write(html)
except:
pass
return html
elif payload['format'] == "raw_diff":
# Read input data, i.e. both input files (CSV) from disk:
dumping_path = Path(current_app.config["CACHE_PATH"])
filename1 = Path(dumping_path, payload['files'][0])
filename2 = Path(dumping_path, payload['files'][1])
o = openfile(filename1)
e = openfile(filename2)
balance_tokens(o, e)
data1 = prepare_for_diff(o)
data2 = prepare_for_diff(e)
# Use difflib to find the differences:
print("ANALYZER: searching for differences (with difflib) ...")
d = difflib.Differ()
delta = d.compare(data1, data2)
delta = [*delta] # convert generator to list
pairs = prepare_output(delta, o,e)
filtered = filter_false_positives(pairs)
output = serialize_diff_pairs(filtered)
output["original"]["docTitle"] = o[0]['document']
output["censored"]["docTitle"] = e[0]['document']
output["message"] = "Success! Use the censorship inspector to process the output."
print("ANALYZER: Done! Sending JSON to client.")
return jsonify(output)
elif payload['format'] == "TRACER":
""" The TRACER data is already formatted correctly in the TSV files.
The only thing we have to do here is to replace the "XX" place holders
at the beginning of every line with a two digit number representing
the no. of the document. """
dumping_path = Path(current_app.config["CACHE_PATH"])
output = []
docs = []
docnr = 10
for file in payload['files']:
infile = Path(dumping_path, file)
with open(infile, "r", encoding="utf-8") as f:
lines = f.readlines()
for idx, line in enumerate(lines):
output.append(f"{docnr}{line[2:]}")
if idx == 0: # get the document identifier of the first line
docs.append(line.split("\t")[-1].strip())
docnr += 1
timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
filename = f"tracer_{','.join([str(x) for x in docs])}_{timestamp}.txt"
savename = Path(dumping_path, filename)
print(f"ANALYZER: Trying to write {savename}")
try:
with open(savename, "w", encoding="utf-8") as f:
f.writelines(output)
print("ANALYZER: Success!")
return jsonify(message = f'Success! You can download the exported file under /download/{savename}',
links = [{'href': f'/download/{savename}',
'rel': 'download',
'type': 'GET'}]), 200
except:
print(f"ERROR: Analyzer: Could not write file {savename}")
return jsonify(message = f"ERROR: Analyzer: Could not write file {savename}",
links = [{'href': "error",
'rel': 'download',
'type': 'GET'}]), 500
|
f6aa0421e84cf9d97a211904e64bd793ff7e989e
| 3,645,431
|
def draw_transform(dim_steps, filetype="png", dpi=150):
"""create image from variable transormation steps
Args:
dim_steps(OrderedDict): dimension -> steps
* each element contains steps for a dimension
* dimensions are all dimensions in source and target domain
* each step is (from_level, to_level, action, (weight_level, weight_var))
filetype(str): "png" or "svg"
dpi(int): resolution for png image
"""
dot_cmd = get_dot_cmd(filetype=filetype, dpi=dpi)
dot_components = get_components(dim_steps)
dot_str = get_dot_digraph_str(dot_components)
image_bytes = get_image_bytes(dot_cmd, dot_str)
return image_bytes
|
4738f9512065a9d0d6e33879954581cbf0940a11
| 3,645,432
|
import statistics
def get_ei_border_ratio_from_exon_id(exon_id, regid2nc_dic,
exid2eibrs_dic=None,
ratio_mode=1,
last_exon_dic=None,
last_exon_ratio=2.5,
min_reg_cov=5,
min_reg_mode=1):
"""
Ratio is average of ratios at both exon ends (if embedded in introns),
or if first / last exon, only one ratio.
Assign -1, if only exon, or if both exon and intron border region read
count below min_reg_cov.
min_reg_cov:
Minimum region read coverage. If both exon and intron border region
have < min_reg_cov, return ratio of -1.
regid2nc_dic:
Contains exon/intron/border region ID -> [norm_cov, coverage, reg_len]
exid2eibrs_dic:
Exon ID to all EIB ratios list mapping.
ratio_mode:
How to calculate the returned EIBR ratio.
1: Return the exon-intro border ratio with the higher coverage.
2: Average the two exon-intron border ratios of the exon,
if both have more than > min_reg_cov
last_exon_dic:
Last transcript exon ID -> polarity
Used for prioritizing the inner exon intron border for multi-exon
transcript last exons. Only effective for ratio_mode 1.
last_exon_ratio:
If the outer last exon read count is higher last_exon_ratio, prioritize
the outter border again, i.e. select the outter ratio
for EIB ratio calculation.
>>> regid2nc_dic = {"t1_e1_ebe2" : [0.5, 10, 20], "t1_e1_ebi2" : [0.2, 4, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
(2.5, 'first_exon')
>>> get_ei_border_ratio_from_exon_id("t2_e1", regid2nc_dic)
(-1, 'single_exon')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [1.0, 20, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
(3.0, 'inner_exon')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [0.1, 2, 20], "t1_e2_ebi2" : [0.1, 2, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
(2.0, 'inner_exon_ds_lc')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.1, 2, 20], "t1_e2_ebi1" : [0.1, 2, 20], "t1_e2_ebe2" : [0.5, 10, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
(2.0, 'inner_exon_us_lc')
>>> regid2nc_dic = {"t1_e1_ebe2" : [0.5, 10, 20], "t1_e1_ebi2" : [0.0, 0, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
(10, 'first_exon')
>>> regid2nc_dic = {"t1_e1_ebe2" : [0.0, 0, 20], "t1_e1_ebi2" : [0.5, 10, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
(0.0, 'first_exon')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [1.0, 20, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=1)
(4.0, 'inner_exon')
"""
exb_id_e1 = exon_id + "_ebe1"
exb_id_i1 = exon_id + "_ebi1"
exb_id_e2 = exon_id + "_ebe2"
exb_id_i2 = exon_id + "_ebi2"
# For single-exon transcripts.
if exb_id_e1 not in regid2nc_dic and exb_id_e2 not in regid2nc_dic:
if exid2eibrs_dic is not None:
assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
exid2eibrs_dic[exon_id] = [-1]
return -1, "single_exon"
# Last exon.
if exb_id_e1 in regid2nc_dic and exb_id_e2 not in regid2nc_dic:
assert exb_id_i1 in regid2nc_dic, "exb_id_e1 %s in regid2nc_dic, but not exb_id_i1 %s" %(exb_id_e1, exb_id_i1)
ratio1 = -1
sel_crit = "last_exon"
if regid2nc_dic[exb_id_e1][1] >= min_reg_cov or regid2nc_dic[exb_id_i1][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i1][0]:
ratio1 = regid2nc_dic[exb_id_e1][0] / regid2nc_dic[exb_id_i1][0]
else:
ratio1 = regid2nc_dic[exb_id_e1][1]
if exid2eibrs_dic is not None:
assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
exid2eibrs_dic[exon_id] = [ratio1]
return ratio1, sel_crit
# First exon.
if exb_id_e1 not in regid2nc_dic and exb_id_e2 in regid2nc_dic:
assert exb_id_i2 in regid2nc_dic, "exb_id_e2 %s in regid2nc_dic, but not exb_id_i2 %s" %(exb_id_e2, exb_id_i2)
ratio2 = -1
sel_crit = "first_exon"
if regid2nc_dic[exb_id_e2][1] >= min_reg_cov or regid2nc_dic[exb_id_i2][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i2][0]:
ratio2 = regid2nc_dic[exb_id_e2][0] / regid2nc_dic[exb_id_i2][0]
else:
ratio2 = regid2nc_dic[exb_id_e2][1]
if exid2eibrs_dic is not None:
assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
exid2eibrs_dic[exon_id] = [ratio2]
return ratio2, sel_crit
# In-between exons.
if exb_id_e1 in regid2nc_dic and exb_id_e2 in regid2nc_dic:
assert exb_id_i1 in regid2nc_dic, "exb_id_e1 %s in regid2nc_dic, but not exb_id_i1 %s" %(exb_id_e1, exb_id_i1)
assert exb_id_i2 in regid2nc_dic, "exb_id_e2 %s in regid2nc_dic, but not exb_id_i2 %s" %(exb_id_e2, exb_id_i2)
ratio1 = -1
ratio2 = -1
# if exon_id == "ENST00000366553.3_e2":
# print(exon_id)
# print("regid2nc_dic[exb_id_i1][1]:", regid2nc_dic[exb_id_i1][1])
# print("regid2nc_dic[exb_id_e1][1]:", regid2nc_dic[exb_id_e1][1])
# print("regid2nc_dic[exb_id_e2][1]:", regid2nc_dic[exb_id_e2][1])
# print("regid2nc_dic[exb_id_i2][1]:", regid2nc_dic[exb_id_i2][1])
# print("regid2nc_dic[exb_id_i1][0]:", regid2nc_dic[exb_id_i1][0])
# print("regid2nc_dic[exb_id_e1][0]:", regid2nc_dic[exb_id_e1][0])
# print("regid2nc_dic[exb_id_e2][0]:", regid2nc_dic[exb_id_e2][0])
# print("regid2nc_dic[exb_id_i2][0]:", regid2nc_dic[exb_id_i2][0])
sel_crit = "inner_exon"
if regid2nc_dic[exb_id_e1][1] >= min_reg_cov or regid2nc_dic[exb_id_i1][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i1][0]:
ratio1 = regid2nc_dic[exb_id_e1][0] / regid2nc_dic[exb_id_i1][0]
else:
ratio1 = regid2nc_dic[exb_id_e1][1]
else:
sel_crit += "_us_lc"
if regid2nc_dic[exb_id_e2][1] >= min_reg_cov or regid2nc_dic[exb_id_i2][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i2][0]:
ratio2 = regid2nc_dic[exb_id_e2][0] / regid2nc_dic[exb_id_i2][0]
else:
ratio2 = regid2nc_dic[exb_id_e2][1]
else:
sel_crit += "_ds_lc"
if exid2eibrs_dic is not None:
assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
exid2eibrs_dic[exon_id] = [ratio1, ratio2]
if ratio1 == -1 and ratio2 != -1:
avg_ratio = ratio2
elif ratio1 != -1 and ratio2 == -1:
avg_ratio = ratio1
elif ratio1 == -1 and ratio2 == -1:
avg_ratio = -1
else:
if ratio_mode == 1:
cov_b1 = regid2nc_dic[exb_id_i1][0] + regid2nc_dic[exb_id_e1][0]
cov_b2 = regid2nc_dic[exb_id_i2][0] + regid2nc_dic[exb_id_e2][0]
if cov_b1 > cov_b2:
avg_ratio = ratio1
else:
avg_ratio = ratio2
if last_exon_dic is not None:
if exon_id in last_exon_dic:
sel_crit = "last_exon"
exon_pol = last_exon_dic[exon_id]
# Define inner borders.
cov_inner = cov_b1
ratio_inner = ratio1
cov_outer = cov_b2
ratio_outer = ratio2
if exon_pol == "-":
cov_inner = cov_b2
ratio_inner = ratio2
cov_outer = cov_b1
ratio_outer = ratio1
if cov_inner*last_exon_ratio >= cov_outer:
avg_ratio = ratio_inner
sel_crit += "_inner"
else:
avg_ratio = ratio_outer
sel_crit += "_outer"
elif ratio_mode == 2:
avg_ratio = statistics.mean([ratio1, ratio2])
else:
assert False, "invalid ratio_mode (%i)" %(ratio_mode)
return avg_ratio, sel_crit
assert False, "invalid get_ei_border_ratio_from_exon_id()"
|
fd5239fabb81d328d644dbb8b56608eda15e78ce
| 3,645,433
|
def events(*_events):
""" A class decorator. Adds auxiliary methods for callback based event
notification of multiple watchers.
"""
def add_events(cls):
# Maintain total event list of both inherited events and events added
# using nested decorations.
try:
all_events = cls.events
except AttributeError:
cls.events = _events
else:
cls.events = all_events + _events
for e in _events:
helpers = {}
exec("""
@lazy
def {event}_handlers(self):
return []
def {event}(self, *a, **kw):
for h in list(self.{handlers}):
h(*a, **kw)
def watch_{event}(self, cb):
self.{handlers}.append(cb)
def unwatch_{event}(self, cb):
self.{handlers}.remove(cb)
""".format(event = e, handlers = e + "_handlers"),
globals(), helpers
)
for n, h in helpers.items():
setattr(cls, n, h)
return cls
return add_events
|
601f7d55ff4d05dd0aca552213dcd911f15c91b6
| 3,645,434
|
import numpy as np
def _find_nearest(array, value):
"""Find the nearest numerical match to value in an array.
Args:
array (np.ndarray): An array of numbers to match with.
value (float): Single value to find an entry in array that is close.
Returns:
np.array: The entry in array that is closest to value.
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
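# A tiny usage sketch (not part of the original snippet).
print(_find_nearest([1.0, 2.5, 4.0], 2.7))  # 2.5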
|
7440447c4079563722b91771f07fcd3c3f5e0c3b
| 3,645,435
|
import requests
from bs4 import BeautifulSoup
def download_document(url):
"""Downloads document using BeautifulSoup, extracts the subject and all
text stored in paragraph tags
"""
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
title = soup.find('title').get_text()
document = ' '.join([p.get_text() for p in soup.find_all('p')])
return document
|
8bb9055b40dd5554185ddec1d3218157a016bfd8
| 3,645,436
|
import numpy as np
def rz_gate(phi: float = 0):
"""Functional for the single-qubit Pauli-Z rotation-gate.
Parameters
----------
phi : float
Rotation angle (in radians)
Returns
-------
rz : (2, 2) np.ndarray
"""
arg = 1j * phi / 2
return np.array([[np.exp(-arg), 0], [0, np.exp(arg)]])
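# A small sketch (illustrative, not from the source): RZ(pi) maps |+> to |-> up to a
# global phase, and the returned matrix is unitary.
plus = np.array([1.0, 1.0]) / np.sqrt(2)
rz = rz_gate(np.pi)
print(rz @ plus)                                 # proportional to (1, -1)/sqrt(2)
print(np.allclose(rz.conj().T @ rz, np.eye(2)))  # True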
|
c148a03f3525698c44e5f8aa14085bfeb29c72ef
| 3,645,437
|
from typing import List
def dict_to_kvp(dictionary: dict) -> List[tuple]:
"""
Converts a dictionary to a list of tuples where each tuple has the key and value
of each dictionary item
:param dictionary: Dictionary to convert
:return: List of Key-Value Pairs
"""
return [(k, v) for k, v in dictionary.items()]
|
2b856ebb218884a4975d316bebe27546070f2083
| 3,645,438
|
def convert_and_remove_punctuation(text):
"""
remove punctuation that are not allowed, e.g. / \
convert Chinese punctuation into English punctuation, e.g. from「 to "
"""
# removal
text = text.replace("\\", "")
text = text.replace("\\", "")
text = text.replace("[", "")
text = text.replace("]", "")
text = text.replace("【", "")
text = text.replace("】", "")
text = text.replace("{", "")
text = text.replace("}", "")
# conversion
text = text.replace(u"\u201C", "\"")
text = text.replace(u"\u201D", "\"")
text = text.replace(u"\u2018", "'")
text = text.replace(u"\u2019", "'")
text = text.replace("「", "\"")
text = text.replace("」", "\"")
text = text.replace("『", "\"")
text = text.replace("』", "\"")
text = text.replace("quot;", "\"")
return text
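# A short usage sketch (not part of the original snippet) showing bracket removal
# and CJK-quote conversion on a made-up mixed string.
print(convert_and_remove_punctuation("「你好」 [test] \u201Cquote\u201D"))
# -> "你好" test "quote"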
|
2de1f930ca76da7fec3467469f98b0e0858e54a0
| 3,645,439
|
def create_random_context(dialog,rng,minimum_context_length=2,max_context_length=20):
"""
Samples random context from a dialog. Contexts are uniformly sampled from the whole dialog.
:param dialog:
:param rng:
:return: context, index of next utterance that follows the context
"""
# sample dialog context
#context_turns = rng.randint(minimum_context_length,len(dialog)-1)
max_len = min(max_context_length, len(dialog)) - 2
if max_len <= minimum_context_length:
context_turns = max_len
else:
context_turns = rng.randint(minimum_context_length,max_len)
# create string
return dialog_turns_to_string(dialog[:context_turns]),context_turns
|
d66ee8f185380801735644a7ce4528f398385e60
| 3,645,440
|
def dev_test_new_schema_version(dbname, sqldb_dpath, sqldb_fname,
version_current, version_next=None):
"""
hacky function to ensure that only developer sees the development schema
and only on test databases
"""
TESTING_NEW_SQL_VERSION = version_current != version_next
if TESTING_NEW_SQL_VERSION:
print('[sql] ATTEMPTING TO TEST NEW SQLDB VERSION')
devdb_list = ['PZ_MTEST', 'testdb1', 'testdb0', 'testdb2',
'testdb_dst2', 'emptydatabase']
testing_newschmea = ut.is_developer() and dbname in devdb_list
#testing_newschmea = False
#ut.is_developer() and ibs.get_dbname() in ['PZ_MTEST', 'testdb1']
if testing_newschmea:
# Set to true until the schema module is good then continue tests
# with this set to false
testing_force_fresh = True or ut.get_argflag('--force-fresh')
# Work on a fresh schema copy when developing
dev_sqldb_fname = ut.augpath(sqldb_fname, '_develop_schema')
sqldb_fpath = join(sqldb_dpath, sqldb_fname)
dev_sqldb_fpath = join(sqldb_dpath, dev_sqldb_fname)
ut.copy(sqldb_fpath, dev_sqldb_fpath, overwrite=testing_force_fresh)
# Set testing schema version
#ibs.db_version_expected = '1.3.6'
print('[sql] TESTING NEW SQLDB VERSION: %r' % (version_next,))
#print('[sql] ... pass --force-fresh to reload any changes')
return version_next, dev_sqldb_fname
else:
print('[ibs] NOT TESTING')
return version_current, sqldb_fname
|
ec57d6ccb39d76159ab80c6fdfe094b486d00777
| 3,645,441
|
import numpy as np
def _get_distance_euclidian(row1: np.ndarray, row2: np.ndarray):
"""
_get_distance
returns the distance between 2 rows
(euclidian distance between vectors)
takes into account all columns of data given
"""
distance = 0.
for i, _ in enumerate(row1):
distance += (row1[i] - row2[i]) ** 2
return np.sqrt(distance)
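# A quick check (not from the source) that the explicit loop matches the vectorized
# NumPy equivalent on sample vectors.
r1 = np.array([0.0, 3.0, 4.0])
r2 = np.array([0.0, 0.0, 0.0])
print(_get_distance_euclidian(r1, r2))  # 5.0
print(np.linalg.norm(r1 - r2))          # 5.0, same result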
|
13a3944becf717222eb6fc997ceb937ad37b30ab
| 3,645,442
|
import re
def _get_ip_from_response(response):
"""
Filter ipv4 addresses from string.
Parameters
----------
response: str
String with ipv4 addresses.
Returns
-------
list: list with ip4 addresses.
"""
ip = re.findall(r'\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b', response)
return ip
|
ac36a3b729b0ce4ba13a6db550a71276319cbd70
| 3,645,443
|
from typing import Optional
from typing import List
import logging
def create_processor(
options: options_pb2.ConvertorOptions,
theorem_database: Optional[proof_assistant_pb2.TheoremDatabase] = None,
tactics: Optional[List[deephol_pb2.Tactic]] = None) -> ProofLogToTFExample:
"""Factory function for ProofLogToTFExample."""
if theorem_database and options.theorem_database_path:
raise ValueError(
'Both theorem database as well as a path to load it from file '
'provided. Only provide one.')
if not theorem_database:
theorem_database = io_util.load_theorem_database_from_file(
str(options.theorem_database_path))
if tactics and options.tactics_path:
raise ValueError('Both tactics as well as a path to load it from '
'provided. Only provide one.')
if not tactics:
tactics = io_util.load_tactics_from_file(str(options.tactics_path), None)
tactics_name_id_map = {tactic.name: tactic.id for tactic in tactics}
if options.replacements_hack:
logging.warning('Replacements hack is enabled.')
tactics_name_id_map.update({
'GEN_TAC': 8,
'MESON_TAC': 11,
'CHOOSE_TAC': 34,
})
if options.format != options_pb2.ConvertorOptions.HOLPARAM:
raise ValueError('Unknown options_pb2.ConvertorOptions.TFExampleFormat.')
return ProofLogToTFExample(tactics_name_id_map, theorem_database, options)
|
898a72372a80546f4de277c5f3e3573c7f8edff6
| 3,645,444
|
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
|
132a3b1bb8a0e7b3c92ac15e2d68337eeef19042
| 3,645,445
|
def login_invalid(request, error_type):
""" Displays the index with an error message. """
# TODO - encode authentication error message in URI
try:
message = INVALID_LOGIN_MESSAGE[error_type]
except KeyError:
message = "Erreur inconnue"
context = {'form': LoginForm(), 'message': message }
return render(request, 'index.htm', context)
|
e9d901a052f696c69b00f6499da87fa6b5b3419d
| 3,645,446
|
import numpy as np
def lsh(B_BANDS, docIdList, sig):
""" Applies the LSH algorithm. This function first divides the signature matrix into bands and hashes each column onto buckets.
:param B_BANDS: Number of bands in signature matrix
:param docIdList: List of document ids
:param sig: signature matrix
:return: List of document to its hash along with the buckets
"""
numHash = number_of_hash
bands = getbestb(threshold,number_of_hash)
rows = numHash / bands
d = 1681
# List of dictionaries, one per band, each holding the buckets for hashed vectors in that band
buckets = [dict() for _ in range(bands)]
# Mapping from docid to h to find the buckets in which document with docid was hashed
docth = np.zeros((d, bands), dtype=int) # doc to hash
for i in range(bands):
for j in range(d):
low = int(i*rows) # First row in a band
high = min(int((i+1)*rows), numHash)# Last row in current band
l = []
for x in range(low, high):
l.append(sig[x, j]) # Append each row into l
h = int(hash(tuple(l))) % (d+1)
if h in buckets[i]:
buckets[i][h].append(j) # If a bucket already exists for this hash value, append this document to it
else:
buckets[i][h] = [j]
docth[j][i] = h
# print(docth)
return docth, buckets
|
ad6071e52d2c442764e57bb68e2f1e2d4c5a7c2e
| 3,645,447
|
import numpy as np
def langevin_coefficients(
temperature,
dt,
friction,
masses):
"""
Compute coefficients for langevin dynamics
Parameters
----------
temperature: float
units of Kelvin
dt: float
units of picoseconds
friction: float
frequency in picoseconds
masses: array
mass of each atom in standard mass units
Returns
-------
tuple (ca, cb, cc)
ca is scalar, and cb and cc are n length arrays
that are used during langevin dynamics
"""
vscale = np.exp(-dt*friction)
if friction == 0:
fscale = dt
else:
fscale = (1-vscale)/friction
kT = BOLTZ * temperature
nscale = np.sqrt(kT*(1-vscale*vscale)) # noise scale
invMasses = 1.0/masses
sqrtInvMasses = np.sqrt(invMasses)
ca = vscale
cb = fscale*invMasses
cc = nscale*sqrtInvMasses
return ca, cb, cc
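# A minimal sketch of how these coefficients typically enter one Langevin velocity
# update (an assumption; the original snippet only defines the coefficients, not the
# integrator). BOLTZ is set explicitly here because the snippet relies on a
# module-level constant; kJ/mol/K units are assumed.
BOLTZ = 0.008314462618
masses = np.array([12.0, 1.0, 1.0, 1.0])  # hypothetical atom masses
ca, cb, cc = langevin_coefficients(300.0, 1.5e-3, 1.0, masses)
velocities = np.zeros_like(masses)
forces = np.ones_like(masses)  # hypothetical forces
velocities = ca * velocities + cb * forces + cc * np.random.normal(size=masses.shape)
print(velocities)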
|
680d5c8898ecb7c0627232c8c993bb0f64a2e9d3
| 3,645,448
|
import errno
import os
import time
def waitpid_handle_exceptions(pid, deadline):
"""Wrapper around os.waitpid()/waitpid_with_timeout(), which waits until
either a child process exits or the deadline elapses, and retries if certain
exceptions occur.
Args:
pid: Process ID to wait for, or -1 to wait for any child process.
deadline: If non-zero, waiting stops when time.time() exceeds this value.
If zero, waiting stops when a child process exits.
Returns:
(pid, status): Same as for waitpid_with_timeout(). |pid| is non-zero if and
only if a child exited during the wait.
Raises:
Same as for os.waitpid(), except:
OSError with errno==EINTR causes the wait to be retried (this can happen,
for example, if this parent process receives SIGHUP).
OSError with errno==ECHILD means there are no child processes, and so
this function sleeps until |deadline|. If |deadline| is zero, this is an
error and the OSError exception is raised in this case.
"""
while True:
try:
if deadline == 0:
pid_result, status = os.waitpid(pid, 0)
else:
pid_result, status = waitpid_with_timeout(pid, deadline)
return (pid_result, status)
except OSError as e:
if e.errno == errno.EINTR:
continue
elif e.errno == errno.ECHILD:
now = time.time()
if deadline == 0:
# No time-limit and no child processes. This is treated as an error
# (see docstring).
raise
elif deadline > now:
time.sleep(deadline - now)
return (0, 0)
else:
# Anything else is an unexpected error.
raise
|
2d15594c9b066b3e1000a6394503a9b8a88e5420
| 3,645,449
|
import subprocess
import numpy as np
def tedor_ideal(t_mix, a, dist, t2, j_cc, obs='C13', pulsed='N15', vr=14000, return_t=False):
"""
Makes a SpinEvolution input file from template file "tedor_ideal_template", calls SpinEvolution, parses the output,
and applies phenomenological scaling and exponential relaxation.
The tedor_ideal is a calculation for interpreting and ultimately fitting ZF-TEDOR build-up curves
Parameters
----------
a: float, scaling factor
dist: float, distance between 13C-15N
t2: float, $T_2$ relaxations time
vr: float, MAS speed in HZ
j_cc: float, carbon carbon J coupling in Hz
return_t: bool, should the function return t=np.arange(0, n)*tr
t_mix: array of mixing experimental mixing times in ms
obs: string, the observed nucleus for the TEDOR experiment
pulsed: string, the nucleus with the REDOR pulses on it
Returns
-------
signal: array, len(t_mix)
or
time; signal: array, len(n); array, len(t_mix)
"""
# Build the simulation program from the template
sim_params = {'dist': dist, 'vr': vr / 1000, 'tr': 1 / vr, 'obs': obs, 'pulsed': pulsed}
with open('templates/tedor_ideal_template', 'r') as fid:
template = fid.read()
with open('templates/tedor_ideal_step', 'w') as fid:
fid.write(template.format(**sim_params))
cmd = ['/opt/spinev/spinev', 'templates/tedor_ideal_step']
# Run the simulation
subprocess.call(cmd)
# Parse the results
output_file = 'templates/tedor_ideal_step_re.dat'
results = np.loadtxt(output_file)
time = results[:, 0]
signal = results[:, 1]
# Apply phenomenological corrections
signal = a * signal * (np.cos(np.pi * (j_cc * 1000 / 2))**2) * np.exp(-time / t2)
time_points = []
signal_points = []
for i in t_mix:
ind = (np.where((np.trunc(time * 100) / 100) == i)[0][0])
time_points.append(time[ind])
signal_points.append(signal[ind])
if return_t:
return time_points, signal_points
else:
return signal_points
|
646d4d3a811c8fc7ad2521a1aca921d2ceb2e8a6
| 3,645,450
|
import cv2
def preprocess(image, image_size):
"""
Preprocess
Pre-process the image with adaptive threshold, perspective transform,
erode, dilate and resize.
:param image: image of display from cv2.read
:param image_size: target size of the output image
:return out_image: output image after preprocessing
"""
# blur
blurred = cv2.GaussianBlur(image, (5, 5), 1)
# perspective transformation
out_img = myPerspectiveTransformation(blurred)
# resize it
out_img = resizeSquareRespectAcpectRatio(
out_img,
image_size,
cv2.INTER_AREA
)
return out_img
|
497d3d1a32be643486903d44621ff203503b726e
| 3,645,451
|
import urllib.request
import urllib.parse
from urllib.error import HTTPError
import json
import time
def download(distributor: Distributor, max_try:int = 4) -> list[TrainInformation]|None:
"""Download train information from distributor.
If response status code was 500-599, this function retries up to max_try times.
Parameters
----------
distributor : Distributor
Distributor of infomation source.
max_try : int, optional
If response status code was 500-599, it retries up to this value.(default = 4)
Returns
-------
list[TrainInformation]|None
List of train information which is downloaded from web, or None if consumerKey is unset.
Raises
------
InvalidParameterError
HTTP status code was 400.
InvalidConsumerKeyError
HTTP status code was 401.
Forbidden
HTTP status code was 403.
NotFound
HTTP status code was 404.
OdptServerError
HTTP status code was 500-599.
UnknownHTTPError
HTTP status code was unexpected.
"""
if not distributor.is_valid():
return None
query = {}
query["acl:consumerKey"] = distributor.consumer_key
json_dict:list[TrainInformation_jsondict] = []
for try_count in range(max_try):
try:
with urllib.request.urlopen("%s?%s" % (distributor.URL, urllib.parse.urlencode(query))) as f:
json_dict = json.load(f)
break
except HTTPError as e:
match e.code:
case 400:
raise InvalidParameterError(e)
case 401:
raise InvalidConsumerKeyError(e)
case 403:
raise Forbidden(e)
case 404:
raise NotFound(e, distributor.value)
case code if 500 <= code < 600:
if try_count == max_try-1:
raise OdptServerError(e)
else:
time.sleep(1+try_count)
continue
case _:
raise UnknownHTTPError(e)
except Exception as e:
if try_count == max_try-1:
raise
else:
time.sleep(1+try_count)
continue
return TrainInformation.from_list(json_dict)
|
1288e50807465164dd4aa2e082b4136abe81636c
| 3,645,452
|
import tensorflow as tf
def add_payloads(prev_layer, input_spikes):
"""Get payloads from previous layer."""
# Get only payloads of those pre-synaptic neurons that spiked
payloads = tf.where(tf.equal(input_spikes, 0.),
tf.zeros_like(input_spikes), prev_layer.payloads)
print("Using spikes with payloads from layer {}".format(prev_layer.name))
return input_spikes + payloads
|
4f7bd805e8659ddea0da63fd542edb6d52073569
| 3,645,453
|
def read_csv_to_data(path: str, delimiter: str = ",", headers: list = []):
"""A zero-dependancy helper method to read a csv file
Given the path to a csv file, read data row-wise. This data may be later converted to a dict of lists if needed (column-wise).
Args:
path (str): Path to csv file
delimiter (str, optional): Delimiter to split the rows by. Defaults to ','
headers: (list, optional): Given header list for a csv file. Defaults to an empty list, which results in the first row being used as a header.
Returns:
A list of dictionary values (list of rows) representing the file being read
"""
data = []
with open(path, "r") as f:
header = headers
if len(headers) == 0:
header = f.readline().split(",")
for line in f:
entry = {}
for i, value in enumerate(line.split(",")):
entry[header[i].strip()] = value.strip()
data.append(entry)
return data
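# A self-contained usage sketch (not from the source): write a small temporary CSV
# and read it back row-wise.
import os
import tempfile
tmp = tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False)
tmp.write("name,age\nalice,30\nbob,25\n")
tmp.close()
print(read_csv_to_data(tmp.name))
# [{'name': 'alice', 'age': '30'}, {'name': 'bob', 'age': '25'}]
os.unlink(tmp.name)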
|
f60e163e770680efd1f8944becd79a0dd7ceaa08
| 3,645,454
|
def main_menu(update, context):
"""Handling the main menu
:param update: Update of the sent message
:param context: Context of the sent message
:return: Status for main menu
"""
keyboard = [['Eintragen'],
['Analyse']]
update.message.reply_text(
'Was möchtest du machen?',
reply_markup=ReplyKeyboardMarkup(keyboard)
)
return MAIN
|
bafc092ec662286f417a9a5d2c47a675336c4825
| 3,645,455
|
def build_model(inputs, num_classes, is_training, hparams):
"""Constructs the vision model being trained/evaled.
Args:
inputs: input features/images being fed to the image model build built.
num_classes: number of output classes being predicted.
is_training: is the model training or not.
hparams: additional hyperparameters associated with the image model.
Returns:
The logits of the image model.
"""
scopes = setup_arg_scopes(is_training)
if len(scopes) != 1:
raise ValueError('Nested scopes deprecated in py3.')
with scopes[0]:
if hparams.model_name == 'pyramid_net':
logits = build_shake_drop_model(inputs, num_classes, is_training)
elif hparams.model_name == 'wrn':
logits = build_wrn_model(inputs, num_classes, hparams.wrn_size)
elif hparams.model_name == 'shake_shake':
logits = build_shake_shake_model(inputs, num_classes, hparams,
is_training)
elif hparams.model_name == 'resnet':
logits = build_resnet_model(inputs, num_classes, hparams,
is_training)
else:
raise ValueError("Unknown model name.")
return logits
|
0ad57496d77e4406c5081982a2c02f2111cb5b57
| 3,645,456
|
import flask_monitoringdashboard
from flask import Flask, jsonify
def get_test_app_for_status_code_testing(schedule=False):
"""
:return: Flask Test Application with the right settings
"""
app = Flask(__name__)
@app.route('/return-a-simple-string')
def return_a_simple_string():
return 'Hello, world'
@app.route('/return-a-tuple')
def return_a_tuple():
return 'Hello, world', 404
@app.route('/ridiculous-return-value')
def return_ridiculous_return_value():
return 'hello', 'ridiculous'
@app.route('/return-jsonify-default-status-code')
def return_jsonify_default_status_code():
return jsonify({
'apples': 'banana'
})
@app.route('/return-jsonify-with-custom-status-code')
def return_jsonify_with_custom_status_code():
response = jsonify({
'cheese': 'pears'
})
response.status_code = 401
return response
@app.route('/unhandled-exception')
def unhandled_exception():
potatoes = 1000
bananas = 0
return potatoes / bananas
app.config['SECRET_KEY'] = flask_monitoringdashboard.config.security_token
app.testing = True
flask_monitoringdashboard.user_app = app
app.config['WTF_CSRF_ENABLED'] = False
app.config['WTF_CSRF_METHODS'] = []
flask_monitoringdashboard.config.get_group_by = lambda: '12345'
flask_monitoringdashboard.bind(app=app, schedule=schedule)
TEST_CACHE = {'main': EndpointInfo()}
flask_monitoringdashboard.core.cache.memory_cache = TEST_CACHE
return app
|
69951350c8b14cf02b1327773665d9080b0eeb48
| 3,645,457
|
import os
def current_user():
"""Returns the value of the USER environment variable"""
return os.environ['USER']
|
75d588d801a5afcd2037a05c7dc5e990532eb114
| 3,645,458
|
import numpy as np
def run_multiple_cases(x, y, z, door_height, door_width, t_amb,
HoC, time_ramp, hrr_ramp, num, door, wall,
simulation_time, dt_data):
"""
Generate multiple CFAST input files and calls other functions
"""
resulting_temps = np.array([])
for i in range(len(door_width)):
casename = gen_input(x, y, z, door_height[i], door_width[i],
t_amb[i], HoC, time_ramp, hrr_ramp, num, door,
wall, simulation_time, dt_data)
run_cfast(casename)
temps, outfile = read_cfast(casename)
outfile.close()
hgl = temps[:,1]
resulting_temps = np.append(hgl[-1], resulting_temps)
return resulting_temps
|
1c056b4c991889b81324857788cda416f90a8cdc
| 3,645,459
|
def get_all():
"""
Gets all tuples from the Estudiantes relation.
:returns: All tuples of the relation.
:rtype: list
"""
try:
conn = helpers.get_connection()
cur = conn.cursor()
cur.execute(ESTUDIANTE_QUERY_ALL)
result = cur.fetchall()
# Commit the changes and release resources
conn.commit()
cur.close()
conn.close()
return result
except Exception as e:
raise e
|
8b2248f09b02bf8fb4198bd36e743a5d052dd9f3
| 3,645,460
|
import warnings
def load_fgong(filename, fmt='ivers', return_comment=False,
return_object=True, G=None):
"""Given an FGONG file, returns NumPy arrays ``glob`` and ``var`` that
correspond to the scalar and point-wise variables, as specified
in the `FGONG format`_.
.. _FGONG format: https://www.astro.up.pt/corot/ntools/docs/CoRoT_ESTA_Files.pdf
Also returns the first four lines of the file as a `comment`, if
desired.
The version number ``ivers`` is used to infer the format of floats
if ``fmt='ivers'``.
If ``return_object`` is ``True``, instead returns an :py:class:`FGONG`
object. This is the default behaviour as of v0.0.12. The old
behaviour will be dropped completely from v0.1.0.
Parameters
----------
filename: str
Name of the FGONG file to read.
fmt: str, optional
Format string for floats in `glob` and `var`. If ``'ivers'``,
uses ``%16.9E`` if the file's ``ivers < 1000`` or ``%26.18E3`` if
``ivers >= 1000``. If ``'auto'``, tries to guess the size of each
float. (default: 'ivers')
return_comment: bool, optional
If ``True``, return the first four lines of the FGONG file.
These are comments that are not used in any calculations.
Returns
-------
glob: NumPy array
The scalar (or global) variables for the stellar model
var: NumPy array
The point-wise variables for the stellar model. i.e. things
that vary through the star like temperature, density, etc.
comment: list of strs, optional
The first four lines of the FGONG file. These are comments
that are not used in any calculations. Only returned if
``return_comment=True``.
"""
with tomso_open(filename, 'rb') as f:
comment = [f.readline().decode('utf-8').strip() for i in range(4)]
nn, iconst, ivar, ivers = [int(i) for i in f.readline().decode('utf-8').split()]
# lines = f.readlines()
lines = [line.decode('utf-8').lower().replace('d', 'e')
for line in f.readlines()]
tmp = []
if fmt == 'ivers':
if ivers < 1000:
N = 16
else:
N = 27
# try to guess the length of each float in the data
elif fmt == 'auto':
N = len(lines[0])//5
else:
N = len(fmt % -1.111)
for line in lines:
for i in range(len(line)//N):
s = line[i*N:i*N+N]
# print(s)
if s[-9:] == '-Infinity':
s = '-Inf'
elif s[-9:] == ' Infinity':
s = 'Inf'
elif s.lower().endswith('nan'):
s = 'nan'
elif 'd' in s.lower():
s = s.lower().replace('d','e')
tmp.append(float(s))
glob = np.array(tmp[:iconst])
var = np.array(tmp[iconst:]).reshape((-1, ivar))
if return_object:
return FGONG(glob, var, ivers=ivers, G=G,
description=comment)
else:
warnings.warn("From tomso 0.1.0+, `fgong.load_fgong` will only "
"return an `FGONG` object: use `return_object=True` "
"to mimic future behaviour",
FutureWarning)
if return_comment:
return glob, var, comment
else:
return glob, var
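# A minimal usage sketch (not part of the original source; the filename is illustrative only):
#   fgong = load_fgong('model.fgong')                           # FGONG object (default behaviour)
#   glob, var = load_fgong('model.fgong', return_object=False)  # legacy array-based behaviour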
|
17fcac5511a588351701f921dc8449d81a603fb6
| 3,645,461
|
import os
import numpy
import time
from astropy.io import fits
def hla_saturation_flags(drizzled_image, flt_list, catalog_name, catalog_data, proc_type, param_dict, plate_scale,
column_titles, diagnostic_mode):
"""Identifies and flags saturated sources.
Parameters
----------
drizzled_image : string
drizzled filter product image filename
flt_list : list
list of calibrated images that were drizzle-combined to produce image specified by input parameter
'drizzled_image'
catalog_name : string
drizzled filter product catalog filename to process
catalog_data : astropy.Table object
drizzled filter product catalog data to process
proc_type : string
sourcelist generation type.
param_dict : dictionary
Dictionary of instrument/detector - specific drizzle, source finding and photometric parameters
plate_scale : float
plate scale, in arcseconds/pixel
column_titles : dictionary
Relevant column titles
diagnostic_mode : bool
write intermediate files?
Returns
-------
phot_table_rows : astropy.Table object
drizzled filter product catalog data with updated flag values
"""
image_split = drizzled_image.split('/')[-1]
channel = drizzled_image.split("_")[4].upper()
    if channel == 'IR': # TODO: Test an IR case just to make sure that IR shouldn't be skipped
return catalog_data
# -------------------------------------------------------------------
# STEP THROUGH EACH APPLICABLE FLT IMAGE, DETERMINE THE COORDINATES
# FOR ALL SATURATION FLAGGED PIXELS, AND TRANSFORM THESE COORDINATES
# INTO THE DRIZZLED IMAGE REFERENCE FRAME.
# -------------------------------------------------------------------
num_flts_in_main_driz = len(flt_list)
flt_list.sort()
log.info(' ')
log.info("Current Working Directory: {}".format(os.getcwd()))
log.info(' ')
log.info('LIST OF FLTS IN {}: {}'.format(drizzled_image.split('/')[-1], flt_list))
log.info(' ')
log.info('NUMBER OF FLTS IN {}: {}'.format(drizzled_image.split('/')[-1], num_flts_in_main_driz))
log.info(' ')
# ----------------------------------------------------
# EXTRACT DQ DATA FROM FLT IMAGE AND CREATE A LIST
# OF "ALL" PIXEL COORDINATES WITH A FLAG VALUE OF 256
# ----------------------------------------------------
if ((channel.lower() != 'wfpc2') and (channel.lower() != 'pc')):
if channel.lower() in ['wfc', 'uvis']:
image_ext_list = ["[sci,1]", "[sci,2]"]
if channel.lower() in ['sbc', 'hrc']:
image_ext_list = ["[sci,1]"]
dq_sat_bit = 256
if channel.lower() == 'wfpc2':
image_ext_list = ["[sci,1]", "[sci,2]", "[sci,3]", "[sci,4]"]
dq_sat_bit = 8
if channel.lower() == 'pc':
image_ext_list = ["[sci,1]"]
dq_sat_bit = 8
# build list of arrays
drz_sat_xy_coords_list = []
for flt_cnt, flt_image in enumerate(flt_list):
for ext_cnt, image_ext in enumerate(image_ext_list):
ext_part = image_ext.split(',')[1].split(']')[0]
try:
if ((channel.lower() != 'wfpc2') and (channel.lower() != 'pc')):
flt_data = fits.getdata(flt_image, 'DQ', int(ext_part))
if ((channel.lower() == 'wfpc2') or (channel.lower() == 'pc')):
flt_data = fits.getdata(flt_image.replace("_c0m", "_c1m"), 'SCI', int(ext_part))
except KeyError:
log.info(' ')
log.info('WARNING: There is only one set of file extensions in {}'.format(flt_image))
log.info(' ')
continue
# TODO: Should we also look for pixels flagged with DQ value 2048 (A to D saturation) for ACS data?
# ----------------------------------------------------
# DETERMINE IF ANY OF THE PIXELS LOCATED IN THE GRID
# HAVE A BIT VALUE OF 256, I.E. FULL WELL SATURATION.
# ----------------------------------------------------
# NOTE: NUMPY ARRAYS REPORT Y COORD VALUES FIRST AND
# X COORD VALUES SECOND AS FOLLOWS:
#
# --> numpy.shape(flt_data)
# (2051, 4096)
#
# WHERE 2051 IS THE NUMBER OF PIXELS IN THE Y
# DIRECTION, AND 4096 IS THE NUMBER OF PIXELS
# IN THE X DIRECTION.
# ----------------------------------------------------
bit_flt_data = dq_sat_bit & flt_data
complete_sat_coords = numpy.where(bit_flt_data == dq_sat_bit)
if len(complete_sat_coords[0]) == 0:
continue
# -------------------------------------------------
# RESTRUCTURE THE LIST OF X AND Y COORDINATES FROM
# THE FLT FILE THAT HAVE BEEN FLAGGED AS SATURATED
# -------------------------------------------------
nsat = len(complete_sat_coords[0])
x_y_array = numpy.empty((nsat, 2), dtype=int)
x_y_array[:, 0] = complete_sat_coords[1]
x_y_array[:, 1] = complete_sat_coords[0]
# ---------------------------------------------------
# WRITE FLT COORDS TO A FILE FOR DIAGNOSTIC PURPOSES
# ---------------------------------------------------
if diagnostic_mode:
flt_xy_coord_out = flt_image.split('/')[-1].split('.')[0] + '_sci' + str(ext_cnt + 1) + '.txt'
outfile = open(flt_xy_coord_out, 'w')
for flt_xy_coord in x_y_array:
x = flt_xy_coord[0]
y = flt_xy_coord[1]
outfile.write(str(x) + ' ' + str(y) + '\n')
outfile.close()
# ----------------------------------------------------
# CONVERT SATURATION FLAGGED X AND Y COORDINATES FROM
# THE FLT IMAGE INTO RA AND DEC
# ----------------------------------------------------
flt_ra_dec_coords = xytord(x_y_array, flt_image, image_ext)
# -------------------------------------------------
# CONVERT RA & DEC VALUES FROM FLT REFERENCE FRAME
# TO THAT OF THE DRIZZLED IMAGE REFERENCE FRAME
# -------------------------------------------------
drz_sat_xy_coords_list.append(rdtoxy(flt_ra_dec_coords, drizzled_image, "[sci,1]"))
log.info(' ')
log.info('FLT IMAGE = {}'.format(flt_image.split('/')[-1]))
log.info('IMAGE EXT = {}'.format(image_ext))
log.info(' ')
# ----------------------------------------------------------------
# IF NO SATURATION FLAGS EXIST IN ANY OF THE FLT FILES, THEN SKIP
# ----------------------------------------------------------------
if len(drz_sat_xy_coords_list) == 0:
log.info(' ')
log.info('*******************************************************************************************')
log.info('NO SATURATION FLAGGED PIXELS EXIST IN ANY OF THE FLT FILES FOR:')
log.info(' --> {}'.format(drizzled_image.split('/')[-1]))
log.info('*******************************************************************************************')
log.info(' ')
return catalog_data
# ------------------------------
# now concatenate all the arrays
# ------------------------------
full_sat_list = numpy.concatenate(drz_sat_xy_coords_list)
# --------------------------------------------
# WRITE RA & DEC FLT CONVERTED X & Y DRIZZLED
# IMAGE COORDINATES TO A TEXT FILE
# --------------------------------------------
if diagnostic_mode:
drz_coord_file = drizzled_image.split('/')[-1].split('.')[0] + '_ALL_FLT_SAT_FLAG_PIX.txt'
drz_coord_out = open(drz_coord_file, 'w')
for coord in full_sat_list:
drz_coord_out.write(str(coord[0]) + ' ' + str(coord[1]) + '\n')
drz_coord_out.close()
# ----------------------------------------------------
# GET SOURCELIST X AND Y VALUES
# ----------------------------------------------------
all_detections = catalog_data
nrows = len(all_detections)
full_coord_list = numpy.empty((nrows, 2), dtype=float)
for row_count, detection in enumerate(all_detections):
full_coord_list[row_count, 0] = float(detection[column_titles["x_coltitle"]])
full_coord_list[row_count, 1] = float(detection[column_titles["y_coltitle"]])
"""
# This option to determine saturation from the drizzled image alone should complement
# the computation based on the DQ array, since the IR (and MAMA?) detectors will not
# have saturated sources that 'bleed' or 'top out'...
#
# Extract Ap2 radius from parameter dict
#
ap2 = param_dict['catalog generation']['aperture_2']
#
# Convert source positions into slices
#
apers = CircularAperture(full_coord_list, ap2)
#
# Determine whether any source (slice) has more than 3 pixels
# within 10% of the max value in the source slice.
# If True, flag as saturated.
#
drz_img = fits.getdata(drizzled_image, ext=1)
img_sat = numpy.zeros(len(full_coord_list), dtype=bool)
for n,aper in enumerate(apers):
if (drz_img[aper.bbox.slices] > drz_img[aper.bbox.slices].max() * 0.9).sum() > 3:
img_sat[n] = True
del drz_img
"""
# ----------------------------------------------------
# CREATE SUB-GROUPS OF SATURATION-FLAGGED COORDINATES
# ----------------------------------------------------
proc_time1 = time.ctime()
log.info(' ')
log.info('PROC_TIME_1: {}'.format(proc_time1))
log.info(' ')
# ----------------------------------
# Convert aperture radius to pixels
# ----------------------------------
ap2 = param_dict['catalog generation']['aperture_2']
radius = round((ap2/plate_scale) + 0.5) * 2.
log.info(' ')
log.info('THE RADIAL DISTANCE BEING USED IS {} PIXELS'.format(str(radius)))
log.info(' ')
# do the cross-match using xymatch
log.info('Matching {} saturated pixels with {} catalog sources'.format(len(full_sat_list), len(full_coord_list)))
psat, pfull = xymatch(full_sat_list, full_coord_list, radius, multiple=True, verbose=False)
    log.info('Found {} cross-matches (including duplicates)'.format(len(psat)))
saturation_flag = numpy.zeros(len(full_coord_list), dtype=bool)
saturation_flag[pfull] = True
proc_time2 = time.ctime()
log.info(' ')
log.info('PROC_TIME_2: {}'.format(proc_time2))
log.info(' ')
# ------------------------------------------------------------------
    # REMOVE DUPLICATE DETECTIONS FROM THE LIST, "group", CREATED FROM
# MATCHING SATURATION FLAGGED FLT PIXELS TO FINAL SOURCE DETECTIONS
# ------------------------------------------------------------------
nsaturated = saturation_flag.sum()
if nsaturated == 0:
log.info(' ')
log.info('**************************************************************************************')
log.info('NOTE: NO SATURATED SOURCES WERE FOUND FOR: {}'.format(image_split))
log.info('**************************************************************************************')
log.info(' ')
return catalog_data
else:
log.info(' ')
log.info('FLAGGED {} SOURCES'.format(nsaturated))
log.info(' ')
if diagnostic_mode:
sat_coord_file = drizzled_image.split('/')[-1].split('.')[0] + '_INTERMEDIATE.txt'
sat_coord_out = open(sat_coord_file, 'w')
for sat_coord in full_coord_list[saturation_flag, :]:
sat_coord_out.write(str(sat_coord[0]) + ' ' + str(sat_coord[1]) + '\n')
sat_coord_out.close()
# --------------------------------------------------------------------------
# WRITE SAT FLAGS TO OUTPUT PHOT TABLE BASED ON flag_src_central_pixel_list
# --------------------------------------------------------------------------
phot_table = catalog_name
phot_table_root = phot_table.split('.')[0]
phot_table_rows = catalog_data
for i, table_row in enumerate(phot_table_rows):
if saturation_flag[i]:
table_row["Flags"] = int(table_row["Flags"]) | 4
phot_table_rows = flag4and8_hunter_killer(phot_table_rows, column_titles)
if diagnostic_mode:
phot_table_temp = phot_table_root + '_SATFILT.txt'
phot_table_rows.write(phot_table_temp, delimiter=",", format='ascii')
return phot_table_rows
|
9ccd478331ec1e22068fb344d7a2d63eb4a40533
| 3,645,462
|
import ctypes
# Assumed module-level helpers, as used in the spiceypy source itself:
from spiceypy.utils import support_types as stypes
from spiceypy.utils.libspicehelper import libspice
def dskb02(handle, dladsc):
"""
Return bookkeeping data from a DSK type 2 segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskb02_c.html
:param handle: DSK file handle
:type handle: int
:param dladsc: DLA descriptor
:type dladsc: spiceypy.utils.support_types.SpiceDLADescr
:return: bookkeeping data from a DSK type 2 segment
:rtype: tuple
"""
handle = ctypes.c_int(handle)
nv = ctypes.c_int(0)
np = ctypes.c_int(0)
nvxtot = ctypes.c_int(0)
vtxbds = stypes.emptyDoubleMatrix(3, 2)
voxsiz = ctypes.c_double(0.0)
voxori = stypes.emptyDoubleVector(3)
vgrext = stypes.emptyIntVector(3)
cgscal = ctypes.c_int(0)
vtxnpl = ctypes.c_int(0)
voxnpt = ctypes.c_int(0)
voxnpl = ctypes.c_int(0)
    libspice.dskb02_c(handle, dladsc, ctypes.byref(nv), ctypes.byref(np),
                      ctypes.byref(nvxtot), vtxbds, ctypes.byref(voxsiz),
                      voxori, vgrext, ctypes.byref(cgscal),
                      ctypes.byref(vtxnpl), ctypes.byref(voxnpt),
                      ctypes.byref(voxnpl))
    return (nv.value, np.value, nvxtot.value, stypes.cMatrixToNumpy(vtxbds),
            voxsiz.value, stypes.cVectorToPython(voxori),
            stypes.cVectorToPython(vgrext), cgscal.value, vtxnpl.value,
            voxnpt.value, voxnpl.value)
|
b08eed84bd518d35166ee28df8f87c06b08220c4
| 3,645,463
|
from packaging import version  # version.parse comes from 'packaging', not 'sys'
def mt_sec(package, db):
"""
Multithreaded function for security check of packages
:param package: package name
:param db: vuln db
:return:
"""
all_rep = {}
all_rep[package] = {}
error_message = None
try:
_, status, rep = control_vulnerability(package, db)
if status:
all_rep = {**all_rep, **rep}
package_name = package.split('==')[0]
versions = get_available_package_versions(package_name)
secure_packages = []
for v in versions:
_, status, rep = control_vulnerability("==".join([package_name, v]), db)
if not status:
secure_packages.append(_)
# else:
# all_rep = {**all_rep, **rep}
if not secure_packages:
error_message = f"!!! IMPORTANT !!! No alternative secure package versions found for package {package}."
print_critical(error_message)
return package, all_rep, error_message
else:
for pkg in secure_packages:
if version.parse(pkg.split("==")[1]) > version.parse(package.split('==')[1]):
error_message = f"Package: {package} is vulnerable replacing with package: {pkg}. Available " \
f"secure versions are : {secure_packages} "
print_warning("WARNING : " + error_message)
return pkg, all_rep, error_message
error_message = f'Package: {package} is vulnerable replacing with latest secure package: ' \
f'{secure_packages[-1]}. Available secure versions are : {secure_packages} '
print_warning(error_message)
return secure_packages[-1], all_rep, error_message
else:
return _, all_rep, error_message
except Exception as e:
error_message = str(e)
return package, all_rep, error_message
|
46e6b3bc0725e88d443b418faf9fc1622e9210cf
| 3,645,464
|
import multiprocessing as mp
import node2vec as n2v
def train_node2vec(graph, dim, p, q):
"""Obtains node embeddings using Node2vec."""
emb = n2v.Node2Vec(
graph=graph,
dimensions=dim,
workers=mp.cpu_count(),
p=p,
q=q,
quiet=True,
).fit()
emb = {
node_id: emb.wv[str(node_id)]
for node_id in sorted(graph.nodes())
}
return emb
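# A minimal usage sketch (assumes networkx is installed; the graph choice is illustrative only):
#   import networkx as nx
#   emb = train_node2vec(nx.karate_club_graph(), dim=16, p=1.0, q=1.0)
#   emb[0]  # embedding vector for node 0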
|
7cea146b2971e973de2ecd365ad25c7f4fd57289
| 3,645,465
|
def approx_match_dictionary():
"""Maps abbreviations to the part of the expanded form that is common beween all forms of the word"""
k=["%","bls","gr","hv","hæstv","kl","klst","km","kr","málsl",\
"málsgr","mgr","millj","nr","tölul","umr","þm","þskj","þús"]
v=['prósent','blaðsíð',\
'grein','háttvirt',\
'hæstvirt','klukkan',\
'klukkustund','kílómetr',\
'krón','málslið',\
'málsgrein','málsgrein',\
'milljón','númer','tölulið',\
'umræð','þingm',\
'þingskj','þúsund']
    return dict(zip(k, v))
|
021c7de862b2559b55051bc7267113d77132e195
| 3,645,466
|
import numpy as np
from scipy.sparse import isspmatrix
def matrix2array(M):
"""
1xN matrix to array.
In other words:
[[1,2,3]] => [1,2,3]
"""
if isspmatrix(M):
M = M.todense()
return np.squeeze(np.asarray(M))
|
731317458f6ec7c068c1a9450447eba39e1423f9
| 3,645,467
|
import numpy as np
def expected(data):
"""Computes the expected agreement, Pr(e), between annotators."""
total = float(np.sum(data))
annotators = range(len(data.shape))
percentages = ((data.sum(axis=i) / total) for i in annotators)
percent_expected = np.dot(*percentages)
return percent_expected
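# A minimal sketch of the chance-agreement term, e.g. Pr(e) in Cohen's kappa
# (the confusion matrix below is illustrative only):
#   data = np.array([[20, 5],
#                    [10, 15]])  # rows: annotator A, columns: annotator B
#   expected(data)               # 0.5 for this matrix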
|
86562fec2b17df35401b8d8b7eafd759a13715e3
| 3,645,468
|
import numpy
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
|
a0e23367d6dff50d79bb828a0af8a82b640400c8
| 3,645,469
|
import json
def account_export_mydata_content(account_id=None):
"""
Export ServiceLinks
:param account_id:
:return: List of dicts
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
# Get table names
logger.info("ServiceLinkRecord")
db_entry_object = ServiceLinkRecord()
slr_table_name = db_entry_object.table_name
logger.info("ServiceLinkRecord table name: " + str(slr_table_name))
logger.info("ConsentRecord")
db_entry_object = ConsentRecord()
cr_table_name = db_entry_object.table_name
logger.info("ConsentRecord table name: " + str(cr_table_name))
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
logger.info("Get SLR IDs")
db_entry_list = []
cursor, slr_id_list = get_slr_ids(cursor=cursor, account_id=account_id, table_name=slr_table_name)
for slr_id in slr_id_list:
logger.info("Getting SLR with slr_id: " + str(slr_id))
slr_dict = account_get_slr(account_id=account_id, slr_id=slr_id)
#
logger.info("Getting status records for SLR")
slsr_dict = account_get_slsrs(account_id=account_id, slr_id=slr_id)
logger.info("Appending status record to SLR")
slr_dict['status_records'] = slsr_dict
#
logger.info("Get CR IDs")
cr_dict_list = []
cursor, cr_id_list = get_cr_ids(slr_id=slr_id, table_name=cr_table_name, cursor=cursor)
for cr_id in cr_id_list:
logger.info("Getting CR with cr_id: " + str(cr_id))
cr_dict = account_get_cr(cr_id=cr_id, account_id=account_id)
logger.info("Getting status records for CR")
csr_dict = account_get_csrs(account_id=account_id, consent_id=cr_id)
logger.info("Appending status record to CR")
cr_dict['status_records'] = csr_dict
logger.info("Appending CR to CR list")
cr_dict_list.append(cr_dict)
#
slr_dict['consent_records'] = cr_dict_list
#
logger.info("Appending SLR to main list")
db_entry_list.append(slr_dict)
logger.info("SLR added to main list: " + json.dumps(slr_dict))
return db_entry_list
|
d61dd638319479572ecea5335f0a9a7fc7156410
| 3,645,470
|
from typing import List
from pydantic import validator
def indicator_entity(indicator_types: List[str] = None) -> type:
"""Return custom model for Indicator Entity."""
class CustomIndicatorEntity(IndicatorEntity):
"""Indicator Entity Field (Model) Type"""
@validator('type', allow_reuse=True)
def is_empty(cls, value: str, field: 'ModelField') -> str:
"""Validate that the value is a non-empty string."""
if isinstance(value, str) and value.replace(' ', '') == '':
raise InvalidEmptyValue(field_name=field.name)
return value
@validator('type', allow_reuse=True)
def is_type(cls, value: str, field: 'ModelField') -> str:
"""Validate that the entity is of a specific Indicator type."""
if value.lower() not in [i.lower() for i in indicator_types]:
raise InvalidEntityType(
field_name=field.name, entity_type=str(indicator_types), value=value
)
return value
return CustomIndicatorEntity
|
f6c77ffd3b8415e07e0e64ab8120a084aab3e2c8
| 3,645,471
|
import numpy as np
from scipy import stats
def z_to_t(z_values, dof):
"""
Convert z-statistics to t-statistics.
An inversion of the t_to_z implementation of [1]_ from Vanessa Sochat's
TtoZ package [2]_.
Parameters
----------
z_values : array_like
Z-statistics
dof : int
Degrees of freedom
Returns
-------
t_values : array_like
T-statistics
References
----------
.. [1] Hughett, P. (2007). Accurate Computation of the F-to-z and t-to-z
Transforms for Large Arguments. Journal of Statistical Software,
23(1), 1-5.
.. [2] Sochat, V. (2015, October 21). TtoZ Original Release. Zenodo.
http://doi.org/10.5281/zenodo.32508
"""
# Select just the nonzero voxels
nonzero = z_values[z_values != 0]
# We will store our results here
t_values_nonzero = np.zeros(len(nonzero))
# Select values less than or == 0, and greater than zero
c = np.zeros(len(nonzero))
k1 = nonzero <= c
k2 = nonzero > c
# Subset the data into two sets
z1 = nonzero[k1]
z2 = nonzero[k2]
# Calculate p values for <=0
p_values_z1 = stats.norm.cdf(z1)
t_values_z1 = stats.t.ppf(p_values_z1, df=dof)
# Calculate p values for > 0
p_values_z2 = stats.norm.cdf(-z2)
t_values_z2 = -stats.t.ppf(p_values_z2, df=dof)
t_values_nonzero[k1] = t_values_z1
t_values_nonzero[k2] = t_values_z2
t_values = np.zeros(z_values.shape)
t_values[z_values != 0] = t_values_nonzero
return t_values
|
4700f52263519169a4610daee8c0940489b2731e
| 3,645,472
|
def getInputShape(model):
"""
Gets the shape when there is a single input.
Return:
Numeric dimensions, omits dimensions that have no value. eg batch
size.
"""
s = []
for dim in model.input.shape:
if dim.value:
s.append(dim.value)
return tuple(s)
|
628f61a995784b9be79816a5bbcde2f8204640be
| 3,645,473
|
import os
def get_latest_file(file_paths, only_return_one_match=True):
"""
    Returns the most recently modified file from a list of file paths
:param file_paths: list(str)
:param only_return_one_match: bool
:return: list(str) or str
"""
last_time = 0
times = dict()
for file_path in file_paths:
mtime = os.stat(file_path).st_mtime
if mtime not in times:
times[mtime] = list()
times[mtime].append(file_path)
if mtime > last_time:
last_time = mtime
    if not times:
        return
    if only_return_one_match:
        return times[last_time][0]
    else:
        return times[last_time]
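# A minimal usage sketch (the glob pattern is illustrative only):
#   import glob
#   newest = get_latest_file(glob.glob('/tmp/*.log'))
#   newest_ties = get_latest_file(glob.glob('/tmp/*.log'), only_return_one_match=False)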
|
895e11ddd1e46228233b880afd5df8a2772e7f44
| 3,645,474
|
import numpy as np
def get_node_depths(tree):
"""
Get the node depths of the decision tree
>>> d = DecisionTreeClassifier()
>>> d.fit([[1,2,3],[4,5,6],[7,8,9]], [1,2,3])
>>> get_node_depths(d.tree_)
array([0, 1, 1, 2, 2])
"""
def get_node_depths_(current_node, current_depth, l, r, depths):
depths += [current_depth]
if l[current_node] != -1 and r[current_node] != -1:
get_node_depths_(l[current_node], current_depth + 1, l, r, depths)
get_node_depths_(r[current_node], current_depth + 1, l, r, depths)
depths = []
get_node_depths_(0, 0, tree.children_left, tree.children_right, depths)
return np.array(depths)
|
4a5a001600c0cb6b1b545be003708088bbd2d060
| 3,645,475
|
import attr
from typing import Tuple
from hypothesis.strategies import booleans, composite, sampled_from, text, tuples
@composite  # assumed: the `draw` argument indicates a hypothesis composite strategy
def homo_tuple_typed_attrs(draw, defaults=None, legacy_types_only=False, kw_only=None):
"""
Generate a tuple of an attribute and a strategy that yields homogenous
tuples for that attribute. The tuples contain strings.
"""
default = attr.NOTHING
val_strat = tuples(text(), text(), text())
if defaults is True or (defaults is None and draw(booleans())):
default = draw(val_strat)
return (
attr.ib(
type=draw(
sampled_from(
[tuple[str, ...], tuple, Tuple, Tuple[str, ...]]
if not legacy_types_only
else [tuple, Tuple, Tuple[str, ...]]
)
),
default=default,
kw_only=draw(booleans()) if kw_only is None else kw_only,
),
val_strat,
)
|
398e47ea6fb65ba0fab1e633ea27dc3cac30ed28
| 3,645,476
|
from typing import Dict
from typing import Any
from typing import Callable
from typing import Union
from typing import Tuple
from typing import Optional
import numpy as np
def flatland_env_factory(
evaluation: bool = False,
env_config: Dict[str, Any] = {},
preprocessor: Callable[
[Any], Union[np.ndarray, Tuple[np.ndarray], Dict[str, np.ndarray]]
] = None,
include_agent_info: bool = False,
random_seed: Optional[int] = None,
) -> FlatlandEnvWrapper:
"""Loads a flatand environment and wraps it using the flatland wrapper"""
del evaluation # since it has same behaviour for both train and eval
env = create_rail_env_with_tree_obs(**env_config)
wrapped_env = FlatlandEnvWrapper(env, preprocessor, include_agent_info)
if random_seed and hasattr(wrapped_env, "seed"):
wrapped_env.seed(random_seed)
return wrapped_env
|
a2076ef15964e60b7a5e4cf885e5b92da594f0ac
| 3,645,477
|
import six
def industry(code, market="cn"):
"""获取某个行业的股票列表。目前支持的行业列表具体可以查询以下网址:
https://www.ricequant.com/api/research/chn#research-API-industry
:param code: 行业代码,如 A01, 或者 industry_code.A01
:param market: 地区代码, 如'cn' (Default value = "cn")
:returns: 行业全部股票列表
"""
if not isinstance(code, six.string_types):
code = code.code
else:
code = to_industry_code(code)
return [
v.order_book_id
for v in _all_instruments_list(market)
if v.type == "CS" and v.industry_code == code
]
|
bf5606b93e17d5b5125f6afd133e86b5ded9a03d
| 3,645,478
|
def kewley_agn_oi(log_oi_ha):
"""Seyfert/LINER classification line for log([OI]/Ha)."""
return 1.18 * log_oi_ha + 1.30
|
5e6b71742bec307ad609d855cced80ae08e5c35c
| 3,645,479
|
def XGMMLReader(graph_file):
"""
    Parse an XGMML file and return the parsed graph.
    Arguments:
    - `graph_file`: path or file object containing XGMML data
"""
parser = XGMMLParserHelper()
parser.parseFile(graph_file)
return parser.graph()
|
ef9c1cb101b22f3302cf93db7447431fb1f5cfa8
| 3,645,480
|
def pt_encode(index):
"""pt: Toggle light."""
return MessageEncode(f"09pt{index_to_housecode(index)}00", None)
|
1e2143d7c356736082d4dc25b459630e8c97fe7a
| 3,645,481
|
import numpy as np
def normalize_inputspace(
x,
vmax=1,
vmin=0,
mean=PYTORCH_IMAGENET_MEAN,
std=PYTORCH_IMAGENET_STD,
each=True,
img_format="CHW",
):
"""
Args:
x: numpy.ndarray
format is CHW or BCHW
each: bool
            if x has a batch dimension B, apply the normalization
            to each input in the batch individually.
Returns:
normalized x: numpy.ndarray
"""
if len(x.shape) == 3:
return normalize3d_inputspace(x, vmax, vmin, mean, std, img_format=img_format)
elif len(x.shape) == 4:
if each:
return np.array(
[
normalize_inputspace(
_x, vmax, vmin, mean, std, img_format=img_format
)
for _x in x
]
)
else:
# TODO:
raise ValueError(each)
|
d616213457722eb183b7c9b64e9b4778e56aa5be
| 3,645,482
|
from typing import Tuple
import os
def get_load_average() -> Tuple[float, float, float]:
"""Get load average"""
return os.getloadavg()
|
48942b9dbd5c1c38e0c9e13566521d96e980b7a7
| 3,645,483
|
from qtpy.QtCore import QUrl
from qtpy.QtGui import QDesktopServices
def start_file(filename):
"""
Generalized os.startfile for all platforms supported by Qt
This function is simply wrapping QDesktopServices.openUrl
    Returns True if successful, otherwise returns False.
"""
# We need to use setUrl instead of setPath because this is the only
# cross-platform way to open external files. setPath fails completely on
# Mac and doesn't open non-ascii files on Linux.
# Fixes spyder-ide/spyder#740.
url = QUrl()
url.setUrl(filename)
return QDesktopServices.openUrl(url)
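# A minimal usage sketch (the path is illustrative only):
#   ok = start_file('/home/user/report.pdf')  # opens the file with the default application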
|
269704fdd5bbf4e3d3e35bec6e9862fe36602f22
| 3,645,484
|
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
|
7a052ddf20b9afff055daed09dbe0963269d46f4
| 3,645,485
|
import numpy as np
from scipy.spatial import ConvexHull, QhullError  # on older SciPy, QhullError lives in scipy.spatial.qhull
def failsafe_hull(coords):
"""
Wrapper of ConvexHull which returns None if hull cannot be computed for given points (e.g. all colinear or too few)
"""
coords = np.array(coords)
if coords.shape[0] > 3:
try:
return ConvexHull(coords)
except QhullError as e:
if 'hull precision error' not in str(e) and 'input is less than 3-dimensional' not in str(e):
raise e
return None
|
dca4d35d98032f9c77da38a860c2209758babfda
| 3,645,486
|
def list_closed_poll_sessions(request_ctx, **request_kwargs):
"""
Lists all closed poll sessions available to the current user.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:return: List closed poll sessions
:rtype: requests.Response (with void data)
"""
path = '/v1/poll_sessions/closed'
url = request_ctx.base_api_url + path.format()
response = client.get(request_ctx, url, **request_kwargs)
return response
|
90c2d660a18ed9fa9f10f092a415e5f94148eba1
| 3,645,487
|
from typing import List
import struct
def _wrap_apdu(command: bytes) -> List[bytes]:
"""Return a list of packet to be sent to the device"""
packets = []
header = struct.pack(">H", len(command))
command = header + command
chunks = [command[i : i + _PacketData.FREE] for i in range(0, len(command), _PacketData.FREE)]
# Create a packet for each command chunk
for packet_id in range(len(chunks)):
header = struct.pack(">HBH", _CHANNEL_ID, _CmdTag.APDU, packet_id)
packet = header + chunks[packet_id]
        # ljust returns a new bytes object, so the padded result must be reassigned
        packet = packet.ljust(_PacketData.SIZE, bytes([0x0]))
packets.append(packet)
return packets
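# A minimal usage sketch (the APDU bytes are illustrative only):
#   packets = _wrap_apdu(bytes.fromhex('e0010000'))
#   for packet in packets:
#       ...  # write each fixed-size packet to the device transport (e.g. HID)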
|
828521642b43758cf0c43f2c8af171d3463cacf5
| 3,645,488
|
from pathlib import Path
def build_dtree(bins):
"""
Build the directory tree out of what's under `user/`. The `dtree` is a
dict of:
string name -> 2-list [inumber, element]
, where element could be:
- Raw bytes for regular file
- A `dict` for directory, which recurses on
"""
def next_inumber():
"""
Allocate the next available inumber.
"""
global curr_inumber
inumber = curr_inumber
curr_inumber += 1
return inumber
for b in bins:
bpath = Path(b)
if not bpath.is_file():
print("Error: user binary '{}' is not a regular file".format(b))
exit(1)
parts = PurePath(b).parts
parents = parts[1:-1]
binary = parts[-1]
if parts[0] != "user":
print("Error: user binray '{}' is not under 'user/'".format(b))
exit(1)
if not binary.endswith(".bin"):
print("Error: user binray '{}' does not end with '.bin'".format(b))
exit(1)
binary = binary[:-4]
curr_dir = dtree
for d in parents:
if d not in curr_dir:
curr_dir[d] = [next_inumber(), dict()]
curr_dir = curr_dir[d][1]
with bpath.open(mode='br') as bfile:
curr_dir[binary] = [next_inumber(), bytearray(bfile.read())]
|
66248226318a6225ea17d82d535012447b33f7e5
| 3,645,489
|
import numpy as np
def _compose_image(digit, background):
"""Difference-blend a digit and a random patch from a background image."""
w, h, _ = background.shape
dw, dh, _ = digit.shape
x = np.random.randint(0, w - dw)
y = np.random.randint(0, h - dh)
bg = background[x:x+dw, y:y+dh]
return np.abs(bg - digit).astype(np.uint8)
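# A minimal usage sketch (random arrays stand in for a real digit and background):
#   digit = np.random.randint(0, 255, (28, 28, 3), dtype=np.uint8)
#   background = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)
#   blended = _compose_image(digit, background)  # shape (28, 28, 3)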
|
956e06623f0534bea93b446e9a742ae78aada69f
| 3,645,490
|
def permissions_vsr(func):
"""
    Decorator that wraps the result of ``func`` in an HTML paragraph tag.
    :param func: function that returns a string
    :return: wrapped function producing '<p>...</p>' markup
"""
def func_wrapper(name):
return "<p>{0}</p>".format(func(name))
return func_wrapper
|
a7e01f7711cab6bc46c004c4d062930c2a656eee
| 3,645,491
|
import numpy as np
import scipy.spatial
def tri_interpolate_zcoords(points: np.ndarray, triangles: np.ndarray, mesh_points: np.ndarray,
is_mesh_edge: np.ndarray, num_search_tris: int=10):
"""
Interpolate z-coordinates to a set of 2D points using 3D point coordinates and a triangular mesh.
If point is along a mesh boundary, the boundary values are used instead.
Returned values are:
z: The interpolated z-values
"""
# Get triangle centroid coordinates and create KD-tree.
tri_coords = points[triangles,:]
tri_coords2D = points[triangles,0:2]
tri_centroids = np.mean(tri_coords2D, axis=1)
tri_tree = scipy.spatial.cKDTree(tri_centroids)
# Loop over points.
coords2d = mesh_points[:,0:2]
num_mesh_points = coords2d.shape[0]
z = np.zeros(num_mesh_points, dtype=np.float64)
for point_num in range(num_mesh_points):
if not(is_mesh_edge[point_num]):
z[point_num] = project_2d_coords(tri_coords, coords2d[point_num,:], tri_tree, num_search_tris=num_search_tris)
return z
|
0a1702407c8a5b175b8fa8314eede203ac5a86ca
| 3,645,492
|
from typing import List
def getServiceTypes(**kwargs) -> List:
"""List types of services.
Returns:
List of distinct service types.
"""
services = getServices.__wrapped__()
types = [s['type'] for s in services]
uniq_types = [dict(t) for t in {tuple(sorted(d.items())) for d in types}]
return uniq_types
|
23bd7730b43c1d942450fc57c2a3c6f83f7c578c
| 3,645,493
|
from keras.callbacks import EarlyStopping, ModelCheckpoint
import pylab as plt
def train_model(train_data, test_data, model, model_name, optimizer, loss='mse', scale_factor=1000., batch_size=128, max_epochs=200, early_stop=True, plot_history=True):
""" Code to train a given model and save out to the designated path as given by 'model_name'
Parameters
----------
train_data : 2-tuple
(train_x, train_y) where train_x is the images and train_y is the Gaussian dot annotation images in the train split.
test_data : 2-tuple
(test_x, test_y) where test_x is the images and test_y is the Gaussian dot annotation images in the test split.
model : a Keras model
a defined Keras model
optimizer : Keras optimizer object
the gradient descent optimizer e.g. Adam, SGD instance used to optimizer the model. We used Adam() with default settings.
loss : string
one of 'mse' (mean squared error) or 'mae' (mean absolute error)
scale_factor : None or float
multiplicative factor to apply to annotation images to increase the gradient in the backpropagation
batch_size : int
number of images to batch together for training
max_epochs : int
the maximum number of epochs to train for if early_stop is enabled else this is the number of epochs of training.
early_stop : bool
if True, monitors the minimum of the test loss. If loss does not continue to decrease for a set duration, stop the training and return the model with the best test loss.
    plot_history : bool
if True, plots the training and test loss over the training period on the same axes for visualisation.
Returns
-------
None : void
This function will simply save the model to the location given by model_name.
"""
train_x, train_y = train_data
test_x, test_y = test_data
if scale_factor is not None:
train_y = train_y * float(scale_factor)
test_y = test_y * float(scale_factor)
# compile the model with chosen optimizer.
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
if early_stop:
""" Set some early stopping parameters """
early_stop = EarlyStopping(monitor='val_loss',
min_delta=0.001,
patience=15,
mode='min',
verbose=1)
checkpoint = ModelCheckpoint(model_name,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
period=1)
history = model.fit(train_x, train_y,
batch_size=batch_size,
                            epochs=max_epochs,
validation_data=(test_x, test_y), shuffle=True,
callbacks = [early_stop, checkpoint])
else:
history = model.fit(train_x, train_y,
batch_size=batch_size,
                            epochs=max_epochs,
validation_data=(test_x, test_y), shuffle=True)
model.save(model_name) # save the whole model state.
if plot_history:
plt.figure()
plt.plot(history.history['loss'], 'r', label='train loss')
plt.plot(history.history['val_loss'], 'g', label='test loss')
plt.legend()
plt.show()
return []
|
3d74e765065b8514dd43d0a0ba6f83542bc47b11
| 3,645,494
|
from elasticsearch import Elasticsearch
def get_pipeline_storage_es_client(session, *, index_date):
"""
Returns an Elasticsearch client for the pipeline-storage cluster.
"""
secret_prefix = f"elasticsearch/pipeline_storage_{index_date}"
host = get_secret_string(session, secret_id=f"{secret_prefix}/public_host")
port = get_secret_string(session, secret_id=f"{secret_prefix}/port")
protocol = get_secret_string(session, secret_id=f"{secret_prefix}/protocol")
username = get_secret_string(
session, secret_id=f"{secret_prefix}/read_only/es_username"
)
password = get_secret_string(
session, secret_id=f"{secret_prefix}/read_only/es_password"
)
return Elasticsearch(f"{protocol}://{username}:{password}@{host}:{port}")
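# A minimal usage sketch (assumes a boto3 session with access to the relevant secrets;
# the index date is illustrative only):
#   import boto3
#   es = get_pipeline_storage_es_client(boto3.Session(), index_date="2021-07-06")
#   es.cat.indices()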
|
8b759f1c2b6fa2b525a0a20653bd1ff99441e893
| 3,645,495
|
import resampy
def cqcc_resample(s, fs_orig, fs_new, axis=0):
"""implement the resample operation of CQCC
Parameters
----------
s : ``np.ndarray``
the input spectrogram.
fs_orig : ``int``
origin sample rate
fs_new : ``int``
new sample rate
axis : ``int``
the resample axis
Returns
-------
spec_res : ``np.ndarray``
spectrogram after resample
"""
if int(fs_orig) != int(fs_new):
s = resampy.resample(s, sr_orig=fs_orig, sr_new=fs_new,
axis=axis)
return s
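# A minimal usage sketch (the spectrogram is a random placeholder):
#   import numpy as np
#   spec = np.random.randn(96, 400)
#   spec_16k = cqcc_resample(spec, fs_orig=48000, fs_new=16000, axis=1)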
|
d252fdc2587c48d15d7f41224df3bfcd9e17693c
| 3,645,496
|
import torch.nn as nn
def weights_init():
"""
Gaussian init.
"""
def init_fun(m):
classname = m.__class__.__name__
if (classname.find("Conv") == 0 or classname.find("Linear") == 0) and hasattr(m, "weight"):
nn.init.normal_(m.weight, 0.0, 0.02)
if hasattr(m, "bias") and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
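# A minimal usage sketch (the model is illustrative only):
#   net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 1, 3))
#   net.apply(weights_init())  # recursively applies the Gaussian init to Conv/Linear layers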
|
f56aa9c988b93d30c6a78769bc0f2c86f0209cd8
| 3,645,497
|
def named(name):
"""
This function is used to decorate middleware functions in order
for their before and after sections to show up during a verbose run.
For examples see documentation to this module and tests.
"""
def new_annotate(mware):
def new_middleware(handler):
new_handler = mware(handler)
def verbose_handler(ctx):
_print_inwards(name)
new_ctx = new_handler(ctx)
_print_outwards(name)
return new_ctx
return verbose_handler
return new_middleware
return new_annotate
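# A minimal usage sketch (the middleware below is hypothetical):
#   @named("auth")
#   def auth_mware(handler):
#       def new_handler(ctx):
#           return handler(ctx)
#       return new_handler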
|
0f1cef0788eae16bf557b5f7cb01bd52e913203d
| 3,645,498
|
import pandas as pd
def concatFile(file_list):
    """ Concatenate the CSV files in file_list into a single DataFrame.
"""
config = getConfig()
print('[load]concating...')
df_list = []
for f in file_list:
print(f)
tmp = pd.read_csv(config['dir_raw']+f, index_col=None, header=0)
df_list.append(tmp)
df = pd.concat(df_list, axis=0, ignore_index=True)
return df
|
c289db2e1a995f3b536f2d472eed550843980635
| 3,645,499
|