content (string, lengths 22-815k) | id (int64, 0-4.91M)
|---|---|
def get_all_reports_url(url_1, url_2, headers=None):
    """ Returns all report URLs found on the page at urljoin(url_1, url_2) """
    if headers is None:
        header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'}
    else:
        header = headers
url = urljoin(url_1, url_2)
# initialize the session
session = HTMLSession()
# make the HTTP request and retrieve response
response = session.get(url, headers=header)
# execute Javascript with a timeout of 20 seconds
# response.html.render(timeout=20) ## pyppeteer.errors.TimeoutError: Navigation Timeout Exceeded: 20000 ms exceeded.
# construct the soup parser
soup = BeautifulSoup(response.html.html, "html.parser")
urls = []
table = soup.find("table", class_="ms-rteTable-5")
for report, name in zip(table.find_all("td", class_="ms-rteTableEvenCol-5"), table.find_all("td", class_="ms-rteTableOddCol-5")) :
report_url = report.find("a").attrs.get("href")
name = ((''.join(name.text.split())).replace("/", "-")).replace(" ", "").replace("\u200b", "")
        if not report_url:
            # if the link has no href attribute, just skip it
            continue
# make the URL absolute by joining domain with the URL that is just extracted
report_url = urljoin(url_1, report_url)
try:
pos = report_url.index("?")
report_url = report_url[:pos]
except ValueError:
pass
# finally, if the url is valid
if is_valid(report_url):
urls.append({'url':report_url, 'name':name})
# close the session to end browser process
session.close()
    # print the total number of reports found on the page
    print(f"Total {len(urls)} Reports Found!")
return urls
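# A minimal sketch (an assumption, not part of the snippet above) of the
# `is_valid` helper that get_all_reports_url relies on: a URL is treated as
# valid when it has both a scheme and a network location. The original
# implementation may differ.
from urllib.parse import urlparse

def is_valid(url):
    """Return True if `url` has both a scheme and a netloc."""
    parsed = urlparse(url)
    return bool(parsed.scheme) and bool(parsed.netloc)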
| 17,700
|
def generate_url_with_signature(endpoint, signature):
"""Generate a url for an endpoint with a signature.
Args:
endpoint: An endpoint referencing a method in the backend.
        signature: A signature serialized with relevant data and the secret
            salt.
Returns:
url for the given endpoint with signature attached.
"""
if os.environ.get('FLASK_ENV', 'development') == 'production':
return url_for(
endpoint, signature=signature, _external=True, _scheme='https'
)
return url_for(endpoint, signature=signature, _external=True)
| 17,701
|
def is_scalar(dt): # real signature unknown; restored from __doc__
"""
Return True if given value is scalar.
Parameters
----------
val : object
This includes:
- numpy array scalar (e.g. np.int64)
- Python builtin numerics
- Python builtin byte arrays and strings
- None
- datetime.datetime
- datetime.timedelta
- Period
- decimal.Decimal
- Interval
- DateOffset
- Fraction
- Number
Returns
-------
bool
Return True if given object is scalar, False otherwise
Examples
--------
    >>> import datetime
    >>> dt = datetime.datetime(2018, 10, 3)
    >>> pd.api.types.is_scalar(dt)
True
>>> pd.api.types.is_scalar([2, 3])
False
>>> pd.api.types.is_scalar({0: 1, 2: 3})
False
>>> pd.api.types.is_scalar((0, 2))
False
pandas supports PEP 3141 numbers:
>>> from fractions import Fraction
>>> pd.api.types.is_scalar(Fraction(3, 5))
True
"""
pass
| 17,702
|
def left_shift(k, n=32):
"""
Returns the n*n matrix corresponding to the operation
lambda v: vec_from_int(int_from_vec(v) << k, n)
>>> print_mat(left_shift(2, 6))
000000
000000
100000
010000
001000
000100
>>> int_from_vec(left_shift(2) * vec_from_int(42)) == 42 << 2
True
"""
D = set(range(n))
return Mat((D, D), {(j + k, j): one for j in range(n - k)})
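# A hedged numpy illustration of the same idea, independent of the Vec/Mat/one
# helpers assumed above: left-shifting an n-bit value by k (bits stored
# least-significant-bit first) is multiplication by a matrix with ones at
# positions (j + k, j), working modulo 2.
import numpy as np

def left_shift_matrix(k, n=8):
    M = np.zeros((n, n), dtype=int)
    for j in range(n - k):
        M[j + k, j] = 1
    return M

n, k = 8, 2
v = np.array([(42 >> j) & 1 for j in range(n)])        # bits of 42, LSB first
shifted = left_shift_matrix(k, n) @ v % 2
assert sum(int(b) << j for j, b in enumerate(shifted)) == (42 << k)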
| 17,703
|
def get_tensorboard_logger(
trainer: Engine, evaluators: ThreeEvaluators, metric_names: List[str]
) -> TensorboardLogger:
"""
    creates a ``tensorboard`` logger that reads metrics from the given evaluators and attaches it to a given trainer
:param trainer: an ``ignite`` trainer to attach to
:param evaluators: a triple of train, validation, and test evaluators to get metrics from
:param metric_names: a list of metrics to log during validation and testing
"""
tb_logger = TensorboardLogger(
log_dir=f"runs/{datetime.now()}", flush_secs=1
)
training_loss = OutputHandler(
"training",
["running_loss"],
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach(trainer, training_loss, Events.EPOCH_COMPLETED)
validation_loss = OutputHandler(
"validation",
metric_names,
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach(evaluators.validation, validation_loss, Events.COMPLETED)
test_loss = OutputHandler(
"test",
metric_names,
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach(evaluators.test, test_loss, Events.COMPLETED)
return tb_logger
| 17,704
|
def get_vocabs(datasets):
"""Build vocabulary from an iteration of dataset objects
Args:
        datasets: a list of dataset objects
Returns:
two sets of all the words and tags respectively in the dataset
"""
print("Building vocabulary...")
vocab_words = set()
vocab_tags = set()
for dataset in datasets:
for words, tags in dataset:
vocab_words.update(words)
vocab_tags.update(tags)
print("- done. {} tokens".format(len(vocab_words)))
return vocab_words, vocab_tags
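# A hedged usage sketch: each dataset only needs to yield (words, tags) pairs,
# so a plain list of tuples is enough to exercise the function.
toy_dataset = [(["the", "cat"], ["DET", "NOUN"]), (["cat", "sat"], ["NOUN", "VERB"])]
words, tags = get_vocabs([toy_dataset])
assert words == {"the", "cat", "sat"}
assert tags == {"DET", "NOUN", "VERB"}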
| 17,705
|
def check_row_uniqueness(board: list) -> bool:
"""
Return True if each row has no repeated digits.
Return False otherwise.
>>> check_row_uniqueness([\
"**** ****",\
"***1 ****",\
"** 3****",\
"* 4 1****",\
" 9 5 ",\
" 6 83 *",\
"3 1 **",\
" 8 2***",\
" 2 ****"\
])
True
>>> check_row_uniqueness([\
"**** ****",\
"***1 ****",\
"** 3****",\
"* 4 1****",\
" 5 9 5 ",\
" 6 83 *",\
"3 1 **",\
" 8 2***",\
" 2 ****"\
])
False
"""
global NUMBER
for row in board:
count = 0
row_set = set()
for char in row:
if char.isdigit():
if int(char) in range(1, NUMBER + 1):
count += 1
row_set.add(char)
if len(row_set) != count:
return False
return True
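# A hedged usage sketch: the function reads a module-level NUMBER constant
# (assumed here to be the board size, 9 for a standard 9x9 puzzle).
NUMBER = 9
assert check_row_uniqueness(["123", "456"]) is True
assert check_row_uniqueness(["11"]) is False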
| 17,706
|
def calculate_distance_to_divide(
grid, longest_path=True, add_to_grid=False, clobber=False
):
"""Calculate the along flow distance from drainage divide to point.
This utility calculates the along flow distance based on the results of
running flow accumulation on the grid. It will use the connectivity
used by the FlowAccumulator (e.g. D4, D8, Dinf).
Parameters
----------
grid : ModelGrid
longest_path : bool, optional
Take the longest (or shortest) path to a drainage divide. Default is
true.
add_to_grid : boolean, optional
Flag to indicate if the stream length field should be added to the
grid. Default is False. The field name used is ``distance_to_divide``.
    clobber : boolean, optional
        Flag to indicate if adding the field to the grid may overwrite (clobber)
        an existing field with the same name. Default is False.
Returns
-------
    distance_to_divide : float ndarray
        The along-flow distance from the drainage divide to each node of
        the grid.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.utils.distance_to_divide import (
... calculate_distance_to_divide)
>>> mg = RasterModelGrid((5, 4))
>>> elev = np.array([0., 0., 0., 0.,
... 0., 10., 10., 0.,
... 0., 20., 20., 0.,
... 0., 30., 30., 0.,
... 0., 0., 0., 0.])
>>> _ = mg.add_field("topographic__elevation", elev, at="node")
>>> mg.set_closed_boundaries_at_grid_edges(
... bottom_is_closed=False,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> fr = FlowAccumulator(mg, flow_director = 'D8')
>>> fr.run_one_step()
>>> distance_to_divide = calculate_distance_to_divide(
... mg,
... add_to_grid=True,
... clobber=True,
... )
>>> mg.at_node['distance_to_divide']
array([ 0., 3., 3., 0.,
0., 2., 2., 0.,
0., 1., 1., 0.,
0., 0., 0., 0.,
0., 0., 0., 0.])
    Now, let's change the flow_director method to MFD, which routes flow to
    multiple nodes.
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.utils.distance_to_divide import (
... calculate_distance_to_divide)
>>> mg = RasterModelGrid((5, 4), xy_spacing=(1, 1))
>>> elev = np.array([0., 0., 0., 0.,
... 0., 10., 10., 0.,
... 0., 20., 20., 0.,
... 0., 30., 30., 0.,
... 0., 0., 0., 0.])
>>> _ = mg.add_field("topographic__elevation", elev, at="node")
>>> mg.set_closed_boundaries_at_grid_edges(
... bottom_is_closed=False,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> fr = FlowAccumulator(mg, flow_director = 'MFD')
>>> fr.run_one_step()
>>> distance_to_divide = calculate_distance_to_divide(
... mg,
... add_to_grid=True,
... clobber=True,
... )
>>> mg.at_node['distance_to_divide']
array([ 0., 3., 3., 0.,
0., 2., 2., 0.,
0., 1., 1., 0.,
0., 0., 0., 0.,
0., 0., 0., 0.])
    The distance_to_divide utility can also work on irregular grids. For this
    example we will use a Hexagonal Model Grid, a special type of Voronoi grid
    that has regularly spaced hexagonal cells.
>>> from landlab import HexModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.utils.distance_to_divide import (
... calculate_distance_to_divide)
>>> dx = 1
>>> hmg = HexModelGrid((5, 3), dx)
>>> _ = hmg.add_field(
... "topographic__elevation",
... hmg.node_x + np.round(hmg.node_y),
... at="node",
... )
>>> hmg.status_at_node[hmg.boundary_nodes] = hmg.BC_NODE_IS_CLOSED
>>> hmg.status_at_node[0] = hmg.BC_NODE_IS_FIXED_VALUE
>>> fr = FlowAccumulator(hmg, flow_director = 'D4')
>>> fr.run_one_step()
>>> distance_to_divide = calculate_distance_to_divide(
... hmg,
... add_to_grid=True,
... clobber=True,
... )
>>> hmg.at_node['distance_to_divide']
array([ 3., 0., 0.,
0., 2., 1., 0.,
0., 1., 1., 0., 0.,
0., 0., 0., 0.,
0., 0., 0.])
"""
# check that flow__receiver nodes exists
if "flow__receiver_node" not in grid.at_node:
raise FieldError(
"A 'flow__receiver_node' field is required at the "
"nodes of the input grid."
)
if "flow__upstream_node_order" not in grid.at_node:
raise FieldError(
"A 'flow__upstream_node_order' field is required at the "
"nodes of the input grid."
)
if "drainage_area" not in grid.at_node:
raise FieldError(
"A 'flow__upstream_node_order' field is required at the "
"nodes of the input grid."
)
    # get the receiver nodes; depending on whether flow is to-one or
    # to-multiple, we'll need to read a different at-node field.
if grid.at_node["flow__receiver_node"].size != grid.size("node"):
to_one = False
else:
to_one = True
flow__receiver_node = grid.at_node["flow__receiver_node"]
drainage_area = grid.at_node["drainage_area"]
# get the upstream node order
flow__upstream_node_order = grid.at_node["flow__upstream_node_order"]
# get downstream flow link lengths, result depends on type of grid.
if isinstance(grid, RasterModelGrid):
flow_link_lengths = grid.length_of_d8[
grid.at_node["flow__link_to_receiver_node"]
]
else:
flow_link_lengths = grid.length_of_link[
grid.at_node["flow__link_to_receiver_node"]
]
    # create an array that represents the distance to the divide.
distance_to_divide = np.zeros(grid.nodes.size)
if not longest_path:
distance_to_divide[:] = 2 * grid.size("node") * np.max(flow_link_lengths)
# iterate through the flow__upstream_node_order backwards.
for node in reversed(flow__upstream_node_order):
        # if drainage area is equal to the node's cell area, set distance to zero.
        # this handles the drainage divide cells, as boundary cells have
        # their area set to zero.
if drainage_area[node] == grid.cell_area_at_node[node]:
distance_to_divide[node] = 0
        # get flow receivers
reciever = flow__receiver_node[node]
if to_one:
# if not processing an outlet node.
if reciever != node:
if longest_path:
cond = (
distance_to_divide[reciever]
< distance_to_divide[node] + flow_link_lengths[node]
)
else:
cond = (
distance_to_divide[reciever]
> distance_to_divide[node] + flow_link_lengths[node]
)
if cond:
distance_to_divide[reciever] = (
distance_to_divide[node] + flow_link_lengths[node]
)
else:
            # non-existent links are coded with -1
useable_receivers = np.where(reciever != grid.BAD_INDEX)[0]
for idx in range(len(useable_receivers)):
r = reciever[useable_receivers][idx]
fll = flow_link_lengths[node][useable_receivers][idx]
# if not processing an outlet node.
if r != node:
if longest_path:
cond = distance_to_divide[r] < distance_to_divide[node] + fll
else:
cond = distance_to_divide[r] > distance_to_divide[node] + fll
if cond:
distance_to_divide[r] = distance_to_divide[node] + fll
# store on the grid
if add_to_grid:
grid.add_field(
"distance_to_divide", distance_to_divide, at="node", clobber=clobber
)
return distance_to_divide
| 17,707
|
def run_example(device_id, do_plot=False):
"""
Run the example: Connect to a Zurich Instruments UHF Lock-in Amplifier or
UHFAWG, UHFQA, upload and run a basic AWG sequence program. It then demonstrates
how to upload (replace) a waveform without changing the sequencer program.
Requirements:
UHFLI with UHF-AWG Arbitrary Waveform Generator Option.
Hardware configuration: Connect signal output 1 to signal input 1 with a
BNC cable.
Arguments:
device_id (str): The ID of the device to run the example with. For
example, `dev2006` or `uhf-dev2006`.
do_plot (bool, optional): Specify whether to plot the signal measured by the scope
output. Default is no plot output.
Returns:
data: Data structure returned by the Scope
Raises:
Exception: If the UHF-AWG Option is not installed.
RuntimeError: If the device is not "discoverable" from the API.
See the "LabOne Programing Manual" for further help, available:
- On Windows via the Start-Menu:
Programs -> Zurich Instruments -> Documentation
- On Linux in the LabOne .tar.gz archive in the "Documentation"
sub-folder.
"""
# Settings
apilevel_example = 6 # The API level supported by this example.
err_msg = "This example can only be ran on either a UHFAWG, UHFQA or a UHF with the AWG option enabled."
# Call a zhinst utility function that returns:
# - an API session `daq` in order to communicate with devices via the data server.
# - the device ID string that specifies the device branch in the server's node hierarchy.
# - the device's discovery properties.
(daq, device, _) = zhinst.utils.create_api_session(device_id, apilevel_example, required_devtype='UHF',
required_options=['AWG'], required_err_msg=err_msg)
zhinst.utils.api_server_version_check(daq)
# Create a base configuration: Disable all available outputs, awgs, demods, scopes,...
zhinst.utils.disable_everything(daq, device)
# Now configure the instrument for this experiment. The following channels
# and indices work on all device configurations. The values below may be
# changed if the instrument has multiple input/output channels and/or either
# the Multifrequency or Multidemodulator options installed.
out_channel = 0
out_mixer_channel = 3
in_channel = 0
osc_index = 0
awg_channel = 0
frequency = 1e6
amplitude = 1.0
exp_setting = [
['/%s/sigins/%d/imp50' % (device, in_channel), 1],
['/%s/sigins/%d/ac' % (device, in_channel), 0],
['/%s/sigins/%d/diff' % (device, in_channel), 0],
['/%s/sigins/%d/range' % (device, in_channel), 1],
['/%s/oscs/%d/freq' % (device, osc_index), frequency],
['/%s/sigouts/%d/on' % (device, out_channel), 1],
['/%s/sigouts/%d/range' % (device, out_channel), 1],
['/%s/sigouts/%d/enables/%d' % (device, out_channel, out_mixer_channel), 1],
['/%s/sigouts/%d/amplitudes/*' % (device, out_channel), 0.],
['/%s/awgs/0/outputs/%d/amplitude' % (device, awg_channel), amplitude],
['/%s/awgs/0/outputs/0/mode' % device, 0],
['/%s/awgs/0/time' % device, 0],
['/%s/awgs/0/userregs/0' % device, 0]
]
daq.set(exp_setting)
daq.sync()
# Number of points in AWG waveform
AWG_N = 2000
# Define an AWG program as a string stored in the variable awg_program, equivalent to what would
# be entered in the Sequence Editor window in the graphical UI.
    # This example demonstrates four methods of defining waveforms via the API
# - (wave w0) loaded directly from programmatically generated CSV file wave0.csv.
# Waveform shape: Blackman window with negative amplitude.
# - (wave w1) using the waveform generation functionalities available in the AWG Sequencer language.
# Waveform shape: Gaussian function with positive amplitude.
# - (wave w2) using the vect() function and programmatic string replacement.
# Waveform shape: Single period of a sine wave.
# - (wave w3) directly writing an array of numbers to the AWG waveform memory.
# Waveform shape: Sinc function. In the sequencer language, the waveform is initially
# defined as an array of zeros. This placeholder array is later overwritten with the
# sinc function.
awg_program = textwrap.dedent("""\
const AWG_N = _c1_;
wave w0 = "wave0";
wave w1 = gauss(AWG_N, AWG_N/2, AWG_N/20);
wave w2 = vect(_w2_);
wave w3 = zeros(AWG_N);
while(getUserReg(0) == 0);
setTrigger(1);
setTrigger(0);
playWave(w0);
playWave(w1);
playWave(w2);
playWave(w3);
""")
# Define an array of values that are used to write values for wave w0 to a CSV file in the module's data directory
waveform_0 = -1.0 * np.blackman(AWG_N)
# Redefine the wave w1 in Python for later use in the plot
width = AWG_N/20
waveform_1 = np.exp(-(np.linspace(-AWG_N/2, AWG_N/2, AWG_N))**2/(2*width**2))
# Define an array of values that are used to generate wave w2
waveform_2 = np.sin(np.linspace(0, 2*np.pi, AWG_N))
# Fill the waveform values into the predefined program by inserting the array
# as comma-separated floating-point numbers into awg_program
awg_program = awg_program.replace('_w2_', ','.join([str(x) for x in waveform_2]))
# Do the same with the integer constant AWG_N
awg_program = awg_program.replace('_c1_', str(AWG_N))
# Create an instance of the AWG Module
awgModule = daq.awgModule()
awgModule.set('awgModule/device', device)
awgModule.execute()
    # Get the module's data directory
data_dir = awgModule.getString('awgModule/directory')
# All CSV files within the waves directory are automatically recognized by the AWG module
wave_dir = os.path.join(data_dir, "awg", "waves")
if not os.path.isdir(wave_dir):
# The data directory is created by the AWG module and should always exist. If this exception is raised,
# something might be wrong with the file system.
raise Exception("AWG module wave directory {} does not exist or is not a directory".format(wave_dir))
# Save waveform data to CSV
csv_file = os.path.join(wave_dir, "wave0.csv")
np.savetxt(csv_file, waveform_0)
# Transfer the AWG sequence program. Compilation starts automatically.
awgModule.set('awgModule/compiler/sourcestring', awg_program)
# Note: when using an AWG program from a source file (and only then), the compiler needs to
# be started explicitly with awgModule.set('awgModule/compiler/start', 1)
while awgModule.getInt('awgModule/compiler/status') == -1:
time.sleep(0.1)
if awgModule.getInt('awgModule/compiler/status') == 1:
# compilation failed, raise an exception
raise Exception(awgModule.getString('awgModule/compiler/statusstring'))
if awgModule.getInt('awgModule/compiler/status') == 0:
print("Compilation successful with no warnings, will upload the program to the instrument.")
if awgModule.getInt('awgModule/compiler/status') == 2:
print("Compilation successful with warnings, will upload the program to the instrument.")
print("Compiler warning: ", awgModule.getString('awgModule/compiler/statusstring'))
# Wait for the waveform upload to finish
time.sleep(0.2)
i = 0
while (awgModule.getDouble('awgModule/progress') < 1.0) and (awgModule.getInt('awgModule/elf/status') != 1):
print("{} awgModule/progress: {:.2f}".format(i, awgModule.getDouble('awgModule/progress')))
time.sleep(0.5)
i += 1
print("{} awgModule/progress: {:.2f}".format(i, awgModule.getDouble('awgModule/progress')))
if awgModule.getInt('awgModule/elf/status') == 0:
print("Upload to the instrument successful.")
if awgModule.getInt('awgModule/elf/status') == 1:
raise Exception("Upload to the instrument failed.")
# Replace the waveform w3 with a new one.
waveform_3 = np.sinc(np.linspace(-6*np.pi, 6*np.pi, AWG_N))
# The set command below on awgs/0/waveform/index defines the index of the waveform in the sequencer program to
# replace with the data that is written to awgs/0/waveform/data.
# Let N be the total number of waveforms and M>0 be the number of waveforms defined from CSV file. Then the index of
# the waveform to be replaced is defined as following:
# - 0,...,M-1 for all waveforms defined from CSV file alphabetically ordered by filename,
# - M,...,N-1 in the order that the waveforms are defined in the sequencer program.
# For the case of M=0, the index is defined as:
# - 0,...,N-1 in the order that the waveforms are defined in the sequencer program.
# Of course, for the trivial case of 1 waveform, use index=0 to replace it.
# Here we replace waveform w3, the 4th waveform defined in the sequencer program. Using 0-based indexing the
# index of the waveform we want to replace (w3, a vector of zeros) is 3:
index = 3
daq.setInt('/' + device + '/awgs/0/waveform/index', index)
daq.sync()
# Write the waveform to the memory. For the transferred array, floating-point (-1.0...+1.0)
    # as well as integer (-32768...+32767) data types are accepted.
# For dual-channel waves, interleaving is required.
daq.vectorWrite('/' + device + '/awgs/0/waveform/data', waveform_3)
# Configure the Scope for measurement
# 'channels/0/inputselect' : the input channel for the scope:
# 0 - signal input 1
daq.setInt('/%s/scopes/0/channels/0/inputselect' % (device), in_channel)
# 'time' : timescale of the wave, sets the sampling rate to 1.8GHz/2**time.
# 0 - sets the sampling rate to 1.8 GHz
# 1 - sets the sampling rate to 900 MHz
# ...
# 16 - sets the sampling rate to 27.5 kHz
daq.setInt('/%s/scopes/0/time' % device, 0)
# 'single' : only get a single scope shot.
# 0 - take continuous shots
# 1 - take a single shot
# Disable the scope.
daq.setInt('/%s/scopes/0/enable' % device, 0)
# Configure the length of the scope shot.
daq.setInt('/%s/scopes/0/length' % device, 10000)
# Now configure the scope's trigger to get aligned data
# 'trigenable' : enable the scope's trigger (boolean).
daq.setInt('/%s/scopes/0/trigenable' % device, 1)
# Specify the trigger channel:
#
# Here we trigger on the signal from UHF signal input 1. If the instrument has the DIG Option installed we could
# trigger the scope using an AWG Trigger instead (see the `setTrigger(1);` line in `awg_program` above).
# 0: Signal Input 1
# 192: AWG Trigger 1
trigchannel = 0
daq.setInt('/%s/scopes/0/trigchannel' % device, trigchannel)
if trigchannel == 0:
# Trigger on the falling edge of the negative blackman waveform `w0` from our AWG program.
daq.setInt('/%s/scopes/0/trigslope' % device, 2)
daq.setDouble('/%s/scopes/0/triglevel' % device, -0.600)
# Set hysteresis triggering threshold to avoid triggering on noise
# 'trighysteresis/mode' :
# 0 - absolute, use an absolute value ('scopes/0/trighysteresis/absolute')
    # 1 - relative, use a relative value ('scopes/0/trighysteresis/relative') of the trigchannel's input range
# (0.1=10%).
daq.setDouble('/%s/scopes/0/trighysteresis/mode' % device, 0)
daq.setDouble('/%s/scopes/0/trighysteresis/relative' % device, 0.025)
# Set a negative trigdelay to capture the beginning of the waveform.
trigdelay = -1.0e-6
daq.setDouble('/%s/scopes/0/trigdelay' % device, trigdelay)
else:
# Assume we're using an AWG Trigger, then the scope configuration is simple: Trigger on rising edge.
daq.setInt('/%s/scopes/0/trigslope' % device, 1)
# Set trigdelay to 0.0: Start recording from when the trigger is activated.
trigdelay = 0.0
daq.setDouble('/%s/scopes/0/trigdelay' % device, trigdelay)
trigreference = 0.0
# The trigger reference position relative within the wave, a value of 0.5 corresponds to the center of the wave.
daq.setDouble('/%s/scopes/0/trigreference' % device, trigreference)
# Set the hold off time in-between triggers.
daq.setDouble('/%s/scopes/0/trigholdoff' % device, 0.025)
# Set up the Scope Module.
scopeModule = daq.scopeModule()
scopeModule.set('scopeModule/mode', 1)
scopeModule.subscribe('/' + device + '/scopes/0/wave')
daq.setInt('/%s/scopes/0/single' % device, 1)
scopeModule.execute()
# Start the AWG in single-shot mode.
    # This is the preferred method of using the AWG: run in single mode; continuous waveform playback is best achieved by
# using an infinite loop (e.g., while (true)) in the sequencer program.
daq.set([['/' + device + '/awgs/0/single', 1],
['/' + device + '/awgs/0/enable', 1]])
daq.sync()
# Start the scope...
daq.setInt('/%s/scopes/0/enable' % device, 1)
daq.sync()
time.sleep(1.0)
daq.setInt('/%s/awgs/0/userregs/0' % device, 1)
# Read the scope data with timeout.
local_timeout = 2.0
records = 0
while (records < 1) and (local_timeout > 0):
time.sleep(0.1)
local_timeout -= 0.1
records = scopeModule.getInt("scopeModule/records")
# Disable the scope.
daq.setInt('/%s/scopes/0/enable' % device, 0)
data_read = scopeModule.read(True)
wave_nodepath = '/{}/scopes/0/wave'.format(device)
assert wave_nodepath in data_read, "Error: The subscribed data `{}` was returned.".format(wave_nodepath)
data = data_read[wave_nodepath][0][0]
f_s = 1.8e9 # sampling rate of scope and AWG
for n in range(0, len(data['channelenable'])):
p = data['channelenable'][n]
if p:
y_measured = data['wave'][n]
x_measured = np.arange(-data['totalsamples'], 0)*data['dt'] + \
(data['timestamp'] - data['triggertimestamp'])/f_s
# Compare expected and measured signal
full_scale = 0.75
y_expected = np.concatenate((waveform_0, waveform_1, waveform_2, waveform_3))*full_scale*amplitude
x_expected = np.linspace(0, 4*AWG_N/f_s, 4*AWG_N)
# Correlate measured and expected signal
corr_meas_expect = np.correlate(y_measured, y_expected)
index_match = np.argmax(corr_meas_expect)
if do_plot:
# The shift between measured and expected signal depends among other things on cable length.
# We simply determine the shift experimentally and then plot the signals with an according correction
# on the horizontal axis.
x_shift = index_match/f_s - trigreference*(x_measured[-1] - x_measured[0]) + trigdelay
import matplotlib.pyplot as plt
print('Plotting the expected and measured AWG signal.')
x_unit = 1e-9
plt.figure(1)
plt.clf()
plt.title('Measured and expected AWG Signals')
plt.plot(x_measured/x_unit, y_measured, label='measured')
plt.plot((x_expected + x_shift)/x_unit, y_expected, label='expected')
plt.grid(True)
plt.autoscale(axis='x', tight=True)
plt.legend(loc='upper left')
plt.xlabel('Time, relative to trigger (ns)')
plt.ylabel('Voltage (V)')
plt.draw()
plt.show()
# Normalize the correlation coefficient by the two waveforms and check they
# agree to 95%.
norm_correlation_coeff = corr_meas_expect[index_match]/np.sqrt(sum(y_measured**2)*sum(y_expected**2))
assert norm_correlation_coeff > 0.95, \
("Detected a disagreement between the measured and expected signals, "
"normalized correlation coefficient: {}.".format(norm_correlation_coeff))
print("Measured and expected signals agree, normalized correlation coefficient: ",
norm_correlation_coeff, ".", sep="")
return data_read
| 17,708
|
def string_unquote(value: str):
"""
    Remove all single and double quote characters from a string
    Args:
        value: the value to unquote
    Returns:
        the unquoted string (non-string values are returned unchanged)
"""
if not isinstance(value, str):
return value
return value.replace('"', "").replace("'", "")
| 17,709
|
def test_vertical_interpolation_target_depth(vertical_values):
"""Test vertical interpolation of u/v to default target depth."""
u_target_depth, v_target_depth = vertical_interpolation(vertical_values.u, vertical_values.v, vertical_values.depth,
vertical_values.num_x, vertical_values.num_y,
vertical_values.time_index,
vertical_values.target_depth_default)
print(f"u_target_depth_default: {u_target_depth}")
print(f"v_target_depth_default: {v_target_depth}")
assert numpy.allclose(u_target_depth, vertical_values.expected_u_target_depth_default)
assert numpy.allclose(v_target_depth, vertical_values.expected_v_target_depth_default)
| 17,710
|
def test_datasets_ls_files_lfs(tmpdir, large_file, runner, project):
"""Test file listing lfs status."""
# NOTE: create a dataset
result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
assert 0 == result.exit_code, format_result_exception(result)
assert "OK" in result.output
# NOTE: create some data
paths = []
new_file = tmpdir.join("file_1")
new_file.write(str(1))
paths.append(str(new_file))
paths.append(str(large_file))
# NOTE: add data to dataset
result = runner.invoke(cli, ["dataset", "add", "my-dataset"] + paths, catch_exceptions=False)
assert 0 == result.exit_code, format_result_exception(result)
# NOTE: check files
result = runner.invoke(cli, ["dataset", "ls-files"])
assert 0 == result.exit_code, format_result_exception(result)
lines = result.output.split("\n")
file1_entry = next(line for line in lines if "file_1" in line)
file2_entry = next(line for line in lines if large_file.name in line)
assert file1_entry
assert file2_entry
assert not file1_entry.endswith("*")
assert file2_entry.endswith("*")
| 17,711
|
def main():
"""
    Ask for the user's name and age, and decide which game to play based on the age.
"""
name = input("Hello, What's your name? ")
valid_input, age = False, -1
while not valid_input:
valid_input, age = validate_age(name)
game = FrogWorld if age < 18 else WizardWorld
env = GameEvn(game(name))
env.play()
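# A minimal sketch (assumption, not the original) of the validate_age helper
# used above: it returns (False, -1) on invalid input so the while loop keeps
# prompting, and (True, age) once a valid integer is entered.
def validate_age(name):
    try:
        age = int(input(f"Welcome {name}. How old are you? "))
    except ValueError:
        print("Age is not an integer, please try again.")
        return False, -1
    return True, age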
| 17,712
|
def translate(tx, ty, tz):
"""Translate."""
return affine(t=[tx, ty, tz])
| 17,713
|
def job_complete_worker(
completed_work_queue, work_db_path, clean_result, n_expected):
"""Update the database with completed work.
Args:
completed_work_queue (queue): queue with (working_dir, job_id)
incoming from each stitched raster
work_db_path (str): path to the work database
clean_result (bool): if true, delete the working directory after
``n_expected`` results come through.
n_expected (int): number of expected duplicate jobs to come through
before marking complete.
Return:
``None``
"""
try:
start_time = time.time()
connection = sqlite3.connect(work_db_path)
uncommited_count = 0
processed_so_far = 0
working_jobs = collections.defaultdict(int)
global WATERSHEDS_TO_PROCESS_COUNT
LOGGER.info(
f'started job complete worker, initial watersheds '
f'{WATERSHEDS_TO_PROCESS_COUNT}')
while True:
payload = completed_work_queue.get()
if payload is None:
LOGGER.info('got None in completed work, terminating')
break
working_dir, job_id = payload
working_jobs[job_id] += 1
if working_jobs[job_id] < n_expected:
continue
# we got n_expected, so mark complete
del working_jobs[job_id]
WATERSHEDS_TO_PROCESS_COUNT -= 1
if clean_result:
shutil.rmtree(working_dir, ignore_errors=True)
sql_command = (
f"""
INSERT INTO completed_job_ids VALUES ("{job_id}")
""")
LOGGER.debug(sql_command)
cursor = connection.execute(sql_command)
cursor.close()
LOGGER.info(f'done with {job_id} {working_dir}')
uncommited_count += 1
if uncommited_count > N_TO_STITCH:
connection.commit()
processed_so_far += uncommited_count
watersheds_per_sec = (
processed_so_far / (time.time() - start_time))
uncommited_count = 0
remaining_time_s = (
WATERSHEDS_TO_PROCESS_COUNT / watersheds_per_sec)
remaining_time_h = int(remaining_time_s // 3600)
remaining_time_s -= remaining_time_h * 3600
remaining_time_m = int(remaining_time_s // 60)
remaining_time_s -= remaining_time_m * 60
LOGGER.info(
f'remaining watersheds to process: '
f'{WATERSHEDS_TO_PROCESS_COUNT} - '
f'processed so far {processed_so_far} - '
f'process/sec: {watersheds_per_sec:.1f} - '
f'time left: {remaining_time_h}:'
f'{remaining_time_m:02d}:{remaining_time_s:04.1f}')
connection.commit()
connection.close()
cursor = None
connection = None
except Exception:
LOGGER.exception('error on job complete worker')
raise
| 17,714
|
def randomized_svd_gpu(M, n_components, n_oversamples=10, n_iter='auto',
                       transpose='auto', random_state=0, lib='pytorch', tocpu=True):
"""Computes a truncated randomized SVD on GPU. Adapted from Sklearn.
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
    n_iter : int or 'auto' (default is 'auto')
        Number of power iterations. It can be used to deal with very noisy
        problems. When 'auto', it is set to 4, unless `n_components` is small
        (< .1 * min(M.shape)), in which case `n_iter` is set to 7.
        This improves precision with few components.
transpose : True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
    lib : {'cupy', 'pytorch'}, str optional
        Chooses the GPU library to be used.
    tocpu : bool, optional
        If True (default), the results are moved back to the CPU and returned
        as numpy arrays; otherwise the GPU tensors are returned directly.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision).
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter == 'auto':
# Checks if the number of iterations is explicitly specified
n_iter = 7 if n_components < .1 * min(M.shape) else 4
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
M = M.T # this implementation is a bit faster with smaller shape[1]
if lib == 'pytorch':
M_gpu = torch.Tensor.cuda(torch.from_numpy(M.astype('float32')))
# Generating normal random vectors with shape: (M.shape[1], n_random)
Q = torch.cuda.FloatTensor(M_gpu.shape[1], n_random).normal_()
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of M in Q
for i in range(n_iter):
Q = torch.mm(M_gpu, Q)
Q = torch.mm(torch.transpose(M_gpu, 0, 1), Q)
        # Sample the range of M by linear projection of Q and extract an orthonormal basis
Q, _ = torch.qr(torch.mm(M_gpu, Q))
# project M to the (k + p) dimensional space using the basis vectors
B = torch.mm(torch.transpose(Q, 0, 1), M_gpu)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = torch.svd(B)
del B
U = torch.mm(Q, Uhat)
if transpose:
# transpose back the results according to the input convention
U, s, V=(torch.transpose(V[:n_components, :], 0, 1),s[:n_components],torch.transpose(U[:, :n_components], 0, 1))
else:
U, s, V=( U[:, :n_components], s[:n_components], V[:n_components, :])
if tocpu is True:
return np.array(U.cpu()).astype('float'), np.array(s.cpu()).astype('float'), np.array(V.cpu()).astype('float')
else:
return U, s, V
| 17,715
|
def clean_column_names(df: pd.DataFrame) -> pd.DataFrame:
"""Cleans the column names of the given dataframe by applying the following steps
after using the janitor `clean_names` function:
* strips any 'unnamed' field, for example 'Unnamed: 0'
* replaces the first missing name with 'is_away'
    * converts '#' to '_nbr'
* converts '%' to '_pct'
Args:
df (pd.DataFrame): The dataframe to clean the column names of.
Returns:
pd.DataFrame: The dataframe with cleaned column names.
"""
df = clean_names(df)
cols = df.columns
cols = [re.sub("unnamed_[0-9]+_level_[0-9]", "", x).strip("_") for x in cols]
# away will always be the first empty string following cleaning step above
cols[cols.index("")] = "is_away"
cols = [x.replace("#", "_nbr") for x in cols]
cols = [x.replace("%", "_pct") for x in cols]
cols = ["is_active" if x == "status" else x for x in cols]
cols = ["is_start" if x == "gs" else x for x in cols]
df.columns = cols
return df
| 17,716
|
def cos(x):
"""Return the cosine.
INPUTS
x (Variable object or real number)
RETURNS
if x is a Variable, then return a Variable with val and der.
if x is a real number, then return the value of np.cos(x).
EXAMPLES
>>> x = Variable(0, name='x')
>>> t = cos(x)
>>> print(t.val, t.der['x'])
1.0 0.0
"""
try:
val = np.cos(x.val)
ders = defaultdict(float)
sec_ders = defaultdict(float)
for key in x.der:
ders[key] += -np.sin(x.val) * (x.der[key])
sec_ders[key] += -x.sec_der[key]*np.sin(x.val)+(x.der[key]**2)*(-np.cos(x.val))
return Variable(val, ders, sec_ders)
except AttributeError:
return np.cos(x)
| 17,717
|
def write_match(out, superlocus, name, match_status, match_type):
"""Write match to output file."""
if not out:
return
for locus in sorted(superlocus, key=NormalizedLocus.record_order_key):
sample = locus.record.samples[name]
sample['BD'] = match_status
sample['BK'] = match_type
out.write(locus.record)
| 17,718
|
def _get_regions(connection):
""" Get list of regions in database excluding GB. If no regions are found,
a ValueError is raised.
"""
query_regions = connection.execute(
db.select([models.Region.code]).where(models.Region.code != "GB")
)
regions = [r[0] for r in query_regions]
if not regions:
raise ValueError("NPTG data not populated yet.")
return regions
| 17,719
|
def process_audit(logger, settings, sc_client, audit, get_started):
"""
Export audit in the format specified in settings. Formats include PDF, JSON, CSV, MS Word (docx), media, or
web report link.
:param logger: The logger
:param settings: Settings from command line and configuration file
:param sc_client: instance of safetypy.SafetyCulture class
:param audit: Audit JSON to be exported
"""
if not check_if_media_sync_offset_satisfied(logger, settings, audit):
return
audit_id = audit['audit_id']
logger.info('downloading ' + audit_id)
audit_json = sc_client.get_audit(audit_id)
template_id = audit_json['template_id']
preference_id = None
if settings[PREFERENCES] is not None and template_id in settings[PREFERENCES].keys():
preference_id = settings[PREFERENCES][template_id]
export_filename = parse_export_filename(audit_json, settings[FILENAME_ITEM_ID]) or audit_id
for export_format in settings[EXPORT_FORMATS]:
if export_format in ['pdf', 'docx']:
export_audit_pdf_word(logger, sc_client, settings, audit_id, preference_id, export_format, export_filename)
elif export_format == 'json':
export_audit_json(logger, settings, audit_json, export_filename)
elif export_format == 'csv':
export_audit_csv(settings, audit_json)
elif export_format in ['sql', 'pickle']:
if get_started[0] == 'complete':
export_audit_pandas(logger, settings, audit_json, get_started)
elif get_started[0] != 'complete':
logger.error('Something went wrong connecting to the database, please check your settings.')
sys.exit(1)
elif export_format == 'media':
export_audit_media(logger, sc_client, settings, audit_json, audit_id, export_filename)
elif export_format == 'web-report-link':
export_audit_web_report_link(logger, settings, sc_client, audit_json, audit_id, template_id)
logger.debug('setting last modified to ' + audit['modified_at'])
update_sync_marker_file(audit['modified_at'], settings[CONFIG_NAME])
| 17,720
|
def word_embedding_forward(x, W):
"""
Forward pass for word embeddings. We operate on minibatches of size N where
each sequence has length T. We assume a vocabulary of V words, assigning each
to a vector of dimension D.
Inputs:
- x: Integer array of shape (N, T) giving indices of words. Each element idx
      of x must be in the range 0 <= idx < V.
- W: Weight matrix of shape (V, D) giving word vectors for all words.
Returns a tuple of:
- out: Array of shape (N, T, D) giving word vectors for all input words.
"""
out = None
##############################################################################
# TODO: Implement the forward pass for word embeddings. #
# #
# HINT: This should be very simple. #
##############################################################################
pass
##############################################################################
# END OF YOUR CODE #
##############################################################################
return out
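# A minimal sketch of the forward pass described in the stub above (this is an
# assumption, not the graded solution): integer fancy indexing into W with the
# (N, T) index array directly yields the (N, T, D) output.
import numpy as np

def word_embedding_forward_sketch(x, W):
    return W[x]

W_demo = np.arange(12, dtype=float).reshape(4, 3)   # V=4 words, D=3 dims
x_demo = np.array([[0, 3], [1, 2]])                 # N=2 sequences of length T=2
assert word_embedding_forward_sketch(x_demo, W_demo).shape == (2, 2, 3)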
| 17,721
|
def retrieve_from_stream(iden, interval=60):
"""
Return messages from a stream.
:param iden: Identifier of the stream.
:param interval: defaults to messages of last 60 seconds.
"""
return stm_str.get_messages(str(UID), str(TOKEN), interval, iden)
| 17,722
|
def assert_allowed_methods(path, methods, app):
"""Ensures that a URL only allows a set of HTTP methods.
It not only checks that the HTTP methods passed in the ``methods`` parameter
are allowed, but also that the URL does not allow methods not included in
the list.
If the path provided does not exist a :class:`werkzeug.exceptions.NotFound`
exception is raised.
:param path: The string URL to test.
:param methods: List of HTTP methods expected to be allowed.
:param app: A Flask application instance.
"""
adapter = app.url_map.bind("")
# Get the list of allowed methods
current_methods = adapter.allowed_methods(path)
    # If the list is empty, it is because no route matches the path.
if not current_methods:
raise NotFound(
f"{path}. Make sure that an endpoint is implemented that handles it."
)
current_methods.remove("OPTIONS")
    # Check if HEAD is present, because endpoints that implement GET
    # implicitly also handle the HEAD method.
if "HEAD" in current_methods:
current_methods.remove("HEAD")
for current_method in current_methods:
assert (
current_method in methods
), "The path `{}` should not allow the method {}".format(
path, current_method
)
for expected_method in methods:
assert (
expected_method in current_methods
), "The path `{}` does not implement the method {}".format(
path, expected_method
)
| 17,723
|
def _raise_runtime_error(info, param=None):
"""
    Raise RuntimeError in both graph and pynative modes
    Args:
        info(str): info string to display
        param(python obj): any object that can be recognized by graph mode. If it is
            not None, then param's value information will be extracted and displayed.
            Default is None.
"""
if param is None:
raise RuntimeError(info)
raise RuntimeError(info + f"{param}")
| 17,724
|
def post_tweet(status):
"""This Function will submit a Tweet for you. In order to do this
you need to call the function with a valid string. Note you can also
add emojis. E.g
post_status("Python is awesome") """
twitter = authenticate()
twitter.update_status(status=status)
| 17,725
|
def train_op(tot_loss, lr, var_opt, name):
"""
When only the discriminator is trained, the learning rate is set to be 0.0008
When the generator model is also trained, the learning rate is set to be 0.0004
    Since there are batch_normalization layers in the model, we need to use update_ops to keep the training and test moving averages
    of the batch_norm parameters up to date
"""
# optimizer = tf.train.RMSPropOptimizer(learning_rate = lr)
epsilon = 1e-4 # added on 18th of July
optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=epsilon, name=name)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
grads = optimizer.compute_gradients(tot_loss, var_list=var_opt)
print("================================================")
print("I am printing the non gradient")
for grad, var in grads:
if grad is None:
print("no gradient", grad, var)
print("================================================")
opt = optimizer.apply_gradients(grads)
return opt
| 17,726
|
def logadd(x, y):
"""Adds two log values.
Ensures accuracy even when the difference between values is large.
"""
if x < y:
temp = x
x = y
y = temp
z = math.exp(y - x)
logProb = x + math.log(1.0 + z)
if logProb < _MinLogExp:
return _MinLogExp
else:
return logProb
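# A hedged usage sketch: _MinLogExp is assumed to be a large negative floor
# defined next to the original function; any sufficiently small log value works
# for the illustration.
import math

_MinLogExp = math.log(1e-300)
assert abs(math.exp(logadd(math.log(0.25), math.log(0.25))) - 0.5) < 1e-12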
| 17,727
|
def getResourceNameString(ro_config, rname, base=None):
"""
    Returns a string value corresponding to a URI indicated by the supplied parameter.
    Relative references are assumed to be paths relative to the supplied base URI or,
    if no base is supplied, relative to the current directory.
"""
rsplit = rname.split(":")
if len(rsplit) == 2:
# Try to interpret name as CURIE
for rpref in ro_config["annotationPrefixes"]:
if rsplit[0] == rpref:
rname = ro_config["annotationPrefixes"][rpref]+rsplit[1]
if urlparse.urlsplit(rname).scheme == "":
if base:
rname = resolveUri(rname, base)
else:
rname = resolveFileAsUri(rname)
return rname
| 17,728
|
def NMFcomponents(ref, ref_err = None, n_components = None, maxiters = 1e3, oneByOne = False, path_save = None):
"""Returns the NMF components, where the rows contain the information.
Input: ref and ref_err should be (N * p) where n is the number of references, p is the number of pixels in each reference.
path_save (string): a path to save intermediate results to calculate additional componetns with previous calculated information. Default: None.
Output: NMf components (n_components * p).
"""
if ref_err is None:
ref_err = np.sqrt(ref)
if (n_components is None) or (n_components > ref.shape[0]):
n_components = ref.shape[0]
ref[ref < 0] = 0
ref_err[ref <= 0] = np.nanpercentile(ref_err, 95)*10 #Setting the err of <= 0 pixels to be max error to reduce their impact
ref_columnized = ref.T #columnize ref, making the columns contain the information
ref_err_columnized = ref_err.T # columnize ref_err, making the columns contain the information
components_column = 0
if not oneByOne:
if path_save is not None:
print('path_save is only supported when oneByOne == True.')
g_img = nmf.NMF(ref_columnized, V=1.0/ref_err_columnized**2, n_components=n_components)
chi2, time_used = g_img.SolveNMF(maxiters=maxiters)
components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components
else:
print("Building components one by one...")
if path_save is None:
for i in range(n_components):
print("\t" + str(i+1) + " of " + str(n_components))
n = i + 1
if (i == 0):
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, n_components= n)
else:
W_ini = np.random.rand(ref_columnized.shape[0], n)
W_ini[:, :(n-1)] = np.copy(g_img.W)
W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory.
H_ini = np.random.rand(n, ref_columnized.shape[1])
H_ini[:(n-1), :] = np.copy(g_img.H)
H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory.
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n)
chi2 = g_img.SolveNMF(maxiters=maxiters)
components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components
else:
print('\t path_save provided, you might want to load data and continue previous component calculation')
print('\t\t loading from ' + path_save + '_comp.fits for components.')
if not os.path.exists(path_save + '_comp.fits'):
print('\t\t ' + path_save + '_comp.fits does not exist, calculating from scratch.')
for i in range(n_components):
print("\t" + str(i+1) + " of " + str(n_components))
n = i + 1
if (i == 0):
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, n_components= n)
else:
W_ini = np.random.rand(ref_columnized.shape[0], n)
W_ini[:, :(n-1)] = np.copy(g_img.W)
W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory.
H_ini = np.random.rand(n, ref_columnized.shape[1])
H_ini[:(n-1), :] = np.copy(g_img.H)
H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory.
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n)
chi2 = g_img.SolveNMF(maxiters=maxiters)
print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D component matrix at ' + path_save + '_comp.fits')
fits.writeto(path_save + '_comp.fits', g_img.W, overwrite = True)
print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D coefficient matrix at ' + path_save + '_coef.fits')
fits.writeto(path_save + '_coef.fits', g_img.H, overwrite = True)
components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components
else:
W_assign = fits.getdata(path_save + '_comp.fits')
H_assign = fits.getdata(path_save + '_coef.fits')
if W_assign.shape[1] >= n_components:
print('You have already had ' + str(W_assign.shape[1]) + ' components while asking for ' + str(n_components) + '. Returning to your input.')
components_column = W_assign/np.sqrt(np.nansum(W_assign**2, axis = 0))
components = decolumnize(components_column, mask = mask)
else:
print('You are asking for ' + str(n_components) + ' components. Building the rest based on the ' + str(W_assign.shape[1]) + ' provided.')
for i in range(W_assign.shape[1], n_components):
print("\t" + str(i+1) + " of " + str(n_components))
n = i + 1
if (i == W_assign.shape[1]):
W_ini = np.random.rand(ref_columnized.shape[0], n)
W_ini[:, :(n-1)] = np.copy(W_assign)
W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory.
H_ini = np.random.rand(n, ref_columnized.shape[1])
H_ini[:(n-1), :] = np.copy(H_assign)
H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory.
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n)
else:
W_ini = np.random.rand(ref_columnized.shape[0], n)
W_ini[:, :(n-1)] = np.copy(g_img.W)
W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory.
H_ini = np.random.rand(n, ref_columnized.shape[1])
H_ini[:(n-1), :] = np.copy(g_img.H)
H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory.
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n)
chi2 = g_img.SolveNMF(maxiters=maxiters)
print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D component matrix at ' + path_save + '_comp.fits')
fits.writeto(path_save + '_comp.fits', g_img.W, overwrite = True)
print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D coefficient matrix at ' + path_save + '_coef.fits')
fits.writeto(path_save + '_coef.fits', g_img.H, overwrite = True)
components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components
return components_column.T
| 17,729
|
def parse_args():
"""
Parses arguments provided through the command line.
"""
# Initialize
parser = argparse.ArgumentParser()
# Arguments
parser.add_argument("meme_file", metavar="motifs.meme")
parser.add_argument("tomtom_file", metavar="tomtom.txt")
parser.add_argument("clusters_file", metavar="clusters.txt")
parser.add_argument(
"--cluster",
default=None,
help="cluster (defaut: all)",
type=int,
)
parser.add_argument(
"--out-dir",
default="./",
help="output directory (default: ./)",
)
return(parser.parse_args())
| 17,730
|
def get_mean_from_protobin(filename):
"""Get image mean from protobinary and return ndarray with skimage format.
"""
img = read_caffe_protobin(filename)
size = (img.channels, img.height, img.width)
img = caffe.io.blobproto_to_array(img).reshape(size)
img = img.transpose([1, 2, 0])
return img
| 17,731
|
def has_key(key):
"""
Check if key is in the minion datastore
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' data.has_key <mykey>
"""
store = load()
return key in store
| 17,732
|
def delete_file(file):
"""
    Delete the file if it exists
:param file: the file to delete
:type file: str
:return: None
"""
if os.path.isfile(file):
os.remove(file)
| 17,733
|
def add_subprofile(subprofile, user_id, data):
"""Create a news subprofile for for a user."""
user = User.query.filter(User.id == user_id).first()
if not user:
raise KeyError('Cannot find user with id: ' + str(user_id))
url = "https://api.copernica.com/profile/{}/subprofiles/{}?access_token={}"
url = url.format(user.copernica_id, subprofile,
app.config['COPERNICA_API_KEY'])
requests.post(url, data)
| 17,734
|
def __create_setting(ingest):
"""Creates the setting for a particular family"""
signer, addresser, auth_keys, threshold = ingest
settings = Settings(
auth_list=','.join(auth_keys),
threshold=threshold)
return (
signer,
addresser,
SettingPayload(
action=SettingPayload.CREATE,
dimension=addresser.family,
data=settings.SerializeToString()))
| 17,735
|
def test_separation(expected_great_circle):
"""
Tests that all separation functions return a dataframe using a variety of inputs.
"""
cmd = reset_simulation()
assert cmd == True
cmd = create_aircraft(
aircraft_id=aircraft_id,
type=type,
latitude=latitude,
longitude=longitude,
heading=heading,
flight_level=flight_level,
speed=speed,
)
assert cmd == True
cmd = create_aircraft(
aircraft_id=aircraft_id_2,
type=type_2,
latitude=latitude_2,
longitude=longitude_2,
heading=heading_2,
flight_level=flight_level_2,
speed=speed_2,
)
assert cmd == True
separation1 = geodesic_separation(
from_aircraft_id=[aircraft_id, aircraft_id_2],
to_aircraft_id=[aircraft_id, aircraft_id_2],
)
assert isinstance(separation1, pd.DataFrame)
assert separation1.loc[aircraft_id, aircraft_id_2] == pytest.approx(1000 * 176.92, 0.01)
separation2 = great_circle_separation(
from_aircraft_id=[aircraft_id, aircraft_id_2], to_aircraft_id=aircraft_id
)
assert isinstance(separation2, pd.DataFrame)
expected = expected_great_circle(latitude, longitude, latitude_2, longitude_2)
assert separation2.loc[aircraft_id_2, aircraft_id] == pytest.approx(expected, 0.01)
separation3 = vertical_separation(
from_aircraft_id=aircraft_id, to_aircraft_id=[aircraft_id, aircraft_id_2]
)
assert isinstance(separation3, pd.DataFrame)
## altitude is provided as flight_level, which must be converted to:
# feet (*100) and then to metres (*0.3048)
assert (
separation3.loc[aircraft_id, aircraft_id_2]
== abs(flight_level - flight_level_2) * 100 * SCALE_FEET_TO_METRES
)
separation4 = euclidean_separation(
from_aircraft_id=aircraft_id, to_aircraft_id=aircraft_id_2
)
assert isinstance(separation4, pd.DataFrame)
ecef = pyproj.Proj(proj="geocent", ellps="WGS84", datum="WGS84")
lla = pyproj.Proj(proj="latlong", ellps="WGS84", datum="WGS84")
from_ECEF = pyproj.transform(
lla, ecef, longitude, latitude, flight_level * 100 * SCALE_FEET_TO_METRES
)
to_ECEF = pyproj.transform(
lla, ecef, longitude_2, latitude_2, flight_level_2 * 100 * SCALE_FEET_TO_METRES
)
assert separation4.loc[aircraft_id, aircraft_id_2] == pytest.approx(
euclidean(from_ECEF, to_ECEF), 0.01
)
separation5 = euclidean_separation(from_aircraft_id=aircraft_id_2)
assert isinstance(separation5, pd.DataFrame)
assert separation5.loc[aircraft_id_2, aircraft_id_2] == 0
| 17,736
|
def paser_bs(sent):
"""Convert compacted bs span to triple list
Ex:
"""
sent=sent.strip('<sos_b>').strip('<eos_b>')
sent = sent.split()
belief_state = []
domain_idx = [idx for idx,token in enumerate(sent) if token in all_domain]
for i,d_idx in enumerate(domain_idx):
next_d_idx = len(sent) if i+1 == len(domain_idx) else domain_idx[i+1]
domain = sent[d_idx]
sub_span = sent[d_idx+1:next_d_idx]
sub_s_idx = [idx for idx,token in enumerate(sub_span) if token in all_slots]
for j,s_idx in enumerate(sub_s_idx):
next_s_idx = len(sub_span) if j == len(sub_s_idx) - 1 else sub_s_idx[j+1]
slot = sub_span[s_idx]
value = ' '.join(sub_span[s_idx+1:next_s_idx])
bs = " ".join([domain,slot,value])
belief_state.append(bs)
return list(set(belief_state))
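# A hedged usage sketch: all_domain and all_slots are assumed module-level
# vocabularies; toy values are enough to show the span-to-triple conversion.
all_domain = ["[hotel]", "[restaurant]"]
all_slots = ["area", "pricerange"]
assert sorted(paser_bs("<sos_b> [hotel] area centre pricerange cheap <eos_b>")) == [
    "[hotel] area centre",
    "[hotel] pricerange cheap",
]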
| 17,737
|
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI container server apps."""
conf = global_conf.copy()
conf.update(local_conf)
return ContainerController(conf)
| 17,738
|
def retrieve_succinct_traceback() -> str:
"""
    A utility that retrieves a succinct traceback digest from a complete traceback string.
"""
tb = traceback.format_exc()
return "\n".join(pg.splitlines()[-1] for pg in split_paragraphs(tb))
| 17,739
|
def get_canonical_format_name(format_name):
"""
Get the canonical format name for a possible abbreviation
Args:
format_name (str): Format name or abbreviation
Returns:
The canonical name from CANONICAL_FORMATS, or None if the format is
not recognized.
"""
try:
return CANONICAL_FORMATS[format_name.lower()]
except KeyError:
return None
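# A hedged illustration with a hypothetical CANONICAL_FORMATS table (the real
# mapping lives elsewhere in the project and may differ).
CANONICAL_FORMATS = {"md": "Markdown", "markdown": "Markdown", "rst": "reStructuredText"}
assert get_canonical_format_name("MD") == "Markdown"
assert get_canonical_format_name("unknown") is None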
| 17,740
|
def build_protoc_args(
ctx,
plugin,
proto_infos,
out_arg,
extra_options = [],
extra_protoc_args = [],
short_paths = False,
resolve_tools = True):
"""
Build the args for a protoc invocation.
This does not include the paths to the .proto files, which should be done external to this function.
Args:
ctx: The Bazel rule execution context object.
plugin: The ProtoPluginInfo for the plugin to use.
proto_infos: The list of ProtoInfo providers.
out_arg: The path to provide as the output arg to protoc, usually the generation root dir.
extra_options: An optional list of extra options to pass to the plugin.
extra_protoc_args: An optional list of extra args to add to the command.
short_paths: Whether to use the .short_path instead of .path when creating paths. The short_path is used when
making a test/executable and referencing the runfiles.
resolve_tools: Whether to resolve and add the tools to returned inputs.
Returns:
- The list of args.
- The inputs required for the command.
- The input manifests required for the command.
"""
# Specify path getter
get_path = _short_path if short_paths else _path
# Build inputs and manifests list
inputs = []
input_manifests = []
if plugin.tool and resolve_tools:
plugin_runfiles, plugin_input_manifests = ctx.resolve_tools(tools = [plugin.tool])
inputs += plugin_runfiles.to_list()
input_manifests += plugin_input_manifests
inputs += plugin.data
# Get plugin name
plugin_name = plugin.name
if plugin.protoc_plugin_name:
plugin_name = plugin.protoc_plugin_name
# Build args
args_list = []
# Load all descriptors (direct and transitive) and remove dupes
descriptor_sets = depset([
descriptor_set
for proto_info in proto_infos
for descriptor_set in proto_info.transitive_descriptor_sets.to_list()
]).to_list()
inputs += descriptor_sets
# Add descriptors
pathsep = ctx.configuration.host_path_separator
args_list.append("--descriptor_set_in={}".format(pathsep.join(
[get_path(f) for f in descriptor_sets],
)))
# Add --plugin if not a built-in plugin
if plugin.tool_executable:
# If Windows, mangle the path. It's done a bit awkwardly with
        # `host_path_separator` as there is no simple way to figure out what's
# the current OS.
if ctx.configuration.host_path_separator == ";":
plugin_tool_path = get_path(plugin.tool_executable).replace("/", "\\")
else:
plugin_tool_path = get_path(plugin.tool_executable)
args_list.append("--plugin=protoc-gen-{}={}".format(plugin_name, plugin_tool_path))
# Add plugin --*_out/--*_opt args
plugin_options = list(plugin.options)
plugin_options.extend(extra_options)
if plugin_options:
opts_str = ",".join(
[option.replace("{name}", ctx.label.name) for option in plugin_options],
)
if plugin.separate_options_flag:
args_list.append("--{}_opt={}".format(plugin_name, opts_str))
else:
out_arg = "{}:{}".format(opts_str, out_arg)
args_list.append("--{}_out={}".format(plugin_name, out_arg))
# Add any extra protoc args provided or that plugin has
args_list.extend(extra_protoc_args)
if plugin.extra_protoc_args:
args_list.extend(plugin.extra_protoc_args)
return args_list, inputs, input_manifests
| 17,741
|
def test_opf_sgen_voltage():
""" Testing a simple network with transformer for voltage
constraints with OPF using a static generator """
# boundaries
vm_max = 1.04
vm_min = 0.96
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_transformer_from_parameters(net, 0, 1, vsc_percent=3.75,
tp_max=2, vn_lv_kv=0.4,
shift_degree=150, tp_mid=0,
vn_hv_kv=10.0, vscr_percent=2.8125,
tp_pos=0, tp_side="hv", tp_min=-2,
tp_st_percent=2.5, i0_percent=0.68751,
sn_kva=16.0, pfe_kw=0.11, name=None,
in_service=True, index=None, max_loading_percent=1000000)
pp.create_sgen(net, 3, p_kw=-10, controllable=True, max_p_kw=-5, min_p_kw=-15, max_q_kvar=25,
min_q_kvar=-25)
pp.create_polynomial_cost(net, 0, "sgen", array([-100, 0]))
pp.create_ext_grid(net, 0)
pp.create_line_from_parameters(net, 1, 2, 1, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=1000000)
pp.create_line_from_parameters(net, 2, 3, 1, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=1000000)
# run OPF
for init in ["pf", "flat"]:
pp.runopp(net, verbose=False, init=init)
assert net["OPF_converged"]
# assert and check result
logger.debug("test_opf_sgen_voltage")
logger.debug("res_sgen:\n%s" % net.res_sgen)
logger.debug("res_bus.vm_pu: \n%s" % net.res_bus.vm_pu)
assert max(net.res_bus.vm_pu) < vm_max
assert min(net.res_bus.vm_pu) > vm_min
| 17,742
|
def cycle_left(state):
"""Rotates the probabilityfunction, translating each discrete left by one site.
The outcome is the same as if the probabilityfunction was fully local, with n_discretes
indices, and the indices were permuted with (1, 2, ..., n_discretes-1, 0).
Args:
state: The probabilityfunction to which the rotation is applied.
Returns:
The rotation probabilityfunction.
"""
# TODO Can we do this more easily using the kwargs available for
# pswapaxes/all_to_all?
dim = LOCAL_DIM
pmap_index = pops.AXIS_NAME
n_global_discretes, n_local_discretes = number_of_discretes(state)
if n_local_discretes < 8:
msg = ("cycle_left isn't supported for less than 8 local discretes, you "
f"provided {n_local_discretes}.")
raise NotImplementedError(msg)
# Number of discretes that don't really take part in the process.
num_discretes_leftover = n_local_discretes - n_global_discretes - 1
orig_shape = state.shape
# REDACTED Make a diagram illustrating what is going on here.
state = state.reshape((dim, dim**n_global_discretes, dim**num_discretes_leftover))
state = jax.lax.pswapaxes(state, pmap_index, 1)
state = state.transpose((1, 0, 2))
state = state.reshape((dim, dim**n_global_discretes, dim**num_discretes_leftover))
state = jax.lax.pswapaxes(state, pmap_index, 1)
state = state.reshape((dim**8, dim**(n_local_discretes - 8)))
state = state.transpose((1, 0))
state = state.reshape((dim**(n_local_discretes - 7), dim**7))
state = state.transpose((1, 0))
return state.reshape(orig_shape)
| 17,743
|
def make(ctx, checks, version, initial_release, skip_sign, sign_only):
"""Perform a set of operations needed to release checks:
\b
* update the version in __about__.py
* update the changelog
* update the requirements-agent-release.txt file
* update in-toto metadata
* commit the above changes
You can release everything at once by setting the check to `all`.
\b
If you run into issues signing:
\b
- Ensure you did `gpg --import <YOUR_KEY_ID>.gpg.pub`
"""
# Import lazily since in-toto runs a subprocess to check for gpg2 on load
from ..signing import update_link_metadata, YubikeyException
releasing_all = 'all' in checks
valid_checks = get_valid_checks()
if not releasing_all:
for check in checks:
if check not in valid_checks:
abort('Check `{}` is not an Agent-based Integration'.format(check))
# don't run the task on the master branch
if get_current_branch() == 'master':
abort('Please create a release branch, you do not want to commit to master directly.')
if releasing_all:
if version:
abort('You cannot bump every check to the same version')
checks = sorted(valid_checks)
else:
checks = sorted(checks)
if initial_release:
version = '1.0.0'
# Keep track of the list of checks that have been updated.
updated_checks = []
for check in checks:
if sign_only:
updated_checks.append(check)
continue
elif initial_release and check in BETA_PACKAGES:
continue
# Initial releases will only bump if not already 1.0.0 so no need to always output
if not initial_release:
echo_success('Check `{}`'.format(check))
if version:
# sanity check on the version provided
cur_version = get_version_string(check)
if version == 'final':
# Remove any pre-release metadata
version = finalize_version(cur_version)
else:
# Keep track of intermediate version bumps
prev_version = cur_version
for method in version.split(','):
# Apply any supported version bumping methods. Chaining is required for going
# from mainline releases to development releases since e.g. x.y.z > x.y.z-rc.A.
# So for an initial bug fix dev release you can do `fix,rc`.
if method in VERSION_BUMP:
version = VERSION_BUMP[method](prev_version)
prev_version = version
p_version = parse_version_info(version)
p_current = parse_version_info(cur_version)
if p_version <= p_current:
if initial_release:
continue
else:
abort('Current version is {}, cannot bump to {}'.format(cur_version, version))
else:
cur_version, changelog_types = ctx.invoke(changes, check=check, dry_run=True)
if not changelog_types:
echo_warning('No changes for {}, skipping...'.format(check))
continue
bump_function = get_bump_function(changelog_types)
version = bump_function(cur_version)
if initial_release:
echo_success('Check `{}`'.format(check))
# update the version number
echo_info('Current version of check {}: {}'.format(check, cur_version))
echo_waiting('Bumping to {}... '.format(version), nl=False)
update_version_module(check, cur_version, version)
echo_success('success!')
# update the CHANGELOG
echo_waiting('Updating the changelog... ', nl=False)
# TODO: Avoid double GitHub API calls when bumping all checks at once
ctx.invoke(
changelog,
check=check,
version=version,
old_version=cur_version,
initial=initial_release,
quiet=True,
dry_run=False,
)
echo_success('success!')
commit_targets = [check]
updated_checks.append(check)
# update the list of integrations to be shipped with the Agent
if check not in NOT_CHECKS:
req_file = get_agent_release_requirements()
commit_targets.append(os.path.basename(req_file))
echo_waiting('Updating the Agent requirements file... ', nl=False)
update_agent_requirements(req_file, check, get_agent_requirement_line(check, version))
echo_success('success!')
echo_waiting('Committing files...')
# commit the changes.
# do not use [ci skip] so releases get built https://docs.gitlab.com/ee/ci/yaml/#skipping-jobs
msg = '[Release] Bumped {} version to {}'.format(check, version)
git_commit(commit_targets, msg)
if not initial_release:
# Reset version
version = None
if sign_only or not skip_sign:
if not updated_checks:
abort('There are no new checks to sign and release!')
echo_waiting('Updating release metadata...')
echo_info('Please touch your Yubikey immediately after entering your PIN!')
try:
commit_targets = update_link_metadata(updated_checks)
git_commit(commit_targets, '[Release] Update metadata', force=True)
except YubikeyException as e:
abort('A problem occurred while signing metadata: {}'.format(e))
# done
echo_success('All done, remember to push to origin and open a PR to merge these changes on master')
| 17,744
|
def make_rare_deleterious_variants_filter(sample_ids_list=None):
""" Function for retrieving rare, deleterious variants """
and_list = [
{
"$or":
[
{"cadd.esp.af": {"$lt": 0.051}},
{"cadd.esp.af": {"$exists": False}}
]
},
{
"$or":
[
{"func_knowngene": "exonic"},
{"func_knowngene": "splicing"}
]
},
{"cadd.phred": {"$gte": 10}},
{"exonicfunc_knowngene": {"$ne": "synonymous SNV"}},
{"1000g2015aug_all": {"$lt": 0.051}}
]
result = _append_sample_id_constraint_if_needed(and_list, sample_ids_list)
return result
| 17,745
|
def CreateTensorPileup(args):
"""
Create pileup tensor for pileup model training or calling.
    Use a sliding window to scan the whole candidate region, keep all candidates over a specified minimum allelic
    frequency and minimum depth, and use samtools mpileup to store pileup info for pileup tensor generation. The
    candidate regions are scanned only once, so all variant candidates are obtained directly.
"""
ctg_start = args.ctgStart
ctg_end = args.ctgEnd
fasta_file_path = args.ref_fn
ctg_name = args.ctgName
samtools_execute_command = args.samtools
bam_file_path = args.bam_fn
chunk_id = args.chunk_id - 1 if args.chunk_id else None # 1-base to 0-base
chunk_num = args.chunk_num
tensor_can_output_path = args.tensor_can_fn
minimum_af_for_candidate = args.min_af
minimum_snp_af_for_candidate = args.snp_min_af
minimum_indel_af_for_candidate = args.indel_min_af
min_coverage = args.minCoverage
platform = args.platform
confident_bed_fn = args.bed_fn
is_confident_bed_file_given = confident_bed_fn is not None
alt_fn = args.indel_fn
extend_bed = args.extend_bed
is_extend_bed_file_given = extend_bed is not None
min_mapping_quality = args.minMQ
min_base_quality = args.minBQ
fast_mode = args.fast_mode
vcf_fn = args.vcf_fn
is_known_vcf_file_provided = vcf_fn is not None
global test_pos
test_pos = None
# 1-based regions [start, end] (start and end inclusive)
ref_regions = []
reads_regions = []
known_variants_set = set()
tree, bed_start, bed_end = bed_tree_from(bed_file_path=extend_bed,
contig_name=ctg_name,
return_bed_region=True)
fai_fn = file_path_from(fasta_file_path, suffix=".fai", exit_on_not_found=True, sep='.')
if not is_confident_bed_file_given and chunk_id is not None:
contig_length = 0
with open(fai_fn, 'r') as fai_fp:
for row in fai_fp:
columns = row.strip().split("\t")
contig_name = columns[0]
if contig_name != ctg_name:
continue
contig_length = int(columns[1])
chunk_size = contig_length // chunk_num + 1 if contig_length % chunk_num else contig_length // chunk_num
ctg_start = chunk_size * chunk_id # 0-base to 1-base
ctg_end = ctg_start + chunk_size
if is_confident_bed_file_given and chunk_id is not None:
chunk_size = (bed_end - bed_start) // chunk_num + 1 if (bed_end - bed_start) % chunk_num else (bed_end - bed_start) // chunk_num
ctg_start = bed_start + 1 + chunk_size * chunk_id # 0-base to 1-base
ctg_end = ctg_start + chunk_size
if is_known_vcf_file_provided and chunk_id is not None:
known_variants_list = vcf_candidates_from(vcf_fn=vcf_fn, contig_name=ctg_name)
total_variants_size = len(known_variants_list)
chunk_variants_size = total_variants_size // chunk_num if total_variants_size % chunk_num == 0 else total_variants_size // chunk_num + 1
chunk_start_pos = chunk_id * chunk_variants_size
known_variants_set = set(known_variants_list[chunk_start_pos: chunk_start_pos + chunk_variants_size])
ctg_start, ctg_end = min(known_variants_set), max(known_variants_set)
is_ctg_name_given = ctg_name is not None
is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None
if is_ctg_range_given:
extend_start = ctg_start - no_of_positions
extend_end = ctg_end + no_of_positions
reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end))
reference_start, reference_end = ctg_start - param.expandReferenceRegion, ctg_end + param.expandReferenceRegion
reference_start = 1 if reference_start < 1 else reference_start
ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end))
elif is_ctg_name_given:
reads_regions.append(region_from(ctg_name=ctg_name))
ref_regions.append(region_from(ctg_name=ctg_name))
reference_start = 1
reference_sequence = reference_sequence_from(
samtools_execute_command=samtools_execute_command,
fasta_file_path=fasta_file_path,
regions=ref_regions
)
if reference_sequence is None or len(reference_sequence) == 0:
sys.exit(log_error("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path)))
if is_confident_bed_file_given and ctg_name not in tree:
sys.exit(log_error("[ERROR] ctg_name {} not exists in bed file({}).".format(ctg_name, confident_bed_fn)))
# samtools mpileup options
# reverse-del: deletion in forward/reverse strand were marked as '*'/'#'
min_mapping_quality = 0 if args.gvcf else min_mapping_quality
min_base_quality = 0 if args.gvcf else min_base_quality
max_depth = param.max_depth_dict[args.platform] if args.platform else args.max_depth
mq_option = ' --min-MQ {}'.format(min_mapping_quality)
bq_option = ' --min-BQ {}'.format(min_base_quality)
flags_option = ' --excl-flags {}'.format(param.SAMTOOLS_VIEW_FILTER_FLAG)
max_depth_option = ' --max-depth {}'.format(max_depth)
bed_option = ' -l {}'.format(extend_bed) if is_extend_bed_file_given else ""
gvcf_option = ' -a' if args.gvcf else ""
samtools_mpileup_process = subprocess_popen(
shlex.split(
"{} mpileup {} -r {} --reverse-del".format(samtools_execute_command,
bam_file_path,
" ".join(reads_regions), )
+ mq_option + bq_option + bed_option + flags_option + max_depth_option + gvcf_option))
if tensor_can_output_path != "PIPE":
tensor_can_fpo = open(tensor_can_output_path, "wb")
tensor_can_fp = subprocess_popen(shlex.split("{} -c".format(param.zstd)), stdin=PIPE, stdout=tensor_can_fpo)
else:
tensor_can_fp = TensorStdout(sys.stdout)
# whether save all alternative information, only for debug mode
if alt_fn:
alt_fp = open(alt_fn, 'w')
pos_offset = 0
pre_pos = -1
tensor = [[]] * sliding_window_size
candidate_position = []
all_alt_dict = {}
depth_dict = {}
af_dict = {}
# to generate gvcf, it is needed to record whole genome statistical information
if args.gvcf:
nonVariantCaller = variantInfoCalculator(gvcfWritePath=args.temp_file_dir, ref_path=args.ref_fn,
bp_resolution=args.bp_resolution, ctgName=ctg_name,sample_name='.'.join(
[args.sampleName, ctg_name, str(ctg_start), str(ctg_end)]), p_err=args.base_err,
gq_bin_size=args.gq_bin_size)
confident_bed_tree = bed_tree_from(bed_file_path=confident_bed_fn, contig_name=ctg_name, bed_ctg_start=extend_start,
bed_ctg_end=extend_end)
empty_pileup_flag = True
for row in samtools_mpileup_process.stdout:
empty_pileup_flag = False
columns = row.strip().split('\t')
pos = int(columns[1])
pileup_bases = columns[4]
reference_base = reference_sequence[pos - reference_start].upper()
valid_reference_flag = True
within_flag = True
if args.gvcf:
if not valid_reference_flag:
nonVariantCaller.make_gvcf_online({}, push_current=True)
if ctg_start != None and ctg_end != None:
within_flag = pos >= ctg_start and pos <= ctg_end
elif ctg_start != None and ctg_end == None:
within_flag = pos >= ctg_start
elif ctg_start == None and ctg_end != None:
within_flag = pos <= ctg_end
else:
within_flag = True
if columns[3] == '0' and within_flag and valid_reference_flag:
cur_site_info = {'chr': columns[0], 'pos': pos, 'ref': reference_base, 'n_total': 0, 'n_ref': 0}
nonVariantCaller.make_gvcf_online(cur_site_info)
continue
# start with a new region, clear all sliding windows cache, avoid memory occupation
if pre_pos + 1 != pos:
pos_offset = 0
tensor = [[]] * sliding_window_size
candidate_position = []
pre_pos = pos
# a condition to skip some positions creating tensor,but return allele summary
# allele count function
pileup_tensor, alt_dict, af, depth, pass_af, pileup_list, max_del_length = generate_tensor(pos=pos,
pileup_bases=pileup_bases,
reference_sequence=reference_sequence,
reference_start=reference_start,
reference_base=reference_base,
minimum_af_for_candidate=minimum_af_for_candidate,
minimum_snp_af_for_candidate=minimum_snp_af_for_candidate,
minimum_indel_af_for_candidate=minimum_indel_af_for_candidate,
platform=platform,
fast_mode=fast_mode)
if args.gvcf and within_flag and valid_reference_flag:
cur_n_total = 0
cur_n_ref = 0
for _key, _value in pileup_list:
if (_key == reference_base):
cur_n_ref = _value
cur_n_total += _value
cur_site_info = {'chr': columns[0], 'pos': pos, 'ref': reference_base, 'n_total': cur_n_total,
'n_ref': cur_n_ref}
nonVariantCaller.make_gvcf_online(cur_site_info)
pass_confident_bed = not is_confident_bed_file_given or is_region_in(tree=confident_bed_tree,
contig_name=ctg_name,
region_start=pos - 1,
region_end=pos + max_del_length + 1) # 0-based
if (pass_confident_bed and reference_base in 'ACGT' and (pass_af and depth >= min_coverage) and not is_known_vcf_file_provided) or (
is_known_vcf_file_provided and pos in known_variants_set):
candidate_position.append(pos)
all_alt_dict[pos] = alt_dict
depth_dict[pos] = depth
af_dict[pos] = af
tensor[pos_offset] = pileup_tensor
# save pileup tensor for each candidate position with nearby flanking_base_num bp distance
pos_offset = (pos_offset + 1) % sliding_window_size
if len(candidate_position) and pos - candidate_position[0] == flanking_base_num:
center = candidate_position.pop(0)
has_empty_tensor = sum([True for item in tensor if not len(item)])
if not has_empty_tensor:
depth = depth_dict[center]
ref_seq = reference_sequence[center - (
flanking_base_num) - reference_start: center + flanking_base_num + 1 - reference_start]
concat_tensor = tensor[pos_offset:] + tensor[0:pos_offset]
alt_info = str(depth) + '-' + ' '.join(
[' '.join([item[0], str(item[1])]) for item in list(all_alt_dict[center].items())])
l = "%s\t%d\t%s\t%s\t%s" % (
ctg_name,
center,
ref_seq,
" ".join(" ".join("%d" % x for x in innerlist) for innerlist in concat_tensor),
alt_info
)
tensor_can_fp.stdin.write(l)
tensor_can_fp.stdin.write("\n")
if alt_fn:
alt_info = ' '.join(
[' '.join([item[0], str(item[1])]) for item in list(all_alt_dict[center].items())])
alt_fp.write(
'\t'.join([ctg_name + ' ' + str(center), str(depth), alt_info, str(af_dict[center])]) + '\n')
del all_alt_dict[center], depth_dict[center], af_dict[center]
if args.gvcf and len(nonVariantCaller.current_block) != 0:
nonVariantCaller.write_to_gvcf_batch(nonVariantCaller.current_block, nonVariantCaller.cur_min_DP,
nonVariantCaller.cur_raw_gq)
if args.gvcf and empty_pileup_flag:
nonVariantCaller.write_empty_pileup(ctg_name,ctg_start,ctg_end)
if args.gvcf:
nonVariantCaller.vcf_writer.close()
samtools_mpileup_process.stdout.close()
samtools_mpileup_process.wait()
if tensor_can_output_path != "PIPE":
tensor_can_fp.stdin.close()
tensor_can_fp.wait()
tensor_can_fpo.close()
if alt_fn:
alt_fp.close()
| 17,746
|
def upilab6_1_9 () :
"""6.1.9. Exercice UpyLaB 6.6 - Parcours rouge
Écrire une fonction symetrise_amis qui reçoit un dictionnaire d d’amis où les clés sont des prénoms et les valeurs, des
ensembles de prénoms représentant les amis de chacun.
Cette fonction modifie le dictionnaire d de sorte que si une clé prenom1 contient prenom2 dans l’ensemble de ses amis,
l’inverse soit vrai aussi.
La fonction accepte un second paramètre englobe.
Si englobe est vrai, la fonction ajoutera les éléments nécessaires pour symétriser le dictionnaire d.
Sinon, la fonction enlèvera les éléments nécessaires pour symétriser d.
Exemple 1 : L’exécution du code suivant :
d = {'Thierry': {'Michelle', 'Bernadette'},
'Michelle': {'Thierry'},
'Bernadette': set()}
symetrise_amis(d, True)
print(d) doit afficher, à l’ordre près :
{'Thierry': {'Michelle', 'Bernadette'},
'Michelle' : {'Thierry'},
'Bernadette' : {'Thierry'}}
Exemple 2 :L’exécution du code suivant :
d = {'Thierry': {'Michelle', 'Bernadette'},
'Michelle': {'Thierry'},
'Bernadette': set()}
symetrise_amis(d, False)
print(d)
doit afficher, à l’ordre près :
{'Thierry': {'Michelle'},
'Michelle' : {'Thierry'},
'Bernadette' : set()}
Montre l'importance de faire une copie profonde si on souhaite modifier le type construit dans la fonction
"""
    def symetrise_amis(dico, englobe) :
        """Modify the friends dictionary dico to symmetrise it:
        * either by extending it: englobe = True;
        * or by contracting it: englobe = False."""
        dicCopy = {}  # deep copy
        for p1 in dico :
            dicCopy[p1] = set()  # deep copy: the value is a brand-new set
            # from copy import deepcopy ; dicCopy = deepcopy(dico) would work too
            for p2 in dico[p1] :
                dicCopy[p1].add(p2)  # rebuild a new set
        for p1 in dicCopy :
            # set + add - discard
            for p2 in dicCopy[p1] :
                if englobe :  # add = insert an element
                    if p1 not in dicCopy[p2] :
                        dico[p2].add(p1)
                else :  # discard = remove an element
                    if p1 not in dicCopy[p2]:
                        dico[p1].discard(p2)
        """dico = {} does not work in this direction??? from this point on dico is no longer modified externally
        for p1 in dicCopy :
            dico[p1] = set()  # deep copy, the value is a new set
            for p2 in dicCopy[p1] :
                dico[p1].add(p2)  # rebuild a new set
        return dicCopy"""
    test = [
        ({'Thierry': {'Michelle', 'Bernadette'}, 'Michelle': {'Thierry'}, 'Bernadette': set()}, True ),
        ({'Thierry': {'Michelle', 'Bernadette'}, 'Michelle': {'Thierry'}, 'Bernadette': set()}, False )
    ]
    extension = [" contracting ", " extending "]
    reponse = [{'Thierry': {'Michelle', 'Bernadette'}, 'Michelle' : {'Thierry'}, 'Bernadette' : {'Thierry'} },
               {'Thierry': {'Michelle'} , 'Michelle' : {'Thierry'}, 'Bernadette' : set() }
               ]
    for t, b in test :
        print("For the initial dict = \n", t, extension[int(b)], " gives ")
        symetrise_amis(t, b)  # modifies t in place; the function returns None
        print(t, " should be\n", reponse[test.index((t, b))])
        print("Test passed? :", t == reponse[test.index((t, b))])
| 17,747
|
def PluginCompleter(unused_self, event_object):
"""Completer function that returns a list of available plugins."""
ret_list = []
if not IsLoaded():
return ret_list
if not '-h' in event_object.line:
ret_list.append('-h')
plugins_list = parsers_manager.ParsersManager.GetWindowsRegistryPlugins()
for plugin_cls in plugins_list.GetKeyPlugins(RegCache.hive_type):
plugins_list = plugin_cls(reg_cache=RegCache.reg_cache)
plugin_name = plugins_list.plugin_name
if plugin_name.startswith('winreg'):
plugin_name = plugin_name[7:]
if plugin_name == 'default':
continue
ret_list.append(plugin_name)
return ret_list
| 17,748
|
def analyze_violations(files, layers, flow_graph, _assert):
"""Analyze violations provided the flow graph"""
# Get layer sizes
sizes = get_layers_size(layers.keys(), layers)
# Back calls
back_calls = get_back_calls(flow_graph, layers)
if back_calls != []:
logging.warning('Back Calls')
for u, v in back_calls:
logging.warning(
'{} (Layer {}) -> {} (Layer {})'.format(u, layers[u], v, layers[v]))
# Calculate BCVI
count = count_calls(back_calls, layers)
bcvi = calculate_violation_index(count, sizes)
logging.warning('Average BCVI: {}'.format(bcvi))
else:
logging.info('No Back Calls Found')
# Skip calls
skip_calls = get_skip_calls(flow_graph, layers)
if skip_calls != []:
logging.warning('Skip Calls')
for u, v in skip_calls:
logging.warning(
'{} (Layer {}) -> {} (Layer {})'.format(u, layers[u], v, layers[v]))
# Calculate SCVI
count = count_calls(skip_calls, layers)
scvi = calculate_violation_index(count, sizes)
logging.warning('Average SCVI: {}'.format(scvi))
else:
logging.info('No Skip Calls Found')
# Raise errors to testing routine
if _assert:
assert(back_calls == [])
assert(skip_calls == [])
| 17,749
|
def train_model(item_user_data):
    """Return a trained implicit ALS model."""
model = implicit.als.AlternatingLeastSquares(factors=50)
model.fit(item_user_data)
return model
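# Usage sketch (hedged): builds a tiny sparse item-user interaction matrix with scipy.
# Note that newer versions of `implicit` expect a user-item matrix instead, so the
# orientation below is an assumption tied to the API used in train_model above.
import numpy as np
from scipy.sparse import csr_matrix

interactions = csr_matrix(np.array([[1, 0, 2],
                                    [0, 3, 0],
                                    [4, 0, 0]], dtype=np.float64))
als_model = train_model(interactions)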
| 17,750
|
def refresh_blind_balances(wallet, balances, storeback=True):
""" Given a list of (supposedly) unspent balances, iterate over each one
        and verify its status on the blockchain. Each balance failing
        this verification has its status updated in the database (if storeback is True).
Returns a list of TRULY unspent balances.
"""
rpc = wallet.rpc
unspent = [ ]
for balance in balances:
result = rpc.get_blinded_balances([balance["commitment"]])
if len(result) == 0:
if storeback:
wallet.modifyBlindBalance(balance["commitment"], used=True)
else:
unspent.append(balance)
return unspent
| 17,751
|
def command_up(
stairlight: StairLight, args: argparse.Namespace
) -> Union[dict, "list[dict]"]:
"""Execute up command
Args:
stairlight (StairLight): Stairlight class
args (argparse.Namespace): CLI arguments
Returns:
Union[dict, list]: Upstairs results
"""
return search(
func=stairlight.up,
args=args,
tables=find_tables_to_search(stairlight=stairlight, args=args),
)
| 17,752
|
def hull_area(par, llhs, above_min=1):
"""Estimate projected area of llh minimum for single parameter
Parameters
----------
par : np.ndarray
the parameter values
    llhs : np.ndarray
        the llh values
    above_min : float, optional
        only points with llh below llhs.min() + above_min enter the hull (default 1)
    Returns
    -------
    float
        the convex-hull area of the selected points, or np.inf if the hull cannot be constructed
"""
min_llh = llhs.min()
try:
Hull = ConvexHull(np.stack([par, llhs]).T[llhs < min_llh+above_min])
return Hull.volume
except QhullError:
return np.inf
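# Usage sketch (hedged): hull_area relies on numpy and scipy names imported elsewhere in
# this module (np, ConvexHull, QhullError). A synthetic parabolic likelihood profile:
import numpy as np
from scipy.spatial import ConvexHull, QhullError  # imports assumed by hull_area above

par = np.linspace(-2.0, 2.0, 200)
llhs = par ** 2                      # minimum of 0 at par = 0
print(hull_area(par, llhs, above_min=1))
# ~1.3: area between the parabola and the chord at llh ~ 1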
| 17,753
|
def send_images_via_email(email_subject, email_body, image_file_paths, sender_email="ozawamariajp@gmail.com", recipient_emails=["ozawamariajp@gmail.com"]):
"""
    Send images via email as MIME attachments.
"""
# libraries to be imported
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email import encoders
### Create instance of MIMEMultipart message
msg = MIMEMultipart()
# storing the senders email address
msg['From'] = sender_email
### Recipient emails
msg['To'] = ", ".join(recipient_emails)
### SUBJECT
msg['Subject'] = email_subject
### BODY TEXT
# string to store the body of the mail
body = email_body
# attach the body with the msg instance
msg.attach(MIMEText(body, 'plain'))
### ATTACH IMAGES
for i_image in range(len(image_file_paths)):
# open the image file to be sent
image_data = open(image_file_paths[i_image], "rb").read()
# instance of MIMEImage and named as image
image = MIMEImage(image_data, name=os.path.basename(image_file_paths[i_image]))
# attach the image instance 'image' to instance 'msg'
msg.attach(image)
# creates SMTP session
s = smtplib.SMTP('smtp.gmail.com', 587)
# start TLS for security
s.starttls()
# Authentication
s.login(sender_email, "10179224")
# Converts the Multipart msg into a string
text = msg.as_string()
# sending the mail
s.sendmail(sender_email, recipient_emails, text)
# terminating the session
s.quit()
| 17,754
|
def assemble_chain(leaf, store):
"""Assemble the trust chain.
This assembly method uses the certificates subject and issuer common name and
should be used for informational purposes only. It does *not*
cryptographically verify the chain!
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
"""
store_dict = {}
for cert in store:
store_dict[cert.get_subject().CN] = cert
chain = [leaf]
current = leaf
try:
while current.get_issuer().CN != current.get_subject().CN:
chain.append(store_dict[current.get_issuer().CN])
current = store_dict[current.get_issuer().CN]
except KeyError:
invalid = crypto.X509()
patch_certificate(invalid)
invalid.set_subject(current.get_issuer())
chain.append(invalid)
chain.reverse()
return chain
| 17,755
|
def spin(node: 'Node', executor: 'Executor' = None) -> None:
"""
Execute work and block until the context associated with the executor is shutdown.
Callbacks will be executed by the provided executor.
This function blocks.
:param node: A node to add to the executor to check for work.
:param executor: The executor to use, or the global executor if ``None``.
"""
executor = get_global_executor() if executor is None else executor
try:
executor.add_node(node)
while executor.context.ok():
executor.spin_once()
finally:
executor.remove_node(node)
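# Minimal usage sketch (hedged): assumes a sourced ROS 2 environment with rclpy available.
import rclpy

rclpy.init()
node = rclpy.create_node('demo_node')
try:
    spin(node)              # blocks until the context is shut down (e.g. Ctrl-C)
finally:
    node.destroy_node()
    rclpy.shutdown()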
| 17,756
|
def test_load_facts_mismatch_version(tmpdir):
"""Test load facts when loaded nodes have different format versions."""
version1 = "version1"
node1 = {"node1": "foo"}
version2 = "version2"
node2 = {"node2": "foo"}
tmpdir.join("node1.yml").write(_encapsulate_nodes_facts(node1, version1))
tmpdir.join("node2.yml").write(_encapsulate_nodes_facts(node2, version2))
with pytest.raises(ValueError) as e:
load_facts(str(tmpdir))
assert "Input file version mismatch" in str(e.value)
| 17,757
|
def hcp_mg_relax_cell() -> tuple:
"""
HCP Mg relax cell, wyckoff='c'.
"""
aiida_twinpy_dir = os.path.dirname(
os.path.dirname(aiida_twinpy.__file__))
filename = os.path.join(aiida_twinpy_dir,
'tests',
'data',
'HCP_Mg_relax.poscar')
pos = Poscar.from_file(filename)
cell = get_cell_from_pymatgen_structure(pos.structure)
return cell
| 17,758
|
def dilate( data, iterations=1, structure=None ):
"""Dilate a binary ND array by a number of iterations."""
# Convert to binary, just in case.
mask = binarise(data).astype(int)
if not structure:
structure = ndimage.generate_binary_structure(3,1)
# Check we have positive iterations - no header available here to convert from mm.
iterations = np.abs(iterations)
# Slightly awkward as I'm not certain iterations == voxels
print (" Dilating {0} iterations ...".format(iterations))
if iterations > 0:
dilated_mask = ndimage.binary_dilation( mask, structure, iterations )
return dilated_mask
# End of dilate() definition
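# Illustration (hedged): dilate() depends on a binarise() helper defined elsewhere; the
# underlying operation is scipy's binary dilation, shown here directly on a toy volume.
import numpy as np
from scipy import ndimage

volume = np.zeros((5, 5, 5), dtype=int)
volume[2, 2, 2] = 1
structure = ndimage.generate_binary_structure(3, 1)
dilated = ndimage.binary_dilation(volume, structure, iterations=1)
print(int(dilated.sum()))   # 7: the centre voxel plus its 6 face neighbours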
| 17,759
|
def text_from_pdf(file_name : str) -> str:
"""
Extract text from PDF file
==========================
Parameters
----------
file_name : str
Name of the file to extract text from.
Returns
-------
str
The extracted text.
"""
from PyPDF4 import PdfFileReader
text = ''
with open(file_name, 'rb') as instream:
reader = PdfFileReader(instream)
for i in range(reader.numPages):
text += '{}\n'.format(reader.getPage(i).extractText())
return text
| 17,760
|
def read_section(section, fname):
"""Read the specified section of an .ini file."""
conf = configparser.ConfigParser()
conf.read(fname)
val = {}
try:
val = dict((v, k) for v, k in conf.items(section))
return val
except configparser.NoSectionError:
return None
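# Usage sketch: write a small .ini file and read one of its sections.
import configparser  # required by read_section above

with open("example.ini", "w") as fh:
    fh.write("[database]\nhost = localhost\nport = 5432\n")

print(read_section("database", "example.ini"))   # {'host': 'localhost', 'port': '5432'}
print(read_section("missing", "example.ini"))    # None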
| 17,761
|
def _onnx_export(
model: nn.Module,
path_to_save: str,
):
"""
Export PyTorch model to ONNX.
"""
model.cpu()
model.eval()
# hardcoded [batch_size, seq_len] = [1, 1] export
tokens = torch.tensor([[0]], dtype=torch.long)
lengths = torch.tensor([1], dtype=torch.long)
with torch.no_grad():
torch.onnx.export(
model=model,
args=(tokens, lengths),
f=path_to_save,
export_params=True,
opset_version=12, # hardcoded
do_constant_folding=True, # hardcoded
input_names=["tokens", "lengths"],
output_names=["output"],
dynamic_axes={
"tokens": {0: "batch_size", 1: "seq_len"},
"lengths": {0: "batch_size"},
"output": {0: "batch_size", 1: "seq_len"},
},
)
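# Usage sketch (hedged): a toy model matching the (tokens, lengths) call signature that
# _onnx_export traces; the architecture below is illustrative only.
import torch
import torch.nn as nn

class ToyTagger(nn.Module):
    def __init__(self, vocab_size=10, hidden=8, n_tags=3):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, hidden)
        self.head = nn.Linear(hidden, n_tags)

    def forward(self, tokens, lengths):
        # scale by lengths so the second input is part of the traced graph
        return self.head(self.emb(tokens)) * lengths.float().view(-1, 1, 1)

_onnx_export(ToyTagger(), "toy_tagger.onnx")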
| 17,762
|
async def test(ctx):
"""A test for dummies"""
member = ctx.message.author
# for row in bot.servers:
# print(row)
    guilds = list(bot.guilds)
    guild = guilds[0]
role = discord.utils.get(ctx.message.guild.roles, name='dumb')
await bot.add_roles(member, role)
| 17,763
|
def calculate_gene_coverage_fragments( annot, frags ):
"""
Iterates through the fragments aligned to a reference molecule and tracks the
overlaps of each fragment with the genes that are annotated on that reference
"""
## this can be reduced to a sliding analysis window if this performs unreasonably
for frag in frags:
for gene in annot:
## if the gene fmin falls within range of this fragment, it has at least partial coverage
if gene['fmin'] >= frag['rfmin'] and gene['fmin'] <= frag['rfmax']:
## if the gene fmax also falls within range, the gene is fully covered
if gene['fmax'] <= frag['rfmax']:
gene['frags'].append( [gene['fmin'], gene['fmax']] )
else:
gene['frags'].append( [gene['fmin'], frag['rfmax']] )
## also check for fmax-only coverage of the gene
elif gene['fmax'] >= frag['rfmin'] and gene['fmax'] <= frag['rfmax']:
gene['frags'].append( [frag['rfmin'], gene['fmax']] )
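# Usage sketch: tiny synthetic annotation and fragment records (plain dicts) showing how
# per-gene coverage intervals accumulate in gene['frags'].
annot = [{'fmin': 100, 'fmax': 200, 'frags': []},
         {'fmin': 300, 'fmax': 400, 'frags': []}]
frags = [{'rfmin': 50, 'rfmax': 150},    # covers only the start of the first gene
         {'rfmin': 250, 'rfmax': 450}]   # fully covers the second gene
calculate_gene_coverage_fragments(annot, frags)
print(annot[0]['frags'])   # [[100, 150]]
print(annot[1]['frags'])   # [[300, 400]]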
| 17,764
|
def valid_tmpl(s: str):
"""check if s is valid template name"""
pattern = re.compile(r"TMPL_[A-Z0-9_]+")
if pattern.fullmatch(s) is None:
raise TealInputError("{} is not a valid template variable".format(s))
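# Quick check (hedged): `re` and TealInputError come from this module's imports; a valid
# template name passes silently, anything else raises TealInputError.
valid_tmpl("TMPL_RECEIVER_ADDRESS")   # ok, no exception
# valid_tmpl("tmpl_lowercase")        # would raise TealInputError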
| 17,765
|
def freqz(
b, a=1, worN=512, whole=False, fs=2 * np.pi, log=False, include_nyquist=False
):
"""Compute the frequency response of a digital filter."""
h = None
lastpoint = 2 * np.pi if whole else np.pi
if log:
w = np.logspace(0, lastpoint, worN, endpoint=include_nyquist and not whole)
else:
w = np.linspace(0, lastpoint, worN, endpoint=include_nyquist and not whole)
w = torch.tensor(w, device=b.device)
if a.size() == 1:
n_fft = worN if whole else worN * 2
h = torch.fft.rfft(b, n=n_fft)[:worN]
h /= a
if h is None:
zm1 = torch.exp(-1j * w)
h = polyval(b, zm1) / (polyval(a, zm1) + 1e-16)
# need to catch NaNs here
w = w * fs / (2 * np.pi)
return w, h
| 17,766
|
def _helper_fit_partition(self, pnum, endog, exog, fit_kwds,
init_kwds_e={}):
"""handles the model fitting for each machine. NOTE: this
is primarily handled outside of DistributedModel because
joblib cannot handle class methods.
Parameters
----------
self : DistributedModel class instance
An instance of DistributedModel.
pnum : scalar
index of current partition.
endog : array_like
endogenous data for current partition.
exog : array_like
exogenous data for current partition.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_e : dict-like
Additional init_kwds to add for each partition.
Returns
-------
estimation_method result. For the default,
_est_regularized_debiased, a tuple.
"""
temp_init_kwds = self.init_kwds.copy()
temp_init_kwds.update(init_kwds_e)
model = self.model_class(endog, exog, **temp_init_kwds)
results = self.estimation_method(model, pnum, self.partitions,
fit_kwds=fit_kwds,
**self.estimation_kwds)
return results
| 17,767
|
def test_fox_wl() -> None:
""" Test on fox """
run_test(['-wl', FOX], './tests/expected/fox.txt.wl.out')
| 17,768
|
def generate_2d_scatter(data, variables, class_data=None, class_names=None,
nrows=None, ncols=None, sharex=False, sharey=False,
show_legend=True, xy_line=False, trendline=False,
cmap_class=None, shorten_variables=False,
**kwargs):
"""Generate 2D scatter plots from the given data and variables.
This method will generate 2D scatter plots for all combinations
of the given variables.
Parameters
----------
data : object like :class:`pandas.core.frame.DataFrame`
The data we will plot here.
variables : list of strings
The variables we will generate scatter plots for.
class_data : object like :class:`pandas.core.series.Series`, optional
Class information for the points (if available).
class_names : dict of strings, optional
A mapping from the class data to labels/names.
nrows : integer, optional
The number of rows to use in a figure.
ncols : integer, optional
The number of columns to use in a figure.
sharex : boolean, optional
If True, the scatter plots will share the x-axis.
sharey : boolean, optional
If True, the scatter plots will share the y-axis.
show_legend : boolean, optional
If True, we will create a legend here and show it.
xy_line : boolean, optional
If True, we will add a x=y line to the plot.
trendline : boolean, optional
If True, we will add a trend line to the plot.
cmap_class : string or object like :class:`matplotlib.colors.Colormap`, optional
A color map to use for classes.
kwargs : dict, optional
Additional arguments used for the plotting.
Returns
-------
figures : list of objects like :class:`matplotlib.figure.Figure`
The figures containing the plots.
axes : list of objects like :class:`matplotlib.axes.Axes`
The axes containing the plots.
"""
nplots = comb(len(variables), 2, exact=True)
figures, axes = create_fig_and_axes(
nplots, nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey,
**kwargs,
)
fig = None
for i, (xvar, yvar) in enumerate(combinations(variables, 2)):
# We do not want to repeat the legend in all subplots:
show_legend_ax = False
if axes[i].figure != fig:
fig = axes[i].figure
show_legend_ax = True
xlabel = None
ylabel = None
if shorten_variables:
if len(xvar) > 5:
xlabel = xvar[:3] + '...'
if len(yvar) > 5:
ylabel = yvar[:3] + '...'
_, _, patches, labels = plot_scatter(
data,
xvar,
yvar,
axi=axes[i],
xlabel=xlabel,
ylabel=ylabel,
class_data=class_data,
class_names=class_names,
cmap_class=cmap_class,
**kwargs,
)
if xy_line:
line_xy = add_xy_line(axes[i], alpha=0.7, color='black')
patches.append(line_xy)
labels.append('x = y')
if trendline:
line_trend = add_trendline(axes[i], data[xvar], data[yvar],
alpha=0.7, ls='--', color='black')
patches.append(line_trend)
labels.append('y = a + bx')
if show_legend and show_legend_ax and patches and labels:
axes[i].legend(patches, labels)
return figures, axes
| 17,769
|
def getOptions(options):
"""translate command line options to PAML options."""
codeml_options = {}
if options.analysis == "branch-specific-kaks":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "1"
elif options.analysis == "branch-fixed-kaks":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "0"
elif options.analysis == "branch-all-but-one-fixed-kaks":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "2"
if not tree:
raise ValueError("please supply a tree for this mode.")
if not options.filename_output_tree:
raise ValueError(
"please speficy filename-output-tree as location "
"(relative to this script) for trees.")
elif options.analysis == "site-specific-kaks":
codeml_options["ncatG"] = "10"
codeml_options["getSE"] = "1"
codeml_options["seqtype"] = "1"
codeml_options["NSsites"] = "0 3 1 2 7 8"
codeml_options["model"] = "0"
codeml_options["CodonFreq"] = "2"
elif options.analysis == "pairwise":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "0"
codeml_options["runmode"] = "-2"
if options.multiple_genes:
codeml_options["Malpha"] = "0"
codeml_options["Mgene"] = "0"
if options.omega is not None:
codeml_options["omega"] = str(options.omega)
if options.estimate_ancestors:
codeml_options["RateAncestor"] = "1"
if options.codon_frequencies is not None:
c = options.codon_frequencies.upper()
if c in ("UNIFORM", "FEQUAL"):
a = "0"
elif c == "F1X4":
a = "1"
elif c == "F3X4":
a = "2"
elif c == "F61":
a = "3"
else:
a = options.codon_frequencies
codeml_options["CodonFreq"] = a
if options.method is not None:
codeml_options["method"] = str(options.method)
if options.optimization_threshold is not None:
codeml_options["Small_Diff"] = str(options.optimization_threshold)
if options.clean_data:
codeml_options["cleandata"] = options.clean_data
return codeml_options
| 17,770
|
def deserialize_block_to_json(block: Block) -> str:
"""Deserialize Block object to JSON string
Parameters
----------
block : Block
Block object
Returns
-------
str
JSON string
"""
try:
if block:
return json.dumps(
{
"blockId": block.id,
"blockNumber": block.number,
"timestamp": block.timestamp,
"producer": block.producer,
"unfilteredTransactionCount": block.unfilteredTransactionCount,
"unfilteredTransactionTraceCount": block.unfilteredTransactionTraceCount,
"unfilteredExecutedInputActionCount": block.unfilteredExecutedInputActionCount,
"unfilteredExecutedTotalActionCount": block.unfilteredExecutedTotalActionCount,
"filteringIncludeFilterExpr": block.filteringIncludeFilterExpr,
"filteredTransactionTraceCount": block.filteredTransactionTraceCount,
"filteredExecutedInputActionCount": block.filteredExecutedInputActionCount,
"filteredExecutedTotalActionCount": block.filteredExecutedTotalActionCount,
"filteredTransactionCount": block.filteredTransactionCount,
},
sort_keys=True,
)
else:
raise ValueError("None block made it through")
except ValueError as e:
logger.exception(
BlocktraceLog(
__name__,
"catching_exception",
{"hint": "Check debug logs to see why None Block made it through"},
)
)
return ""
| 17,771
|
def get_invocations(benchmark: Benchmark):
"""
Returns a list of invocations that invoke the tool for the given benchmark.
It can be assumed that the current directory is the directory from which execute_invocations.py is executed.
    For QCOMP 2020, this should return a list of invocations for all tracks in which the tool can take part.
    For each track, an invocation with default settings has to be provided; in addition, an optimized setting
    (e.g., the fastest engine and/or solution technique for this benchmark) can be specified. Only information
    about the model type, the property type and the state space size may be used to tweak the parameters.
If this benchmark is not supported, an empty list has to be returned.
"""
if not is_benchmark_supported(benchmark):
return []
prec = dict()
prec["epsilon-correct"] = "0.000001"
prec["probably-epsilon-correct"] = "0.05"
prec["often-epsilon-correct"] = "0.001"
prec["often-epsilon-correct-10-min"] = "0.001"
result = []
for track in prec.keys():
benchmark_settings = "./pet.sh reachability --precision {} --relative-error --only-result -m {} -p {} --property {}".format(
prec[track],
benchmark.get_prism_program_filename(),
benchmark.get_prism_property_filename(),
benchmark.get_property_name(),
)
if benchmark.get_open_parameter_def_string() != "":
benchmark_settings += " --const {}".format(
benchmark.get_open_parameter_def_string()
)
if (
"haddad" in benchmark.get_prism_program_filename()
or "gathering" in benchmark.get_prism_program_filename()
):
benchmark_settings = "./fix-syntax " + benchmark_settings
# default settings PET eps-corr
default_inv = Invocation()
default_inv.identifier = "default"
default_inv.note = "Default settings."
default_inv.track_id = track
default_inv.add_command(benchmark_settings)
result += [default_inv]
if (
track == "epsilon-correct"
or benchmark.get_model_type() == "ctmc"
or "haddad" in benchmark.get_prism_program_filename()
or "csma" in benchmark.get_prism_program_filename()
or "wlan" in benchmark.get_prism_program_filename()
or "gathering" in benchmark.get_prism_program_filename()
):
# smc is prob eps correct, cannot handle ctmc and haddad monmege cannot be parsed by it
continue
if benchmark.get_num_states_tweak() is None:
# need this info
continue
smc_settings = "./smc.sh {} {} -prop {} -heuristic RTDP_ADJ -RTDP_ADJ_OPTS 1 -colourParams S:{},Av:10,e:{},d:0.05,p:0.05,post:64".format(
benchmark.get_prism_program_filename(),
benchmark.get_prism_property_filename(),
benchmark.get_property_name(),
benchmark.get_num_states_tweak(),
prec[track],
)
if benchmark.get_open_parameter_def_string() != "":
smc_settings += " -const {}".format(
benchmark.get_open_parameter_def_string()
)
if (
"haddad" in benchmark.get_prism_program_filename()
or "gathering" in benchmark.get_prism_program_filename()
):
smc_settings = "./fix-syntax " + smc_settings
# SMC invocations
SMC_inv = Invocation()
SMC_inv.identifier = "specific"
SMC_inv.note = "Statistical model checking with limited information (no transition probabilities)"
SMC_inv.track_id = track
SMC_inv.add_command(smc_settings)
result += [SMC_inv]
return result
| 17,772
|
def see(node: "Position", move: Move = None) -> float:
"""Static-Exchange-Evaluation
Args:
node: The current position to see
move (Move, optional): The capture move to play. Defaults to None.
Returns:
float: The score associated with this capture. Positive is good.
"""
c = node.state.turn
bitboards = node.boards
if move is None:
return 0
if not move.is_capture:
return 0
i = 0
gain = [0] * 32
target = bitboards.piece_at(move._to)
if target is None:
return 0
occ = bitboards.occupancy
from_bb = Bitboard(1 << move._from)
attack_defend_bb = bitboards.attack_defend_to(move._to, c)
xrays = bitboards.xrays_bb
gain[i] = PIECE_VALUES[target._type]
assert target is not None
pt = (bitboards.piece_at(move._from))._type
while True:
i += 1
gain[i] = PIECE_VALUES[pt] - gain[i-1]
if max(-gain[i-1], gain[i]) < 0:
break
attack_defend_bb ^= from_bb
occ ^= from_bb
from_bb, pt = least_valuable_attacker(~c, bitboards, attack_defend_bb)
if not from_bb:
break
i -= 1
while i:
gain[i-1] = -max(-gain[i-1], gain[i])
i -= 1
return gain[0]
| 17,773
|
def new(option):
"""
Create a new message queue object; options must contain the type of
queue (which is the name of the child class), see above.
"""
options = option.copy()
qtype = options.pop("type", "DQS")
try:
__import__("messaging.queue.%s" % (qtype.lower()))
except SyntaxError:
raise SyntaxError("error importing dirq type: %s" % qtype)
except ImportError:
raise ImportError(
"you must install %s dependencies before using this module" %
(qtype, ))
try:
module = sys.modules["messaging.queue.%s" % (qtype.lower())]
return getattr(module, qtype)(**options)
except KeyError:
pass
raise ValueError("queue type not valid: %s" % qtype)
| 17,774
|
def rk4(f, t0, y0, h, N):
""""Solve IVP given by y' = f(t, y), y(t_0) = y_0 with step size h > 0, for N steps,
using the Runge-Kutta 4 method.
Also works if y is an n-vector and f is a vector-valued function."""
t = t0 + np.array([i * h for i in range(N+1)])
m = len(y0)
y = np.zeros((N+1, m))
y[0] = y0
# Repeatedly approximate next value.
for n in range(N):
k1 = f(t[n], y[n])
k2 = f(t[n] + h/2, y[n] + k1 * h/2)
k3 = f(t[n] + h/2, y[n] + k2 * h/2)
k4 = f(t[n] + h, y[n] + k3 * h)
y[n+1] = y[n] + h * (k1 + 2 * k2 + 2 * k3 + k4) / 6
return t, y
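# Usage sketch: solve y' = y, y(0) = 1 on [0, 1] and compare with the exact solution e^t.
import numpy as np

t, y = rk4(lambda t, y: y, t0=0.0, y0=[1.0], h=0.1, N=10)
print(y[-1, 0])                     # ~2.71828, within ~2e-6 of np.exp(1)
print(abs(y[-1, 0] - np.exp(1)))    # global error on the order of 1e-6 for h = 0.1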
| 17,775
|
def main():
"""
Example entry point; please see Enumeration example for more in-depth
comments on preparing and cleaning up the system.
:return: True if successful, False otherwise.
:rtype: bool
"""
# Since this application saves images in the current folder
# we must ensure that we have permission to write to this folder.
# If we do not have permission, fail right away.
try:
test_file = open('test.txt', 'w+')
except IOError:
print('Unable to write to current directory. Please check permissions.')
input('Press Enter to exit...')
return False
test_file.close()
os.remove(test_file.name)
result = True
# Retrieve singleton reference to system object
system = PySpin.System.GetInstance()
# Get current library version
version = system.GetLibraryVersion()
print('Library version: {}.{}.{}.{}\n'.format(version.major, version.minor, version.type, version.build))
# Retrieve list of cameras from the system
cam_list = system.GetCameras()
num_cameras = cam_list.GetSize()
print('Number of cameras detected: {}\n'.format(num_cameras))
# Finish if there are no cameras
if num_cameras == 0:
# Clear camera list before releasing system
cam_list.Clear()
# Release system instance
system.ReleaseInstance()
print('Not enough cameras!')
input('Done! Press Enter to exit...')
return False
# Run example on each camera
for i, cam in enumerate(cam_list):
print('Running example for camera {}...\n'.format(i))
result &= run_single_camera(cam)
print('Camera {} example complete...\n'.format(i))
# Release reference to camera
del cam
# Clear camera list before releasing system
cam_list.Clear()
# Release system instance
system.ReleaseInstance()
input('Done! Press Enter to exit...')
return result
| 17,776
|
def parse_predictions(est_data, gt_data, config_dict):
""" Parse predictions to OBB parameters and suppress overlapping boxes
Args:
est_data, gt_data: dict
{point_clouds, center, heading_scores, heading_residuals,
size_scores, size_residuals, sem_cls_scores}
config_dict: dict
{dataset_config, remove_empty_box, use_3d_nms, nms_iou,
use_old_type_nms, conf_thresh, per_class_proposal}
Returns:
batch_pred_map_cls: a list of len == batch size (BS)
[pred_list_i], i = 0, 1, ..., BS-1
where pred_list_i = [(pred_sem_cls, box_params, box_score)_j]
where j = 0, ..., num of valid detections - 1 from sample input i
"""
eval_dict = {}
pred_center = est_data['center'] # B,num_proposal,3
pred_heading_class = torch.argmax(est_data['heading_scores'], -1) # B,num_proposal
heading_residuals = est_data['heading_residuals_normalized'] * (
np.pi / config_dict['dataset_config'].num_heading_bin) # Bxnum_proposalxnum_heading_bin
pred_heading_residual = torch.gather(heading_residuals, 2,
pred_heading_class.unsqueeze(-1)) # B,num_proposal,1
pred_heading_residual.squeeze_(2)
pred_size_class = torch.argmax(est_data['size_scores'], -1) # B,num_proposal
size_residuals = est_data['size_residuals_normalized'] * torch.from_numpy(
config_dict['dataset_config'].mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0)
pred_size_residual = torch.gather(size_residuals, 2,
pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 1,
3)) # B,num_proposal,1,3
pred_size_residual.squeeze_(2)
pred_sem_cls = torch.argmax(est_data['sem_cls_scores'], -1) # B,num_proposal
sem_cls_probs = softmax(est_data['sem_cls_scores'].detach().cpu().numpy()) # B,num_proposal,10
pred_sem_cls_prob = np.max(sem_cls_probs, -1) # B,num_proposal
num_proposal = pred_center.shape[1]
# Since we operate in upright_depth coord for points, while util functions
# assume upright_camera coord.
bsize = pred_center.shape[0]
pred_corners_3d_upright_camera = np.zeros((bsize, num_proposal, 8, 3))
pred_center_upright_camera = flip_axis_to_camera(pred_center.detach().cpu().numpy())
for i in range(bsize):
for j in range(num_proposal):
heading_angle = config_dict['dataset_config'].class2angle( \
pred_heading_class[i, j].detach().cpu().numpy(), pred_heading_residual[i, j].detach().cpu().numpy())
box_size = config_dict['dataset_config'].class2size( \
int(pred_size_class[i, j].detach().cpu().numpy()), pred_size_residual[i, j].detach().cpu().numpy())
corners_3d_upright_camera = get_3d_box(box_size, -heading_angle, pred_center_upright_camera[i, j, :])
pred_corners_3d_upright_camera[i, j] = corners_3d_upright_camera
K = pred_center.shape[1] # K==num_proposal
nonempty_box_mask = np.ones((bsize, K))
if config_dict['remove_empty_box']:
# -------------------------------------
# Remove predicted boxes without any point within them..
batch_pc = gt_data['point_clouds'].cpu().numpy()[:, :, 0:3] # B,N,3
for i in range(bsize):
pc = batch_pc[i, :, :] # (N,3)
for j in range(K):
box3d = pred_corners_3d_upright_camera[i, j, :, :] # (8,3)
box3d = flip_axis_to_depth(box3d)
pc_in_box, inds = extract_pc_in_box3d(pc, box3d)
if len(pc_in_box) < 5:
nonempty_box_mask[i, j] = 0
# -------------------------------------
obj_logits = est_data['objectness_scores'].detach().cpu().numpy()
obj_prob = softmax(obj_logits)[:, :, 1] # (B,K)
if not config_dict['use_3d_nms']:
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K), dtype=np.uint8)
for i in range(bsize):
boxes_2d_with_prob = np.zeros((K, 5))
for j in range(K):
boxes_2d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_2d_with_prob[j, 2] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_2d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_2d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_2d_with_prob[j, 4] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
pick = nms_2d_faster(boxes_2d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert (len(pick) > 0)
pred_mask[i, nonempty_box_inds[pick]] = 1
eval_dict['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict['use_3d_nms'] and (not config_dict['cls_nms']):
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K), dtype=np.uint8)
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 7))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 2] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 4] = np.max(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 5] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
pick = nms_3d_faster(boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert (len(pick) > 0)
pred_mask[i, nonempty_box_inds[pick]] = 1
eval_dict['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict['use_3d_nms'] and config_dict['cls_nms']:
# ---------- NMS input: pred_with_prob in (B,K,8) -----------
pred_mask = np.zeros((bsize, K), dtype=np.uint8)
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 8))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 2] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 4] = np.max(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 5] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
boxes_3d_with_prob[j, 7] = pred_sem_cls[i, j] # only suppress if the two boxes are of the same class!!
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
pick = nms_3d_faster_samecls(boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert (len(pick) > 0)
pred_mask[i, nonempty_box_inds[pick]] = 1
eval_dict['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
return eval_dict, {'pred_corners_3d_upright_camera': pred_corners_3d_upright_camera,
'sem_cls_probs': sem_cls_probs,
'obj_prob': obj_prob,
'pred_sem_cls': pred_sem_cls}
| 17,777
|
def benchmark_op(op, burn_iters: int = 2, min_iters: int = 10):
"""Final endpoint for all kb.benchmarks functions."""
assert not tf.executing_eagerly()
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
bm = tf.test.Benchmark()
result = bm.run_op_benchmark(
sess, op, burn_iters=burn_iters, min_iters=min_iters
)
summarize(result)
return result
| 17,778
|
def main():
""""The main function, creates everything and starts the polling loop."""
if len(sys.argv) < 2:
print("No config file specified on the command line! Usage: "
"python3 sensorReporter.py [config].ini")
sys.exit(1)
config_file = sys.argv[1]
global poll_mgr
poll_mgr = create_poll_manager(config_file)
# Register functions to handle signals
register_sig_handlers(config_file, poll_mgr)
# Starting polling loop
poll_mgr.start()
| 17,779
|
def run_truncated_sprt(list_alpha, list_beta, logits_concat, labels_concat, verbose=False):
""" Calculate confusion matrix, mean hitting time, and truncate rate of a batch.
Args:
list_alpha: A list of floats.
list_beta: A list of floats with the same length as list_alpha's.
logits_concat: A logit Tensor with shape (batch, (duration - order_sprt), order_sprt + 1, 2). This is the output of datasets.data_processing.sequential_concat(logit_slice, labels_slice)
labels_concat: A binary label Tensor with shape (batch size,) with label = 0 or 1. This is the output of datasets.data_processing.sequential_concat(logit_slice, labels_slice).
Returns:
        dict_confmx_sprt: A dictionary with keys like "thresh=-0.2342, 0.2342". Value is a confusion matrix Tensor.
        dict_mean_hittimes: A dictionary with the same keys. Value is a mean hitting time.
        dict_var_hittimes: A dictionary with the same keys. Value is the variance of the hitting times.
        dict_truncate_rates: A dictionary with the same keys. Value is a truncate rate.
"""
dict_confmx_sprt = dict()
dict_mean_hittimes = dict()
dict_var_hittimes = dict()
dict_truncate_rates = dict()
batch_size_tmp = labels_concat.shape[0]
for alpha, beta in zip(list_alpha, list_beta):
# Calc thresholds
alpha = float(alpha)
beta = float(beta)
thresh = [np.log(beta/(1-alpha)), np.log((1-beta)/alpha)]
key = "thresh={:6.4f},{:7.4f}".format(thresh[0], thresh[1])
# Run truncated sprt
confmx, mean_hittime, var_hittime, truncate_rate = binary_truncated_sprt(logits_concat, labels_concat, alpha, beta)
dict_confmx_sprt[key] = confmx
dict_mean_hittimes[key] = mean_hittime
dict_var_hittimes[key] = var_hittime
dict_truncate_rates[key] = truncate_rate
if verbose:
print("====================================")
print("SPRT w/ alpha={}, beta={}".format(alpha, beta))
print("Thresholds = {}".format(thresh))
print("Confusion Matrix")
print(confmx)
print("Mean Hitting Time: {} +- {}".format(mean_hittime, tf.sqrt(var_hittime)))
print("truncate: {} / {} = {}".format(tf.round(truncate_rate*batch_size_tmp), batch_size_tmp, truncate_rate))
print("====================================")
return dict_confmx_sprt, dict_mean_hittimes, dict_var_hittimes, dict_truncate_rates
| 17,780
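The thresholds used above are Wald's SPRT approximations, log(beta / (1 - alpha)) and log((1 - beta) / alpha). A quick standalone check of the threshold pair and the dictionary key format, independent of the TensorFlow-specific parts:

import numpy as np

alpha, beta = 0.05, 0.05
thresh = [np.log(beta / (1 - alpha)), np.log((1 - beta) / alpha)]
key = "thresh={:6.4f},{:7.4f}".format(thresh[0], thresh[1])
print(thresh)  # approximately [-2.9444, 2.9444]
print(key)     # 'thresh=-2.9444, 2.9444'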
|
def get_users(metadata):
"""
    Pull users from revision metadata, handling hidden-user errors.
Parameters:
metadata: sheet of metadata from mwclient
Returns:
the list of users
"""
users = []
for rev in metadata:
try:
users.append(rev["user"])
        except KeyError:
users.append(None)
return users
| 17,781
|
def solve(FLT_MIN, FLT_MAX):
"""Solving cos(x) <= -0.99, dx/dt=1, x(0) = 0
# Basic steps:
# 1. First compute the n terms for each ode
# 2. Next replace the guard with ode(t), so that it is only in t
# 3. Then compute the number of terms needed for g(t)
# 4. Finally, compute g(t) = 0 and g(t)-2g(0) = 0
# 5. Note that computing number of terms "n" in taylor essentially
# guarantees that tᵣ - t ≤ floating point error only, specified by the
# polynomial solver.
"""
# XXX: This is the theta
def test_multivariate():
# LTI is easy to solve
# Xdiff = S.sympify('(5*x(t) + 2*y(t) + 1)')
# Time varying, takes more time in general,
# with increasing power for t^n
# Xdiff = S.sympify('(5*x(t) + 2*y(t) + t**3)')
# Non linear with periodic functions
# Xdiff = S.sympify('sin(sqrt(x(t)+1))')
# import math
# FLT_MIN = 0
# FLT_MAX = 2*math.pi
# More complex ode
# Xdiff = S.sympify('sin(sin(x(t)+1))')
# The angles can only be between 0 and 2π
# import math
# FLT_MIN = -2*math.pi
# FLT_MAX = 2*math.pi
# A sqrt
# Xdiff = S.sympify('sqrt(x(t)+1)')
# The ones below need to have a reduced search space bound for
# continous variables.
# Another sqrt, does not seem to converge
# Xdiff = S.sympify('x(t)*t') # Does not work
        # Now multiplication, seems to never converge.
Xdiff = S.sympify('exp(2*x(t))') # Does not work either
# Using scaling factor, to reduce the bounds of the maximisation
# problem.
FLT_MIN = -1e1
FLT_MAX = 1e1
return FLT_MIN, FLT_MAX, Xdiff
FLT_MIN, FLT_MAX, tomaximize = test_multivariate()
xt = S.sympify('x(t)')
x = S.abc.x
yt = S.sympify('y(t)')
y = S.abc.y
# Coupled ode example
(tokens, nx) = getN({xt.diff(t): ([tomaximize],
{yt.diff(t): (xt,
# args always in
# same order for
# everyone
[x, y, t])},
# Always list all the replacements
{xt: x, yt: y},
[x, y, t])},
FLT_MIN=FLT_MIN, FLT_MAX=FLT_MAX, epsilon=1e-6)
# print(tokens)
print('required terms for θ satisfying Lipschitz constant:', nx)
# Now make the taylor polynomial
taylorxcoeffs = [5*S.pi/2, 1] + [0]*(nx-2)
# These are the smooth tokens
taylorxpoly = sum([t**i*v for i, v in zip(range(nx), taylorxcoeffs)])
# The theta' taylor polynomial
print('θ(t) = ', taylorxpoly)
# The guard function that needs the lipschitz constant
def guard():
gt = (S.cos(taylorxpoly)+0.99)
return gt.diff(t)
gt = S.sympify('g(t)')
tokens, n = getN({gt.diff(t): ([guard()], dict(), dict(), [t])})
# print(tokens)
print('Number of terms for cos(%s)+0.99: %s' % (taylorxpoly, n))
# Now we do the example of the ode with taylor polynomial
cosseries1 = S.fps(S.cos(taylorxpoly)+0.99, x0=0).polynomial(n=n)
print('Guard taylor polynomial:', cosseries1, '\n')
# print(S.simplify(cosseries1))
root = None
try:
root1 = S.nsolve(cosseries1, t, 0, dict=True)[0][t]
root = root1
except ValueError:
print('No root for g(t)=0')
# Now the second one, this one fails
# g(t) - 2*g(0) = 0
cosseries2 = S.fps(S.cos((5*S.pi/2) + t)-1.98, x0=0).polynomial(n=n)
# print(S.simplify(cosseries2))
try:
root2 = S.nsolve(cosseries2, t, 0, dict=True)[0][t]
root = min(root, root2)
except ValueError:
print('No root for g(t)-2*g(0) = 0')
print('guard Δt:', root)
| 17,782
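The guard handling above leans on two SymPy building blocks: a truncated power series via S.fps(...).polynomial(n=...) and a numeric root via S.nsolve, which raises ValueError when no root is found. A standalone sketch of just those two calls, using a simpler illustrative guard g(t) = cos(t) - 1/2 rather than the original one:

import sympy as S
from sympy.abc import t

# 8-term truncated Taylor polynomial of cos(t) around t = 0, shifted by -1/2
poly = S.fps(S.cos(t), x0=0).polynomial(n=8) - S.Rational(1, 2)

# Numeric root near t = 1 (the true root of cos(t) = 1/2 is pi/3 ~ 1.0472)
root = S.nsolve(poly, t, 1, dict=True)[0][t]
print(root)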
|
def run_tvm_graph(
coreml_model, target, device, input_data, input_name, output_shape, output_dtype="float32"
):
"""Generic function to compile on relay and execute on tvm"""
if isinstance(input_data, list):
shape_dict = {}
dtype_dict = {}
for i, e in enumerate(input_name):
shape_dict[e] = input_data[i].shape
dtype_dict[e] = input_data[i].dtype
else:
shape_dict = {input_name: input_data.shape}
dtype_dict = {input_name: input_data.dtype}
mod, params = relay.frontend.from_coreml(coreml_model, shape_dict)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
from tvm.contrib import graph_executor
m = graph_executor.GraphModule(lib["default"](device))
# set inputs
if isinstance(input_data, list):
for i, e in enumerate(input_name):
m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
else:
m.set_input(input_name, tvm.nd.array(input_data.astype(input_data.dtype)))
# execute
m.run()
# get outputs
if isinstance(output_shape, list) and isinstance(output_dtype, list):
tvm_output_list = []
for i, s in enumerate(output_shape):
tvm_output = m.get_output(i, tvm.nd.empty((s), output_dtype[i]))
tvm_output_list.append(tvm_output.numpy())
return tvm_output_list
else:
if not output_shape:
tvm_output = m.get_output(0)
else:
tvm_output = m.get_output(0, tvm.nd.empty((output_shape), output_dtype))
return tvm_output.numpy()
| 17,783
|
def detect_peaks(data, srate):
"""
    Obtain maximum and minimum values (peaks and troughs) from a blood pressure or pleth waveform.
    The returned minlist always has one fewer element than maxlist.
"""
ret = []
if not isinstance(data, np.ndarray):
data = np.array(data)
raw_data = np.copy(data)
raw_srate = srate
# resampling rate to 100Hz
data = resample_hz(data, srate, 100)
srate = 100
# upper and lower bound of the heart rate (Hz = /sec)
# heart rate = hf * 60;
fh = 200 / 60 # 3.3
fl = 30 / 60 # 0.5
# estimate hr
y1 = band_pass(data, srate, 0.5 * fl, 3 * fh)
# Divide the entire x into four regions and use the median of these
# hf = []
# for(var i = 0; i < 4; i++) {
# var subw = new Wav(srate, y1.vals.copy(data.length / 4 * i, data.length / 4 * (i+1)));
# hf[i] = subw.estimate_heart_rate(fl, fh);
# if(hf[i] == 0) {
# console.log("HR estimation failed, assume 75");
# hf[i] = 75 / 60;
# }
# }
# hf = hf.median();
# Whole heart freq estimation
hf = estimate_heart_freq(y1, srate)
if hf == 0:
print("HR estimation failed, assume 75")
hf = 75 / 60
# band pass filter again with heart freq estimation
y2 = band_pass(data, srate, 0.5 * fl, 2.5 * hf)
d2 = np.diff(y2)
# detect peak in gradient
p2 = detect_maxima(d2, 90)
# detect real peak
y3 = band_pass(data, srate, 0.5 * fl, 10 * hf)
p3 = detect_maxima(y3, 60)
# find closest p3 that follows p2
p4 = []
last_p3 = 0
for idx_p2 in p2:
idx_p3 = 0
for idx_p3 in p3:
if idx_p3 > idx_p2:
break
if idx_p3 != 0:
if last_p3 != idx_p3:
p4.append(idx_p3)
last_p3 = idx_p3
# nearest neighbor and inter beat interval correction
# p: location of detected peaks
pc = []
# find all maxima before preprocessing
m = detect_maxima(data, 0)
m = np.array(m)
# correct peaks location error due to preprocessing
last = -1
for idx_p4 in p4:
cand = find_nearest(m, idx_p4)
if cand != last:
pc.append(cand)
last = cand
ht = 1 / hf # beat interval (sec)
# correct false negatives (FN)
    # Check whether an r-peak is missing from pc and fill it in.
i = -1
while i < len(pc):
if i < 0:
idx_from = 0
else:
idx_from = pc[i]
if i >= len(pc) - 1:
idx_to = len(data)-1
else:
idx_to = pc[i+1]
# find false negative and fill it
if idx_to - idx_from < 1.75 * ht * srate:
i += 1
continue
        # The missing peak cannot lie within 0.2 beat intervals of either end
idx_from += 0.2 * ht * srate
idx_to -= 0.2 * ht * srate
# Find missing peak and add it
# find the maximum value from idx_from to idx_to
idx_max = -1
val_max = 0
for j in range(np.searchsorted(m, idx_from), len(m)):
idx_cand = m[j]
if idx_cand >= idx_to:
break
if idx_max == -1 or val_max < data[idx_cand]:
val_max = data[idx_cand]
idx_max = idx_cand
        # If there is no candidate for this FN, skip it
        if idx_max != -1:  # otherwise add idx_max and restart from there
pc.insert(i+1, idx_max)
i -= 1
i += 1
# correct false positives (FP)
i = 0
while i < len(pc) - 1:
idx1 = pc[i]
idx2 = pc[i+1]
if idx2 - idx1 < 0.75 * ht * srate: # false positive
idx_del = i + 1 # default: delete i+1
if 1 < i < len(pc) - 2:
# minimize heart rate variability
idx_prev = pc[i-1]
idx_next = pc[i+2]
# find center point distance
d1 = abs(idx_next + idx_prev - 2 * idx1)
d2 = abs(idx_next + idx_prev - 2 * idx2)
if d1 > d2:
idx_del = i
else:
idx_del = i+1
elif i == 0:
idx_del = i
elif i == len(pc) - 2:
idx_del = i+1
pc.pop(idx_del)
i -= 1
i += 1
    # remove duplicates
i = 0
for i in range(0, len(pc) - 1):
if pc[i] == pc[i+1]:
pc.pop(i)
i -= 1
i += 1
# find nearest peak in real data
# We downsample x to srate to get maxidxs. ex) 1000 Hz -> 100 Hz
# Therefore, the position found by maxidx may differ by raw_srate / srate.
maxlist = []
ratio = math.ceil(raw_srate / srate)
for maxidx in pc:
        idx = int(maxidx * raw_srate / srate)  # estimated idx -> not precise
maxlist.append(max_idx(raw_data, idx - ratio - 1, idx + ratio + 1))
# get the minlist from maxlist
minlist = []
for i in range(len(maxlist) - 1):
minlist.append(min_idx(raw_data, maxlist[i], maxlist[i+1]))
return [minlist, maxlist]
| 17,784
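A hypothetical smoke test for detect_peaks with a synthetic pulse-like waveform; it assumes the module's helpers (resample_hz, band_pass, detect_maxima, estimate_heart_freq, max_idx, min_idx, find_nearest) are importable alongside it:

import numpy as np

srate = 500                              # Hz
tvec = np.arange(0, 10, 1 / srate)       # 10 s of samples
wave = np.sin(2 * np.pi * 1.25 * tvec)   # ~75 bpm synthetic "pleth" signal

minlist, maxlist = detect_peaks(wave, srate)
print(len(maxlist), len(minlist))        # minlist has one fewer entry than maxlist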
|
def unsubscribe_user(address):
    """Unsubscribe a user completely from the Mechanical Mooc - all sequences."""
# remove from sequence group
signups = signup_model.get_all_user_signups(address)
for user_signup in signups:
sequence_list = sequence_model.sequence_list_name(user_signup['sequence'])
mailgun_api.remove_list_member(sequence_list, address)
# remove from small groups
groups = groups_model.get_member_groups(address)
for group in groups:
groups_model.remove_group_member(group['uri'], address)
groups_model.sync_group_with_mailgun(group['uri'])
# mark as unsubscribed in the signups
for user_signup in signups:
signup_model.delete_signup(address, user_signup['sequence'])
mailgun_api.delete_all_unsubscribes(address)
| 17,785
|
def add_entity_to_watchlist(client: Client, args) -> Tuple[str, Dict, Dict]:
"""Adds an entity to a watchlist.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
watchlist_name = args.get('watchlist_name')
entity_type = args.get('entity_type')
entity_name = args.get('entity_name')
    expiry_days = args.get('expiry_days', '30')
response = client.add_entity_to_watchlist_request(watchlist_name, entity_type, entity_name, expiry_days)
if 'successfull' not in response:
raise Exception(f'Failed to add entity {entity_name} to the watchlist {watchlist_name}.\n'
f'Error from Securonix is: {response}.')
human_readable = f'Added successfully the entity {entity_name} to the watchlist {watchlist_name}.'
return human_readable, {}, response
| 17,786
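A hedged sketch of how this command function is typically exercised; the stub client below only mimics the expected call shape and success string, and is not the real Securonix client:

class StubClient:
    def add_entity_to_watchlist_request(self, watchlist_name, entity_type, entity_name, expiry_days):
        # The real client issues an HTTP request; the command above only checks
        # that the response contains 'successfull' (spelling as returned by the API).
        return 'Add to watchlist successfull for entity {}'.format(entity_name)

args = {'watchlist_name': 'suspicious_users', 'entity_type': 'Users', 'entity_name': 'jdoe'}
human_readable, outputs, raw_response = add_entity_to_watchlist(StubClient(), args)
print(human_readable)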
|
def install():
"""install openssl locally."""
# clone repository
try:
clone_repository(openssl_repo_link, openssl_repo_path, openssl_name)
except LocationExists as exception:
pass
# remove local changes
try:
remove_local_changes(openssl_repo_path, openssl_name)
except NotAGitRepo as exception:
print_msg_titled(
"Error while updating {} repository".format(openssl_name), str(exception)
)
# update local repository
try:
update_repository(openssl_repo_path, openssl_name)
except NotAGitRepo as exception:
print_msg_titled(
"Error while updating {} repository".format(openssl_name), str(exception)
)
# Configure
returncode, stdout, stderr = Configure(
openssl_repo_path,
[
"--prefix={}".format(openssl_install_path),
"--openssldir={}".format(openssl_directory),
],
openssl_name,
)
if returncode != 0:
print_stdoutputs(
"[bold red]Error while Configuring {}[/]".format(openssl_name),
stdout,
stderr,
)
# make
returncode, stdout, stderr = make(openssl_repo_path, [], openssl_name)
if returncode != 0:
print_stdoutputs(
"[bold red]Error while compiling {}[/]".format(openssl_name), stdout, stderr
)
# make install
returncode, stdout, stderr = make_install(openssl_repo_path, [], openssl_name)
if returncode != 0:
print_stdoutputs(
"[bold red]Error while installing {}[/]".format(openssl_name),
stdout,
stderr,
)
console.print(
"[bold green]{} has been installed with success[/]".format(openssl_name)
)
| 17,787
|
def is_base255(channels):
"""check if a color is in base 01"""
if isinstance(channels, str):
return False
return all(_test_base255(channels).values())
| 17,788
|
def make_unrestricted_prediction(solution: SolverState) -> tuple[Role, ...]:
"""
Uses a list of true/false statements and possible role sets
to return a rushed list of predictions for all roles.
Does not restrict guesses to the possible sets.
"""
all_role_guesses, curr_role_counts = get_basic_guesses(solution)
solved = recurse_assign(solution, all_role_guesses, curr_role_counts, False)
switch_dict = get_switch_dict(solution)
final_guesses = tuple(solved[switch_dict[i]] for i in range(len(solved)))
if len(final_guesses) != const.NUM_ROLES:
raise RuntimeError("Could not find unrestricted assignment of roles.")
return final_guesses
| 17,789
|
def get_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description="Expression aggregator")
parser.add_argument(
"-e", "--expressions", nargs="+", help="Expressions", required=True
)
parser.add_argument(
"-d", "--descriptors", nargs="+", help="Descriptors", required=True
)
parser.add_argument("-s", "--source", help="Source", required=True)
parser.add_argument(
"-t", "--expression-type", help="Expression type", required=True
)
parser.add_argument("-g", "--group-by", help="Group by", required=True)
parser.add_argument("-a", "--aggregator", help="Aggregator")
parser.add_argument("-b", "--box-plot-output", help="Box plot output file name")
parser.add_argument(
"-l", "--log-box-plot-output", help="Log box plot output file name"
)
parser.add_argument(
"-x", "--expressions-output", help="Expressions output file name"
)
return parser.parse_args()
| 17,790
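A hypothetical invocation of this parser; the file and label names are placeholders, shown by overriding sys.argv instead of running from a shell:

import sys

sys.argv = [
    "expression_aggregator.py",
    "-e", "sample1.tab.gz", "sample2.tab.gz",
    "-d", "case", "control",
    "-s", "ENSEMBL",
    "-t", "TPM",
    "-g", "species",
]
args = get_args()
print(args.expressions, args.group_by)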
|
def check(value, msg=""):
"""Check value for membership; raise ValueError if fails."""
if not value:
raise ValueError(f"ERROR {msg}: {value} should be true")
| 17,791
|
def initworker(logq, progq, ctrlq, stdin=None):
"""initializer that sets up logging and progress from sub procs """
logToQueue(logq)
progressToQueue(progq)
controlByQueue(ctrlq)
setproctitle(current_process().name)
signal.signal(signal.SIGINT, signal.SIG_IGN)
if hasattr(signal, 'SIGBREAK'):
signal.signal(signal.SIGBREAK, signal.SIG_IGN)
getLogger().debug("Worker " + current_process().name + " logging started")
if stdin is not None:
sys.stdin = os.fdopen(stdin, 'r')
print "Worker " + current_process().name + " opened stdin"
| 17,792
|
def computeFourteenMeVPoint(xs, E14='14.2 MeV', useCovariance=True, covariance=None):
"""
Compute the value of the cross section at 14.2 MeV.
If the covariance is provided, the uncertainty on the 14.2 MeV point will be computed.
:param xs: reference to the cross section
:param E14: the 14 MeV point to use (in case you want to override the default of 14.2 MeV)
:param useCovariance: use this to override covariance usage
:type useCovariance: bool
:param covariance: covariance to use when computing uncertainty on the spectral average.
If None (default: None), no uncertainty is computed.
:type covariance: covariance instance or None
:rtype: PQU
"""
return computeValueAtAPoint(xs, E14, useCovariance=useCovariance, covariance=covariance)
| 17,793
|
def conn_reshape_directed(da, net=False, sep='-', order=None, rm_missing=False,
fill_value=np.nan, to_dataframe=False,
inplace=False):
"""Reshape a raveled directed array of connectivity.
    This function takes a DataArray of shape (n_pairs, n_directions) or
    (n_pairs, n_times, n_directions) where n_pairs reflects pairs of roi
    (e.g. 'roi_1-roi_2') and n_directions usually contains the bidirected
    'x->y' and 'y->x'. This function reshapes the input array so that rows
    contain the sources and columns the targets, leading to a non-symmetric
    DataArray of shape (n_roi, n_roi, n_times). A typical use case for this
    function is after computing the covariance-based Granger causality.
    Parameters
    ----------
    da : xarray.DataArray
        Xarray DataArray of shape (n_pairs, n_times, n_directions) whose roi
        dimension contains the pairs (roi_1-roi_2, roi_1-roi_3, etc.). The
        direction dimension should contain the coordinates 'x->y' and 'y->x'.
    net : bool | False
        If True, return the net difference 'x->y' minus 'y->x' instead of the
        two separate directions.
    sep : string | '-'
        Separator used to separate the pairs of roi names.
    order : list | None
        List of roi names used to reorder the output.
    rm_missing : bool | False
        When reordering the connectivity array, choose whether to reindex even
        if some regions are missing (rm_missing=False) or to drop the missing
        regions (rm_missing=True).
    fill_value : float | np.nan
        Value used to fill missing pairs (e.g. the diagonal).
    to_dataframe : bool | False
        Convert the output to a DataFrame. Only possible if the input does not
        contain a time axis.
    inplace : bool | False
        If False (default), work on a copy of the input array.
Returns
-------
da_out : xarray.DataArray
DataArray of shape (n_roi, n_roi, n_times)
See also
--------
conn_covgc
"""
assert isinstance(da, xr.DataArray)
if not inplace:
da = da.copy()
assert ('roi' in list(da.dims)) and ('direction' in list(da.dims))
if 'times' not in list(da.dims):
da = da.expand_dims("times")
# get sources, targets names and sorted full list
sources, targets, roi_tot = _untangle_roi(da, sep)
# transpose, reindex and reorder (if needed)
da_xy, da_yx = da.sel(direction='x->y'), da.sel(direction='y->x')
if net:
da = xr.concat((da_xy - da_yx, da_xy - da_yx), 'roi')
else:
da = xr.concat((da_xy, da_yx), 'roi')
da, order = _dataarray_unstack(da, sources, targets, roi_tot, fill_value,
order, rm_missing)
# dataframe conversion
if to_dataframe:
da = _dataframe_conversion(da, order)
return da
| 17,794
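A small hypothetical example for conn_reshape_directed: three directed pairs with 'x->y'/'y->x' values, reshaped into the (n_roi, n_roi, n_times) layout described in the docstring. It assumes the function's private helpers are importable from the same module:

import numpy as np
import xarray as xr

pairs = ['r0-r1', 'r0-r2', 'r1-r2']
data = np.random.rand(len(pairs), 5, 2)   # (n_pairs, n_times, n_directions)
da = xr.DataArray(
    data,
    dims=('roi', 'times', 'direction'),
    coords={'roi': pairs, 'times': np.arange(5), 'direction': ['x->y', 'y->x']},
)
da_out = conn_reshape_directed(da, sep='-')
print(da_out.shape)   # (3, 3, 5) per the docstring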
|
def cmd_appetite(manifest, extra_params, num_threads=1, delete_logs=False):
"""Run appetite with defined params
:param manifest: manifest to reference
:param extra_params: extra params if needed
:param num_threads: Number of threads to use
:param delete_logs: Delete logs before running
:return: output from appetite call
"""
if delete_logs:
delete_log_dir()
create_log()
cmd = list(COMMON_CMD) + shlex.split("--num-conns %s --apps-manifest %s %s" % (
num_threads, manifest, extra_params))
return subprocess.check_call(cmd, cwd=SCRIPT_PATH, shell=False)
| 17,795
|
def args_fixup():
"""
Various cleanups/initializations based on result of parse_args().
"""
global saved_key_handle
saved_key_handle = args.hmac_kh
args.key_handle = pyhsm.util.key_handle_to_int(args.hmac_kh)
if not (args.mode_otp or args.mode_short_otp or args.mode_totp or args.mode_hotp or args.mode_pwhash):
my_log_message(args, syslog.LOG_ERR, 'No validation mode enabled')
sys.exit(1)
global client_ids
    if args.clients_file is not None:
if not args.mode_otp:
my_log_message(args, syslog.LOG_ERR, 'Clients file should only be used with --otp.')
sys.exit(1)
client_ids = load_clients_file(args.clients_file)
if not client_ids:
my_log_message(args, syslog.LOG_ERR, 'Failed loading clients file "%s"' % (args.clients_file))
sys.exit(1)
else:
# we accept failure to load this file when the default is used
loaded_client_ids = load_clients_file(default_clients_file)
if loaded_client_ids:
args.clients_file = default_clients_file
client_ids = loaded_client_ids
| 17,796
|
def create_manager(user):
"""
Return a ManageDNS object associated with user (for history)
"""
if 'REVERSE_ZONE' in app.config:
revzone = app.config['REVERSE_ZONE']
else:
revzone = None
return ManageDNS(nameserver=app.config['SERVER'], forward_zone=app.config['FORWARD_ZONE'],
reverse_zone=revzone, user=user, key_name=key_name,
key_hash=key_hash)
| 17,797
|
def ncar_topo_adj(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
Topographic adjustment of incoming shortwave radiation fluxes,
given input parameters.
    :param input_forcings:
    :param ConfigOptions:
    :param GeoMetaWrfHydro:
    :param MpiConfig:
    :return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Performing topographic adjustment to incoming " \
"shortwave radiation flux."
err_handler.log_msg(ConfigOptions, MpiConfig)
# Establish where we have missing values.
try:
indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform NDV search on input forcings"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
# By the time this function has been called, necessary input static grids (height, slope, etc),
# should have been calculated for each local slab of data.
DEGRAD = math.pi/180.0
DPD = 360.0/365.0
try:
DECLIN, SOLCON = radconst(ConfigOptions)
except:
ConfigOptions.errMsg = "Unable to calculate solar constants based on datetime information."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
coszen_loc, hrang_loc = calc_coszen(ConfigOptions,DECLIN,GeoMetaWrfHydro)
except:
ConfigOptions.errMsg = "Unable to calculate COSZEN or HRANG variables for topographic adjustment " \
"of incoming shortwave radiation"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
TOPO_RAD_ADJ_DRVR(GeoMetaWrfHydro,input_forcings,coszen_loc,DECLIN,SOLCON,
hrang_loc)
except:
ConfigOptions.errMsg = "Unable to perform final topographic adjustment of incoming " \
"shortwave radiation fluxes."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
# Assign missing values based on our mask.
input_forcings.final_forcings[indNdv] = ConfigOptions.globalNdv
# Reset variables to free up memory
DECLIN = None
SOLCON = None
coszen_loc = None
hrang_loc = None
indNdv = None
| 17,798
|
def rank_urls(urls, year=None, filename=None):
"""
    Takes a list of URLs and searches for them in Hacker News submissions.
    Prints each URL and its total points in descending order of points, or
    saves the results to filename if given. Searches submissions from all
    years unless year is given.
"""
now = datetime.now()
    if year and year > now.year:
        print("Please enter a valid year parameter (example: " + str(now.year) + ") or leave out for all time.")
        return None
leaderboard = {}
count = 0
for url in urls:
query = 'http://hn.algolia.com/api/v1/search?query=' + url + '&restrictSearchableAttributes=url'
r = requests.get(query)
if r:
data = json.loads(r.text)
total_score = 0
for item in data['hits']:
date = item['created_at'][:-5]
date = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')
now = datetime.now()
                if not year or date.year == year:
                    total_score += item['points']
count += 1
progress = (count / len(urls) ) * 100.00
sys.stdout.write(" Progress: %d%% \r" % (progress) )
sys.stdout.flush()
leaderboard[url] = total_score
time.sleep(1) # Limit to 1 api request per second
sorted_leaderboard = reversed(sorted(leaderboard.items(), key=operator.itemgetter(1)))
if filename:
f = open(filename, 'w')
for key, value in sorted_leaderboard:
f.write(str(value) + "," + key + '\n')
f.close()
print('Results saved to ' + filename)
else:
for key, value in sorted_leaderboard:
print(str(value) + "," + key)
| 17,799
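Hypothetical usage of rank_urls; it needs network access to the HN Algolia API, and the URLs below are placeholders:

urls = [
    "https://example.com/post-about-python",
    "https://example.com/post-about-rust",
]

# Print scores for all time ...
rank_urls(urls)

# ... or restrict to a single year and write "points,url" lines to a file.
rank_urls(urls, year=2023, filename="leaderboard.csv")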
|