content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import random
def generator_order_code(sex=None):
    """Generate the 3-digit sequence (order) code of a resident ID number.

    :param sex: 1 for male (odd code), 0 for female (even code),
        None for no parity constraint.
    :return: str, a 3-digit order code in [100, 999].
    """
    # BUG FIX: the upper bound was 1000; 1000 is even and would have been
    # kept for sex == 0, producing a 4-digit code. 999 keeps the result
    # 3 digits even after the parity adjustment below (min 101 - 1 = 100).
    order_code = random.randint(101, 999)
    if sex == 1:
        # Male codes are odd: step an even draw down by one.
        order_code = order_code - 1 if order_code % 2 == 0 else order_code
    if sex == 0:
        # Female codes are even: step an odd draw down by one.
        order_code = order_code if order_code % 2 == 0 else order_code - 1
    return str(order_code)
def fasta_reader(fa, q, *filters):
    """
    Reader worker for the fa file in the specified q(ueue).

    Records are parsed with Bio.SeqIO.parse() and always screened with
    has_valid_seq() first, which parses out
    (a) empty sequence strings
    (b) sequences that are only represented as 1

    The optional `filters` can be any number of callables that can
    be applied to a SeqRecord object. Each should return a single
    boolean True or False. A record is kept only if has_valid_seq()
    and every filter return True; otherwise it is skipped.

    Arguments:
        fa: Path obj: The Path representation of the fasta to read
        q: Queue obj: A multiprocessing.Manager.Queue() instance
        *filters: callables: Filtering rules that apply a test to
            the record object, each returning True or False.

    Return:
        seq_dict: dict: Dictionary that holds seq descriptions as seq ids
        and sequences, plus a 'skipped' key gathering the ids of records
        rejected by the filtering. Of the form
        {
        seq.description : sequence,
        ...,
        'skipped': [seq.id_1, ...]
        }
        The same dict is also put on q for the consuming process.
    """
    seq_dict = {"skipped": []}
    for record in SeqIO.parse(fa, "fasta"):
        keep = has_valid_seq(record)
        if filters and keep is True:
            # BUG FIX: `*filters` already packs the callables into a tuple;
            # iterating filters[0] applied only the first filter (or broke
            # outright when it was not iterable). Iterate them all.
            keep = all(f(record) for f in filters)
        if keep is True:
            seq_dict[record.description] = record.seq
        else:
            seq_dict["skipped"].append(record.id)
    q.put(seq_dict)
    return seq_dict
def test(model, test_inputs, test_labels):
    """
    Runs through one epoch - all testing examples.

    :returns: (accuracy, fraction of batches where class 0 was predicted)
    """
    correct = 0
    zero_guesses = 0
    total = len(test_labels)
    for batch_input, batch_label in zip(test_inputs, test_labels):
        # Probabilities for the single example in this batch.
        probabilities = model.call(batch_input)[0]
        predicted_diagnosis = tf.argmax(probabilities)
        if predicted_diagnosis == batch_label:
            correct += 1
        if predicted_diagnosis == 0:
            zero_guesses += 1
    return correct / total, zero_guesses / total
def mirantis(cred):
    """
    Collect node information for a Mirantis deployment by delegating to
    the generic OpenStack collector (Mirantis is OpenStack-based).

    :param cred: credentials object forwarded to _openstack()
    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return _openstack(Provider.OPENSTACK, cred) | d5ad35b8cdb6bc48d4d75d4d8d72715fe0cfb566 | 3,627,303 |
def load(path, encoding="utf-8"):
    """
    Load CDTB-format documents from a folder.

    :param path: path of the folder containing the documents
    :param encoding: text encoding of the documents
    :return: generator of structure.tree.Discourse objects
    """
    return CDTB.load(path, encoding=encoding) | 8993d79e112ff97ced08c6315523433e0d20b85d | 3,627,304 |
import os
def load_spikes(filename, recording_number):
    """
    Loads spike data, using memory mapping to improve performance.
    The returned data is not memory mapped.

    Input:
    =====
    filename - string
        path to the data file (.spikes)
    recording_number - int
        index of the recording (0, 1, 2, etc.); only waveforms from this
        recording are returned (timestamps are NOT filtered by it).
    Output:
    ======
    timestamps - np.array (N x 0)
        Timestamps for each of N spikes
    waveforms - np.array (N x channels x samples)
        Waveforms for each spike, gain-scaled to float32
    header - dict
        Information from file header
    """
    NUM_HEADER_BYTES = 1024
    header = readHeader(filename)
    # BUG FIX: the original opened the file twice and closed neither;
    # a single `with` block handles both the layout probe and the
    # timestamp loop.
    with open(filename, 'rb') as f:
        # Channel/sample counts live at fixed offsets past the header.
        numChannels = np.fromfile(f, np.dtype('<u2'), 1, offset=1043)[0]
        numSamples = np.fromfile(f, np.dtype('<u2'), 1)[0]  # can be 0, ideally 40 (divisible by 8)
        # Per-record layout: 42 bytes of record header, u2 samples,
        # then per-channel gains/thresholds and the recording number.
        SPIKE_RECORD_SIZE = 42 + \
            2 * numChannels * numSamples + \
            4 * numChannels + \
            2 * numChannels + 2
        POST_BYTES = 4 * numChannels + \
            2 * numChannels + 2
        numSpikes = (os.fstat(f.fileno()).st_size - NUM_HEADER_BYTES) // SPIKE_RECORD_SIZE
        timestamps = np.zeros((numSpikes,), dtype='<i8')
        for i in range(numSpikes):
            # BUG FIX: seek to record i *before* reading. The original
            # sought after reading, so record 0's timestamp was read twice
            # and every later timestamp was shifted by one record.
            f.seek(NUM_HEADER_BYTES + 1 + SPIKE_RECORD_SIZE * i)
            timestamps[i] = np.fromfile(f, np.dtype('<i8'), 1)
    data = np.memmap(filename, mode='r', dtype='<u2',
                     shape = (numSpikes, SPIKE_RECORD_SIZE//2),
                     offset = NUM_HEADER_BYTES)
    # Last u2 of each record holds the recording number.
    mask = data[:,-1] == recording_number
    waveforms = np.copy(data[mask, 21:-POST_BYTES//2].reshape((np.sum(mask), numChannels, numSamples))).astype('float32')
    # Convert from unsigned ADC counts to microvolts.
    waveforms -= 32768
    waveforms /= 20000  # gain
    waveforms *= 1000
    return timestamps, waveforms, header
import io
def _get_exchange_info() -> pd.DataFrame:
    """
    Returns a dataframe of exchange listings for initializing the Universe class

    Called upon initialization of the Universe class, updates the stock listings
    that are available by default when gathering stock data from the internet

    Returns:
        A dataframe of all available stock listings
    """
    # Getting updated stock listings from the Nasdaq Trader symbol directory
    base = "ftp://ftp.nasdaqtrader.com/symboldirectory/"
    dfs: list[pd.DataFrame] = []
    for file in ["nasdaqlisted.txt", "otherlisted.txt"]:
        _data = io.StringIO(request.urlopen(f"{base}{file}").read().decode())
        data = pd.read_csv(_data, sep="|")
        # The final row of each listing file is a footer, not a listing.
        dfs.append(data[:-1])
    # Dropping test stocks
    dfs = [df[df["Test Issue"] == "N"].drop("Test Issue", axis=1) for df in dfs]
    # Unpacking the dfs
    nasdaq, other = dfs
    # Adding Exchange info for nasdaq listings, dropping columns that dont match
    nasdaq["Exchange"] = "NASDAQ"
    nasdaq.drop(
        ["Market Category", "Financial Status", "NextShares"], axis=1, inplace=True
    )
    # Converting exchange info to human-readable format
    converter = {"A": "NYSEMKT", "N": "NYSE", "P": "NYSEARCA", "Z": "BATS", "V": "IEXG"}
    other["Exchange"] = other["Exchange"].map(lambda s: converter[s])
    # Dropping unnecessary data, matching column labels
    other.drop(["CQS Symbol", "NASDAQ Symbol"], axis=1, inplace=True)
    other.rename({"ACT Symbol": "Symbol"}, axis=1, inplace=True)
    # Joining frames; nasdaq/other alias the elements of dfs, so the
    # in-place edits above are reflected in the concatenation.
    data = pd.concat(dfs).sort_values("Symbol")
    return data.reset_index(drop=True) | 7384879bb4009851bd761093c04d00c60aee2feb | 3,627,306 |
import os
def create_pie_chart(data, rngs, colors=['#244268', '#426084', '#67809F', '#95A9C1', '#C6D2E0'],
                     unit_scale=1.0, measure_quantity='m^3', figsize=(33, 15),
                     legend_loc=(0.383, -0.25), zebra_color=(False, 3),
                     legend_fontsize=50, chart_fontsize=60, dpi=72, name=None,
                     output_dir=None):
    """Plots the pie chart of a given data.

    Parameters
    ----------
    data : 1D array
        Indicates the array containing the values.
    rngs : tuple of tuples
        Indicates the (min, max) ranges of the piechart.
    colors : array
        Indicates the color for the region of the piechart corresponding to the
        specific range.
    unit_scale : float
        Indicates the scale factor of the data values.
    measure_quantity : str
        Indicates the name of measure of the values.
    figsize : tuple of integers
        Indicates the size of the output figure.
    legend_loc : tuple
        Indicates the position of the legend of the figure.
    zebra_color : tuple
        Allows to change the text color of the region to white from the first to
        the specified index of the region (True, reg_index).
    legend_fontsize : integer
        Indicates the fontsize of the legend.
    chart_fontsize : integer
        Indicates the fontsize of the figure.
    dpi : integer
        Indicates the DPI of the output image.
    name : str
        Indicates the name of the output png file.
    output_dir : str
        Indicates the path to the output folder where the image will be stored.
    """
    def number(val):
        # Compact label: plain integer below 1000, 10^k for exact powers
        # of ten, scientific notation otherwise.
        if val < 1000:
            return '%d' % val
        sv = str(val)
        # BUG FIX: the original tested only `val % 10 == 0` (so e.g. 5000
        # was rendered as a power of ten) and used len(sv) - 2, which is
        # off by one (1000 -> 10^2). Detect true powers of ten and use the
        # correct exponent len(sv) - 1.
        if sv[0] == '1' and set(sv[1:]) == {'0'}:
            return r'$\mathregular{10^{%d}}$' % (len(sv) - 1)
        return '%0.0e' % val
    def get_title(v1, v2, measure_quantity):
        # Legend entry "min - max unit".
        ftm = r'%s $\minus$ %s %s'
        return ftm % (number(v1), number(v2), measure_quantity)
    data_ranges = []
    df = data * unit_scale
    for rng in rngs:
        rng_min, rng_max = rng[0], rng[1]
        data_rng = df[(df > rng_min) & (df < rng_max)]
        data_ranges.append(data_rng)
    num_elem = [len(p) for p in data_ranges]
    se = sum(num_elem)
    print(f'Num of particles: {se}')
    proc_particles = [n/float(se) * 100.0 for n in num_elem]
    for size, rng in zip(num_elem, rngs):
        # BUG FIX: the original used the `%` operator on a '{}' format
        # string, raising a TypeError at runtime.
        print('{}-{}: {}'.format(rng[0], rng[1], size))
    titles = [get_title(minv, maxv, measure_quantity) for minv, maxv in rngs]
    textprops = {'fontsize': chart_fontsize,
                 'weight': 'normal',
                 'family': 'sans-serif'}
    pie_width = 0.5
    fig, ax = plt.subplots(figsize=figsize)
    ax.axis('equal')
    patches, texts, autotexts = ax.pie(proc_particles,
                                       textprops=textprops,
                                       colors=colors,
                                       autopct='%1.1f%%',
                                       radius=1,
                                       pctdistance=1-pie_width/2)
    # Optionally render the first zebra_color[1] percentage labels white.
    if (zebra_color is not None) and (zebra_color[0]):
        for tt in autotexts[:zebra_color[1]]:
            tt.set_color('white')
    plt.setp(patches,
             width=pie_width,
             edgecolor='white')
    plt.legend(patches, titles, loc=legend_loc, fontsize=legend_fontsize)
    # Nudge labels of very small (<2%) slices sideways to avoid overlap;
    # alternates the direction for consecutive small slices.
    _d, _offset, _di = [1, -1], [0.45, 0.45], 0
    for t, p in zip(autotexts, proc_particles):
        if p < 2.0:
            pos = list(t.get_position())
            pos[0] = pos[0] + _d[_di] * _offset[_di]
            t.set_position(pos)
            _di += 1
    if (output_dir is not None) and (name is not None):
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        plt.tight_layout()
        fig.savefig(os.path.join(output_dir, f'{name}_chart.png'),
                    bbox_inches='tight', transparent=True, pad_inches=0.1, dpi=dpi)
import pathlib
def read_xr_and_concat(fname: pathlib.Path):
    """Reads the given filename and concatenates it into a single array.

    Assumes that the filename is an xarray file which was made by parsing
    many calcium analysis results; the dataset's ``dff`` variable is read
    and its entries are stacked row-wise.
    """
    # assumes the elements of `dff` are row-stackable (compatible trailing
    # shapes) -- TODO confirm against the writer of these files.
    data = xr.open_dataset(fname).dff
    return np.vstack(data) | 2b23bffcbbae0ed9e9d4cfeaaecc72107ba8cdc6 | 3,627,308 |
import os
def ee_dask_deploy(config, pb_id, image, n_workers=1, buffers=[]):
    """Deploy Dask execution engine.

    Blocks until the Dask scheduler accepts a client connection.

    :param config: configuration DB handle
    :param pb_id: processing block ID
    :param image: Docker image to deploy
    :param n_workers: number of Dask workers
    :param buffers: list of buffers to mount on Dask workers
    :return: deployment ID and Dask client handle
    """
    # Make deployment
    deploy_id = "proc-{}-dask".format(pb_id)
    values = {"image": image, "worker.replicas": n_workers}
    for i, b in enumerate(buffers):
        values["buffers[{}]".format(i)] = b
    deploy = ska_sdp_config.Deployment(
        deploy_id, "helm", {"chart": "dask", "values": values}
    )
    for txn in config.txn():
        txn.create_deployment(deploy)
    # Wait for scheduler to become available
    scheduler = deploy_id + "-scheduler." + os.environ["SDP_HELM_NAMESPACE"] + ":8786"
    client = None
    while client is None:
        try:
            client = distributed.Client(scheduler, timeout=1)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the wait loop
            # uninterruptible. The scheduler is simply not up yet;
            # retry on ordinary errors only.
            pass
    return deploy_id, client
def _filterProviders(providers, item, cfgData, dataCache):
    """
    Take a list of providers and filter/reorder according to the
    environment variables.

    providers: list of provider filenames
    item: the target being provided
    cfgData: configuration data store
    dataCache: metadata cache (pkg_fn maps provider filename -> PN)

    Returns a list of eligible provider filenames, preferred/latest
    versions first, or the int 0 when nothing is eligible.
    """
    eligible = []
    preferred_versions = {}
    sortpkg_pn = {}
    # The order of providers depends on the order of the files on the disk
    # up to here. Sort pkg_pn to make dependency issues reproducible rather
    # than effectively random.
    providers.sort()
    # Collate providers by PN
    pkg_pn = {}
    for p in providers:
        pn = dataCache.pkg_fn[p]
        if pn not in pkg_pn:
            pkg_pn[pn] = []
        pkg_pn[pn].append(p)
    logger.debug(1, "providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
    # First add PREFERRED_VERSIONS
    for pn in sorted(pkg_pn):
        sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
        preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
        if preferred_versions[pn][1]:
            eligible.append(preferred_versions[pn][1])
    # Now add latest versions
    for pn in sorted(sortpkg_pn):
        if pn in preferred_versions and preferred_versions[pn][1]:
            continue
        preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
        eligible.append(preferred_versions[pn][1])
    if len(eligible) == 0:
        logger.error("no eligible providers for %s", item)
        # NOTE(review): returns the int 0 rather than an empty list;
        # callers presumably test only truthiness -- confirm before
        # normalising this.
        return 0
    # If pn == item, give it a slight default preference
    # This means PREFERRED_PROVIDER_foobar defaults to foobar if available
    for p in providers:
        pn = dataCache.pkg_fn[p]
        if pn != item:
            continue
        (newvers, fn) = preferred_versions[pn]
        if not fn in eligible:
            continue
        # Move the self-named provider to the front of the list.
        eligible.remove(fn)
        eligible = [fn] + eligible
    return eligible | cc108fd5a15c524a1c32bd37d3b4111d92a5ae67 | 3,627,310 |
def is_win(record):
    """
    Test for Windows-specific domains.

    :param record: line to analyse
    :return: True if the line matches any of the Windows-specific
        patterns, False otherwise
    """
    # Short-circuit over the pre-compiled module-level patterns. any()
    # also normalises the result to a real bool, matching the documented
    # True/False contract (the original returned the match object of the
    # last pattern tried, or None).
    patterns = (
        win_reg1, win_reg2, win_reg3, win_reg4, win_reg5, win_reg6,
        win_reg7, win_reg8, win_reg9, win_reg10, win_reg11, win_reg12,
        win_reg13, win_reg14, win_reg15, win_reg16, win_reg17, win_reg18,
        win_reg19, win_reg20, win_reg21, win_reg22, win_reg23, win_reg24,
        win_reg25, win_reg26, win_reg27, win_reg28, win_reg29, win_reg30,
        win_reg31, win_reg32, win_reg33,
    )
    return any(p.search(record) for p in patterns)
import logging
def initialize_nose_logger() -> logging.Logger:
    """Configures and returns the logger used by the "nose" package.

    Installs a console (stderr) handler with a verbose formatter on the
    "nose.core" logger at DEBUG level, with propagation disabled.

    Returns:
        The configured "nose.core" logger.
    """
    # BUG FIX: logging.config is a submodule that `import logging` does
    # NOT import; the original relied on some other module having imported
    # it already and raised AttributeError otherwise.
    import logging.config
    logger_config = {
        "version": 1,
        "disable_existing_loggers": False,
        "handlers": {
            "console": {
                "level": logging.DEBUG,
                "class": "logging.StreamHandler",
                "formatter": "verbose"
            }
        },
        "formatters": {
            "verbose": {
                "format": "%(levelname)s|%(threadName)s|%(asctime)s|"
                          "%(filename)s:%(lineno)s - %(funcName)10s(): %(message)s",
                "datefmt": "%d/%b/%Y %H:%M:%S"
            },
        },
        "loggers": {
            "nose.core": {
                "handlers": ["console"],
                "level": logging.DEBUG,
                "propagate": False
            }
        }
    }
    logging.config.dictConfig(logger_config)
    return logging.getLogger("nose.core")
def sq_sums(a_gpu, b_gpu, GSZ=GSZ):
    """
    Compute squared summations of rows from GPUArrays and then their pairwise summations.

    Parameters
    ----------
    a_gpu : GPUArray, shape (M, R)
    b_gpu : GPUArray, shape (N, R)
    GSZ : int, optional
        Grid size for CUDA kernel invocation

    Returns
    -------
    out : GPUArray
        Compute squared summations of each row for each of the inputs on GPU
        giving us two 1D arrays. Then, compute the pairwise summation of
        elements from them, leading to a 2D array.
        The output would still reside on the GPU device.
    """
    M,N,R = a_gpu.shape[0], b_gpu.shape[0], a_gpu.shape[1]
    if R>2048:
        # presumably limited by the kernel's per-block reduction buffer
        # -- TODO confirm against the sq_sum_gpu kernel source.
        raise Exception("Number of columns > 2048 not yet supported!")
    # Threads per block: half the next power of two >= R.
    BSZ = 2**int(np.ceil(np.log(R)/np.log(2))-1)
    out_gpu1 = gpuarray.empty((M),dtype=np.float32)
    out_gpu2 = gpuarray.empty((N),dtype=np.float32)
    shp = np.int32([M,R,N])
    # One launch fills both per-row squared-sum vectors.
    sq_sum_gpu(out_gpu1, out_gpu2, a_gpu, b_gpu, drv.In(shp), block=(BSZ,1,1), grid=(GSZ,1))
    # Pairwise (outer) summation of the two vectors -> (M, N) array.
    out_gpu = addvecs_gpu(out_gpu1, out_gpu2)
    return out_gpu | 4de45c202abcb665ca3b33aee504c646727ba486 | 3,627,313 |
def timealize_img(experiment, image, label):
    """
    Add a leading time axis to an image so it can be fed into a SNN.

    The image is replicated ``experiment.timesteps`` times and stacked,
    which corresponds to feeding the network a constant raw input
    current rather than spikes.

    :param experiment: object exposing a ``timesteps`` attribute
    :param image: image tensor to replicate
    :param label: label, passed through unchanged
    :return: (image stacked along a new time dimension, label)
    """
    repeated_frames = [image for _ in range(experiment.timesteps)]
    return tf.stack(repeated_frames), label
def simpleClosedPath(listPts):
    """Return the given points ordered as a simple closed path.

    Anchors on the bottom-right point and sorts the remaining points by
    their angle from that anchor (stable sort keeps ties in input order).
    """
    remaining = list(listPts)  # work on a copy of listPts
    anchor = remaining.pop(getBottomRight(listPts))
    angled = [(anchor, 0)]
    angled.extend((pt, theta(anchor, pt)) for pt in remaining)
    angled.sort(key=lambda pair: pair[1])
    return [pt for pt, _angle in angled]
def home():
    """
    Route for displaying the homepage.

    Renders "home.html" with the `home` flag set (presumably so the
    template can mark the active navigation entry -- confirm in template).
    """
    return render_template("home.html", home=True) | e7426ac8cd8f2e2792ec0940ae8555444b47e962 | 3,627,316 |
def timefunc(func, msg, *args, **kwargs):
    """Benchmark *func* and print out its runtime.

    Args:
        func: callable to benchmark.
        msg (str): label printed (left-justified) before the measured time.
        *args: positional arguments forwarded to *func*.
        **kwargs: keyword arguments forwarded to *func*.

    Returns:
        object: the value returned by ``func(*args, **kwargs)`` from the
        warm-up call.
    """
    # Make sure the function is compiled before we start the benchmark
    res = func(*args, **kwargs)
    # Timeit: best of 2 repeats of 5 calls each.
    print(msg.ljust(20), end=" ")
    timespan = min(repeat(lambda: func(*args, **kwargs), number=5, repeat=2))
    print(format_time(timespan))
    return res | 67c2c1b8abe0f8dc05020d88b45154619fd49012 | 3,627,317 |
def button(where=None, x=0, y=0, label="", width=0, height=30, idle=None, over=None, down=None, color=(50,50,50)):
    """Render a button and report whether it was clicked.

    The button is drawn in one of three modes, chosen from the arguments:

    * ``width``/``height`` both > 0: a fixed-size button showing ``label``.
    * ``idle``/``over``/``down`` all given: an image button whose three
      mouse states (no interaction, hover, pressed) are rendered from the
      corresponding ``np.ndarray`` images; the images define its size.
    * otherwise: a regular auto-sized button showing ``label``.

    Args:
        where (np.ndarray) : image/frame where the component is rendered.
        x (int) : X position of the component.
        y (int) : Y position of the component.
        label (str) : text displayed inside the button (a ``&`` prefix on a
            character marks it as a keyboard shortcut, e.g. ``"&Quit"``).
        width (int) : width of the button.
        height (int) : height of the button.
        idle (np.ndarray) : image rendered when the mouse is elsewhere.
        over (np.ndarray) : image rendered while the mouse hovers.
        down (np.ndarray) : image rendered while the button is pressed.
        color (tuple) : button color.

    Returns:
        flag (bool) : ``True`` every time the user clicks the button.
    """
    handleTypeError(types=[np.ndarray, NoneType], where=where)
    # Render either onto the supplied frame or onto the current block.
    if where is None:
        block = __internal.topBlock()
    else:
        __internal.screen.where = where
        block = __internal.screen
    # Coordinates are relative to the target block's anchor.
    x, y = x + block.anchor.x, y + block.anchor.y
    if width * height > 0:
        return __internal.buttonWH(block, x, y, width, height, label, color=color, updateLayout=True)
    if all(img is not None for img in (idle, over, down)):
        return __internal.buttonI(block, x, y, idle, over, down, True)
    return __internal.button(block, x, y, label, color=color)
def action_checker(target_name):
    """
    Checks in on targets.json and determines what actions are available
    for the requesting target.

    Parameters
    ----------
    target_name: str

    Returns
    -------
    A list of available actions for the target.
    """
    supported = get_target_data(target_name)['supported_actions']
    # Keep only the actions whose flag is exactly True.
    return [name for name, enabled in supported.items() if enabled is True]
def get_licence(html):
    """
    Search the HTML content for a mention of a CC licence.

    Returns "CC" when either the hyphenated slug or the spelled-out
    name appears in the markup, "N/A" otherwise.
    """
    markers = ("creative-commons", "Creative Commons")
    mentions_cc = any(marker in html for marker in markers)
    return "CC" if mentions_cc else "N/A"
def get_selected_ctrl():
    """
    Get the currently selected controller node.

    :return: the selected node when it is a curve shape, otherwise None.
    """
    node = object_utils.get_selected_node()
    if not node:
        return None
    if not object_utils.is_shape_curve(node):
        return None
    return node
def sum_temp(s, n):
    """
    Accumulate a temperature reading into a running total.

    :param s: int, running total of temperature values
    :param n: int, latest reading to add
    :return: s + n
    """
    return s + n
def foreground_rainbow(ncolors=20, background_color=BLACK):
    """
    A rainbow gradient of `ncolors` ColorPairs with a given background color.
    """
    # Each gradient entry supplies the foreground channels; the same
    # background channels are paired with every one of them.
    return [
        ColorPair(*foreground_color, *background_color)
        for foreground_color in _rainbow_gradient(ncolors)
    ] | 44a934ea8ee4a95b46ad3559e91e6044c0f0494b | 3,627,323 |
import re
def get_method_parameter_values(code, line, full_sig):
    """ Returns a List of parameter values for the method at a given line.

    Each value is wrapped in § markers; booleans become §0§, numerics are
    kept (or randomised for integer-typed params whose value is not a
    literal), everything else is prefixed with param_ and escaped.
    Assumes two source lines per parameter, derived from the distance to
    the "catch(" token -- TODO confirm against get_offset().
    """
    param_list = []
    offset = int((get_offset(code, line, "catch(") - 3))
    line += (1 + int(offset / 2))
    for i in range(int(offset / 2)):
        # Captures the last argument of a call such as `..., value));`.
        param_pattern = re.compile(rf", ?(?:'' ?\+ ?)?(?:{R_VAR}\(?.*, ?)?(.*[^\)])\)\)?;")
        # Matches nested ternaries of the form `cond ? a : b`.
        nested_boolean_pattern = re.compile(rf".*\?(('?{R_VAR}'?):('?{R_VAR}'?))")
        param = re.findall(param_pattern, code[line])[0].replace("'", "")
        # if parameter is boolean, append 0
        if (
            nested_boolean_pattern.search(code[line])
            or full_sig[i].startswith(COMPLEX_TYPES["BOOLEAN"])
        ):
            param_list.append("§0§")
        # if parameter is numeric, append directly
        elif is_number(param) or full_sig[i].startswith("I"):
            param_list.append(f"§{param if is_number(param) else randint(0,99)}§")
        # else, treat as string and append with prefix
        else:
            param = param.replace(" ", "_").replace("|", "\\!").replace("\\", "\\\\")
            param_list.append(f"§param_{param}§")
        line += 1
    return param_list | 0c4803710daaffcb015361665dcd3604d0b9960b | 3,627,324 |
def mrc_to_dask(fname: Pathlike, chunks: tuple):
    """
    Generate a dask array backed by a memory-mapped .mrc file.

    :param fname: path to the .mrc file
    :param chunks: dask chunk specification, normalized against the
        file's inferred shape
    :return: lazy dask array with the file's dtype and shape
    """
    # Open once up front only to infer the array's shape and dtype.
    with access_mrc(fname, mode="r") as mem:
        shape, dtype = mrc_shape_dtype_inference(mem)
    chunks_ = normalize_chunks(chunks, shape)
    def chunk_loader(fname, block_info=None):
        # Re-opens the file per chunk, so dask workers never share a handle.
        idx = tuple(slice(*idcs) for idcs in block_info[None]["array-location"])
        result = np.array(access_mrc(fname, mode="r").data[idx]).astype(dtype)
        return result
    arr = da.map_blocks(chunk_loader, fname, chunks=chunks_, dtype=dtype)
    return arr | 10da8a3a8d9abf13fafbd98be3e3f1f4b2bc7264 | 3,627,325 |
import os
def get_local_path_kind(pathname):
    """Determine if there is a path in the filesystem and if the path
    is a directory or non-directory.

    Returns LOCAL_PATH_NONE when the path does not exist (or cannot be
    stat-ed), LOCAL_PATH_DIR for directories, LOCAL_PATH_NON_DIR otherwise.
    """
    try:
        os.stat(pathname)
    except OSError:
        return LOCAL_PATH_NONE
    if os.path.isdir(pathname):
        return LOCAL_PATH_DIR
    return LOCAL_PATH_NON_DIR
def mock_random_choice(seq):
    """Deterministic stand-in for random.choice: always the first element.

    Indexing a real |seq| (rather than mocking the call) keeps the returned
    value's type identical to an actual element of the sequence, unlike a
    'mock.mock.MagicMock' which behaves differently from |seq[0]|.
    """
    first_element = seq[0]
    return first_element
from typing import Tuple
from typing import Callable
import sys
def mount_remote_volumes(
    runner: Runner, remote_info: RemoteInfo, ssh: SSH, allow_all_users: bool
) -> Tuple[str, Callable]:
    """
    sshfs is used to mount the remote system locally.

    Allowing all users may require root, so we use sudo in that case.

    :param runner: command runner used to invoke sshfs / umount
    :param remote_info: remote cluster info (not used in this function)
    :param ssh: SSH descriptor; only its local port is used here
    :param allow_all_users: mount with ``-o allow_other`` via sudo
    Returns (path to mounted directory, callable that will unmount it).
    """
    # Docker for Mac only shares some folders; the default TMPDIR on OS X is
    # not one of them, so make sure we use /tmp:
    span = runner.span()
    mount_dir = mkdtemp(dir="/tmp")
    sudo_prefix = ["sudo"] if allow_all_users else []
    middle = ["-o", "allow_other"] if allow_all_users else []
    try:
        runner.check_call(
            sudo_prefix + [
                "sshfs",
                "-p",
                str(ssh.port),
                # Don't load config file so it doesn't break us:
                "-F",
                "/dev/null",
                # Don't validate host key:
                "-o",
                "StrictHostKeyChecking=no",
                # Don't store host key:
                "-o",
                "UserKnownHostsFile=/dev/null",
            ] + middle + ["telepresence@localhost:/", mount_dir]
        )
        mounted = True
    except CalledProcessError:
        # Best-effort: a failed mount degrades the session instead of
        # aborting it.
        print(
            "Mounting remote volumes failed, they will be unavailable"
            " in this session. If you are running"
            " on Windows Subystem for Linux then see"
            " https://github.com/datawire/telepresence/issues/115,"
            " otherwise please report a bug, attaching telepresence.log to"
            " the bug report:"
            " https://github.com/datawire/telepresence/issues/new",
            file=sys.stderr
        )
        mounted = False
    def no_cleanup():
        # Returned when the mount failed: nothing to undo.
        pass
    def cleanup():
        # Lazy FUSE unmount on Linux; forced umount elsewhere (macOS).
        if sys.platform.startswith("linux"):
            runner.check_call(
                sudo_prefix + ["fusermount", "-z", "-u", mount_dir]
            )
        else:
            runner.get_output(sudo_prefix + ["umount", "-f", mount_dir])
        span.end()
    return mount_dir, cleanup if mounted else no_cleanup | 5522e4f25a2d3755d5b5013e3d356af91af98fa0 | 3,627,328 |
def find_entries_without_field(path_or_db, field):
    """Return entries without field.

    Accepts either a path to a bibliography file or an already-loaded
    database (resolved by _load_or_use).
    """
    _, db = _load_or_use(path_or_db)
    return [entry for entry in db.entries if field not in entry]
import numpy
def valley_width_transform(valleys):
    """Calculate the approximate distributed valley width

    `from bluegeo.water import valley_width_transform;test = valley_width_transform('/Users/devin/Desktop/valley.tif')`

    Arguments:
        valleys -- raster (or path) of valley extents, passed to Raster();
            nodata cells are treated as outside the valleys
    Returns:
        float32 Raster of widths distributed across each valley segment
    """
    valleys = Raster(valleys)
    mask = valleys.array != valleys.nodata
    # Calculate distance to the bank over all valleys
    print("Calculating a distance transform")
    distances = distance_transform_edt(
        mask, sampling=(valleys.csy, valleys.csx))
    # Calculate local maxima
    print("Calculating local maxima")
    local_maxi = peak_local_max(
        distances, indices=False, footprint=numpy.ones((3, 3)), labels=mask)
    # Use a watershed segmentation algorithm to produce labeled width breaks
    def label_map(a):
        # Maps each unique value in `a` to the indices where it occurs,
        # using one argsort instead of a boolean mask per label.
        shape = a.shape
        a = a.ravel()
        indices = numpy.argsort(a)
        bins = numpy.bincount(a)
        indices = numpy.split(indices, numpy.cumsum(bins[bins > 0][:-1]))
        return dict(list(zip(numpy.unique(a), [numpy.unravel_index(ind, shape) for ind in indices])))
    print("Labeling maxima")
    breaks = ndi_label(local_maxi)[0]
    distance_map = {brk: dist for brk, dist in zip(
        breaks[local_maxi], distances[local_maxi])}
    print("Performing Watershed Segmentation")
    labels = watershed(-distances, breaks, mask=mask)
    print("Assigning distances to labels")
    for label, inds in list(label_map(labels).items()):
        if label == 0:
            # 0 is the watershed background, not a valley segment.
            continue
        distances[inds] = distance_map[label]
    print("Doubling dimensions")
    # Widths beyond two cell diagonals are doubled -- presumably to turn
    # the half-width (distance to nearest bank) into a full width; TODO
    # confirm the intent of this heuristic.
    max_distance = numpy.sqrt(valleys.csy**2 + valleys.csx**2) * 2
    distances[distances > max_distance] *= 2
    output = valleys.astype('float32')
    output[:] = distances.astype('float32')
    return output | 2f76a3484264ca1c509ad6c7e07839c6655a47af | 3,627,330 |
def add_contents_entry(section, page_variable, feature, parent):
    """
    Adds a new row to the table of contents table.
    <p>
    The table should be called 'report_contents' and be
    structured with a 'section' and a 'page' column. No
    detailed checks are made to confirm this structure is
    in place.
    </p><p>
    The page will be taken from the designated project
    variable as defined by the parameter 'page_variable'.
    </p>
    <h2>Example usage:</h2>
    <ul>
    <li>add_contents_entry('Section 1','page_counter')</li>
    </ul>
    <p>
    Returns None.
    </p>
    """
    # NOTE(review): `feature` and `parent` are part of the QGIS expression-
    # function signature; the incoming `feature` is discarded and rebuilt
    # below, and `parent` is never used.
    project = QgsProject.instance()
    page = 0
    try:
        page = int(QgsExpressionContextUtils.projectScope(
            project).variable(page_variable))
        if page is None:
            # NOTE(review): unreachable -- int() never returns None; a
            # missing variable raises and is handled below instead.
            page = 0
    except:
        # NOTE(review): bare except silently falls back to page 0 when the
        # variable is missing or non-numeric; consider narrowing to
        # (TypeError, ValueError).
        pass
    layer=None
    layers = QgsProject.instance().mapLayersByName('report_contents')
    if layers:
        layer = layers[0]
    else:
        # No contents table in the project: nothing to add to.
        return None
    feature = QgsFeature(layer.fields())
    feature.setAttribute('section', section)
    feature.setAttribute('page', page)
    layer.dataProvider().addFeatures([feature]) | 9e50715bd40bad8f94c560ada09833a270e7d512 | 3,627,331 |
def V_6_3_3(b, h0, ft):
    """
    Shear capacity of the inclined section of a general slab-type flexural
    member without stirrups or bent-up bars (GB 50010, Eq. 6.3.3).

    :param b: section width
    :param h0: effective depth of the section
    :param ft: design tensile strength of concrete
    :return: 0.7 * beta_h * ft * b * h0, where beta_h = (800 / h0) ** 0.25
        with h0 clamped to the range [800, 2000]
    """
    clamped_depth = min(max(h0, 800), 2000)
    beta_h = (800 / clamped_depth) ** 0.25
    return 0.7 * beta_h * ft * b * h0
import ast
def make_cond_block():
    """
    Build the AST for:

        if flor.skip_stack.peek().should_execute(not flor.SKIP):
            pass

    TODO: Extend to accept predicate

    NOTE(review): `previous_arg` (the `not flor.SKIP` expression) is built
    but never used -- the call is emitted with the constant True instead,
    presumably a leftover from the intended predicate support.
    """
    # AST for `not flor.SKIP` (currently unused; see note above).
    previous_arg = ast.UnaryOp(
        op=ast.Not(),
        operand=ast.Attribute(
            value=ast.Name('flor', ast.Load()),
            attr='SKIP',
            ctx=ast.Load()
        )
    )
    # Constant True -- the argument actually passed to should_execute().
    safe_arg = ast.NameConstant(value=True)
    return ast.If(
        test=ast.Call(
            func=ast.Attribute(
                value=make_attr_call('skip_stack', 'peek'),
                attr='should_execute',
                ctx=ast.Load()
            ),
            args=[safe_arg],
            keywords=[]
        ),
        body=[ast.Pass()],
        orelse=[]
    ) | 5b2d2a61c295d765f192f9d24950799f46f34ef0 | 3,627,333 |
def removeInteger(string):
    """
    Parse a DER-encoded INTEGER from the front of *string*.

    Returns (value, rest): the decoded non-negative integer and the
    unconsumed remainder of the input. Despite the name, nothing is
    removed in place.
    """
    # Tag check: 0x02 is the DER tag for INTEGER -- presumably what
    # bytesHexB/"02" encode; TODO confirm against _checkSequenceError.
    _checkSequenceError(string=string, start=bytesHexB, expected="02")
    length, lengthLen = _readLength(string[1:])
    numberBytes = string[1 + lengthLen:1 + lengthLen + length]
    rest = string[1 + lengthLen + length:]
    # First content byte as an int; handles both bytes (already int)
    # and str (needs ord) inputs.
    nBytes = numberBytes[0] if isinstance(
        numberBytes[0], intTypes
    ) else ord(numberBytes[0])
    # A set top bit would mean a negative DER integer -- rejected here.
    assert nBytes < hex160
    return int(BinaryAscii.hexFromBinary(numberBytes), 16), rest | 54e2905bf9f68224ba35b3c9e2d5c94bf81bdfd0 | 3,627,334 |
import select
def get_urls():
    """Fetch up to 8 urls from the database that still need to be scraped.

    Urls with a scrape priority set in the database are retrieved first
    (ordered by descending Url.priority_scrape).

    NOTE(review): the original docstring said 16 urls, but the slice below
    takes 8 -- confirm which limit was intended.
    """
    return select(u for u in Url if u.date_scraped is None).order_by(desc(Url.priority_scrape))[:8] | 2e181424ae6795838f2a468cd3e59a6153cd3d5c | 3,627,335 |
import json
def convert_input_to_userid(input_id):
    """
    Take user input from app (Steam user ID or vanity URL) and output Steam
    user ID for further API calls.

    Falls back to returning the input unchanged when the vanity-URL lookup
    fails with an HTTP error or the response carries no 'steamid' key
    (i.e. the input was presumably already a numeric Steam ID).
    """
    # api_key is presumably a module-level credential -- TODO confirm.
    req = Request('http://api.steampowered.com/ISteamUser/ResolveVanityURL/v0001/?key=%s&vanityurl=%s'%(api_key, input_id))
    try:
        data_raw = urlopen(req).read()
    except HTTPError:
        return input_id
    data_json = json.loads(data_raw)
    try:
        return int(data_json['response']['steamid'])
    except KeyError:
        return input_id | 7f18e720dca48c33c892b7ad3e1970c057d4e6b8 | 3,627,336 |
def create_citydist(create_environment, create_building):
    """
    Pytest fixture function to generate city district with three
    res. buildings (with demands) on positions (0, 0), (0, 10), (10, 10)

    Parameters
    ----------
    create_environment : object
        Environment object (as fixture of pytest)
    create_building : object
        Building object (as fixture of pytest); NOTE: the same building
        instance is registered at all three positions — confirm the
        entities are allowed to be shared.

    Returns
    -------
    district : object
        CityDistrict object of PyCity
    """
    district = citydist.CityDistrict()
    # Add environment.  (The original assigned to an undefined name
    # ``create_empty_citydist`` here, which raised NameError.)
    district.environment = create_environment
    district.addEntity(entity=create_building,
                       position=point.Point(0, 0))
    district.addEntity(entity=create_building,
                       position=point.Point(0, 10))
    district.addEntity(entity=create_building,
                       position=point.Point(10, 10))
    return district
def read_log(log_file, skip_log_rows=None, skip_log_conditions=None):
    """Read the behavioral log file with information about each EEG trial.

    Parameters
    ----------
    log_file : str or pandas.DataFrame
        Path of the log file (CSV or tab-separated), or an already loaded
        DataFrame which is used as-is.
    skip_log_rows : list, optional
        Row indices to drop (e.g. if the EEG was paused accidentally).
    skip_log_conditions : dict, optional
        Mapping of column -> value or list of values whose rows are removed
        (e.g. for filler stimuli without triggers).

    Returns
    -------
    pandas.DataFrame
        The (possibly filtered) log.
    """
    if isinstance(log_file, pd.DataFrame):
        # Data were handed over pre-loaded; nothing to parse.
        log = log_file
    else:
        # Sniff the file's text encoding before parsing.
        with open(log_file, 'rb') as handle:
            raw_bytes = handle.read()
        encoding = chardet.detect(raw_bytes)['encoding']
        # CSV files use the default comma separator, everything else tabs.
        separator = ',' if '.csv' in log_file else '\t'
        log = pd.read_csv(log_file, delimiter=separator, encoding=encoding)
    # Drop explicitly listed rows.
    if skip_log_rows is not None:
        log = log.drop(skip_log_rows)
    # Drop rows matching per-column conditions.
    if skip_log_conditions is not None:
        assert isinstance(skip_log_conditions, dict), \
            '"skip_log_conditions" must be a dict ({column: [conditions]})'
        for col, values in skip_log_conditions.items():
            if isinstance(values, list):
                log = log[~log[col].isin(values)]
            else:
                log = log[log[col] != values]
    return log
import hashlib
def sha1_base32(buf: bytes, n: int = None):
    """Return the base32 encoding of the first ``n`` bytes of SHA1(buf).

    When ``n`` is None the entire 20-byte digest is encoded.
    """
    digest = hashlib.sha1(buf).digest()
    # ``digest[:None]`` is the whole digest, so both cases collapse here.
    return base32(digest[:n])
def get_lsf_grid_name(fibre_number):
    """
    Return the appropriate LSF grid name (a, b, c, or d) to use, given a
    mean fibre number.

    :param fibre_number:
        The mean fibre number of observations (expected range 1-300).

    :returns:
        A one-length string describing which LSF grid to use ('a', 'b',
        'c', or 'd'), or ``None`` when ``fibre_number`` is outside 1-300
        (preserving the original implicit-None behaviour, now explicit).
    """
    # Bands are checked from the lowest fibre numbers upward.
    if 1 <= fibre_number <= 50:
        return "d"
    if 50 < fibre_number <= 145:
        return "c"
    if 145 < fibre_number <= 245:
        return "b"
    if 245 < fibre_number <= 300:
        return "a"
    return None
from typing import List
def get_named_layers_and_params_by_regex(
    module: Module,
    param_names: List[str],
    params_strict: bool = False,
) -> List[NamedLayerParam]:
    """
    Collect (layer, parameter) pairs of *module* whose full dotted parameter
    path matches one of the given names or regex patterns.

    :param module: the module to get the matching layers and params from
    :param param_names: a list of names or regex patterns to match with full parameter
        paths. Regex patterns must be specified with the prefix 're:'
    :param params_strict: if True, this function will raise an exception if there a
        parameter is not found to match every name or regex in param_names
    :return: a list of NamedLayerParam tuples whose full parameter names in the given
        module match one of the given regex patterns or parameter names
    """
    named_layers_and_params = []
    found_param_names = []
    for layer_name, layer in module.named_modules():
        for param_name, param in layer.named_parameters():
            if "." in param_name:  # skip parameters of nested layers
                continue
            full_param_name = "{}.{}".format(layer_name, param_name)
            if any_str_or_regex_matches_param_name(full_param_name, param_names):
                named_layers_and_params.append(
                    NamedLayerParam(layer_name, layer, param_name, param)
                )
                found_param_names.append(full_param_name)
            elif layer_name.endswith(".module"):
                # unwrap layers wrapped with a QuantWrapper and check if they match
                # (the wrapper inserts a ".module" level that the user's pattern
                # would otherwise have to spell out explicitly)
                parent_layer_name = ".".join(layer_name.split(".")[:-1])
                parent_layer = get_layer(parent_layer_name, module)
                skip_wrapper_name = "{}.{}".format(parent_layer_name, param_name)
                if (
                    QuantWrapper is not None
                    and isinstance(parent_layer, QuantWrapper)
                    and any_str_or_regex_matches_param_name(
                        skip_wrapper_name, param_names
                    )
                ):
                    named_layers_and_params.append(
                        NamedLayerParam(layer_name, layer, param_name, param)
                    )
                    found_param_names.append(skip_wrapper_name)
    # In strict mode, raises if any requested name/pattern went unmatched.
    if params_strict:
        validate_all_params_found(param_names, found_param_names)
    return named_layers_and_params
def draw_box(to_draw, xmin, xmax, ymin, ymax, cname, cindex, class_colors, conf=None, extratext=""):
    """Draw a labelled bounding box onto an image and return the image.

    Arguments:
    to_draw -- image to draw on
    xmin, xmax, ymin, ymax -- coordinates of the box
    cname -- class name, written on the box
    cindex -- class index, selects the color from class_colors
    class_colors -- a list of multiple colors
    conf -- optional confidence of the box, appended to the label
    extratext -- additional text appended to the label
    """
    color = class_colors[cindex]
    # Box outline.
    cv2.rectangle(to_draw, (xmin, ymin), (xmax, ymax), color, 2)
    # Compose the label text.
    label = cname
    if conf is not None:
        label += " " + ('%.2f' % conf)
    label += " " + extratext
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 0.5
    (label_w, label_h), _baseline = cv2.getTextSize(label, font, font_scale, 1)
    # Filled background strip above the box, then the label on top of it.
    strip_top = (xmin, ymin - 15)
    strip_bottom = (xmin + label_w + 10, ymin - 10 + label_h)
    label_anchor = (xmin + 5, ymin - 2)
    cv2.rectangle(to_draw, strip_top, strip_bottom, color, -1)
    cv2.putText(to_draw, label, label_anchor, font, font_scale, (0, 0, 0), 1, cv2.LINE_AA)
    return to_draw
def _get_node(pending_set, pre_sel=[], opts={}):
""" Next node preferably in pre-selected nodes
"""
shuffle(pre_sel)
for node in pre_sel:
if node in pending_set:
pending_set.remove(node)
return node
# Random if not
return pending_set.pop() | 2dfcfd96479ec19c074cd52129dee9fd018243b0 | 3,627,343 |
import functools
def callCounter(func):
    """Decorator that counts how many times *func* is invoked.

    The running total is exposed as the ``calls`` attribute on the
    returned wrapper.
    """
    @functools.wraps(func)
    def counted(*args, **kwargs):
        # Increment before delegating so calls that raise are counted too.
        counted.calls += 1
        return func(*args, **kwargs)
    counted.calls = 0
    return counted
def deprecated(func):
    """Print a deprecation warning once on first use of the function.

    >>> @deprecated  # doctest: +SKIP
    ... def f():
    ...     pass
    >>> f()  # doctest: +SKIP
    f is deprecated
    """
    # ``functools.wraps`` preserves the wrapped function's metadata
    # (the original wrapper did not, unlike callCounter in this file),
    # and ``nonlocal`` replaces the old one-element-list counter hack.
    warned = False

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        nonlocal warned
        if not warned:
            warned = True
            print(func.__name__, 'is deprecated')
        return func(*args, **kwargs)
    return wrapper
import numpy
def eigsh(a, k=6, *, which='LM', ncv=None, maxiter=None, tol=0,
          return_eigenvectors=True):
    """Finds ``k`` eigenvalues and eigenvectors of the real symmetric matrix.

    Solves ``Ax = wx``, the standard eigenvalue problem for ``w`` eigenvalues
    with corresponding eigenvectors ``x``.

    Args:
        a (cupy.ndarray or cupyx.scipy.sparse.csr_matrix): A symmetric square
            matrix with dimension ``(n, n)``.
        k (int): The number of eigenvalues and eigenvectors to compute. Must be
            ``1 <= k < n``.
        which (str): 'LM' or 'LA'. 'LM': finds ``k`` largest (in magnitude)
            eigenvalues. 'LA': finds ``k`` largest (algebraic) eigenvalues.
        ncv (int): The number of Lanczos vectors generated. Must be
            ``k + 1 < ncv < n``. If ``None``, default value is used.
        maxiter (int): Maximum number of Lanczos update iterations.
            If ``None``, default value is used.
        tol (float): Tolerance for residuals ``||Ax - wx||``. If ``0``, machine
            precision is used.
        return_eigenvectors (bool): If ``True``, returns eigenvectors in
            addition to eigenvalues.

    Returns:
        tuple:
            If ``return_eigenvectors is True``, it returns ``w`` and ``x``
            where ``w`` is eigenvalues and ``x`` is eigenvectors. Otherwise,
            it returns only ``w``.

    .. seealso:: :func:`scipy.sparse.linalg.eigsh`

    .. note::
        This function uses the thick-restart Lanczos methods
        (https://sdm.lbl.gov/~kewu/ps/trlan.html).
    """
    # Validate the input matrix and hyper-parameters before allocating.
    n = a.shape[0]
    if a.ndim != 2 or a.shape[0] != a.shape[1]:
        raise ValueError('expected square matrix (shape: {})'.format(a.shape))
    if a.dtype.char not in 'fdFD':
        raise TypeError('unsupprted dtype (actual: {})'.format(a.dtype))
    if k <= 0:
        raise ValueError('k must be greater than 0 (actual: {})'.format(k))
    if k >= n:
        raise ValueError('k must be smaller than n (actual: {})'.format(k))
    if which not in ('LM', 'LA'):
        raise ValueError('which must be \'LM\' or \'LA\' (actual: {})'
                         ''.format(which))
    if ncv is None:
        ncv = min(max(8 * k, 20), n - 1)
    else:
        ncv = min(max(ncv, k + 2), n - 1)
    if maxiter is None:
        maxiter = 10 * n
    if tol == 0:
        tol = numpy.finfo(a.dtype).eps
    # alpha/beta hold the tridiagonal Lanczos coefficients; V the basis.
    alpha = cupy.zeros((ncv, ), dtype=a.dtype)
    beta = cupy.zeros((ncv, ), dtype=a.dtype)
    V = cupy.empty((ncv, n), dtype=a.dtype)
    # Set initial vector
    u = cupy.random.random((n, )).astype(a.dtype)
    v = u / cupy.linalg.norm(u)
    V[0] = v
    # Lanczos iteration
    u = _eigsh_lanczos_update(a, V, alpha, beta, 0, ncv)
    # NOTE(review): ``iter`` shadows the builtin; counts Lanczos steps done.
    iter = ncv
    w, s = _eigsh_solve_ritz(alpha, beta, None, k, which)
    x = V.T @ s
    # Compute residual
    beta_k = beta[-1] * s[-1, :]
    res = cupy.linalg.norm(beta_k)
    while res > tol and iter < maxiter:
        # Setup for thick-restart: keep the k Ritz pairs and re-orthogonalize.
        beta[:k] = 0
        alpha[:k] = w
        V[:k] = x.T
        u -= u.T @ V[:k].conj().T @ V[:k]
        v = u / cupy.linalg.norm(u)
        V[k] = v
        u = a @ v
        alpha[k] = v.conj().T @ u
        u -= alpha[k] * v
        u -= V[:k].T @ beta_k
        u -= u.T @ V[:k+1].conj().T @ V[:k+1]
        beta[k] = cupy.linalg.norm(u)
        v = u / beta[k]
        V[k+1] = v
        # Lanczos iteration
        u = _eigsh_lanczos_update(a, V, alpha, beta, k+1, ncv)
        iter += ncv - k
        w, s = _eigsh_solve_ritz(alpha, beta, beta_k, k, which)
        x = V.T @ s
        # Compute residual
        beta_k = beta[-1] * s[-1, :]
        res = cupy.linalg.norm(beta_k)
    if return_eigenvectors:
        # Return in ascending eigenvalue order, matching scipy's convention.
        idx = cupy.argsort(w)
        return w[idx], x[:, idx]
    else:
        return cupy.sort(w)
def calc_t_frame(n_col, n_row, n_amp, ins):
    """Calculate the frame time for a given ins/readmode/subarray.

    Parameters
    ----------
    n_col : int
        Number of columns.
    n_row : int
        Number of rows.
    n_amp : int
        Amplifiers reading data.
    ins : str
        The instrument key ('nirspec', 'nircam', or 'niriss').

    Returns
    -------
    t_frame : float
        The frame time (in seconds).

    Raises
    ------
    ValueError
        If ``ins`` is not a recognised instrument key.
    """
    n_col, n_amp, n_row = int(n_col), int(n_amp), int(n_row)
    # Per-instrument extra rows of readout overhead.
    if ins == 'nirspec':
        n = 2
    elif ins in ('nircam', 'niriss'):
        n = 1
    else:
        # The original fell through with ``n`` undefined (NameError);
        # raise a clear error instead.
        raise ValueError("Unknown instrument key: {!r}".format(ins))
    t_frame = (n_col/n_amp + 12)*(n_row + n)*(1e-5)
    return t_frame
def verify_bytesio(enc_message, verify_key_hex, signature):
    """Check an asymmetric signature over a byte stream.

    :param bytes enc_message: encrypted data
    :param bytes verify_key_hex: serialized (hex-encoded) verification key
    :param bytes signature: detached signature
    :return: True when the signature verifies, False otherwise
    """
    key = nacl.signing.VerifyKey(verify_key_hex, encoder=nacl.encoding.HexEncoder)
    try:
        key.verify(enc_message, signature)
        return True
    except BadSignatureError:
        return False
import logging
import json
def rest_error_message(error, jid):
    """Serialize an exception into the JSON error envelope returned to callers.

    :param error: Exception instance; logged (with traceback) and stringified
    :param jid: string, job ID
    :return: JSON string of the form
        ``{"user_id": "admin", "result": {"error": <msg>}, "_id": <jid>}``
    """
    logging.exception(error)
    envelope = {'user_id': 'admin', 'result': {'error': str(error)}, '_id': jid}
    return json.dumps(envelope)
def typedefn_from_root_element(el: UxsdElement) -> str:
    """Generate a C++ class declaration for a root element.

    The emitted class inherits from the element's content type and adds
    ``load``/``write`` member function declarations.
    """
    lines = [
        "/** Generated from:",
        utils.to_comment_body(el.source),
        "*/",
        "class %s : public %s {" % (el.name, el.type.cpp),
        "public:",
        "\tpugi::xml_parse_result load(std::istream &is);",
        "\tvoid write(std::ostream &os);",
        "};",
    ]
    return "\n".join(lines) + "\n"
def store(url, date, content):
    """Store article in database.

    :param url: article url (record key in the crawl database)
    :param date: publication date; ISO-8601 strings are converted to a
        timestamp via ``iso2ts``
    :param content: article body to persist
    :return: result of ``crawldb.add``
    """
    # isinstance is the idiomatic type check (also accepts str subclasses,
    # unlike the previous ``type(date) is str``).
    if isinstance(date, str):
        date = iso2ts(date)
    return crawldb.add(url, content, version=date)
def Dict(val):
    """Build a dict from *val* — a mapping or an iterable of key/value pairs."""
    return dict(val)
def get_distracting_answer_by_visual7w_generation(qa_id, correct_answer, candidate_dict, res_cnt=3):
    """Pick up to ``res_cnt`` distracting answers for one question.

    Args:
        qa_id: int question/answer id
        correct_answer: str, the ground-truth answer to exclude
        candidate_dict: dict of (id, candidate), generated by the
            visual7w baseline model
        res_cnt: maximum number of distractors returned
    """
    distractors = []
    candidates = candidate_dict.get(qa_id)
    if candidates is None:
        print('%s not in candidate_dict' % (qa_id))
        return distractors
    for cand in candidates:
        # Skip candidates that mean the same as the correct answer,
        # and duplicates already collected.
        if is_semantic_equivalent(correct_answer, cand):
            continue
        if cand in distractors:
            continue
        distractors.append(cand)
        if len(distractors) >= res_cnt:
            break
    return distractors
def diag(req, resp):
    """Report the state of the database: driver in use and known tables."""
    info = {
        'driver': engine.driver,
        'tables': engine.table_names(),
    }
    return info
import os
import pathlib
def as_path(path: PathLike) -> ReadWritePath:
    """Create a generic `pathlib.Path`-like abstraction from *path*.

    Args:
        path: Pathlike object.

    Returns:
        path: The `pathlib.Path`-like abstraction.
    """
    if isinstance(path, str):
        # On windows, all paths are `pathlib.WindowsPath`, as `gpath.GPath`
        # is `PosixPurePath`; `gs://` URIs always go through `GPath`.
        if os.name == 'nt' and not path.startswith('gs://'):
            return pathlib.Path(path)
        return gpath.GPath(path)
    if isinstance(path, _PATHLIKE_CLS):
        # Forward resource path, gpath, ... as-is.
        return path
    if isinstance(path, os.PathLike):
        # Other `os.fspath`-compatible objects.
        return pathlib.Path(path)
    raise TypeError(f'Invalid path type: {path!r}')
def dc_loss(embedding, label):
    """
    Deep clustering loss function.

    Args:
        embedding: (T,D)-shaped activation values
        label: (T,C)-shaped labels

    return:
        (1,)-shaped squared flobenius norm of the difference
        between embedding and label affinity matrices
    """
    xp = cuda.get_array_module(label)
    # One-hot encode each label row by interpreting its C bits as an integer
    # in [0, 2**C).
    b = xp.zeros((label.shape[0], 2 ** label.shape[1]))
    # NOTE(review): indexing mixes ``np.arange`` with an ``xp`` array — on
    # GPU (xp == cupy) this mixed indexing looks suspect; confirm it works.
    b[np.arange(label.shape[0]),
      [int(''.join(str(x) for x in t), base=2) for t in label.data]] = 1
    label_f = chainer.Variable(b.astype(np.float32))
    # ||V^T V||_F^2 + ||Y^T Y||_F^2 - 2 ||V^T Y||_F^2  (expanded Frobenius
    # norm of the affinity-matrix difference, avoiding the T x T matrices).
    loss = F.sum(F.square(F.matmul(embedding, embedding, True, False))) \
        + F.sum(F.square(F.matmul(label_f, label_f, True, False))) \
        - 2 * F.sum(F.square(F.matmul(embedding, label_f, True, False)))
    return loss
import urllib
import json
import time
def ScrapeAdMetadataByKeyword(CurrentSession, Seed, NumAds = 2000):
    """
    Returns a list of dictionaries that includes metadata of the Ad and
    also includes its performance details.  Fetches ``NumAds`` ads with the
    first request and pages through subsequent results 2000 at a time.
    Collection is capped at 8000 ads because FB kills the connection after
    that (working around the 8K limit is WIP).
    """
    AllAdMetadata = []
    totalAdCountCurrent = 0
    IterationCount = 1
    URLparameters = urllib.parse.urlencode(parameters_for_URL)
    AdMetadataLink = adMetadataLinkTemplate % (Seed, NumAds, URLparameters)
    data = CurrentSession.get(AdMetadataLink)
    # Responses carry a junk prefix of length ``prefix_length`` before the JSON.
    DataRetrievedFromLink = data.text[prefix_length:]
    DataRetrievedFromLinkJson = json.loads(DataRetrievedFromLink)
    AllAdMetadata.append(DataRetrievedFromLinkJson)
    totalAdCount = DataRetrievedFromLinkJson['payload']['totalCount']
    totalAdCountCurrent += len(DataRetrievedFromLinkJson['payload']['results'])
    while not DataRetrievedFromLinkJson['payload']['isResultComplete'] and totalAdCountCurrent < 8000:
        # Limit ad collection to 8000 since FB kills connection after that.
        # WIP to work around the 8K ad limit.
        time.sleep(3)
        IterationCount += 1
        nextPageToken = DataRetrievedFromLinkJson["payload"]["nextPageToken"]
        nextPageToken = urllib.parse.quote(nextPageToken)
        DataRetrievedFromLink = ""
        DataRetrievedFromLinkJson = {}
        adMetadataLinkNextPage = \
            adMetadataLinkNextPageTemplate % (Seed, nextPageToken, 2000, URLparameters)
        # Retry each page up to 5 times before giving up on the whole scrape.
        for attempts in range(5):
            try:
                data = CurrentSession.get(adMetadataLinkNextPage)
                DataRetrievedFromLink = data.text[prefix_length:]
                DataRetrievedFromLinkJson = json.loads(DataRetrievedFromLink)
                AllAdMetadata.append(DataRetrievedFromLinkJson)
                totalAdCount = DataRetrievedFromLinkJson['payload']['totalCount']
                totalAdCountCurrent += len(DataRetrievedFromLinkJson['payload']['results'])
                time.sleep(1)
                break
            except Exception:
                # Was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                if attempts == 4:
                    # Force the while-condition false to stop the scrape.
                    totalAdCountCurrent = 8000
                    break
                print("Trying again")
                time.sleep(3)
    WriteToFiles(AllAdMetadata, "Contents", Seed)  # List of dictionaries returned
    return AllAdMetadata
from typing import Optional
import os
def get_all_slots_metadata(player_id: str, page_size: int, consistent_read: bool, start_key: Optional[str]):
    """Get metadata for all save slots, or an empty list if no metadata is found.

    Queries the DynamoDB gamesaves table (name taken from the
    GAMESAVES_TABLE_NAME environment variable) for all items keyed by
    *player_id*, one page at a time.

    :param player_id: partition key of the player whose slots are listed
    :param page_size: maximum number of items returned per page
    :param consistent_read: whether to use a strongly consistent read
    :param start_key: opaque pagination token from a previous call, or None
    :return: tuple of (items, next_start_key); next_start_key is derived
        from DynamoDB's LastEvaluatedKey and is None-like when exhausted
    """
    gamesaves_table = ddb.get_table(table_name=os.environ.get('GAMESAVES_TABLE_NAME'))
    query_params = ddb.query_request_param(
        key_id='player_id',
        key_value=player_id,
        response_limit=page_size,
        use_consistent_read=consistent_read,
        start_key=create_exclusive_start_key(player_id, start_key)
    )
    response = gamesaves_table.query(**query_params)
    return response['Items'], create_next_start_key(response.get('LastEvaluatedKey'))
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities):
    """Set up the binary-sensor platform from a config entry.

    Scans the entry's options for alert configs; if at least one listener is
    enabled, subscribes to the ISAPI alert stream and creates one
    HikvisionAlertBinarySensor per configured alert.
    """
    # We should scan options for alert configs and then if at least 1 listener is enabled subscribe for the stream
    hik_client = hass.data[const.DOMAIN][config_entry.entry_id][const.DATA_API_CLIENT]
    entities = []
    if should_listen_for_alerts(config_entry.options):
        alerts_cfg = await start_isapi_alert_listeners(
            hass, hass.data[const.DOMAIN][config_entry.entry_id], config_entry
        )
        # Map channel number -> (sensor id prefix, human-readable name prefix).
        inputs_map = {}
        if config_entry.data.get(const.CONF_DEVICE_TYPE) == const.DEVICE_TYPE_NVR:
            inputs = await hik_client.get_available_inputs()
            for input in inputs:
                inputs_map[input.input_id] = (
                    name_to_id(const.SENSOR_ID_PREFIX + str(input.input_id).rjust(2, "0")),
                    input.input_name,
                )
        else:
            # Non-NVR devices have a single fixed channel with no prefixes.
            inputs_map[const.NON_NVR_CHANNEL_NUMBER] = (None, None)
        for alert in alerts_cfg:
            if alert.channel not in inputs_map:
                # NOTE(review): this warns but does not ``continue`` — the
                # sensor is still created below with None prefixes; confirm
                # that is intended.
                _LOGGER.warning("Ignoring sensors for channel {} (device {})".format(alert.channel, config_entry.title))
            input_prefix_id, input_prefix_name = inputs_map.get(alert.channel, (None, None))
            sensor_id = "_".join(filter(None, (name_to_id(config_entry.title), input_prefix_id, alert.type.value)))
            sensor_name = " ".join(
                filter(
                    None,
                    (
                        config_entry.title,
                        input_prefix_name,
                        const.ALERT_TYPES_MAP.get(alert.type.value, alert.type.value),
                    ),
                )
            )
            entities.append(HikvisionAlertBinarySensor(hass, sensor_id, sensor_name, alert))
    if len(entities) > 0:
        async_add_entities(entities)
    return True
def image_pil2cv(img):
    """Convert a PIL image to an opencv image.

    Args:
        img (PIL image): A PIL image of uint8 between 0 and 255 using RGB channels.

    Returns:
        np.array: A numpy image loaded with opencv of uint8 between 0 and 255
        using BGR channels.
    """
    # PIL stores RGB; OpenCV expects BGR, so swap the channel order.
    rgb_array = np.array(img)
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
def _ww3_ounf_contents(run_date, run_type):
"""
:param run_date: :py:class:`arrow.Arrow`
:param str run_type:
:return: ww3_ounf.inp file contents
:rtype: str
"""
start_date = (
run_date.format("YYYYMMDD")
if run_type == "nowcast"
else run_date.shift(days=+1).format("YYYYMMDD")
)
run_hours = {"nowcast": 24, "forecast": 36, "forecast2": 30}
output_interval = 1800 # seconds
output_count = int(run_hours[run_type] * 60 * 60 / output_interval)
contents = f"""$ WAVEWATCH III NETCDF Grid output post-processing
$
$ First output time (YYYYMMDD HHmmss), output increment (s), number of output times
{start_date} 000000 {output_interval} {output_count}
$
$ Fields
N by name
HS LM WND CUR FP T02 DIR DP WCH WCC TWO FOC USS
$
$ netCDF4 output
$ real numbers
$ swell partitions
$ one file
4
4
0 1 2
T
$
$ File prefix
$ number of characters in date
$ IX, IY range
$
SoG_ww3_fields_
8
1 1000000 1 1000000
"""
return contents | 62618639265e419b5ad1ff9c7364e6d83aeca1c0 | 3,627,361 |
from typing import Tuple
def _search(matrix: np.ndarray, tour: np.ndarray, x: int, y: int, z: int) -> Tuple[int, float]:
""" Поиск лучшей замены, среди всех возможных замен
matrix: Матрица весов
tour: Список городов
x, y, z: Города, для которых пробуем найти изменение тура
return: Тип переворота, выигрыш
"""
s = len(tour)
a, b, c, d, e, f = tour[x % s], tour[(x + 1) % s], tour[y % s], tour[(y + 1) % s], tour[z % s], tour[(z + 1) % s]
base = current_min = matrix[a][b] + matrix[c][d] + matrix[e][f]
gain, exchange = 0, -1
if current_min > (current := matrix[a][e] + matrix[c][d] + matrix[b][f]): # 2-opt (a, e) (d, c) (b, f)
gain, exchange, current_min = base - current, 0, current
if current_min > (current := matrix[a][b] + matrix[c][e] + matrix[d][f]): # 2-opt (a, b) (c, e) (d, f)
gain, exchange, current_min = base - current, 1, current
if current_min > (current := matrix[a][c] + matrix[b][d] + matrix[e][f]): # 2-opt (a, c) (b, d) (e, f)
gain, exchange, current_min = base - current, 2, current
if current_min > (current := matrix[a][d] + matrix[e][c] + matrix[b][f]): # 3-opt (a, d) (e, c) (b, f)
gain, exchange, current_min = base - current, 3, current
if current_min > (current := matrix[a][d] + matrix[e][b] + matrix[c][f]): # 3-opt (a, d) (e, b) (c, f)
gain, exchange, current_min = base - current, 4, current
if current_min > (current := matrix[a][e] + matrix[d][b] + matrix[c][f]): # 3-opt (a, e) (d, b) (c, f)
gain, exchange, current_min = base - current, 5, current
if current_min > (current := matrix[a][c] + matrix[b][e] + matrix[d][f]): # 3-opt (a, c) (b, e) (d, f)
gain, exchange, current_min = base - current, 6, current
return exchange, gain | 0bd0468c2b1f206f27020ee3d9c5029606e1a8e8 | 3,627,362 |
from typing import Iterable
from typing import List
import logging
from typing import Callable
from typing import Optional
def __build_transfer_paths(
    requests_with_sources: "Iterable[RequestWithSources]",
    multihop_rses: "List[str]",
    schemes: "List[str]",
    failover_schemes: "List[str]",
    logger: "Callable" = logging.log,
    session: "Optional[Session]" = None,
):
    """
    For each request, find all possible transfer paths from its sources, which respect the
    constraints enforced by the request (attributes, type, etc) and the arguments of this function

    build a multi-source transfer if possible: The scheme compatibility is important for multi-source transfers.
    We iterate again over the single-hop sources and build a new transfer definition while enforcing the scheme compatibility
    with the initial source.

    Each path is a list of hops. Each hop is a transfer definition.

    :param requests_with_sources: requests to compute candidate paths for
    :param multihop_rses: names of RSEs allowed as intermediate hops
    :param schemes: protocols allowed for first-attempt transfers
    :param failover_schemes: protocols allowed when retrying a failed request
    :param logger: logging callable (signature of ``logging.log``)
    :param session: database session passed through to helpers
    :return: tuple (candidate_paths_by_request_id, reqs_no_source,
        reqs_scheme_mismatch, reqs_only_tape_source)
    """
    ctx = _RseLoaderContext(session)
    protocol_factory = ProtocolFactory()
    unavailable_read_rse_ids = __get_unavailable_rse_ids(operation='read', session=session)
    unavailable_write_rse_ids = __get_unavailable_rse_ids(operation='write', session=session)
    # Disallow multihop via blocklisted RSEs
    multihop_rses = list(set(multihop_rses).difference(unavailable_write_rse_ids).difference(unavailable_read_rse_ids))
    candidate_paths_by_request_id, reqs_no_source, reqs_only_tape_source, reqs_scheme_mismatch = {}, set(), set(), set()
    for rws in requests_with_sources:
        ctx.ensure_fully_loaded(rws.dest_rse)
        for source in rws.sources:
            ctx.ensure_fully_loaded(source.rse)
        transfer_schemes = schemes
        if rws.previous_attempt_id and failover_schemes:
            transfer_schemes = failover_schemes
        logger(logging.DEBUG, 'Found following sources for %s: %s', rws, [str(src.rse) for src in rws.sources])
        # Assume request doesn't have any sources. Will be removed later if sources are found.
        reqs_no_source.add(rws.request_id)
        # Check if destination is blocked
        if rws.dest_rse.id in unavailable_write_rse_ids:
            logger(logging.WARNING, 'RSE %s is blocked for write. Will skip the submission of new jobs', rws.dest_rse)
            continue
        # parse source expression
        source_replica_expression = rws.attributes.get('source_replica_expression', None)
        allowed_source_rses = None
        if source_replica_expression:
            try:
                parsed_rses = parse_expression(source_replica_expression, session=session)
            except InvalidRSEExpression as error:
                logger(logging.ERROR, "Invalid RSE exception %s: %s", source_replica_expression, str(error))
                continue
            else:
                allowed_source_rses = [x['id'] for x in parsed_rses]
        # Apply the source filters one after another (each is a lazy filter).
        filtered_sources = rws.sources
        # Only keep allowed sources
        if allowed_source_rses is not None:
            filtered_sources = filter(lambda s: s.rse.id in allowed_source_rses, filtered_sources)
        filtered_sources = filter(lambda s: s.rse.name is not None, filtered_sources)
        # Ignore blocklisted RSEs
        filtered_sources = filter(lambda s: s.rse.id not in unavailable_read_rse_ids, filtered_sources)
        # For staging requests, the staging_buffer attribute must be correctly set
        if rws.request_type == RequestType.STAGEIN:
            filtered_sources = filter(lambda s: s.rse.attributes.get('staging_buffer') == rws.dest_rse.name, filtered_sources)
        # Ignore tape sources if they are not desired
        filtered_sources = list(filtered_sources)
        had_tape_sources = len(filtered_sources) > 0
        if not rws.attributes.get("allow_tape_source", True):
            filtered_sources = filter(lambda s: not s.rse.is_tape_or_staging_required(), filtered_sources)
        filtered_sources = list(filtered_sources)
        if len(rws.sources) != len(filtered_sources):
            logger(logging.DEBUG, 'Sources after filtering for %s: %s', rws, [str(src.rse) for src in filtered_sources])
        any_source_had_scheme_mismatch = False
        candidate_paths = []
        # Compute a transfer definition (path) per surviving source RSE.
        if rws.request_type == RequestType.STAGEIN:
            paths = __create_stagein_definitions(rws=rws,
                                                 sources=filtered_sources,
                                                 limit_dest_schemes=transfer_schemes,
                                                 operation_src='read',
                                                 operation_dest='write',
                                                 protocol_factory=protocol_factory)
        else:
            paths = __create_transfer_definitions(ctx,
                                                  rws=rws,
                                                  sources=filtered_sources,
                                                  multihop_rses=multihop_rses,
                                                  limit_dest_schemes=[],
                                                  operation_src='third_party_copy',
                                                  operation_dest='third_party_copy',
                                                  domain='wan',
                                                  protocol_factory=protocol_factory,
                                                  session=session)
        for source in filtered_sources:
            transfer_path = paths.get(source.rse.id)
            if transfer_path is None:
                logger(logging.WARNING, "Request %s: no path from %s to %s", rws.request_id, source.rse, rws.dest_rse)
                continue
            if not transfer_path:
                any_source_had_scheme_mismatch = True
                logger(logging.WARNING, "Request %s: no matching protocol between %s and %s", rws.request_id, source.rse, rws.dest_rse)
                continue
            if len(transfer_path) > 1:
                logger(logging.DEBUG, 'From %s to %s requires multihop: %s', source.rse, rws.dest_rse, [str(hop) for hop in transfer_path])
            candidate_paths.append(transfer_path)
        if len(filtered_sources) != len(candidate_paths):
            logger(logging.DEBUG, 'Sources after path computation for %s: %s', rws, [str(path[0].src.rse) for path in candidate_paths])
        candidate_paths = __filter_multihops_with_intermediate_tape(candidate_paths)
        candidate_paths = __compress_multihops(candidate_paths, rws.sources)
        candidate_paths = list(__sort_paths(candidate_paths))
        if not candidate_paths:
            # It can happen that some sources are skipped because they are TAPE, and others because
            # of scheme mismatch. However, we can only have one state in the database. I picked to
            # prioritize setting only_tape_source without any particular reason.
            if had_tape_sources and not filtered_sources:
                logger(logging.DEBUG, 'Only tape sources found for %s' % rws)
                reqs_only_tape_source.add(rws.request_id)
                reqs_no_source.remove(rws.request_id)
            elif any_source_had_scheme_mismatch:
                logger(logging.DEBUG, 'Scheme mismatch detected for %s' % rws)
                reqs_scheme_mismatch.add(rws.request_id)
                reqs_no_source.remove(rws.request_id)
            else:
                logger(logging.DEBUG, 'No candidate path found for %s' % rws)
            continue
        candidate_paths_by_request_id[rws.request_id] = candidate_paths
        reqs_no_source.remove(rws.request_id)
    return candidate_paths_by_request_id, reqs_no_source, reqs_scheme_mismatch, reqs_only_tape_source
import itertools
def new_min_max(_builtin_func, *args, **kwargs):
    """
    Wrap builtin min/max to support the "default" keyword introduced in
    Python 3.4.

    :param _builtin_func: builtin min or builtin max
    :param args: either a single iterable, or two-or-more positional values
    :param kwargs: optional ``key`` callable and/or ``default`` value
        (``default`` is only legal with the single-iterable form)
    :return: the min or max based on the arguments passed
    :raises TypeError: on unknown keyword arguments, no positional
        arguments, or ``default`` combined with multiple positionals
    :raises ValueError: on an empty iterable without a ``default``
    """
    for key in kwargs:
        if key not in ('key', 'default'):
            # Fixed: the original passed (msg, key) as two TypeError args,
            # so the placeholder was never interpolated.
            raise TypeError('Illegal argument %s' % key)
    if not args:
        raise TypeError('%s expected at least 1 argument, got 0'
                        % _builtin_func.__name__)
    # Checking key membership replaces the module-level _SENTINEL dance.
    has_default = 'default' in kwargs
    if len(args) != 1 and has_default:
        raise TypeError('Cannot specify a default for %s with multiple '
                        'positional arguments' % _builtin_func.__name__)
    key_func = kwargs.get('key')
    if len(args) == 1:
        iterator = iter(args[0])
        try:
            first = next(iterator)
        except StopIteration:
            if has_default:
                return kwargs['default']
            raise ValueError('{}() arg is an empty sequence'.format(_builtin_func.__name__))
        # Re-attach the consumed first element.
        values = itertools.chain([first], iterator)
    else:
        values = args
    if key_func is not None:
        return _builtin_func(values, key=key_func)
    return _builtin_func(values)
from typing import Optional
import os
def get_database_config(parsed: ConfigParser, manager: "ConfigManager") -> dict:
    """
    Generate a populated database configuration dictionary.

    TODO: This should be shared with dbManager.py.

    :param parsed: ConfigParser with an optional [database] section
    :param manager: ConfigManager supplying fallbacks (credentials, paths)
    :return: dict with keys 'engine', 'host', 'username', 'password',
        'port' (Optional[int]) and 'path'
    :raises NotImplementedError: for engines other than sqlite/mysql
    """
    config: dict = {}
    # Each option falls back to a manager-provided (or constant) default;
    # fallbacks are only evaluated when the option is absent.
    config['engine'] = parsed.get('database', 'engine') if \
        parsed.has_option('database', 'engine') else \
        'sqlite'
    config['host'] = parsed.get('database', 'host') if \
        parsed.has_option('database', 'host') else \
        'localhost'
    config['username'] = parsed.get('database', 'username') if \
        parsed.has_option('database', 'username') else \
        manager.getDbUser()
    config['password'] = parsed.get('database', 'password') if \
        parsed.has_option('database', 'password') else \
        manager.getDbPassword()
    if config['engine'] == 'sqlite':
        config['port'] = None
        config['path'] = parsed.get('database', 'path') if \
            parsed.has_option('database', 'path') else \
            os.path.join(
                manager.getEtcDir(),
                manager.getDbSchema() + '.sqlite'
            )
    elif config['engine'] == 'mysql':
        # Fixed: the original read the port via ``manager`` instead of
        # ``parsed`` (copy-paste), and left it as a string; getint matches
        # the declared Optional[int] contract.
        config['port'] = parsed.getint('database', 'port') if \
            parsed.has_option('database', 'port') else \
            3306
        config['path'] = manager.getDbSchema()
    else:
        raise NotImplementedError('{} is not supported'.format(
            config['engine']
        ))
    return config
def eigenvector_centrality(
    G, max_iter=100, tol=1.0e-6, normalized=True
):
    """
    Compute the eigenvector centrality for a graph G.
    Eigenvector centrality computes the centrality for a node based on the
    centrality of its neighbors. The eigenvector centrality for node i is the
    i-th element of the vector x defined by the eigenvector equation.
    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        cuGraph graph descriptor with connectivity information. The graph can
        contain either directed or undirected edges.
    max_iter : int, optional (default=100)
        The maximum number of iterations before an answer is returned. This can
        be used to limit the execution time and do an early exit before the
        solver reaches the convergence tolerance.
    tol : float, optional (default=1e-6)
        Set the tolerance the approximation, this parameter should be a small
        magnitude value.
        The lower the tolerance the better the approximation. If this value is
        0.0f, cuGraph will use the default value which is 1.0e-6.
        Setting too small a tolerance can lead to non-convergence due to
        numerical roundoff. Usually values between 1e-2 and 1e-6 are
        acceptable.
    normalized : bool, optional, default=True
        If True normalize the resulting eigenvector centrality values
    Returns
    -------
    df : cudf.DataFrame or Dictionary if using NetworkX
        GPU data frame containing two cudf.Series of size V: the vertex
        identifiers and the corresponding eigenvector centrality values.
        df['vertex'] : cudf.Series
            Contains the vertex identifiers
        df['eigenvector_centrality'] : cudf.Series
            Contains the eigenvector centrality of vertices
    Examples
    --------
    >>> gdf = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                     dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G.from_cudf_edgelist(gdf, source='0', destination='1')
    >>> ec = cugraph.eigenvector_centrality(G)
    """
    # Validate the solver parameters before doing any graph work.
    if (not isinstance(max_iter, int)) or max_iter <= 0:
        raise ValueError(f"'max_iter' must be a positive integer"
                         f", got: {max_iter}")
    if (not isinstance(tol, float)) or (tol <= 0.0):
        raise ValueError(f"'tol' must be a positive float, got: {tol}")
    # NOTE(review): `normalized` is accepted but never used below — confirm
    # whether it should be forwarded to the pylibcugraph solver.
    G, isNx = ensure_cugraph_obj_for_nx(G)
    srcs = G.edgelist.edgelist_df['src']
    dsts = G.edgelist.edgelist_df['dst']
    if 'weights' in G.edgelist.edgelist_df.columns:
        weights = G.edgelist.edgelist_df['weights']
    else:
        # FIXME: If weights column is not imported, a weights column of 1s
        # with type hardcoded to float32 is passed into wrapper
        weights = cudf.Series(cupy.ones(srcs.size, dtype="float32"))
    resource_handle = ResourceHandle()
    graph_props = GraphProperties(is_multigraph=G.is_multigraph())
    store_transposed = False
    renumber = False
    do_expensive_check = False
    sg = SGGraph(resource_handle, graph_props, srcs, dsts, weights,
                 store_transposed, renumber, do_expensive_check)
    vertices, values = pylib_eigen(resource_handle, sg,
                                   tol, max_iter,
                                   do_expensive_check)
    df = cudf.DataFrame()
    df["vertex"] = cudf.Series(vertices)
    df["eigenvector_centrality"] = cudf.Series(values)
    if G.renumbered:
        df = G.unrenumber(df, "vertex")
    # Match the NetworkX convention when the caller passed a NetworkX graph.
    # (The original bound this to a local named `dict`, shadowing the builtin.)
    if isNx is True:
        return df_score_to_dictionary(df, "eigenvector_centrality")
    return df
return df | b6e710f9955b86eb661bc955f6e5287033b7f552 | 3,627,366 |
def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
    """ Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
    """
    # BUG FIX: the original used the fragile `cond and a or b` idiom, which
    # silently falls through to the indefinite article whenever
    # definite_article() returns a falsy value (e.g. an empty string).
    if function == DEFINITE:
        return definite_article(word, gender, role)
    return indefinite_article(word, gender, role)
or indefinite_article(word, gender, role) | fe21fdc34253c1736bcc17dae25f14d502ce301e | 3,627,367 |
import requests
def get_workbooks(server, auth_token, user_id, site_id):
    """
    Queries all existing workbooks on the current site.
    'server'     specified server address
    'auth_token' authentication token that grants user access to API calls
    'user_id'    ID of user with access to workbooks
    'site_id'    ID of the site that the user is signed into
    Returns tuples for each workbook, containing its id and name.
    """
    endpoint = "{0}/api/{1}/sites/{2}/users/{3}/workbooks".format(
        server, VERSION, site_id, user_id)
    response = requests.get(endpoint, headers={'x-tableau-auth': auth_token})
    _check_status(response, 200)
    parsed = ET.fromstring(_encode_for_display(response.text))
    # Collect an (id, name) pair for every <workbook> element in the reply.
    workbooks = []
    for workbook in parsed.findall('.//t:workbook', namespaces=xmlns):
        workbooks.append((workbook.get('id'), workbook.get('name')))
    if not workbooks:
        raise LookupError("No workbooks found on this site")
    return workbooks
return workbooks | 4a91fe94e5f71e6599689e3430f75239e8efd4aa | 3,627,368 |
def dirty_image_generate(dirty_image_uv, mask = None, baseline_threshold = 0, normalization = None,
                         resize = None, width_smooth = None, degpix = None, not_real = False,
                         image_filter_fn = 'filter_uv_uniform', pad_uv_image = None, filter = None,
                         vis_count = None, weights = None, beam_ptr = None, obs = None, psf = None, params = None,
                         fi_use = None, bi_use = None, mask_mirror_indices = False):
    """
    Turn a gridded UV-plane (visibility) image into a dirty image via FFT.

    Optionally masks short baselines with a smoothed cut, applies a user mask
    and/or UV filter, resizes, zero-pads, normalizes for the FFT convention
    and weights by the beam.

    Parameters
    ----------
    dirty_image_uv : numpy.ndarray
        2D (elements x dimension) UV-plane image. Never modified in place.
    mask : numpy.ndarray, optional
        Multiplicative mask applied to the UV plane.
    baseline_threshold : float, optional
        UV radius below (if >= 0) or above (if < 0) which the plane is zeroed.
    normalization : float or numpy.ndarray, optional
        Final multiplicative normalization; when given it is also returned.
    resize : int, optional
        Rebinning factor applied to both axes.
    width_smooth : float, optional
        Width of the box kernel used to soften the baseline-cut edge.
    degpix : float, optional
        Pixel size in degrees, used for FFT normalization.
    not_real : bool, optional
        When True the complex dirty image is returned instead of its real part.
    image_filter_fn : str, optional
        Name of a filter function looked up in the `filters` module.
    pad_uv_image : float, optional
        Padding factor for the UV image.
    filter : numpy.ndarray, optional
        Precomputed UV filter, used directly when its size matches.
    vis_count, weights, beam_ptr, obs, psf, params, fi_use, bi_use,
    mask_mirror_indices : optional
        Passed through unchanged to the filter function.

    Returns
    -------
    dirty_image : numpy.ndarray
        The dirty image (real unless `not_real` is set).
    normalization
        Returned as a second value only when `normalization` was supplied
        (historical quirk preserved for callers).
    """
    # dimension is columns, elements is rows
    elements, dimension = dirty_image_uv.shape
    # BUG FIX: work on a copy. The in-place *= / /= operations below used to
    # silently mutate the caller's input array.
    di_uv_use = dirty_image_uv.copy()
    # If the baseline threshold has been set
    if baseline_threshold is not None:
        # If width smooth hasn't been set, set it
        if width_smooth is None:
            width_smooth = np.floor(np.sqrt(dimension * elements) / 100)
        rarray = np.sqrt((meshgrid(dimension, 1) - dimension / 2) ** 2 + (meshgrid(elements, 2) - elements / 2) ** 2)
        # Get all the values that meet the threshold
        if baseline_threshold >= 0:
            cut_i = np.where(rarray.flatten() < baseline_threshold)
        else:
            cut_i = np.where(rarray.flatten() > np.abs(baseline_threshold))
        # Create the mask array of ones
        mask_bt = np.ones((elements, dimension))
        # If there are values from cut, then use all those here and replace with 0
        if np.size(cut_i) > 0:
            mask_bt_flatiter = mask_bt.flat
            mask_bt_flatiter[cut_i] = 0
        if width_smooth is not None:
            # Get the kernel width
            kernel_width = np.max([width_smooth, 1])
            # In IDL if the kernel width is even one is added to make it odd
            if kernel_width % 2 == 0:
                kernel_width += 1
            # Use a box width averaging filter over the mask, use valid so we can insert it in later
            box_averages = convolve(mask_bt, Box2DKernel(kernel_width), mode = 'valid')
            # Since IDL SMOOTH edges by default are the edges of the array used, ignore edges
            # (it's the reason why we used a valid convolve)
            start = int(kernel_width // 2)
            # NOTE(review): both slice bounds use shape[1]; this only lines up
            # with the 'valid' convolution output when the array is square —
            # confirm before using on non-square UV planes.
            end = int(mask_bt.shape[1] - (kernel_width // 2))
            mask_bt[start : end, start : end] = box_averages
        # Apply boxed mask to the dirty image
        di_uv_use *= mask_bt
    # If a mask was supplied use that too
    if mask is not None:
        di_uv_use *= mask
    # If a filter was supplied as a numpy array (we can adjust this to support different formats)
    if filter is not None:
        if isinstance(filter, np.ndarray):
            # If the filter is already the right size, use it
            if np.size(filter) == np.size(di_uv_use):
                di_uv_use *= filter
            # Otherwise use a filter function, looked up by name.
            # (Replaces the original eval()-based dispatch — same call, no eval.)
            else:
                filter_func = getattr(filters, image_filter_fn)
                di_uv_use, _ = filter_func(
                    di_uv_use, vis_count = vis_count, obs = obs, psf = psf,
                    params = params, weights = weights, fi_use = fi_use,
                    bi_use = bi_use, mask_mirror_indices = mask_mirror_indices)
    # Resize the dirty image by the factor resize
    if resize is not None:
        dimension *= resize
        elements *= resize
        di_uv_real = di_uv_use.real
        di_uv_img = di_uv_use.imag
        # Use rebin to resize, apply to real and complex separately
        di_uv_real = rebin(di_uv_real, (elements, dimension))
        di_uv_img = rebin(di_uv_img, (elements, dimension))
        # Combine real and complex back together
        di_uv_use = di_uv_real + di_uv_img * 1j
    # Apply padding if it was supplied
    if pad_uv_image is not None:
        di_uv1 = np.pad(di_uv_use, np.max([dimension, elements]) // 2)
        di_uv_use = di_uv1 * (pad_uv_image ** 2)
    # FFT normalization
    if degpix is not None:
        di_uv_use /= np.radians(degpix) ** 2
    # Multivariate Fast Fourier Transform
    dirty_image = np.fft.fftshift(np.fft.fftn(np.fft.fftshift(di_uv_use), norm = "forward"))
    if not not_real:
        dirty_image = dirty_image.real
    # filter_uv_optimal produces images that are weighted by one factor of the beam
    # Weight by an additional factor of the beam to align with FHD's convention
    if image_filter_fn == 'filter_uv_optimal' and beam_ptr is not None:
        dirty_image *= beam_ptr
    # If we are returning complex, make sure its complex
    if not_real:
        dirty_image = dirty_image.astype("complex")
    else:
        dirty_image = dirty_image.real
    # Normalize by the matrix given, if it was given
    if normalization is not None:
        dirty_image *= normalization
        # NOTE(review): historical quirk — a (image, normalization) tuple is
        # returned only on this branch; callers depend on it.
        return dirty_image, normalization
    return dirty_image
return dirty_image | 679afb90fd45c8c30805ad84f0ea08103b18d398 | 3,627,369 |
from datetime import datetime
from typing import MutableMapping
from typing import Any
def get_significant_states_with_session(
    hass: HomeAssistant,
    session: Session,
    start_time: datetime,
    end_time: datetime | None = None,
    entity_ids: list[str] | None = None,
    filters: Filters | None = None,
    include_start_time_state: bool = True,
    significant_changes_only: bool = True,
    minimal_response: bool = False,
    no_attributes: bool = False,
    compressed_state_format: bool = False,
) -> MutableMapping[str, list[State | dict[str, Any]]]:
    """
    Return state changes during the UTC period start_time - end_time.

    `entity_ids` optionally restricts the result set. `filters` is an
    optional SQLAlchemy filter applied to the database queries unless
    entity_ids is given, in which case it is ignored. "Significant" states
    are all state changes plus every state from certain domains (for
    instance thermostats, so graphs always show the current temperature).
    """
    query = _significant_states_stmt(
        _schema_version(hass),
        start_time,
        end_time,
        entity_ids,
        filters,
        significant_changes_only,
        no_attributes,
    )
    # The start_time bind is only needed when the statement is not already
    # scoped to specific entity ids.
    rows = execute_stmt_lambda_element(
        session, query, None if entity_ids else start_time, end_time
    )
    return _sorted_states_to_dict(
        hass,
        session,
        rows,
        start_time,
        entity_ids,
        filters,
        include_start_time_state,
        minimal_response,
        no_attributes,
        compressed_state_format,
    )
) | 2e2767d07e3b9a2bfa75acb4664a2a93c5d10472 | 3,627,370 |
def vc_to_oct(solution):
    """Convert a VertexCover solution into an OCT solution.

    Undoes the graph doubling used to reduce OCT to VC: the mirrored
    vertices are dropped, and the OCT certificate consists of exactly those
    vertices whose both copies appear in the VC certificate.

    Parameters
    ----------
    solution : Solution
        Solution object representing a VertexCover solution.

    Returns
    -------
    Solution
        Solution object representing the corresponding OCT solution.
    """
    # A doubled graph must have an even vertex count.
    if solution.n % 2 != 0:
        raise Exception('Vertices in VC->OCT solution must be even')
    half = solution.n // 2
    graph = solution.g.copy()
    # The mirror copies were labelled str(half) .. str(n - 1); remove them.
    graph.remove_nodes_from(str(i) for i in range(half, solution.n))
    node_count = len(graph.nodes())
    certificate = []
    for node in graph.nodes():
        mirror = str(int(node) + node_count)
        if node in solution.certificate and mirror in solution.certificate:
            certificate.append(node)
    return Solution(
        G=graph,
        certificate=certificate,
        threads=solution.threads,
        mipgap=solution.mipgap,
        time=solution.time,
        cuts=solution.cuts
    )
) | e08425dbc31f89dc4e0d79c257f83c884efb0714 | 3,627,371 |
def class_of ( object ):
    """ Returns a string containing the class name of an object with the
        correct indefinite article ('a' or 'an') preceding it (e.g., 'an Image',
        'a PlotValue').
    """
    # A string argument is treated as a class name already.
    if isinstance(object, py3compat.string_types):
        return add_article(object)
    return add_article(object.__class__.__name__)
return add_article( object.__class__.__name__ ) | 4cfad61c30afd3ab726f52bcf5906a59dd711866 | 3,627,372 |
import jinja2
import shutil
import subprocess
import sys
import traceback
import platform
def _tool_version(executable: str, version_args: list) -> str:
    """Return the version string reported by *executable*, or a diagnostic.

    Looks the executable up on PATH; returns 'Not installed or not on PATH'
    when it is absent and 'Error: ...' when the version command fails.
    """
    path = shutil.which(executable)
    if not path:
        return 'Not installed or not on PATH'
    try:
        return subprocess.check_output(
            [path] + version_args,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True).rstrip()
    except subprocess.CalledProcessError as e:
        # BUG FIX: two of the original handlers referenced `e` without
        # binding it (`except CalledProcessError:` with no `as e`), which
        # raised NameError instead of reporting the error.
        return 'Error: {!r}'.format(e.stderr)


def _create_issue_body(command: str) -> str:
    """Generate a Github issue body based on given exception and command.

    Args:
        command: The command causing the exception to get thrown,
            e.g. 'django-cloud-deploy new'.

    Returns:
        Github issue body in string.
    """
    template_env = jinja2.Environment()
    template = template_env.from_string(_ISSUE_TEMPLATE)
    options = {
        'django_cloud_deploy_version': __version__.__version__,
        'command': command,
        'gcloud_version': _tool_version(
            'gcloud', ['info', '--format=value(basic.version)']),
        # The original invoked 'docker' by bare name here despite having
        # resolved its path; the helper now consistently uses the resolved path.
        'docker_version': _tool_version('docker', ['--version']),
        'cloud_sql_proxy_version': _tool_version('cloud_sql_proxy',
                                                 ['--version']),
        'python_version': sys.version.replace('\n', ' '),
        'traceback': traceback.format_exc(),
        'platform': platform.platform(),
    }
    return template.render(options)
return content | ad6fff41d8b7660e5d054a137956963b2407e198 | 3,627,373 |
def saturated_vapour_pressure_inst(t_air_i):
    """Instantaneous saturated vapour pressure.

    Thin wrapper around :func:`saturated_vapour_pressure` evaluated at an
    instantaneous air temperature.

    Parameters
    ----------
    t_air_i : float
        instantaneous air temperature
        :math:`T_{a,i}`
        [C]

    Returns
    -------
    svp_i : float
        instantaneous saturated vapour pressure
        :math:`e_{s,i}`
        [mbar]
    """
    svp_i = saturated_vapour_pressure(t_air_i)
    return svp_i
return saturated_vapour_pressure(t_air_i) | f5b45c6c354dfedf6e16d12bebd9b9f499e413ee | 3,627,374 |
from typing import Tuple
from typing import Optional
from datetime import datetime
def format_timestamp_range(
        timestamp_range: Tuple[int, int],
        timestamp_unit: Optional[str],
        timestamp_format: Optional[str]
) -> str:
    """
    Format a pair of timestamps as a human-readable 'start : end' range.

    :param timestamp_range: (first, last) timestamps.
    :param timestamp_unit: a posix unit key of FACTOR_TO_SECONDS, 'auto' to
        guess the unit, or None/'index' to print the raw values.
    :param timestamp_format: optional strftime format applied to both ends.
    :return: formatted range string.
    """
    if timestamp_unit is None:
        timestamp_unit = 'index'
    elif timestamp_unit == 'auto':
        # Only accept the guess when both endpoints agree on the unit.
        guessed_units = tuple(guess_timestamp_posix_unit(ts) for ts in timestamp_range)
        timestamp_unit = guessed_units[0] if guessed_units[0] == guessed_units[1] else 'index'
    if timestamp_unit in FACTOR_TO_SECONDS:
        try:
            factor = FACTOR_TO_SECONDS[timestamp_unit]
            dts = [datetime.utcfromtimestamp(ts * factor)
                   for ts in timestamp_range]
            # An explicit format applies to both endpoints.
            if timestamp_format:
                return ' : '.join(dt.strftime(timestamp_format) for dt in dts)
            # If format not given, use ISO for the 1st and, for the second,
            # print only what changed (the date is repeated only if it differs).
            timestamp_format_d = {
                'date': '%Y/%m/%d',
                'time': '%H:%M:%S.%f'
            }
            timestamp_parts = [
                {
                    pname: dt.strftime(pformat)
                    for pname, pformat in timestamp_format_d.items()
                }
                for dt in dts
            ]
            timestamp_str = timestamp_parts[0]['date']
            timestamp_str += ' '
            timestamp_str += timestamp_parts[0]['time']
            timestamp_str += ' : '
            # BUG FIX: the original dropped the second time entirely when the
            # dates matched (leaving a trailing ' : '), and when the dates
            # differed it overwrote the whole string with the *first* date
            # and the second time. Append instead of overwriting.
            if timestamp_parts[0]['date'] != timestamp_parts[1]['date']:
                timestamp_str += timestamp_parts[1]['date']
                timestamp_str += ' '
            timestamp_str += timestamp_parts[1]['time']
            return timestamp_str
        except ValueError:
            return ' : '.join(str(ts) for ts in timestamp_range) + f' ** FAIL to parse as posix {timestamp_unit}'
    else:  # not posix
        return ' : '.join(str(ts) for ts in timestamp_range)
return ' : '.join(str(ts) for ts in timestamp_range) | f0cf3cf0465ec8ebaafa73fd9a60e7b589df3c19 | 3,627,375 |
def make_eeg_average_ref_proj(info, activate=True, verbose=None):
    """Build the SSP projection vector implementing an average EEG reference.

    Parameters
    ----------
    info : dict
        Measurement info.
    activate : bool
        If True projections are activated.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    eeg_proj : instance of Projection
        The SSP/PCA projector.
    """
    # A custom reference and an average reference are mutually exclusive.
    if info.get('custom_ref_applied', False):
        raise RuntimeError('Cannot add an average EEG reference projection '
                           'since a custom reference has been applied to the '
                           'data earlier.')
    logger.info("Adding average EEG reference projection.")
    picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
                       exclude='bads')
    n_eeg = len(picks)
    if n_eeg == 0:
        raise ValueError('Cannot create EEG average reference projector '
                         '(no EEG data found)')
    names = [info['ch_names'][idx] for idx in picks]
    # A single row of 1/n implements subtraction of the channel mean.
    proj_data = dict(col_names=names, row_names=None,
                     data=np.ones((1, n_eeg)) / n_eeg,
                     nrow=1, ncol=n_eeg)
    return Projection(active=activate, data=proj_data,
                      desc='Average EEG reference',
                      kind=FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF)
return eeg_proj | 125a9320d1e018de2bc709f1e777cbb34cf8f127 | 3,627,376 |
def generate_degree2_invariants_from_different(coeffs1, coeffs2):
    """
    Generate degree 2 invariants from density projection coefficients.

    Parameters
    ----------
    coeffs1 : array[num_envs, num_species, nmax1, (lmax+1)**2]
        Density projection coefficients. This could include the spherical
        expansion coefficients in librascal terminology, or the coefficients
        obtained from pyLODE.
    coeffs2 : array[num_envs, num_species, nmax2, (lmax+1)**2]
        A second set of coefficients, as for coeffs1. nmax1 and nmax2 do not
        need to agree, but all other dimensions must be the same.

    Returns
    -------
    array[num_envs, num_species**2, nmax1*nmax2, lmax+1]
        Degree 2 invariants, indexed by species pair (a1*num_species + a2),
        radial pair (n1*nmax2 + n2) and angular momentum l; each entry is
        sum_m c1[...,l,m] * c2[...,l,m] / sqrt(2l+1).
    """
    # Since a different nmax is allowed for the two inputs, nmax is unchecked.
    num_env, num_species, nmax1, lmmax = coeffs1.shape
    assert num_env == coeffs2.shape[0]
    assert num_species == coeffs2.shape[1]
    assert lmmax == coeffs2.shape[3]
    lmax = int(np.round(np.sqrt(lmmax))) - 1
    nmax2 = coeffs2.shape[2]
    deg2_invariants = np.zeros((num_env, num_species ** 2, nmax1 * nmax2, lmax + 1))
    # Vectorised replacement of the original 5-deep Python loop nest: for each
    # l, contract the (2l+1) m-components of every (species, radial) pair in a
    # single einsum, then flatten the pair axes into the combined indices
    # (a1, a2) -> a1*num_species + a2 and (n1, n2) -> n1*nmax2 + n2.
    for l in range(lmax + 1):
        block1 = coeffs1[:, :, :, l ** 2:(l + 1) ** 2]
        block2 = coeffs2[:, :, :, l ** 2:(l + 1) ** 2]
        pairwise = np.einsum('eanm,ebkm->eabnk', block1, block2) / np.sqrt(2 * l + 1)
        deg2_invariants[:, :, :, l] = pairwise.reshape(
            num_env, num_species ** 2, nmax1 * nmax2)
    return deg2_invariants
return deg2_invariants | abdfa4806d32f4c5b34e6c8cccbc841b9cadbe19 | 3,627,377 |
def ts_css(text):
    """Wrap the given text in an HTML span carrying the ``ts`` CSS class."""
    return '<span class="ts">{}</span>'.format(text)
return '<span class="ts">%s</span>' % text | a505f4ffc8359bc886f0011295fb5309529be5bf | 3,627,378 |
import typing
def noop(val: typing.Any, *_args, **_kwargs) -> typing.Any:
    """Identity helper: return *val* unchanged, ignoring extra arguments.

    >>> noop(1)
    1
    """
    return val
return val | 99841c0b291a654d83741500e83441482f59d45a | 3,627,379 |
def method_detect(method: str):
    """Map a post-processing method name to a fresh instance of its class.

    Returns False for names outside POSTPROCESS_METHODS, None for known
    names with no implementation, otherwise the instantiated method object.
    """
    if method not in POSTPROCESS_METHODS:
        return False
    dispatch = {
        "rtb-bnb": RemovingTooTransparentBordersHardAndBlurringHardBorders,
        "rtb-bnb2": RemovingTooTransparentBordersHardAndBlurringHardBordersTwo,
        "fba": FBAMatting,
    }
    cls = dispatch.get(method)
    return cls() if cls is not None else None
return False | 4d3a065b25ac25a15e24681a723b5aa9e2354bc9 | 3,627,380 |
def normalized_difference():
    """
    Factory for the 'normalized_difference' process implementations.

    For details, see the implementations inside `NormalizedDifference`.

    Returns
    -------
    NormalizedDifference :
        Class instance implementing all 'normalized_difference' processes.
    """
    return NormalizedDifference()
return NormalizedDifference() | e3325e48c1ea7d8b775d7d9613425c445cad4f57 | 3,627,381 |
def detect_event_no_plot(VLMuscle, VRMuscle, Time, Threshold):
    """
    Compute start, end and duration of swimming episodes, defined as the
    periods where the smoothed summed muscle activity exceeds *Threshold*.
    Does not plot the result.

    :param VLMuscle: list or 1-D numpy array
    :param VRMuscle: list or 1-D numpy array
    :param Time: list or 1-D numpy array
    :param Threshold: float
    :return: three 1-D numpy arrays (or empty lists when nothing is detected)
        with start times, end times and durations of the detected events.
    """
    total = np.sum(VLMuscle, axis=0) + np.sum(VRMuscle, axis=0)
    total = smooth(total, 500)  # convolve with a step 50 ms wide
    above = Time[np.where(total > Threshold)]
    if not any(above):
        start = []
        end = []
        duration = []
    else:
        # A gap larger than 0.2 s between consecutive supra-threshold samples
        # separates two episodes.
        gap = np.diff(above) > 0.2
        end = above[np.append(gap, True)]
        start = above[np.insert(gap, 0, True)]
        duration = end - start
    return start, end, duration
return start, end, duration | 3a7eae3a3b7b8e16e6c4365f7390227084676dc9 | 3,627,382 |
import torch
def build_dgl_graph_v15(nodes, edges, sent_nodes):
    """
    Build DGL homogeneous graph based on New Graph structure.

    Every semantic edge is materialised as an intermediate "edge node" so the
    result is a homogeneous graph; node features are 2-element tensors where
    negative sentinel values tag the node/edge kind (-1 root, -2 root-sent,
    -3 inter-sent, -4 sent-entity, -5 verb-arg, -6 arg-verb, -7 arg-arg,
    -8 root-qo, -9 sent-q, -10 question/option node) and non-negative values
    hold token positions.

    :param nodes: sequence of node objects; the last element is the
        question/option node, the rest are entity nodes (assumption from the
        slicing below — confirm against the caller).
    :param edges: iterable of (src_id, relation, dst_id) triples; relation
        'Coref' marks coreference edges, everything else is treated as SRL.
    :param sent_nodes: sequence of sentence node objects with `.token_pos`.
    :return: (g, num_sentences) where g is the built dgl graph with node
        features stored in g.ndata['pos'].

    Changelog:
    1112: checked graph through visualisation
    1116: qo_node feature: [-10, opt id]
    1201: hotpotQA, store node separately and combine
    """
    # split entity nodes and q_opt nodes
    entity_nodes = nodes[:-1]
    qo_nodes = nodes[-1:]
    last_e_node_id = len(entity_nodes) - 1
    # From top to end; index 0 is the root node, whose feature is [-1, -1].
    global_node_idx = 1
    global_node_feat = [torch.tensor([-1, -1])]
    global_src = []
    global_dst = []
    '''
    # root node as sent node 0
    sent_node_id = 0
    sent_node_feat = []
    r_s_edges = [] # feat: -2
    s_s_edges = [] # feat: -3
    '''
    # Step 1: root node -> ed_r_s -> sent node
    # sent node -> ed_s_s -> sent node
    # Add unidirectional edge, then dgl.add_reverse_edges()
    # Sentence nodes occupy indices 1..len(sent_nodes); their connecting edge
    # nodes are appended after them (features collected in tmp_ed_feat first
    # so node ids and feature positions line up).
    global_node_idx += len(sent_nodes)
    tmp_ed_feat = []
    for s_idx, s_node in enumerate(sent_nodes):
        global_src.extend([0, global_node_idx])
        global_dst.extend([global_node_idx, s_idx + 1])
        # update node
        global_node_idx += 1
        tmp_ed_feat.append(torch.tensor([-2, -2])) # root-sent node
        global_node_feat.append(torch.tensor(s_node.token_pos)) # sent node feat
        # add inter_sent node
        if s_idx < len(sent_nodes) - 1:
            global_src.extend([s_idx + 1, global_node_idx])
            global_dst.extend([global_node_idx, s_idx + 2])
            # update node
            global_node_idx += 1
            tmp_ed_feat.append(torch.tensor([-3, -3])) # inter-sent node
    global_node_feat.extend(tmp_ed_feat)
    assert len(global_node_feat) == global_node_idx
    # PASSED test 1
    # Step 2: sent node -> ed_s_e -> entity node
    # Entity nodes are laid out from entity_node_base, connected to the
    # sentence (ancestor) they belong to via sent-entity edge nodes.
    entity_node_base = global_node_idx
    global_node_idx += len(entity_nodes)
    tmp_ed_feat = []
    for e_idx, e_node in enumerate(entity_nodes):
        sent_node_id = e_node.ancestor + 1
        global_src.extend([sent_node_id, global_node_idx])
        global_dst.extend([global_node_idx, entity_node_base + e_idx])
        # update node
        global_node_idx += 1
        tmp_ed_feat.append(torch.tensor([-4, -4])) # sent-entity node
        global_node_feat.append(torch.tensor(e_node.token_pos)) # sent node feat
    global_node_feat.extend(tmp_ed_feat)
    assert len(global_node_feat) == global_node_idx
    # Step 2.5: qo node -> ed_r_q -> root node
    # Stores qo node global node idx into qo_idx_lst
    qo_idx_lst = []
    for qo_id, q_node in enumerate(qo_nodes):
        global_src.extend([0, global_node_idx])
        global_dst.extend([global_node_idx, global_node_idx + 1])
        # update node
        global_node_feat.append(torch.tensor([-8, -8])) # root-qo node
        global_node_idx += 1
        qo_idx_lst.append(global_node_idx)
        # global_node_feat.append(torch.tensor(q_node.token_pos)) # q_opt node feat
        # qo node feat change to [-10, qo_node_idx], for the convenience of encoding
        global_node_feat.append(torch.tensor([-10, qo_id]))
        global_node_idx += 1
    # Materialise the unidirectional edges collected so far, then mirror them.
    g = dgl.graph((torch.tensor(global_src), torch.tensor(global_dst)))
    g = dgl.add_reverse_edges(g)
    # PASSED test 2
    # Step 3: verb node -> ed_v_a -> arg node
    # arg node -> ed_a_v -> verb node
    # arg node <-> ed_a_a <-> arg node
    # These edges are added *after* add_reverse_edges, so both directions are
    # listed explicitly below.
    add_src = []
    add_dst = []
    for edge in edges:
        if edge[1] == 'Coref':
            if edge[2] > last_e_node_id:
                # sent-q edge: ed_s_q (coref target beyond the entity range
                # refers to a question/option node)
                assert edge[0] <= last_e_node_id
                tmp_sent_node_id = entity_nodes[edge[0]].ancestor + 1
                tmp_qo_node_id = qo_idx_lst[edge[2] - last_e_node_id - 1]
                add_src.extend([tmp_sent_node_id, global_node_idx, tmp_qo_node_id, global_node_idx])
                add_dst.extend([global_node_idx, tmp_qo_node_id, global_node_idx, tmp_sent_node_id])
                # update node
                global_node_idx += 1
                global_node_feat.append(torch.tensor([-9, -9])) # sent-q node
            else:
                # normal coref edge: ed_a_a
                add_src.extend([entity_node_base + edge[0], global_node_idx, entity_node_base + edge[2], global_node_idx])
                add_dst.extend([global_node_idx, entity_node_base + edge[2], global_node_idx, entity_node_base + edge[0]])
                # update node
                global_node_idx += 1
                global_node_feat.append(torch.tensor([-7, -7])) # arg-arg node
        else:
            # srl edge: ed_v_a
            add_src.extend([entity_node_base + edge[0], global_node_idx])
            add_dst.extend([global_node_idx, entity_node_base + edge[2]])
            # update node
            global_node_idx += 1
            global_node_feat.append(torch.tensor([-5, -5])) # verb-arg node
            # anti-srl edge: ed_a_v
            add_src.extend([entity_node_base + edge[2], global_node_idx])
            add_dst.extend([global_node_idx, entity_node_base + edge[0]])
            # update node
            global_node_idx += 1
            global_node_feat.append(torch.tensor([-6, -6])) # arg-verb node
    g = dgl.add_edges(g, torch.tensor(add_src), torch.tensor(add_dst))
    # Every node id handed out must have exactly one feature row.
    assert len(global_node_feat) == g.num_nodes()
    global_node_feat = torch.stack(global_node_feat)
    g.ndata['pos'] = global_node_feat
return g, len(sent_nodes) | 6e8e5da0a3ff6d37006606a818a708998fa206e2 | 3,627,383 |
import os
def get_large_rand_ary_tfrecord(n_samples=100000, n_features=2000, dtype=np.float32):
    """
    Build (or reuse previously written) tfrecord files for a random dense array.

    Args:
        n_samples (int)
        n_features (int)
    Returns:
        str: tfrecord data folder for training
        str: tfrecord data path for prediction (finally generating embedding)
        dict: dataset info ({'n_samples', 'n_features', 'issparse'})
    """
    X = get_rand_ary(n_samples, n_features, dtype)
    stem = '{}_{}_{}'.format(n_samples, n_features, dtype.__name__)
    train_data_folder = 'train_ary_{}_shards'.format(stem)
    pred_data_path = 'pred_ary_{}.tfrecord'.format(stem)
    # Only (re)write the records when either artefact is missing.
    if not os.path.exists(train_data_folder) or not os.path.exists(pred_data_path):
        write_ary_shards_to_tfrecord(X, tf_folder=train_data_folder, shard_num=10, shuffle=True)
        write_ary_to_tfrecord(X, tf_path=pred_data_path, shuffle=False)
    info_dict = {'n_samples': n_samples, 'n_features': n_features, 'issparse': False}
    return (train_data_folder, pred_data_path), info_dict
return (train_data_folder, pred_data_path), info_dict | 50cbb320b92ef32ef7869f8d10fe25286a06e5e3 | 3,627,384 |
def entropy_bubble(signal, delay=1, dimension=3, alpha=2, **kwargs):
    """**Bubble Entropy (BubblEn)**

    Permutation-entropy variant introduced by Manis et al. (2017) with the
    goal of being independent of parameters such as *Tolerance* and
    *Dimension*: embedding vectors are ordered by the number of bubble-sort
    swaps instead of their ordinal pattern.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau*, sometimes referred to as *lag*) in
        samples. See :func:`complexity_delay`.
    dimension : int
        Embedding dimension (*m*). See :func:`complexity_dimension`.
    alpha : float
        The alpha parameter for :func:`Rényi entropy <entropy_renyi>`.
        NOTE: currently accepted but not forwarded to the backend.
    **kwargs : optional
        Other arguments, passed through to the permutation-entropy backend.

    See Also
    --------
    complexity_ordinalpatterns, entropy_permutation, entropy_renyi

    Returns
    -------
    BubbEn : float
        The Bubble Entropy.
    info : dict
        Parameters used for the computation ('Dimension', 'Delay').

    Examples
    ----------
    .. ipython:: python
      import neurokit2 as nk
      signal = nk.signal_simulate(duration=2, frequency=5)
      BubbEn, info = nk.entropy_bubble(signal)
      BubbEn

    References
    ----------
    * Manis, G., Aktaruzzaman, M. D., & Sassi, R. (2017). Bubble entropy: An
      entropy almost free of parameters. IEEE Transactions on Biomedical
      Engineering, 64(11), 2711-2718.
    """
    # Only 1-D signals are supported.
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    info = {"Dimension": dimension, "Delay": delay}
    # Rényi permutation entropy with bubble-sort ordering at m and m + 1.
    h_m, h_m1 = (
        _entropy_permutation(
            signal,
            dimension=d,
            delay=delay,
            algorithm=entropy_renyi,
            sorting="bubblesort",
            **kwargs,
        )
        for d in (dimension, dimension + 1)
    )
    bubb_en = (h_m1 - h_m) / np.log((dimension + 1) / (dimension - 1))
    return bubb_en, info
return BubbEn[0], info | 0574f271acc7922a85ef9f6d411386bd5e8d2ac4 | 3,627,385 |
from typing import ClassVar
from typing import Sequence
from typing import Optional
def open_set(dataset_class: ClassVar, public_classes: Sequence[str],
             private_classes: Optional[Sequence[str]] = ()) -> ClassVar:
    """
    Build the open-set variant of an ``ImageList`` dataset class.

    Samples whose category is in `public_classes` keep their (renumbered)
    labels, samples from `private_classes` are relabelled "unknown", and all
    other samples are dropped. Be aware that this changes the label number
    of each category.

    Args:
        dataset_class (class): Dataset class. Only subclass of ``ImageList``
            can be open-set.
        public_classes (sequence[str]): Categories to keep; each element must
            belong to the `classes` list of `dataset_class`.
        private_classes (sequence[str], optional): Categories to mark as
            "unknown"; each element must belong to the `classes` list of
            `dataset_class`. Default: ().

    Examples::
        >>> public_classes = ['back_pack', 'bike', 'calculator', 'headphones', 'keyboard']
        >>> private_classes = ['laptop_computer', 'monitor', 'mouse', 'mug', 'projector']
        >>> OpenSetOffice31 = open_set(Office31, public_classes, private_classes)
        >>> dataset = OpenSetOffice31(root="data/office31", task="A")
    """
    if not (issubclass(dataset_class, ImageList)):
        raise Exception("Only subclass of ImageList can be openset")

    class OpenSetDataset(dataset_class):
        def __init__(self, **kwargs):
            super(OpenSetDataset, self).__init__(**kwargs)
            # New label space: the public classes followed by "unknown".
            kept = list(deepcopy(public_classes)) + ["unknown"]
            unknown_idx = kept.index("unknown")
            relabelled = []
            for (path, label) in self.samples:
                name = self.classes[label]
                if name in public_classes:
                    relabelled.append((path, kept.index(name)))
                elif name in private_classes:
                    relabelled.append((path, unknown_idx))
            self.samples = relabelled
            self.classes = kept
            self.class_to_idx = {cls: idx
                                 for idx, cls in enumerate(self.classes)}

    return OpenSetDataset
return OpenSetDataset | 6bb48c1f7b40ae88e30adeb9dbf5dff922ebe115 | 3,627,386 |
import string
import random
def generate_random_string(length,
                           using_digits=False,
                           using_ascii_letters=False,
                           using_punctuation=False):
    """
    Generate a random string of the given length from the selected
    character classes.

    Example:
        opting out for 50 symbol-long, [a-z][A-Z][0-9] string
        would yield log_2((26+26+50)^50) ~= 334 bit strength.

    Args:
        length: Number of characters to generate.
        using_digits: Include ``0-9`` in the symbol pool.
        using_ascii_letters: Include ``a-z`` and ``A-Z`` in the symbol pool.
        using_punctuation: Include punctuation, minus quotes and backslash
            (stripped because they would need escaping in generated files).

    Returns:
        The random string, or None when no secure RNG is available or when
        no character class was selected.
    """
    if not using_sysrandom:
        return None
    symbols = []
    if using_digits:
        symbols += string.digits
    if using_ascii_letters:
        symbols += string.ascii_letters
    if using_punctuation:
        symbols += string.punctuation \
            .replace('"', '') \
            .replace("'", '') \
            .replace('\\', '')
    # Guard the degenerate call with every flag False: random.choice on an
    # empty sequence raises IndexError, so fail soft with None instead.
    if not symbols:
        return None
    return ''.join([random.choice(symbols) for _ in range(length)])
def get_blocking_times_of_all_states_using_direct_approach(
    lambda_1, mu, num_of_servers, threshold, system_capacity, buffer_capacity
):
    """Obtain the blocking time of every state by solving M*X = b with
    numpy.linalg.solve() where:
        M = The array containing the coefficients of all b(u,v) equations
        b = Vector of constants of equations
        X = All b(u,v) variables of the equations
    Parameters
    ----------
    lambda_1 : float
    mu : float
    num_of_servers : int
    threshold : int
    system_capacity : int
    buffer_capacity : int
    Returns
    -------
    numpy.array
        An MxN array that contains the blocking time for each state
    """
    # Build the linear system, solve it, then reshape the flat solution
    # vector into the state-indexed array layout expected by callers.
    coefficients, constants = get_blocking_time_linear_system(
        lambda_1, mu, num_of_servers, threshold, system_capacity, buffer_capacity
    )
    solution = np.linalg.solve(coefficients, constants)
    return convert_solution_to_correct_array_format(
        solution, threshold, system_capacity, buffer_capacity
    )
def create_read_only_text(title: str, example: str, value: str, layout: QLayout) -> QLineEdit:
    """
    Build a one-line text widget (QLineEdit) with the given title, example
    contents and value inside the given layout, then disable it so the
    contents are read-only. Returns the widget.
    """
    text_widget = create_text(title, example, value, layout)
    text_widget.setEnabled(False)
    return text_widget
def find_lane_pixels_around_poly(binary_warped, left_fit, right_fit, margin = 100):
    """
    Return the pixel coordinates that lie within `margin` of the left and
    right polynomial fits (typically the fits from the previous frame).

    PARAMETER
    * margin: width around the polynomial fit

    Returns ((x_left, y_left), (x_right, y_right)) arrays of the selected
    activated pixels for each lane line.
    """
    # Coordinates of every activated (non-zero) pixel.
    pixel_ys, pixel_xs = (np.array(axis) for axis in binary_warped.nonzero())

    def _select(fit):
        # Keep pixels whose x falls within +/- margin of the fit at their y.
        expected_x = np.polyval(fit, pixel_ys)
        keep = ((pixel_xs >= expected_x - margin)
                & (pixel_xs <= expected_x + margin)).nonzero()[0]
        return pixel_xs[keep], pixel_ys[keep]

    return _select(left_fit), _select(right_fit)
def log_softmax_v2(logits, axis=None, name=None):
  """Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax = logits - log(reduce_sum(exp(logits), axis))

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  reduction_axis = -1 if axis is None else axis
  return _wrap_2d_function(logits, gen_nn_ops.log_softmax, reduction_axis, name)
def graphs_tuple_to_broadcasted_sharded_graphs_tuple(
    graphs_tuple: jraph.GraphsTuple,
    num_shards: int) -> ShardedEdgesGraphsTuple:
  """Converts a `GraphsTuple` to a `ShardedEdgesGraphsTuple` to use with `pmap`.
  For a given number of shards this will compute device-local edge and graph
  attributes, and add a batch axis of size num_shards. You can then use
  `ShardedEdgesGraphNetwork` with `jax.pmap`.
  Args:
    graphs_tuple: The `GraphsTuple` to be converted to a sharded `GraphsTuple`.
    num_shards: The number of devices to shard over.
  Returns:
    A ShardedEdgesGraphsTuple over the number of shards.
  Raises:
    ValueError: if the graphs have no edges, or if the total edge count is
      not divisible by `num_shards`.
  """
  # Note: this is not jittable, so to prevent using a device by accident,
  # this is all happening in numpy.
  nodes, edges, receivers, senders, globals_, n_node, n_edge = graphs_tuple
  if np.sum(n_edge) % num_shards != 0:
    raise ValueError(('The number of edges in a `graph.GraphsTuple` must be '
                      'divisible by the number of devices per replica.'))
  if np.sum(np.array(n_edge)) == 0:
    raise ValueError('The input `Graphstuple` must have edges.')
  # Broadcast replicated features to have a `num_shards` leading axis.
  # pylint: disable=g-long-lambda
  broadcast = lambda x: np.broadcast_to(x[None, :], (num_shards,) + x.shape)
  # pylint: enable=g-long-lambda
  # `edges` will be straightforwardly sharded, with 1/num_shards of
  # the edges on each device.
  def shard_edges(edge_features):
    # Reshapes (num_edges, ...) -> (num_shards, num_edges // num_shards, ...).
    return np.reshape(edge_features, (num_shards, -1) + edge_features.shape[1:])
  edges = jax.tree_map(shard_edges, edges)
  # Our sharded strategy is by edges - which means we need a device local
  # n_edge, senders and receivers to do global aggregations.
  # Senders and receivers are easy - 1/num_shards per device.
  device_senders = shard_edges(senders)
  device_receivers = shard_edges(receivers)
  # n_edge is a bit more difficult. Let's say we have a graphs tuple with
  # n_edge [2, 8], and we want to distribute this on two devices. Then
  # we will have sharded the edges to [5, 5], so the n_edge per device will be
  # [2,3], and [5]. Since we need to have each of the n_edge the same shape,
  # we will need to pad this to [5,0]. This is a bit dangerous, as the zero
  # here has a different meaning to a graph with zero edges, but we need the
  # zero for the global broadcasting to be correct for aggregation. Since
  # this will only be used in the first instance for global broadcasting on
  # device I think this is ok, but ideally we'd have a more elegant solution.
  # TODO(jonathangodwin): think of a more elegant solution.
  edges_per_device = np.sum(n_edge) // num_shards
  # Greedy allocation loop: walk the graphs in order, filling one "split"
  # (device) with exactly `edges_per_device` edges before starting the next.
  # `current_split` accumulates per-device n_edge counts and, for each count,
  # the index of the graph those edges came from (`device_graph_idx`).
  edges_in_current_split = 0
  completed_splits = []
  current_split = {'n_edge': [], 'device_graph_idx': []}
  for device_graph_idx, x in enumerate(n_edge):
    new_edges_in_current_split = edges_in_current_split + x
    if new_edges_in_current_split > edges_per_device:
      # A single graph may be spread across multiple replicas, so here we
      # iteratively create new splits until the graph is exhausted.
      # How many edges we are trying to allocate.
      carry = x
      # How much room there is in the current split for new edges.
      space_in_current_split = edges_per_device - edges_in_current_split
      while carry > 0:
        if carry >= space_in_current_split:
          # We've encountered a situation where we need to split a graph across
          # >= 2 devices. We compute the number we will carry to the next split,
          # and add a full split.
          carry = carry - space_in_current_split
          # Add the left edges to the current split, and complete the split
          # by adding it to completed_splits.
          current_split['n_edge'].append(space_in_current_split)
          current_split['device_graph_idx'].append(device_graph_idx)
          completed_splits.append(current_split)
          # reset the split
          current_split = {'n_edge': [], 'device_graph_idx': []}
          space_in_current_split = edges_per_device
          edges_in_current_split = 0
        else:
          # Remainder of the graph fits in the (fresh) split: start the next
          # split with it and stop carrying.
          current_split = {
              'n_edge': [carry],
              'device_graph_idx': [device_graph_idx]
          }
          edges_in_current_split = carry
          carry = 0
          # Since the total number of edges must be divisible by the number
          # of devices, this code path can only be executed for an intermediate
          # graph, thus it is not a complete split and we never need to add it
          # to `completed splits`.
    else:
      # Add the edges and globals to the current split.
      current_split['n_edge'].append(x)
      current_split['device_graph_idx'].append(device_graph_idx)
      # If we've reached the end of a split, complete it and start a new one.
      if new_edges_in_current_split == edges_per_device:
        completed_splits.append(current_split)
        current_split = {'n_edge': [], 'device_graph_idx': []}
        edges_in_current_split = 0
      else:
        edges_in_current_split = new_edges_in_current_split
  # Flatten list of dicts to dict of lists.
  completed_splits = {
      k: [d[k] for d in completed_splits] for k in completed_splits[0]
  }
  # Right-pad every split with zeros so they stack into rectangular arrays
  # (see the padding caveat in the long comment above).
  pad_split_to = max([len(x) for x in completed_splits['n_edge']])
  pad = lambda x: np.pad(x, (0, pad_split_to - len(x)), mode='constant')
  device_n_edge = np.array([pad(x) for x in completed_splits['n_edge']])
  device_graph_idx = np.array(
      [pad(x) for x in completed_splits['device_graph_idx']])
  return ShardedEdgesGraphsTuple(
      nodes=jax.tree_map(broadcast, nodes),
      device_edges=edges,
      device_receivers=device_receivers,
      device_senders=device_senders,
      receivers=broadcast(receivers),
      senders=broadcast(senders),
      device_graph_idx=device_graph_idx,
      globals=jax.tree_map(broadcast, globals_),
      n_node=broadcast(n_node),
      n_edge=broadcast(n_edge),
      device_n_edge=device_n_edge)
def apply_reflection(reflection_name, coordinate):
    """
    Given a reflection type and a canonical coordinate, applies the reflection
    and describes a circuit which enacts the reflection + a global phase shift.
    """
    # Look up the per-axis scalars, the phase shift, and the gate builders
    # registered for this reflection type.
    scalars, phase_shift, gate_builders = reflection_options[reflection_name]
    reflected_coord = [scalar * component
                       for scalar, component in zip(scalars, coordinate)]
    source_reflection = QuantumCircuit(2)
    for build_gate in gate_builders:
        source_reflection.append(build_gate(np.pi), [0])
    return reflected_coord, source_reflection, phase_shift
import json
def _get_vcpus_from_pricing_file(instance_type):
    """
    Read pricing file and get number of vcpus for the given instance type.
    :param instance_type: the instance type to search for.
    :return: the number of vcpus or -1 if the instance type cannot be found
    """
    with open(pricing_file) as pricing:
        instance_data = json.load(pricing)
    try:
        vcpu_count = int(instance_data[instance_type]["vcpus"])
    except KeyError:
        # Instance type (or its "vcpus" field) missing from the pricing data.
        log.error("Unable to get vcpus from file %s. Instance type %s not found." % (pricing_file, instance_type))
        return -1
    log.info("Instance %s has %s vcpus." % (instance_type, vcpu_count))
    return vcpu_count
def route_from_text(obj, route):
    """
    Recursive function to look for the requested object
    :param obj:
    :param route:
    :return:
    """
    _LOG.debug(f'Looking for {route} in {obj}')
    if len(route) == 1:
        # Base case: the final route element indexes directly into obj.
        _LOG.debug('Is an object')
        return obj[route[0]]
    if len(route) > 1:
        head = route[0]
        if ':' in head:
            # "key:value" selects the first dict in a list whose key matches.
            _LOG.debug('Is a dictionary nested inside a list')
            parts = head.split(':')
            matches = [item for item in obj if item[parts[0]] == parts[1]]
            return route_from_text(matches[0], route[1:])
        _LOG.debug('Is a dictionary nested inside a dictionary')
        return route_from_text(obj[head], route[1:])
    raise ValueError(obj, route)
def rot_mol(rot, struct, wrt="origin", degrees=True, seq="xyz"):
    """
    Rotate molecule using rotation matrix.
    Arguments
    ---------
    rot: array
        Can be either a list of 3 euler angles in the given order or a 3,3
        rotation matrix.
    wrt: str
        Rotation performed with respect to any of the following options,
        ["origin", "com"]. Although, only origin implemented now.
    order: str
        Order for euler angles if rotation is given as euler angles.
    """
    if wrt != "origin":
        raise Exception("Not Implemented")
    rotation = np.array(rot)
    if rotation.shape == (3,3):
        # Already a rotation matrix: use it directly.
        matrix = rotation
    elif rotation.ravel().shape == (3,):
        ### Assume euler angles
        matrix = R.from_euler(seq, rotation.ravel(), degrees=degrees).as_matrix()
    else:
        raise Exception(
            "Only rotation matrices and euler angles are currently implemented.")
    coords = struct.get_geo_array()
    elements = struct.elements
    # Rotate every atomic position about the origin and write back in place.
    struct.from_geo_array(np.dot(matrix, coords.T).T, elements)
    return struct
from typing import Dict
import asyncio
async def _send_multipart(data: Dict[str, str], boundary: str,
                          headers: HeadersType,
                          chunk_size: int = _CHUNK_SIZE) -> bytes:
    """Serialize *data* as a multipart/form-data body.

    File-like values (IOBase) are read in ``chunk_size`` pieces on the
    default executor so the event loop is not blocked; other values are
    written as plain form fields. ``headers`` is updated in place with the
    resulting ``Content-Length``. Returns the complete encoded body.
    """
    # TODO: precalculate body size and stream request, precalculate file sizes by os.path.getsize
    # Collect byte chunks and join once at the end: repeated `bytes +=` is
    # quadratic in the body size.
    parts = []
    for key, val in data.items():
        # write --boundary + field
        parts.append(('--%s%s' % (boundary, _NEW_LINE)).encode())
        if isinstance(val, IOBase):
            # TODO: Utility to accept files with multipart metadata (Content-Type, custom filename, ...),
            # write Contet-Disposition
            to_write = 'Content-Disposition: form-data; ' + \
                'name="%s"; filename="%s"%s%s' % (
                    key, basename(val.name), _NEW_LINE, _NEW_LINE)
            parts.append(to_write.encode())
            # Read chunks off-loop. `get_running_loop` is the supported way
            # to obtain the loop inside a coroutine (get_event_loop here is
            # deprecated). Naming the chunk avoids shadowing the `data` arg.
            loop = asyncio.get_running_loop()
            while True:
                chunk = await loop.run_in_executor(
                    None, val.read, chunk_size)
                if not chunk:
                    break
                parts.append(chunk)
            val.close()
        else:
            parts.append(
                (
                    'Content-Disposition: form-data; name="%s"%s%s' % (
                        key,
                        _NEW_LINE,
                        _NEW_LINE
                    )
                ).encode()
            )
            parts.append(val.encode() + _NEW_LINE.encode())
    # write --boundary-- for finish
    parts.append(('--%s--' % boundary).encode())
    to_send = b''.join(parts)
    headers['Content-Length'] = str(len(to_send))
    return to_send
def fit_naive_bayes_model(matrix, labels):
    """Fit a naive bayes model.
    This function should fit a Naive Bayes model given a training matrix and labels.
    The function should return the state of that model.
    Feel free to use whatever datatype you wish for the state of the model.
    Args:
        matrix: A numpy array containing word counts for the training data
        labels: The binary (0 or 1) labels for that training data
    Returns: The trained model
    """
    # *** START CODE HERE ***
    class naiveBayes():
        def __init__(self):
            self.phi_pos = None
            self.phi_neg = None
            self.prob_pos = None

        def fit(self, matrix, labels):
            # Whether each token appears at least once in each message.
            presence = (matrix > 0).astype('int')
            pos_mask = labels.reshape(-1, 1)
            neg_mask = (labels == 0).reshape(-1, 1)
            # Laplace-smoothed per-token presence probabilities per class.
            self.phi_pos = (np.sum(presence * pos_mask, axis=0, keepdims=True) + 1) / (np.sum(labels) + 2)
            self.phi_neg = (np.sum(presence * neg_mask, axis=0, keepdims=True) + 1) / (np.sum(labels == 0) + 2)
            # Prior probability of the positive (spam) class.
            self.prob_pos = np.mean(labels)

    model = naiveBayes()
    model.fit(matrix, labels)
    return model
    # *** END CODE HERE ***
from typing import Union
from contextlib import suppress
def get_checkBox_entry(checkBox: etree._Element) -> str:
    """Create text representation for a checkBox element.
    :param checkBox: a checkBox xml element
    :returns:
        1. attempt to get ``checked.w:val`` and return "\u2610" or "\u2612"
        2. attempt to get ``default.w:val`` and return "\u2610" or "\u2612"
        3. return ``--checkbox failed--``
    Docx xml has at least two types of checkbox elements::
        1. ``checkBox`` can only be checked when the form is locked. These do not
        contain a text element, so this function is needed to select one from the
        ``w:checked`` or ``w:default`` sub-elements.
        2. ``checkbox`` can be checked any time. Prints text as "\u2610" or "\u2612".
        Docx2Python can safely ignore this second type, as there will be a <w:t>
        element inside with a checkbox character.
    <w:checkBox>
        <w:sizeAuto/>
        <w:default w:val="1"/>
        <w:checked w:val="0"/>
    </w:checkBox>
    If the ``checked`` element is absent, return the default.
    If the ``checked`` element is present without a ``w:val`` attribute, it is
    treated as checked (the ``or "1"`` below makes "1" the implicit value).
    """
    def get_wval() -> Union[str, None]:
        # Prefer the explicit checked state; a bare <w:checked/> means "1".
        with suppress(StopIteration):
            checked = next(checkBox.iterfind(qn("w:checked")))
            return str(checked.attrib.get(qn("w:val")) or "1")
        # No <w:checked>: fall back to <w:default>'s w:val if both exist.
        with suppress(StopIteration, KeyError):
            default = next(checkBox.iterfind(qn("w:default")))
            return str(default.attrib[qn("w:val")])
        # Neither element usable: signal failure to the mapping below.
        return None
    return {"0": "\u2610", "1": "\u2612", None: "----checkbox failed----"}[get_wval()]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.