content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def orthogonalize(vec1, vec2):
    """Project out of ``vec1`` the component parallel to ``vec2``.

    @param[in] vec1 The projectee (output is a modified copy of vec1)
    @param[in] vec2 The projector (the removed component is parallel to this)
    @return A copy of vec1 with its vec2-parallel component subtracted.
    """
    unit = vec2 / np.linalg.norm(vec2)
    parallel_part = unit * np.dot(vec1, unit)
    return vec1 - parallel_part
def tokenize(text):
    """
    Process message text into cleaned tokens.

    Steps:
    1) normalization and punctuation removal: lower-case, replace punctuation
    2) tokenization: split the text into a sequence of words
    3) stop-word removal: drop words which add no meaning to the sentence
    4) lemmatization: reduce words to their root form
    Args:
        text (str): string with message
    Returns:
        list: cleaned tokens of the message
    """
    # normalize case and remove punctuation
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    # tokenize text and initiate lemmatizer
    tokens = word_tokenize(text)
    lemmatizer = WordNetLemmatizer()
    # build the stop-word set once; the original re-read the corpus list
    # for every single token, which is O(tokens * stopwords)
    stop_words = set(stopwords.words('english'))
    # lemmatize and strip leading/trailing whitespace from each kept token
    return [lemmatizer.lemmatize(tok).strip()
            for tok in tokens if tok not in stop_words]
def extract_text(bucketname, filepath):
    """Run AWS Textract OCR on a document stored in S3.

    Args:
        bucketname (str): Name of the S3 bucket holding the document.
        filepath (str): Key (path) of the document inside the bucket.

    Returns:
        dict: Raw ``detect_document_text`` response from Textract.
    """
    textract = boto3.client('textract')
    response = textract.detect_document_text(
        Document={
            'S3Object': {
                'Bucket': bucketname,
                'Name': filepath
            }
        })
    return response
def box1_input(input):
    """Return the processed input for player 1 (box 1).

    Delegates to ``get_input`` with player number 1.
    NOTE(review): the parameter shadows the builtin ``input``; renaming it
    would be cleaner but could break keyword-argument callers.
    """
    return get_input(1, input)
def test_join_query_string():
    """Union of query strings (placeholder: test not yet implemented)."""
    pass
def test_force_stop(process_engine, exec_infos, sync_server):
    """Test forcing the stop of the engine.

    Runs the execution in a background thread, waits for the engine to reach
    the 'test1' sync point, force-stops it and checks the reported state.
    """
    t = ExecThread(process_engine, exec_infos)
    t.start()
    # block until the engine has actually started executing
    sync_server.wait('test1')
    process_engine.stop(force=True)
    t.join()
    # a forced stop must surface as an 'engine ... terminated' error
    assert 'engine' in t.value.errors
    assert 'terminated' in t.value.errors['engine']
    assert process_engine.status == 'Stopped'
def _get_xy_from_geometry(df):
"""
Return a numpy array with two columns, where the
first holds the `x` geometry coordinate and the second
column holds the `y` geometry coordinate
"""
# NEW: use the centroid.x and centroid.y to support Polygon() and Point() geometries
x = df.geometry.centroid.x
y = df.geometry.centroid.y
return np.column_stack((x, y)) | 27,906 |
def create_styled_figure(
    title,
    name=None,
    tooltips=None,
    plot_width=PLOT_WIDTH,
):
    """Return a styled, empty bokeh figure of predetermined height and width.

    Args:
        title (str): Title of the figure.
        name (str, optional): Name used by bokeh for later retrieval.
            Defaults to the title.
        tooltips (list, optional): Bokeh tooltips to attach to the figure.

    Returns:
        fig (bokeh Figure): the styled, empty figure.
    """
    assert plot_width is not None
    if name is None:
        name = title
    fig = figure(
        plot_height=PLOT_HEIGHT,
        plot_width=plot_width,
        title=title.title(),
        tooltips=tooltips,
        name=name,
        y_axis_type="linear",
        sizing_mode="scale_width",
    )
    fig.title.text_font_size = "15pt"
    # apply the module-wide minimum borders
    fig.min_border_left = MIN_BORDER_LEFT
    fig.min_border_right = MIN_BORDER_RIGHT
    fig.min_border_top = MIN_BORDER_TOP
    fig.min_border_bottom = MIN_BORDER_BOTTOM
    # toolbar placement (typically hidden via the module constant)
    fig.toolbar_location = TOOLBAR_LOCATION
    # grid visibility
    fig.grid.visible = GRID_VISIBLE
    # minor tick styling
    fig.axis.minor_tick_line_color = MINOR_TICK_LINE_COLOR
    # major tick lengths
    fig.axis.major_tick_out = MAJOR_TICK_OUT
    fig.axis.major_tick_in = MAJOR_TICK_IN
    # outline width
    fig.outline_line_width = OUTLINE_LINE_WIDTH
    return fig
def multiNodeMultDiskStripTest():
    """
    Multi-node, multi-disk striping performance test.

    Initializes the perftest tooling against the monitoring node, starts
    disk monitoring and sequential write/read tests on each NAS node, then
    starts CPU monitoring on the host nodes.

    if you want to work from the monitoring vm: (remote option)
    on monitoring vm do, to make sure there are keys & ssh-agent is loaded
    js 'j.do.loadSSHAgent(createkeys=True)'
    #now logout & back login into that node, this only needs to happen once
    """
    nrdisks=6
    j.tools.perftesttools.init(monitorNodeIp="192.168.103.252",sshPort=22,sshkey=mgmtkey)
    monitor=j.tools.perftesttools.getNodeMonitor()
    nasses=[]
    nasipaddr=["192.168.103.240","192.168.103.239","192.168.103.238","192.168.103.237"]
    #first init all nasses which takes some time
    for ipaddr in nasipaddr:
        nas=j.tools.perftesttools.getNodeNas(ipaddr,22,nrdisks=6,fstype="xfs")
        nasses.append(nas)
        nas.startMonitor(cpu=0,disks=1,net=0)
    #now start all the nas perftests
    for i in range(len(nasipaddr)):
        nas=nasses[i]
        #will write 3 parallel file sequential on each disk
        #each nas has 6 disks, so 18 parallel writes
        nas.perftester.sequentialWriteReadBigBlock(nrfilesParallel=3)
    hosts=[]
    hostsip=["10.10.10.1","10.10.10.2","10.10.10.3","10.10.10.4"]
    for ipaddr in hostsip:
        host=j.tools.perftesttools.getNodeHost(ipaddr,22)
        hosts.append(host)
        host.startMonitor(cpu=1,disks=0,net=0)
def render_text(string, padding=5, width=None, height=None,
                size=12, font="Arial", fgcolor=(0, 0, 0), bgcolor=None):
    """
    Render text to an image and return it.

    Not specifying bgcolor will give a transparent image, but that will take
    a *lot* more work to build.
    Specifying a bgcolor, width, and height will heavily optimize things.

    Args:
        string: text to render; may contain newlines.
        padding: pixels of padding added around the cropped text.
        width, height: output size; estimated from the font size if omitted.
        size: font size (assumed ~ line height for the size estimate).
        font: font family name.
        fgcolor: text color as an RGB tuple with components in [0, 1].
        bgcolor: background color, or None for a transparent (masked) image.
    """
    actor = text.text_actor(string, fgcolor, size, font)
    if bgcolor is None:
        mask = True
        # Set it to the opposite of fgcolor so we can mask using it.
        # BUGFIX: the blue component previously reused fgcolor[1] (green)
        # instead of fgcolor[2], so masking could fail whenever the green
        # and blue channels of fgcolor differed.
        bgcolor = (1 - fgcolor[0], 1 - fgcolor[1], 1 - fgcolor[2])
    else:
        mask = False
    lines = string.split("\n")
    if width is None:
        # EM is defined as the square of the line height, and is the guide for making fonts
        # We can use that as an upper bound (assuming font size is ~ line
        # height)
        width = size * max([len(s) for s in lines])
    if height is None:
        height = size * len(lines)
    image = actor_to_image(actor, bgcolor, width, height)
    if mask:
        image = mask_color(image, bgcolor)
        image = crop_blank_space(image)
        width, height, _ = image.GetDimensions()
        return pad_image(
            image, pad_width=width + padding * 2, pad_height=height + padding * 2)
    else:
        return image
def pretty_spectrogram(d, log=True, thresh=5, fft_size=512, step_size=64):
    """
    Create a spectrogram from a 1-d signal.

    Args:
        d: input signal.
        log: if True, volume-normalize and take log10 of the spectrogram.
        thresh: threshold minimum power for the (log) spectrogram.
        fft_size: FFT window size.
        step_size: hop size between successive windows.

    Returns:
        2-d array of magnitudes (log10 magnitudes when ``log``), floored
        at the threshold.
    """
    specgram = np.abs(stft(d, fftsize=fft_size, step=step_size, real=False,
                           compute_onesided=True))
    if log:  # idiomatic truth test instead of '== True'
        specgram /= specgram.max()  # volume normalize to max 1
        specgram = np.log10(specgram)  # take log
        specgram[specgram < -thresh] = -thresh  # floor values below threshold
    else:
        specgram[specgram < thresh] = thresh  # floor values below threshold
    return specgram
def check_for_win(position, board, player):
    """
    Check for wins on a 3x3 board across rows, cols, diagonal, anti-diagonal.

    Args:
        position (int): last played position (1-9, user input); kept for
            interface compatibility but not needed by the full-board scan.
        board (np.ndarray): 2-d 3x3 array of player markers.
        player (str): "X" or "O".

    Returns:
        bool: True if ``player`` occupies a full row, column or diagonal.
    """
    # rows and columns (early return instead of scanning everything)
    for i in range(3):
        if np.all(board[i, :] == player) or np.all(board[:, i] == player):
            return True
    # main diagonal
    if np.all(board.diagonal() == player):
        return True
    # anti-diagonal
    if np.all(np.fliplr(board).diagonal() == player):
        return True
    return False
def wrap_http_exception(app: FastAPI):
    """Register a JSON exception handler on ``app``.

    Converts Starlette HTTP exceptions into JSON bodies of the form
    ``{'message': <detail>}`` with the original status code, following
    https://doc.acrobits.net/api/client/intro.html#web-service-responses
    """
    @app.exception_handler(StarletteHTTPException)
    async def http_exception_handler(request, exc):
        return JSONResponse({'message': exc.detail}, exc.status_code)
def perm_2sample(group1, group2, nrand=10000, tail=0, paired=True):
    # Taken from JW's functions
    """
    non-parametric permutation test (Efron & Tibshirani, 1998)
    tail = 0 (test A~=B), 1 (test A>B), -1 (test A<B)

    Args:
        group1, group2: 1-d samples to compare (equal length when paired).
        nrand (int): number of random permutations.
        tail (int): 0 two-sided, 1 for A>B, -1 for A<B.
        paired (bool): permute within pairs when True, otherwise permute
            the pooled samples.

    Returns:
        tuple: (observed mean difference, permutation p-value)
    """
    a = group1
    b = group2
    ntra = len(a)
    ntrb = len(b)
    meana = np.mean(a)
    meanb = np.mean(b)
    triala = np.zeros(nrand)
    trialb = np.zeros(nrand)
    if paired:
        for i in range(nrand):
            alldat = np.vstack((a,b)).T
            # randomly swap the two values within each pair
            for j in range(ntra):
                alldat[j,:] = alldat[j,np.argsort(np.random.rand(2))]
            triala[i] = alldat[:,0].mean()
            trialb[i] = alldat[:,1].mean()
    else:
        # unpaired: shuffle the pooled data and re-split into two groups
        alldat = np.concatenate((a,b))
        indices = np.arange(alldat.shape[0])
        for i in range(nrand):
            random.shuffle(indices)
            triala[i] = np.mean(alldat[indices[:ntra]])
            trialb[i] = np.mean(alldat[indices[ntra:]])
    # p-value: fraction of permuted differences at least as extreme as observed
    if tail == 0:
        p_value = sum(abs(triala-trialb)>=abs(meana-meanb)) / float(nrand)
    else:
        p_value = sum((tail*(triala-trialb))>=(tail*(meana-meanb))) / float(nrand)
    return(meana-meanb, p_value)
def _merge_cols(blocks, out_blocks, blocks_shape, skip):
    """
    Merges the blocks into a single list of blocks where each block has bn
    as number of rows (the number of cols remains the same per block).

    Parameters
    ----------
    blocks : list
        Input blocks to merge.
    out_blocks : list
        Output container, filled in place with the re-chunked blocks.
    blocks_shape : tuple
        (bn, bm): rows per output block, columns per block.
    skip : int
        Number of leading columns to skip in the merged data.
    """
    bn, bm = blocks_shape
    # merge every input block into one array, then re-slice it row-wise
    data = Array._merge_blocks(blocks)
    for i in range(0, ceil(data.shape[0] / bn)):
        out_blocks[i] = data[i * bn: (i + 1) * bn, skip:bm + skip]
def test_reorder_lrt_ltr_var_rpts():
    """
    This reordering with variable repeats is not supported. In principle
    it is possible (TI1_R1, TI2_R1, TI2_R2, TI2_R3) but this seems unlikely
    and is probably more likely an error
    """
    # 4-d volume whose last axis encodes the acquisition order
    d = np.zeros([5, 5, 5, 8])
    for z in range(8): d[..., z] = z
    img = AslImage(name="asldata", image=d, tis=[1, 2], rpts = [1, 3], iaf="tc", order='lrt')
    # reordering to 'ltr' with variable repeats must raise
    with pytest.raises(Exception):
        img.reorder("ltr")
    # Expected semantics if this were ever supported:
    #assert img.ntis == 2
    #assert img.tis == [1, 2]
    #assert not img.have_plds
    #assert img.rpts == [1, 3]
    #assert img.ntc == 2
    #assert img.order == "ltr"
    #data = img.nibImage.get_fdata()
    #assert list(data.shape) == [5, 5, 5, 8]
    #for znew, zold in enumerate([0, 1, 2, 3, 4, 5, 6, 7]):
    #    assert np.all(data[..., znew] == zold)
def softmax_op(node):
    """Compute the softmax of ``node`` along an axis.

    Parameters:
    ----
    node : Node
        Input variable.

    Returns:
    ----
    A new Node instance created by SoftmaxOp.
    """
    op = SoftmaxOp()
    return op(node)
def test_module_attribute() -> None:
    """Ensure module attributes' signatures."""
    # every public model class must be exposed on lmp.model
    assert hasattr(lmp.model, 'BaseModel')
    assert hasattr(lmp.model, 'ElmanNet')
    assert hasattr(lmp.model, 'LSTM1997')
    assert hasattr(lmp.model, 'LSTM2000')
    assert hasattr(lmp.model, 'LSTM2002')
    assert hasattr(lmp.model, 'ALL_MODELS')
    # ALL_MODELS is an ordered registry of the concrete model classes
    assert lmp.model.ALL_MODELS == [
        lmp.model.ElmanNet,
        lmp.model.LSTM1997,
        lmp.model.LSTM2000,
        lmp.model.LSTM2002,
    ]
    assert hasattr(lmp.model, 'MODEL_OPTS')
    # MODEL_OPTS maps each model's name to its class for CLI lookup
    assert lmp.model.MODEL_OPTS == {
        lmp.model.ElmanNet.model_name: lmp.model.ElmanNet,
        lmp.model.LSTM1997.model_name: lmp.model.LSTM1997,
        lmp.model.LSTM2000.model_name: lmp.model.LSTM2000,
        lmp.model.LSTM2002.model_name: lmp.model.LSTM2002,
    }
def test_adls_gen2_file_event_filepath_when_whole_file_mode_disabled(sdc_builder, sdc_executor, azure):
    """
    We want to make sure that the filepath attribute of an event generated by the ADLS gen2 destination contains only
    a file path and doesn't contain a schema. A misbehaviour normally happened before when an old file was found
    during a pipeline start-up. This wasn't observed for events that were sent for files created during pipeline
    execution. The goal is to make the behaviour consistent, and since the name of the attribute is the filepath
    not URI we decided to remove the schema part from it.
    Pipeline:
        Dev Raw Data Source >> ADLS gen2 FS >= wiretap
    When the pipeline stops we assert the filepath attribute of the event generate by ADLS gen2 FS.
    """
    builder = sdc_builder.get_pipeline_builder()
    dl_fs = azure.datalake.file_system
    directory_name = get_random_string(string.ascii_letters)
    try:
        # pre-create an "old" temp file so start-up recovery kicks in
        dl_fs.mkdir(directory_name)
        dl_fs.touch(f'{directory_name}/_tmp_sdc_0')
        dl_fs.write(f'{directory_name}/_tmp_sdc_0', 'message1\n')
        data_source = builder.add_stage('Dev Raw Data Source')
        data_source.stop_after_first_batch = True
        data_source.data_format = 'TEXT'
        data_source.raw_data = 'message2\n'
        azure_data_lake_store_destination = builder.add_stage(name='com_streamsets_pipeline_stage_destination_datalake_gen2_DataLakeGen2DTarget')
        azure_data_lake_store_destination.set_attributes(data_format='TEXT',
                                                         directory_template=f'/{directory_name}',
                                                         files_prefix='sdc',
                                                         files_suffix='')
        wiretap = builder.add_wiretap()
        data_source >> azure_data_lake_store_destination >= wiretap.destination
        pipeline = builder.build().configure_for_environment(azure)
        sdc_executor.add_pipeline(pipeline)
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        history = sdc_executor.get_pipeline_history(pipeline)
        pipeline_record_count = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
        stage_record_count = len(wiretap.output_records)
        # one event for the recovered file and one for the new file
        assert stage_record_count == 2
        assert pipeline_record_count == stage_record_count + 1
        # filepath must be schema-less: just '/<dir>/sdc_...'
        for event_record in wiretap.output_records:
            assert event_record.get_field_data('/filepath').value.startswith(f'/{directory_name}/sdc_')
    finally:
        dl_fs.rmdir(directory_name, recursive=True)
def full_chain():
    """
    Return the entire in-memory blockchain (current_chain.blockchain).

    Returns:
        tuple: (dict with 'chain' and 'length' keys, HTTP status 200)
    """
    response = {
        'chain': current_chain.blockchain,
        'length': len(current_chain.blockchain),
    }
    return response, 200
def normalizer(x, mi, ma, eps=1e-20, dtype=np.float32):
    """
    Percentile-based normalization of an image patch.

    Parameters
    ----------
    x : np array of image patch values
    mi : minimum input percentile value
    ma : maximum input percentile value
    eps : small constant to avoid division by zero
    dtype : numpy dtype to cast to (float32 by default); None skips casting
    """
    if dtype is not None:
        # cast everything to the requested dtype up front
        x = x.astype(dtype, copy=False)
        mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype, copy=False)
        ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype, copy=False)
        eps = dtype(eps)
    scaled = (x - mi) / (ma - mi + eps)
    return normalizeZeroOne(scaled)
def do(args):
    """ Main Entry Point.

    Builds the worktree from the parsed CLI args, generates the 'sourceme'
    environment file, prints its path and returns it.
    """
    build_worktree = qibuild.parsers.get_build_worktree(args)
    sourceme = build_worktree.generate_sourceme()
    print(sourceme)
    return sourceme
def policy_action(module,
                  state=None,
                  policy_name=None,
                  policy_arn=None,
                  policy_document=None,
                  path=None,
                  description=None):
    """
    Execute the actions needed to bring the policy into the specified state.
    Args:
        module (obj): Ansible module
        state (str): Ansible state - 'present' | 'absent'
        policy_name (str): Policy name. One and only one of policy name or policy ARN must be given.
        policy_arn (str): Policy ARN. One and only one of policy name or policy ARN must be given.
        policy_document(dict): JSON policy document
        path (str): Policy path
        description (str): Policy description. Defaults to 'policy_name'
    Returns:
        Success:
            (bool) changed, (dict) policy object (see boto3.get_policy docs)
        Failure:
            Invokes module.fail_json with suitable text at point of error
    """
    changed = False
    policy = None
    error = {}
    if state == 'present':
        try:
            # boto expects the policy document as a JSON string
            if isinstance(policy_document, dict):
                policy_document = json.dumps(policy_document)
            response = policy_m.create_policy(
                policy_name=policy_name,
                path=path,
                policy_document=policy_document,
                description=description)
            if 'error' in response:
                error = response['error']
            else:
                # only a newly created policy counts as a change
                if response['state'] == 'New':
                    changed = True
                policy = response['policy']
        except Exception as e:
            # broad catch is deliberate: any failure must end in fail_json
            module.fail_json(msg='policy action {0} failed: {1} {2}'.format('present', e,traceback.format_exc()))
    elif state == 'absent':
        try:
            response = policy_m.delete_policy(
                policy_name=policy_name,
                path=path)
            if 'error' in response:
                error = response['error']
            else:
                changed = True
                policy = response['policy']
        except Exception as e:
            module.fail_json(msg='policy action {0} failed: {1} {2}'.format('absent', e,traceback.format_exc()))
    else:
        error = {"error": "state must be either 'present' or 'absent'"}
    if error:
        module.fail_json(msg='policy action failed: {0}'.format(error))
    return changed, policy
def GET_v1_metrics_location(days=1):
    """Return some data about the locations users have reported from.

    Args:
        days (int): look-back window in days, capped at 7.

    Returns:
        flask JSON response with a 'locations' key.
    """
    if days > 7:
        days = 7
    from_time = f'-{days}d'
    # sum the per-location geoip counters from graphite
    locations = fetch_graphite_sum('*.geoip.*', from_time=from_time)
    return jsonify(locations=locations)
def present(*args, **kwargs):
    """
    basically calls show if not embeded.

    Tiles, shows and raises all open figures unless '--noshow' was passed
    on the command line.

    Kwargs:
        max_rows, row_first, no_tile, monitor_num, percent_w, percent_h,
        hide_toolbar
    CommandLine:
        python -m wbia.plottool.fig_presenter present
    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.plottool.fig_presenter import *  # NOQA
        >>> result = present()
        >>> print(result)
        >>> import wbia.plottool as pt
        >>> pt.show_if_requested()
    """
    if VERBOSE:
        print('[pt] present')
    if not ut.get_argflag('--noshow'):
        # print('[fig_presenter] Presenting figures...')
        # with warnings.catch_warnings():
        # warnings.simplefilter("ignore")
        all_figures_tile(*args, **kwargs)
        # Both of these lines cause the weird non-refresh black border behavior
        all_figures_show()
        all_figures_bring_to_front()
def add_default_to_usage_help(
    usage_help: str, default: "str | int | float | bool | None"
) -> str:
    """Adds default value to usage help string.

    BUGFIX: the annotation was ``str or int or float or bool`` which Python
    evaluates to just ``str``; it is now a proper union (kept as a string
    literal so no typing import is needed).

    Args:
        usage_help (str):
            usage help for click option.
        default (str or int or float or bool or None):
            default value for the click option; None means no default.

    Returns:
        str:
            New usage_help value, with " default=<value>" appended when a
            default is given.
    """
    if default is not None:
        return f"{usage_help} default={default}"
    return usage_help
def describe_deformation(el_disps, bfg):
    """
    Describe deformation of a thin incompressible 2D membrane in 3D
    space, composed of flat finite element faces.
    The coordinate system of each element (face), i.e. the membrane
    mid-surface, should coincide with the `x`, `y` axes of the `x-y`
    plane.
    Parameters
    ----------
    el_disps : array
        The displacements of element nodes, shape `(n_el, n_ep, dim)`.
    bfg : array
        The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
        n_ep)`.
    Returns
    -------
    mtx_c : array
        The in-plane right Cauchy-Green deformation tensor
        :math:`C_{ij}`, :math:`i, j = 1, 2`.
    c33 : array
        The component :math:`C_{33}` computed from the incompressibility
        condition.
    mtx_b : array
        The discrete Green strain variation operator.
    """
    sh = bfg.shape
    n_ep = sh[3]
    dim = el_disps.shape[2]
    sym2 = dim2sym(dim-1)
    # Repeat el_disps by number of quadrature points.
    el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1])
    # Transformed (in-plane) displacement gradient with
    # shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a.
    du = dot_sequences(bfg, el_disps_qp)
    # Deformation gradient F w.r.t. in plane coordinates.
    # F_{ia} = dx_i / dX_a,
    # a \in {1, 2} (rows), i \in {1, 2, 3} (columns).
    mtx_f = du + nm.eye(dim - 1, dim, dtype=du.dtype)
    # Right Cauchy-Green deformation tensor C.
    # C_{ab} = F_{ka} F_{kb}, a, b \in {1, 2}.
    mtx_c = dot_sequences(mtx_f, mtx_f, 'ABT')
    # C_33 from incompressibility (det(C) == 1).
    c33 = 1.0 / (mtx_c[..., 0, 0] * mtx_c[..., 1, 1]
                 - mtx_c[..., 0, 1]**2)
    # Discrete Green strain variation operator, one row per strain
    # component (11, 22, 12) and one column block per displacement axis.
    mtx_b = nm.empty((sh[0], sh[1], sym2, dim * n_ep), dtype=nm.float64)
    mtx_b[..., 0, 0*n_ep:1*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 0:1]
    mtx_b[..., 0, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 0, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 2:3]
    mtx_b[..., 1, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 1, 1*n_ep:2*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 1:2]
    mtx_b[..., 1, 2*n_ep:3*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 2:3]
    mtx_b[..., 2, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 0, 0:1] \
                                   + bfg[..., 0, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 2, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 1:2] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 2, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 2:3] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 2:3]
    return mtx_c, c33, mtx_b
def remove_emails(text):
    """Return ``text`` with all email addresses stripped out.

    Matching uses the module-level ``EMAIL_REGEX`` pattern.
    """
    return re.sub(EMAIL_REGEX, "", text)
def _fit_model_residual_with_radial(lmparams, star, self, interpfunc):
    """Residual function for fitting individual profile parameters.

    :param lmparams: lmfit Parameters object
    :param star: A Star instance.
    :param self: PSF instance
    :param interpfunc: The interpolation function
    :returns chi: Chi of observed pixels to model pixels
    """
    # BUGFIX: dict.values() returns a non-subscriptable view in Python 3;
    # materialize it as a list before slicing.
    all_params = list(lmparams.valuesdict().values())
    flux, du, dv = all_params[:3]
    params = all_params[3:]
    prof = self.getProfile(params)
    image, weight, image_pos = star.data.getImage()
    # store flux/center on the star so drawProfile can use the fit
    star.fit.flux = flux
    star.fit.center = (du, dv)
    star_drawn = drawProfile(self, star, prof, params, use_fit=True, interpfunc=interpfunc)
    image_model = star_drawn.image
    # per-pixel chi, weighted by the inverse noise
    chi = (np.sqrt(weight.array) * (image_model.array - image.array)).flatten()
    return chi
def _whoami():
    # type: () -> Tuple[str,str]
    """
    Return the current operating system account as (username, fullname).

    Falls back to the username as the full name when the passwd database
    is unavailable.
    """
    username = getuser()
    fullname = username
    if GET_PW_NAM:
        # first GECOS field holds the human-readable full name
        pwnam = getpwnam(username)
        if pwnam:
            fullname = pwnam.pw_gecos.split(",", 1)[0]
    return (username, fullname)
def text_set_class(
    set_class: Sequence,
) -> str:
    """Convert a set class into a string representing its interval vector.

    Each interval-vector entry is spelled out as a word ("one".."six"),
    repeated according to its multiplicity, separated by single spaces.
    """
    id_dict = {0: "one",
               1: "two",
               2: "three",
               3: "four",
               4: "five",
               5: "six"}
    # collect the words and join once instead of quadratic '+=' on a string
    words = []
    for i, count in enumerate(interval_vector(set_class)):
        words.extend([id_dict[i]] * count)
    return " ".join(words)
def Span_read(stream):
    """Read a span from an 88.1 protocol stream.

    Reads the start address followed by the width offset, in that order,
    and wraps them in a Span.
    """
    start = Address_read(stream)
    width = Offset_read(stream)
    return Span(start, width)
def analyze(label):
    """
    Combination of the methods provided by the ramanspectrum class.

    Loads '<label>/data_<label>.txt', then runs spectrum selection,
    baseline selection, peak selection, fitting and saving of the fit
    parameters in sequence.
    """
    spec = ramanspectrum(label + '/data_' + label + '.txt', label = label)
    spec.SelectSpectrum()
    spec.SelectBaseline()
    spec.SelectPeaks()
    spec.FitSpectrum()
    spec.SaveFitParams()
def test_initial_states():
    """Check the initial state of all parsers: each must construct cleanly."""
    parser_classes = (
        TextParser,
        MarkdownParser,
        HtmlParser,
        ReStructuredTextParser,
        AsciidocParser,
        TextileParser,
        RdocParser,
        OrgParser,
        CreoleParser,
        MediawikiParser,
        PodParser,
    )
    for parser_class in parser_classes:
        parser = parser_class()
        assert parser is not None
def temp_database(tmpdir_factory):
    """Return the path of a sqlite database file inside a fresh temp dir."""
    tmp_dir = str(tmpdir_factory.mktemp('temp'))
    return tmp_dir + "/testdb.sqlite"
def play_smithy(game):
    """
    +3 Cards

    Smithy's effect: the current player draws three cards.
    """
    game.player_state.draw_cards(3)
def walk_with_depth(
    base_dir: str,
    depth: int = 1,
    followlinks: bool = True,
) -> Iterator[Tuple[str, List[str], List[str]]]:
    """Walk a directory with a max depth.

    This is similar to os.walk except it has an optional maximum directory
    depth that it will walk.

    Args:
        base_dir: root directory; must exist.
        depth: maximum number of levels below ``base_dir`` to descend into.
        followlinks: whether to follow symbolic links, as in ``os.walk``.

    Yields:
        (root, dirs, files) tuples exactly as ``os.walk`` does.
    """
    base_dir = base_dir.rstrip(os.path.sep)
    assert os.path.isdir(base_dir)
    base_depth = base_dir.count(os.path.sep)
    for root, dirs, files in os.walk(base_dir, followlinks=followlinks):
        yield root, dirs, files
        # prune the walk in place (os.walk honors dirs mutation) once the
        # depth budget below base_dir is exhausted
        if base_depth + depth <= root.count(os.path.sep):
            del dirs[:]
def get_classes(pyfile_path):
    """
    Return the names of the classes defined inside a Python file.

    :param str pyfile_path: path of the file to inspect
    :return: list with the names of all top-level classes in the file
    :rtype: list

    .. code-block:: python

        >> get_classes('./data.py')
        ['Module', 'PythonFile']
    """
    with open(pyfile_path, 'r') as source_file:
        tree = ast.parse(source_file.read())
    return [node.name for node in tree.body if isinstance(node, ast.ClassDef)]
def transpose_outer_dimensions(outer_dimensions: ST_Type, diff_dimensions: ST_Type, ports_to_transpose: List) -> Kind:
    """
    Transpose the outer dimensions of a set of ports, move them inside the diff dimensions. The outer dimensions
    that are sseqs are the same for all elements, so treat as inner dimensions.
    :param outer_dimensions: The outer dimensions that need to be moved inside
    :param diff_dimensions: The dimensions that need to be moved outside
    :param ports_to_transpose: The ports
    :return: The ports with the diff dimensions moved outside the outer ones.
    """
    # always remove tseqs as they don't affect the magma types
    num_outer_dimensions = num_nested_layers(remove_tseqs(outer_dimensions))
    num_diff_dimensions = num_nested_layers(remove_tseqs(diff_dimensions))
    # these are the indexes of the dimensions on the untransposed type
    outer_dimensions_indexes_untransposed = list(range(num_outer_dimensions))
    diff_dimensions_indexes_untransposed = list(range(num_outer_dimensions, num_outer_dimensions + num_diff_dimensions))
    # new axis order: diff dimensions first, then the old outer dimensions
    sseq_dims_transposed = diff_dimensions_indexes_untransposed + outer_dimensions_indexes_untransposed
    # performing the transpose with blockers added so right dimensions not converted
    ports_to_transpose_with_block = add_blocker(ports_to_transpose, len(sseq_dims_transposed))
    orig_arr = np.asarray(ports_to_transpose_with_block)
    transposed_arr = orig_arr.transpose(sseq_dims_transposed)
    transposed_list_with_blocks = transposed_arr.tolist()
    return remove_blocker(transposed_list_with_blocks)
def pad_or_clip_nd(tensor, output_shape):
    """Pad or Clip given tensor to the output shape.
    Args:
      tensor: Input tensor to pad or clip.
      output_shape: A list of integers / scalar tensors (or None for dynamic dim)
        representing the size to pad or clip each dimension of the input tensor.
    Returns:
      Input tensor padded and clipped to the output shape.
    """
    tensor_shape = tf.shape(tensor)
    # per-dimension clip size; -1 means "keep the whole dimension"
    clip_size = [
        tf.where(tensor_shape[i] - shape > 0, shape, -1)
        if shape is not None else -1 for i, shape in enumerate(output_shape)
    ]
    clipped_tensor = tf.slice(
        tensor,
        begin=tf.zeros(len(clip_size), dtype=tf.int32),
        size=clip_size)
    # Pad tensor if the shape of clipped tensor is smaller than the expected
    # shape.
    clipped_tensor_shape = tf.shape(clipped_tensor)
    trailing_paddings = [
        shape - clipped_tensor_shape[i] if shape is not None else 0
        for i, shape in enumerate(output_shape)
    ]
    # padding only at the trailing end of each dimension
    paddings = tf.stack(
        [
            tf.zeros(len(trailing_paddings), dtype=tf.int32),
            trailing_paddings
        ],
        axis=1)
    padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
    # restore whatever static shape information is known
    output_static_shape = [
        dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
    ]
    padded_tensor.set_shape(output_static_shape)
    return padded_tensor
def _hr_ensemble_plot_end_phase_annotation(ax: plt.Axes, data: pd.DataFrame, phase: str, i: int, **kwargs):
    """Add End Phase annotations to heart rate ensemble plot.

    Draws a vertical line at the end of ``phase`` and a labeled box next to
    it; styling defaults come from ``_hr_ensemble_plot_params`` and can be
    overridden via keyword arguments.

    Parameters
    ----------
    ax : :class:`matplotlib.axes.Axes`
        axes object
    data : :class:`~pandas.DataFrame`
        data belonging to ``phase``
    phase : str
        phase to add annotations
    i : int
        counter of phase (used to stagger the annotation boxes vertically)
    """
    end_phase_text = kwargs.get("end_phase_text", _hr_ensemble_plot_params.get("end_phase_text"))
    end_phase_line_color = kwargs.get("end_phase_line_color", _hr_ensemble_plot_params.get("end_phase_line_color"))
    end_phase_line_style = kwargs.get("end_phase_line_style", _hr_ensemble_plot_params.get("end_phase_line_style"))
    end_phase_line_width = kwargs.get("end_phase_line_width", _hr_ensemble_plot_params.get("end_phase_line_width"))
    # vertical marker spanning the full axes height at the phase end
    ax.vlines(
        x=len(data),
        ymin=0,
        ymax=1,
        transform=ax.get_xaxis_transform(),
        ls=end_phase_line_style,
        lw=end_phase_line_width,
        colors=end_phase_line_color,
        zorder=3,
    )
    # label box, shifted down a little per phase so boxes don't overlap
    ax.annotate(
        text=end_phase_text.format(phase),
        xy=(len(data), 0.85 - 0.075 * i),
        xytext=(-5, 0),
        xycoords=ax.get_xaxis_transform(),
        textcoords="offset points",
        ha="right",
        fontsize="small",
        bbox=dict(facecolor="#e0e0e0", alpha=0.7, boxstyle="round"),
        zorder=5,
    )
def dynamic_lstm(x, n_neuron, act_fn=tanh, seq_len=None):
    """ assert x is batch_major, aka [batch, time, ...]

    Args:
        x: batch-major input tensor of shape [batch, time, ...].
        n_neuron (int): number of LSTM units.
        act_fn: cell activation function (default tanh).
        seq_len: optional per-example sequence lengths.

    Returns:
        (outputs, state) as returned by ``tf.nn.dynamic_rnn``.
    """
    cell_class = lstm
    with tf.variable_scope("fw"):
        # cell_clip bounds the cell state to stabilize training
        cell_fw = cell_class(n_neuron, activation=act_fn, cell_clip=15.0)
        o, s = tf.nn.dynamic_rnn(
            cell_fw, x, seq_len, dtype=tf.float32)
    return o, s
def ho2ax_single(ho):
    """Conversion from a single set of homochoric coordinates to an
    un-normalized axis-angle pair :cite:`rowenhorst2015consistent`.
    Parameters
    ----------
    ho : numpy.ndarray
        1D array of (x, y, z) as 64-bit floats.
    Returns
    -------
    ax : numpy.ndarray
        1D array of (x, y, z, angle) as 64-bit floats.
    Notes
    -----
    This function is optimized with Numba, so care must be taken with
    array shapes and data types.
    """
    # Constants stolen directly from EMsoft
    # fmt: off
    fit_parameters = np.array([
        0.9999999999999968, -0.49999999999986866, -0.025000000000632055,
        -0.003928571496460683, -0.0008164666077062752, -0.00019411896443261646,
        -0.00004985822229871769, -0.000014164962366386031, -1.9000248160936107e-6,
        -5.72184549898506e-6, 7.772149920658778e-6, -0.00001053483452909705,
        9.528014229335313e-6, -5.660288876265125e-6, 1.2844901692764126e-6,
        1.1255185726258763e-6, -1.3834391419956455e-6, 7.513691751164847e-7,
        -2.401996891720091e-7, 4.386887017466388e-8, -3.5917775353564864e-9
    ])
    # fmt: on
    ho_magnitude = np.sum(ho**2)
    # near-zero magnitude: identity rotation about the z axis
    if (ho_magnitude > -1e-8) and (ho_magnitude < 1e-8):
        ax = np.array([0, 0, 1, 0], dtype=np.float64)
    else:
        # Convert the magnitude to the rotation angle via the polynomial fit
        hom = ho_magnitude
        s = fit_parameters[0] + fit_parameters[1] * hom
        for i in nb.prange(2, 21):
            hom = hom * ho_magnitude
            s = s + fit_parameters[i] * hom
        hon = ho / np.sqrt(ho_magnitude)
        s = 2 * np.arccos(s)
        if np.abs(s - np.pi) < 1e-8:  # pragma: no cover
            ax = np.append(hon, np.pi)
        else:
            ax = np.append(hon, s)
    return ax
def construct():
    """Construct the final JSON file used in the graph.

    Reads 'all.json', computes a criticality score per (road, km, year)
    and an aggregated per-km score, and writes the result to 'final.json'.
    """
    def add(data, br, km, year, events):
        """Calculate critical level and insert events and in data dict."""
        if br not in data:
            data[br] = {}
        if km not in data[br]:
            data[br][km] = {}
        accidents = events.get("accidents") or 0
        deaths = events.get("deaths") or 0
        infractions = events.get("infractions") or 1
        # weighted criticality: deaths dominate, then accidents, then infractions
        critical = round((25*deaths + 5*accidents + 0.005*infractions), 3)
        events.update({"critical": critical})
        current_critical = data[br][km].get("critical") or 0
        data[br][km][year] = events
        # accumulate, weighting recent years more (smaller divisor)
        data[br][km]["critical"] = round(current_critical + critical /
                                         (int(YEARS[-1])-int(year)+1), 3)
    data = {}
    f_data = read_json(FILES_FOLDER + '/all.json')
    for br, kms in f_data.items():
        for km, years in kms.items():
            for year, events in years.items():
                if year in YEARS:
                    add(data, br, km, year, events)
            data[br][km]["coordinates"] = f_data[br][km]["coordinates"]
    save_json(data, FILES_FOLDER + '/final.json')
def task_export_commit(request):
    """Submit an export task for a monitoring task setting.

    Parses the JSON request body, fetches (or creates) the export job for
    the referenced task setting, rejects the request if a job is already
    running, and otherwise atomically marks the job as started.
    """
    try:
        datas = json.loads(request.body.decode())
        taskSetting = PlTaskSetting.objects.get(id=datas["params"]["id"])
        try:
            exportJob = PlExportJob.objects.get(task_setting_id=taskSetting.id)
        except ObjectDoesNotExist:
            exportJob = PlExportJob(task_setting_id=taskSetting.id, run_time=timezone.now() - timedelta(weeks=100))
            exportJob.save()  # save first so the row definitely exists: the conditional update below depends on it (guards against races)
        if 0 != exportJob.status:
            return response(-3, message="已经有导出任务提交,请先终止")
        # conditional update (status=0) so concurrent submissions cannot both win
        updateRows = PlExportJob.objects.filter(task_setting_id=taskSetting.id, status=0).update(
            status = 1,
            req_stop = 0,
            process = 0,
            worker_name = "",
            download_addr = "",
            task_setting_info = json.dumps(model_to_dict(taskSetting)),
            export_setting_info = json.dumps(datas["params"]["setting"])
        )
        if updateRows <= 0:
            return response(-4, message="更新失败")
        result = response()
    except ObjectDoesNotExist:
        result = response(-1, message="监控任务不存在,可能已经被删除。")
    except DatabaseError:
        result = response(-2, message="数据库查询异常")
    return result
def getExceptionMessage(exceptionDetails: dict) -> str:
    """Extract a human-readable message from an ``exceptionDetails`` object.

    Prefers the exception description; otherwise builds a message from the
    text and stack trace, one '  at func (url:line:col)' line per frame.
    """
    exc = exceptionDetails.get('exception')
    if exc:
        return exc.get('description')
    message = exceptionDetails.get('text', '')
    stack = exceptionDetails.get('stackTrace', dict())
    if stack:
        for frame in stack.get('callFrames'):
            url = str(frame.get('url', ''))
            line = str(frame.get('lineNumber', ''))
            col = str(frame.get('columnNumber'))
            func = frame.get('functionName', '<anonymous>')
            message = message + f'\n at {func} ({url}:{line}:{col})'
    return message
def evaluate_by_net(net, input_fn, **kwargs):
    """Encapsulate evaluate: run evaluation using the graph, session and
    placeholders held by ``net``.

    Args:
        net: network object exposing graph, session, feature/label
            placeholders and outputs.
        input_fn: callable providing the evaluation input data.
        **kwargs: forwarded unchanged to ``evaluate``.

    Returns:
        Whatever ``evaluate`` returns.
    """
    ret = evaluate(
        graph=net.graph, sess=net.session,
        fea_ph=net.features_ph, label_ph=net.labels_ph, outputs=net.outputs,
        input_fn=input_fn, **kwargs
    )
    return ret
def flush():
    """Try to flush all stdio buffers, both from python and from C."""
    try:
        for stream in (sys.stdout, sys.stderr):
            stream.flush()
    except (AttributeError, ValueError, IOError):
        # Streams may be closed, detached, or replaced; flushing is
        # best-effort only.
        pass
def ordinate(values,maxrange,levels):
    """Ordinate values given a maximum data range and number of levels

    Parameters:
    1. values: a scalar, or an array of continuous values to ordinate
    2. maxrange: the maximum data range. Values larger than this will be saturated.
    3. levels: the number of levels at which values are ordinated

    Zeros are passed through unchanged; every other value is quantised to
    an integer level in ``1..levels`` (saturating above *maxrange*).
    """
    def _quantize(dist):
        return int(1.0 * max(1, dist - 1) * levels / maxrange) + 1

    if type(values)==list or type(values)==tuple or type(values)==array:
        return [v if v == 0 else _quantize(v) for v in values]
    return values if values == 0 else _quantize(values)
def is_start_state(state):
    """
    Checks if the given state is a start state.

    A start state is at grid position 0 and facing north ('N').
    """
    at_origin = state.g_pos.value == 0
    facing_north = state.theta.value == 'N'
    return at_origin and facing_north
def days_in_month(year, month):
    """ return number of days in that month in that year

    Out-of-range months yield the string 'Invalid Month'; February
    accounts for leap years via ``is_leap``.
    """
    if month < 1 or month > 12:
        return 'Invalid Month'
    if month == 2 and is_leap(year):
        return 29
    return month_days[month]
def tmm_normal(fPath, bFilter=True):
    """
    Obtain the voom/TMM-normalised counts for a raw-count file.

    Args:
        fPath   string  Path with the raw counts
        bFilter Bool    Whether to filter out low-expression genes
    Returns:
        tmm dataframe   DataFrame with the log2(TMM) counts
    """
    # The backend expects the filter flag as the string "True"/"False".
    return tmm_normalization(fPath, str(bFilter))
def StrainFitness(all_cix_series,
                  all_cntrl_sum,
                  debug_print=False):
    """
    Compute per-strain fitness and its standard error.

    Args:
        all_cix_series (pandas Series): The current experiment name column of values from all_df_used
                                length = nAllStrainsCentralGoodGenes
        all_cntrl_sum (pandas Series): The sum of the current control values without the current index;
                                Is a pandas series the same length as all_cix series,
                                but with the sum of the other control values
                                length = nAllStrainsCentralGoodGenes
        debug_print (bool): Currently unused by this function's body.
    Returns:
        dict with:
        fit: pandas Series (float), median-normalized log2 ratio
             (1 + experiment) / (1 + control sum); the pseudocount of 1
             avoids log of zero.  Same length as inputs.
        se: pandas Series (float), standard error of fit.
            Same length as inputs.
    Description:
        fit is a simple log-ratio with pseudocount, normalized so the
        total median is 0 (it is *not* otherwise normalized).
    """
    sf_fit = mednorm( (1+all_cix_series).apply(np.log2) - (1 + all_cntrl_sum).apply(np.log2) )
    sf_se = (1/(1 + all_cix_series) + 1/(1 + all_cntrl_sum)).apply(math.sqrt)/ np.log(2)
    return {
        "fit": sf_fit,
        "se": sf_se
    }
def np_to_base64(img_np):
    """
    Convert numpy image (RGB) to a base64 data-URI string.
    """
    buffer = BytesIO()
    Image.fromarray(img_np.astype("uint8"), "RGB").save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode("ascii")
    return "data:image/png;base64," + encoded
def sample_unknown_parameters(_params, _n=None):
    """
    AW - sample_unknown_parameters - Sample the parameters we do not fix and hence wish to marginalize over.

    :param _params: SimpNameSp: dot accessible simple name space of simulation parameters.
    :param _n: number of draws; defaults to ``len(_params.log_a)``.
    :return: SimpNameSp: deep copy of _params in which every parameter listed in
        ``uncontrolled_parameters`` has been re-drawn from the prior.
    """
    n_draws = len(_params.log_a) if _n is None else _n
    result = dc(_params)
    prior_draws = sample_prior_parameters(result, n_draws)
    for name in _params.uncontrolled_parameters:
        setattr(result, name, getattr(prior_draws, name))
    return result
def vector2Table (hdu, xlabel='wavelength',ylabel='flux') :
    """
    Reads a 1-D vector from a FITS HDU into a Table.
    If present, the wavelength scale is hopefully in a simple, linear WCS!

    Returns None (after logging an error) when the HDU is not 1-D.
    """
    hdr = hdu.header
    if hdr['NAXIS'] != 1 :
        logging.error ('vector2Table can only construct 1-D tables!')
        return None
    nw = hdr['NAXIS1']
    pixl = np.arange(nw)        # 0-based pixel indices
    wave = None

    # GET FLUX (BSCALE/BZERO scaling is currently disabled: astropy already
    # applies header scaling when reading the data).
    bscale = 1.0
    bzero = 0.0
    flux = hdu.data*bscale+bzero

    # GET WAVELENGTH
    if 'CRVAL1' in hdr and 'CDELT1' in hdr : # SIMPLE WCS
        crpix1 = 1
        if 'CRPIX1' in hdr :
            crpix1 = hdr['CRPIX1']
        w0 = hdr['CRVAL1']
        dwdx = hdr['CDELT1']
        # FITS linear WCS: world = CRVAL1 + CDELT1 * (p - CRPIX1) with p
        # 1-based, i.e. p = pixl + 1.  The previous expression used
        # (crpix1 - 1), shifting the whole scale by one pixel (for the
        # standard CRPIX1 = 1, wave[0] came out as w0 + dwdx instead of w0).
        wave = w0 + dwdx*(pixl + 1 - crpix1)

    # GET UNITS
    if 'CUNIT1' in hdr :
        cunit1 = hdr['CUNIT1']
    elif wave is not None : # ASSUME ASTRONOMERS USE ANGSTROMS
        cunit1 = 'nm'
        wave /= 10.
    else :
        cunit1 = 'pix'

    # CONSTRUCT Table: x column is wavelength when a WCS was found,
    # otherwise raw pixel index.
    t = Table()
    if wave is not None :
        t[xlabel] = Column(wave,unit=cunit1, description=xlabel)
    else :
        t[xlabel] = Column(pixl,unit=cunit1, description=xlabel)
    t[ylabel] = Column(flux,unit='unknown', description=ylabel)
    t.meta = hdr
    return t
def subscribe(symbol: Text, app_name: str = "current_app") -> None:
    """Subscribe to market data for *symbol*.

    Uses the current Flask app when *app_name* is "current_app",
    otherwise looks the app up by name.  Raises MarketError when the
    market-data feature (``MD_FUNC``) is disabled on that app.
    """
    app = current_app if app_name == "current_app" else get_app(app_name)
    if not app.config.get("MD_FUNC"):
        raise MarketError(message="行情功能未开启, 无法进行订阅")
    app.market.subscribe(symbol)
def get_zarr_size(fn):
    """Get size of zarr file excluding metadata.

    Sums the stored byte counts of every top-level array in the group.
    """
    grp = zarr.open_group(fn)
    return sum(grp[var].nbytes_stored for var in grp.keys())
def rotation_matrix(a, b):
    """ Calculate rotation matrix M, such that Ma is aligned to b

    Args:
        a: Initial vector direction (3-vector; need not be unit length)
        b: Target direction (3-vector; need not be unit length)

    Uses Rodrigues' rotation formula for the general case; parallel and
    anti-parallel inputs are handled explicitly because the formula's
    denominator ||a x b||**2 vanishes there.
    """
    # Normalise FIRST so that parallel-but-different-magnitude inputs are
    # recognised; previously e.g. a = 2*b fell through to the general
    # formula and divided by s == 0.
    a = np.asarray(a) / np.linalg.norm(a)
    b = np.asarray(b) / np.linalg.norm(b)
    if np.allclose(a, b):
        return np.eye(3)
    # Anti-parallel - rotate 180 degrees about any perpendicular axis.
    if np.allclose(a, -b):
        # If vector is (anti)parallel to z, rotate around x
        if np.allclose(np.abs(a), np.array([0, 0, 1])):
            return np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
        # Otherwise rotate around z
        return np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
    v = np.cross(a, b)
    s = np.linalg.norm(v)   # sin of the rotation angle
    t = np.dot(a, b)        # cos of the rotation angle
    vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
    return np.eye(3) + vx + np.dot(vx, vx) * (1-t) / (s**2)
def bind(filename=None, blockpairs=1):
    """ Open a connection. If filename is not given or None, a filename
    is chosen automatically. This function returns blockpairs number
    of Writer, Reader pairs.
    """
    # File layout: fixed header followed by two blocks (writer then reader)
    # for every pair.
    size = HEAD_SIZE + blockpairs * 2 * BLOCK_SIZE
    m = Mmap(filename, size=size)
    # Header: magic string plus protocol version (major, minor).
    m[0:5] = 'yoton'.encode('ascii')
    m[5] = uint8(version_info[0])
    m[6] = uint8(version_info[1])
    blocks = []
    for pair in range(blockpairs):
        offset = 2 * pair * BLOCK_SIZE + HEAD_SIZE
        blocks.append(Writer(m, offset))
        blocks.append(Reader(m, offset + BLOCK_SIZE))
    return tuple(blocks)
def tmp_trestle_dir(tmpdir: pathlib.Path) -> pathlib.Path:
    """Create and return a new trestle project directory using std tmpdir fixture.

    Note that this fixture relies on the 'trestle init' command and therefore may
    misbehave if there are errors in trestle init.
    """
    pytest_cwd = pathlib.Path.cwd()
    os.chdir(tmpdir)
    try:
        testargs = ['trestle', 'init']
        with patch.object(sys, 'argv', testargs):
            # FIXME: Correctly capture return codes
            Trestle().run()
        yield tmpdir
    finally:
        # Always restore the original cwd even if 'trestle init' or the test
        # body raises; otherwise subsequent tests run inside the temp dir.
        os.chdir(pytest_cwd)
def saveuserprefs():
    """ Store the preferences of the current user, received as JSON """
    user = current_user()
    payload = request.get_json(silent=True)
    # Validate via the user form; report validation errors back as JSON.
    form = UserForm()
    form.init_from_dict(payload)
    err = form.validate()
    if err:
        return jsonify(ok=False, err=err)
    form.store(user)
    return jsonify(ok=True)
def process_args(args):
    """ Process the options got from get_args()

    Validates that the referenced model, range/scp (or tar/npy) files exist
    and that dropout-proportion is within [0, 1].  Raises Exception on the
    first problem found; returns the (possibly stripped) args otherwise.
    """
    args.input_dir = args.input_dir.strip()
    model_meta = os.path.join(args.input_dir, 'model.meta')
    if args.input_dir == '' or not os.path.exists(model_meta):
        raise Exception("This scripts expects the input model was exist in '{0}' directory.".format(args.input_dir))
    if args.tar_file == '':
        # Ranges/scp mode: both auxiliary files must exist.
        if args.ranges_file == '' or not os.path.exists(args.ranges_file):
            raise Exception("The specified range file '{0}' not exist.".format(args.ranges_file))
        if args.scp_file == '' or not os.path.exists(args.scp_file):
            raise Exception("The specified scp file '{0}' not exist.".format(args.scp_file))
    else:
        # Tar mode: the tar and its sibling .npy label file must exist.
        if not os.path.exists(args.tar_file):
            raise Exception("The specified tar file '{0}' not exist.".format(args.tar_file))
        if not os.path.exists(args.tar_file.replace('.tar', '.npy')):
            raise Exception("There is no corresponding npy label file for tar file '{0}'.".format(args.tar_file))
    if args.dropout_proportion > 1.0 or args.dropout_proportion < 0.0:
        raise Exception("The value of dropout-proportion must be in range [0 - 1].")
    return args
def make_features(batch, side, data_type='text'):
    """
    Args:
        batch (Tensor): a batch of source or target data.
        side (str): for source or for target.
        data_type (str): type of the source input.
            Options are [text|img|audio].
    Returns:
        A sequence of src/tgt tensors with optional feature tensors
        of size (len x batch).
    """
    assert side in ['src', 'conversation', 'tgt']
    raw = batch.__dict__[side]
    data = raw[0] if isinstance(raw, tuple) else raw
    # Collect any extra feature tensors attached as "<side>_feat_*".
    prefix = side + "_feat_"
    feat_keys = sorted(k for k in batch.__dict__ if prefix in k)
    levels = [data] + [batch.__dict__[k] for k in feat_keys]
    if data_type != 'text':
        return levels[0]
    return torch.cat([level.unsqueeze(2) for level in levels], 2)
def page_dirs_to_file_name(page_dirs):
    """
    Convert a page path given as a directory list, e.g.
    [category1, category2, page], into its file-name form
    "category1_._category2_._page".

    Empty components are dropped, remaining ones stripped, and the result
    is passed through the Windows-forbidden-word replacement.
    :param page_dirs: list of path components
    :return: joined, sanitised file name
    """
    parts = [p.strip() for p in page_dirs if p]
    file_name = '_._'.join(parts)
    return _replace_windows_ng_word(file_name)
def has_field(feature_class, field_name):
    """Returns true if the feature class has a field named field_name (case-insensitive)."""
    target = field_name.lower()
    return any(field.name.lower() == target
               for field in arcpy.ListFields(feature_class))
def test_connection_dialog_remembers_input_with_ssh_passphrase(
        qtbot, connection_dialog_factory):
    """
    Test that the dialog remembers the user's kernel connection
    settings and ssh key passphrase when the user checks the
    save checkbox.
    """
    dlg = connection_dialog_factory.submit_filled_dialog(use_keyfile=True,
                                                         save_settings=True)

    # Press ok and save connection settings
    qtbot.mouseClick(dlg.accept_btns.button(QDialogButtonBox.Ok),
                     Qt.LeftButton)

    # Create a fresh dialog and verify it is pre-populated from the saved
    # settings.
    new_dlg = connection_dialog_factory.get_default_dialog()
    assert new_dlg.cf.text() == pytest.cf_path
    assert new_dlg.rm_group.isChecked()
    assert new_dlg.hn.text() == pytest.hn
    assert new_dlg.un.text() == pytest.un
    assert new_dlg.pn.text() == str(pytest.pn)
    assert new_dlg.kf.text() == pytest.kf
    # Equivalent to the original double negation: check the passphrase
    # except when running on a Linux CI machine.
    if not sys.platform.startswith('linux') or os.environ.get('CI') is None:
        assert new_dlg.kfp.text() == pytest.kfp
def avg_speed_over_time(file_path):
    """Plot average speed over time from an activities CSV and save the image."""
    activities = pd.read_csv(file_path)
    # Keep only the columns this plot cares about.
    keep = ['name', 'upload_id', 'type', 'distance', 'moving_time',
            'average_speed', 'max_speed', 'total_elevation_gain',
            'activity_date']
    activities = activities[keep]
    # Restrict to runs (although tbh, it's already nearly just that).
    runs = activities.loc[activities['type'] == 'Run']

    fig = plt.figure()
    axis = fig.add_subplot(111)
    axis.plot_date(np.asarray(runs.activity_date),
                   np.asarray(runs.average_speed))
    axis.set_title('Average Speed over Time')
    fig.autofmt_xdate(rotation=45)
    fig.tight_layout()
    # Save the figure to disk via the shared helper.
    general_utils.save_image(plt, "avg_speed_over_time")
def process_settings(settings: AttrDict, params: T.Optional[T.Set[str]] = None, ignore: T.Iterable[str]=()) -> AttrDict:
    """
    Process an dict-like input parameters, according to the rules specified in the
    `Input parameter documentation <https://sqsgenerator.readthedocs.io/en/latest/input_parameters.html>`_. This function
    should be used for processing user input. Therefore, exports the parser functions defined in
    ``sqsgenerator.settings.readers``. To specify a specify subset of parameters the {params} argument is used.
    To {ignore} specifc parameters pass a list of parameter names

    :param settings: the dict-like user configuration
    :type settings: AttrDict
    :param params: If specified only the subset of {params} is processed (default is ``None``)
    :type params: Optional[Set[``None``]]
    :param ignore: a list/iterable of params to ignore (default is ``()``)
    :type ignore: Iterable[``str``]
    :return: the processed settings dictionary
    :rtype: AttrDict
    """
    params = params if params is not None else set(parameter_list())
    last_needed_parameter = max(params, key=parameter_index)
    ignore = set(ignore)
    # Processors run in registry order and may read results of earlier ones,
    # so an unrequested parameter is still processed unless it comes after
    # every requested parameter.
    for param, processor in __parameter_registry.items():
        if param not in params:
            # we can only skip this parameter if None of the other parameters depends on param
            if parameter_index(param) > parameter_index(last_needed_parameter):
                continue
        if param in ignore:
            continue
        settings[param] = processor(settings)
    return settings
def serialize_block(block: dict) -> Block:
    """Serialize raw block from dict to structured and filtered custom Block object

    Parameters
    ----------
    block : dict
        Raw KV block data from gRPC response

    Returns
    -------
    Block
        Structured, custom defined Block object for more controlled data access

    Notes
    -----
    Missing scalar fields default to None/0.  Each filtered transaction trace
    is mapped to a TransactionTrace, and each of its action traces to an
    ActionTrace wrapping an Action.
    NOTE(review): if a transaction trace has no "actionTraces" key, the inner
    ``map`` receives None and will fail when iterated — confirm upstream data
    always carries the key.
    """
    return Block(
        block.get("id", None),
        block.get("number", None),
        block.get("header", {}).get("timestamp", None),
        block.get("header", {}).get("producer", None),
        block.get("unfilteredTransactionCount", 0),
        block.get("unfilteredTransactionTraceCount", 0),
        block.get("unfilteredExecutedInputActionCount", 0),
        block.get("unfilteredExecutedTotalActionCount", 0),
        block.get("filteringIncludeFilterExpr", 0),
        block.get("filteredTransactionTraceCount", 0),
        block.get("filteredExecutedInputActionCount", 0),
        block.get("filteredExecutedTotalActionCount", 0),
        list(
            map(
                lambda tx_trace: TransactionTrace(
                    tx_trace.get("id", None),
                    tx_trace.get("blockNum", None),
                    tx_trace.get("blockTime", None),
                    tx_trace.get("receipt", {}).get("status", None),
                    tx_trace.get("receipt", {}).get("cpuUsageMicroSeconds", None),
                    tx_trace.get("netUsage", None),
                    tx_trace.get("elapsed", None),
                    list(
                        map(
                            lambda act_trace: ActionTrace(
                                act_trace.get("transactionId", None),
                                act_trace.get("blockNum", None),
                                act_trace.get("actionOrdinal", None),
                                Action(
                                    act_trace.get("action", {}).get("account", None),
                                    act_trace.get("action", {}).get("name", None),
                                    act_trace.get("action", {}).get(
                                        "jsonData", {"from": None, "to": None}
                                    ),
                                ),
                                act_trace.get("elapsed", None),
                                # First authorization actor only.
                                act_trace.get("action", {}).get(
                                    "authorization", [{"actor": None}]
                                )[0]["actor"],
                                act_trace.get("receiver", None),
                            ),
                            tx_trace.get("actionTraces", None),
                        )
                    ),
                ),
                block.get("filteredTransactionTraces", []),
            )
        ),
        block.get("filteredTransactionCount", 0),
    )
def add_f08_to_env(env) -> None:
    """Add Builders and construction variables for the f08 dialect.

    Suffix lists are taken from the environment when present; otherwise
    ['.f08'] (plain) and [] (preprocessed) are used.
    """
    try:
        suffixes = env['F08FILESUFFIXES']
    except KeyError:
        suffixes = ['.f08']
    try:
        pp_suffixes = env['F08PPFILESUFFIXES']
    except KeyError:
        pp_suffixes = []
    DialectAddToEnv(env, "F08", suffixes, pp_suffixes, support_mods=True)
def repr_pandas_Series(series, _):
    """
    Render *series* via ``Series.to_string``.

    This function can be configured by setting the `max_rows` attribute.
    """
    from pandas import get_option
    kwargs = dict(
        max_rows=repr_pandas_Series.max_rows,
        name=series.name,
        dtype=series.dtype,
        length=get_option("display.show_dimensions"),
    )
    return series.to_string(**kwargs)
def convert_fmt(fmt):
    """rs.format to pyglet format string (KeyError on unsupported formats)."""
    fmt_map = {
        rs.format.rgb8: 'RGB',
        rs.format.bgr8: 'BGR',
        rs.format.rgba8: 'RGBA',
        rs.format.bgra8: 'BGRA',
        rs.format.y8: 'L',
    }
    return fmt_map[fmt]
def train_ALS(train_data, validation_data, num_iters, reg_param, ranks):
    """
    Grid Search Function to select the best model based on RMSE of hold-out data

    Trains one ALS model per (rank, regularization) combination and keeps
    the model with the lowest validation RMSE.
    """
    min_error = float('inf')
    best_rank = -1
    best_regularization = 0
    best_model = None
    for rank in ranks:
        for reg in reg_param:
            # Train an ALS model on (userID, productID, rating) tuples.
            model = ALS.train(
                ratings=train_data,
                iterations=num_iters,
                rank=rank,
                lambda_=reg,  # regularization parameter
                seed=99)
            # Predict on the validation pairs and join with the true ratings.
            pairs = validation_data.map(lambda p: (p[0], p[1]))
            predictions = model.predictAll(pairs).map(lambda r: ((r[0], r[1]), r[2]))
            ratesAndPreds = validation_data.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
            # RMSE over the hold-out set.
            mse = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()
            error = math.sqrt(mse)
            print('{} latent factors and regularization = {}: validation RMSE is {}'.format(rank, reg, error))
            if error < min_error:
                min_error = error
                best_rank = rank
                best_regularization = reg
                best_model = model
    print('\nThe best model has {} latent factors and regularization = {}'.format(best_rank, best_regularization))
    return best_model
def coral_command_bootstrap(tsinghua_mirror=False):
    """
    Bootstrap the command by installing the dependencies.

    Detects the local distro, determines the missing RPM/pip packages,
    prepares the pip cache directory and installs everything from the
    Internet.  Exits the process with a non-zero status on any failure.

    :param tsinghua_mirror: use the Tsinghua package mirror when True.
    """
    # pylint: disable=unused-variable
    local_host = ssh_host.get_local_host(ssh=False)
    log = clog.get_log(console_format=clog.FMT_NORMAL)
    distro = local_host.sh_distro(log)
    if distro is None:
        log.cl_error("failed to get distro of host [%s]",
                     local_host.sh_hostname)
        sys.exit(-1)
    missing_rpms, missing_pips = \
        install_common.command_missing_packages(distro)
    if missing_rpms is None:
        log.cl_error("failed to get the missing packages of host [%s]",
                     local_host.sh_hostname)
        sys.exit(-1)
    # Make sure the pip download cache directory exists before bootstrapping.
    command = "mkdir -p %s" % constant.CORAL_BUILD_CACHE_PIP_DIR
    retval = local_host.sh_run(log, command, timeout=None)
    if retval.cr_exit_status:
        log.cl_error("failed to run command [%s] on host [%s], "
                     "ret = %d, stdout = [%s], stderr = [%s]",
                     command, local_host.sh_hostname,
                     retval.cr_exit_status, retval.cr_stdout,
                     retval.cr_stderr)
        sys.exit(-1)
    ret = install_common.bootstrap_from_internet(log, local_host, missing_rpms,
                                                 missing_pips,
                                                 constant.CORAL_BUILD_CACHE_PIP_DIR,
                                                 tsinghua_mirror=tsinghua_mirror)
    if ret:
        log.cl_error("failed to bootstrap the coral command from Internet")
        sys.exit(ret)
def parse_input_vspec (opts):
"""Parses input from vspec and returns excitation energies in the
form [energy, f], in eV and atomic units units, respectively."""
lines = sys.stdin.readlines ()
inside_data = False
roots = []
for l in lines:
if "<START>" in l:
try:
ls = l.split()
tag = ls[0]
nexcite = int (ls[1])
except:
raise Exception ("Failed to parse <START> tag and number: {0}".format(l))
iexcite = 0
inside_data = True
continue
if inside_data:
if "<END>" in l:
inside_data = False
continue
# break
try:
line_split = l.strip().split()
n = int (line_split[0])
occ = int (line_split[1]) #not used
virtual = int (line_split[2]) #not used
energy_ev = float (line_split[3])
osc = float (line_split[7])
except:
raise Exception ("Failed to parse data line: {0}".format(l))
iexcite = iexcite + 1
if n != iexcite:
raise Exception ("Expected excitation number {0}, found {1}".format(iexcite, n))
if energy_ev < 0.0:
print ("{0} Warning: Ignored negative vpsec excitation: {1} eV, {2}".format(opts.cchar, energy_ev, osc))
if opts.verbose:
sys.stderr.write ("Warning: Ignored negative vpsec excitation: {0} eV, {1}\n".format(energy_ev, osc))
else:
roots.append ([energy_ev, osc])
# if not inside_data:
# raise Exception ("Failed to find <START> tag")
if iexcite != nexcite:
print ("{0} Warning: Expected {1} excitations, found {2}".format(opts.cchar, nexcite, iexcite))
if opts.verbose:
sys.stderr.write ("Warning: Expected {0} excitations, found {1}\n".format(nexcite,iexcite))
if opts.verbose:
sys.stderr.write ("{0}: Found {1} vspec excitations\n".format(pname, len(roots)))
return roots | 27,975 |
def is_yaml_file(filename):
    """Return true if 'filename' ends in .yml or .yaml (case-insensitive), and false otherwise."""
    return os.path.splitext(filename)[1].lower() in (".yaml", ".yml")
def response_ack(**kwargs):
    """ Policy-based provisioning of ACK value.

    Expects keyword arguments: tlv, code, policy, post_c2c.  Returns a
    one-element list with the (deep-copied) TLV marked as an 'info'
    operation, or None if anything raises.
    """
    try:
        tlv, code, policy, post_c2c = kwargs["tlv"], kwargs["code"], kwargs["policy"], kwargs["post_c2c"]
        new_tlv = copy.deepcopy(tlv)
        if post_c2c is not True:
            ret = policy.get_available_policy(new_tlv)
            if ret == None:
                # NOTE(review): this line is a no-op dict lookup, not an
                # assignment -- it raises KeyError (silenced by the broad
                # except below) unless the key already exists.  It looks
                # like something such as new_tlv["value"] = "notAvailable"
                # was intended; confirm against the c2c protocol code.
                new_tlv["notAvailable"]
        new_tlv['ope'] = 'info'
        return [new_tlv]
    except Exception as ex:
        # NOTE(review): broad catch turns any failure (including the
        # KeyError above) into a None return with only a print for
        # diagnostics.
        print("Exception in response_ack()", ex)
        return None
def is_file(path, use_sudo=False):
    """
    Check if a path exists, and is a file.

    Runs the shell test ``[ -f path ]`` remotely, via ``sudo`` instead of
    ``run`` when *use_sudo* is true.
    """
    # Conditional expression instead of the fragile `cond and a or b` idiom
    # (which breaks whenever the middle operand is falsy).
    func = sudo if use_sudo else run
    with settings(hide('running', 'warnings'), warn_only=True):
        return func('[ -f "%(path)s" ]' % locals()).succeeded
def test_p6():
    """ Test some joint and conditional perplexities """
    # Two perfectly correlated bits: the joint distribution has two equally
    # likely outcomes (perplexity 2), while either bit given the other is
    # fully determined (conditional perplexity 1).
    d = D(['00', '11'], [1/2]*2)
    assert P(d) == pytest.approx(2)
    assert P(d, [0], [1]) == pytest.approx(1)
    assert P(d, [1], [0]) == pytest.approx(1)
def component(project_dir, *args, **kwargs):
    """CLI for generating MLOps archetypes.

    Resolves *project_dir* relative to the current working directory and
    delegates to ``Interface().component``.  On any failure the error is
    logged and the process exits with status 1.
    """
    try:
        cwd = Path().cwd() / project_dir
        Interface().component(cwd, *args, **kwargs)
    except Exception as error:
        logger.exception(error)
        # Exit with a non-zero status so shells/CI can detect the failure
        # (previously this exited with 0, signalling success on error).
        sys.exit(1)
def bounded_random_walk(minval, maxval, delta_min, delta_max, T,
                        dtype=tf.float32, dim=1):
    """
    Simulates a random walk with boundary conditions. Used for data augmentation
    along entire tube.
    Based on: https://stackoverflow.com/questions/48777345/vectorized-random-
    walk-in-python-with-boundaries

    Args:
        minval (int/float): Minimum value.
        maxval (int/float): Maximum value.
        delta_min (int/float): Minimum change.
        delta_max (int/float): Maximum change.
        T (int): Length of sequence.
        dtype (type): Data type of walk.
        dim (int): Dimension.

    Returns:
        Tensor (T x dim).
    """
    # Degenerate range: no room to walk, return the constant lower bound.
    if maxval <= minval:
        return tf.ones((T, dim)) * minval

    # Don't do this yet for consistency
    # Legacy path: independent uniform samples per step (no walk at all).
    if minval == delta_min and maxval == delta_max:
        print('Using the old data augmentation!')
        walk = tf.random_uniform(
            shape=(T, dim),
            minval=minval,
            maxval=maxval,
            dtype=dtype,
        )
        return walk

    start = tf.random_uniform(
        shape=(1, dim),
        minval=minval,
        maxval=maxval,
        dtype=dtype,
    )
    size = maxval - minval
    # Cumulative sum of per-step deltas gives the unbounded walk.
    walk = tf.cumsum(tf.random_uniform(
        shape=(T, dim),
        minval=delta_min,
        maxval=delta_max,
        dtype=dtype,
    ))
    # Reflect the walk back into [minval, maxval] (triangle-wave folding).
    return tf.abs((walk + start - minval + size) % (2 * size) - size) + minval
def validate_photo_url(photo_url, required=False):
    """Parses and validates the given URL string.

    Args:
        photo_url: URL to validate; ``None`` is allowed unless *required*.
        required: When True, ``None`` is rejected as well.

    Returns:
        The validated URL, or ``None`` when optional and absent.

    Raises:
        ValueError: If the value is not a non-empty string or has no
            network location (does not look like an absolute URL).
    """
    if photo_url is None and not required:
        return None
    if not isinstance(photo_url, str) or not photo_url:
        raise ValueError(
            'Invalid photo URL: "{0}". Photo URL must be a non-empty '
            'string.'.format(photo_url))
    # Only the parse itself goes in the try block; previously the broad
    # `except Exception` also caught the deliberate ValueError raised for a
    # missing netloc and re-raised an identical one, masking the original.
    try:
        parsed = parse.urlparse(photo_url)
    except Exception:
        raise ValueError('Malformed photo URL: "{0}".'.format(photo_url)) from None
    if not parsed.netloc:
        raise ValueError('Malformed photo URL: "{0}".'.format(photo_url))
    return photo_url
def to_flp(stipples, dpi=300, x_mm=0, y_mm=0, laser_pwr=35000,
           ticks=500, base=100):
    """Converts a set of stipples into a list of FLP packets.

    dpi is the image's DPI
    x_mm and y_mm are the corner location of the image (default 0,0)
        (where 0,0 is the center of the build platform)
    laser_pwr is the laser's power level in ticks
    ticks is the number of frames the laser spends on a black point
    base is the number of frames the laser spends on a white point

    Each stipple is (x_px, y_px, intensity) with intensity in [0, 1];
    darker points (lower intensity) get longer dwell times.
    """
    # Accumulated list of FLP packets
    packets = F.Packets()

    # Sort by X to reduce the amount of laser moves necessary
    stipples = sorted(stipples, key=lambda s: s[0])

    # Draw stuff for every point
    for x, y, i in stipples:
        # Center position in mm (pixel -> inch -> mm, then offset).
        x = mm_to_pos(x / float(dpi) * 25.4 + x_mm)
        y = mm_to_pos(y / float(dpi) * 25.4 + y_mm)

        # Decide how long to stay on this point (longer time = darker point)
        t = int(ceil((ticks - base) * (1 - i)) + base)
        if t == 0:
            continue

        # Move to this stipple's location with the laser off, then pause
        # briefly to let the controller stabilize
        packets.append(F.LaserPowerLevel(0))
        packets.append(F.XYMove([[x, y, 200], [x, y, 100]]))

        # Draw the spot with the laser on
        packets.append(F.LaserPowerLevel(laser_pwr))
        packets.append(F.XYMove([[x, y, t]]))

    return packets
def GetSchema(component):
    """Convenience function for finding the ancestor XMLSchema instance.

    Walks ``_parent()`` links upward until an XMLSchema is reached
    (assumes every component ultimately belongs to one).
    """
    node = component
    while not isinstance(node, XMLSchema):
        node = node._parent()
    return node
def save_status(filename: str, status: StatusList) -> None:
    """
    Append a status record to a file as one CSV row.
    --------
    :param filename: file to append to
    :param status: StatusList record to write (via ``out_tuple()``)
    :return: None
    """
    row = status.out_tuple()
    with open(filename, mode='a', encoding="utf-8") as fh:
        csv.writer(fh, lineterminator='\n').writerow(row)
def ranked_bots_query(alias="ranked_bots"):
    """
    Builds a query that ranks all bots.

    This is a function in case you need this as a subquery multiple times.

    Only bots of active users are included; results are ordered by score
    descending and carry both a global rank and a per-organization rank.
    """
    return sqlalchemy.sql.select([
        bots.c.user_id,
        bots.c.id.label("bot_id"),
        bots.c.mu,
        bots.c.sigma,
        bots.c.score,
        bots.c.games_played,
        bots.c.version_number,
        bots.c.language,
        bots.c.update_time,
        bots.c.compile_status,
        # Global rank over all active users' bots.
        sqlalchemy.sql.func.rank().over(
            order_by=bots.c.score.desc()
        ).label("bot_rank"),
        # Rank within the owning user's organization.
        sqlalchemy.sql.func.rank().over(
            partition_by=users.c.organization_id,
            order_by=bots.c.score.desc()
        ).label("bot_organization_rank"),
    ]).select_from(
        bots.join(users, bots.c.user_id == users.c.id)
    ).where(
        users.c.is_active == True
    ).order_by(
        bots.c.score.desc()
    ).alias(alias)
def marathon_deployments_check(service):
    """Checks for consistency between deploy.yaml and the marathon yamls.

    Returns True when every marathon instance has a deploy.yaml entry and
    vice versa; prints a human-readable report either way.
    """
    the_return = True
    pipeline_deployments = get_pipeline_config(service)
    pipeline_steps = [step['instancename'] for step in pipeline_deployments]
    pipeline_steps = [step for step in pipeline_steps if step not in DEPLOY_PIPELINE_NON_DEPLOY_STEPS]
    marathon_steps = get_marathon_steps(service)
    in_marathon_not_deploy = set(marathon_steps) - set(pipeline_steps)
    if len(in_marathon_not_deploy) > 0:
        # print statements converted to single-argument print() calls so the
        # module also parses under Python 3 (behaviour is identical on 2).
        print("%s There are some instance(s) you have asked to run in marathon that" % x_mark())
        print("  do not have a corresponding entry in deploy.yaml:")
        print("    %s" % PaastaColors.bold(", ".join(in_marathon_not_deploy)))
        print("  You should probably add entries to deploy.yaml for them so they")
        print("  are deployed to those clusters.")
        the_return = False
    in_deploy_not_marathon = set(pipeline_steps) - set(marathon_steps)
    if len(in_deploy_not_marathon) > 0:
        print("%s There are some instance(s) in deploy.yaml that are not referenced" % x_mark())
        print("  by any marathon instance:")
        print("    %s" % PaastaColors.bold((", ".join(in_deploy_not_marathon))))
        print("  You should probably delete these deploy.yaml entries if they are unused.")
        the_return = False
    if the_return is True:
        print(success("All entries in deploy.yaml correspond to a marathon entry"))
        print(success("All marathon instances have a corresponding deploy.yaml entry"))
    return the_return
def get_book_info(book_id, books):
    """Obtain meta data of certain books.

    :param book_id: Book id(s) to look up
    :type: int or list of ints
    :param books: Dataframe containing the meta data
    :type: pandas dataframe
    :return: Authors, titles and cover-image URLs for the book ids
    :rtype: List[str], List[str], List[str]
    :raises ValueError: if any id is not present in *books*
    """
    if not isinstance(book_id, list):
        book_id = [book_id]
    book_authors, book_titles, book_img_urls = [], [], []
    for i in book_id:
        book_info = books.loc[books["book_id"]==i].squeeze()
        if book_info.shape[0]==0:
            # Report the specific id that failed (previously the whole input
            # list was formatted into the message instead of `i`).
            raise ValueError("Could not find book_id {} in the dataset.".format(i))
        book_authors.append(book_info.authors)
        book_titles.append(book_info.title)
        book_img_urls.append(book_info.image_url)
    return book_authors, book_titles, book_img_urls
def save_error_message(func):
    """
    Decorator that records a failed transition's message on the entity.

    This function will work only if transition_entity is defined in kwargs and
    transition_entity is instance of ErrorMessageMixin
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exception:
            message = six.text_type(exception)
            entity = kwargs['transition_entity']
            if message:
                # Persist only the error_message column.
                entity.error_message = message
                entity.save(update_fields=['error_message'])
            raise exception
    return wrapped
def EMV(data,n=20,m=23):
    """Compute the Ease-of-Movement (EMV) indicator and derive two
    long/short signal columns (+1 = buy, -1 = sell) from it.

    data must carry 'high', 'low' and 'vol' columns; n is the EMV
    smoothing window, m the window of the EMV moving average.
    """
    def emv(high,low,vol,n=14):
        # Midpoint move between consecutive bars; first element stays 0.
        MID = np.zeros(len(high))
        MID[1:] = (np.array(high[1:])+np.array(low[1:])-np.array(high[:-1])-np.array(low[:-1]))/2.
        # "Box ratio": volume scaled by the bar's range.
        BRO = np.array(vol)/(100000000.*(np.array(high)-np.array(low)))
        EM = MID/BRO
        return ta.SMA(EM,n)
    data['emv'] = emv(np.array(data.high),np.array(data.low),np.array(data.vol),n)
    data['maemv'] = ta.SMA(np.array(data['emv']),m)
    signal = pd.DataFrame(index=data.index)
    #strategy 1
    """
    EMV 大于0,买入,信号为1
    EMV 小于0,卖出,信号为-1
    常用参数:n=14
    """
    # Strategy 1: long when EMV > 0, short otherwise (typical n=14).
    signal['1'] = (data['emv']>0)*2 - 1
    #strategy 2
    """
    EMV 大于MAEMV,买入,信号为1
    EMV 小于MAEMV,卖出,信号为-1
    参数设为n=20,m=23
    """
    # Strategy 2: long when EMV is above its moving average (n=20, m=23).
    signal['2'] = (data['emv'] > data['maemv'])*2 - 1
    signal = signal.fillna(0)
    return signal
def desired_directions(state: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Given the current state and destination, compute desired direction.

    Columns 4:6 hold each agent's destination, columns 0:2 its position;
    returns (unit direction vectors, distances).
    """
    to_destination = state[:, 4:6] - state[:, 0:2]
    return normalize(to_destination)
def binary_repr(number, max_length = 1025):
    """
    Return the binary representation of the input *number* as a
    string.

    This is more efficient than using :func:`base_repr` with base 2.

    Increase the value of max_length for very large numbers. Note that
    on 32-bit machines, 2**1023 is the largest integer power of 2
    which can be converted to a Python float.
    """
    # Materialise the digits in a list: under Python 3 `map` returns a lazy
    # iterator, so the original list-style `.count()`/`.index()` calls and
    # second iteration would fail.
    shifts = map(operator.rshift, max_length * [number],
                 range(max_length - 1, -1, -1))
    digits = [shift % 2 for shift in shifts]
    if 1 not in digits:
        # Preserve the historical behaviour of returning the *integer* 0
        # for zero input, rather than the string '0'.
        return 0
    digits = digits[digits.index(1):]
    # Python 3 ints have no 'L' suffix, so no .replace('L', '') is needed.
    return ''.join(map(repr, digits))
def df_fc_overlap_9():
    """Scenario case with 3 sets of 2 overlapping fragments, bound to a common combination of 2 redundant fragments.

    Returns a DataFrame of expected fragment-combination rows (fragment ids,
    classification, atom index tuples, molecules and the fc string) for the
    hard-coded test molecule below.
    """
    mol = Chem.MolFromSmiles('NC1C(O)C(CCCC2CC2CCC2CC2)C1CCC1CC(C(N)C1O)C1CCC(O)C(N)C1')
    # Columns: see the `columns=` list at the end; _aidxf1/_aidxf2 are the
    # atom indices matched by each fragment, `fc` the combination label.
    return DataFrame([
        ['mol_fc_overlap_9', 'XXX', 'O1', 0, 'O1:0', 'O2', 0, 'O2:0', 'ffo', 'fusion', 'false_positive', 'overlap', (30, 29, 28, 27, 26, 33, 31), (32, 31, 29, 28, 27, 26, 33), 34, mol, mol_o1, mol_o2, 'O1:0@1,2,3,4,5,6[ffo]O2:0@1,2,3,4,5,6'],
        ['mol_fc_overlap_9', 'XXX', 'O1', 0, 'O1:0', 'O5', 0, 'O4:0', 'cm', 'connection', 'monopodal', '', (30, 29, 28, 27, 26, 33, 31), (25, 24, 22, 21, 20, 19), 34, mol, mol_o1, mol_o4, 'O1:0@4[cm]O4:0@3'],
        ['mol_fc_overlap_9', 'XXX', 'O1', 0, 'O1:0', 'O5', 0, 'O5:0', 'cm', 'connection', 'monopodal', '', (30, 29, 28, 27, 26, 33, 31), (23, 22, 21, 20, 19, 24), 34, mol, mol_o1, mol_o5, 'O1:0@4[cm]O5:0@2'],
        ['mol_fc_overlap_9', 'XXX', 'O2', 0, 'O2:0', 'O5', 0, 'O4:0', 'cm', 'connection', 'monopodal', '', (32, 31, 29, 28, 27, 26, 33), (25, 24, 22, 21, 20, 19), 34, mol, mol_o2, mol_o4, 'O2:0@5[cm]O4:0@3'],
        ['mol_fc_overlap_9', 'XXX', 'O2', 0, 'O2:0', 'O5', 0, 'O5:0', 'cm', 'connection', 'monopodal', '', (32, 31, 29, 28, 27, 26, 33), (23, 22, 21, 20, 19, 24), 34, mol, mol_o2, mol_o5, 'O2:0@5[cm]O5:0@2'],
        ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O5', 0, 'O5:0', 'ffo', 'fusion', 'false_positive', 'overlap', (25, 24, 22, 21, 20, 19), (23, 22, 21, 20, 19, 24), 34, mol, mol_o4, mol_o5, 'O4:0@1,2,3,4,5[ffo]O5:0@1,2,3,4,5'],
        ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O6', 0, 'O6:0', 'cm', 'connection', 'monopodal', '', (25, 24, 22, 21, 20, 19), (1, 2, 4, 16), 34, mol, mol_o4, mol_o6, 'O4:0@5[cm]O6:0@3'],
        ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O8', 0, 'O8:0', 'cm', 'connection', 'monopodal', '', (25, 24, 22, 21, 20, 19), (0, 1, 2, 4, 16), 34, mol, mol_o4, mol_o8, 'O4:0@5[cm]O8:0@4'],
        ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O9', 0, 'O9:0', 'cm', 'connection', 'monopodal', '', (25, 24, 22, 21, 20, 19), (3, 2, 1, 16, 4), 34, mol, mol_o4, mol_o9, 'O4:0@5[cm]O9:0@3'],
        ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O5:0', 'O6', 0, 'O6:0', 'cm', 'connection', 'monopodal', '', (23, 22, 21, 20, 19, 24), (1, 2, 4, 16), 34, mol, mol_o5, mol_o6, 'O5:0@4[cm]O6:0@3'],
        ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O5:0', 'O8', 0, 'O8:0', 'cm', 'connection', 'monopodal', '', (23, 22, 21, 20, 19, 24), (0, 1, 2, 4, 16), 34, mol, mol_o5, mol_o8, 'O5:0@4[cm]O8:0@4'],
        ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O5:0', 'O9', 0, 'O9:0', 'cm', 'connection', 'monopodal', '', (23, 22, 21, 20, 19, 24), (3, 2, 1, 16, 4), 34, mol, mol_o5, mol_o9, 'O5:0@4[cm]O9:0@3'],
        ['mol_fc_overlap_9', 'XXX', 'O6', 0, 'O6:0', 'O7', 0, 'O7:0', 'cm', 'connection', 'monopodal', '', (1, 2, 4, 16), (8, 9, 10), 34, mol, mol_o6, mol_o7, 'O6:0@2[cm]O7:0@0'],
        ['mol_fc_overlap_9', 'XXX', 'O6', 0, 'O6:0', 'O8', 0, 'O8:0', 'ffs', 'fusion', 'false_positive', 'substructure', (1, 2, 4, 16), (0, 1, 2, 4, 16), 34, mol, mol_o6, mol_o8, 'O6:0@0,1,2,3[ffs]O8:0@1,2,3,4'],
        ['mol_fc_overlap_9', 'XXX', 'O6', 0, 'O6:0', 'O9', 0, 'O9:0', 'ffs', 'fusion', 'false_positive', 'substructure', (1, 2, 4, 16), (3, 2, 1, 16, 4), 34, mol, mol_o6, mol_o9, 'O6:0@0,1,2,3[ffs]O9:0@1,2,3,4'],
        ['mol_fc_overlap_9', 'XXX', 'O7', 0, 'O7:0', 'O7', 1, 'O7:1', 'cm', 'connection', 'monopodal', '', (8, 9, 10), (13, 14, 15), 34, mol, mol_o7, mol_o7, 'O7:0@2[cm]O7:1@0'],
        ['mol_fc_overlap_9', 'XXX', 'O7', 0, 'O7:0', 'O8', 0, 'O8:0', 'cm', 'connection', 'monopodal', '', (8, 9, 10), (0, 1, 2, 4, 16), 34, mol, mol_o7, mol_o8, 'O7:0@0[cm]O8:0@3'],
        ['mol_fc_overlap_9', 'XXX', 'O7', 0, 'O7:0', 'O9', 0, 'O9:0', 'cm', 'connection', 'monopodal', '', (8, 9, 10), (3, 2, 1, 16, 4), 34, mol, mol_o7, mol_o9, 'O7:0@0[cm]O9:0@4'],
        ['mol_fc_overlap_9', 'XXX', 'O8', 0, 'O8:0', 'O9', 0, 'O9:0', 'ffo', 'fusion', 'false_positive', 'overlap', (0, 1, 2, 4, 16), (3, 2, 1, 16, 4), 34, mol, mol_o8, mol_o9, 'O8:0@1,2,3,4[ffo]O9:0@1,2,3,4'],
    ], columns=['idm', 'inchikey', 'idf1', 'idxf1', 'fid1', 'idf2', 'idxf2', 'fid2', 'fcc', 'category', 'type', 'subtype', '_aidxf1', '_aidxf2', 'hac', 'mol', 'mol_frag_1', 'mol_frag_2', 'fc'])
def check_cal(es_url, es_index, id):
    """Query ElasticSearch for a calibration file with the specified input ID.

    @param es_url: base URL of the ElasticSearch endpoint (trailing slash optional)
    @param es_index: name of the index to search
    @param id: document ID to look up (exact-match term query on _id)
    @return (total, id) tuple: the total hit count and the matched document
            ID, or (0, 'NONE') when nothing was found (including HTTP 404).
    Raises an HTTPError (via requests) for any other error status.
    """
    query = {
        "query":{
            "bool":{
                "must": [
                    { "term": { "_id": id } },
                ]
            }
        },
        "fields": [],
    }
    # Strip a trailing slash (if any) so the URL join is uniform; the
    # resulting search URL is identical to the previous two-branch version.
    base_url = es_url[:-1] if es_url.endswith('/') else es_url
    search_url = '%s/%s/_search' % (base_url, es_index)
    #logger.info("search_url: %s" % search_url)
    # Initialize defaults up front: previously any non-200 status that
    # raise_for_status() lets pass (e.g. a 3xx) fell through to the return
    # with `total` and `id` unbound, raising UnboundLocalError.
    total, id = 0, 'NONE'
    r = requests.post(search_url, data=json.dumps(query))
    if r.status_code == 200:
        result = r.json()
        #logger.info(pformat(result))
        total = result['hits']['total']
        if total > 0:
            id = result['hits']['hits'][0]['_id']
    else:
        logger.error("Failed to query %s:\n%s" % (es_url, r.text))
        logger.error("query: %s" % json.dumps(query, indent=2))
        logger.error("returned: %s" % r.text)
        # A 404 simply means "not found"; anything else is a real error.
        if r.status_code != 404:
            r.raise_for_status()
    return total, id
def mountpoint_create(name, size):
    """Service-layer factory: build and return a MountPoint with the given
    name and size."""
    # Construct and hand back directly; no extra state to set up here.
    return MountPoint(name, size)
def test_torch_transform(ctx):
    """try using torch.Transform in combination with bgflow.Flow"""
    # Fixed seed so the random input and affine parameters are reproducible.
    torch.manual_seed(10)
    # was `torch.torch.randn`: redundant module self-reference, same function
    x = torch.randn(10, 3, **ctx)
    flow = SequentialFlow([
        TorchTransform(IndependentTransform(SigmoidTransform(), 1)),
        TorchTransform(
            AffineTransform(
                loc=torch.randn(3, **ctx),
                # offset by 2 keeps the scale strictly positive
                scale=2.0+torch.rand(3, **ctx), event_dim=1
            ),
        ),
        BentIdentity(),
        # test the reinterpret_batch_ndims arguments
        TorchTransform(SigmoidTransform(), 1)
    ])
    # Round trip: forward followed by inverse must recover the input, and
    # the log-det-Jacobians of the two directions must cancel exactly.
    z, dlogp = flow.forward(x)
    y, neg_dlogp = flow.forward(z, inverse=True)
    # Looser tolerance for single precision.
    tol = 1e-7 if ctx["dtype"] is torch.float64 else 1e-5
    assert torch.allclose(x, y, atol=tol)
    assert torch.allclose(dlogp, -neg_dlogp, atol=tol)
def test_get_desired_value():
    """Verify desired-value reporting of a sensor across its state changes."""
    const = get_const("1.4")
    value_type = const.SetReq.V_LIGHT_LEVEL
    node = Sensor(1)
    node.add_child_sensor(0, const.Presentation.S_LIGHT_LEVEL)
    # Nothing has been reported yet, so there is no desired value.
    assert node.get_desired_value(0, value_type) is None
    # Without smart sleep, the desired value mirrors the reported value.
    node.update_child_value(0, value_type, "50")
    assert node.get_desired_value(0, value_type) == "50"
    # With smart sleep, an explicitly set desired state takes precedence...
    node.init_smart_sleep_mode()
    node.set_child_desired_state(0, value_type, "90")
    assert node.get_desired_value(0, value_type) == "90"
    # ...until a fresh report overwrites it.
    node.update_child_value(0, value_type, "70")
    assert node.get_desired_value(0, value_type) == "70"
    # Unknown child IDs or value types yield None.
    assert node.get_desired_value(100, value_type) is None
    assert node.get_desired_value(0, 9999) is None
def test_halo_update_timer(
    zeros_quantity_list,
    communicator_list,
    n_points_update,
    n_points,
    numpy,
    subtests,
    boundary_dict,
    ranks_per_tile,
):
    """
    Check that a completed halo update records a positive timing for every
    expected label, and for no other labels.
    """
    # Kick off all updates first so communication can overlap, then wait.
    updaters = [
        comm.start_halo_update(quantity, n_points_update)
        for comm, quantity in zip(communicator_list, zeros_quantity_list)
    ]
    for updater in updaters:
        updater.wait()
    expected_labels = ("pack", "unpack", "Isend", "Irecv", "wait")
    for comm in communicator_list:
        with subtests.test(rank=comm.rank):
            assert isinstance(comm.timer, fv3gfs.util.Timer)
            times = comm.timer.times
            # Exactly the expected labels must be present: none missing...
            assert len(set(expected_labels) - times.keys()) == 0
            # ...and none extra.
            assert len(times.keys() - set(expected_labels)) == 0
            # Each label recorded a positive float duration.
            for label in expected_labels:
                assert times[label] > 0.0
                assert isinstance(times[label], float)
def install(user=False, symlink=False, overwrite=False, **kwargs):
    """Install the bqplot notebook extension.

    Parameters
    ----------
    user: bool
        Install for the current user only instead of system-wide.
    symlink: bool
        Symlink the extension instead of copying it (useful for development).
    overwrite: bool
        Overwrite any previously-installed files for this extension.
    **kwargs: keyword arguments
        Forwarded unchanged to ``install_nbextension``.
    """
    # The static assets live in the 'nbextension' directory next to this file.
    nbext_dir = join(dirname(abspath(__file__)), 'nbextension')
    install_nbextension(
        nbext_dir,
        destination='bqplot',
        symlink=symlink,
        user=user,
        overwrite=overwrite,
        **kwargs
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.