| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def test_torch():
"""Make sure we know how to cast dypes and devices of models"""
model = SimpleNN()
device = "cuda"
model = model.to(device)
x = torch.arange(10.0).to(device)
output = model.forward(x)
assert x.device.type == device
assert model.mm.device.type == device
assert output.device.type == device
assert x.dtype == torch.float32
assert model.mm.dtype == torch.float32
assert output.dtype == torch.float32
model = model.double()
x = x.double()
output = model.forward(x)
assert x.device.type == device
assert model.mm.device.type == device
assert output.device.type == device
assert x.dtype == torch.float64
assert model.mm.dtype == torch.float64
assert output.dtype == torch.float64
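# The test above assumes a `SimpleNN` module whose tensors follow .to() and
# .double(). A minimal sketch of such a model (an assumption, not the original
# definition used by the test):
import torch
from torch import nn

class SimpleNN(nn.Module):
    def __init__(self):
        super().__init__()
        # register_buffer makes `mm` move and cast together with the module
        self.register_buffer("mm", torch.ones(10))

    def forward(self, x):
        return x * self.mm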
| 5,344,300
|
def sRGB_to_sd_Mallett2019(RGB):
"""
Recovers the spectral distribution of given *sRGB* colourspace array using
*Mallett and Yuksel (2019)* method.
Parameters
----------
RGB : array_like, (3,)
*sRGB* colourspace array. Do not apply a transfer function to the
*RGB* values.
Returns
-------
SpectralDistribution
Recovered reflectance.
"""
basis = MultiSpectralDistributions(BASIS_sRGB_MALLETT2019,
SPECTRAL_SHAPE_sRGB_MALLETT2019.range())
return RGB_to_sd_Mallett2019(RGB, basis)
| 5,344,301
|
def test_model_with_circular_imports():
"""
Basic test for FQNImportURI + circular imports
"""
#################################
# META MODEL DEF
#################################
my_meta_model = metamodel_from_file(
join(abspath(dirname(__file__)),
'interface_model1', 'Interface.tx'))
my_meta_model.register_scope_providers(
{"*.*": scoping_providers.FQNImportURI()})
#################################
# MODEL PARSING
#################################
my_model = my_meta_model.model_from_file(
join(abspath(dirname(__file__)),
"interface_model1", "model_c", "A.if"))
#################################
# TEST MODEL
#################################
imports = get_children_of_type("Import", my_model)
assert len(imports) > 0
for i in imports:
assert 1 == len(i._tx_loaded_models) # one file / load import
assert i.importURI in i._tx_loaded_models[0]._tx_filename
check_unique_named_object_has_class(my_model, "A", "Interface")
a = get_unique_named_object(my_model, "A")
a_self = get_children(lambda x: hasattr(x, 'name') and x.name == "self", a)
assert len(a_self) == 1
a_self = a_self[0]
a_other = get_children(
lambda x: hasattr(x, 'name') and x.name == "other", a)
assert len(a_other) == 1
a_other = a_other[0]
a_other_self = get_children(
lambda x: hasattr(x, 'name') and x.name == "self", a_other.ref)
assert len(a_other_self) == 1
a_other_self = a_other_self[0]
a_other_other = get_children(
lambda x: hasattr(x, 'name') and x.name == "other", a_other.ref)
assert len(a_other_other) == 1
a_other_other = a_other_other[0]
assert a_self.ref == a_other_other.ref
assert a_self.ref != a_other.ref
assert a_other.ref == a_other_self.ref
assert a_other.ref != a_other_other.ref
#################################
# END
#################################
| 5,344,302
|
def stationary_traffic_matrix(topology, mean, stddev, gamma, log_psi, n,
max_u=0.9,
origin_nodes=None, destination_nodes=None):
"""
Return a stationary sequence of traffic matrices.
The sequence is generated by first creating a single matrix whose traffic
volumes are drawn from a lognormal distribution and assigned to specific
origin-destination pairs using the Ranking Metrics Heuristic method
proposed by Nucci et al. [2]_. Then, all matrices of the sequence are
generated by adding zero-mean normal fluctuations to these traffic
volumes. This process was originally proposed in [2]_.
Stationary sequences of traffic matrices are generally suitable for
modeling network traffic over short periods (up to 1.5 hours). Over longer
periods, real traffic exhibits diurnal patterns which are better
modelled by cyclostationary sequences.
Parameters
----------
topology : topology
The topology for which the traffic matrix is calculated. This topology
can either be directed or undirected. If it is undirected, this
function assumes that all links are full-duplex.
mean : float
The mean volume of traffic among all origin-destination pairs
stddev : float
The standard deviation of volumes among all origin-destination pairs.
gamma : float
Parameter expressing the relation between the mean and the standard
deviation of the traffic volume of a specific flow over time
log_psi : float
Natural logarithm of psi, a parameter expressing the relation between the
mean and the standard deviation of the traffic volume of a specific flow
over time
n : int
Number of matrices in the sequence
max_u : float, optional
The maximum link utilization. If specified, traffic volumes are
scaled so that the most utilized link of the network has a utilization
equal to max_u. If None, volumes are not scaled, but in this case links
may end up with a utilization factor greater than 1.0
origin_nodes : list, optional
A list of all nodes which can be traffic sources. If not specified,
all nodes of the topology are traffic sources
destination_nodes : list, optional
A list of all nodes which can be traffic destinations. If not specified,
all nodes of the topology are traffic destinations
Returns
-------
tms : TrafficMatrixSequence
References
----------
.. [2] Nucci et al., The problem of synthetically generating IP traffic
matrices: initial recommendations, ACM SIGCOMM Computer Communication
Review, 35(3), 2005
"""
tm_sequence = TrafficMatrixSequence()
static_tm = static_traffic_matrix(topology, mean, stddev, max_u=None,
origin_nodes=origin_nodes,
destination_nodes=destination_nodes)
volume_unit = static_tm.attrib['volume_unit']
mean_dict = static_tm.flows()
psi = exp(log_psi)
if psi == 0.0:
raise ValueError("The value of log_psi provided is too small and "
"causes psi=0.0, which makes the standard deviation "
"of random fluctuation to become infinite. Try with "
"a greater value of log_psi")
std_dict = {(o, d): (m / psi) ** (1.0 / gamma)
for (o, d), m in mean_dict.items()}
if any(isinf(std) for std in std_dict.values()):
raise ValueError("The value of log_psi or gamma provided are too "
"small and causes the standard deviation of random "
"fluctuations to become infinite. Try with a greater "
"value of log_psi and/or gamma")
flows = {}
for o, d in mean_dict:
# Implementation without Numpy:
# flows[(o, d)] = [max([0, normalvariate(mean_dict[(o, d)],
# std_dict[(o, d)])]) for _ in range(n)]
flows[(o, d)] = [max((0, normal(mean_dict[(o, d)], std_dict[(o, d)])))\
for _ in range(n)]
for i in range(n):
traffic_matrix = TrafficMatrix(volume_unit=volume_unit)
for o, d in mean_dict:
traffic_matrix.add_flow(o, d, flows[(o, d)][i])
tm_sequence.append(traffic_matrix)
if max_u is not None:
if origin_nodes is not None:
shortest_path = dict(
(node, nx.single_source_dijkstra_path(topology,
node,
weight='weight'))
for node in origin_nodes)
else:
shortest_path = dict(nx.all_pairs_dijkstra_path(topology,
weight='weight'))
current_max_u = max((max(link_loads(topology,
tm_sequence.get(i),
shortest_path
).values())
for i in range(n)))
norm_factor = max_u / current_max_u
for i in range(n):
for o, d in mean_dict:
tm_sequence.matrix[i].flow[o][d] *= norm_factor
return tm_sequence
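# A minimal usage sketch. Assumption: this function lives in (or alongside) the
# fnss package, whose topology helpers provide the 'capacity' and 'weight' link
# attributes needed when max_u is given. Each flow volume is drawn from
# N(mean_od, sigma_od) with sigma_od = (mean_od / exp(log_psi)) ** (1 / gamma),
# clipped at zero, exactly as in the body above.
import fnss

topology = fnss.erdos_renyi_topology(50, 0.2)
fnss.set_capacities_constant(topology, 10, 'Mbps')
fnss.set_weights_constant(topology, 1)
tms = stationary_traffic_matrix(topology, mean=0.5, stddev=0.05,
                                gamma=0.8, log_psi=-0.3, n=5, max_u=0.9)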
| 5,344,303
|
def gen_coverage_badge(
input_file=None,
output_file=None,
webshields=None,
verbose=None,
silent=None
):
"""
This command generates a badge for the coverage results, from an XML file in
the 'coverage' format. Such a file can be for example generated using the
python `coverage` tool, or java `cobertura`.
By default the input file is the relative `./reports/coverage/coverage.xml`
and the output file is `./coverage-badge.svg`. You can change these settings
with the `-i/--input_file` and `-o/--output-file` options.
You can use the verbose flag `-v/--verbose` to display information on the
input file contents, for verification.
The resulting badge will by default look like this: [coverage | 98.1%] where
98.1 is the total coverage, obtained from the branch and line coverages
using the formula
(nb_lines_covered + nb_branches_covered) / (nb_lines_valid + nb_branches_valid)
and multiplying this by 100.
"""
# Process i/o files
input_file, input_file_path = _process_infile(input_file, "reports/coverage/coverage.xml")
output_file, output_file_path, is_stdout = _process_outfile(output_file, "coverage-badge.svg")
# First retrieve the coverage info from the coverage xml
try:
cov_stats = get_coverage_stats(coverage_xml_file=input_file)
except FileNotFoundError:
raise click.exceptions.FileError(input_file, hint="File not found")
if not silent and verbose and not is_stdout:
click.echo("""Coverage results parsed successfully from %(ifp)r
- Branch coverage: %(bcp).2f%% (%(bc)s/%(bv)s)
- Line coverage: %(lcp).2f%% (%(lc)s/%(lv)s)
- Total coverage: %(tcp).2f%% ((%(bc)s+%(lc)s)/(%(bv)s+%(lv)s))
""" % dict(ifp=input_file_path, tcp=cov_stats.total_coverage,
bcp=cov_stats.branch_coverage, bc=cov_stats.branches_covered, bv=cov_stats.branches_valid,
lcp=cov_stats.line_coverage, lc=cov_stats.lines_covered, lv=cov_stats.lines_valid))
# Generate the badge
badge = get_coverage_badge(cov_stats)
badge.write_to(output_file if is_stdout else output_file_path, use_shields=webshields)
if not silent and not is_stdout:
click.echo("SUCCESS - Coverage badge created: %r" % str(output_file_path))
| 5,344,304
|
def test_MuonRingFitter(method):
"""test MuonRingFitter"""
# flashCam example
center_xs = 0.3 * u.m
center_ys = 0.6 * u.m
radius = 0.3 * u.m
width = 0.05 * u.m
muon_model = toymodel.RingGaussian(
x=center_xs, y=center_ys, radius=radius, sigma=width,
)
# testing with flashcam
geom = CameraGeometry.from_name("FlashCam")
charge, _, _ = muon_model.generate_image(geom, intensity=1000, nsb_level_pe=5,)
survivors = tailcuts_clean(geom, charge, 10, 12)
muonfit = MuonRingFitter(fit_method=method)
fit_result = muonfit(geom.pix_x, geom.pix_y, charge, survivors)
print(fit_result)
print(center_xs, center_ys, radius)
assert u.isclose(fit_result.center_x, center_xs, 5e-2)
assert u.isclose(fit_result.center_y, center_ys, 5e-2)
assert u.isclose(fit_result.radius, radius, 5e-2)
| 5,344,305
|
def _extract_dims(
m: ArrayLike,
target: int,
depth: int = 0
) -> Iterator[ArrayLike]:
"""
Extract the requested dimension.
Mainly used only to extract the last two dimensions of a matrix.
As not really generalized for "any" dimension, not really good to expose publicly.
"""
if depth == target:
if isinstance(m[0], Sequence):
yield cast(ArrayLike, [[cast(ArrayLike, x)[r] for x in m] for r in range(len(m[0]))])
else:
yield m
else:
for m2 in m:
yield from cast(ArrayLike, _extract_dims(cast(ArrayLike, m2), target, depth + 1))
| 5,344,306
|
def lsst_exposure_time(bands=''):
"""
Sample from the LSST exposure time distribution
"""
dist = {'u': 15.0, 'g': 15.0, 'r': 15.0, 'i': 15.0, 'z': 15.0, 'Y': 15.0}
return [dist[b] for b in bands.split(',')]
| 5,344,307
|
def bin2hexstring(bin_str):
"""
Convert a binary string to a hexadecimal string (4 bits per hex digit).
:param bin_str: binary string
:return: hexadecimal string
"""
bin_len = len(bin_str)
left = 0
right = 4
re_str = hex(int(bin_str[left:right], 2))[2:]
for i in range(right, bin_len, 4):
left = right
right += 4
re_str += hex(int(bin_str[left:right], 2))[2:]
return re_str
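# Quick usage checks (assuming the input length is a multiple of 4 bits);
# the conversion is nibble-wise, so leading zero nibbles are preserved.
assert bin2hexstring('10101100') == 'ac'
assert bin2hexstring('00000001') == '01'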
| 5,344,308
|
def check_dict_word(word, target):
"""
Check a dictionary word: if any character is not in the searched word, the word is not added to python_dict.
:param word: str, word from dictionary.txt.
:param target: str, the word being searched for
:return: True if the lengths match and every character of word appears in target.
"""
# Level one: check len
if len(word) == len(target):
# Check all the word: contains -> contains, contais
for ch in word:
if ch not in target:
return False
else:
if ch == word[len(word)-1]:
return True
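# Usage sketch: same length and every character shared -> True; a missing
# character -> False; different lengths fall through and return None (falsy).
assert check_dict_word('listen', 'silent') is True
assert check_dict_word('cat', 'cot') is False
assert check_dict_word('cat', 'cart') is None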
| 5,344,309
|
def print_content():
"""
Print all content.
"""
content = TARIELI.get_full_content()
print(content)
app_continue()
| 5,344,310
|
def get_memory_in_GB(memory_str):
"""Returns the memory value in GB from a given string in kB"""
try:
return '{0} GB'.format(int(memory_str[:-2]) / 1000000)
except (ValueError, TypeError):
return ''
| 5,344,311
|
def test_explain_instance_classification(caplog):
"""
Tests :mod:`fatf.transparency.lime.Lime.explain_instance` method.
These tests are for a classification task.
"""
runtime_error_no_predictor = 'A predictive function is not available.'
runtime_error_non_prob = ('The predictive model is not probabilistic. '
'Please specify a predictive function instead.')
# Check logging
assert len(caplog.records) == 0
# Non-probabilistic model -- function -- probabilistic function
with pytest.warns(None) as warning:
lime = ftl.Lime(
futt.NUMERICAL_NP_ARRAY,
model=CLF_NON_PROBA,
predict_fn=CLF.predict_proba,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 2
assert str(warning[0].message) == FUTURE_WARNING
assert str(warning[1].message) == USER_WARNING_MODEL_PRED
explained = lime.explain_instance(SAMPLE, predict_fn=CLF.predict_proba)
assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
# Non-probabilistic model -- function -- no function
with pytest.warns(None) as warning:
lime = ftl.Lime(
futt.NUMERICAL_NP_ARRAY,
model=CLF_NON_PROBA,
predict_fn=CLF.predict_proba,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 2
assert str(warning[0].message) == FUTURE_WARNING
assert str(warning[1].message) == USER_WARNING_MODEL_PRED
explained = lime.explain_instance(SAMPLE)
assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
# Non-probabilistic model -- no function -- probabilistic function
with pytest.warns(FutureWarning) as warning:
lime = ftl.Lime(
futt.NUMERICAL_NP_ARRAY,
model=CLF_NON_PROBA,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 1
assert str(warning[0].message) == FUTURE_WARNING
explained = lime.explain_instance(SAMPLE, predict_fn=CLF.predict_proba)
assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
# Non-probabilistic model -- no function -- no function
with pytest.warns(FutureWarning) as warning:
lime = ftl.Lime(
futt.NUMERICAL_NP_ARRAY,
model=CLF_NON_PROBA,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 1
assert str(warning[0].message) == FUTURE_WARNING
with pytest.raises(RuntimeError) as exin:
lime.explain_instance(SAMPLE_STRUCT)
assert str(exin.value) == runtime_error_non_prob
# Check logging
assert len(caplog.records) == 4
for i in range(4):
assert caplog.records[i].levelname == 'WARNING'
assert caplog.records[i].getMessage() == LOG_WARNING
# No model -- function -- probabilistic function
with pytest.warns(FutureWarning) as warning:
lime = ftl.Lime(
futt.NUMERICAL_STRUCT_ARRAY,
predict_fn=CLF.predict_proba,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 1
assert str(warning[0].message) == FUTURE_WARNING
explained = lime.explain_instance(SAMPLE, predict_fn=CLF.predict_proba)
assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
# No model -- function -- no function
with pytest.warns(FutureWarning) as warning:
lime = ftl.Lime(
futt.NUMERICAL_STRUCT_ARRAY,
predict_fn=CLF.predict_proba,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 1
assert str(warning[0].message) == FUTURE_WARNING
explained = lime.explain_instance(SAMPLE)
assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
# No model -- no function -- probabilistic function
with pytest.warns(FutureWarning) as warning:
lime = ftl.Lime(
futt.NUMERICAL_NP_ARRAY,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 1
assert str(warning[0].message) == FUTURE_WARNING
explained = lime.explain_instance(SAMPLE, predict_fn=CLF.predict_proba)
assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
# No model -- no function -- no function
with pytest.warns(FutureWarning) as warning:
lime = ftl.Lime(
futt.NUMERICAL_NP_ARRAY,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 1
assert str(warning[0].message) == FUTURE_WARNING
with pytest.raises(RuntimeError) as exin:
lime.explain_instance(SAMPLE)
assert str(exin.value) == runtime_error_no_predictor
# Check logging
assert len(caplog.records) == 4
# Probabilistic model -- probabilistic function -- empty call
with pytest.warns(None) as warning:
lime = ftl.Lime(
futt.NUMERICAL_NP_ARRAY,
model=CLF,
predict_fn=CLF.predict_proba,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 2
assert str(warning[0].message) == FUTURE_WARNING
assert str(warning[1].message) == USER_WARNING_MODEL_PRED
explained = lime.explain_instance(SAMPLE_STRUCT)
assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
#
# Probabilistic model -- probabilistic function -- non-empty call
with pytest.warns(None) as warning:
lime = ftl.Lime(
futt.NUMERICAL_NP_ARRAY,
model=CLF,
predict_fn=CLF.predict_proba,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 2
assert str(warning[0].message) == FUTURE_WARNING
assert str(warning[1].message) == USER_WARNING_MODEL_PRED
explained = lime.explain_instance(SAMPLE, predict_fn=CLF.predict_proba)
assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
#
# Probabilistic model -- no function -- empty call
with pytest.warns(FutureWarning) as warning:
lime = ftl.Lime(
futt.NUMERICAL_STRUCT_ARRAY,
model=CLF,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 1
assert str(warning[0].message) == FUTURE_WARNING
explained = lime.explain_instance(SAMPLE)
assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
#
# Probabilistic model -- no function -- non-empty call
with pytest.warns(FutureWarning) as warning:
lime = ftl.Lime(
futt.NUMERICAL_STRUCT_ARRAY,
model=CLF,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES)
assert len(warning) == 1
assert str(warning[0].message) == FUTURE_WARNING
explained = lime.explain_instance(
SAMPLE_STRUCT, predict_fn=CLF.predict_proba)
assert futt.is_explanation_equal_list(explained, NUMERICAL_RESULTS)
# Check logging
assert len(caplog.records) == 4
###########################################################################
# Test with categorical features: feat0 and feat1
cat_feat = [0, 1]
with pytest.warns(FutureWarning) as warning:
lime = ftl.Lime(
futt.NUMERICAL_NP_ARRAY,
model=CLF,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES,
categorical_features=cat_feat)
assert len(warning) == 1
assert str(warning[0].message) == FUTURE_WARNING
explained = lime.explain_instance(SAMPLE_STRUCT)
assert futt.is_explanation_equal_list(CATEGORICAL_RESULTS, explained)
cat_feat = ['a', 'b']
with pytest.warns(FutureWarning) as warning:
lime = ftl.Lime(
futt.NUMERICAL_STRUCT_ARRAY,
model=CLF,
class_names=CLASS_NAMES,
feature_names=FEATURE_NAMES,
categorical_features=cat_feat)
assert len(warning) == 1
assert str(warning[0].message) == FUTURE_WARNING
explained = lime.explain_instance(SAMPLE)
assert futt.is_explanation_equal_list(CATEGORICAL_RESULTS, explained)
# Check logging
assert len(caplog.records) == 4
| 5,344,312
|
def get_slot_dict(token_present=False):
"""Compiles a dictionary of the available slots
:returns: A python dictionary of the available slots
"""
ret, slot_list = c_get_slot_list(token_present)
if (ret != 0):
return ret
slot_dict = {}
ret = CKR_OK
for slot in slot_list:
ret, data = c_get_slot_info(slot)
if ret != CKR_OK:
LOG.error("C_GetSlotInfo failed at slot %s")
break
slot_dict[slot] = data
return ret, slot_dict
| 5,344,313
|
def submit_bwa_map(insert_size, ref_genome, read_1, read_2, output):
"""The `bwa mem` function."""
map_cmd = "bwa mem -t 24 -I {} {} {} {}".format(insert_size, ref_genome, read_1, read_2)
map_cmd += " | samtools sort -m 5G -@24 -O bam -T {} -o {}.bam".format(output, output)
"""Create a .sh files with the `bwa mem` function."""
file = open('{}.sh'.format(output),'w')
file.write('#!/bin/bash \n')
file.write('#SBATCH --partition normal \n')
file.write('#SBATCH --mem 256G \n')
file.write('#SBATCH -c 24 \n')
file.write('#SBATCH --time=12:00:00 \n')
file.write(map_cmd)
file.write('\n')
file.close()
"""Submit the .sh to the server"""
sub_cmd = "sbatch -o {}.out {}.sh".format(output, output)
subprocess.call(sub_cmd, shell=True)
| 5,344,314
|
def main():
"""Command line conversion of a PPK file to an OpenSSH file
python -m puttykeys myprivatekey.ppk [password] > id_rsa"""
if len(sys.argv) > 1:
f=open(sys.argv[1],'r')
ppkraw = f.read()
f.close()
if len(sys.argv) > 2:
sys.stdout.write(ppkraw_to_openssh(ppkraw, sys.argv[2]))
else:
sys.stdout.write(ppkraw_to_openssh(ppkraw))
| 5,344,315
|
def load_segment_by_patient(patient):
"""
Load the pixels for a patient and segment all of them
"""
pixels = load_pixels_by_patient(patient)
segments = []
for pixel in pixels:
segments.append(segment(pixel))
return np.array(segments)
| 5,344,316
|
def filehash(thisfile, filesha):
"""
First parameter, filename
Returns SHA1 sum as a string of hex digits
"""
try:
filehandle = open(thisfile, "rb")
except:
return ""
data = filehandle.read()
while data != b"":
filesha.update(data)
data = filehandle.read()
filehandle.close()
return filesha.hexdigest()
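# Usage sketch (hypothetical path; any hashlib object can be passed in and
# its hex digest is returned, or '' if the file cannot be opened):
import hashlib
digest = filehash("example.txt", hashlib.sha1())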
| 5,344,317
|
def object_size(o):
"""
Calls `getsizeof <https://docs.python.org/3/library/sys.html#sys.getsizeof>`_.
@param o object
@return size of the object, excluding referenced objects.
"""
return getsizeof(o)
| 5,344,318
|
def show_date(
enode,
_shell='vtysh',
_shell_args={
'matches': None,
'newline': True,
'timeout': None,
'connection': None
}
):
"""
Display system date information
This function runs the following vtysh command:
::
# show date
:param dict kwargs: arguments to pass to the send_command of the
vtysh shell.
:param str _shell: shell to be selected
:param dict _shell_args: low-level shell API arguments
:return: A dictionary as returned by
:func:`topology_lib_vtysh.parser.parse_show_date`
"""
cmd = [
'show date'
]
shell = enode.get_shell(_shell)
shell.send_command(
(' '.join(cmd)).format(**locals()), **_shell_args
)
result = shell.get_response(
connection=_shell_args.get('connection', None)
)
return parse_show_date(result)
| 5,344,319
|
def show_score(connection, amt):
"""
show_score
:param connection: :class:`sqlite3`
:param amt: int
:return: int
"""
sand = read_sum(connection, "sand", amt)
putt = read_sum(connection, "putt", amt)
return sand + putt
| 5,344,320
|
def current_time():
""" current_time() -> str
>>> current_time()
14:28:04
Returns the current local time in the 24-hour clock format.
"""
return time.strftime('%X', (time.localtime()))
| 5,344,321
|
def kernel_s_xz2(y, x, z, zc, yp, xp, zp):
"""
Kernel for xz-component of stress in the semi-infinite space domain
(2nd system)
"""
# Y = y - yp
# X = x - xp
# Z = z - zp - 2 * zc
Y = yp - y
X = xp - x
Z = zp - z + 2 * zc
rho = np.sqrt(Y ** 2 + X ** 2 + Z ** 2)
kernel = (
safe_log(Y + rho)
)
return kernel
| 5,344,322
|
def prepare_hex_string(number, base=10):
"""
Takes a number as a string in the given base and returns its hex representation, zero-padded on the left to an even number of digits
"""
int_number = int(number, base)
hex_number = format(int_number, 'X')
# Takes the string and pads to the left to make sure the number of characters is even
justify_hex_number = hex_number.rjust((len(hex_number) % 2) + len(hex_number), '0')
return justify_hex_number
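# Usage sketch: the output always has an even number of hex digits.
assert prepare_hex_string('255') == 'FF'
assert prepare_hex_string('4095') == '0FFF'
assert prepare_hex_string('ff', base=16) == 'FF'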
| 5,344,323
|
def parse_function(image_size, raw_image_key_name):
"""Generate parse function for parsing the TFRecord training dataset.
Read the image example and resize it to desired size.
Args:
image_size: int, target size to resize the image to
raw_image_key_name: str, name of the JPEG image in each TFRecord entry
Returns:
A map function to use with tf.data.Dataset.map() .
"""
def func(example_proto):
"""A generator to be used as representative_dataset for TFLiteConverter."""
image_raw = tf.io.parse_single_example(
example_proto,
features={raw_image_key_name: tf.FixedLenFeature([], tf.string)},
)
image = tf.image.decode_jpeg(image_raw[raw_image_key_name])
image = tf.expand_dims(image, axis=0)
image = tf.image.resize_bilinear(image, (image_size, image_size))
image = tf.squeeze(image, axis=0)
image = image / 255.0
return image
return func
| 5,344,324
|
def clean_coverage(x):
"""
Cleans the coverage polygons by removing small multipolygon shapes.
Parameters
---------
x : polygon
Feature to simplify.
Returns
-------
MultiPolygon : MultiPolygon
Shapely MultiPolygon geometry without tiny shapes.
"""
# if it's a single polygon, just return the polygon geometry
if x.geometry.geom_type == 'Polygon':
if x.geometry.area > 1e7:
return x.geometry
# if it's a multipolygon, try to simplify it and
# drop shapes that are too small.
elif x.geometry.geom_type == 'MultiPolygon':
threshold = 1e7
# save remaining polygons as new multipolygon for
# the specific country
new_geom = []
for y in x.geometry:
if y.area > threshold:
new_geom.append(y)
return MultiPolygon(new_geom)
| 5,344,325
|
def send():
"""For testing: Example of activating a background task."""
log.info("executing a background task")
bgtasks.send_email.spool(email="tomi@tomicloud.com",
subject="Hello world!", template="welcome.html")
return jsonify({"reply":"background task will start"}), 200
| 5,344,326
|
def get_today_month_and_day() -> str:
"""Returns today's month and day in the format: %m-%d"""
return datetime.date.today().strftime("%m-%d")
| 5,344,327
|
def climate_zone_to_tmy3_stations(climate_zone):
"""Return TMY3 weather stations falling within in the given climate zone.
Parameters
----------
climate_zone : str
String representing a climate zone.
Returns
-------
stations : list of str
Strings representing TMY3 station ids.
"""
return _load_climate_zone_to_tmy3_stations_index().get(climate_zone, None)
| 5,344,328
|
def artist_html_file_path(artist) -> Path: # Used
"""Return absolute artists HTML file path.
Parameters
----------
artist
Artist name.
Returns
-------
:class:`Path`
    Absolute path of the artist's HTML file.
"""
artist_file_name = re.sub(r"[\s/]", "_", artist)
return artists_dir_path().joinpath(f"{artist_file_name}.html")
| 5,344,329
|
def _deepfoolx_batch(model, epochs, eta, clip_min, clip_max):
"""DeepFool for multi-class classifiers in batch mode.
"""
original_model_X = model.X
y0 = tf.stop_gradient(model.prob)
B, ydim = tf.shape(y0)[0], y0.get_shape().as_list()[1]
k0 = tf.argmax(y0, axis=1, output_type=tf.int32)
k0 = tf.stack((tf.range(B), k0), axis=1)
xshape = original_model_X.get_shape().as_list()[1:]
xdim = _prod(xshape)
perm = list(range(len(xshape) + 2))
perm[0], perm[1] = perm[1], perm[0]
def _cond(i, z):
return tf.less(i, epochs)
def _body(i, z):
xadv = tf.clip_by_value(original_model_X + z*(1+eta), clip_min, clip_max)
model.X = xadv
model.build_arch()
model.normalize_scores()
y = model.prob
gs = [tf.gradients(y[:, j], xadv)[0] for j in range(ydim)]
g = tf.stack(gs, axis=0)
g = tf.transpose(g, perm)
yk = tf.expand_dims(tf.gather_nd(y, k0), axis=1)
gk = tf.expand_dims(tf.gather_nd(g, k0), axis=1)
a = tf.abs(y - yk)
b = g - gk
c = tf.norm(tf.reshape(b, [-1, ydim, xdim]), axis=-1)
# Assume 1) 0/0=tf.nan 2) tf.argmin ignores nan
score = a / c
ind = tf.argmin(score, axis=1, output_type=tf.int32)
ind = tf.stack((tf.range(B), ind), axis=1)
si, bi = tf.gather_nd(score, ind), tf.gather_nd(b, ind)
si = tf.reshape(si, [-1] + [1]*len(xshape))
dx = si * bi
return i+1, z+dx
_, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(original_model_X)],
name='_deepfoolx_batch', back_prop=False)
return noise
| 5,344,330
|
def tokenize(text):
"""
The function to tokenize and lemmatize the text.
Inputs:
text: the text which needs to be tokenized
Outputs:
tokens: tokens which can be used in machine learning
"""
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
tokens = word_tokenize(text)
tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
return tokens
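# Usage sketch (assumes the module-level imports -- re, NLTK's stopwords,
# WordNetLemmatizer and word_tokenize -- plus the NLTK corpora are available):
tokens = tokenize("The quick brown foxes are jumping over the lazy dogs!")
# roughly: ['quick', 'brown', 'fox', 'jumping', 'lazy', 'dog']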
| 5,344,331
|
def remove_task(name: str):
"""
Delete a task identified by its "name":
- **name**: each task must have a name
"""
name_idx = _db_has_name(name)
if name_idx is None:
raise HTTPException(status_code = 400, detail = {"message" : "name doesn't exists"})
else:
del db["tasks"][name_idx]
_write_json()
return name
| 5,344,332
|
def set_dj_definition(cls, type_map: dict = None):
"""Set the definition property of a class by inspecting its attributes.
Params:
cls: The class whose definition attribute should be set
type_map: Optional additional type mappings
"""
# A mapping between python types and DataJoint types
_type_map = {
"int": "int",
"str": "varchar(256)",
"float": "float",
"Quantity": "float",
"datetime": "datetime",
"datetime.datetime": "datetime",
"bool": "tinyint",
"list": "longblob",
}
# A list of python types which have no DataJoint
# equivalent and so are unsupported
unsupported = [dict]
if type_map:
_type_map.update(type_map)
dj_def = "%s_id: int auto_increment\n---\n" % cls.__name__.lower()
cls_items = cls.__annotations__.items()
for attr, type_hint in cls_items:
if type_hint in unsupported:
continue
name = getattr(type_hint, "__name__", type_hint)
default = getattr(cls, attr)
if isinstance(default, str):
default = '"%s"' % default
elif isinstance(default, bool):
default = int(default)
else:
default = "NULL"
if getattr(type_hint, '_name', "") == 'Dict':
cls = handle_dict(cls, _type_map, attr, type_hint)
continue
elif name in _type_map:
dj_def += "%s = %s : %s\n" % (attr, default, _type_map[name])
else:
dj_def += "-> %s\n" % name
cls.definition = dj_def
return cls
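# A sketch of the generated definition for a small annotated class
# (hypothetical example, assuming plain int/str/float/bool annotations):
class Probe:
    name: str = "tetrode"
    depth: float = 0.0
    active: bool = True

set_dj_definition(Probe)
# Probe.definition is now roughly:
#   probe_id: int auto_increment
#   ---
#   name = "tetrode" : varchar(256)
#   depth = NULL : float
#   active = 1 : tinyint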
| 5,344,333
|
def test_location_string():
"""Tests that Locations __str__ method returns correctly."""
test_obj = Location("Hovel", "a drab and dingy room", (0, 0, 0))
assert test_obj.__str__() == "Hovel - a drab and dingy room"
| 5,344,334
|
def mmap_zeros(shape, dtype):
"""
Create a zero-filled shared memory array.
"""
new = anonymousmemmap(shape, dtype)
new[:] = 0.0
return new
| 5,344,335
|
def edge_disjoint_paths(g: Graph, source: Node, sink: Node) -> Iterable:
""" Given directed graph G, and two nodes s and t, find k paths from
s to t such that no two paths share an edge.
Menger’s Theorem: Given a directed graph G with nodes s,t the maximum number of
edge-disjoint s-t paths equals the minimum number of edges whose
removal separates s from t.
Suppose you want to send k large files from s to t but never have two files use
the same network link (to avoid congestion on the links).
"""
for u in g:
for v in g[u]:
g[u][v].cap = 1
fifo_push_relabel(g, source, sink)
# use dfs to find the paths
S, paths = [source], []
visited = defaultdict(lambda: False)
pred = defaultdict(lambda: None)
while S:
u = S.pop()
if u == sink:
path = [sink]
current = pred[sink]
while current is not None:
path.append(current)
current = pred[current]
paths.append(tuple(reversed(path)))
continue
if visited[u]:
continue
visited[u] = True
for v in adjacency(g, u):
if not visited[v] and g[u][v].flow:
S.append(v)
pred[v] = u
return iter(paths)
| 5,344,336
|
def listToMLlibVectorUDF(col):
""" Map struct column from list to MLlib vector """
return Column(default().listToMLlibVectorUDF(col._jc))
| 5,344,337
|
def change_file_paths_to_showcase(df, showcase_dir="/showcase_data/raw_data"):
"""Changes file paths to use showcase directory"""
output = df.copy()
if "file_path" in df.columns:
output.loc[:, "file_path"] = df.file_path.apply(
lambda x: add_path(x, showcase_dir)
)
if "file_path_feature_values" in df.columns:
output.loc[:, "file_path_feature_values"] = df.file_path_feature_values.apply(
lambda x: add_path(x, showcase_dir)
)
if "cluster_id_path" in df.columns:
output.loc[:, "cluster_id_path"] = df.cluster_id_path.apply(
lambda x: add_path(x, showcase_dir)
)
if "thumbnail_path" in df.columns:
output.loc[:, "thumbnail_path"] = df.thumbnail_path.apply(
lambda x: add_path(x, showcase_dir)
)
if "file_path_small" in df.columns:
output.loc[:, "file_path_small"] = df.file_path_small.apply(
lambda x: add_path(x, showcase_dir)
)
return output
| 5,344,338
|
def clear_message(self, trace_number):
"""
UI function
"""
if(trace_number == 0):
self.Message1.configure(background="green", font=("Helvetica",24))
elif(trace_number == 1):
self.Message1.configure(background="#d9d9d9", font=("Helvetica",10))
self.Message2.configure(background="green", font=("Helvetica",24))
elif(trace_number == 2):
self.Message2.configure(background="#d9d9d9", font=("Helvetica",10))
self.Message3.configure(background="green", font=("Helvetica",24))
elif(trace_number == 3):
self.Message3.configure(background="#d9d9d9", font=("Helvetica",10))
self.Message4.configure(background="green", font=("Helvetica",24))
elif(trace_number == 4):
self.Message4.configure(background="#d9d9d9", font=("Helvetica",10))
self.Message5.configure(background="green", font=("Helvetica",24))
| 5,344,339
|
def match_conftest_error(line):
"""
Extract `ConftestImportFailure` error message from a string.
:param line: A string to pattern match against.
:returns: A dictionary where the key `file_path` holds the file path and the
key `error` the error description. If not matched, the dictionary is
empty.
"""
return match_pattern(
r"^E\s+.*ConftestImportFailure: "
"\(local\('(?P<file_path>.*)'\), \((?P<error>.*)\)\)$",
line,
)
| 5,344,340
|
def modifyModlist(
old_entry,new_entry,ignore_attr_types=None,ignore_oldexistent=0
):
"""
Build differential modify list for calling LDAPObject.modify()/modify_s()
old_entry
Dictionary holding the old entry
new_entry
Dictionary holding what the new entry should be
ignore_attr_types
List of attribute type names to be ignored completely
ignore_oldexistent
If non-zero attribute type names which are in old_entry
but are not found in new_entry at all are not deleted.
This is handy for situations where your application
sets attribute value to '' for deleting an attribute.
In most cases leave zero.
"""
ignore_attr_types = list_dict(map(string.lower,(ignore_attr_types or [])))
modlist = []
attrtype_lower_map = {}
for a in old_entry.keys():
attrtype_lower_map[string.lower(a)]=a
for attrtype in new_entry.keys():
attrtype_lower = string.lower(attrtype)
if ignore_attr_types.has_key(attrtype_lower):
# This attribute type is ignored
continue
# Filter away null-strings
new_value = filter(lambda x:x!=None,new_entry[attrtype])
if attrtype_lower_map.has_key(attrtype_lower):
old_value = old_entry.get(attrtype_lower_map[attrtype_lower],[])
old_value = filter(lambda x:x!=None,old_value)
del attrtype_lower_map[attrtype_lower]
else:
old_value = []
if not old_value and new_value:
# Add a new attribute to entry
modlist.append((ldap.MOD_ADD,attrtype,new_value))
elif old_value and new_value:
# Replace existing attribute
replace_attr_value = len(old_value)!=len(new_value)
if not replace_attr_value:
old_value_dict=list_dict(old_value)
new_value_dict=list_dict(new_value)
delete_values = []
for v in old_value:
if not new_value_dict.has_key(v):
replace_attr_value = 1
break
add_values = []
if not replace_attr_value:
for v in new_value:
if not old_value_dict.has_key(v):
replace_attr_value = 1
break
if replace_attr_value:
modlist.append((ldap.MOD_DELETE,attrtype,None))
modlist.append((ldap.MOD_ADD,attrtype,new_value))
elif old_value and not new_value:
# Completely delete an existing attribute
modlist.append((ldap.MOD_DELETE,attrtype,None))
if not ignore_oldexistent:
# Remove all attributes of old_entry which are not present
# in new_entry at all
for a in attrtype_lower_map.keys():
if ignore_attr_types.has_key(a):
# This attribute type is ignored
continue
attrtype = attrtype_lower_map[a]
modlist.append((ldap.MOD_DELETE,attrtype,None))
return modlist
| 5,344,341
|
def temporary_worker():
"""A pytest fixture that creates a temporary directory and a config file to match. Deletes directory after test"""
def run_worker():
with rq.Connection():
qs = 'labmanager_unittests'
w = rq.Worker(qs)
w.work()
# This task is used to kill the worker. Sometimes if tests fail the worker runs forever and
# holds up the entire process. This gives each test roughly 12 seconds to run before killing
# the worker and forcing the test to fail.
def watch_proc(p):
count = 0
while count < 12:
count = count + 1
time.sleep(1)
try:
p.terminate()
except:
pass
r = redis.Redis()
r.flushall()
worker_proc = multiprocessing.Process(target=run_worker)
worker_proc.daemon = True
worker_proc.start()
watchdog_thread = threading.Thread(target=watch_proc, args=(worker_proc,))
watchdog_thread.daemon = True
watchdog_thread.start()
dispatcher = Dispatcher('labmanager_unittests')
assert worker_proc.is_alive()
yield worker_proc, dispatcher
| 5,344,342
|
def regnety_3200m(**kwargs):
"""
Constructs a RegNet-Y model under 3200M FLOPs.
"""
model = RegNet(regnetY_3200M_config, **kwargs)
return model
| 5,344,343
|
def first_and_last_index(arr, number):
"""
Given a sorted array that may have duplicate values, use binary
search to find the first and last indexes of a given value.
Args:
arr(list): Sorted array (or Python list) that may have duplicate values
number(int): Value to search for in the array
Returns:
a list containing the first and last indexes of the given value
"""
# TODO: Write your first_and_last function here
# Note that you may want to write helper functions to find the start
# index and the end index
pass
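# One possible solution sketch (not the exercise's official answer): run a
# boundary-biased binary search twice, once for each end of the run.
def first_and_last_index_sketch(arr, number):
    def search(leftmost):
        lo, hi, result = 0, len(arr) - 1, -1
        while lo <= hi:
            mid = (lo + hi) // 2
            if arr[mid] == number:
                result = mid
                if leftmost:
                    hi = mid - 1    # keep searching to the left
                else:
                    lo = mid + 1    # keep searching to the right
            elif arr[mid] < number:
                lo = mid + 1
            else:
                hi = mid - 1
        return result
    return [search(True), search(False)]

assert first_and_last_index_sketch([0, 1, 2, 2, 3, 3, 3, 4, 5, 6], 3) == [4, 6]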
| 5,344,344
|
def _get_exposure(fname, stop=None):
"""
:param fname:
path of the XML file containing the exposure
:param stop:
node at which to stop parsing (or None)
:returns:
a pair (Exposure instance, list of asset nodes)
"""
[exposure] = nrml.read(fname, stop=stop)
if not exposure.tag.endswith('exposureModel'):
raise InvalidFile('%s: expected exposureModel, got %s' %
(fname, exposure.tag))
description = exposure.description
try:
conversions = exposure.conversions
except AttributeError:
conversions = Node('conversions', nodes=[Node('costTypes', [])])
try:
inslimit = conversions.insuranceLimit
except AttributeError:
inslimit = Node('insuranceLimit', text=True)
try:
deductible = conversions.deductible
except AttributeError:
deductible = Node('deductible', text=True)
try:
area = conversions.area
except AttributeError:
# NB: the area type cannot be an empty string because when sending
# around the CostCalculator object we would run into this numpy bug
# about pickling dictionaries with empty strings:
# https://github.com/numpy/numpy/pull/5475
area = Node('area', dict(type='?'))
try:
occupancy_periods = exposure.occupancyPeriods.text or ''
except AttributeError:
occupancy_periods = ''
try:
tagNames = exposure.tagNames
except AttributeError:
tagNames = Node('tagNames', text='')
tagnames = ~tagNames or []
tagnames.insert(0, 'taxonomy')
# read the cost types and make some check
cost_types = []
retrofitted = False
for ct in conversions.costTypes:
with context(fname, ct):
ctname = ct['name']
if ctname == 'structural' and 'retrofittedType' in ct.attrib:
if ct['retrofittedType'] != ct['type']:
raise ValueError(
'The retrofittedType %s is different from the type'
'%s' % (ct['retrofittedType'], ct['type']))
if ct['retrofittedUnit'] != ct['unit']:
raise ValueError(
'The retrofittedUnit %s is different from the unit'
'%s' % (ct['retrofittedUnit'], ct['unit']))
retrofitted = True
cost_types.append(
(ctname, valid.cost_type_type(ct['type']), ct['unit']))
if 'occupants' in cost_types:
cost_types.append(('occupants', 'per_area', 'people'))
cost_types.sort(key=operator.itemgetter(0))
cost_types = numpy.array(cost_types, cost_type_dt)
insurance_limit_is_absolute = il = inslimit.get('isAbsolute')
deductible_is_absolute = de = deductible.get('isAbsolute')
cc = CostCalculator(
{}, {}, {},
True if de is None else de,
True if il is None else il,
{name: i for i, name in enumerate(tagnames)},
)
for ct in cost_types:
name = ct['name'] # structural, nonstructural, ...
cc.cost_types[name] = ct['type'] # aggregated, per_asset, per_area
cc.area_types[name] = area['type']
cc.units[name] = ct['unit']
assets = []
asset_refs = []
exp = Exposure(
exposure['id'], exposure['category'],
description.text, cost_types, occupancy_periods,
insurance_limit_is_absolute, deductible_is_absolute, retrofitted,
area.attrib, assets, asset_refs, cc, TagCollection(tagnames))
return exp, exposure.assets
| 5,344,345
|
def salmon(**kwargs):
"""Convert output of Salmon into a feature counts file"""
from sequana import salmon
salmon_input = kwargs["input"]
output = kwargs["output"]
if not os.path.exists(salmon_input):
logger.critical("Input file does not exists ({})".format(salmon_input))
gff = kwargs["gff"]
attribute = kwargs["attribute"]
feature = kwargs["feature"]
# reads file generated by salmon and generated count file as expected by
# DGE.
s = salmon.Salmon(salmon_input, gff)
s.save_feature_counts(output, feature=feature, attribute=attribute)
| 5,344,346
|
async def test_internal_discovery_callback_fill_out_default_manufacturer(hass):
"""Test internal discovery automatically filling out information."""
discover_cast, _, _ = await async_setup_cast_internal_discovery(hass)
info = get_fake_chromecast_info(host="host1")
zconf = get_fake_zconf(host="host1", port=8009)
full_info = attr.evolve(
info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
)
with patch(
"homeassistant.components.cast.helpers.dial.get_device_status",
return_value=full_info,
), patch(
"homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
return_value=zconf,
):
signal = MagicMock()
async_dispatcher_connect(hass, "cast_discovered", signal)
discover_cast("the-service", info)
await hass.async_block_till_done()
# when called with incomplete info, it should use HTTP to get missing
discover = signal.mock_calls[0][1][0]
assert discover == attr.evolve(full_info, manufacturer="Google Inc.")
| 5,344,347
|
def dcos_api_session(dcos_api_session_factory):
""" Overrides the dcos_api_session fixture to use
exhibitor settings currently used in the cluster
"""
args = dcos_api_session_factory.get_args_from_env()
exhibitor_admin_password = None
expanded_config = get_expanded_config()
if expanded_config['exhibitor_admin_password_enabled'] == 'true':
exhibitor_admin_password = expanded_config['exhibitor_admin_password']
api = dcos_api_session_factory(
exhibitor_admin_password=exhibitor_admin_password,
**args)
api.wait_for_dcos()
return api
| 5,344,348
|
def BarycentricInterpolation(bins, pnts):
"""
barycentricinterpolation for given points,
return the barycentric coordinates for points within the grids
INPUT
bins - grids for discretization,
m-length array where bins[i] indicates the mesh along dimension i
pnts - an array of pnts, each points is an m-length indicates the Cartesian coordinates
can be n pnts in total
RETURN
indices - an n-length list of indices, each entry is d-length (d=m+1) for the interpolation points involved
coeffs - an n-length list of coefficients, each coefficients is d-length for reconstructing points n
A pythonic version barycentricinterpolation from Russ' drake utility function
does not support dcoefs currently...
"""
#note here the layout of input and output is different from the C++ version of drake
m = pnts.shape[1]
n = pnts.shape[0]
d = m+1
if len(bins) != m:
print('The number of bins must equal the dimension of the points.')  # validation
return None, None
binsize = [len(bins[i]) for i in range(m)]
nskip = np.concatenate([[1], np.cumprod([binsize[i] for i in range(m-1)])])
#a list of bary points for future sorting...
b = [{'dim':0, 'fracway':0.0, 'dfracway':0.0} for i in range(d)]
indices = np.zeros((n, d))
coeffs = np.zeros((n, d))
for j in range(n):
sidx = 0 # 0-index in our case...
for i in range(m):
pt = pnts[j, i]
curr_bin = bins[i]
curr_bin_size = binsize[i]
b[i]['dim'] = i
if curr_bin_size == 1: #singleton dimensions
#sidx is unchanged
b[i]['fracway'] = 1.0
elif pt > curr_bin[curr_bin_size-1]:
#larger than max bound of bin
sidx += nskip[i] * (curr_bin_size-1)
b[i]['fracway'] = 1.0
b[i]['dfracway'] = 0.0
elif pt < curr_bin[0]:
#less than min bound of bin
sidx += nskip[i]
b[i]['fracway'] = 0.0
b[i]['dfracway'] = 0.0
else:
#Russ commented that smarter search can be done here...
#i guess we can do it in a pythonic way...
next_bin_index = np.argmax(curr_bin>pt)
sidx += nskip[i]*next_bin_index
b[i]['fracway'] = (pt - curr_bin[next_bin_index-1])/(curr_bin[next_bin_index]- curr_bin[next_bin_index-1])
b[i]['dfracway'] = 1./(curr_bin[next_bin_index]- curr_bin[next_bin_index-1])
#sort dimension based on fracway (lowest to highest)
b_sorted = sorted(b[:-1], key=lambda b_elem: b_elem['fracway'])
# final element of b_sorted,
b_sorted.append({'dim':m-1,'fracway':1.0, 'dfracway':0.0})
# top right corner
indices[j, 0] = sidx
coeffs[j, 0] = b_sorted[0]['fracway']
for i in range(m):
if binsize[b_sorted[i]['dim']] > 1:
# support singleton dimension
sidx -= nskip[b_sorted[i]['dim']]
indices[j, i+1] = sidx
coeffs[j, i+1] = b_sorted[i+1]['fracway'] - b_sorted[i]['fracway']
return indices, coeffs
| 5,344,349
|
def submit_rgi_job(sample_instance: AnalysisSample) -> RGIResult:
"""
Given an input AnalysisSample instance, runs RGI and stores result in the database
:param sample_instance: Instance of AnalysisSample object
:return: Populated RGIResult object generated by the method
"""
logger.info(f"Received RGI job request for {sample_instance}")
assembly_instance = SampleAssemblyData.objects.get(sample_id=sample_instance.sample_id)
rgi_dir_name = f'RGI_{sample_instance.user}_{sample_instance.pk}'
root_sample_instance = Sample.objects.get(sample_id=sample_instance.sample_id)
outdir = MEDIA_ROOT / Path(str(sample_instance.sample_id.fwd_reads)).parent / rgi_dir_name
if not assembly_instance.assembly_exists():
logger.warning(f"Could not find assembly for {assembly_instance} - cannot proceed with job")
return
else:
assembly_path = assembly_instance.get_assembly_path()
# Remove previous analysis if it exists
if outdir.exists():
shutil.rmtree(outdir, ignore_errors=True)
outdir.mkdir(parents=True)
# Call RGI
rgi_text_results, rgi_json_results = call_rgi_main(fasta=assembly_path, outdir=outdir,
sample_id=root_sample_instance.sample_id)
# Populate database with results
rgi_result_object = RGIResult.objects.create(analysis_sample=sample_instance)
rgi_result_object.rgi_main_text_results = upload_analysis_file(instance=root_sample_instance,
filename=rgi_text_results.name,
analysis_folder=rgi_dir_name)
rgi_result_object.rgi_main_json_results = upload_analysis_file(instance=root_sample_instance,
filename=rgi_json_results.name,
analysis_folder=rgi_dir_name)
rgi_result_object.save()
logger.info(f"Completed running RGI on {sample_instance}")
return rgi_result_object
| 5,344,350
|
def test_case_citation_redirect(client, citation):
"""Should allow various forms of citation, should redirect to normalized_cite"""
url = api_reverse("casemetadata-detail", args=[citation.normalized_cite])
# should have received a redirect
response = client.get(url)
check_response(response, status_code=302)
response = client.get(url, follow=True)
check_response(response)
content = response.json()['results']
case = citation.case
# should only have one case returned
assert len(content) == 1
assert content[0]['id'] == case.id
# should only have one citation for this case
citations_result = content[0]['citations']
assert len(citations_result) == 1
assert citations_result[0]['cite'] == citation.cite
# allow user to enter real citation (not normalized)
url = api_reverse("casemetadata-get-cite", args=[citation.cite])
response = client.get(url, follow=True)
check_response(response)
content = response.json()['results']
case = citation.case
assert len(content) == 1
assert content[0]['id'] == case.id
# citation redirect should work with periods in the url, too
new_citation = CitationFactory(cite='1 Mass. 1', normalized_cite='1-mass-1', case=citation.case)
new_citation.save()
url = api_reverse("casemetadata-get-cite", args=[new_citation.cite])
response = client.get(url)
check_response(response, status_code=302)
response = client.get(url, follow=True)
check_response(response)
content = response.json()['results']
case = citation.case
assert len(content) == 1
assert content[0]['id'] == case.id
| 5,344,351
|
def forwardslash2shift(args=None):
"""
Make forward slash shift when pressed with another key
"""
run_mapper(premade.ForwardSlash2Shift)
return 0
| 5,344,352
|
def user_details_force_sync(auth_entry, strategy, details, user=None, *args, **kwargs): # lint-amnesty, pylint: disable=keyword-arg-before-vararg
"""
Update normally protected user details using data from provider.
This step in the pipeline is akin to `social_core.pipeline.user.user_details`, which updates
the user details but has an unconfigurable protection over updating the username & email, and
is unable to update information such as the user's full name which isn't on the user model, but
rather on the user profile model.
Additionally, because the email field is normally used to log in, if the email is changed by this
forced synchronization, we send an email to both the old and new emails, letting the user know.
This step is controlled by the `sync_learner_profile_data` flag on the provider's configuration.
"""
current_provider = provider.Registry.get_from_pipeline({'backend': strategy.request.backend.name, 'kwargs': kwargs})
if user and current_provider.sync_learner_profile_data:
# Keep track of which incoming values get applied.
changed = {}
# Map each incoming field from the provider to the name on the user model (by default, they always match).
field_mapping = {field: (user, field) for field in details.keys() if hasattr(user, field)}
# This is a special case where the field mapping should go to the user profile object and not the user object,
# in some cases with differing field names (i.e. 'fullname' vs. 'name').
field_mapping.update({
'fullname': (user.profile, 'name'),
'country': (user.profile, 'country'),
})
# Remove username from list of fields for update
field_mapping.pop('username', None)
# Track any fields that would raise an integrity error if there was a conflict.
integrity_conflict_fields = {'email': user.email, 'username': user.username}
for provider_field, (model, field) in field_mapping.items():
provider_value = details.get(provider_field)
current_value = getattr(model, field)
if provider_value is not None and current_value != provider_value:
if field in integrity_conflict_fields and User.objects.filter(**{field: provider_value}).exists():
logger.warning('[THIRD_PARTY_AUTH] Profile data synchronization conflict. '
'UserId: {user_id}, Provider: {provider}, ConflictField: {conflict_field}, '
'ConflictValue: {conflict_value}'.format(
user_id=user.id,
provider=current_provider.name,
conflict_field=field,
conflict_value=provider_value))
continue
changed[provider_field] = current_value
setattr(model, field, provider_value)
if changed:
logger.info(
'[THIRD_PARTY_AUTH] User performed SSO and data was synchronized. '
'Username: {username}, Provider: {provider}, UpdatedKeys: {updated_keys}'.format(
username=user.username,
provider=current_provider.name,
updated_keys=list(changed.keys())
)
)
# Save changes to user and user.profile models.
strategy.storage.user.changed(user)
user.profile.save()
# Send an email to the old and new email to alert the user that their login email changed.
if changed.get('email'):
old_email = changed['email']
new_email = user.email
email_context = {'old_email': old_email, 'new_email': new_email}
# Subjects shouldn't have new lines.
subject = ''.join(render_to_string(
'emails/sync_learner_profile_data_email_change_subject.txt',
email_context
).splitlines())
body = render_to_string('emails/sync_learner_profile_data_email_change_body.txt', email_context)
from_email = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
email = EmailMessage(subject=subject, body=body, from_email=from_email, to=[old_email, new_email])
email.content_subtype = "html"
try:
email.send()
except SMTPException:
logger.exception('[THIRD_PARTY_AUTH] Error sending IdP learner data sync-initiated email change '
'notification email. Username: {username}'.format(username=user.username))
| 5,344,353
|
def test_get_datasource_by_id(mocker, dm, datasource_details_result):
"""Test getting datasource by uuid"""
dm.client.get = mocker.Mock(return_value=datasource_details_result)
datasource = dm.get_datasource_by_id("abc")
# Make sure we hit the right endpoint
assert dm.client.get.call_count == 1
assert dm.client.get.call_args[0][0] == "/rest/v1/datasource/abc"
# a couple of spot checks.
assert datasource.category == "category"
assert datasource.cadence == "daily"
assert datasource.splitColumns == ["category", "country"]
assert datasource.type == "datasource"
assert datasource.earliestData == "2015-01-01"
assert datasource.latestData == "2018-10-01"
# Make sure we didn't go through the client again for the details
assert dm.client.get.call_count == 1
| 5,344,354
|
def plotly_figure(figure, id: str):
"""
:param figure: plotly graph object or px figure
:param id: unique id string of format 'id_xxx' with x representing a number
:return: html style string containing a plotly figure
"""
json_figure = figure.to_json()
html = """
<div id="""+id+"""></div>
<script>
var plotly_data = {}
Plotly.react("""+id+""", plotly_data.data, plotly_data.layout);
</script>
"""
local_text = html.format(json_figure)
return local_text
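# Usage sketch (assumes plotly is installed and the surrounding page already
# loads the Plotly.js bundle, since this snippet only emits a div and a script):
import plotly.express as px

fig = px.scatter(x=[1, 2, 3], y=[3, 1, 2])
html_snippet = plotly_figure(fig, id="id_001")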
| 5,344,355
|
def _add_resources_to_vault_obj(obj, data, columns):
"""Add associated resources to column and data tuples
"""
i = 0
for s in obj.resources:
if obj.resources[i].id:
name = 'resource_id_' + str(i + 1)
data += (obj.resources[i].id,)
columns = columns + (name,)
name = 'resource_type_' + str(i + 1)
data += (obj.resources[i].type,)
columns = columns + (name,)
i += 1
return data, columns
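# Illustration with a stand-in object (hypothetical; real callers pass vault
# objects from the client library):
from types import SimpleNamespace

vault = SimpleNamespace(resources=[SimpleNamespace(id="abc", type="OS::Nova::Server")])
data, columns = _add_resources_to_vault_obj(vault, (), ())
assert data == ("abc", "OS::Nova::Server")
assert columns == ("resource_id_1", "resource_type_1")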
| 5,344,356
|
def classify_helmet_belt_worn(x):
"""
This function returns a string representation of the int value of the field which specifies whether the
person was wearing a seatbelt or a helmet. This specification is from the Road Crash Statistics Victoria, 2013 Edition
document.
:param x: int value representing the classify helmet belt worn field
:return: string representation of the integer value
"""
if x == 1:
return 'Seatbelt Worn'
elif x == 2:
return 'Seatbelt Not Worn'
elif x == 3:
return 'Child Restraint Worn'
elif x == 4:
return 'Child Restraint Not Worn'
elif x == 5:
return 'Seatbelt/restraint Not fitted'
elif x == 6:
return 'Crash Helmet Worn'
elif x == 7:
return 'Crash Helmet Not Worn'
elif x == 8:
return 'Not Appropriate'
else:
return 'Not Known'
| 5,344,357
|
def history_kernels ( estimated_stimulus_kernel, estimated_response_kernel, ci_kernels, ax=None, presentation="left/right", ground_truth=None ):
"""plot history kernels
:Parameters:
*estimated_stimulus_kernel*
stimulus kernel estimated from the data
*estimated_response_kernel*
response kernel estimated from the data
*ci_kernels*
a sequence of confidence regions for the kernels as returned by
statistics.history_kernel_ci()
*ax*
pylab.axes where the plot should go
*presentation*
how should the kernels be presented? Selection of either 'left/right'
or 'correct/incorrect'
:Example:
>>> skernel = [1.2,.5,.3,.1]
>>> rkernel = [.1,.1,0,0]
>>> ci_kernels = [ [[1.3,.6,.4,.2],[.8,.3,.1,-.05]],[[.2,.2,.1,.1],[-.05,0.,-.1,-.1]],[[1.5,.8,.5,.3],[.7,.3,0.,-.2]],[[1.2,.5,.5,.2],[.9,.2,0.,-.05]] ]
>>> history_kernels ( skernel, rkernel, ci_kernels )
>>> pl.savefig ( 'test/history_kernels.png' ); pl.close()
"""
if presentation=="left/right":
kernels = (estimated_stimulus_kernel,estimated_response_kernel)
colors = (stimulus_color,response_color)
labels = ("stimulus","response")
if not ci_kernels is None:
CI = np.array(ci_kernels[:2])
else:
CI = None
if not ground_truth is None:
true_kernels = ground_truth['stimulus_kernel'],\
ground_truth['response_kernel']
elif presentation=="correct/incorrect":
kernels = (estimated_stimulus_kernel+estimated_response_kernel,-estimated_stimulus_kernel+estimated_response_kernel)
colors = (correct_color,incorrect_color)
labels = ("correct","incorrect")
if not ci_kernels is None:
CI = np.array(ci_kernels[2:])
else:
CI = None
if not ground_truth is None:
true_kernels = ground_truth['stimulus_kernel']+\
ground_truth['response_kernel'],\
-ground_truth['stimulus_kernel']+\
ground_truth['response_kernel']
else:
raise ValueError("presentation should be either 'left/right' or 'correct/incorrect'")
if CI is None:
CI = np.array([[kernels[0],kernels[0]],[kernels[1],kernels[1]]])
if ax is None:
ax = pl.gca()
ax = prepare_axes ( ax )
# Plot confidence regions
lags = np.arange ( len(estimated_stimulus_kernel) ) + 1
for i in [0,1]:
fc = 0.5*np.array(colors[i])+0.5*np.ones(3)
ax.fill ( np.concatenate ( (lags,lags[::-1]) ), np.concatenate ( (CI[i,0,:],CI[i,1,::-1]) ),
facecolor=fc, edgecolor=0.5*colors[i], alpha=0.7 )
kernellines = []
for i in [0,1]:
if not ground_truth is None:
ax.plot ( lags, true_kernels[i], color=0.5*colors[i] )
kernellines += ax.plot ( lags, kernels[i], 'o',
markerfacecolor=colors[i], markeredgecolor=0.5*colors[i], label=labels[i] )
ax.set_xlim ( 1-0.01*len(estimated_stimulus_kernel),len(estimated_stimulus_kernel)+0.01*len(estimated_stimulus_kernel) )
ax.set_xticks ( lags )
# label_axes ( title="history kernels", xlabel="lag", ylabel="equivalent stimulus strength", legend='best', ax=ax )
return kernellines
| 5,344,358
|
def init_application():
"""Main entry point for initializing the Deckhand API service.
    Creates routes for the v1.0 API and sets up logging.
"""
config_files = _get_config_files()
paste_file = config_files[-1]
CONF([], project='deckhand', default_config_files=config_files)
setup_logging(CONF)
policy.Enforcer(CONF)
LOG.debug('Starting WSGI application using %s configuration file.',
paste_file)
db_api.drop_db()
db_api.setup_db(CONF.database.connection)
app = deploy.loadapp('config:%s' % paste_file, name='deckhand_api')
return app
| 5,344,359
|
def specs_url(self):
"""
    The Swagger specifications' absolute URL (i.e. `swagger.json`)
:rtype: str
"""
return url_for(self.endpoint('specs'), _external=False)
| 5,344,360
|
def construct_db(db: str) -> sqlite3.Connection:
    """Build an empty database at 'db' and return the open connection."""
conn = sqlite3.connect(db)
c = conn.cursor()
c.executescript('''
CREATE TABLE files (
ID INTEGER PRIMARY KEY,
Name TEXT,
Path TEXT,
FullPath TEXT,
isDir INTEGER,
Size INTEGER,
Mtime INTEGER,
Atime INTEGER,
Ctime INTEGER,
Btime INTEGER,
UID INTEGER,
GID INTEGER,
iNode INTEGER,
DevID INTEGER,
DP INTEGER,
XCount INTEGER,
MIME INTEGER,
Type INTEGER,
Offset INTEGER
);
CREATE TABLE xattrs (
ID INTEGER PRIMARY KEY AUTOINCREMENT,
FileID INTEGER,
Key TEXT,
Value TEXT,
Raw BLOB
);
CREATE TABLE mtypes(
ID INTEGER PRIMARY KEY AUTOINCREMENT,
MIME TEXT
);
CREATE TABLE ftypes (
ID INTEGER PRIMARY KEY,
Type TEXT
);
CREATE VIEW localtime as
select
files.ID,
Name,
Path,
FullPath,
isDir,
Size,
datetime(mtime, 'unixepoch', 'localtime') as Mtime,
datetime(atime, 'unixepoch', 'localtime') as Atime,
datetime(ctime, 'unixepoch', 'localtime') as Ctime,
datetime(btime, 'unixepoch', 'localtime') as Btime,
UID,
GID,
iNode,
DevID as DeviceID,
mtypes.MIME,
ftypes.Type,
Xcount as ExtraAttrs,
'Offset' as ZipOffset,
Key as XattrKey,
Value as XattrValue,
Raw
from files
left join xattrs on files.ID = xattrs.FileID
left join mtypes on files.MIME = mtypes.ID
left join ftypes on files.Type = ftypes.ID;
CREATE VIEW utc as
select
files.ID,
Name,
Path,
FullPath,
isDir,
Size,
datetime(mtime, 'unixepoch') as Mtime,
datetime(atime, 'unixepoch') as Atime,
datetime(ctime, 'unixepoch') as Ctime,
datetime(btime, 'unixepoch') as Btime,
UID,
GID,
iNode,
DevID as DeviceID,
mtypes.MIME,
ftypes.Type,
Xcount as ExtraAttrs,
'Offset' as ZipOffset,
Key as XattrKey,
Value as XattrValue,
Raw
from files
left join xattrs on files.ID = xattrs.FileID
left join mtypes on files.MIME = mtypes.ID
left join ftypes on files.Type = ftypes.ID;
''')
conn.commit()
return conn
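
# Usage sketch (assumption: sqlite3 is imported at module level, as the function
# above requires). Build the schema in an in-memory database and confirm the
# expected tables and views exist; ':memory:' is illustrative, any path works.
_conn = construct_db(':memory:')
_names = {row[0] for row in _conn.execute(
    "SELECT name FROM sqlite_master WHERE type IN ('table', 'view')")}
assert {'files', 'xattrs', 'mtypes', 'ftypes', 'localtime', 'utc'} <= _names
_conn.close()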
| 5,344,361
|
def class_javadoc(ns, stmt):
""" Generate javadoc for class (string without '/**' and '*/' but with * on new line) """
description = ''
desc_stmt = search_one(stmt, 'description')
if desc_stmt is not None:
description += ''.join([str(desc_stmt.arg).replace('\n', '\n * ')])
description += ''.join(['\n * <br/>\n * Namespace: ', ns])
return description
| 5,344,362
|
def r_precision(r):
"""Score is precision after all relevant documents have been retrieved
Relevance is binary (nonzero is relevant).
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
R Precision
"""
r = np.asarray(r) != 0
z = r.nonzero()[0]
if not z.size:
return 0.
return np.mean(r[:z[-1] + 1])
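
# Worked sketch: with relevance scores [0, 1, 0, 1] the last relevant document
# sits at rank 4 and 2 of the first 4 are relevant, so R-precision is 0.5; an
# all-zero list short-circuits to 0. (numpy is assumed imported as np above.)
assert r_precision([0, 1, 0, 1]) == 0.5
assert r_precision([0, 0, 0]) == 0.0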
| 5,344,363
|
def evaluate_response_selections(
response_selection_results: List[ResponseSelectionEvaluationResult],
output_directory: Optional[Text],
successes: bool,
errors: bool,
disable_plotting: bool,
) -> Dict: # pragma: no cover
"""Creates summary statistics for response selection.
Only considers those examples with a set response.
    Others are filtered out. Returns a dictionary containing the
evaluation result.
Args:
response_selection_results: response selection evaluation results
output_directory: directory to store files to
successes: if True success are written down to disk
errors: if True errors are written down to disk
disable_plotting: if True no plots are created
Returns: dictionary with evaluation results
"""
import sklearn.metrics
import sklearn.utils.multiclass
from rasa.test import get_evaluation_metrics
# remove empty response targets
num_examples = len(response_selection_results)
response_selection_results = remove_empty_response_examples(
response_selection_results
)
logger.info(
f"Response Selection Evaluation: Only considering those "
f"{len(response_selection_results)} examples that have a defined response out "
f"of {num_examples} examples."
)
response_to_intent_target = {}
for result in response_selection_results:
response_to_intent_target[result.response_target] = result.intent_target
target_responses, predicted_responses = _targets_predictions_from(
response_selection_results, "response_target", "response_prediction"
)
confusion_matrix = sklearn.metrics.confusion_matrix(
target_responses, predicted_responses
)
labels = sklearn.utils.multiclass.unique_labels(
target_responses, predicted_responses
)
if output_directory:
report, precision, f1, accuracy = get_evaluation_metrics(
target_responses, predicted_responses, output_dict=True
)
report = _add_confused_labels_to_report(report, confusion_matrix, labels)
report_filename = os.path.join(
output_directory, "response_selection_report.json"
)
io_utils.dump_obj_as_json_to_file(report_filename, report)
logger.info(f"Classification report saved to {report_filename}.")
else:
report, precision, f1, accuracy = get_evaluation_metrics(
target_responses, predicted_responses
)
if isinstance(report, str):
log_evaluation_table(report, precision, f1, accuracy)
if successes:
successes_filename = "response_selection_successes.json"
if output_directory:
successes_filename = os.path.join(output_directory, successes_filename)
# save classified samples to file for debugging
write_response_successes(response_selection_results, successes_filename)
if errors:
errors_filename = "response_selection_errors.json"
if output_directory:
errors_filename = os.path.join(output_directory, errors_filename)
# log and save misclassified samples to file for debugging
write_response_errors(response_selection_results, errors_filename)
if not disable_plotting:
confusion_matrix_filename = "response_selection_confusion_matrix.png"
if output_directory:
confusion_matrix_filename = os.path.join(
output_directory, confusion_matrix_filename
)
_labels = [
response_to_intent_target[label]
if label in response_to_intent_target
else f"'{label[:20]}...' (response not present in test data)"
for label in labels
]
plot_utils.plot_confusion_matrix(
confusion_matrix,
classes=_labels,
title="Response Selection Confusion Matrix",
output_file=confusion_matrix_filename,
)
histogram_filename = "response_selection_histogram.png"
if output_directory:
histogram_filename = os.path.join(output_directory, histogram_filename)
plot_attribute_confidences(
response_selection_results,
histogram_filename,
"response_target",
"response_prediction",
title="Response Selection Prediction Confidence Distribution",
)
predictions = [
{
"text": res.message,
"intent_target": res.intent_target,
"response_target": res.response_target,
"response_predicted": res.response_prediction,
"confidence": res.confidence,
}
for res in response_selection_results
]
return {
"predictions": predictions,
"report": report,
"precision": precision,
"f1_score": f1,
"accuracy": accuracy,
}
| 5,344,364
|
def _async_climate_updater(
lookin_protocol: LookInHttpProtocol,
uuid: str,
) -> Callable[[], Coroutine[None, Any, Remote]]:
"""Create a function to capture the cell variable."""
async def _async_update() -> Climate:
return await lookin_protocol.get_conditioner(uuid)
return _async_update
| 5,344,365
|
def load_natural_movies(cpd=1.00):
"""load natural movies dataset
Parameters
----------
- cpd: float of cycles per degree, should be 1.00 or 1.33
"""
if cpd not in {1.00, 1.33}:
raise Exception('cpd must be in {1.00, 1.33}')
if cpd == 1.00:
cpd = '1.00'
elif cpd == 1.33:
cpd = '1.33'
else:
raise Exception('cpd must be in {1.00, 1.33}')
# load X
X_path = '/auto/k6/nbilenko/preproc_data/movie/dir{cpd}cpd_{dataset}stim.npy'
Xtrain = np.load(X_path.format(cpd=cpd, dataset='t'))
Xtest = np.load(X_path.format(cpd=cpd, dataset='v'))
# load Y
Y_path = 'auto/k8/anunez/proj/snmovies/datasets/snmovies_braindata_AH3T.hdf'
cci = glabtools.io.get_cc_interface('anunez_raid', verbose=False)
Y_data = cci.cloud2dict(Y_path, verbose=False)
Ytrain = Y_data['Ytrain']
Ytest = Y_data['Yval']
return {
'natural_movies_gabor_pyramid': {
'Xtrain': Xtrain,
'Ytrain': Ytrain,
'Xtest': Xtest,
'Ytest': Ytest,
},
'natural_movies_mean_gabor': {
'Xtrain': Xtrain.mean(1, keepdims=True),
'Ytrain': Ytrain,
'Xtest': Xtest.mean(1, keepdims=True),
'Ytest': Ytest,
},
}
| 5,344,366
|
def sort_dataset_by_len(dataset):
"""
returns a dict mapping length -> list of items of that length
    an OrderedDict is used so that the mapping is sorted from smallest to largest
"""
sorted_dataset = collections.OrderedDict()
lengths = sorted(list(set(len(x[1]) for x in dataset)))
for l in lengths:
sorted_dataset[l] = []
for item in dataset:
sorted_dataset[len(item[1])].append(item)
return sorted_dataset
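
# Illustrative input shape (assumption: each dataset item is a pair whose second
# element has a length, e.g. (label, token_list); `collections` is imported above).
_toy = [('a', [1]), ('b', [1, 2]), ('c', [9])]
_buckets = sort_dataset_by_len(_toy)
# list(_buckets) == [1, 2] and _buckets[1] == [('a', [1]), ('c', [9])]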
| 5,344,367
|
def add_note(note_dict):
"""
Add note entries in the db
"""
# Cleanup: Remove print() and set logger here.
pprint.pprint(note_dict)
# Cleanup: Remove print() and set logger here.
print("Adding your note to the database!")
| 5,344,368
|
def rule_VisibleTo_if_in_same_visible_container(x, actor, world) :
"""Anything in the same visible container to the actor is visible
if the visible container is lit. We treat doors specially: if x
is in the get_room_doors of the visible container, then the door
is visible, too."""
actor_vis_cont = world[VisibleContainer(world[Location(actor)])]
if x in world.activity.get_room_doors(actor_vis_cont) :
return True
if actor_vis_cont == x :
# otherwise we'd be looking too many levels high
x_vis_cont = x
else :
loc = world[Location(x)]
if not loc : raise NotHandled()
x_vis_cont = world[VisibleContainer(loc)]
if actor_vis_cont == x_vis_cont and world[ContainsLight(actor_vis_cont)] :
return True
raise NotHandled()
| 5,344,369
|
def select_random_user_goals(user_goals_no_req_slots, user_goals_with_req_slots, cardinality_no_req, cardinality_req):
"""
Helper method to randomly select user goals
"""
random_user_goals = {}
random_user_goals['all'] = []
# select randomly user goals without request slots
random_user_goals['all'].extend(copy.deepcopy(random.sample(user_goals_no_req_slots, cardinality_no_req)))
# select randomly user goals with request slots
random_user_goals['all'].extend(copy.deepcopy(random.sample(user_goals_with_req_slots, cardinality_req)))
return random_user_goals
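
# Hedged sketch: the goal dicts below are placeholders, not real user goals, and
# `random`/`copy` are assumed to be imported at module level as the function needs.
_goals_plain = [{'inform': {'city': 'x'}}, {'inform': {'city': 'y'}}]
_goals_with_req = [{'request': {'time': '?'}}]
_picked = select_random_user_goals(_goals_plain, _goals_with_req, 1, 1)
assert len(_picked['all']) == 2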
| 5,344,370
|
def dict_items_recursive_apply(config_dict, apply_method, **apply_method_parameters):
"""Recursive apply method to dict elements
>>> dict_items_recursive_apply(
... {"foo": {"bar":"baz"}, "qux": ["a","b"]},
... lambda k,v,x: v.upper()+x, **{"x":"!"}
... ) == {'foo': {'bar': 'BAZ!'}, 'qux': ['A!', 'B!']}
True
    :param config_dict: input nested dictionary
:type config_dict: dict
:param apply_method: method to be applied to dict elements
:type apply_method: :func:`apply_method`
:param apply_method_parameters: optional parameters passed to the method
:type apply_method_parameters: dict
:returns: updated dict
:rtype: dict
"""
result_dict = copy.deepcopy(config_dict)
for dict_k, dict_v in result_dict.items():
if isinstance(dict_v, dict):
result_dict[dict_k] = dict_items_recursive_apply(
dict_v, apply_method, **apply_method_parameters
)
elif any(isinstance(dict_v, t) for t in (list, tuple)):
result_dict[dict_k] = list_items_recursive_apply(
dict_v, apply_method, **apply_method_parameters
)
else:
result_dict[dict_k] = apply_method(
dict_k, dict_v, **apply_method_parameters
)
return result_dict
| 5,344,371
|
def d_B_nu_d_T_d_nu_dimensionless(x):
"""
Calculates d^2(B_nu) / d (T) / d (nu),
as a function of dimensionless units, x = (h nu / k_B T)
Parameters
----------
x : float
Returns
-------
d_B_nu_d_T_d_nu_dimensionless : float
Not normalized to anything meaningful
"""
return - np.exp(x)*x**3 * (np.exp(x)*(x-4)+x+4) / (np.exp(x)-1)**3
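
# Numerical sketch (not from the original source; numpy as np assumed imported
# above): the expression changes sign where d(B_nu)/dT peaks, which in these
# dimensionless units is near x ~ 3.83.
_xs = np.linspace(0.5, 8.0, 2000)
_vals = d_B_nu_d_T_d_nu_dimensionless(_xs)
_crossing = _xs[np.where(np.diff(np.sign(_vals)) != 0)[0][0]]  # roughly 3.83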
| 5,344,372
|
def test_sbas():
"""./ReadRinex.py -q tests/demo3.10n -o r3sbas.nc
"""
pytest.importorskip('netCDF4')
truth = xarray.open_dataset(R/'r3sbas.nc', group='NAV', autoclose=True)
nav = gr.load(R/'demo3.10n')
assert nav.equals(truth)
| 5,344,373
|
def node_to_get_batch_value(shape_node: Node):
"""
The function returns a node that produces the batch value which is usually the element of the shape with index 0
:param shape_node: the node of 1D output shape to get batch from
:return: the node producing batch value
"""
return node_to_get_shape_value_of_range(shape_node, [0])
| 5,344,374
|
def compare_table_defs(psql, db, table, cur_tbl_def, tmp_tbl_def):
"""
Compare table definitions before allowing supported modifications.
Currently, only adding new columns is allowed.
Args:
psql -- handle to talk to redshift
db -- redshift database containing table
table -- table name for which definition may change
cur_tbl_def -- table definition for existing table
tmp_tbl_def -- table definition for temp table which may contain changes
Return: None
"""
copy_tmp_tbl_def = copy.deepcopy(tmp_tbl_def)
if not has_table_def(cur_tbl_def):
raise ValueError("missing existing table: {0}".format(table))
if len(tmp_tbl_def) < len(cur_tbl_def):
raise ValueError("{0}: new schema has less columns".format(table))
for row in cur_tbl_def:
tmp_row = copy_tmp_tbl_def.pop(0)
diff = [i for i in range(len(row)) if row[i] != tmp_row[i]
and i not in [PgTableDef.TableName, PgTableDef.Encoding]]
if diff:
raise ValueError("{0}: change to column '{1}' not allowed".format(
table, row[PgTableDef.Column]
))
| 5,344,375
|
def re2_full_match(input, pattern): # pylint: disable=redefined-builtin
"""Extract regex groups
Args:
input: A `tf.string` tensor
pattern: A pattern string.
"""
return core_ops.io_re2_full_match(input, pattern)
| 5,344,376
|
def user_agent():
"""
Return a User-Agent that identifies this client.
Example:
python-requests/2.9.1 edx-rest-api-client/1.7.2 ecommerce
The last item in the list will be the application name, taken from the
OS environment variable EDX_REST_API_CLIENT_NAME. If that environment
variable is not set, it will default to the hostname.
"""
client_name = 'unknown_client_name'
try:
client_name = os.environ.get("EDX_REST_API_CLIENT_NAME") or socket.gethostbyname(socket.gethostname())
except: # pylint: disable=bare-except
pass # using 'unknown_client_name' is good enough. no need to log.
return "{} edx-rest-api-client/{} {}".format(
requests.utils.default_user_agent(), # e.g. "python-requests/2.9.1"
__version__, # version of this client
client_name
)
| 5,344,377
|
def optional_tools_or_packages_arg(multiple=False):
""" Decorate click method as optionally taking in the path to a tool
or directory of tools or a Conda package. If no such argument is given
the current working directory will be treated as a directory of tools.
"""
name = "paths" if multiple else "path"
nargs = -1 if multiple else 1
return click.argument(
name,
metavar="TARGET",
nargs=nargs,
)
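
# Hedged sketch of how the decorator is typically attached to a click command;
# the command name and body are illustrative, not from the original CLI
# (click is assumed imported at module level, as the function above requires).
@click.command()
@optional_tools_or_packages_arg(multiple=True)
def lint(paths):
    """Echo the TARGET paths that were collected (or '.' when none are given)."""
    click.echo(' '.join(paths) or '.')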
| 5,344,378
|
def revoke_api_access(application):
"""
Revoke the API access of this application
"""
try:
file = open(PATH + '/../DB/access.json', 'r')
accessData = json.load(file)
if (application in accessData):
accessData.pop(application, None)
with open(PATH + '/../DB/access.json', 'w') as f:
f.write(json.dumps(accessData, indent=4, sort_keys=True))
except:
raise
| 5,344,379
|
def linear_interpolate_cdf(base_cdf):
"""Linear interpolate regions of straight lines in the CDF.
Parameters:
base_cdf (list): n elements of non-decreasing order.
Returns:
        list of the same length as base_cdf where consecutive runs of equal
        values are linearly interpolated between their left and right endpoints.
"""
target_cdf = list(base_cdf)
index = 0
left_val = 0
while index < len(base_cdf)-1:
if base_cdf[index] == base_cdf[index+1]:
# search for where it ends
offset = index+1
while (offset < len(base_cdf)-1 and
base_cdf[offset] == base_cdf[offset+1]):
offset += 1
# linearly interpolate between index and offset
right_val = base_cdf[offset]
interp_val = numpy.interp(
list(range(index, offset+1, 1)),
[index-1, offset],
[float(left_val), float(right_val)])
target_cdf[index:offset+1] = interp_val
left_val = right_val
index = offset+1
else:
left_val = base_cdf[index]
index += 1
return target_cdf
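
# Worked sketch (values illustrative; numpy is assumed imported above): the flat
# run in [0, 3, 3, 3, 7] is stretched into a straight line between the value
# before the run (0 at index 0) and the run's value (3 at index 3).
_smoothed = linear_interpolate_cdf([0, 3, 3, 3, 7])
# _smoothed == [0, 1.0, 2.0, 3.0, 7]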
| 5,344,380
|
def test_mean_los_velocity_vs_rp_correctness1():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
results for a controlled distribution of points whose mean radial velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
the first at (1, 0, 0.1), the second at (1, 0.2, 0.25).
The first set of points is moving at +50 in the z-direction;
the second set of points is at rest.
PBCs are set to infinity in this test.
So in this configuration, the two sets of points are moving towards each other,
and so the relative z-velocity should be -50 for cross-correlations
in separation bins containing the pair of points. For any separation bin containing only
one set or the other, the auto-correlations should be 0 because each set of
points moves coherently.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = -50
npts = 100
xc1, yc1, zc1 = 1, 0, 0.1
xc2, yc2, zc2 = 1, 0.2, 0.25
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 2] = 50.
rp_bins, pi_max = np.array([0, 0.1, 0.15, 0.21, 0.25]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
assert np.allclose(s1s1[0:2], 0, rtol=0.01)
assert np.allclose(s1s2[0:2], 0, rtol=0.01)
assert np.allclose(s2s2[0:2], 0, rtol=0.01)
assert np.allclose(s1s1[2], 0, rtol=0.01)
assert np.allclose(s1s2[2], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[2], 0, rtol=0.01)
assert np.allclose(s1s1[3], 0, rtol=0.01)
assert np.allclose(s1s2[3], 0, rtol=0.01)
assert np.allclose(s2s2[3], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max)
assert np.allclose(s1s1[0:2], 0, rtol=0.01)
assert np.allclose(s1s1[2], correct_relative_velocity, rtol=0.01)
assert np.allclose(s1s1[3], 0, rtol=0.01)
| 5,344,381
|
def hash_str(string: str) -> int:
"""
Create the hash for a string (poorly).
"""
hashed = 0
results = map(ord, string)
for result in results:
hashed += result
return hashed
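
# Illustrative property check (not from the original source): the hash is just
# the sum of code points, so anagrams collide.
assert hash_str("ab") == hash_str("ba") == 97 + 98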
| 5,344,382
|
def graph(g: nx.Graph, s: Optional[list] = None, plot_size: Tuple = (500, 500)): # pragma: no cover
"""Creates a plot of the input graph.
This function can plot the input graph only, or the graph with a specified subgraph highlighted.
Graphs are plotted using the Kamada-Kawai layout with an aspect ratio of 1:1.
**Example usage:**
>>> graph = nx.complete_graph(10)
>>> fig = plot.graph(graph, [0, 1, 2, 3])
>>> fig.show()
.. image:: ../../_static/complete_graph.png
:width: 40%
:align: center
:target: javascript:void(0);
Args:
g (nx.Graph): input graph
s (list): optional list of nodes comprising the subgraph to highlight
plot_size (int): size of the plot in pixels, given as a pair of integers ``(x_size,
y_size)``
Returns:
Figure: figure for graph and optionally highlighted subgraph
"""
try:
import plotly.graph_objects as go
import plotly.io as pio
except ImportError:
raise ImportError(plotly_error)
try:
in_notebook = get_ipython().__class__.__name__ == "ZMQInteractiveShell"
except NameError:
in_notebook = False
if not in_notebook:
pio.renderers.default = "browser"
l = nx.kamada_kawai_layout(g)
g_nodes = go.Scatter(
**_node_coords(g, l),
mode="markers",
hoverinfo="text",
marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2),
)
g_edges = go.Scatter(
**_edge_coords(g, l),
line=dict(width=1, color=graph_edge_colour),
hoverinfo="none",
mode="lines",
)
g_nodes.text = [str(i) for i in g.nodes()]
layout = go.Layout(
showlegend=False,
hovermode="closest",
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
margin=dict(b=0, l=0, r=0, t=25),
height=plot_size[1],
width=plot_size[0],
plot_bgcolor="#ffffff",
)
if s is not None:
s = g.subgraph(s)
s_edges = go.Scatter(
**_edge_coords(s, l),
line=dict(width=2, color=subgraph_edge_colour),
hoverinfo="none",
mode="lines",
)
s_nodes = go.Scatter(
**_node_coords(s, l),
mode="markers",
hoverinfo="text",
marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2),
)
s_nodes.text = [str(i) for i in s.nodes()]
f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout)
else:
f = go.Figure(data=[g_edges, g_nodes], layout=layout)
return f
| 5,344,383
|
def intersect(p1x, p1y, p2x, p2y, x0, y0):
"""Intersect segment defined by p1 and p2 with ray coming out of x0,y0 ray
can be horizontal y=y0 x=x0+dx , want dx>0.
Args:
p1x (float): x coordinate of point 1 of segment
p1y (float): y coordinate of point 1 of segment
p2x (float): x coordinate of point 2 of segment
p2y (float): y coordinate of point 2 of segment
x0 (float): x coordinate anchoring the intersection ray
y0 (float): y coordinate anchoring the intersection ray
Returns:
boolean int: (1) if intersecting, (0) if not intersecting
"""
if p1x != p2x and p1y != p2y:
m = (p2y - p1y) / (p2x - p1x)
x_inter = (y0 - p1y) / m + p1x
if x_inter >= x0 and np.min([p1y, p2y]) <= y0 <= np.max([p1y, p2y]):
ans = 1
else:
ans = 0
else:
if p1x == p2x: # vertical segment
if x0 <= p1x and np.min([p1y, p2y]) <= y0 <= np.max([p1y, p2y]):
ans = 1
else:
ans = 0
if p1y == p2y: # horizontal segment
if y0 == p1y:
ans = 1
else:
ans = 0
return ans
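
# Usage sketch with illustrative coordinates (numpy is assumed imported as np
# above): the ray points in the +x direction from (x0, y0), so the vertical
# segment at x = 1 is hit from (0, 1), while a crossing left of the anchor is not.
assert intersect(1.0, 0.0, 1.0, 2.0, 0.0, 1.0) == 1  # vertical segment ahead of the ray
assert intersect(0.0, 0.0, 2.0, 2.0, 3.0, 1.0) == 0  # intersection at x = 1 lies behind x0 = 3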
| 5,344,384
|
def impulse_matrix(params, dt, reduced=False):
"""Calculate the matrix exponential for integration of MAT model"""
from scipy import linalg
a1, a2, b, w, R, tm, t1, t2, tv, tref = params
if not reduced:
A = - np.matrix([[1 / tm, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1 / t1, 0, 0, 0],
[0, 0, 0, 1 / t2, 0, 0],
[0, 0, 0, 0, 1 / tv, -1],
[b / tm, -b, 0, 0, 0, 1 / tv]])
else:
A = - np.matrix([[1 / tm, -1, 0, 0],
[0, 0, 0, 0],
[0, 0, 1 / tv, -1],
[b / tm, -b, 0, 1 / tv]])
return linalg.expm(A * dt)
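
# Hedged usage sketch: the parameter values below are placeholders rather than
# fitted MAT-model constants; only the shape of the returned propagator is checked
# (numpy as np is assumed imported at module level).
_params = (10.0, 2.0, 0.3, 5.0, 50.0, 10.0, 10.0, 200.0, 5.0, 2.0)
_P_full = impulse_matrix(_params, dt=1.0)                   # full 6-state system
_P_reduced = impulse_matrix(_params, dt=1.0, reduced=True)  # reduced 4-state system
assert _P_full.shape == (6, 6) and _P_reduced.shape == (4, 4)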
| 5,344,385
|
def get_vimg(request):
"""
    Generate a captcha image, cache its text in Redis, and return it as a JPEG response.
    :param request: request whose 'vk' query param keys the new captcha text in Redis; optional 'ex' names an expired key to delete
    :return: HttpResponse containing the captcha image (image/jpg)
"""
text, image = vcode.gen_captcha_text_and_image()
v_key = request.GET.get('vk')
ex_key = request.GET.get('ex')
if ex_key:
try:
redis_conn.delete(ex_key)
except Exception as e:
logger.error(e)
redis_conn.set(v_key, text, 60*3)
return HttpResponse(image.getvalue(), content_type='image/jpg')
| 5,344,386
|
def generate_fcm_token():
"""Generate an FCM token
nLAUJTr5RIJ:MNmSQ8O52FoJSvfWEPF4KvWopcNScNFRPHHbXdepwzuXJJMfadpEfb2JlHoqEhWanFz7-N0sfPg-pW4gNubNdxyikiI0lrvGeWGTp86fn9-NA3sZ-Eizv9QE7YKHCOIa70fR38N1ZYsb
"""
return '{}:{}-{}-{}-{}-{}'.format(random_all(11),
random_all(68),
random_all(6),
random_all(30),
random_all(5),
random_all(27))
| 5,344,387
|
def build_latex():
"""Builds the LaTeX from source
"""
proc = subprocess.Popen(
['pdflatex {}'.format(LATEX_TEMPORARY_TXT)],
cwd=LATEX_TEMPORARY_DIR,
shell=True,
stdout=subprocess.PIPE)
(_, _) = proc.communicate()
| 5,344,388
|
def test_read_text_by_index():
"""
This function tests the read_text_columns_by_index function to
ensure it properly reads in a space delimited text file with
a header in the top row
"""
if plat in lin_plat:
file_name = '../data/test/textcol3.txt'
else:
file_name = r'..\data\test\textcol3.txt'
headers = [0, 1, 2, 3]
names = ['ID', 'Inventory', 'Weight_per', 'Number']
dat = [np.int64, str, np.float64, np.int64]
df = read_text_columns_by_index(file_name, headers, dat, names)
new_id = np.array([1, 2, 3, 4], dtype=int)
inventory = np.array(['shoes', 't-shirt', 'coffee', 'books'], dtype=str)
weight = np.array([1.5, 1.8, 2.1, 3.2], dtype=float)
number = np.array([5, 3, 15, 40], dtype=int)
for i in range(len(df)):
assert new_id[i] == df['ID'][i]
assert isinstance(df['ID'][i], np.int64)
assert inventory[i] == df['Inventory'][i]
assert isinstance(df['Inventory'][i], str)
assert weight[i] == df['Weight_per'][i]
assert isinstance(df['Weight_per'][i], np.float64)
assert number[i] == df['Number'][i]
assert isinstance(df['Number'][i], np.int64)
| 5,344,389
|
def _create_group(username: str, gid: Optional[int] = None, system: bool = False) -> Result[Group]:
"""
Create a new group.
"""
try:
get_group(username)
except KeyError:
pass
else:
raise ValueError("Username {!r} is already in use".format(username))
args = ["/usr/sbin/addgroup", username]
if gid:
try:
group = grp.getgrgid(gid)
except KeyError:
args[-1:-1] = ["--gid", str(gid)]
else:
raise ValueError("GID {} is already in use by {!r}".format(gid, group.gr_name))
if system:
args[-1:-1] = ["--system"]
command(args)
group = get_group(username)
LOG.debug("Created UNIX group: %r", group)
return Result(State.created, group)
| 5,344,390
|
def evaluate(data_loader):
"""Evaluate given the data loader
Parameters
----------
data_loader : DataLoader
Returns
-------
avg_loss : float
Average loss
real_translation_out : list of list of str
The translation output
"""
translation_out = []
all_inst_ids = []
avg_loss_denom = 0
avg_loss = 0.0
for _, (src_seq, tgt_seq, src_valid_length, tgt_valid_length, inst_ids) \
in enumerate(data_loader):
src_seq = src_seq.as_in_context(ctx)
tgt_seq = tgt_seq.as_in_context(ctx)
src_valid_length = src_valid_length.as_in_context(ctx)
tgt_valid_length = tgt_valid_length.as_in_context(ctx)
# Calculating Loss
out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length, tgt_valid_length - 1)
loss = loss_function(out, tgt_seq[:, 1:], tgt_valid_length - 1).mean().asscalar()
all_inst_ids.extend(inst_ids.asnumpy().astype(np.int32).tolist())
avg_loss += loss * (tgt_seq.shape[1] - 1)
avg_loss_denom += (tgt_seq.shape[1] - 1)
# Translate
samples, _, sample_valid_length =\
translator.translate(src_seq=src_seq, src_valid_length=src_valid_length)
max_score_sample = samples[:, 0, :].asnumpy()
sample_valid_length = sample_valid_length[:, 0].asnumpy()
for i in range(max_score_sample.shape[0]):
translation_out.append(
[tgt_vocab.idx_to_token[ele] for ele in
max_score_sample[i][1:(sample_valid_length[i] - 1)]])
avg_loss = avg_loss / avg_loss_denom
real_translation_out = [None for _ in range(len(all_inst_ids))]
for ind, sentence in zip(all_inst_ids, translation_out):
real_translation_out[ind] = sentence
return avg_loss, real_translation_out
| 5,344,391
|
def adding_equation(thetas, eta0, eta1, eta2, eta3, kappa3 = 0.0, polarized=False, tau1=0.0, tau2=0.0):
""" Return the reflectance of a 4 layers material (3 interfaces)
with all inter-reflections, using adding equation """
zeros = [np.zeros_like(thetas),np.zeros_like(thetas)] if polarized else np.zeros_like(thetas)
R01 = fresnel(np.cos(thetas), eta1/eta0, polarized=polarized) if eta1 != eta0 else zeros
ones = np.ones_like(R01)
T01 = ones - R01
thetas_t1 = clamp(np.arcsin(eta0 / eta1 * np.sin(thetas)))
    thetas_t1 = np.where(np.isnan(thetas_t1), 0.0, thetas_t1)  # `is not np.nan` never detects NaN element-wise
R10 = fresnel(np.cos(thetas_t1), eta0/eta1, polarized=polarized) if eta1 != eta0 else zeros
R12 = fresnel(np.cos(thetas_t1), eta2/eta1, polarized=polarized) if eta1 != eta2 else zeros
T12 = ones - R12
thetas_t2 = clamp(np.arcsin(eta1/eta2 * np.sin(thetas_t1)))
    thetas_t2 = np.where(np.isnan(thetas_t2), 0.0, thetas_t2)  # same element-wise NaN guard as above
R21 = fresnel(np.cos(thetas_t2), eta1/eta2, polarized=polarized) if eta1 != eta2 else zeros
k = 0.0 if kappa3 == 0.0 else kappa3/eta2
R23 = fresnel(np.cos(thetas_t2), eta3/eta2, k, polarized=polarized)
if polarized:
res = []
for i in range(2):
R13 = add_with_absorption(R12[i], R23[i], tau2, thetas_t2)
R03 = add_with_absorption(R01[i], R13, tau1, thetas_t1)
#R13 = add(R12[i], T12[i], R21[i], R23[i])
#R03 = add(R01[i], T01[i], R10[i], R13)
res.append(np.where(np.isfinite(R03), R03, ones[0]))
return res
#R13 = add(R12, T12, R21, R23)
#R03 = add(R01, T01, R10, R13)
R13 = add_with_absorption(R12, R23, tau2, thetas_t2)
R03 = add_with_absorption(R01, R13, tau1, thetas_t1)
return np.where(np.isfinite(R03), R03, 1.0)
| 5,344,392
|
def text_expand(context):
"""
Give context, pick out the bible indexes, turn them into normalized scripture, and put the scripture back into the context
"""
output = []
end = 0
for m in candidate_filter(context):
output.append(m.group('out'))
try:
bucket = get_bucket(m)
formated = format_bucket(bucket)
output.extend(['《',':'.join(list(formated)), '》'])
except KeyError:
output.append(m.group(0))
except AttributeError:
output.append(m.group(0))
except:
            logging.warning(context)  # log the context itself; print() returns None
end = m.end()
output.append(context[end:])
return ''.join(output)
| 5,344,393
|
def json_custom_parser(obj):
"""
A custom json parser to handle json.dumps calls properly for Decimal and
Datetime data types.
"""
if not isinstance(obj, string_types) and isinstance(obj, Iterable):
return list(obj)
elif isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date):
dot_ix = 19 # 'YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM'.find('.')
return obj.isoformat()[:dot_ix]
else:
raise TypeError(obj)
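
# Usage sketch: passed as the `default` hook so json.dumps can serialize dates and
# non-string iterables; unrecognised objects still raise TypeError. Assumes the
# module-level names the function relies on (string_types, Iterable) are present.
import json
_payload = {"when": datetime.datetime(2020, 1, 2, 3, 4, 5), "tags": {"a", "b"}}
_encoded = json.dumps(_payload, default=json_custom_parser)
# e.g. '{"when": "2020-01-02T03:04:05", "tags": ["a", "b"]}' (set order may vary)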
| 5,344,394
|
def like():
""" Function to automatically like a picture
:return: 0 or 1 where 1 = one picture liked
:rtype: int
"""
like_icons = driver.find_elements_by_xpath("//*[contains(@aria-label, 'Like')]")
unlike_icons = driver.find_elements_by_xpath("//*[contains(@aria-label, 'Unlike')]")
for icon in unlike_icons or like_icons:
height = icon.get_attribute("height")
fill_color = icon.get_attribute("fill")
# Ensuring it is the correct icon and that it has not been liked before
if height == "24" and fill_color == "#ed4956":
# Fill color of a post already liked is #ed4956
print("Picture already liked.")
return 0
elif height == "24" and fill_color == "#262626":
# Fill color of post NOT liked is #262626
# ('..') is used here to fetch the parent of icon using xpath
like_button = icon.find_element_by_xpath('..')
like_button.click()
print("Picture liked :)")
sleep(2)
return 1
else: # pragma: no cover
pass
| 5,344,395
|
def entity_tsv(args):
""" Get list of entities in TSV format. Download files for which the
encoding is undetected (e.g. ZIP archives). """
r = fapi.get_entities_tsv(args.project, args.workspace,
args.entity_type, args.attrs, args.model)
fapi._check_response_code(r, 200)
if r.apparent_encoding is not None:
return r.content.decode(r.apparent_encoding)
else:
content = r.headers['Content-Disposition'].split('; ')[-1].split('=')
if len(content) == 2 and content[0] == 'filename':
filename = content[1]
if os.path.exists(filename) and (args.yes or not _confirm_prompt(
'This will overwrite {}'.format(filename))):
return
with open(filename, 'wb') as outfile:
for chunk in r:
outfile.write(chunk)
print('Downloaded {}.'.format(filename))
return
else:
eprint("Unable to determine name of file to download.")
return 1
| 5,344,396
|
def compare_files(file_name1, file_name2):
"""
Compare two files, line by line, for equality.
Arguments:
file_name1 (str or unicode): file name.
file_name2 (str or unicode): file name.
Returns:
bool: True if files are equal, False otherwise.
"""
with open(file_name1) as file1, open(file_name2) as file2:
for line1, line2 in zip(file1, file2):
if line1 != line2:
                return False
return True
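
# Usage sketch with throwaway files (paths are illustrative temporaries).
import os
import tempfile
with tempfile.TemporaryDirectory() as _tmp:
    _a, _b = os.path.join(_tmp, 'a.txt'), os.path.join(_tmp, 'b.txt')
    for _name in (_a, _b):
        with open(_name, 'w') as _fh:
            _fh.write('line one\nline two\n')
    assert compare_files(_a, _b) is True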
| 5,344,397
|
def print_cv_folds_dates(dates: Dict[Any, Any], freq: str) -> None:
"""Displays a message in streamlit dashboard with cross-validation folds' dates.
Parameters
----------
dates : Dict
Dictionary containing cross-validation dates information.
freq : str
Dataset frequency.
"""
horizon, cutoffs_text = dates["folds_horizon"], []
for i, cutoff in enumerate(dates["cutoffs"]):
cutoffs_text.append(f"""Fold {i + 1}: """)
if freq in ["s", "H"]:
cutoffs_text.append(
f"""Train: \n"""
f"""[ {dates['train_start_date'].strftime('%Y/%m/%d %H:%M:%S')} - """
f"""{cutoff.strftime('%Y/%m/%d %H:%M:%S')} ] """
)
cutoffs_text.append(
f"""Valid: \n"""
f"""] {cutoff.strftime('%Y/%m/%d %H:%M:%S')} - """
f"""{(cutoff + timedelta(seconds=convert_into_nb_of_seconds(freq, horizon)))
.strftime('%Y/%m/%d %H:%M:%S')} ] \n"""
)
else:
cutoffs_text.append(
f"""Train: \n"""
f"""[ {dates['train_start_date'].strftime('%Y/%m/%d')} - """
f"""{cutoff.strftime('%Y/%m/%d')} ] """
)
cutoffs_text.append(
f"""Valid: \n"""
f"""] {cutoff.strftime('%Y/%m/%d')} - """
f"""{(cutoff + timedelta(days=convert_into_nb_of_days(freq, horizon)))
.strftime('%Y/%m/%d')} ] \n"""
)
cutoffs_text.append("")
st.success("\n".join(cutoffs_text))
| 5,344,398
|
def like(request, pk):
"""Add a user to those who liked the post.
Only authenticated users are able to like a post.
"""
if request.method == 'POST':
# query the post in question
try:
post = Post.objects.get(pk=pk)
except Post.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
# add a user to the list of those who liked this post
# won't duplicate the relationship
post.users_who_liked.add(request.user)
return Response({'message': f'Liked the post {pk}.'})
| 5,344,399
|