content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import tempfile
def representative_sample(X, num_samples, save=False):
    """Sample vectors in X, preferring edge cases and vectors farthest from
    other vectors already in the sample set.

    :param X: 2-D array-like (or DataFrame) of shape (N, M) of vectors.
    :param num_samples: number of row indices to select.
    :param save: False to keep the Annoy index in memory only; True to
        round-trip it through a temporary file; a str/bytes path to save
        and reload the index from that file.
    :returns: int array of length ``num_samples`` of selected row indices
        (entries left as -1 if not enough candidates were found).
    """
    X = X.values if hasattr(X, 'values') else np.array(X)
    N, M = X.shape
    rownums = np.arange(N)
    np.random.shuffle(rownums)
    idx = AnnoyIndex(M)
    for i, row in enumerate(X):
        idx.add_item(i, row)
    # log2(N) trees is a cheap heuristic trading accuracy for build time.
    idx.build(int(np.log2(N)) + 1)
    if save:
        if isinstance(save, (bytes, str)):
            idxfilename = save
        else:
            idxfile = tempfile.NamedTemporaryFile(delete=False)
            idxfile.close()
            idxfilename = idxfile.name
        idx.save(idxfilename)
        idx = AnnoyIndex(M)
        # FIX: was `idx.load(idxfile.name)`, which raised NameError when
        # `save` was given as a path string (idxfile never created).
        idx.load(idxfilename)
    samples = -1 * np.ones(shape=(num_samples,), dtype=int)
    samples[0] = rownums[0]
    # FIXME: some integer determined by N and num_samples and distribution
    j, num_nns = 0, min(1000, int(num_samples / 2. + 1))
    for i in rownums:
        # FIX: stop once the sample set is full instead of writing past it.
        if j + 1 >= num_samples:
            break
        if i in samples:
            continue
        nns = idx.get_nns_by_item(i, num_nns)
        # FIXME: pick vector furthest from past K (K > 1) points or outside of a hypercube
        #        (sized to uniformly fill the space) around the last sample
        candidates = np.setdiff1d(nns, samples)
        # FIX: guard against every neighbor already being sampled.
        if not len(candidates):
            continue
        samples[j + 1] = candidates[-1]
        # FIX: was `len(num_nns)` (TypeError on int); also keep num_nns an int.
        if len(nns) < num_samples / 3.:
            num_nns = min(N, int(1.3 * num_nns) + 1)
        j += 1
    return samples
import os
import logging
def _heal(batch):
    """Heal whisper files.

    This method will backfill data present in files in the
    staging dir if not present in the local files for
    points between 'start' and 'stop' (unix timestamps).

    :param batch: work item exposing ``metrics_fs`` (relative metric file
        paths), ``staging_dir``, and — depending on the carbonate feature
        flags — ``start_time``/``end_time`` and ``overwrite``.
    :returns: the same ``batch`` object, so callers can collect/chain it.
    """
    for metric in batch.metrics_fs:
        src = os.path.join(batch.staging_dir, metric)
        # NOTE(review): STORAGE_DIR is a module-level constant not visible
        # in this chunk — presumably the local whisper storage root.
        dst = os.path.join(STORAGE_DIR, metric)
        try:
            kwargs = {}
            # Older carbonate releases lack these keyword arguments, so
            # only pass them when the installed version supports them.
            if HAVE_HEAL_WITH_TIME_RANGE:
                kwargs['start_time'] = batch.start_time
                kwargs['end_time'] = batch.end_time
            if HAVE_HEAL_WITH_OVERWRITE:
                kwargs['overwrite'] = batch.overwrite
            carbonate_sync.heal_metric(src, dst, **kwargs)
        except Exception as e:
            # Best effort: log the failure and continue with the rest.
            logging.exception("Failed to heal %s" % dst)
    return batch
def fix_saving_name(name):
    """Neutralize backslashes in Arch-Vile frame names.

    Trailing NUL padding is stripped and every backslash is replaced
    with a backtick so the name is safe to use when saving.
    """
    trimmed = name.rstrip('\0')
    return trimmed.replace('\\', '`')
from typing import OrderedDict
def read_dataset_genomes(dataset):
    """Read genomes of the given dataset.

    Args:
        dataset: instance of datasets.GenomesDataset

    Returns:
        list of genome.Genome
    """
    genomes = []

    if not dataset.is_multi_chr():
        # Single-chromosome datasets: every sequence in every FASTA file
        # is a complete genome; sequence headers are ignored.
        logger.debug("Reading dataset %s with one chromosome per genome",
                     dataset.__name__)
        for fn in dataset.fasta_paths:
            for seq in read_fasta(fn).values():
                genomes.append(genome.Genome.from_one_seq(seq))
        return genomes

    # The genomes in this dataset have more than one chromosome.
    logger.debug("Reading dataset %s broken up by chromosome",
                 dataset.__name__)
    if dataset.seq_header_to_genome is None:
        # One FASTA file per genome (sample); each sequence in a file is a
        # chromosome, and its header names a chromosome in dataset.chrs.
        for fn in dataset.fasta_paths:
            seq_map = read_fasta(fn)
            chrs = OrderedDict(
                (dataset.seq_header_to_chr(header), seq)
                for header, seq in seq_map.items()
            )
            genomes.append(genome.Genome.from_chrs(chrs))
    else:
        # Each FASTA file holds sequences from one or more genomes; each
        # header names both a genome ID and a chromosome in dataset.chrs.
        genomes_by_id = defaultdict(list)
        for fn in dataset.fasta_paths:
            for header, seq in read_fasta(fn).items():
                genome_id = dataset.seq_header_to_genome(header)
                chrom = dataset.seq_header_to_chr(header)
                genomes_by_id[genome_id].append((chrom, seq))
        for seq_tups in genomes_by_id.values():
            genomes.append(genome.Genome.from_chrs(OrderedDict(seq_tups)))
    return genomes
import os
import re
def _parse_sum_ouput(exit_code):
    """Parse the SUM output log file.

    This method parses through the SUM log file in the
    default location to return the SUM update status. Sample summary
    string:

    "Summary: The installation of the component failed. Status of updated
    components: Total: 5 Success: 4 Failed: 1"

    :param exit_code: An integer returned by SUM after command execution.
    :returns: For exit code 3, a dict with the mapped 'Summary' message and
        the encoded 'Log Data'. For exit codes 0, 1, 2 and 253 (when the
        output file exists), a dict whose 'Summary' includes statistics of
        the updated/failed components plus the 'Log Data'. In every other
        case the string "UPDATE STATUS: UNKNOWN" is returned.
    """
    log_data = _get_log_file_data_as_encoded_content()
    if exit_code == 3:
        # No per-component statistics are available for this code; return
        # only the mapped message and the raw log.
        return {"Summary": EXIT_CODE_TO_STRING.get(exit_code),
                "Log Data": log_data}
    if exit_code in (0, 1, 2, 253):
        if os.path.exists(OUTPUT_FILES[0]):
            with open(OUTPUT_FILES[0], 'r') as f:
                output_data = f.read()
            # Keep only the section between 'Deployed Components:' and
            # 'Exit status:', which lists one block per component.
            ret_data = output_data[(output_data.find('Deployed Components:')
                                    + len('Deployed Components:')):
                                   output_data.find('Exit status:')]
            failed = 0
            success = 0
            # Component blocks are separated by blank lines; a block that
            # does not mention 'Success' is counted as a failure.
            for line in re.split('\n\n', ret_data):
                if line:
                    if 'Success' not in line:
                        failed += 1
                    else:
                        success += 1
            return {
                "Summary": (
                    "%(return_string)s Status of updated components: Total: "
                    "%(total)s Success: %(success)s Failed: %(failed)s." %
                    {"return_string": EXIT_CODE_TO_STRING.get(exit_code),
                     "total": (success + failed), "success": success,
                     "failed": failed}),
                "Log Data": log_data}
    return "UPDATE STATUS: UNKNOWN"
def compute_metrics(logits, labels, lengths):
    """Compute loss and sequence-level accuracy for a batch.

    A sequence counts as correct only when every unmasked token matches;
    this equals inference accuracy, since teacher forcing is irrelevant
    when all outputs are correct.
    """
    loss = cross_entropy_loss(logits, labels, lengths)
    per_token_correct = jnp.argmax(logits, -1) == jnp.argmax(labels, -1)
    masked_correct = mask_sequences(per_token_correct, lengths)
    sequence_correct = jnp.sum(masked_correct, axis=-1) == lengths
    return {
        'loss': loss,
        'accuracy': jnp.mean(sequence_correct),
    }
def replace_prelu(input_graph_def: util.GraphDef) -> util.GraphDef:
    """
    Replace all Prelu-activations in the graph with supported TF-operations.

    Args:
        input_graph_def: TF graph definition to examine

    Returns:
        Updated copy of the input graph with Prelu-nodes replaced by supported
        TF operations
    """
    def _is_prelu(node):
        return node.op == 'Prelu'

    return util.replace_matching_nodes(input_graph_def, _is_prelu, _split_prelu)
def tf_parse_filename_classes(filename, normalization='None', normalization_factor=1, augmentation=False):
    """Take a batch of filenames and create point cloud and label batches.

    Wraps a Python parsing function in ``tf.py_function`` so it can be used
    inside a ``tf.data`` pipeline.

    :param filename: batch of file paths ('.npy' arrays or any point-cloud
        format open3d can read); the class name is taken from the second
        path component (e.g. 'data/airplane/x.npy' -> 'airplane').
    :param normalization: 'Single' scales each cloud so its farthest point
        lies on the unit sphere; any other value leaves the cloud unscaled.
    :param normalization_factor: kept for interface compatibility; unused
        by the active code path.
    :param augmentation: when True, apply a random rotation about z and
        Gaussian jitter to each cloud.
    :returns: (point_clouds, labels) tensors of dtype float32 / int32.
    """
    idx_lookup = {'airplane': 0, 'bathtub': 1, 'bed': 2, 'bench': 3, 'bookshelf': 4,
                  'bottle': 5, 'bowl': 6, 'car': 7, 'chair': 8, 'cone': 9,
                  'cup': 10, 'curtain': 11, 'desk': 12, 'door': 13, 'dresser': 14,
                  'flower_pot': 15, 'glass_box': 16, 'guitar': 17, 'keyboard': 18,
                  'lamp': 19, 'laptop': 20, 'mantel': 21, 'monitor': 22, 'night_stand': 23,
                  'person': 24, 'piano': 25, 'plant': 26, 'radio': 27, 'range_hood': 28,
                  'sink': 29, 'sofa': 30, 'stairs': 31, 'stool': 32, 'table': 33,
                  'tent': 34, 'toilet': 35, 'tv_stand': 36, 'vase': 37, 'wardrobe': 38,
                  'xbox': 39, 'sockettt': 0, 'sockettf': 1, 'can': 2, 'tin_can': 3, 'mug': 4, 'jar': 5, 'AC1': 0, 'AC5_2': 1,
                  'AC6_2': 2, 'AC8_20200518': 3, 'AC9': 4, 'AC12': 5}

    def parse_filename_classes(filename_batch, normalization='None', normalization_factor=1, augmentation=False):
        pt_clouds = []
        labels = []
        for filename in filename_batch:
            filename_str = filename.numpy().decode()
            if tf.strings.split(filename, '.')[-1].numpy().decode() == 'npy':
                # Point cloud stored as a plain numpy array.
                pt_cloud = np.load(filename_str)
            else:
                # Anything else is delegated to open3d.
                pc = o3d.io.read_point_cloud(filename_str)
                pt_cloud = np.asarray(pc.points)
            # Center the cloud on its centroid.
            center = [np.mean(pt_cloud[:, 0]), np.mean(pt_cloud[:, 1]), np.mean(pt_cloud[:, 2])]
            pt_cloud = pt_cloud - np.asarray(center)
            if normalization == 'Single':
                # Scale so the farthest point lies on the unit sphere.
                pt_cloud = pt_cloud / np.max(np.linalg.norm(pt_cloud, axis=1))
            # Add rotation and jitter to point cloud
            if augmentation:
                theta = np.random.random() * 2 * 3.141
                A = np.array([[np.cos(theta), -np.sin(theta), 0],
                              [np.sin(theta), np.cos(theta), 0],
                              [0, 0, 1]])
                offsets = np.random.normal(0, 0.01, size=pt_cloud.shape)
                pt_cloud = np.matmul(pt_cloud, A) + offsets
            # Create classification label from the directory name.
            obj_type = filename_str.split('/')[1]  # e.g., airplane, bathtub
            labels.append(idx_lookup[obj_type])
            pt_clouds.append(pt_cloud)
        return np.stack(pt_clouds), np.stack(labels)

    x, y = tf.py_function(parse_filename_classes, [filename, normalization, normalization_factor, augmentation], [tf.float32, tf.int32])
    x.set_shape([None for _ in range(3)])
    y.set_shape([None for _ in range(1)])
    return x, y
def next_traj_points(dimension: int, last_point):
    """
    :param dimension: dimension of our fake latent trajectory
    :param last_point: the last point that was sent to the client as a numpy array
    :return: (bits, new_point) — a bitstring encoding 3 new points from a
        noisy Lorenz system (scaled down for visualization), and the
        unscaled point to seed the next call
    """
    # Euler step size
    step = 0.001
    # Lorenz parameters
    beta = 2.666666
    sigma = 10
    rho = 28
    point = np.zeros((4, dimension), dtype='float32')
    point[0] = last_point
    # compute the next few points
    for i in range(1, 4):
        x, y, z = point[i - 1, 0], point[i - 1, 1], point[i - 1, 2]
        # Lorenz system
        dx, dy, dz = sigma*(y - x), x*(rho - z) - y, x*y - beta*z
        point[i, 0] = x + step*dx
        point[i, 1] = y + step*dy
        point[i, 2] = z + step*dz
        # simple uniform noise on top of the Euler step
        # FIX: np.rand does not exist (use np.random.rand), and the old
        # code overwrote the freshly computed step with point[i-1] + noise.
        point[i] += np.random.rand(dimension).astype('float32')
    # FIX: copy so the in-place scaling below does not mutate the point we
    # hand back as the seed for the next call (point[2] is a view).
    new_point = point[2].copy()
    # we will actually send a scaled down version to the server for
    # visualization purposes
    point *= 0.01
    string = bs.BitArray()
    for i in range(1, 4):
        for j in range(dimension):
            # FIX: bs.Bits() cannot take a bare float; pack it explicitly
            # as a 32-bit IEEE float — TODO confirm the client decodes f32.
            string = string + bs.Bits(float=float(point[i, j]), length=32)
    return string, new_point
from six.moves import urllib
def has_internet():
    """
    Test if Internet is available.

    We attempt to open http://www.sagemath.org with a one-second timeout;
    a URLError within that window is treated as "no internet".

    EXAMPLES::

        sage: from sage.doctest.external import has_internet
        sage: has_internet() # random
        True
    """
    try:
        urllib.request.urlopen("http://www.sagemath.org", timeout=1)
    except urllib.error.URLError:
        return False
    return True
def read_warfle_text(path: str) -> str:
    """Return the text contents of a *.warfle file.

    :param path: path to the file to read.
    :raises OSError: if the file cannot be opened or read.  (The previous
        version re-wrapped every error as a bare ``Exception``, discarding
        the original type; OSError subclasses Exception, so existing
        ``except Exception`` callers still work.)
    """
    with open(path, "r") as text:
        return text.read()
from typing import List
from typing import OrderedDict
def from_mido(midi: MidiFile, duplicate_note_mode: str = "fifo") -> Music:
    """Return a mido MidiFile object as a Music object.

    Parameters
    ----------
    midi : :class:`mido.MidiFile`
        Mido MidiFile object to convert.
    duplicate_note_mode : {'fifo', 'lifo', 'all'}, default: 'fifo'
        Policy for dealing with duplicate notes. When a note off
        message is presented while there are multiple corresponding note
        on messages that have not yet been closed, we need a policy to
        decide which note on messages to close.

        - 'fifo' (first in first out): close the earliest note on
        - 'lifo' (last in first out): close the latest note on
        - 'all': close all note on messages

    Returns
    -------
    :class:`muspy.Music`
        Converted Music object.

    """
    # Normalize the mode once; it is compared several times below.
    mode = duplicate_note_mode.lower()
    if mode not in ("fifo", "lifo", "all"):
        raise ValueError(
            "`duplicate_note_mode` must be one of 'fifo', 'lifo' and " "'all'."
        )

    def _get_active_track(t_idx, program, channel):
        """Return the active track, creating it on first access."""
        key = (program, channel)
        if key in tracks[t_idx]:
            return tracks[t_idx][key]
        tracks[t_idx][key] = Track(program, _is_drum(channel))
        return tracks[t_idx][key]

    # Raise MIDIError if the MIDI file is of Type 2 (i.e., asynchronous)
    if midi.type == 2:
        raise MIDIError("Type 2 MIDI file is not supported.")
    # Raise MIDIError if ticks_per_beat is not positive
    if midi.ticks_per_beat < 1:
        raise MIDIError("`ticks_per_beat` must be positive.")

    time = 0
    song_title = None
    tempos: List[Tempo] = []
    key_signatures: List[KeySignature] = []
    time_signatures: List[TimeSignature] = []
    lyrics: List[Lyric] = []
    annotations: List[Annotation] = []
    copyrights = []

    # Create a list to store converted tracks
    tracks: List[OrderedDict] = [
        OrderedDict() for _ in range(len(midi.tracks))
    ]
    # Create a list to store track names
    track_names = [None] * len(midi.tracks)

    # Iterate over MIDI tracks
    for track_idx, midi_track in enumerate(midi.tracks):
        # Set current time to zero
        time = 0
        # Keep track of the program used in each channel
        channel_programs = [0] * 16
        # Keep track of active note on messages
        active_notes = defaultdict(list)

        # Iterate over MIDI messages
        for msg in midi_track:
            # Update current time (delta time is used in a MIDI message)
            time += msg.time

            # === Meta Data ===

            # Tempo messages
            if msg.type == "set_tempo":
                tempos.append(
                    Tempo(time=int(time), qpm=float(tempo2bpm(msg.tempo)))
                )
            # Key signature messages
            elif msg.type == "key_signature":
                if msg.key.endswith("m"):
                    mode_str = "minor"
                    root = note_str_to_note_num(msg.key[:-1])
                else:
                    mode_str = "major"
                    root = note_str_to_note_num(msg.key)
                key_signatures.append(
                    KeySignature(time=int(time), root=root, mode=mode_str)
                )
            # Time signature messages
            elif msg.type == "time_signature":
                time_signatures.append(
                    TimeSignature(
                        time=int(time),
                        numerator=int(msg.numerator),
                        denominator=int(msg.denominator),
                    )
                )
            # Lyric messages
            elif msg.type == "lyrics":
                lyrics.append(Lyric(time=int(time), lyric=str(msg.text)))
            # Marker messages
            elif msg.type == "marker":
                annotations.append(
                    Annotation(
                        time=int(time),
                        annotation=str(msg.text),
                        group="marker",
                    )
                )
            # Text messages
            elif msg.type == "text":
                annotations.append(
                    Annotation(
                        time=int(time), annotation=str(msg.text), group="text"
                    )
                )
            # Copyright messages
            elif msg.type == "copyright":
                copyrights.append(str(msg.text))

            # === Track specific Data ===

            # Track name messages
            elif msg.type == "track_name":
                if midi.type == 0 or track_idx == 0:
                    song_title = msg.name
                else:
                    track_names[track_idx] = msg.name
            # Program change messages
            elif msg.type == "program_change":
                # Change program of the channel
                channel_programs[msg.channel] = msg.program
            # Note on messages
            elif msg.type == "note_on" and msg.velocity > 0:
                # Will later be closed by a note off message
                active_notes[(msg.channel, msg.note)].append(
                    (time, msg.velocity)
                )
            # Note off messages
            # NOTE: A note on message with a zero velocity is also
            # considered a note off message
            elif msg.type == "note_off" or (
                msg.type == "note_on" and msg.velocity == 0
            ):
                # Skip it if there is no active notes
                note_key = (msg.channel, msg.note)
                if not active_notes[note_key]:
                    continue

                # Get the active track
                program = channel_programs[msg.channel]
                track = _get_active_track(track_idx, program, msg.channel)

                # NOTE: There is no way to disambiguate duplicate notes
                # (of the same pitch on the same channel). Thus, we
                # need a policy for handling duplicate notes.

                # 'FIFO': (first in first out) close the earliest note
                if mode == "fifo":
                    onset, velocity = active_notes[note_key][0]
                    track.notes.append(
                        Note(
                            time=int(onset),
                            pitch=int(msg.note),
                            duration=int(time - onset),
                            velocity=int(velocity),
                        )
                    )
                    del active_notes[note_key][0]

                # 'LIFO': (last in first out) close the latest note on
                elif mode == "lifo":
                    onset, velocity = active_notes[note_key][-1]
                    track.notes.append(
                        Note(
                            time=int(onset),
                            pitch=int(msg.note),
                            duration=int(time - onset),
                            velocity=int(velocity),
                        )
                    )
                    del active_notes[note_key][-1]

                # 'all': close all note on messages
                # FIX: this branch previously compared against
                # "close_all", which validation never accepts, so the
                # documented 'all' mode silently dropped note offs.
                elif mode == "all":
                    for onset, velocity in active_notes[note_key]:
                        track.notes.append(
                            Note(
                                time=int(onset),
                                pitch=int(msg.note),
                                duration=int(time - onset),
                                velocity=int(velocity),
                            )
                        )
                    del active_notes[note_key]

            # Control change messages
            elif msg.type == "control_change":
                # Get the active track
                program = channel_programs[msg.channel]
                track = _get_active_track(track_idx, program, msg.channel)

                # Append the control change message as an annotation
                track.annotations.append(
                    Annotation(
                        time=int(time),
                        annotation={
                            "number": int(msg.control),
                            "value": int(msg.value),
                        },
                        group="control_changes",
                    )
                )

            # End of track message
            elif msg.type == "end_of_track":
                break

        # Close all active notes at the end of the track, using the final
        # time as the implicit note off.
        for (channel, note), note_ons in active_notes.items():
            program = channel_programs[channel]
            track = _get_active_track(track_idx, program, channel)
            for onset, velocity in note_ons:
                track.notes.append(
                    Note(
                        time=int(onset),
                        pitch=int(note),
                        duration=int(time - onset),
                        velocity=int(velocity),
                    )
                )

    music_tracks = []
    for track, track_name in zip(tracks, track_names):
        for sub_track in track.values():
            sub_track.name = track_name
        music_tracks.extend(track.values())

    # Sort notes
    for music_track in music_tracks:
        music_track.notes.sort(
            key=attrgetter("time", "pitch", "duration", "velocity")
        )

    # Meta data
    metadata = Metadata(
        title=str(song_title),
        source_format="midi",
        copyright=" ".join(copyrights) if copyrights else None,
    )

    return Music(
        metadata=metadata,
        resolution=int(midi.ticks_per_beat),
        tempos=tempos,
        key_signatures=key_signatures,
        time_signatures=time_signatures,
        lyrics=lyrics,
        tracks=music_tracks,
    )
def iterable_validator(iterable_type, member_type):
    # type: (ISINSTANCE, ISINSTANCE) -> Callable[[object, Attribute, Iterable[Any]], None]
    """``attrs`` validator to perform deep type checking of iterables."""

    def _validate_iterable(instance, attribute, value):
        # type: (object, Attribute, Iterable[Any]) -> None
        # pylint: disable=unused-argument
        """Validate that a dictionary is structured as expected.

        :raises TypeError: if ``value`` is not of ``iterable_type`` type
        :raises TypeError: if ``value`` members are not all of ``member_type`` type
        """
        if not isinstance(value, iterable_type):
            raise TypeError('"{name}" must be a {type}'.format(name=attribute.name, type=iterable_type))

        for element in value:
            if isinstance(element, member_type):
                continue
            raise TypeError(
                '"{name}" members must all be of type "{type}"'.format(name=attribute.name, type=member_type)
            )

    return _validate_iterable
def sample_filtering(df, metadata, filter_by):
    """Filter samples based on selected features and values.

    :param df: input DataFrame containing the Formularity columns plus one
        column per sample ID.
    :param metadata: DataFrame with at least a 'SampleID' column and the
        column named by ``filter_by[0]``.
    :param filter_by: pair of (column name, comma-separated values) used to
        select which samples to keep.
    :returns: (filtered input CSV filename, filtered metadata CSV filename);
        both files are written to the current working directory.
    """
    # Get the variable and values specified for sample filtering
    filter_col = filter_by[0]
    filter_values = filter_by[1].split(sep=',')

    # Saving a new metadata file containing only the samples remaining
    # after filtering. FIX: DataFrame.append was removed in pandas 2.0 and
    # was quadratic; pd.concat of per-value selections preserves the same
    # row order (grouped by filter value) in a single call.
    filt_metadata = pd.concat(
        [metadata[metadata[filter_col] == value] for value in filter_values]
    )
    filt_metadata_file = 'filtered_metadata_norm_test.csv'
    filt_metadata.to_csv(filt_metadata_file, index=False)

    # Saving a new input file containing only the samples remaining after
    # filtering: the fixed Formularity columns plus the surviving samples.
    from_formularity = [
        'Mass', 'C', 'H', 'O', 'N', 'C13', 'S', 'P', 'Na', 'El_comp', 'Class',
        'NeutralMass', 'Error_ppm', 'Candidates'
    ]
    col_df = filt_metadata['SampleID'].to_list()
    from_formularity.extend(col_df)
    filt_df = df[from_formularity]
    filt_df_file = 'filtered_input_norm_test.csv'
    filt_df.to_csv(filt_df_file, index=False)

    return filt_df_file, filt_metadata_file
def blog_post_historia(request, slug, template="blog/blog_post_historia.html"):
    """Render the clinical-history page for one patient, selected by slug.

    The patient is looked up among the posts published for the requesting
    user; their studies are listed newest-first. A slug-specific template
    is tried before the default one.
    """
    published = BlogPost.objects.published(for_user=request.user)
    paciente = get_object_or_404(published, title=slug)
    lista_estudios = Estudio.objects.all().filter(
        paciente__title=slug).order_by("-created")
    templates = [
        u"blog/blog_post_historia_%s.html" % str(slug),
        template,
    ]
    context = {"estudios": lista_estudios, "paciente": paciente}
    return render(request, templates, context)
def user_is_registered_or_more(user_id):
    """Check that user is registered, moderator, or admin.

    Returns False when the user or their access mapping is missing, or
    when their access level is below the configured registered level.
    """
    user = Users.query.filter_by(UserID=user_id).first()
    access = UsersAccessMapping.query.filter_by(UserID=user_id).first()
    if user is None or access is None:
        return False
    return access.UsersAccessLevelID >= CONFIG.REGISTERED_LEVEL
import os
def avi_common_argument_spec():
    """
    Returns common arguments for all Avi modules

    Controller credentials default to the AVI_* environment variables.
    :return: dict
    """
    env = os.environ.get
    return {
        'controller': {'default': env('AVI_CONTROLLER', '')},
        'username': {'default': env('AVI_USERNAME', '')},
        'password': {'default': env('AVI_PASSWORD', ''), 'no_log': True},
        'tenant': {'default': 'admin'},
        'tenant_uuid': {'default': ''},
    }
from typing import Union
from typing import Sequence
from typing import Optional
from typing import Type
def run(
    cmds: Union[str, Sequence[Union[str, Sequence[str]]]],
    shell: Optional[Union[str, bool]] = None,
    mode: Type[Mode] = str,
    block: bool = True,
    **kwargs
) -> Processes:
    """
    Runs several commands that pipe to each other in a python-aware way.

    Args:
        cmds: Any number of commands (lists or strings) to pipe together. This
            may be a string, in which case it is split on the pipe ('|')
            character to get the component commands.
        shell: Can be a boolean specifying whether to execute the command
            using the shell, or a string value specifying the shell executable
            to use (which also implies shell=True). If None, the value is
            auto-detected - `True` if `cmds` is a string otherwise `False`.
            If `True` the command is executed via the default shell (which,
            according to the `subprocess` docs, is `/bin/sh`).
        mode: I/O mode; can be str (text) or bytes (raw).
        block: Whether to block until all processes have completed.
        kwargs: Additional keyword arguments to pass to :class:`Processes`
            constructor.

    Returns:
        A :class:`subby.Processes` object.

    Raises:
        ValueError: if `cmds` is an empty list.
        subprocess.CalledProcessError: if any subprocess in pipe returns exit
            code not 0.

    Examples:
        # Pipe commands together and write the output to a file:
        run([['dx', 'download', 'file-xxxx'], ['gunzip']], stdout='out.fasta')
        # Run a single shell command and capture its output:
        file_id = run('gzip file.txt | dx upload - --brief', block=True).output
    """
    if isinstance(cmds, str):
        # A single pipeline string: split on '|' and run through the shell
        # unless the caller said otherwise.
        if shell is None:
            shell = True
        cmds = [part.strip() for part in cmds.split("|")]
    else:
        if shell is None:
            shell = False
        cmds = list(cmds)
        if not cmds:
            raise ValueError("'cmds' cannot be an empty list")

    # Normalize each command to the representation the execution mode needs:
    # argv lists without a shell, flat strings with one.
    if shell is False:
        cmds = utils.command_strings_to_lists(cmds)
    else:
        cmds = utils.command_lists_to_strings(cmds)

    if isinstance(shell, str):
        executable = shell
    elif shell is True:
        executable = DEFAULT_EXECUTABLE
    else:
        executable = None

    processes = Processes(
        cmds, mode=mode, shell=(shell is not False), executable=executable, **kwargs
    )
    if block:
        with processes:
            processes.block()
    else:
        processes.run()
    return processes
def _infer_labels(center_e):
"""Create labels based on center extrema."""
# Infer labels
if center_e == 'trough':
labels = ['Trough', 'Peak', 'Inflection']
keys = ['sample_trough', 'sample_next_peak', 'sample_end']
elif center_e == 'peak':
labels = ['Peak', 'Trough', 'Inflection']
keys = ['sample_peak', 'sample_next_trough', 'sample_end']
return labels, keys | 854a6bbe1c45a806d3c6ecd15bee11f3ec9496a4 | 26,918 |
def MXXXtoMYYY(redshift  =      0.3,
               MXXX      =      6E14,
               CXXX      =      3.0,
               wrt       =      "crit",
               new_wrt   =      "crit",
               XXX       =      500.0,
               YYY       =      500.0,
               cosmo     =      cosmo):
    """
    It converts the (MXXX,CXXX) into (MYYY,CYYY) for a halo at a given redshift assuming spherical symmetry.

    (1) XXX and YYY are float and negative value means virial estimation.
    (2) it returns (the MXXX,CXXX,RXXX,MYYY,CYYY,RYYY,rhos,rs)
        where MXXX, MYYY are in the Msun, RXXX and RYYY are in Mpc.
        rhos is in Msun/Mpc^3 and rs is in Mpc.

    ---
    a. It first solves the NFW model parameters - rhos, rs.
       rhos = FactorO(redshift,XXX) * (CXXX**3 / 3) / (log(1+CXXX) - CXXX/(1+CXXX))
       where FactorO(z,XXX) = OverDen(z) * rho_m(z)   if XXX = VIR
                              XXX * rho_crit(z)       if XXX != VIR
    b. It solves for CYYY (via Newton's method seeded at CXXX):
       FactorO(redshift,YYY) * (CYYY**3 / 3) / (log(1+CYYY) - CYYY/(1+CYYY))
       = FactorO(redshift,XXX) * (CXXX**3 / 3) / (log(1+CXXX) - CXXX/(1+CXXX))
    c. Solve for rs:
       rs**3 = MXXX / ( 4*pi*rhos*(log(1+CXXX) - CXXX/(1+CXXX)) )
    d. MYYY = 4*pi*rhos*rs**3 * [log(1+CYYY) - CYYY/(1+CYYY)]
    e. RYYY = CYYY * rs and RXXX = CXXX * rs

    ---
    Parameters:
        -`redshift`: float, the redshift of the halo.
        -`MXXX`: float, the mass of the halo in the unit of Msun.
        -`CXXX`: float, the concentration of the halo associating to the overdensity of the input mass.
        -`wrt`: string. It has to be either 'crit' or 'mean'. It will be overwritten as 'vir' if XXX < 0.0.
        -`new_wrt`: string. Same as above, but it will be overwritten as 'vir' if YYY < 0.0.
        -`XXX`: float, the overdensity against the rho_crit for the given input halo mass.
                Negative if it is with respect to the virial mass.
        -`YYY`: float, the overdensity against the rho_crit for the desired output halo mass.
                Negative if it is with respect to the virial mass.
        -`cosmo`: dict. The cosmology parameter for this halo. It has to be compatible with
                  the format of the cosmolopy module.

    Return:
        -`MXXX`: float, the input halo mass in the unit of Msun.
        -`CXXX`: float, the input concentration.
        -`RXXX`: float, the radius for the given input halo mass in the unit of Mpc.
        -`MYYY`: float, the output halo mass in the unit of Msun.
        -`CYYY`: float, the output concentration.
        -`RYYY`: float, the radius for the output halo mass in the unit of Mpc.
        -`rhos`: float, the normalization of the NFW model (Msun/Mpc^3)
        -`rs`: float, the core radius of the NFW model (Mpc).
    """
    # sanity check
    if not ( (redshift >= 0.0) and (MXXX > 0.0) and (CXXX > 0.0) ):
        raise ValueError("The input halo params are wrong, (redshift, MXXX, CXXX):", redshift, MXXX, CXXX, ".")
    # sanity check on wrt
    if wrt not in ["crit", "mean"]:
        raise NameError("The input wrt", wrt, "has to be crit or mean.")
    if new_wrt not in ["crit", "mean"]:
        raise NameError("The input new_wrt", new_wrt, "has to be crit or mean.")
    # set up wrt: a negative overdensity means "virial" by convention.
    if XXX < 0.0: wrt = "vir"
    if YYY < 0.0: new_wrt = "vir"
    # Define the function form for the convenience:
    # FactorCC(c) = (c^3/3) / (ln(1+c) - c/(1+c)), the NFW mass-profile factor.
    def FactorCC(CXXX):
        return (CXXX**3 / 3.0) / (log(1.0+CXXX) - CXXX / (1.0+CXXX))
    # Define the function FactorO: the reference density (Msun/Mpc^3)
    # implied by the overdensity definition (crit / mean / vir).
    def FactorO(redshift, XXX, wrt):
        if wrt == "crit":
            return XXX * \
                   cosdens.cosmo_densities(**cosmo)[0] * cosdist.e_z(redshift, **cosmo)**2
        elif wrt == "mean":
            return XXX * \
                   cosdens.cosmo_densities(**cosmo)[0] * cosdist.e_z(redshift, **cosmo)**2 * cosdens.omega_M_z(redshift, **cosmo)
        elif wrt == "vir":
            return calc_vir_overden(zd = redshift, cosmo = cosmo) * \
                   cosdens.cosmo_densities(**cosmo)[0] * cosdist.e_z(redshift, **cosmo)**2 * cosdens.omega_M_z(redshift, **cosmo)
    # Solve rhos Msun/Mpc^3
    rhos = FactorO(redshift = redshift, XXX = XXX, wrt = wrt) * FactorCC(CXXX = CXXX)
    # Define the function we solve for CYYY (root at matching rhos):
    def Solve4CYYY_Func(CYYY):
        return FactorO(redshift = redshift, XXX = YYY, wrt = new_wrt) * FactorCC(CXXX = CYYY) - \
               FactorO(redshift = redshift, XXX = XXX, wrt = wrt) * FactorCC(CXXX = CXXX)
    # Solve for CYYY using Newton's method, seeded at the input CXXX.
    CYYY = optimize.newton(Solve4CYYY_Func, CXXX, fprime=None, args=(), tol=1.48e-08, maxiter=50)
    # Solve for rs [Mpc]
    rs = ( MXXX / ( 4.0 * pi * rhos * ( log(1.0+CXXX) - CXXX/(1.0+CXXX) ) ) )**(1.0/3.0)
    # Solve for MYYY [Msun]
    MYYY = 4.0 * pi * rhos * rs**3 * ( log(1.0+CYYY) - CYYY/(1.0+CYYY) )
    # Solve for RXXX and RYYY [Mpc]
    RXXX = CXXX * rs
    RYYY = CYYY * rs
    return np.array([ MXXX, CXXX, RXXX, MYYY, CYYY, RYYY, rhos, rs ], dtype=float)
import html
def gldas_to_cycles(
    latitude,
    longitude,
    output_file,
    start_date="2000-01-01",
    end_date="2017-12-31",
    gldas_path="/raw-data/GLDAS",
):
    """Build a Pegasus job that transforms GLDAS data to Cycles input.

    :param latitude: latitude of the point to extract.
    :param longitude: longitude of the point to extract.
    :param output_file: name of the output file the job produces.
    :param start_date: first day of the extraction window (YYYY-MM-DD).
    :param end_date: last day of the extraction window (YYYY-MM-DD).
    :param gldas_path: directory holding the raw GLDAS data.
    :returns: the configured Pegasus :class:`Job`.
    """
    j = Job("gldas_to_cycles")
    # The +SingularityImage classad value must itself be a quoted string,
    # hence the embedded double quotes. FIX: the previous line contained
    # mangled HTML entities (""..."") which was a syntax error.
    j.addProfile(Profile(
        Namespace.CONDOR,
        key="+SingularityImage",
        value='"/cvmfs/singularity.opensciencegrid.org/mintproject/cycles:0.9.4-alpha"',
    ))
    j.addArguments("--start-date", start_date)
    j.addArguments("--end-date", end_date)
    j.addArguments("--latitude", latitude)
    j.addArguments("--longitude", longitude)
    j.addArguments("--gldas-path", gldas_path)
    j.addArguments("--output", output_file)
    j.uses(File(output_file), link=Link.OUTPUT, transfer=True)
    return j
def max_pool_2x2(x):
    """Downsample a feature map by 2X with non-overlapping 2x2 max pooling."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='VALID')
import os
def get_largest_files_new(directory: str, num: int) -> list:
    """
    Return a sorted list of (size, path) tuples for up to num of the
    largest files found anywhere under directory.
    Preconditions:
        - num > 0
    """
    # ACCUMULATOR: every (size in bytes, full path) pair seen so far.
    found = []
    for dirpath, _dirnames, filenames in os.walk(directory):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            try:
                found.append((os.path.getsize(full_path), full_path))
            except FileNotFoundError:
                # File vanished between the directory listing and the stat.
                print("Couldn't find file at ", full_path)
    # Ascending sort by (size, path), then keep the num largest entries.
    return sorted(found)[-num:]
# | a9966a03166d1102cc2567fbb5bc19f336893b35 | 26,922
def set_bit_value(val, offs, value):
    """Return `val` with the bit at offset `offs` forced to `value`."""
    return set_bit(val, offs) if value else clear_bit(val, offs)
# | 793165cc53adc140b60521b6fc772efa80b69ebb | 26,923
def gradientFunction(theta, X, y):
    """
    Compute the gradient of the logistic-regression cost w.r.t. theta.

    NOTE(review): the original description mentions regularization, but no
    regularization term is actually applied in this implementation.

    Parameters
    ----------
    theta : array of shape (n,) or (n, 1); model parameters.
    X : array of shape (m, n); design matrix.
    y : array of shape (m, 1); binary labels.

    Returns
    -------
    Array of shape (n, 1): partial derivatives of the cost w.r.t. theta.
    """
    # number of training examples
    m = X.shape[0]
    # number of parameters
    n = X.shape[1]
    # fmin_tnc passes theta as a flat vector; restore the column shape.
    theta = theta.reshape((n, 1))
    # Vectorised gradient: (1/m) * X^T (h(X @ theta) - y)
    # (dead initialiser and commented-out alternatives removed)
    return (1 / m) * X.T @ (sigmoid(X @ theta) - y)
# | 5ca8c355474c9cab10b2b255b71b943b2b6b0aa1 | 26,924
def splice(tree, rep, tag):
    """Splice in a tree into another tree.
    Walk ``tree``, replacing the first occurrence of a ``Name(id=tag)`` with
    the tree ``rep``.
    This is convenient for first building a skeleton with a marker such as
    ``q[name["_here_"]]``, and then splicing in ``rep`` later. See ``forall``
    and ``envify`` for usage examples.
    """
    @Walker
    def doit(tree, *, stop, **kw):
        # stop() prevents the walker from recursing into the replacement,
        # so markers inside ``rep`` itself are never rewritten.
        # NOTE(review): stop() halts recursion into this subtree only; if
        # several sibling subtrees contain the tag, each one is replaced —
        # confirm whether "first occurrence" in the summary is accurate.
        if type(tree) is Name and tree.id == tag:
            stop()
            return rep
        return tree
    return doit.recurse(tree) | b42c5300b7ad9d5d04ba0233c94c735686e1300a | 26,925
def get_devp2p_cmd_id(msg: bytes) -> int:
    """Return the cmd_id (payload type) for the given devp2p message.

    The cmd_id is always the first entry of the RLP payload, interpreted
    as a big-endian integer.
    """
    first_entry = msg[:1]
    return rlp.decode(first_entry, sedes=rlp.sedes.big_endian_int)
# | bd930be7205871183ac9cb4814ae793f5524964d | 26,926
def custom_cached(name, *old_method, **options):
    """
    decorator to convert a method or function into a lazy one.
    note that this cache type supports expire time and will consider method inputs
    in caching. the result will be calculated once and then it will be cached.
    each result will be cached using a tuple of class type, method name, inputs,
    current user and component key as a key in the cache.
    note that this decorator could be used on both instance or class level methods and
    properties or stand-alone functions.
    :param str name: the cache name to be used.
                     for example: `redis`, `memcached`, `complex` or ...
    :param function | property old_method: the original decorated method or function.
    :keyword bool consider_user: specifies that current user must be included in
                                 key generation. if not provided, it will be get
                                 from `caching` config store.
    :keyword int expire: expire time for given key in milliseconds.
                         if not provided, it will be get from `caching`
                         config store.
    :keyword bool refreshable: specifies that cached item's expire time must be
                               extended on each hit. if not provided, it will be
                               get from `caching` config store.
    :returns: method or function result.
    """
    def decorator(method):
        """
        decorates the given method or function and makes it a lazy one.
        :param function | property method: decorated method or function.
        :returns: method or function result.
        """
        def wrapper(*args, **kwargs):
            """
            decorates the given method or function and makes it a lazy one.
            :param object args: function positional arguments.
            :param object kwargs: function keyword arguments.
            :returns: method or function result.
            """
            # NOTE(review): a cached value of None is indistinguishable from
            # a cache miss here, so methods that legitimately return None
            # are recomputed on every call — confirm this is intended.
            result = caching_services.try_get(name, method, args,
                                              kwargs, **options)
            if result is not None:
                return result
            result = method(*args, **kwargs)
            caching_services.try_set(name, result, method,
                                     args, kwargs, **options)
            return result
        # preserve the wrapped callable's metadata (name, docstring, ...).
        return update_wrapper(wrapper, method)
    # supports both the factory form @custom_cached('name') and the direct
    # form custom_cached('name', func).
    if len(old_method) > 0:
        return decorator(old_method[0])
    return decorator | a6732fee6cd484068d3171079bf4989d5367adbc | 26,927
def _full_url(url):
"""
Assemble the full url
for a url.
"""
url = url.strip()
for x in ['http', 'https']:
if url.startswith('%s://' % x):
return url
return 'http://%s' % url | cfb56cf98d3c1dd5ee2b58f53a7792e927c1823f | 26,928 |
from rx.core.operators.replay import _replay
from typing import Optional
import typing
from typing import Callable
from typing import Union
def replay(mapper: Optional[Mapper] = None,
           buffer_size: Optional[int] = None,
           window: Optional[typing.RelativeTime] = None,
           scheduler: Optional[typing.Scheduler] = None
           ) -> Callable[[Observable], Union[Observable, ConnectableObservable]]:
    """The `replay` operator.
    Returns an observable sequence that is the result of invoking the
    mapper on a connectable observable sequence that shares a single
    subscription to the underlying sequence replaying notifications
    subject to a maximum time length for the replay buffer.
    This operator is a specialization of Multicast using a
    ReplaySubject.
    Examples:
        >>> res = replay(buffer_size=3)
        >>> res = replay(buffer_size=3, window=0.5)
        >>> res = replay(None, 3, 0.5)
        >>> res = replay(lambda x: x.take(6).repeat(), 3, 0.5)
    Args:
        mapper: [Optional] Selector function which can use the
            multicasted source sequence as many times as needed,
            without causing multiple subscriptions to the source
            sequence. Subscribers to the given source will receive all
            the notifications of the source subject to the specified
            replay buffer trimming policy.
        buffer_size: [Optional] Maximum element count of the replay
            buffer.
        window: [Optional] Maximum time length of the replay buffer.
        scheduler: [Optional] Scheduler the observers are invoked on.
    Returns:
        An operator function that takes an observable source and
        returns an observable sequence that contains the elements of a
        sequence produced by multicasting the source sequence within a
        mapper function.
    """
    # Thin public wrapper: all behavior lives in the internal _replay.
    return _replay(mapper, buffer_size, window, scheduler=scheduler) | 8a5ff1cbbc5c12d63e0773f86d02550bf5be65c4 | 26,929
import io
import torch
def read_from_mc(path: str, flush=False) -> object:
    """
    Overview:
        read file from memcache, file must be saved by `torch.save()`
    Arguments:
        - path (:obj:`str`): file path in local system
        - flush (:obj:`bool`): if True, only pull the value through the
          cache (read-through) and return None instead of deserializing
    Returns:
        - (:obj:`data`): deserialized data, or None when ``flush`` is True
    """
    global mclient
    # Lazily initialise the module-level memcached client.
    _ensure_memcached()
    value = mc.pyvector()
    if flush:
        # Read-through mode refreshes the cache entry; nothing is decoded.
        # NOTE(review): this branch returns None — confirm callers of
        # flush=True expect no return value.
        mclient.Get(path, value, mc.MC_READ_THROUGH)
        return
    else:
        mclient.Get(path, value)
        value_buf = mc.ConvertBuffer(value)
        # Wrap the raw buffer in a file-like object for torch.load.
        value_str = io.BytesIO(value_buf)
        # Deserialize on CPU regardless of where the tensor was saved.
        value_str = torch.load(value_str, map_location='cpu')
        return value_str | c606b131ba3d65c6b3dd320ae6a71983a79420c8 | 26,930
import wave
import struct
def write_wav(file, samples, nframes=-1, nchannels=2, sampwidth=2, framerate=44100, bufsize=2048):
    """
    Writes the samples to a wav file.
    :param file: can be a filename, or a file object.
    :param samples: the samples; each item is a sequence of per-channel
        floats in [-1.0, 1.0]
    :param nframes: the number of frames
    :param nchannels: the number of channels
    :param sampwidth: the width of the sample in bytes
    :param framerate: the frame rate
    :param bufsize: the size of the buffer to write into the file
    :return: file
    """
    w = wave.open(file, 'wb')
    w.setparams((nchannels, sampwidth, framerate, nframes, 'NONE', 'not compressed'))
    # Largest positive value representable in a signed sample of
    # `sampwidth` bytes; samples are scaled by this before packing.
    max_amplitude = float(int((2 ** (sampwidth * 8)) / 2) - 1)
    # split the samples into chunks (to reduce memory consumption and improve performance)
    # NOTE(review): grouper() is assumed to yield fixed-size chunks padded
    # with None (itertools recipe) — confirm the helper's semantics.
    # NOTE(review): struct format 'h' always packs 2 bytes, so sampwidth
    # values other than 2 would write inconsistent data — confirm only
    # sampwidth=2 is ever used.
    for chunk in grouper(bufsize, samples):
        frames = b''.join(
            b''.join(struct.pack('h', int(max_amplitude * sample)) for sample in channels) for channels in chunk if
            channels is not None)
        w.writeframesraw(frames)
    w.close()
    return file | ec38069d59dde8dafd5aa98a826ee699ded15b29 | 26,931
async def check_login(self) -> dict:
    """Check login state and return user credentials.

    Returns a dict with the session's ``name`` and ``token`` when logged
    in; otherwise returns an HTTP 303 redirect to the login page (hence
    the ``type: ignore`` on that return).
    """
    session = await get_session(self.request)
    loggedin = UserAdapter().isloggedin(session)
    if not loggedin:
        # Message shown on the login page (Norwegian: "Log in to view this page").
        informasjon = "Logg inn for å se denne siden"
        return web.HTTPSeeOther(location=f"/login?informasjon={informasjon}") # type: ignore
    return {"name": session["username"], "token": session["token"]} | 48dd910c143e4ca8f90d8d3da2be2ce6ed275b1b | 26,932
from typing import Callable
def get_knn_func_data_points(
    data_points: np.ndarray,
    pairwise_distances: np.ndarray = None,
    approx_nn: ApproxNN = None,
    metric: Callable = fastdist.euclidean,
    metric_name: str = "euclidean",
) -> KnnFunc:
    """
    Build a K-nearest-neighbour callable over `data_points`, used in
    `compute_gad`.

    The returned callable takes (point_idx, k_neighbours) and produces the
    neighbours of that point. The lookup strategy is chosen by priority:
    an ApproxNN index if given, else precomputed pairwise distances if
    given, else brute-force distances computed with `metric`.

    Parameters
    ----------
    data_points : np.ndarray
        Data points.
    pairwise_distances : np.ndarray, optional
        Precomputed pairwise distances of the data points (defaults to None).
    approx_nn : ApproxNN, optional
        Approximate nearest-neighbour index instance.
    metric : Callable, optional
        fastdist metric for brute-force search; only used when both
        `pairwise_distances` and `approx_nn` are None (defaults to
        fastdist.euclidean).
    metric_name : str, optional
        String name of the `metric` callable (defaults to "euclidean").

    Returns
    -------
    knn_func : KnnFunc
        K-nearest-neighbour callable for data points.
    """
    if approx_nn is not None:
        def knn_via_index(point_idx, k_neighbours):
            # Query the approximate index, excluding the point itself.
            return approx_nn.search(
                query_vector=data_points[point_idx],
                k_neighbours=k_neighbours,
                excluded_neighbour_indices=[point_idx],
                return_distances=True,
            )
        return knn_via_index
    if pairwise_distances is not None:
        def knn_via_precomputed(point_idx, k_neighbours):
            # Use the row of precomputed distances for this point.
            return get_nearest_neighbours(
                distances=pairwise_distances[point_idx],
                k_neighbours=k_neighbours,
            )
        return knn_via_precomputed

    def knn_via_bruteforce(point_idx, k_neighbours):
        # Compute distances from this point to all points on the fly.
        return get_nearest_neighbours(
            distances=fastdist.vector_to_matrix_distance(
                u=data_points[point_idx],
                m=data_points,
                metric=metric,
                metric_name=metric_name,
            ),
            k_neighbours=k_neighbours,
        )
    return knn_via_bruteforce
# | 897289271aef24610dc949fefd14761a3bea4322 | 26,933
from aiida import orm
def get_database_nodecount():
    """Return the total number of nodes stored in the AiiDA database."""
    builder = orm.QueryBuilder()
    builder.append(orm.Node)
    return builder.count()
# | dcb71a022d36c2602125cbba4dccd1c7ccb16281 | 26,934
def shower_profile(xdat, alpha, beta, x0):
    """Longitudinal shower-profile model used to predict deposited energy.

    shower_optimize() fits alpha and beta against this function; the
    functional form is described in the source referenced in the README.

    Parameters
    ----------
    xdat : tuple (gamma_energy, distance)
        Incident gamma energy and the shower path length.
    alpha, beta : fit parameters (shape and scale).
    x0 : attenuation factor applied inside the exponential term.

    Returns
    -------
    Predicted energy:
    gamma_energy * (beta*distance)**(alpha-1) * beta * exp(-beta*distance*x0) / Gamma(alpha)
    """
    # (dead commented-out code referencing event/hits removed)
    gamma_energy, distance = xdat
    gamma = special.gamma(alpha)
    numerator = (beta * distance)**(alpha - 1) * beta * np.exp(-1 * beta * distance * x0)
    return gamma_energy * (numerator / gamma)
# | 99c94604a742ffd44e21b4c5cd2061f6293a4d72 | 26,935
def has_prefix(s, sub_index):
    """
    Report whether any word in the global ``dictionary`` starts with the
    candidate string selected from ``s`` by the positions in ``sub_index``
    (so the search can stop early when no word has this prefix).
    :param s: string, the user input word
    :param sub_index: list of int indices into ``s`` (in selection order)
    :return: (bool) True iff some word in the dictionary has that prefix
    """
    # Build the candidate prefix in one pass instead of repeated string +=.
    prefix = ''.join(s[digit] for digit in sub_index)
    # any() short-circuits on the first matching word.
    return any(word.startswith(prefix) for word in dictionary)
# | a33ce5d13b473f264636bfb430d0191450103020 | 26,936
def get_experiment_tag(name):
    """Interface to callables that add an experiment tag to a matplotlib axis.

    `name` may be the key of one of the styles in
    `provided_experiment_tags`, or a custom callable — this gives a common
    interface for both cases. Unknown names are reported and replaced by
    the no-op tag.
    """
    if name in provided_experiment_tags:
        return provided_experiment_tags[name]
    if callable(name):
        # This option allows users to provide their own tagging callable.
        return name
    # Fall back to a no-op and tell the user which keys would have worked.
    valid_keys = ", ".join(provided_experiment_tags)
    print(
        f"Ignored invalid experiment tag: {name}. " f"Choose one of: {valid_keys}."
    )
    return do_nothing
# | 9dc11a6caac2010aa99e288897a6f60273d1a372 | 26,937
def _add_layer1(query, original_data):
    """Merge data from a successful layer1 MIB query into original_data.
    Args:
        query: MIB query object
        original_data: Two keyed dict of data
    Returns:
        new_data: Aggregated data
    """
    layer1_result = query.layer1()
    return _add_data(layer1_result, original_data)
# | ab2d3ad95435dd2fcc6b99745f115cab08aa6699 | 26,938
def encodeUcs2(text):
    """ UCS2 text encoding algorithm
    Encodes the specified text string into UCS2-encoded bytes: each
    character becomes two bytes, high byte first (big-endian).
    @param text: the text string to encode
    @return: A bytearray containing the string encoded in UCS2 encoding
    @rtype: bytearray
    """
    encoded = bytearray()
    for code_point in (ord(ch) for ch in text):
        high, low = divmod(code_point, 0x100)
        encoded.append(high)
        encoded.append(low)
    return encoded
# | da2243ffc959db64a196a312522f967dce1da9d1 | 26,939
def w_kvtype(stype: str, ctx: dict) -> dict:
    """
    Make a schema definition from a ktype or vtype option.

    Primitive type names map to a plain JSON type; enum/pointer-derived
    types are expanded via w_enum; anything else becomes a $ref.
    """
    primitive_map = {'Boolean': 'boolean', 'Integer': 'integer', 'Number': 'number', 'String': 'string'}
    primitive = primitive_map.get(stype)
    if primitive is not None:
        return {'type': primitive}
    if stype[0] in (OPTION_ID['enum'], OPTION_ID['pointer']):
        # Derived enumeration: look up the referenced type definition.
        type_def = ctx['type_defs'][stype[1:]]
        type_opts = topts_s2d(type_def[TypeOptions])
        enum_items = get_enum_items(type_def, type_opts, ctx['type_defs'])
        use_ids = 'id' in type_opts
        return w_enum(enum_items, FieldID if use_ids else FieldName, FieldDesc, use_ids, ctx)
    return {'$ref': f'#/definitions/{stype}'}
# | e09bc0ceaed2edc2927ddbc4b6bc38bcec5a345d | 26,940
from typing import Optional
def create_article_number_sequence(
    shop_id: ShopID, prefix: str, *, value: Optional[int] = None
) -> ArticleNumberSequence:
    """Create an article number sequence.

    Persists a new sequence for ``shop_id`` with the given ``prefix`` and
    optional initial ``value``.

    :raises ArticleNumberSequenceCreationFailed: if the insert violates a
        database constraint (e.g. the prefix already exists); the session
        is rolled back first.
    """
    sequence = DbArticleNumberSequence(shop_id, prefix, value=value)
    db.session.add(sequence)
    try:
        db.session.commit()
    except IntegrityError as exc:
        db.session.rollback()
        # Message previously read "Could not sequence with prefix" — fixed wording.
        raise ArticleNumberSequenceCreationFailed(
            f'Could not create article number sequence with prefix "{prefix}"'
        ) from exc
    return _db_entity_to_article_number_sequence(sequence)
# | 3be2f0399fee0c01a117ffef0f790bae80750db0 | 26,941
import sys
import bz2
def queryMultifield(queryWords, listOfFields, pathOfFolder, fVocabulary):
    """
    Multifield query: look up each query word in the posting list of its
    paired field (queryWords[i] is searched in field listOfFields[i]).

    Relies on module globals: ``offset``, ``COMPRESS_INDEX`` and
    ``sys.argv[1]`` (the index path).

    :return: (fileList, df) where fileList maps word -> field -> postings
             and df maps word -> document frequency.
    """
    fileList = defaultdict(dict)
    df = {}
    for i in range(len(queryWords)):
        word, key = queryWords[i], listOfFields[i]
        # `mid` (the bisection midpoint) is returned but unused here.
        returnedList, mid= findFileNumber(0, len(offset), offset, sys.argv[1], word, fVocabulary)
        if len(returnedList) > 0:
            fileNumber = returnedList[0]
            # Per-field posting file, optionally bz2-compressed.
            fileName = pathOfFolder+'/'+key+str(fileNumber)+('.bz2' if COMPRESS_INDEX else ".txt")
            # NOTE(review): fieldFile is never closed in this function —
            # confirm findFileList() takes ownership and closes it.
            fieldFile = bz2.BZ2File(fileName,'rb') if COMPRESS_INDEX else open(fileName)
            returnedList, docfreq = findFileList(fileName,fileNumber,key,pathOfFolder,word,fieldFile)
            fileList[word][key], df[word] = returnedList, docfreq
    return fileList, df | 4062ce462f6155f86a741db15cf3e63490532f30 | 26,942
import scanpy as sc
def csv_to_im(
    image_in,
    csv_path,
    labelkey='label',
    key='dapi',
    name='',
    maxlabel=0,
    normalize=False,
    scale_uint16=False,
    replace_nan=False,
    channel=-1,
    outpath='',
    ):
    """Write segment backprojection.

    Maps per-label values from a CSV or AnnData (.h5ad) table back onto a
    label image: every voxel carrying label L receives the table value in
    column ``key`` for the row where ``labelkey`` equals L. The result is
    written as Imaris (.ims), NIfTI (.nii.gz) or a default output format,
    depending on ``outpath``.

    NOTE(review): ``df`` is only assigned for .csv / .h5ad inputs — other
    extensions would raise NameError below; confirm callers never pass them.
    """
    if isinstance(image_in, Image):
        labels = image_in
    else:
        labels = LabelImage(image_in)
        labels.load(load_data=False)
    if not maxlabel:
        labels.set_maxlabel()
        maxlabel = labels.maxlabel
    if csv_path.endswith('.csv'):
        df = pd.read_csv(csv_path)
        df = df.astype({labelkey: int})
    elif csv_path.endswith('.h5ad'):
        adata = sc.read(csv_path)
        if not csv_path.endswith('_nofilter.h5ad'):
            adata.X = adata.raw.X
        df = adata.obs[labelkey].astype(int)
        df = pd.concat([df, adata[:, key].to_df()], axis=1)
    # for key in keys: # TODO
    # Lookup table: fw[label] -> value for that label (index 0 unused).
    fw = np.zeros(maxlabel + 1, dtype='float')
    for index, row in df.iterrows():
        fw[int(row[labelkey])] = row[key]
    if replace_nan:
        fw = np.nan_to_num(fw)
    if normalize:
        def normalize_data(data):
            """Normalize data between 0 and 1."""
            data = data.astype('float64')
            datamin = np.amin(data)
            datamax = np.amax(data)
            data -= datamin
            data *= 1/(datamax-datamin)
            return data, [datamin, datamax]
        fw_n, fw_minmax = normalize_data(fw)
        fw_n *= 65535
        fw = fw_n
    elif scale_uint16: # for e.g. pseudotime / FA / etc / any [0, 1] vars
        fw *= 65535
    # Backproject: replace every label value by its table value.
    out = labels.forward_map(list(fw))
    if outpath.endswith('.ims'):
        mo = Image(outpath, permission='r+')
        mo.load(load_data=False)
        # Reuse the requested channel if valid, otherwise append a new one.
        if channel >= 0 and channel < mo.dims[3]:
            ch = channel
        else:
            mo.create()
            ch = mo.dims[3] - 1
        mo.slices[3] = slice(ch, ch + 1, 1)
        mo.write(out.astype(mo.dtype)) # FIXME: >65535 wraps around
        cpath = 'DataSetInfo/Channel {}'.format(ch)
        name = name or key
        mo.file[cpath].attrs['Name'] = np.array([c for c in name], dtype='|S1')
        mo.close()
    elif outpath.endswith('.nii.gz'):
        props = labels.get_props()
        if not labels.path.endswith('.nii.gz'):
            # NIfTI expects xyz axis order; transpose if the source differed.
            props = transpose_props(props, outlayout='xyz')
            out = out.transpose()
        mo = write_output(outpath, out, props)
    else:
        outpath = outpath or gen_outpath(labels, key)
        mo = write_output(outpath, out, labels.get_props())
    return mo | c89e3d0c9ffa9b04b23c905148d45790ea782371 | 26,943
import vtk
def get_defaults(vtkName, debug=False):
    """Return a dict of default values for a VTK class's Set* methods.

    Instantiates the named VTK class, then for every SetX method (except a
    fixed ignore list of pipeline/bookkeeping setters) calls the matching
    GetX method. The value is recorded when it is an int, float, tuple or
    list; setters whose getter raises are recorded as None.

    Example:
    --------
    >>> print(get_defaults("vtkSphere"))
    >>> # {'SetCenter': (0.0, 0.0, 0.0), 'SetRadius': 0.5}
    """
    instance = getattr(vtk, vtkName)()
    # Pipeline/bookkeeping setters that carry no meaningful defaults.
    ignores = {
        'SetAbortExecute',
        'SetDebug', 'SetDefaultExecutivePrototype',
        'SetExecutive', 'SetGlobalWarningDisplay', 'SetInformation',
        'SetInputConnection', 'SetInputDataObject', 'SetMemkindDirectory',
        'SetProgressObserver', 'SetProgressShiftScale', 'SetProgressText',
        'SetReferenceCount',
    }
    defaults = {}
    for option in dir(instance):
        if not option.startswith("Set") or option in ignores:
            continue
        if debug is True:
            print(option)
        getter_name = "Get" + option[3:]
        try:
            default = getattr(instance, getter_name)()
            if isinstance(default, (int, float, tuple, list)):
                if debug is True:
                    print(" -- default = " + str(default))
                defaults[option] = default
        except Exception:
            # Narrowed from a bare `except:`; getter errors mean "no default".
            if debug is True:
                print(" -- default = None")
            defaults[option] = None
    return defaults
# | 5fbdad8c93138e9c1f03d3bdca40b85a52808e18 | 26,944
import math
def transform_side(side, theta):
    """Rotate a side's coordinates onto the perpendicular plane using the
    Euler-Rodrigues formula.

    Input: side coordinates (sequence of (x, y) points, z taken as 0) and
    the rotation angle ``theta`` in degrees.
    Output: list of rotated (x, y, z) tuples rounded to whole numbers.

    NOTE(review): the rotation axis is (last.x - first.x, 0, 0); if the
    first and last points share an x coordinate the normalisation divides
    by zero — confirm callers never pass such a side.
    """
    rad = math.radians(theta)
    # Axis of rotation runs along x, from the first to the last point.
    axis = np.asarray((side[len(side) - 1][0] - side[0][0], 0, 0))
    axis = axis / math.sqrt(np.dot(axis, axis))
    # The rotation matrix is loop-invariant, so build it once (the original
    # rebuilt it per point and also shadowed the `theta` parameter).
    a = math.cos(rad / 2)
    b, c, d = -axis * math.sin(rad / 2)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    rotation = np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                         [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                         [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
    new_side = []
    for point in side:
        rotated = np.dot(rotation, (point[0], point[1], 0))
        # Round each component to the nearest whole number.
        new_side.append((round(rotated[0]), round(rotated[1]), round(rotated[2])))
    return new_side
# | 41e71676ee138cc355ae3990e74aeae6176d4f94 | 26,945
def get_approved_listings():
    """
    Get approved listings, optionally filtered to one creator.

    Query string:
        user_id: if present, only listings created by this user.

    :return: JSON response containing the serialized listings
    """
    user_id = request.args.get('user_id')
    # (dead `approved_listings = []` initializer removed: both branches
    # always assign the queryset)
    if user_id:
        approved_listings = Listing.query.filter_by(approved=True, created_by=user_id)
    else:
        approved_listings = Listing.query.filter_by(approved=True)
    return jsonify({
        "listings": [listing.serialize for listing in approved_listings]
    })
# | 1d20094c8b3ca10a23a49aa6c83020ae3cdf65e3 | 26,946
def edit_profile(request):
    """
    Edit company profile information.
    :param request: HTTP request; the session identifies the user
    :return: JsonResponse with result code, message and profile data
    """
    user_id = request.session.get("user_id")
    # NOTE(review): `email` is read but never used below — dead assignment?
    email = request.session.get("email")
    # username = request.session.get("username")
    if request.session.get("is_superuser"):
        # Administrators fetch all company profiles.
        data = models.IotProfile.objects.all().values() # TODO: test timing, pagination, conversion
        # values() makes the rows easier to convert to JSON
    else:
        data = models.Message.objects.filter(user_id=user_id).values()
    # NOTE(review): "sussess" looks like a typo for "success", and passing a
    # QuerySet inside JsonResponse may not be JSON-serializable — confirm.
    return JsonResponse({"result":0,"message":"sussess", "data": data}) | f106b3f6f35497bf5db0a71b81b520ac023c2b37 | 26,947
from typing import Optional
def get_vault(vault_id: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVaultResult:
    """
    This data source provides details about a specific Vault resource in Oracle Cloud Infrastructure Kms service.
    Gets the specified vault's configuration information.
    As a provisioning operation, this call is subject to a Key Management limit that applies to
    the total number of requests across all provisioning read operations. Key Management might
    throttle this call to reject an otherwise valid request when the total rate of provisioning
    read operations exceeds 10 requests per second for a given tenancy.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_oci as oci
    test_vault = oci.kms.get_vault(vault_id=oci_kms_vault["test_vault"]["id"])
    ```
    :param str vault_id: The OCID of the vault.
    """
    # Build the provider invoke arguments.
    __args__ = dict()
    __args__['vaultId'] = vault_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('oci:kms/getVault:getVault', __args__, opts=opts, typ=GetVaultResult).value
    # Re-wrap every field of the raw invoke result in the awaitable type.
    return AwaitableGetVaultResult(
        compartment_id=__ret__.compartment_id,
        crypto_endpoint=__ret__.crypto_endpoint,
        defined_tags=__ret__.defined_tags,
        display_name=__ret__.display_name,
        freeform_tags=__ret__.freeform_tags,
        id=__ret__.id,
        is_primary=__ret__.is_primary,
        management_endpoint=__ret__.management_endpoint,
        replica_details=__ret__.replica_details,
        restore_from_file=__ret__.restore_from_file,
        restore_from_object_store=__ret__.restore_from_object_store,
        restore_trigger=__ret__.restore_trigger,
        restored_from_vault_id=__ret__.restored_from_vault_id,
        state=__ret__.state,
        time_created=__ret__.time_created,
        time_of_deletion=__ret__.time_of_deletion,
        vault_id=__ret__.vault_id,
        vault_type=__ret__.vault_type) | 586bc794d066cd3e4040aff04a39a81762016c41 | 26,948
import os
def build_docker_build_command(configuration):
    """
    Translate a declarative docker `configuration` to a `docker build` command.

    Note: the `docker` and `build` entries are popped from `configuration`,
    so the dict is mutated in place.

    Parameters
    ----------
    configuration : dict
        configuration

    Returns
    -------
    args : list
        sequence of command line arguments to build an image
    """
    command = configuration.pop('docker', 'docker').split()
    command.append('build')
    build = configuration.pop('build')
    # Resolve the build context and Dockerfile relative to the workspace.
    build['path'] = os.path.join(configuration['workspace'], build['path'])
    build['file'] = os.path.join(build['path'], build['file'])
    command.extend(build_parameter_parts(
        build, 'tag', 'file', 'no-cache', 'quiet', 'cpu-shares', 'memory'))
    command.extend(build_dict_parameter_parts(build, 'build-arg'))
    # The build context path goes last.
    command.append(build.pop('path'))
    return command
# | 89869a37a07694270df5e0eebd0ff80f95e6e949 | 26,949
def _make_dist_mat_sa_utils():
    """Generate a sample distance matrix to test spatial_analysis_utils
    Returns:
        xarray.DataArray:
            a sample distance matrix to use for testing spatial_analysis_utils
    """
    dist_mat = np.zeros((10, 10))
    # (diagonal is already zero from np.zeros; kept for explicitness)
    np.fill_diagonal(dist_mat, 0)
    # Create distance matrix where cells positive for marker 1 and 2 are within the dist_lim of
    # each other, but not the other groups. This is repeated for cells positive for marker 3 and 4,
    # and for cells positive for marker 5.
    # All pairs are assigned symmetrically so the matrix stays symmetric.
    dist_mat[1:4, 0] = 50
    dist_mat[0, 1:4] = 50
    dist_mat[4:9, 0] = 200
    dist_mat[0, 4:9] = 200
    dist_mat[9, 0] = 500
    dist_mat[0, 9] = 500
    dist_mat[2:4, 1] = 50
    dist_mat[1, 2:4] = 50
    dist_mat[4:9, 1] = 150
    dist_mat[1, 4:9] = 150
    dist_mat[9, 1:9] = 200
    dist_mat[1:9, 9] = 200
    dist_mat[3, 2] = 50
    dist_mat[2, 3] = 50
    dist_mat[4:9, 2] = 150
    dist_mat[2, 4:9] = 150
    dist_mat[4:9, 3] = 150
    dist_mat[3, 4:9] = 150
    dist_mat[5:9, 4] = 50
    dist_mat[4, 5:9] = 50
    dist_mat[6:9, 5] = 50
    dist_mat[5, 6:9] = 50
    dist_mat[7:9, 6] = 50
    dist_mat[6, 7:9] = 50
    dist_mat[8, 7] = 50
    dist_mat[7, 8] = 50
    # add some randomization to the ordering
    coords_in_order = np.arange(dist_mat.shape[0])
    coords_permuted = deepcopy(coords_in_order)
    np.random.shuffle(coords_permuted)
    # Permute rows and columns together so distances stay consistent.
    dist_mat = dist_mat[np.ix_(coords_permuted, coords_permuted)]
    # we have to 1-index coords because people will be labeling their cells 1-indexed
    coords_dist_mat = [coords_permuted + 1, coords_permuted + 1]
    dist_mat = xr.DataArray(dist_mat, coords=coords_dist_mat)
    return dist_mat | 706ce73e1a5e66cdf521df7d0e1bf2c43bd09d02 | 26,950
def _workSE(args):
    """Worker function for batch source extraction.

    `args` is the tuple (imageKey, imagePath, weightPath, weightType,
    psfPath, configs, checkImages, catPostfix, workDir, defaultsPath).
    Runs SourceExtractor on the image and returns (imageKey, extractor).
    """
    (imageKey, imagePath, weightPath, weightType, psfPath, configs,
     checkImages, catPostfix, workDir, defaultsPath) = args
    catalogName = "_".join((str(imageKey), catPostfix))
    extractor = SourceExtractor(imagePath, catalogName, weightPath=weightPath,
            weightType=weightType, psfPath=psfPath, configs=configs,
            workDir=workDir,
            defaultsPath=defaultsPath)
    if checkImages is not None:
        extractor.set_check_images(checkImages, workDir)
    extractor.run()
    return imageKey, extractor
# | be98194a91108268bb7873fb275416a394aee3c1 | 26,951
def split_match(date,time,station):
    """
    Find and extract the measurement from Jack Walpole's splitting data for the same event.
    The matching is done by finding an entry with the same date stamp; initial testing has
    shown this to be a (nearly) unique identifier.
    station MUST be a string of a station code
    date MUST be an int/float with the format yyyyjjj where j is julian day
    :return: (fast, dfast, tlag, dtlag, wbeg, wend) — 'NaN' strings when no match is found
    """
    # -------
    # First we need to read in the splitting observations made by Jack Walpole.
    # We also slice out splitting observations just from the station of interest and then reset the indexing so WL_split's indicies start from [0]
    # -------
    WL_split = Jacks_SKS_RAW(station)
    # -------
    # Using a Pandas DataFrame we can slice out any rows that match our date stamp.
    # The iloc function is used to extract the requisite values (trivial here as match should be a single-row dataframe, but the values still need to be extracted this way)
    #
    match = WL_split[(WL_split['DATE'] == date)] # slices rows in WL_split that have the same datestamp as date. In theory this should return a single row DataFrame
    if len(match) == 1:
        (fast,dfast,tlag,dtlag,wbeg,wend) = (match.iloc[0]['FAST'],match.iloc[0]['DFAST'],match.iloc[0]['TLAG'],match.iloc[0]['DTLAG'],match.iloc[0]['WBEG'],match.iloc[0]['WEND'])
    elif len(match) == 0:
        print("The provided datestamp {} does not match any obervations made by JW".format(date))
        (fast,dfast,tlag,dtlag,wbeg,wend) = ('NaN','NaN','NaN','NaN','40','80')
    else:
        print("There has been more than one match, now testing by timestamp also!\n")
        time_test = int(str(time).zfill(6)[0:4]) #Jacks timestamps are only hhmm so I need to strip off the seconds from my timestamps. WARNING it is possible my timestamps are different to Jacks!!
        print('My timestamp {}, Jacks timestamp {}'.format(time_test,match.iloc[0]['TIME']))
        match2 = WL_split[(WL_split['DATE'] == date) & (WL_split['TIME'] == time_test)]
        # print(match2)
        # NOTE(review): values below are re-read from `match` (the date-only
        # slice), not from `match2` — confirm the time-filtered row should
        # not be used instead.
        (fast,dfast,tlag,dtlag,wbeg,wend) = (match.iloc[0]['FAST'],match.iloc[0]['DFAST'],match.iloc[0]['TLAG'],match.iloc[0]['DTLAG'],match.iloc[0]['WBEG'],match.iloc[0]['WEND'])
        if len(match2) == 0: #If there is still no match
            (fast,dfast,tlag,dtlag,wbeg,wend) = ('NaN','NaN','NaN','NaN','NaN','NaN')
            print("No match found")
    return fast,dfast,tlag,dtlag,wbeg,wend | b1a7a1b4265719c8d79274169593754a7f683bc2 | 26,952
import numpy
def schoolf_eq(temps, B0, E, E_D, T_pk):
    """Schoolfield model: trait values at the given temperatures (Kelvin).

    B0 is the trait value at the 283.15 K reference temperature, E the
    activation energy, E_D the deactivation energy and T_pk the peak
    temperature; K is the module-level Boltzmann constant.

    Returns the natural log of the predicted trait values as a float64
    numpy array.
    """
    function = B0 * numpy.exp(-E * ((1 / (K * temps)) - (1 / (K * 283.15)))) / \
        (1 + (E / (E_D - E)) * numpy.exp((E_D / K) * (1 / T_pk - 1 / temps)))
    # Bug fix: numpy.array(map(log, ...), dtype=float64) wraps the map
    # iterator in a 0-d object array under Python 3; numpy.log gives the
    # intended elementwise float64 result.
    return numpy.log(function)
# | 67969317bee63d759071c86840e4ae9ecfb924b4 | 26,953
import argparse
def get_parser():
    """ Build and return the command-line argument parser for the program. """
    arg_parser = argparse.ArgumentParser()
    # Model / run configuration.
    arg_parser.add_argument(
        '-c', type=str, dest='clf_key', default='dt',
        choices=['dt', 'xts', 'rf'], help='A classifier to use.')
    arg_parser.add_argument(
        '-m', type=str, dest='mode', default='test', choices=['cv', 'test'],
        help='Mode to run the program in (cross-validation or test).')
    arg_parser.add_argument(
        '-k', type=int, dest='cv', default=5,
        help='Number of folds in KFold cross-validation.')
    # Data selection.
    arg_parser.add_argument(
        '-d', '--data', type=str, dest='data_name', default='econbiz',
        help='Name of the dataset to use (econbiz or pubmed).')
    arg_parser.add_argument(
        '-f', type=float, dest='data_fraction', default=0.1,
        help='The fraction of the data to be used (0, 1>.')
    arg_parser.add_argument(
        '-t', type=float, dest='test_size', default=0.1,
        help='Test size (0, 1>.')
    # Tree hyper-parameters.
    arg_parser.add_argument(
        '--max_depth', type=int, dest='max_depth', default=None,
        help='The maximum depth of the tree.')
    arg_parser.add_argument(
        '--min_ss', type=int, dest='min_ss', default=2,
        help='The minimum number of samples required to split an internal tree node.')
    arg_parser.add_argument(
        '--max_features', type=str, dest='max_features', default=None,
        help='The number of features to consider when looking for the best split in the tree.')
    # Ensemble / execution options.
    arg_parser.add_argument(
        '-n', type=int, dest='n_estimators', default=10,
        help='The number of estimators in the ensemble.')
    arg_parser.add_argument(
        '-j', type=int, dest='n_jobs', default=-1,
        help='The number of jobs to run in parallel.')
    arg_parser.add_argument(
        '-v', type=int, dest='verbose', default=0,
        help='Verbosity of the program.')
    arg_parser.add_argument(
        '-b', '--batch', dest='is_batch_mode', action='store_true', default=False,
        help='Whether the program runs in a batch mode (affects file locations).')
    return arg_parser
# | 6246e9105d1435715b5297afe87de15288b5f7ea | 26,954
from typing import Union
from typing import Type
from typing import Any
from typing import Iterable
def heat_type_of(
    obj: Union[str, Type[datatype], Any, Iterable[Union[str, Type[datatype], Any]]]
) -> Type[datatype]:
    """
    Returns the corresponding HeAT data type of given object, i.e. scalar, array or iterable. Attempts to determine the
    canonical data type based on the following priority list:
    1. dtype property
    2. type(obj)
    3. type(obj[0])
    Parameters
    ----------
    obj : scalar or DNDarray or iterable
        The object for which to infer the type.
    Returns
    -------
    Type[datatype]
        The canonical HeAT type of ``obj``.
    Raises
    -------
    TypeError
        If the object's type cannot be inferred.
    """
    # attempt to access the dtype property
    try:
        return canonical_heat_type(obj.dtype)
    except (AttributeError, TypeError):
        pass
    # attempt type of object itself
    try:
        return canonical_heat_type(type(obj))
    except TypeError:
        pass
    # last resort, type of the object at first position
    try:
        return canonical_heat_type(type(obj[0]))
    except (KeyError, IndexError, TypeError):
        raise TypeError("data type of {} is not understood".format(obj)) | 2637d7559bb1ff3d6a1b07d9cedd10f1eb57e564 | 26,955
from typing import Union
from typing import Tuple
from typing import Dict
from typing import Any
def get_field_from_acc_out_ty(
    acc_out_ty_or_dict: Union[Tuple, Dict[str, Any]], field: str
):
    """
    Fetch `field` from an acc_out_ty, whether passed directly or wrapped
    in a dict under the "acc_out_ty" key.

    After tracing, NamedTuple inputs are converted to standard tuples, so
    fields cannot be accessed by name — index via TensorMetadata's field
    order instead.
    """
    acc_out_ty = (acc_out_ty_or_dict["acc_out_ty"]
                  if isinstance(acc_out_ty_or_dict, dict)
                  else acc_out_ty_or_dict)
    field_position = TensorMetadata._fields.index(field)
    return acc_out_ty[field_position]
# | 44b0cac3737823c6ea7aa4b924683d17184711a6 | 26,956
import os
import sys
def stop_gracefully(db, no_exit=False):
    """
    Close the brunodb database (and optionally exit Python) when the stop
    file exists.

    Checking for STOP_FILE lets a long-running load (e.g. block=False) be
    stopped cleanly — better than a hard stop, which might corrupt the
    database.

    :param db: the database object
    :param no_exit: Default False; if True, do not exit Python — just
        return True so the caller can handle the exit itself.
    :return: True when stopping and no_exit is True; None when the stop
        file is absent. Otherwise the process exits.
    """
    if not os.path.exists(STOP_FILE):
        return
    db.close()
    message = "Brunodb stop file, %s exists so database will be closed" % STOP_FILE
    if no_exit:
        print(message)
        return True
    print(message + ' and python will exit')
    sys.exit()
# | 037a61aedfaa066d4e38b8f60991bbcaa916b77e | 26,957
def bhc(data,alpha,beta=None):
    """
    This function does a bayesian clustering.

    Performs bottom-up Bayesian Hierarchical Clustering: each row of `data`
    starts as its own leaf cluster and, at every step, the pair of clusters
    whose merged hypothesis has the highest posterior is fused, until one
    cluster remains.

    Alpha: Hyperparameter
    Beta: Hyperparameter
    If beta is not given, it uses the Multinomial-Dirichlet.
    Otherwise it uses Bernoulli-Beta.

    Returns:
        clusters: dict mapping each remaining cluster count to a tuple
            (best merge posterior r_k, list of cluster-label strings);
            "n_cluster" holds the initial number of clusters.
        merged_node: the final (root) Node of the cluster tree.
    """
    n_cluster = data.shape[0]
    nodekey = n_cluster
    # NOTE(review): list_clusters is never used below — presumably leftover.
    list_clusters = [i for i in range(n_cluster)]
    clusters = dict()
    clusters["n_cluster"] = n_cluster
    clusters[n_cluster] = (1,[str(i+1) for i in range(n_cluster)])
    # One leaf Node per data row, keyed by its 1-based row label as a string.
    tree = {str(i+1):Node(key=i+1,data=np.array([data[i,:]]),alpha=alpha,beta=beta,left=None,right=None,parent=None)
            for i in range(n_cluster)}
    while n_cluster > 1:
        "Find the pair with the highest probability of the merged hypothesis"
        # Sentinel lower bound for the best posterior seen this round.
        r_k_max = -1000000
        # Exhaustively score every candidate pair of current clusters.
        for left,right in list(it.combinations(tree.keys(), 2)):
            nodekey += 1
            aux_data = np.vstack((tree[left].data,tree[right].data))
            aux_node = Node(nodekey,aux_data,alpha,beta=beta,left=tree[left],right=tree[right])
            r_k = posterior(aux_node)
            #print(r_k)
            if r_k > r_k_max:
                r_k_max = r_k
                merged_left = left
                merged_right = right
                merged_node = aux_node
        merged_node.r_k = r_k_max
        merged_node.left.parent = merged_node
        merged_node.right.parent = merged_node
        # Replace the two merged clusters by the combined one; the new key is
        # the comma-joined labels of its members.
        newkey = merged_left+','+ merged_right
        del tree[merged_left]
        del tree[merged_right]
        tree[newkey] = merged_node
        n_cluster -= 1
        clusters[n_cluster] = (r_k_max,list(tree.keys()))
        nodekey +=1
    return clusters,merged_node
import logging
def get_sql_value(conn_id, sql):
    """
    Execute *sql* against the connection identified by *conn_id* and return
    the single scalar it produces.

    The query must yield exactly one row with exactly one column; anything
    else raises ValueError.
    """
    hook = _get_hook(conn_id)
    records = hook.get_records(sql)
    if len(records) > 1:
        logging.info("Result: %s contains more than 1 entry", str(records))
        raise ValueError("Result from sql query contains more than 1 entry")
    if not records:
        raise ValueError("No result returned from sql query")
    first_row = records[0]
    if len(first_row) != 1:
        logging.info("Result: %s does not contain exactly 1 column", str(first_row))
        raise ValueError("Result from sql query does not contain exactly 1 column")
    return first_row[0]
def get_rendered_config(path: str) -> str:
    """Return the config at *path* as a string with every ``[NAME]``
    placeholder replaced by the value of the corresponding environment
    variable."""
    with open(path) as config_file:
        rendered = config_file.read()
    for placeholder in pattern.findall(rendered):
        rendered = rendered.replace("[" + placeholder + "]", _get_env_var(placeholder))
    return rendered
def units_to_msec(units, resolution):
    """Convert BLE specific units to milliseconds.

    *resolution* is interpreted as microseconds per unit, so the result is
    units * resolution / 1000.
    """
    return units * float(resolution) / 1000
def IR(numOfLayer, useIntraGCN, useInterGCN, useRandomMatrix, useAllOneMatrix, useCov, useCluster, class_num):
    """Constructs a ir-18/ir-50 model.

    Thin factory that forwards all configuration flags to Backbone.
    """
    return Backbone(
        numOfLayer,
        useIntraGCN,
        useInterGCN,
        useRandomMatrix,
        useAllOneMatrix,
        useCov,
        useCluster,
        class_num,
    )
import re
def extractNextPageToken(resultString):
    """
    Extract the nextPageToken from a JSON response string with a regexp
    instead of a full parse: calling
    GASearchVariantsResponse.fromJsonString() can be slower than doing the
    variant search in the first place.

    Returns the token string, or None when the token is absent or null.
    """
    token_match = re.search(
        '(?<=nextPageToken": )(?:")?([0-9]*?:[0-9]*)|null', resultString)
    if token_match is None:
        return None
    return token_match.group(1)
def underscore(msg):
    """Return *msg* rendered with the 'underscore' (underline) style."""
    style = __format['underscore']
    return __apply_style(style, msg)
def get_slice(dimspins, y):
    """
    Get slice of variable `y` inquiring the spinboxes `dimspins`.

    Each spinbox holds either a concrete index for its dimension or one of
    the reduction method names in DIMMETHODS (plus 'all' to keep the whole
    axis). Reductions are applied last-axis-first so earlier axis numbers
    stay valid.

    Parameters
    ----------
    dimspins : list
        List of tk.Spinbox widgets of dimensions
    y : ndarray or netCDF4._netCDF4.Variable
        Input array or netcdf variable

    Returns
    -------
    ndarray
        Slice of `y` chosen by with spinboxes (empty array for 0-dim input).

    Examples
    --------
    >>> vy = vardim2var(y)
    >>> yy = self.fi.variables[vy]
    >>> miss = get_miss(self, yy)
    >>> yy = get_slice_y(self.yd, yy).squeeze()
    >>> yy = set_miss(miss, yy)
    """
    methods = ['all']
    methods.extend(DIMMETHODS)
    # dd: per-dimension spinbox strings; ss: per-dimension slices.
    dd = []
    ss = []
    for i in range(y.ndim):
        dim = dimspins[i].get()
        if dim in methods:
            # 'all' or a reduction method: keep the full axis for now.
            s = slice(0, y.shape[i])
        else:
            # A concrete index: take a length-1 slice to preserve the axis.
            idim = int(dim)
            s = slice(idim, idim+1)
        dd.append(dim)
        ss.append(s)
    if len(ss) > 0:
        imeth = list_intersection(dd, DIMMETHODS)
        if len(imeth) > 0:
            yout = y[tuple(ss)]
            ii = [ i for i, d in enumerate(dd) if d in imeth ]
            ii.reverse()  # reduce last axis first so earlier indices stay valid
            for i in ii:
                if dd[i] == 'mean':
                    yout = np.ma.mean(yout, axis=i)
                elif dd[i] == 'std':
                    yout = np.ma.std(yout, axis=i)
                elif dd[i] == 'min':
                    yout = np.ma.min(yout, axis=i)
                elif dd[i] == 'max':
                    yout = np.ma.max(yout, axis=i)
                elif dd[i] == 'ptp':
                    yout = np.ma.ptp(yout, axis=i)
                elif dd[i] == 'sum':
                    yout = np.ma.sum(yout, axis=i)
                elif dd[i] == 'median':
                    yout = np.ma.median(yout, axis=i)
                elif dd[i] == 'var':
                    yout = np.ma.var(yout, axis=i)
            return yout
        else:
            return y[tuple(ss)]
    else:
        # 0-dimensional input: nothing to slice.
        return np.array([], dtype=y.dtype)
import requests
import json
def query_real_confs(body=None):  # noqa: E501
    """
    query the real configuration value in the current hostId node

    Validates the requested domain, resolves the hosts managed by it,
    fetches the configuration files the domain manages, then collects the
    actual on-host file contents via the collect service and returns them
    as RealConfInfo objects.  # noqa: E501

    :param body:
    :type body: dict | bytes
    :rtype: List[RealConfInfo]
    """
    if connexion.request.is_json:
        body = ConfHost.from_dict(connexion.request.get_json())  # noqa: E501
    domain = body.domain_name
    host_list = body.host_ids
    # Validate the domain name format before touching any backend service.
    check_res = Format.domainCheck(domain)
    if not check_res:
        num = 400
        base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.")
        return base_rsp, num
    # check the domain is Exist
    is_exist = Format.isDomainExist(domain)
    if not is_exist:
        code_num = 400
        base_rsp = BaseResponse(code_num, "The current domain does not exist, please create the domain first.")
        return base_rsp, code_num
    # check whether the host is configured in the domain
    is_host_list_exist = Format.isHostInDomain(domain)
    print("is_host_list_exist is : {}".format(is_host_list_exist))
    if not is_host_list_exist:
        code_num = 400
        base_rsp = BaseResponse(code_num, "The host information is not set in the current domain." +
                                "Please add the host information first")
        return base_rsp, code_num
    # get all hosts managed by the current domain.
    # If host_list is empty, query all hosts in the current domain.
    # If host_list is not empty, the actual contents of the currently given host are queried.
    conf_tools = ConfTools()
    port = conf_tools.load_port_by_conf()
    exist_host = []
    failed_host = []
    if len(host_list) > 0:
        host_tool = HostTools()
        exist_host, failed_host = host_tool.getHostExistStatus(domain, host_list)
    else:
        print("############## get the host in domain ##############")
        url = "http://0.0.0.0:" + port + "/host/getHost"
        headers = {"Content-Type": "application/json"}
        get_man_host = DomainName(domain_name=domain)
        # NOTE(review): the hosts returned here are never added to
        # exist_host, so the empty-host check below triggers — confirm.
        response = requests.post(url, data=json.dumps(get_man_host), headers=headers)  # post request
        print("host/getHost response is : {}".format(response.text))
        res_code = response.status_code
        res_text = json.loads(response.text)
        print("host/getHost return code is : {}".format(response.status_code))
    if len(exist_host) == 0 or len(failed_host) == len(host_list):
        code_num = 400
        base_rsp = BaseResponse(code_num, "The host information is not set in the current domain." +
                                "Please add the host information first")
        return base_rsp, code_num
    # get the management conf in domain
    print("############## get the management conf in domain ##############")
    url = "http://0.0.0.0:" + port + "/management/getManagementConf"
    headers = {"Content-Type": "application/json"}
    get_man_conf_body = DomainName(domain_name=domain)
    print("body is : {}".format(get_man_conf_body))
    response = requests.post(url, data=json.dumps(get_man_conf_body), headers=headers)  # post request
    print("response is : {}".format(response.text))
    res_code = response.status_code
    res_text = json.loads(response.text)
    print("return code is : {}".format(response.status_code))
    if res_code != 200:
        code_num = res_code
        base_rsp = BaseResponse(code_num, "Failed to query the configuration items managed in the current domain. " +
                                "The failure reason is:" + res_text)
        return base_rsp, code_num
    conf_files = res_text.get("confFiles")
    if len(conf_files) == 0:
        code_num = 400
        base_rsp = BaseResponse(code_num, "The configuration is not set in the current domain." +
                                "Please add the configuration information first")
        return base_rsp, code_num
    res = []
    # get the real conf in host
    # Build the list of file paths to collect (the part after the ':').
    conf_list = []
    for d_conf in conf_files:
        file_path = d_conf.get("filePath").split(":")[-1]
        conf_list.append(file_path)
    print("############## get the real conf in host ##############")
    get_real_conf_body = {}
    get_real_conf_body_info = []
    for d_host in exist_host:
        get_real_conf_body_infos = {}
        get_real_conf_body_infos["host_id"] = d_host
        get_real_conf_body_infos["config_list"] = conf_list
        get_real_conf_body_info.append(get_real_conf_body_infos)
    get_real_conf_body["infos"] = get_real_conf_body_info
    url = conf_tools.load_url_by_conf().get("collect_url")
    headers = {"Content-Type": "application/json"}
    response = requests.post(url, data=json.dumps(get_real_conf_body), headers=headers)  # post request
    resp = json.loads(response.text).get("resp")
    resp_code = json.loads(response.text).get("code")
    # 206 means a partial success (some files collected, some failed).
    if (resp_code != 200) and (resp_code != 206):
        code_num = 404
        code_string = "Failed to obtain the actual configuration, please check the file exists."
        base_rsp = BaseResponse(code_num, code_string)
        return base_rsp, code_num
    if not resp or len(resp) == 0:
        code_num = 500
        code_string = "Failed to obtain the actual configuration, please check the host info for conf/collect."
        base_rsp = BaseResponse(code_num, code_string)
        return base_rsp, code_num
    # Track, per host, which files were collected and which failed (only
    # used for the debug prints below).
    success_lists = {}
    failed_lists = {}
    for d_res in resp:
        d_host_id = d_res.get("host_id")
        fail_files = d_res.get("fail_files")
        if len(fail_files) > 0:
            failed_lists["host_id"] = d_host_id
            failed_lists_conf = []
            for d_failed in fail_files:
                failed_lists_conf.append(d_failed)
            failed_lists["failed_conf"] = failed_lists_conf
            failed_lists["success_conf"] = []
        else:
            success_lists["host_id"] = d_host_id
            success_lists["success_conf"] = []
            success_lists["failed_conf"] = []
        read_conf_info = RealConfInfo(domain_name=domain,
                                      host_id=d_host_id,
                                      conf_base_infos=[])
        d_res_infos = d_res.get("infos")
        for d_file in d_res_infos:
            file_path = d_file.get("path")
            content = d_file.get("content")
            object_parse = ObjectParse()
            content_string = object_parse.parse_conf_to_json(file_path, content)
            file_atrr = d_file.get("file_attr").get("mode")
            file_owner = "({}, {})".format(d_file.get("file_attr").get("group"), d_file.get("file_attr").get("owner"))
            real_conf_base_info = RealconfBaseInfo(file_path=file_path,
                                                   file_attr=file_atrr,
                                                   file_owner=file_owner,
                                                   conf_contens=content_string)
            read_conf_info.conf_base_infos.append(real_conf_base_info)
            if len(fail_files) > 0:
                failed_lists.get("success_conf").append(file_path)
            else:
                success_lists.get("success_conf").append(file_path)
        res.append(read_conf_info)
    print("***************************************")
    print("success_lists is : {}".format(success_lists))
    print("failed_lists is : {}".format(failed_lists))
    if len(res) == 0:
        code_num = 400
        res_text = "The real configuration does not found."
        base_rsp = BaseResponse(code_num, "Real configuration query failed." +
                                "The failure reason is : " + res_text)
        return base_rsp, code_num
    return res
def Run(get_initial_items,
        switch_to_good,
        switch_to_bad,
        test_script,
        test_setup_script=None,
        iterations=50,
        prune=False,
        pass_bisect=None,
        ir_diff=False,
        noincremental=False,
        file_args=False,
        verify=True,
        prune_iterations=100,
        verbose=False,
        resume=False):
  """Run binary search tool.
  Equivalent to running through terminal.
  Args:
    get_initial_items: Script to enumerate all items being binary searched
    switch_to_good: Script that will take items as input and switch them to good
      set
    switch_to_bad: Script that will take items as input and switch them to bad
      set
    test_script: Script that will determine if the current combination of good
      and bad items make a "good" or "bad" result.
    test_setup_script: Script to do necessary setup (building, compilation,
      etc.) for test_script.
    iterations: How many binary search iterations to run before exiting.
    prune: If False the binary search tool will stop when the first bad item is
      found. Otherwise then binary search tool will continue searching until all
      bad items are found (or prune_iterations is reached).
    pass_bisect: Script that takes single bad item from POPULATE_BAD and returns
      the compiler command used to generate the bad item. This will turn on
      pass/ transformation level bisection for the bad item. Requires that
      'prune' be set to False, and needs support of `-opt-bisect-limit`(pass)
      and `-print-debug-counter`(transformation) from LLVM.
    ir_diff: Whether to print IR differences before and after bad
      pass/transformation to verbose output. Defaults to False, only works when
      pass_bisect is enabled.
    noincremental: Whether to send "diffs" of good/bad items to switch scripts.
    file_args: If True then arguments to switch scripts will be a file name
      containing a newline separated list of the items to switch.
    verify: If True, run tests to ensure initial good/bad sets actually produce
      a good/bad result.
    prune_iterations: Max number of bad items to search for.
    verbose: If True will print extra debug information to user.
    resume: If True will resume using STATE_FILE.
  Returns:
    0 for success, error otherwise
  """
  # Notice that all the argument checks are in the Run() function rather than
  # in the Main() function. It is not common to do so but some wrappers are
  # going to call Run() directly and bypass checks in Main() function.
  if resume:
    # Resume path: all options come from the saved state, not the arguments.
    logger.GetLogger().LogOutput('Resuming from %s' % STATE_FILE)
    bss = BinarySearchState.LoadState()
    if not bss:
      logger.GetLogger().LogOutput(
          '%s is not a valid binary_search_tool state file, cannot resume!' %
          STATE_FILE)
      return 1
    logger.GetLogger().LogOutput('Note: resuming from previous state, '
                                 'ignoring given options and loading saved '
                                 'options instead.')
  else:
    # Fresh run: validate option combinations before building the state.
    if not (get_initial_items and switch_to_good and switch_to_bad and
            test_script):
      logger.GetLogger().LogOutput('The following options are required: '
                                   '[-i, -g, -b, -t] | [-r]')
      return 1
    if pass_bisect and prune:
      logger.GetLogger().LogOutput('"--pass_bisect" only works when '
                                   '"--prune" is set to be False.')
      return 1
    if not pass_bisect and ir_diff:
      logger.GetLogger().LogOutput('"--ir_diff" only works when '
                                   '"--pass_bisect" is enabled.')
    # Resolve all scripts to canonical paths before handing them to the state.
    switch_to_good = _CanonicalizeScript(switch_to_good)
    switch_to_bad = _CanonicalizeScript(switch_to_bad)
    if test_setup_script:
      test_setup_script = _CanonicalizeScript(test_setup_script)
    if pass_bisect:
      pass_bisect = _CanonicalizeScript(pass_bisect)
    test_script = _CanonicalizeScript(test_script)
    get_initial_items = _CanonicalizeScript(get_initial_items)
    incremental = not noincremental
    binary_search_perforce.verbose = verbose
    bss = BinarySearchState(get_initial_items, switch_to_good, switch_to_bad,
                            test_setup_script, test_script, incremental, prune,
                            pass_bisect, ir_diff, iterations, prune_iterations,
                            verify, file_args, verbose)
    bss.DoVerify()
  bss.DoSearchBadItems()
  if pass_bisect:
    bss.DoSearchBadPass()
  # Search finished: drop the on-disk resume state.
  bss.RemoveState()
  logger.GetLogger().LogOutput(
      'Total execution time: %s' % bss.ElapsedTimeString())
  return 0
def is_data(data):
    """ Check if a packet is a data packet.

    A data packet is long enough (more than 26 bytes) and carries 0x08 at
    offset 25 followed by 0x42 or 0x62 at offset 26.
    """
    if len(data) <= 26:
        return False
    return ord(data[25]) == 0x08 and ord(data[26]) in (0x42, 0x62)
def _bool_value(ctx, define_name, default, *, config_vars = None):
"""Looks up a define on ctx for a boolean value.
Will also report an error if the value is not a supported value.
Args:
ctx: A Starlark context. Deprecated.
define_name: The name of the define to look up.
default: The value to return if the define isn't found.
config_vars: A dictionary (String to String) of configuration variables. Can be from ctx.var.
Returns:
True/False or the default value if the define wasn't found.
"""
if not config_vars:
config_vars = ctx.var
value = config_vars.get(define_name, None)
if value != None:
if value.lower() in ("true", "yes", "1"):
return True
if value.lower() in ("false", "no", "0"):
return False
fail("Valid values for --define={} are: true|yes|1 or false|no|0.".format(
define_name,
))
return default | c60799e3019c6acefd74115ca02b76feb9c72237 | 26,969 |
def get_tf_metric(text):
    """
    Computes the tf (term frequency) metric.

    Each word's occurrence count is divided by the count of the most
    frequent word, so the returned frequencies lie in (0, 1].

    Params:
        text (tuple): tuple of words
    Returns:
        tf_text: format: ((word1, word2, ...), (tf1, tf2, ...));
            for an empty input the frequency list is empty.
    """
    from collections import Counter

    if not text:
        # No most-frequent word exists; return empty frequencies instead of
        # letting max() raise ValueError on an empty sequence.
        return text, []
    # Counter gives all counts in one O(n) pass, instead of calling
    # text.count(word) per word (O(n^2)).
    word_counts = Counter(text)
    max_count = max(word_counts.values())
    tf = [word_counts[word] / max_count for word in text]
    return text, tf
import torch
def R_transform_th(R_src, R_delta, rot_coord="CAMERA"):
    """Apply the delta rotation R_delta to R_src.

    :param R_src: source rotation matrix (tensor)
    :param R_delta: delta rotation matrix (tensor)
    :param rot_coord: coordinate system the delta is expressed in
        ("model", "camera", "naive" or "camera_new"; case-insensitive)
    :return: the composed rotation matrix
    """
    coord = rot_coord.lower()
    if coord == "model":
        return torch.matmul(R_src, R_delta)
    if coord in ("camera", "naive", "camera_new"):
        # dR_m2c x R_src_m2c
        return torch.matmul(R_delta, R_src)
    raise Exception("Unknown rot_coord in R_transform: {}".format(rot_coord))
def calculate_UMI_with_mismatch(UMIs):
    """
    Corrected the mismatches in UMIs

    input: UMI sequences and their counts (dict: sequence -> count);
    return: Corrected unique UMI sequences (list of sequences whose
        corrected count is positive)

    UMIs within Hamming distance 1 of a more abundant UMI have their count
    folded into it. Correction is only attempted when there are at most 10
    distinct UMIs; above that, all positive-count sequences are returned
    unchanged.
    """
    if len(UMIs.keys()) == 1:
        return [x for x in UMIs if UMIs[x]>0]
    # Sort by count, most abundant first. `UMIs` is rebound to a list of
    # (sequence, count) tuples from here on.
    UMIs = sorted(UMIs.items(), key=lambda k: k[1], reverse=True)
    UMI_info = {x[0]:x[1] for x in UMIs}
    umi_num = len(UMIs)
    if umi_num <= 10:
        for idx1 in range(0, umi_num-1):
            for idx2 in range(idx1+1, umi_num):
                umi_1 = UMIs[idx1][0]
                umi_2 = UMIs[idx2][0]
                if HammingDistance(umi_1, umi_2) <= 1:
                    # Fold the rarer UMI's count into the more abundant one.
                    # NOTE(review): an already-zeroed UMI can be merged again
                    # later (contributing 0) — presumably harmless; confirm.
                    UMI_info[umi_1] += UMI_info[umi_2]
                    UMI_info[umi_2] = 0
    return [x for x in UMI_info if UMI_info[x]>0]
def create_import_data(properties):
    """
    This function collects and creates all the asset data needed for the import process.

    :param object properties: The property group that contains variables that maintain the addon's correct state.
    :return list: A list of dictionaries containing the both the mesh and action import data.
    """
    # if using ue2rigify un-hide the source rig so its data can be collected;
    # it is re-hidden at the end of this function
    if properties.use_ue2rigify:
        set_source_rig_hide_value(False)
    # get the mesh and rig objects from their collections
    mesh_objects = utilities.get_from_collection(properties.mesh_collection_name, 'MESH', properties)
    rig_objects = utilities.get_from_collection(properties.rig_collection_name, 'ARMATURE', properties)
    # if the combine meshes option is on, get only meshes with unique armature parents
    mesh_objects = utilities.get_unique_parent_mesh_objects(rig_objects, mesh_objects, properties)
    # get the asset data for all the mesh objects
    mesh_data = create_mesh_data(mesh_objects, rig_objects, properties)
    # get the asset data for all the actions on the rig objects
    action_data = create_action_data(rig_objects, properties)
    # if using ue2rigify re-hide the source rig
    if properties.use_ue2rigify:
        set_source_rig_hide_value(True)
    return mesh_data + action_data
def get_data(URL, pl_start, pl_end):
    """Extract the contents of a cached playlist.

    Should be called only after it is checked that the URL is a cached
    playlist. Returns a tuple of (songs, playlist name).
    """
    logger.debug("Extracting Playlist Contents")
    playlist = CachedIE(URL, pl_start, pl_end)
    playlist.get_data()
    return playlist.list_content_tuple, playlist.playlist_name
def _parse_vertex_tuple(s):
"""Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k'.
...).
"""
vt = [0, 0, 0]
for i, c in enumerate(s.split("/")):
if c:
vt[i] = int(c)
return tuple(vt) | 37e53236ef7a96f55aed36e929abe4472911b9ea | 26,975 |
def getKeyFromValue(dictionary, value):
    """
    Return the first key in *dictionary* whose value equals *value*.

    (Translated from the original Japanese docstring: search the dictionary
    for a key holding the given value.)

    :param dictionary: mapping to search
    :param value: value to look for
    :return: the first matching key (in insertion order), or None
    """
    # next() with a default stops at the first hit instead of building a
    # full list of all matching keys just to take element 0.
    return next((key for key, val in dictionary.items() if val == value), None)
def withdraw(dest):
    """
    This function defines all the FlowSpec rules to be withdrawn via the iBGP Update.

    ////***update*** Add port-range feature similar to announce() - ADDED in TBowlby's code.

    The withdrawn rules mirror the announced mitigation set: DNS/NTP
    amplification, ICMP and SYN rate limits, common UDP reflection ports,
    fragments, and the catch-all rate limit.
    NOTE(review): tokens like DNS_RATE_LIMIT / COMMUNITY look like
    placeholders substituted downstream — confirm before editing them.

    Args:
        dest (str): IP Address of the Victim host.
    Calls:
        send_requests(messages): Calls a function to execute requests API commands to be sent to the Flask Server.
    Returns:
        Returns the string 'route_withdrawn' to confirm the withdrawal of routes so the entry can be deleted
        from the MySQL database.
    """
    # 'sleep' entries pace consecutive withdrawals so the peer is not flooded.
    messages = [
        'withdraw flow route { match { destination %s/32; source-port =53; protocol udp; } then { rate-limit DNS_RATE_LIMIT; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; source-port =123; protocol udp; packet-length =468; } then { discard; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; protocol icmp; } then { rate-limit ICMP_RATE_LIMIT; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; source-port =17 =19 =69 =111 =137 =138 =161 =162 =389 =520 =1434 =1701 =1900 =5353 =11211; protocol udp; } then { discard; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; source-port =53; destination-port =4444; protocol udp; } then { discard; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; protocol udp; fragment is-fragment; } then { discard; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; protocol tcp; tcp-flags [ syn ]; } then { rate-limit SYN_RATE_LIMIT; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; } then { rate-limit MAX_SPEED; community [ COMMUNITY ]; } }' % dest,
    ]
    send_requests(messages)
    return 'route_withdrawn'
def get_prefix(node):
    """
    Strips off the name in the URI to give the prefix...

    :param node: The full URI string
    :return: the prefix as a string — everything up to and including the
        final '#' (or, when no '#' is present, the final '/'). A URI that
        ends in its separator (empty local name) is returned unchanged.
    """
    if '#' in node:
        name = node.split("#")[-1]
    else:
        # there must be no # in the prefix e.g. schema.org/
        name = node.split("/")[-1]
    # Use an explicit end index: the old `node[:-len(name)]` evaluated to
    # node[:0] == '' whenever the local name was empty (URI ending in '/'
    # or '#'), silently losing the whole prefix.
    return node[:len(node) - len(name)]
def sY(qubit: Qubit, coefficient: complex = 1.0) -> Pauli:
    """Return the Pauli sigma_Y operator acting on the given qubit.

    Args:
        qubit: The qubit the operator acts on.
        coefficient: Optional scalar multiplier for the operator
            (default 1.0).
    """
    return Pauli.sigma(qubit, 'Y', coefficient)
def load_data_and_labels(filename):
    """Load sentences and labels.

    Reads a zipped CSV, keeps only the label column (ATTRIBUTE_TO_PREDICT)
    and the 'faits' text column, shuffles the rows, and one-hot encodes the
    labels.

    Returns:
        x_raw: list of cleaned sentence strings
        y_raw: list of one-hot label vectors
        df: the shuffled, filtered DataFrame
        labels: sorted list of the distinct label values
    """
    df = pd.read_csv(filename, compression='zip', dtype={'faits': object}, encoding = 'utf8')
    selected = [ATTRIBUTE_TO_PREDICT, 'faits']
    non_selected = list(set(df.columns) - set(selected))
    df = df.drop(non_selected, axis=1) # Drop non selected columns
    df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows
    df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe
    # Map the actual labels to one hot labels
    labels = sorted(list(set(df[selected[0]].tolist())))
    one_hot = np.zeros((len(labels), len(labels)), int)
    np.fill_diagonal(one_hot, 1)
    label_dict = dict(zip(labels, one_hot))
    # NOTE(review): chk_count looks like a module-level counter used for
    # reporting the number of loaded rows elsewhere — confirm.
    chk_count['n'] = len(df[selected[1]])
    x_raw = df[selected[1]].apply(lambda x: clean_str(x)).tolist()
    y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()
    return x_raw, y_raw, df, labels
def ovc_search(request):
    """Method to do ovc search.

    Runs search_master over the request and returns the results as JSON.
    On failure the error is logged and an empty result set is returned;
    previously the except branch referenced `results` while it was still
    unbound, turning any search failure into a NameError.
    """
    results = []
    try:
        results = search_master(request)
    except Exception as e:
        print('error with search - %s' % (str(e)))
    return JsonResponse(results, content_type='application/json',
                        safe=False)
import struct
def incdata(data, s):
    """
    add 's' to each byte (modulo 256).

    This is useful for finding the correct shift from an incorrectly
    shifted chunk.
    """
    return bytes((byte + s) & 0xFF for byte in data)
from typing import Tuple
def nonsquare_hungarian_matching(
    weights: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
  """Hungarian matching with arbitrary shape.

  The matchers_ops.hungarian_matching supports only squared weight matrices.
  This function generalizes the hungarian matching to nonsquare cases by padding
  the weights to a square and running the square version matching. The property
  of hungarian matching ensures that the solutions are equivalent for the padded
  square problem and the original nonsquare problem.

  Args:
    weights: A [batch, shape1, shape2] float32 tf.Tensor.

  Returns:
    square_permutation: A [batch, max(shape1, shape2), max(shape1, shape2)]
      float32 tf.Tensor that is the permutation matrix that achieves the minimum
      total weight. Note that a permutation matrix contains only value 0.0 and
      1.0, with each row and each column sums to 1.0.
    nonsquare_permutation: A [batch, shape1, shape2] float32 tf.Tensor. The
      nonsquare part of the permutation matrix.
  """
  _, height, width = weights.get_shape().as_list()
  max_height_width = max(height, width)
  # Padding a constant on one axis does not affect matching results.
  # NOTE(review): _MATCHING_NEGATIVE_CONSTANT presumably makes the padded
  # entries maximally attractive so real entries keep their relative order —
  # confirm against its definition.
  weights = tf.pad(weights,
                   [[0, 0],  # Do not pad the batch dimension.
                    [0, max_height_width - height],
                    [0, max_height_width - width]],
                   constant_values=_MATCHING_NEGATIVE_CONSTANT)
  square_permutation = matchers_ops.hungarian_matching(weights)
  square_permutation = tf.cast(square_permutation, tf.float32)
  # Crop back to the original (possibly nonsquare) shape for the second output.
  return square_permutation, square_permutation[:, :height, :width]
from collections import defaultdict
from json import load
from HUGS.Modules import Datasource, ObsSurface
from HUGS.Util import (get_datetime_now, get_datetime_epoch, create_daterange_str,
timestamp_tzaware, get_datapath)
def search(
    locations,
    species=None,
    inlet=None,
    instrument=None,
    find_all=True,
    start_datetime=None,
    end_datetime=None,
):
    """ Search for gas data (optionally within a daterange)

        TODO - review this function - feel like it can be tidied and simplified

        Args:
            species (str or list): Terms to search for in Datasources
            locations (str or list): Where to search for the terms in species
            inlet (str, default=None): Inlet height such as 100m
            instrument (str, default=None): Instrument name such as picarro
            find_all (bool, default=True): Require all search terms to be satisfied
            start_datetime (datetime, default=None): Start datetime for search
            If None a start datetime of UNIX epoch (1970-01-01) is set
            end_datetime (datetime, default=None): End datetime for search
            If None an end datetime of the current datetime is set
        Returns:
            dict: List of keys of Datasources matching the search parameters
    """
    # Normalize scalar arguments to lists so the loops below work uniformly.
    # if species is not None and not isinstance(species, list):
    if not isinstance(species, list):
        species = [species]
    if not isinstance(locations, list):
        locations = [locations]
    # Allow passing of location names instead of codes
    site_codes_json = get_datapath(filename="site_codes.json")
    with open(site_codes_json, "r") as f:
        d = load(f)
        site_codes = d["name_code"]
    updated_locations = []
    # Check locations, if they're longer than three letters do a lookup
    for loc in locations:
        if len(loc) > 3:
            try:
                site_code = site_codes[loc.lower()]
                updated_locations.append(site_code)
            except KeyError:
                raise ValueError(f"Invalid site {loc} passed")
        else:
            updated_locations.append(loc)
    locations = updated_locations
    if start_datetime is None:
        start_datetime = get_datetime_epoch()
    if end_datetime is None:
        end_datetime = get_datetime_now()
    # Ensure passed datetimes are timezone aware
    start_datetime = timestamp_tzaware(start_datetime)
    end_datetime = timestamp_tzaware(end_datetime)
    # Here we want to load in the ObsSurface module for now
    obs = ObsSurface.load()
    datasource_uuids = obs.datasources()
    # Shallow load the Datasources so we can search their metadata
    datasources = [Datasource.load(uuid=uuid, shallow=True) for uuid in datasource_uuids]
    # First we find the Datasources from locations we want to narrow down our search
    location_sources = defaultdict(list)
    # If we have locations to search
    for location in locations:
        for datasource in datasources:
            if datasource.search_metadata(search_terms=location):
                location_sources[location].append(datasource)
    # This is returned to the caller
    results = defaultdict(dict)
    # With both inlet and instrument specified we bypass the ranking system
    if inlet is not None and instrument is not None:
        for site, sources in location_sources.items():
            for sp in species:
                for datasource in sources:
                    # Just match the single source here
                    if datasource.search_metadata(search_terms=[sp, site, inlet, instrument], find_all=True):
                        daterange_str = create_daterange_str(start=start_datetime, end=end_datetime)
                        # Get the data keys for the data in the matching daterange
                        in_date = datasource.in_daterange(daterange=daterange_str)
                        data_date_str = strip_dates_keys(in_date)
                        key = f"{sp}_{site}_{inlet}_{instrument}".lower()
                        # Find the keys that match the correct data
                        results[key]["keys"] = {data_date_str: in_date}
                        results[key]["metadata"] = datasource.metadata()
        return results
    for location, sources in location_sources.items():
        # Loop over and look for the species
        species_data = defaultdict(list)
        for datasource in sources:
            for s in species:
                # Drop unset filters so only provided terms are matched.
                search_terms = [x for x in (s, location, inlet, instrument) if x is not None]
                # Check the species and the daterange
                if datasource.search_metadata(search_terms=search_terms, find_all=True):
                    species_data[s].append(datasource)
        # For each location we want to find the highest ranking sources for the selected species
        for sp, sources in species_data.items():
            ranked_sources = {}
            # How to return all the sources if they're all 0?
            for source in sources:
                rank_data = source.get_rank(start_date=start_datetime, end_date=end_datetime)
                # With no rank set we get an empty dictionary
                if not rank_data:
                    ranked_sources[0] = 0
                    continue
                # Just get the highest ranked datasources and return them
                # Find the highest ranked data from this site
                highest_rank = sorted(rank_data.keys())[-1]
                if highest_rank == 0:
                    ranked_sources[0] = 0
                    continue
                ranked_sources[source.uuid()] = {"rank": highest_rank, "dateranges": rank_data[highest_rank], "source": source}
            # If it's all zeroes we want to return all sources
            if list(ranked_sources) == [0]:
                for source in sources:
                    key = f"{source.species()}_{source.site()}_{source.inlet()}_{source.instrument()}".lower()
                    daterange_str = create_daterange_str(start=start_datetime, end=end_datetime)
                    data_keys = source.in_daterange(daterange=daterange_str)
                    if not data_keys:
                        continue
                    # Get a key that covers the daterange of the actual data and not from epoch to now
                    # if no start/end datetimes are passed
                    data_date_str = strip_dates_keys(data_keys)
                    results[key]["keys"] = {data_date_str: data_keys}
                    results[key]["metadata"] = source.metadata()
                continue
            else:
                # TODO - find a cleaner way of doing this
                # We might have a zero rank, delete it as we have higher ranked data
                try:
                    del ranked_sources[0]
                except KeyError:
                    pass
            # Otherwise iterate over the sources that are ranked and extract the keys
            for uid in ranked_sources:
                source = ranked_sources[uid]["source"]
                source_dateranges = ranked_sources[uid]["dateranges"]
                key = f"{source.species()}_{source.site()}_{source.inlet()}_{source.instrument()}".lower()
                data_keys = {}
                # Get the keys for each daterange
                for d in source_dateranges:
                    keys_in_date = source.in_daterange(daterange=d)
                    d = d.replace("+00:00", "")
                    if keys_in_date:
                        data_keys[d] = keys_in_date
                if not data_keys:
                    continue
                results[key]["keys"] = data_keys
                results[key]["metadata"] = source.metadata()
    return results
import string
def strip_non_printable(value):
    """
    Strip all non-printable characters from *value*, appending a
    ``(contains binary)`` marker whenever anything was removed.

    :param value: the string to sanitize; ``None`` is passed through unchanged
    :return: the printable-only string, or ``None`` if *value* was ``None``
    """
    if value is None:
        return None
    # Keep only characters in string.printable (digits, letters,
    # punctuation and whitespace).
    cleaned = ''.join(c for c in value if c in string.printable)
    # Nothing was stripped: return the value untouched.
    if cleaned == value:
        return cleaned
    # Binary content was present; flag it for the reader.
    marker = '(contains binary)'
    return cleaned + ' ' + marker if cleaned else marker
def flatten(name):
    """Build a flatten layer for the active backend.

    Parameters
    ----------
    name : string
        Name to assign to the flatten layer.

    Returns
    -------
    keras.layers.core.Flatten or None
        The named layer when ``LIB_TYPE`` is ``"keras"``; ``None`` for any
        other backend (same implicit fall-through as before, made explicit).
    """
    if LIB_TYPE != "keras":
        return None
    return Flatten(name=name)
import math
def tangent_circle(dist, radius):
    """
    Return the tangent angle to a circle centred at (dist, 0.0) with the
    given radius.

    When the observer is inside the circle (dist < radius) no tangent
    exists, so a sentinel of 100 degrees is returned instead.
    """
    # No tangent line exists from a point inside the circle.
    if dist < radius:
        return math.radians(100)
    return math.asin(radius / float(dist))
import re
def find_backup_path(docsents, q, cand, k=40):
    """
    If no path is found create a dummy backup path.

    Locates the first non-stopword question token (the "he" anchor) and the
    first non-stopword candidate token anywhere in the documents, recording
    their document index and character locations.  Fields that cannot be
    resolved are filled with "BACKUP" placeholders or sentinel locations so
    downstream code always receives a structurally complete path dict.

    :param docsents: documents as nested token lists (doc -> sentences ->
        tokens); flattened and re-joined into text here — TODO confirm exact
        nesting against callers
    :param q: iterable of question tokens
    :param cand: candidate answer string (whitespace-tokenized here)
    :param k: maximum number of match locations kept per entity
    :return: dict with placeholder entity fields plus any locations found
    """
    # Template path; unresolved fields keep BACKUP placeholders / None so
    # consumers can index the dict unconditionally.
    path_for_cand_dict = {"he_docidx": None,
                          "he_locs": None,
                          "e1wh_loc": None,
                          "e1_docidx": None,
                          "e1_locs": None,
                          "cand_docidx": None,
                          "cand_locs": None,
                          "he_words": ["BACKUP"],
                          "e1wh": "BACKUP",
                          "e1": "BACKUP",
                          "cand_words": ["BACKUP"]
                          }
    # Anchor entity: first question token (stopwords removed) found in any doc.
    ent_words = [qtok for qtok in q if qtok not in STOPWORDS]
    flag = 0
    for entw in ent_words:
        he = entw.lower()
        if len(he.split()) == 0:
            # Token is pure whitespace: record a sentinel location in doc 0.
            path_for_cand_dict['he_docidx'] = 0
            path_for_cand_dict['he_locs'] = [(-1, -1)]
        else:
            # Word-boundary match; '\W' in a non-raw string still reaches
            # the regex engine as the \W character class.
            pat_he = re.compile('(^|\W)' + re.escape(he) + '\W')
            for docssidx, docss in enumerate(docsents):
                # Flatten the document's sentences into one
                # whitespace-normalized, lowercased string.
                doc = ' '.join(' '.join(sum(docss, [])).split())
                doc = doc.lower()
                he_objs = []
                for x in pat_he.finditer(doc):
                    he_objs.append(x)
                if len(he_objs) > 0:
                    flag = 1
                    path_for_cand_dict['he_docidx'] = docssidx
                    # Keep at most k match locations.
                    path_for_cand_dict['he_locs'] = get_locs_given_objs(doc, he, he_objs)[:k]
                    break
        if flag == 1:
            break
    # Candidate answer: same first-match search over its non-stopword tokens.
    cand_toks = cand.split()
    cand_words = [candtok for candtok in cand_toks if candtok not in STOPWORDS]
    flag = 0
    for cand in cand_words:
        cand = cand.lower()
        pat_cand = re.compile('(^|\W)' + re.escape(cand) + '\W')
        for docssidx, docss in enumerate(docsents):
            doc = ' '.join(' '.join(sum(docss, [])).split())
            doc = doc.lower()
            ca_objs = []
            for x in pat_cand.finditer(doc):
                ca_objs.append(x)
            if len(ca_objs) > 0:
                flag = 1
                path_for_cand_dict['cand_docidx'] = docssidx
                path_for_cand_dict['cand_locs'] = get_locs_given_objs(doc, cand, ca_objs)[:k]
                break
        if flag == 1:
            break
    # Guarantee sentinel values when nothing was located at all.
    if path_for_cand_dict['he_docidx'] is None or path_for_cand_dict['he_locs'] is None:
        path_for_cand_dict['he_docidx'] = 0
        path_for_cand_dict['he_locs'] = [(-1, -1)]
    if path_for_cand_dict['cand_docidx'] is None or path_for_cand_dict['cand_locs'] is None:
        path_for_cand_dict['cand_docidx'] = 0
        path_for_cand_dict['cand_locs'] = [(0, 0)]
    return path_for_cand_dict
def state_transitions():
    """Simplified state transition dictionary.

    Encodes the linear chain E -> A -> I -> H -> R, each edge carrying a
    single age-bracket key "(0, 9)" with probability 1.
    """
    chain = [("E", "A"), ("A", "I"), ("I", "H"), ("H", "R")]
    return {src: {dst: {"(0, 9)": 1}} for src, dst in chain}
import json
def send(socket, action, opts=None, request_response=True, return_type='auto'):
    """Send a request to an RPC server.

    Parameters
    ----------
    socket : zmq socket
        The ZeroMQ socket connected to the server.
    action : str
        Name of the action the server should perform. See
        :func:`RPCClient.send()` for the available actions and options.
    opts : dict or None
        Optional dict of options controlling the action's behavior.
    request_response : bool
        When True, the server is asked to send a response.
    return_type : str
        'proxy' to force return values by proxy, or 'auto' to let the
        server decide between proxy and value.

    Returns
    -------
    int
        Request ID to listen on for a response (-1 when no response was
        requested).
    """
    global next_req_id
    # Requests that expect a reply carry a unique, increasing ID; -1 tells
    # the server not to reply at all.
    if request_response:
        req_id = next_req_id
        next_req_id += 1
    else:
        req_id = -1
    # Options travel JSON-serialized; an empty payload means "no options".
    opts_str = b'' if opts is None else json.dumps(opts).encode()
    # Tell the server which serializer we are using.
    ser_type = b'json'
    # Assemble and send the multipart request.
    parts = [str(req_id).encode(), action.encode(), return_type.encode(),
             ser_type, opts_str]
    socket.send_multipart(parts)
    # Echo the final encoded message so we can see what went on the wire.
    rendered = '\n'.join([' ' + part.decode() for part in parts])
    print("\n>>> send to %s:\n%s" % (socket.last_endpoint.decode(), rendered))
    # The caller can use this ID to listen for a response later.
    return req_id
def brighter(data, data_mean=None):
    """
    Brighter set of parameters for density remap.

    Applies the amplitude-to-density mapping with dmin=60 / mmult=40 and
    clips/casts the result.

    Parameters
    ----------
    data : numpy.ndarray
    data_mean : None|float|int

    Returns
    -------
    numpy.ndarray
    """
    density = amplitude_to_density(data, dmin=60, mmult=40, data_mean=data_mean)
    return clip_cast(density)
from typing import List
import os
def get_configuration(args_in: List[str]) -> Configuration:
    """
    Retrieves configuration from the command line or environment variables.

    Every option can also be supplied through its matching environment
    variable (``env_var``), so the tool works both as a CLI and inside
    container/CI environments.

    Parameters
    ----------
    args_in: List[str]
        The system arguments received by the main script

    Returns
    -------
    An object of type Configuration
    """
    parser = ArgParser()
    # Required: iNaturalist API credentials and identifiers.
    parser.add(
        "-t",
        "--api-token",
        required=True,
        help="An API token acquired from https://www.inaturalist.org/users/api_token",
        type=str,
        env_var="INAT_API_TOKEN"
    )
    parser.add(
        "-z",
        "--log-level",
        default="INFO",
        help="Standard Python logging level, e.g. ERROR, WARNING, INFO, DEBUG",
        type=str,
        env_var="LOG_LEVEL"
    )
    parser.add(
        "-u",
        "--user-name",
        required=True,
        help="iNaturalist user name",
        type=str,
        env_var="INAT_USER_NAME"
    )
    parser.add(
        "-p",
        "--project-slug",
        required=True,
        help="Slug (short name) of the project to extract",
        type=str,
        env_var="PROJECT_SLUG"
    )
    # Optional tuning / resumption parameters with sensible defaults.
    parser.add(
        "-s",
        "--page-size",
        default=200,
        help="Number of records to retrieve per request. Default: 200. Max: 200. Use lower value for testing.",
        type=int,
        env_var="PAGE_SIZE"
    )
    parser.add(
        "-o",
        "--output-directory",
        default=os.path.join(".","out"),
        help="Directory name for output files.",
        type=str,
        env_var="OUTPUT_DIR"
    )
    parser.add(
        "-l",
        "--last-id",
        default="0",
        help="The last observation ID from a previous download, used to start a fresh download from the next available observation.",
        type=str,
        env_var="LAST_ID"
    )
    parser.add(
        "-i",
        "--input-file",
        help="An input file that will be merged with the downloaded file",
        type=str,
        env_var="INPUT_FILE",
        default=None
    )
    args_parsed = parser.parse_args(args_in)
    # Repackage the flat namespace into the typed Configuration object.
    return Configuration(
        api_token=args_parsed.api_token,
        log_level=args_parsed.log_level,
        user_name=args_parsed.user_name,
        project_slug=args_parsed.project_slug,
        page_size=args_parsed.page_size,
        output_directory=args_parsed.output_directory,
        last_id=args_parsed.last_id,
        input_file=args_parsed.input_file
    )
import sys
def main() -> int:
    """Entry point for 'compress'.

    Parses command-line options, selects the compression scheme and the
    direction (compress vs. decompress), then streams the input sources
    through the codec to stdout.  Always returns 0; Ctrl-C exits quietly.
    """
    try:
        argv = parser.parse_args()
        # The buffersize option is given in MiB; convert to bytes.
        buffersize = int(argv.buffersize * 1024**2)
        action = compress if argv.decompress is False else decompress
        # Map each scheme flag's boolean value back to its name.
        # NOTE(review): if more than one scheme flag were True they would
        # collapse onto a single dict key — presumably the parser makes the
        # flags mutually exclusive; confirm against the parser definition.
        schemes = {getattr(argv, scheme): scheme for scheme in SCHEMES}
        if True not in schemes:
            kind = 'gzip'
        else:
            kind = schemes[True]
        options = {'kind': kind, 'encoding': argv.encoding}
        if argv.compress:
            # Compression level only applies when compressing.
            options['level'] = argv.level
        # Text encoding requested -> write str via sys.stdout; otherwise
        # write raw bytes to the underlying binary buffer.
        writer = sys.stdout if argv.encoding is not None else sys.stdout.buffer
        with BinaryStream(*argv.sources) as stream:
            for buff in action(stream.iterbuffers(buffersize), **options):
                writer.write(buff)
    except KeyboardInterrupt:
        # Swallow Ctrl-C so interactive interruption exits without a traceback.
        pass
    finally:
        sys.stdout.buffer.flush()
    return 0
import logging
import sqlite3
def get_lensed_host_fluxes(host_truth_db_file, image_dir, bands='ugrizy',
                           components=('bulge', 'disk'),
                           host_types=('agn', 'sne'), verbose=False):
    """
    Loop over entries in `agn_hosts` and `sne_hosts` tables in
    the host_truth_db_file and compute fluxes (with and without MW
    extinction) in each band. Return dicts of fluxes and coordinates
    keyed by object ids.

    Parameters
    ----------
    host_truth_db_file: str
        File containing model parameters for lensed host of AGNs and SNe.
    image_dir: str
        Directory containing the FITS stamps.
    bands: str or list-like ['ugrizy']
        Bands for which to return magnorms.
    components: list-like [('bulge', 'disk')]
        Galaxy components of lensed hosts.
    host_types: list-like [('agn', 'sne')]
        Types of hosted objects.
    verbose: bool [False]
        Verbose flag.

    Returns
    -------
    (dict, dict, dict, dict): dicts of fluxes with MW extinction, fluxes
        without MW extinction, photon counts (ADU), and a dict of
        [ra, dec, redshift] lists, all keyed by object id.
    """
    logger = logging.getLogger('get_lensed_host_fluxes')
    if verbose:
        logger.setLevel(logging.INFO)
    logger.info('processing %s', host_truth_db_file)
    # Factory producing a fresh zeroed per-band accumulator for each new id.
    band_fluxes = lambda: {band:0 for band in bands}
    fluxes = defaultdict(band_fluxes)
    fluxes_noMW = defaultdict(band_fluxes)
    num_photons = defaultdict(band_fluxes)
    coords = dict()
    mag_norms = dict()
    with sqlite3.connect(host_truth_db_file) as conn:
        for host_type in host_types:
            df = pd.read_sql(f'select * from {host_type}_hosts', conn)
            # Magnitude normalizations are read per galaxy component from
            # the FITS stamps in image_dir.
            for component in components:
                mag_norms[component] = get_mag_norms(host_type, component,
                                                     image_dir)
            for iloc in range(len(df)):
                logger.info('%s %d %d', host_type, iloc, len(df))
                row = df.iloc[iloc]
                ra = row['ra_lens']
                dec = row['dec_lens']
                redshift = row['redshift']
                unique_id = str(row['unique_id'])
                coords[unique_id] = [ra, dec, redshift]
                # Milky Way (Galactic) extinction parameters for this row.
                gAv = row['av_mw']
                gRv = row['rv_mw']
                for component in components:
                    # Skip components with no stamp-derived normalization.
                    if unique_id not in mag_norms[component]:
                        continue
                    sed_file = find_sed_file(
                        row[f'sed_{component}_host'].lstrip('b').strip("'"))
                    # Internal (host-galaxy) dust parameters.
                    iAv = row[f'av_internal_{component}']
                    iRv = row[f'rv_internal_{component}']
                    for band in bands:
                        mag_norm = mag_norms[component][unique_id][band]
                        synth_phot = SyntheticPhotometry(sed_file, mag_norm,
                                                         redshift=redshift,
                                                         iAv=iAv, iRv=iRv)
                        # Flux before MW extinction is applied...
                        fluxes_noMW[unique_id][band] \
                            += synth_phot.calcFlux(band)
                        # ...then add Galactic dust and recompute.
                        synth_phot.add_dust(gAv, gRv, 'Galactic')
                        fluxes[unique_id][band] += synth_phot.calcFlux(band)
                        bp = synth_phot.bp_dict[band]
                        photpars = PhotometricParameters(nexp=1, exptime=30,
                                                         gain=1, bandpass=band)
                        num_photons[unique_id][band] \
                            += synth_phot.sed.calcADU(bp, photpars)
    # Convert defaultdicts to plain dicts so missing-key access fails loudly.
    return dict(fluxes), dict(fluxes_noMW), dict(num_photons), coords
from pathlib import Path
def load_challenges() -> list[Challenge]:
    """
    Reload every challenge module and collect their challenges.

    Clears the module-level challenge list, then imports each non-private
    ``*.py`` file under ``saves/challenges`` in sorted name order and
    appends its ``challenge`` attribute.

    Returns
    -------
    list[Challenge]
        All loaded challenges.
    """
    __challenges.clear()
    challenge_dir = Path(__file__).parent / "saves/challenges"
    module_names = sorted(
        entry.name.removesuffix(".py")
        for entry in challenge_dir.iterdir()
        if entry.name.endswith(".py") and not entry.name.startswith("_")
    )
    for module_name in module_names:
        module = import_module(".saves.challenges." + module_name, __package__)
        __challenges.append(module.challenge)  # noqa
    return __challenges
def get_new_user_data(GET_params):
    """Return the data necessary to create a new OLD user or update an existing one.

    :param GET_params: the ``request.GET`` dictionary-like object generated by
        Pylons which contains the query string parameters of the request.
    :returns: A dictionary whose values are lists of objects needed to create or
        update user.

    If ``GET_params`` has no keys, then return all data. If ``GET_params`` does
    have keys, then for each key whose value is a non-empty string (and not a
    valid ISO 8601 datetime) add the appropriate list of objects to the return
    dictionary. If the value of a key is a valid ISO 8601 datetime string, add
    the corresponding list of objects *only* if the datetime does *not* match
    the most recent ``datetime_modified`` value of the resource. That is, a
    non-matching datetime indicates that the requester has out-of-date data.
    """
    # model_name_map maps param names to the OLD model objects from which they are
    # derived.
    model_name_map = {'orthographies': 'Orthography'}
    # getter_map maps param names to getter functions that retrieve the
    # appropriate data from the db.
    getter_map = {'orthographies': h.get_mini_dicts_getter('Orthography')}
    # result is initialized as a dict with empty list values.
    result = dict([(key, []) for key in getter_map])
    # Static application-level constants; always included in the response.
    result['roles'] = h.user_roles
    result['markup_languages'] = h.markup_languages
    # There are GET params, so we are selective in what we return.
    if GET_params:
        for key in getter_map:
            val = GET_params.get(key)
            # Proceed so long as val is not an empty string.
            if val:
                val_as_datetime_obj = h.datetime_string2datetime(val)
                if val_as_datetime_obj:
                    # Value of param is an ISO 8601 datetime string that
                    # does not match the most recent datetime_modified of the
                    # relevant model in the db: therefore we return a list
                    # of objects/dicts. If the datetimes do match, this
                    # indicates that the requester's own stores are
                    # up-to-date so we return nothing.
                    if val_as_datetime_obj != h.get_most_recent_modification_datetime(
                    model_name_map[key]):
                        result[key] = getter_map[key]()
                else:
                    # Non-datetime, non-empty value: treat as an explicit
                    # request for this resource's data.
                    result[key] = getter_map[key]()
    # There are no GET params, so we get everything from the db and return it.
    else:
        for key in getter_map:
            result[key] = getter_map[key]()
    return result
import six
def add_heatmap_summary(feature_query, feature_map, name):
    """Plots dot product of feature_query on feature_map.

    Args:
      feature_query: Batch x embedding size tensor of goal embeddings
      feature_map: Batch x h x w x embedding size of pregrasp scene embeddings
      name: string to name tensorflow summaries

    Returns:
      Batch x h x w x 1 heatmap
    """
    batch, dim = feature_query.shape
    # Reshape the query to (batch, 1, 1, dim) so it broadcasts against every
    # spatial location of the feature map.
    reshaped_query = tf.reshape(feature_query, (int(batch), 1, 1, int(dim)))
    # Dot product along the embedding axis -> one scalar per (h, w) location.
    # NOTE(review): keep_dims is the TF1.x argument spelling (renamed
    # keepdims in TF2) — assumes this file targets TF1; confirm.
    heatmaps = tf.reduce_sum(
        tf.multiply(feature_map, reshaped_query), axis=3, keep_dims=True)
    tf.summary.image(name, heatmaps)
    shape = tf.shape(heatmaps)
    # Also log a softmax-normalized version (normalized over h*w per example).
    softmaxheatmaps = tf.nn.softmax(tf.reshape(heatmaps, (int(batch), -1)))
    tf.summary.image(
        six.ensure_str(name) + 'softmax', tf.reshape(softmaxheatmaps, shape))
    return heatmaps
def ParseVecFile(filename):
    """Parse a vector art file and return an Art object for it.

    Right now, handled file types are: EPS, Adobe Illustrator, PDF, SVG.

    Args:
      filename: string - name of the file to read and parse
    Returns:
      geom.Art: object containing paths drawn in the file.
      Return None if there was a major problem reading the file.
    """
    major, minor = ClassifyFile(filename)
    if major == "error":
        print("Couldn't get Art:", minor)
        return None
    # SVG has its own dedicated parser.
    if major == "svg":
        return svg.ParseSVGFile(filename)
    # PDF files, and AI files that are PDF underneath.
    if major == "pdf" or (major == "ai" and minor == "pdf"):
        contents = pdf.ReadPDFPageOneContents(filename)
        if not contents:
            return None
        return ParsePS(TokenizeAIEPS(contents), major, minor)
    # EPS files, and AI files that are EPS underneath.
    if major == "eps" or (major == "ai" and minor == "eps"):
        return ParsePS(TokenizeAIEPSFile(filename), major, minor)
    # Unrecognized file type.
    return None
import torch
def batchnorm_to_float(module):
    """Converts every batch-norm layer under *module* to FP32.

    Walks the module tree and calls ``.float()`` on each ``_BatchNorm``
    instance; other layers keep their current precision.  Returns *module*
    for chaining.
    """
    pending = [module]
    while pending:
        current = pending.pop()
        if isinstance(current, torch.nn.modules.batchnorm._BatchNorm):
            current.float()
        pending.extend(current.children())
    return module
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.