| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def _parse_class(s):
    """
    Parse a key, value pair separated by '='.
    On the command line (argparse) a declaration will typically look like:
        foo=hello
    or
        foo="hello world"
    """
    items = s.split('=')
    key = items[0].strip()  # strip whitespace around the key
    value = None
    if len(items) > 1:
        # rejoin the rest in case the value itself contains '=':
        value = '='.join(items[1:])
    return (key, value)
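# Illustrative usage sketch (not part of the original snippet):
assert _parse_class("foo=hello world") == ("foo", "hello world")
assert _parse_class('foo="hello world"') == ("foo", '"hello world"')
assert _parse_class("bar") == ("bar", None)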
| 5,340,600
|
def data_base():
"""mock the database"""
my_db = database.Database(":memory:")
yield my_db
my_db.database.close()
| 5,340,601
|
def getStringSimilarity(string1: str, string2: str):
    """
    Return a similarity ratio between two strings (a float in [0, 1]).
    Uses difflib's quick_ratio(), an upper bound on the true similarity ratio.
    """
    import difflib
    return difflib.SequenceMatcher(None, string1, string2).quick_ratio()
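# Illustrative usage sketch (not part of the original snippet):
similarity = getStringSimilarity("apple", "applet")
assert 0.0 <= similarity <= 1.0  # quick_ratio() returns a float between 0 and 1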
| 5,340,602
|
def test_hover_parameter_bool():
"""Test that hovering over parameters shows their values LOGICAL"""
string = write_rpc_request(1, "initialize", {"rootPath": str(test_dir)})
file_path = test_dir / "hover" / "parameters.f90"
string += hover_req(file_path, 8, 38)
errcode, results = run_request(string, fortls_args=["--sort_keywords"])
assert errcode == 0
ref_results = ["LOGICAL(kind=8), PARAMETER :: long_bool = .true."]
validate_hover(results, ref_results)
| 5,340,603
|
def release(ctx):
"""Release the packages to Pypi!"""
dist_dir = os.path.join(ROOT_DIR, "dist")
if not os.path.isdir(dist_dir):
sys.exit("Dist directory does not exist. Build first?")
print("This is what you are about to upload:")
for fname in sorted(os.listdir(dist_dir)):
s = os.stat(os.path.join(dist_dir, fname)).st_size
print(" {:0.0f} KiB {}".format(s / 2**10, fname))
while True:
x = input("Are you sure you want to upload now? [Y/N]: ")
if x.upper() == "N":
return
elif x.upper() == "Y":
break
if sys.platform.startswith("win"):
sys.exit("Cannot release from Windows: the exes wont be chmodded properly!")
subprocess.check_call([sys.executable, "-m", "twine", "upload", "dist/*"])
| 5,340,604
|
def model_downloader(
handler_type: HandlerType,
bucket_name: str,
model_name: str,
model_version: str,
model_path: str,
temp_dir: str,
model_dir: str,
) -> Optional[datetime.datetime]:
"""
    Downloads the model to disk. Validates the S3 model path and the downloaded model.
Args:
handler_type: The handler type as implemented by the API.
bucket_name: Name of the bucket where the model is stored.
model_name: Name of the model. Is part of the model's local path.
model_version: Version of the model. Is part of the model's local path.
model_path: Model prefix of the versioned model.
temp_dir: Where to temporarily store the model for validation.
model_dir: The top directory of where all models are stored locally.
Returns:
        The model's timestamp. None if the model didn't pass validation, if it doesn't exist, or if permissions are insufficient.
"""
logger.info(
f"downloading from bucket {bucket_name}/{model_path}, model {model_name} of version {model_version}, temporarily to {temp_dir} and then finally to {model_dir}"
)
client = S3(bucket_name)
# validate upstream S3 model
sub_paths, ts = client.search(model_path)
try:
validate_model_paths(sub_paths, handler_type, model_path)
except CortexException:
logger.info(f"failed validating model {model_name} of version {model_version}")
return None
# download model to temp dir
temp_dest = os.path.join(temp_dir, model_name, model_version)
try:
client.download_dir_contents(model_path, temp_dest)
except CortexException:
logger.info(
f"failed downloading model {model_name} of version {model_version} to temp dir {temp_dest}"
)
shutil.rmtree(temp_dest)
return None
# validate model
model_contents = glob.glob(os.path.join(temp_dest, "**"), recursive=True)
model_contents = util.remove_non_empty_directory_paths(model_contents)
try:
validate_model_paths(model_contents, handler_type, temp_dest)
except CortexException:
logger.info(
f"failed validating model {model_name} of version {model_version} from temp dir"
)
shutil.rmtree(temp_dest)
return None
# move model to dest dir
model_top_dir = os.path.join(model_dir, model_name)
ondisk_model_version = os.path.join(model_top_dir, model_version)
logger.info(
f"moving model {model_name} of version {model_version} to final dir {ondisk_model_version}"
)
if os.path.isdir(ondisk_model_version):
shutil.rmtree(ondisk_model_version)
shutil.move(temp_dest, ondisk_model_version)
return max(ts)
| 5,340,605
|
def calc_simcoef_distr(patfeats, labels, id_dict, simcoef):
"""
    Calculates the similarity score distributions.
    Inputs:
        - patfeats: feature representations of the patents, indexed by patent ID
        - labels: list of strings naming the score groups to be calculated (e.g. ['cited', 'random'])
        - id_dict: dictionary containing the patent ID pairs for the respective label
        - simcoef: similarity coefficient the values are calculated with (string)
    Output:
        - scores: dictionary containing the scores for each label
"""
scores = dict.fromkeys(labels)
for label in labels:
        print(label)
scores[label] = []
combis = id_dict[label]
for combi in combis:
score = compute_sim(patfeats[combi[0]], patfeats[combi[1]], simcoef)
scores[label].append(score)
return scores
| 5,340,606
|
def test_nodes_of_empty_weight_weight_graph_is_empty(empty_weight_graph):
"""Test that the list of nodes for an empty graph is empty."""
assert empty_weight_graph.nodes() == []
| 5,340,607
|
def fft(signal, sampling_rate, plot=False, show_grid=True, fig_size=(10, 5)):
"""
Perform FFT on signal.
Compute 1D Discrete Fourier Transform using Fast Fourier Transform.
Optionally, plot the power spectrum of the frequency domain.
Parameters
----------
signal : ndarray
Input array to be transformed.
sampling_rate : float
Sampling rate of the input signal.
plot : bool, optional
Toggle to display a plot of the power spectrum.
show_grid : bool, optional
If creating a plot, toggle to show grid lines on the figure.
fig_size : tuple, optional
If plotting, set the width and height of the resulting figure.
Returns
-------
signal_fft : ndarray
Transformation of the original input signal.
"""
n = len(signal)
t = 1.0 / sampling_rate
time = range(n) # Time vector
xf = np.linspace(0.0, 1.0 / (2.0 * t), n // 2)
yf = np.fft.fft(signal) / n # FFT and normalize
if plot:
f, axarr = plt.subplots(2, 1, figsize=fig_size)
axarr[0].plot(time, signal)
axarr[0].set_xlim(min(time), max(time))
axarr[0].set_xlabel("Time Steps")
axarr[0].set_ylabel("Amplitude")
axarr[0].grid(show_grid)
axarr[1].plot(xf, abs(yf[0 : n // 2]), "r") # Plot the spectrum
axarr[1].set_xlabel("Freq (Hz)")
axarr[1].set_ylabel("|Y(freq)|")
axarr[1].grid(show_grid)
f.subplots_adjust(hspace=0.5)
plt.suptitle("Power Spectrum", size=16)
plt.show()
return yf
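# Illustrative usage sketch (not part of the original snippet; assumes numpy is
# imported as np at module level, as the function above already requires):
fs_demo = 100.0
t_demo = np.arange(0, 1, 1 / fs_demo)                  # one second of samples
sine = np.sin(2 * np.pi * 5 * t_demo)                  # 5 Hz sine wave
spectrum = fft(sine, fs_demo, plot=False)
half = len(sine) // 2
freqs = np.linspace(0.0, fs_demo / 2.0, half)
peak_hz = freqs[np.argmax(np.abs(spectrum[:half]))]    # locate the spectral peak
assert abs(peak_hz - 5.0) < 1.0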
| 5,340,608
|
def pattern_count(data, **params):
"""
Count occurrences of a given pattern.
Args:
data (list): values.
params (kwargs):
pattern (str or list): the pattern to be sought in data (obligatory)
metric (str): 'identity' counts identical positions,
'euclid' calculates the Euclidean distance (L2 norm),
'taxi' calculates the taxicab (Manhattan) distance
(L1 norm).
'sup' returns maximum distance between positions,
'inf' returns minimum distance between positions.
Only 'identity' can be used with non-numerical data.
radius (number): the similarity cutoff (non-negative)
normalized (bool): whether the number of occurrences is to be
divided by the maximum number of occurrences.
(default:False)
Returns the number of occurrences of the pattern in the data.
Invokes internal function '_pattern_common', which raises:
NameError when 'pattern' is not given,
TypeError if 'pattern' is neither string nor list,
ValueError if 'radius' is negative or unsupported distance method used.
"""
pattern, patlen, radius, metric = _pattern_common(**params)
normalized = params['normalized'] if 'normalized' in params else False
counts = 0
for pos in range(len(data) - patlen + 1):
if _list_distance(data[pos:pos + patlen], pattern, metric) <= radius:
counts += 1
return counts if not normalized \
else 1.0 * counts / (len(data) - patlen + 1)
| 5,340,609
|
def pathify(path):
"""\
Generator that returns values suitable for path store navigation:
- No values, if path is None.
- One value, if path is a string or isn't iterable.
- Each value, otherwise.
"""
if path is not None:
if isinstance(path, str):
yield path
else:
try:
for item in path:
yield item
except TypeError:
yield path
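# Illustrative usage sketch (not part of the original snippet):
assert list(pathify(None)) == []
assert list(pathify("root/child")) == ["root/child"]
assert list(pathify(["root", "child", 0])) == ["root", "child", 0]
assert list(pathify(3)) == [3]  # non-iterable values are yielded as-is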
| 5,340,610
|
def read_directory(directory):
"""
Read file names from directory recursively
Parameters
----------
directory : string
directory/folder name where to read the file names from
Returns
---------
files : list of strings
list of file names
"""
try:
return glob2.glob(os.path.join(directory, '**' , '*.*'))
    except Exception as e:
        logging.error("[{}] : {}".format(sys._getframe().f_code.co_name, e))
| 5,340,611
|
def do(hostname):
"""
Performs a GET request.
Parameters
----------
    hostname : str
        Target URL for the GET request.
    Returns
    -------
    The response object, or None if the request failed.
"""
try:
return requests.get(hostname, timeout=10)
except TimeoutException:
print("\033[1;31mRequest timeout: test aborted\n\033[1;m")
return None
except requests.ConnectionError:
print("\033[1;31mServer not found: test aborted\n\033[1;m")
return None
finally:
signal.alarm(0)
| 5,340,612
|
def getAssignmentReport(assignment):
"""
Produces an ABET assignment report (as a markdown-formatted string)
for the given assignment (which is expected to be a codepost API
object) by pulling all relevant data as well as source
    code files (and grader comments) for representative A, B and C samples
"""
courseId = assignment.course
course = codepost.course.retrieve(id=courseId)
courseName = course.name
coursePeriod = course.period
assignmentName = assignment.name
assignmentPts = assignment.points
assignmentMean = assignment.mean
assignmentMedian = assignment.median
summary = f"""
# {courseName} - {coursePeriod}
## {assignmentName}
* Points: {assignmentPts}
* Mean: {assignmentMean}
* Median: {assignmentMedian}\n\n"""
# find ideal A, B, C samples
submissions = assignment.list_submissions()
aSubmission = submissions[0]
bSubmission = submissions[0]
cSubmission = submissions[0]
# we only expect 1 submission per student since submissions are via our
# scripts, but in any case, find the 3 closest to A=max%, B = 85%, C = 75%
for submission in submissions:
if submission.grade > aSubmission.grade:
aSubmission = submission
if abs(submission.grade / assignmentPts - .85) < abs(bSubmission.grade / assignmentPts - .85):
bSubmission = submission
if abs(submission.grade / assignmentPts - .75) < abs(cSubmission.grade / assignmentPts - .75):
cSubmission = submission
aSummary, aDetail = submissionToMarkdown(aSubmission,"A",assignmentPts)
bSummary, bDetail = submissionToMarkdown(bSubmission,"B",assignmentPts)
cSummary, cDetail = submissionToMarkdown(cSubmission,"C",assignmentPts)
return summary + aSummary + bSummary + cSummary + "\n\n" + aDetail + bDetail + cDetail
| 5,340,613
|
def start_game():
"""
    Prompt the player to start a new game or resume an ongoing one.
    :return: The player's choice ('1' for a new game, '2' to resume)
"""
maximize_console()
print_title()
print('Do you want to start a new game (enter 1) or resume an ongoing game (enter 2)?')
choice = input('||> ')
print()
return choice
| 5,340,614
|
def manage(cmd):
"""Update a testing database"""
_local('django-admin.py {}'.format(cmd))
| 5,340,615
|
def reply_to_mention(mention, payload):
""" Replies to a mention with the emojify'd text """
try:
request_response = requests.post('http://emojifythis.org/emojifi', data=json.dumps(payload))
json_response = request_response.json()
mention.reply(json_response['text'] + '\n\n ============================= \n I am a bot. '
'\n My source code is at https://github.com/bretttjohnson1/EmojiFi/ '
'\n Our website is at http://emojifythis.org')
mention.mark_read()
except Exception:
pass
| 5,340,616
|
def copy_dir(src_fs, src_path, dst_fs, dst_path,
walker=None, on_copy=None):
"""Copy a directory from one filesystem to another.
Arguments:
src_fs (FS or str): Source filesystem (instance or URL).
src_path (str): Path to a directory on the source filesystem.
dst_fs (FS or str): Destination filesystem (instance or URL).
dst_path (str): Path to a directory on the destination filesystem.
walker (~fs.walk.Walker, optional): A walker object that will be
used to scan for files in ``src_fs``. Set this if you only
want to consider a sub-set of the resources in ``src_fs``.
on_copy (callable, optional): A function callback called after
a single file copy is executed. Expected signature is
``(src_fs, src_path, dst_fs, dst_path)``.
"""
on_copy = on_copy or (lambda *args: None)
walker = walker or Walker()
_src_path = abspath(normpath(src_path))
_dst_path = abspath(normpath(dst_path))
with manage_fs(src_fs) as src_fs:
with manage_fs(dst_fs, create=True) as dst_fs:
with src_fs.lock(), dst_fs.lock():
dst_fs.makedir(_dst_path, recreate=True)
for dir_path, dirs, files in walker.walk(src_fs, _src_path):
copy_path = combine(
_dst_path,
frombase(_src_path, dir_path)
)
for info in dirs:
dst_fs.makedir(
info.make_path(copy_path),
recreate=True
)
for info in files:
src_path = info.make_path(dir_path)
dst_path = info.make_path(copy_path)
copy_file(
src_fs,
src_path,
dst_fs,
dst_path
)
on_copy(src_fs, src_path, dst_fs, dst_path)
| 5,340,617
|
def p_data_page_after(p):
"""datasort : datasort '@' AFTER '(' pagelist ')' """
p[0] = p[1].with_after(p[5])
| 5,340,618
|
def modified_files():
"""
Gets a list of modified files in the repo.
:return: A list of absolute paths to all changed files in the repo
"""
repo_root_dir = repo_root()
return [os.path.join(repo_root_dir, d.b_path) for d in get().head.commit.diff() if not (d.new_file or d.deleted_file)]
| 5,340,619
|
def test_subtraction(
lcs_lhs, lcs_rhs, orientation_exp, coordinates_exp, time_exp, time_ref_exp
):
"""Test the subtraction of 2 coordinate systems.
Parameters
----------
lcs_lhs:
Left hand side coordinate system
lcs_rhs:
Right hand side coordinate system
orientation_exp:
Expected orientations of the resulting coordinate system
coordinates_exp:
Expected coordinates of the resulting coordinate system
time_exp:
Expected time of the resulting coordinate system
time_ref_exp:
Expected reference time of the resulting coordinate system
"""
check_coordinate_system(
lcs_lhs - lcs_rhs,
orientation_exp,
coordinates_exp,
True,
time_exp,
time_ref_exp,
)
| 5,340,620
|
def encode_data(dataset_path=DATASET_PATH):
"""Encodes the symbloc music in the dataset folder.
:param dataset_path (str): Path to the dataset
:return data, filenames (list): Encoded songs and their file names
"""
# encoded songs and their file names
data = []
filenames = []
# loop through the dataset folder
for dirpath, dirlist, filelist in os.walk(dataset_path):
# process each file
for this_file in filelist:
# ensure extension is valid
if os.path.splitext(this_file)[-1] not in EXTENSION:
continue
# parse the file
filename = os.path.join(dirpath, this_file)
try:
score = converter.parse(filename)
            except Exception:
print("Warning: Failed to read \"%s\"" %filename)
continue
print("Parsing \"%s\"" %filename)
            # keep the first part (usually the melody) of the score
score = score.parts[0].flat
# transpose to C major/A minor
score = transpose(score)
# encoded song
song = []
# process each note (chord) in the score
for element in score.recurse():
if isinstance(element, note.Note):
note_pitch = element.pitch.midi
note_duration = element.quarterLength
elif isinstance(element, note.Rest):
note_pitch = 0
note_duration = element.quarterLength
elif isinstance(element, chord.Chord):
note_pitch = element.notes[-1].pitch.midi
note_duration = element.quarterLength
else:
continue
# ensure duration is valid
if note_duration%0.25 == 0:
# encode note
note_step = int(note_duration/0.25)
song += [str(note_pitch)] + ['-']*(note_step-1)
else:
# unacceptable duration found
song = None
print("Warning: Found an unacceptable duration when reading the file \"%s\"" %filename)
break
            if song is not None:
# save the encoded song and its name
data.append(song)
filenames.append(os.path.splitext(os.path.basename(filename))[0])
print("Successfully encoded %d songs" %(len(data)))
return data, filenames
| 5,340,621
|
def test_extract_params_no_arguments(all_param_type_transmute_func):
""" if no arguments are passed, use the defaults """
extractor = ParamExtractorMock()
extractor._query_argument = lambda *args: NoArgument
extractor._header_argument = lambda *args: NoArgument
extractor._path_argument = lambda *args: NoArgument
args, kwargs = extractor.extract_params(
default_context, all_param_type_transmute_func, "application/json"
)
assert args == []
assert kwargs == {"query": 1, "header": 2, "path": 3, "body": "body"}
| 5,340,622
|
def get_inception_score(images, batch_size, splits=10):
"""
the function is to calculate the inception score of the generated images
image is a numpy array with values should be in the range[0, 255]
images 299x299x3
"""
assert(type(images) == np.ndarray)
inception_model = inception_v3
inception_model.eval()
def get_softmax(x):
x = inception_model(x)
return tf.nn.softmax(x)
n = len(images) // batch_size
preds = np.zeros([len(images), 1000], dtype=np.float32)
    tfe.enable_eager_execution()
dataloader = tf.data.Dataset.from_tensor_slices(images)
    dataloader = dataloader.batch(batch_size)
for i, batch in enumerate(tfe.Iterator(dataloader), 0):
batch_x = tf.Variable(batch) # images
# softmax
preds[i * batch_size:(i + 1) * batch_size] = get_softmax(batch_x)
scores = []
# IS score
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
| 5,340,623
|
def generate_audio_testing(raw_gain, raw_freq, raw_dampings, modal_fir, reverb, impulse_profile,
gains, frequencies, dampings, modal_response,
noise, acceleration_scale, revc, audio_sample_rate, example_secs, scratch='controls'):
"""Generate DiffImpact's estimate of impact sound based on current model variables."""
# Generate impulse --> impact profile
# magnitude_envelopes, taus, prediction['stdevs']
# impc = impact.get_controls(mags, stdevs, taus, 0) # needs to be 2D?
# impulse_profile = impact.get_signal(impc['magnitudes'], impc['taus'])
print("impulse profile shape: ", impulse_profile.shape) # force profile
zero_freq = np.zeros_like(raw_freq)
zero_gain = np.random.rand(1,256) #np.zeros_like(raw_gain)
zero_damp = np.zeros_like(raw_dampings)
# Get modal response from raw freqs, gains, and dampings
irc_scratch = modal_fir.get_controls(raw_gain, raw_freq, raw_dampings)
ir_scratch = modal_fir.get_signal(irc_scratch['gains'], irc_scratch['frequencies'], irc_scratch['dampings'])
# Get modal response from scaled (passed through get_controls) freqs, gains, dampings
ir = modal_fir.get_signal(gains, frequencies, dampings)
print("ir: ", ir)
print("model's output modal response: ", modal_response)
#print("ir_scratch: ", ir_scratch)
# Convolve together for modal vibration sounds
if scratch == 'raw':
audio = ddsp.core.fft_convolve(impulse_profile, ir_scratch)
elif scratch == 'controls' or scratch =='control':
audio = ddsp.core.fft_convolve(impulse_profile, ir)
else:
audio = ddsp.core.fft_convolve(impulse_profile, modal_response)
print("convolved shape: ", audio.shape)
# Generate and add time constant noise
# Note that in the context, clips.shape[0] is batch size (which is 1 for all testing here)
# clips.shape[1] is the actual clip size (like 441000 for 10 seconds of 44100 audio sampling rate)
#unfiltered_noise = tf.random.uniform((clips.shape[0], int(clips.shape[1] * sample_factor)),
#minval=-1.0, maxval=1.0)
#noise = ddsp.core.frequency_filter(unfiltered_noise, ddsp.core.exp_sigmoid(noise_magnitudes - 4.0), 257)
audio += noise
print("after adding noise: ", audio.shape)
# Add acceleration sound
audio += impulse_profile * acceleration_scale
print("after acceleration sound: ", audio.shape)
# Add reverb
# revc = reverb.get_controls(audio, reverb_gains, reverb_decay)
audio = reverb.get_signal(audio, revc)#revc['ir'])
print("after reverb: ", audio.shape)
# Downsample from internal sampling rate to original recording sampling rate
# audio = ddsp.core.resample(audio, clips.shape[1], 'linear')
# Note that the resample function will return shape [n_timesteps], which is the second parameter
print("audio sample rate: ", audio_sample_rate)
audio = ddsp.core.resample(audio, int(audio_sample_rate)*example_secs, 'linear')
return audio
| 5,340,624
|
def dice_coef_multilabel(y_true, y_pred, numLabels=4, channel='channel_first'):
"""
calculate channel-wise dice similarity coefficient
:param y_true: the ground truth
:param y_pred: the prediction
:param numLabels: the number of classes
:param channel: 'channel_first' or 'channel_last'
:return: the dice score
"""
assert channel=='channel_first' or channel=='channel_last', r"channel has to be either 'channel_first' or 'channel_last'"
dice = 0
if channel == 'channel_first':
y_true = np.moveaxis(y_true, 1, -1)
y_pred = np.moveaxis(y_pred, 1, -1)
for index in range(1, numLabels):
temp = dice_coef(y_true[..., index], y_pred[..., index])
dice += temp
dice = dice / (numLabels - 1)
return dice
| 5,340,625
|
def gaussian(sigma, fs, t=None):
""" return a gaussian smoothing filter
Args:
sigma: standard deviation of a Gaussian envelope
fs: sampling frequency of input signals
t: time scale
Return:
a Gaussian filter and corresponding time scale
"""
if t is None:
t = np.linspace(-sigma*4.0, sigma*4.0, int(sigma*8.0*fs))
gss = np.exp(-0.5 * (t ** 2.0) / sigma ** 2.0)
gss /= np.sum(gss)
return gss, t
| 5,340,626
|
def score_sent(sent):
"""Returns a score btw -1 and 1"""
sent = [e.lower() for e in sent if e.isalnum()]
total = len(sent)
pos = len([e for e in sent if e in positive_wds_with_negation])
neg = len([e for e in sent if e in negative_wds_with_negation])
if total > 0:
return (pos - neg) / total
else:
return 0
| 5,340,627
|
def flipud(tensor):
"""
Flips a given tensor along the first dimension (up to down)
Parameters
----------
tensor
a tensor at least two-dimensional
Returns
-------
Tensor
the flipped tensor
"""
return torch.flip(tensor, dims=[0])
| 5,340,628
|
def connect():
"""Function to connect to database on Amazon Web Services"""
try:
engine = create_engine('mysql+mysqlconnector://dublinbikesadmin:dublinbikes2018@dublinbikes.cglcinwmtg3w.eu-west-1.rds.amazonaws.com/dublinbikes')
port=3306
connection = engine.connect()
Session.configure(bind=engine)
return engine
#https://campus.datacamp.com/courses/introduction-to-relational-databases-in-python/advanced-sqlalchemy-queries?ex=2#skiponboarding
except Exception as err:
print ("An error occurred when connecting to the database: ", err)
#https://dev.mysql.com/doc/connector-python/en/connector-python-api-errors-error.html
| 5,340,629
|
def get_file_type(filepath):
"""Returns the extension of a given filepath or url."""
return filepath.split(".")[-1]
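# Illustrative usage sketch (not part of the original snippet):
assert get_file_type("reports/summary.pdf") == "pdf"
assert get_file_type("https://example.com/image.png") == "png"
# note: a path without a dot is returned unchanged, since split(".") yields a single element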
| 5,340,630
|
def set_viewport_height(height: int):
"""Sets the viewport's height.
Returns:
None
"""
internal_dpg.configure_viewport(0, height=height)
| 5,340,631
|
def calcB1grad(B2grad,W2,A2):
"""
Calculates the gradient of the cost with respect to B1 using the chain rule
INPUT: B2grad, [layer3Len,1] ; W2, [layer2Len, layer3Len] ;
A2, [layer2len, 1]
OUTPUT: B1grad, [layer2Len, 1]
"""
temp1 = np.dot(W2,B2grad) #layer2Len * 1 vector
sigmGradient = sigmoidGradient(A2) #layer2len * 1 vector
B1grad = np.multiply(sigmGradient,temp1)
return B1grad
| 5,340,632
|
def main():
"""The main function."""
import argparse
parser = argparse.ArgumentParser(
description='Train DB-CNN for BIQA.')
parser.add_argument('--base_lr', dest='base_lr', type=float, default=1e-1,
help='Base learning rate for training.')
parser.add_argument('--batch_size', dest='batch_size', type=int,
default=64, help='Batch size.')
parser.add_argument('--epochs', dest='epochs', type=int,
default=30, help='Epochs for training.')
parser.add_argument('--weight_decay', dest='weight_decay', type=float,
default=5e-4, help='Weight decay.')
args = parser.parse_args()
    if args.base_lr <= 0:
        raise AttributeError('--base_lr parameter must be > 0.')
    if args.batch_size <= 0:
        raise AttributeError('--batch_size parameter must be > 0.')
    if args.epochs < 0:
        raise AttributeError('--epochs parameter must be >= 0.')
    if args.weight_decay <= 0:
        raise AttributeError('--weight_decay parameter must be > 0.')
options = {
'base_lr': args.base_lr,
'batch_size': args.batch_size,
'epochs': args.epochs,
'weight_decay': args.weight_decay,
}
path = {
'kadis': '/media/zwx-sjtu/data/kadis700k',
'model': '/home/zwx-sjtu/codebase/DBCNN-PyTorch-master/models'
}
manager = SCNNManager(options, path)
# manager.getStat()
manager.train()
| 5,340,633
|
def after(*args, **kwargs):
"""After advice"""
print "After true"
| 5,340,634
|
def plot_3d(x, y, z, title, labels):
"""
Returns a matplotlib figure containing the 3D T-SNE plot.
Args:
x, y, z: arrays
title: string with name of the plot
labels: list of strings with label names: [x, y, z]
"""
plt.rcParams.update({'font.size': 30, 'legend.fontsize': 20})
plt.rc('font', size=30)
plt.rc('axes', titlesize=35)
labelpad = 30
figure = plt.figure(figsize=(12,12))
ax = figure.add_subplot(projection='3d')
ax.scatter(x, y, z)
ax.set_title(title)
ax.set_xlabel(labels[0], labelpad=labelpad)
ax.set_ylabel(labels[1], labelpad=labelpad)
ax.set_zlabel(labels[2], labelpad=labelpad)
plt.tight_layout()
return figure
| 5,340,635
|
def crossvalidate(splitter, axis, *arg):
"""split each input in arg into train and test splits by indexing along the axis
using the indices returned by splitter. Both returns are lists."""
for trainind, testind in splitter:
# from list of arg where each entry is [train, test] to [trainarg, testarg]
yield zip(*[(np.compress(trainind, thisdata, axis=axis),
np.compress(testind, thisdata, axis=axis)) for thisdata in arg])
| 5,340,636
|
def test_against_pytpm_doc_example():
"""
Check that Astropy's Ecliptic systems give answers consistent with pyTPM
Currently this is only testing against the example given in the pytpm docs
"""
fk5_in = SkyCoord('12h22m54.899s', '15d49m20.57s', frame=FK5(equinox='J2000'))
pytpm_out = BarycentricMeanEcliptic(lon=178.78256462*u.deg,
lat=16.7597002513*u.deg,
equinox='J2000')
astropy_out = fk5_in.transform_to(pytpm_out)
assert pytpm_out.separation(astropy_out) < (1*u.arcsec)
| 5,340,637
|
def is_rating_col_name(col:str)->bool:
"""
Checks to see if the name matches the naming convention for a rating
column of data, i.e. A wrt B
:param col: The name of the column
:return: T/F
"""
if col is None:
return False
elif isinstance(col, (float, int)) and np.isnan(col):
return False
elif is_pw_col_name(col):
return False
else:
return __RATING_COL_REGEX.search(col) is not None
| 5,340,638
|
def _make_buildifier_command():
"""Returns a list starting with the buildifier executable, followed by any
required default arguments."""
return [
find_data(_BUILDIFIER),
"-add_tables={}".format(find_data(_TABLES))]
| 5,340,639
|
def infer_path_type(path: str) -> Union[XPath, JSONPath]:
"""
Infers the type of a path (XPath or JSONPath) based on its syntax.
It performs some basic sanity checks to differentiate a JSONPath from an XPath.
:param path: A valid XPath or JSONPath string.
:return: An instance of JSONPath or XPath
"""
if not path:
raise ValueError("No path given")
if path[0] in ['$', '@']:
return JSONPath(path)
else:
if path[0] in ['.', '/']:
return XPath(path)
else:
raise ValueError("Couldn't identify the path type for {}".format(path))
| 5,340,640
|
def ensure_dir_exists(dirname):
"""Ensure a directory exists, creating if necessary."""
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
| 5,340,641
|
def get_proximity_angles():
"""Get the angles used for the proximity sensors."""
angles = []
# Left-side of the agent
angles.append(3 * pi / 4) # 135° (counter-clockwise)
for i in range(5): # 90° until 10° with hops of 20° (total of 5 sensors)
angles.append(pi / 2 - i * pi / 9)
# Center
angles.append(0) # 0°
# Right-side of the agent
for i in range(5): # -10° until -90° with hops of 20° (total of 5 sensors)
angles.append(-pi / 18 - i * pi / 9)
angles.append(-3 * pi / 4) # -135° (clockwise)
return angles
| 5,340,642
|
def delete(request, scenario_id):
"""
Delete the scenario
"""
# Retrieve the scenario
session = SessionMaker()
scenario = session.query(ManagementScenario).filter(ManagementScenario.id == scenario_id).one()
# Delete the current scenario
session.delete(scenario)
session.commit()
return redirect('parleys_creek_management:jobs')
| 5,340,643
|
def main():
"""main"""
#Initialize acl
acl_resource = AclResource()
acl_resource.init()
#Create a detection network instance, currently using the vgg_ssd network.
# When the detection network is replaced, instantiate a new network here
detect = VggSsd(acl_resource, MODEL_WIDTH, MODEL_HEIGHT)
#Load offline model
model = Model(MODEL_PATH)
#Connect to the presenter server according to the configuration,
# and end the execution of the application if the connection fails
chan = presenteragent.presenter_channel.open_channel(FACE_DETEC_CONF)
if chan is None:
print("Open presenter channel failed")
return
    #Open the CAMERA0 camera on the development board
cap = Camera(0)
while True:
#Read a picture from the camera
image = cap.read()
if image is None:
print("Get memory from camera failed")
break
#The detection network processes images into model input data
model_input = detect.pre_process(image)
if model_input is None:
print("Pre process image failed")
break
#Send data to offline model inference
result = model.execute(model_input)
        #Parse the inference output of the detection network
jpeg_image, detection_list = detect.post_process(result, image)
if jpeg_image is None:
print("The jpeg image for present is None")
break
chan.send_detection_data(CAMERA_FRAME_WIDTH, CAMERA_FRAME_HEIGHT,
jpeg_image, detection_list)
| 5,340,644
|
def get_python_list(file_path):
"""
    Find all the .py files in the directory and collect them in a list.
    :params:
        file_path = the path to the folder whose files are to be read.
    :returns:
        python_files : list of all file names ending with '.py' in the folder.
"""
python_files = []
for file in os.listdir(file_path):
if file.endswith(".py"):
python_files.append(file)
print('\nThese are all the .py files inside the folder: \n')
for i in python_files:
print(i)
return python_files
| 5,340,645
|
def angle(p1, p2, p3):
"""Returns an angle from a series of 3 points (point #2 is centroid).
Angle is returned in degrees.
Parameters
----------
p1,p2,p3 : numpy arrays, shape = [n_points, n_dimensions]
Triplets of points in n-dimensional space, aligned in rows.
Returns
-------
angles : numpy array, shape = [n_points]
Series of angles in degrees
"""
v1 = p1 - p2
v2 = p3 - p2
return angle_2v(v1, v2)
| 5,340,646
|
def transformer_parsing_base():
"""HParams for parsing on WSJ only."""
hparams = transformer_base()
hparams.attention_dropout = 0.2
hparams.layer_prepostprocess_dropout = 0.2
hparams.max_length = 512
hparams.learning_rate_warmup_steps = 16000
hparams.hidden_size = 1024
hparams.learning_rate = 0.05
hparams.shared_embedding_and_softmax_weights = False
return hparams
| 5,340,647
|
def split_protocol(urlpath):
"""Return protocol, path pair"""
urlpath = stringify_path(urlpath)
if "://" in urlpath:
protocol, path = urlpath.split("://", 1)
if len(protocol) > 1:
# excludes Windows paths
return protocol, path
return None, urlpath
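# Illustrative usage sketch (not part of the original snippet; assumes
# stringify_path passes plain strings through unchanged):
assert split_protocol("s3://bucket/key.csv") == ("s3", "bucket/key.csv")
assert split_protocol("/local/path/file.csv") == (None, "/local/path/file.csv")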
| 5,340,648
|
def createParPythonMapJob(info):
"""
Create map job json for IGRA matchup.
Example:
job = {
'type': 'test_map_parpython',
'params': {
'year': 2010,
'month': 7
},
'localize_urls': [
]
}
"""
print("Info:")
pprint(info, indent=2)
    # build params
job = {
'type': 'test_map_parpython',
'name': 'test_map_parpython-%04d-%02d' % (int(info['year']), int(info['month'])),
'params': info,
'localize_urls': []
}
print("Job:")
pprint(job, indent=2)
return job
| 5,340,649
|
def calc_lampam_from_delta_lp_matrix(stack, constraints, delta_lampams):
"""
returns the lamination parameters of a laminate
INPUTS
    - stack: laminate stacking sequence
- constraints: design and manufacturing guidelines
- delta_lampams: ply partial lamination parameters
"""
lampam = np.zeros((12,), float)
for ind_ply in range(delta_lampams.shape[0]):
lampam += delta_lampams[
ind_ply, constraints.ind_angles_dict[stack[ind_ply]]]
return lampam
| 5,340,650
|
def _save_plugin_metadata(order_model, plugin_meta):
"""Add plugin metadata to an order."""
if not isinstance(plugin_meta, dict):
plugin_meta = {}
order_plugin_meta_repo = repos.get_order_plugin_meta_repository()
order_plugin_meta_repo.save(plugin_meta, order_model)
| 5,340,651
|
def perp(i):
"""Calculates the perpetuity to present worth factor.
:param i: The interest rate.
:return: The calculated factor.
"""
return 1 / i
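# Illustrative usage sketch (not part of the original snippet):
# at a 5% interest rate the perpetuity factor is 1 / 0.05 = 20
assert abs(perp(0.05) - 20.0) < 1e-9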
| 5,340,652
|
def apply_aircraft_layout(flight_id, aircraft_layout_id):
"""
Apply an aircraft layout to a flight, copying across seat allocations
:param flight_id: ID of the flight to apply the layout to
:param aircraft_layout_id: ID of the aircraft layout to apply
"""
# TODO : This needs refactoring but works well enough as a demo for now
# Get the aircraft layout and make sure it's valid for the specified flight
aircraft_layout = _retrieve_and_validate_new_layout(flight_id, aircraft_layout_id)
# Get the current seating allocations and remove the existing seats
current_allocations = get_current_seat_allocations(flight_id)
remove_seats(flight_id)
# Create the new seats
_create_seats_from_layout(flight_id, aircraft_layout)
# Copy seating allocations across
not_allocated = copy_seat_allocations(flight_id, current_allocations)
# It's possible some seats don't exist in the new layout compared to the old. If there are any passengers
# who were in those seats, move them to the next available seats
if not_allocated:
allocate_available_seats(flight_id, not_allocated)
| 5,340,653
|
def chebyshev(x, y):
"""chebyshev distance.
Args:
x: pd.Series, sample feature value.
y: pd.Series, sample feature value.
Returns:
chebyshev distance value.
"""
    return np.max(np.abs(x - y))
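# Illustrative usage sketch (not part of the original snippet; assumes numpy is
# already imported as np at module level, as the function above requires):
import pandas as pd
assert chebyshev(pd.Series([1, 5, 2]), pd.Series([4, 2, 2])) == 3  # max(|1-4|, |5-2|, |2-2|)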
| 5,340,654
|
def soda_url_helper(*, build_url, config, year, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:param year: year
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
url = build_url
url = url.replace('__format__', str(config['formats'][year]))
url = url.replace('__url_text__', str(config['url_texts'][year]))
return [url]
| 5,340,655
|
def generate_base_provider_parser():
"""Function that generates the base provider to be used by all dns providers."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('action', help='specify the action to take', default='list',
choices=['create', 'list', 'update', 'delete'])
parser.add_argument(
'domain', help='specify the domain, supports subdomains as well')
parser.add_argument('type', help='specify the entry type', default='TXT',
choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'LOC'])
parser.add_argument('--name', help='specify the record name')
parser.add_argument('--content', help='specify the record content')
parser.add_argument('--ttl', type=int,
help='specify the record time-to-live')
parser.add_argument('--priority', help='specify the record priority')
parser.add_argument(
'--identifier', help='specify the record for update or delete actions')
parser.add_argument('--log_level', help='specify the log level', default='ERROR',
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'])
parser.add_argument('--output',
help=('specify the type of output: by default a formatted table (TABLE), '
'a formatted table without header (TABLE-NO-HEADER), '
'a JSON string (JSON) or no output (QUIET)'),
default='TABLE', choices=['TABLE', 'TABLE-NO-HEADER', 'JSON', 'QUIET'])
return parser
| 5,340,656
|
def search(
submitted_before: Optional[datetime] = None,
submitted_after: Optional[datetime] = None,
awaiting_service: Optional[str] = None,
url:Optional[str] = None,
token:Optional[str] = None,
quiet:bool = False
) -> List[dict]:
"""Query metadatasets according to search critera. If datetimes are
specified without a timezone, they are assumed to be local time. Note that
specifying a timezone is only possible programmatically."""
config = get_config(url, token)
# Converting the datetimes to UTC is done only to have any timezone
# information at all. datetime objects without a timezone will be rejected
# by the API as invalid ISO strings. In principle they can be submitted in
# an arbitrary timezone. Applying `astimezone(utc)` to datetime objects
# without a timezone annotation assumes local time.
args = {
'submitted_before': _add_timezone(submitted_before),
'submitted_after': _add_timezone(submitted_after),
'awaiting_service': awaiting_service
}
args = { k: v for k, v in args.items() if v is not None }
info("Sending query to server", quiet)
with ApiClient(config) as api_client:
api_instance = metadata_api.MetadataApi(api_client)
api_response = api_instance.get_meta_data_sets(**args)
res = [elem.to_dict() for elem in api_response]
return result(res, quiet)
| 5,340,657
|
def _target_js_variable_is_used(
*, var_name: str, exp_lines: List[str]) -> bool:
"""
Get a boolean value whether target variable is used in
js expression or not.
Parameters
----------
var_name : str
Target variable name.
exp_lines : list of str
js expression lines.
Returns
-------
result : bool
If target variable is used in js expression, True will be
returned.
"""
var_pattern: Pattern = re.compile(pattern=rf'var ({var_name}) = ')
used_pattern_1: Pattern = re.compile(
pattern=rf'{var_name}[ ;\)\.}},\]\[]')
used_pattern_2: Pattern = re.compile(
pattern=rf'{var_name}$')
for line in exp_lines:
if '//' in line:
continue
if var_name not in line:
continue
match: Optional[Match] = var_pattern.search(string=line)
if match is not None:
continue
match = used_pattern_1.search(string=line)
if match is not None:
return True
match = used_pattern_2.search(string=line)
if match is not None:
return True
return False
| 5,340,658
|
def rnn_temporal(x, h0, Wx, Wh, b):
"""
Run a vanilla RNN forward on an entire sequence of data. We assume an input
sequence composed of T vectors, each of dimension D. The RNN uses a hidden
size of H, and we work over a minibatch containing N sequences. After running
the RNN forward, we return the hidden states for all timesteps.
Inputs:
- x: Input data for the entire timeseries, of shape (N, T, D).
- h0: Initial hidden state, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- h: Hidden states for the entire timeseries, of shape (N, T, H).
"""
N, T, _ = x.shape
H = h0.shape[1]
h = np.zeros([N, 0, H])
for t in range(T):
h_step = rnn_step(x[:, t, :], h0 if t == 0 else h[:, t - 1, :], Wx, Wh,
b).reshape(N, 1, H)
h = np.append(h, h_step, axis=1)
return h
| 5,340,659
|
def rejection_fixed_lag_stitch(fixed_particle: np.ndarray,
last_edge_fixed: np.ndarray,
last_edge_fixed_length: float,
new_particles: MMParticles,
adjusted_weights: np.ndarray,
stitch_time_interval: float,
min_resample_time_indices: Union[list, np.ndarray],
dist_prior_bound: float,
mm_model: MapMatchingModel,
max_rejections: int,
break_on_zero: bool = False) -> Union[np.ndarray, None, int]:
"""
Attempt up to max_rejections of rejection sampling to stitch a single fixed particle
:param fixed_particle: trajectory prior to stitching time
:param last_edge_fixed: row of last fixed particle
:param last_edge_fixed_length: length of last fixed edge (so don't have to call get_geometry)
:param new_particles: particles proposed to stitching
:param adjusted_weights: non-interacting stitching weights
:param stitch_time_interval: time between stitching observations
:param min_resample_time_indices: indices for row of min_resample_time in new_particles
:param dist_prior_bound: bound on distance transition density (given positive if break_on_zero)
:param mm_model: MapMatchingModel
:param max_rejections: number of rejections to attempt, if none succeed return None
:param break_on_zero: whether to return 0 if new_stitching_distance=0
:return: stitched particle
"""
n = len(new_particles)
for reject_ind in range(max_rejections):
new_index = np.random.choice(n, 1, p=adjusted_weights)[0]
new_particle = new_particles[new_index].copy()
# Reject if new_particle starts from different edge
if not np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]):
continue
# Reject if new_particle doesn't overtake fixed_particles
elif np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \
new_particle[1, 4] < last_edge_fixed[4]:
continue
# Calculate stitching distance
first_distance_j_to_k = (new_particle[1, 4] - last_edge_fixed[4]) * last_edge_fixed_length
first_distance_k = new_particle[1, -1]
change_dist = np.round(first_distance_j_to_k - first_distance_k, 5)
new_particle[1:(min_resample_time_indices[new_index] + 1), -1] += change_dist
new_stitching_distance = new_particle[min_resample_time_indices[new_index], -1]
if break_on_zero and new_stitching_distance < 1e-5:
return 0
# Evaluate distance prior
new_stitching_distance_prior = mm_model.distance_prior_evaluate(new_stitching_distance, stitch_time_interval)
new_stitching_deviation_prior = mm_model.deviation_prior_evaluate(fixed_particle[-1, 5:7],
new_particle[None,
min_resample_time_indices[new_index], 5:7],
new_stitching_distance)
accept_prob = new_stitching_distance_prior * new_stitching_deviation_prior / dist_prior_bound
if accept_prob > (1 - 1e-5) or np.random.uniform() < accept_prob:
out_particle = np.append(fixed_particle, new_particle[1:], axis=0)
return out_particle
return None
| 5,340,660
|
def zip_equalize_lists(a, b):
"""
A zip implementation which will not stop when reaching the end of the
smallest list, but will append None's to the smaller list to fill the gap
"""
a = list(a)
b = list(b)
a_len = len(a)
b_len = len(b)
diff = abs(a_len - b_len)
if a_len < b_len:
for _ in range(diff):
a.append(None)
if b_len < a_len:
for _ in range(diff):
b.append(None)
return zip(a, b)
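# Illustrative usage sketch (not part of the original snippet):
assert list(zip_equalize_lists([1, 2, 3], ["a"])) == [(1, "a"), (2, None), (3, None)]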
| 5,340,661
|
def test_encounter_detail(client, project, species):
"""Verify that we can navigate to the encounter detail page (status
code=200) and that the template is the one we think it is.
"""
encounter = EncounterFactory(project=project, species=species)
response = client.get(
reverse("tfat:encounter_detail", kwargs={"encounter_id": encounter.id})
)
assert "tfat/encounter_detail.html" in [x.name for x in response.templates]
assert response.status_code == 200
| 5,340,662
|
def cover_line(line):
"""
This function takes a string containing a line that should
potentially have an execution count and returns a version
of that line that does have an execution count if deemed
appropriate by the rules in validate_line().
Basically, if there is currently no number where there should
be an execution count (indicating this line did not make
it into the compiled binary), a zero is placed there to
indicate that this line was executed 0 times. Test coverage
viewers will interpret this to mean that the line could
potentially have been executed.
"""
first_bar = line.find("|")
second_bar = line.find("|", first_bar+1)
if validate_line(line, second_bar) and \
line[second_bar-1].strip() == "":
# If this line could have been executed but wasn't (no
# number between first and second bars), put a zero
# before the second bar, indicating that it was
# executed zero times. Test coverage viewers will interpret
# this as meaning the line should have been covered
# but wasn't.
return "".join([line[:second_bar-1],
"0", line[second_bar:]])
# There's already an execution count - this
# template must have been instantiated
return line
| 5,340,663
|
def find_period(samples_second):
""" # Find Period
Args:
samples_second (int): number of samples per second
Returns:
float: samples per period divided by samples per second
"""
samples_period = 4
return samples_period / samples_second
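# Illustrative usage sketch (not part of the original snippet):
# at 100 samples per second, 4 samples per period corresponds to a 0.04 s period
assert abs(find_period(100) - 0.04) < 1e-12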
| 5,340,664
|
def sqrt(x: int) -> int:
"""
Babylonian Square root implementation
"""
z = (x + 1) // 2
y = x
while z < y:
y = z
z = ( (x // z) + z) // 2
return y
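# Illustrative usage sketch (not part of the original snippet): the Babylonian
# method here converges to the integer floor of the square root
assert sqrt(16) == 4
assert sqrt(17) == 4
assert sqrt(1) == 1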
| 5,340,665
|
def is_consecutive_list(list_of_integers):
"""
# ========================================================================
IS CONSECUTIVE LIST
PURPOSE
-------
    Reports whether the elements in a list increase in consecutive order.
INPUT
-----
[[List]] [list_of_integers]
- A list of integers.
Return
------
[BOOLEAN]
        - Returns True if each element is exactly one greater than the
          previous element, and False otherwise.
# ========================================================================
"""
for i in range(1, len(list_of_integers)):
if list_of_integers[i] - list_of_integers[i - 1] != 1:
return False
return True
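# Illustrative usage sketch (not part of the original snippet):
assert is_consecutive_list([3, 4, 5, 6]) is True
assert is_consecutive_list([3, 4, 4, 5]) is False   # repeated value
assert is_consecutive_list([1, 3, 4]) is False      # gap larger than one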
| 5,340,666
|
def deploy_wazzap(l_dir=env.local_directory):
"""Deploys wazzap to our remote location
Can set local location by using l_dir='<local path>'
"""
env.local_directory = l_dir
deploy_app(host_=env.myhost)
| 5,340,667
|
def circles(x, y, s, c='b', vmin=None, vmax=None, **kwargs):
"""
See https://gist.github.com/syrte/592a062c562cd2a98a83
Make a scatter plot of circles.
Similar to plt.scatter, but the size of circles are in data scale.
Parameters
----------
x, y : scalar or array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, )
Radius of circles.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values
to be colormapped. (If you insist, use `color` instead.)
`c` can be a 2-D array in which the rows are RGB or RGBA, however.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used.
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Examples
--------
a = np.arange(11)
circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
plt.colorbar()
License
--------
This code is under [The BSD 3-Clause License]
(http://opensource.org/licenses/BSD-3-Clause)
"""
if np.isscalar(c):
kwargs.setdefault('color', c)
c = None
if 'fc' in kwargs:
kwargs.setdefault('facecolor', kwargs.pop('fc'))
if 'ec' in kwargs:
kwargs.setdefault('edgecolor', kwargs.pop('ec'))
if 'ls' in kwargs:
kwargs.setdefault('linestyle', kwargs.pop('ls'))
if 'lw' in kwargs:
kwargs.setdefault('linewidth', kwargs.pop('lw'))
# You can set `facecolor` with an array for each patch,
# while you can only set `facecolors` with a value for all.
zipped = np.broadcast(x, y, s)
patches = [Circle((x_, y_), s_)
for x_, y_, s_ in zipped]
collection = PatchCollection(patches, **kwargs)
if c is not None:
c = np.broadcast_to(c, zipped.shape).ravel()
collection.set_array(c)
collection.set_clim(vmin, vmax)
ax = plt.gca()
ax.add_collection(collection)
ax.autoscale_view()
plt.draw_if_interactive()
if c is not None:
plt.sci(collection)
return collection
| 5,340,668
|
def test_read_mzxml():
"""Test reading peak data from an mzXML metabolomics file.
GIVEN an mzXML file
WHEN that mzXML file is parsed into a list of Peak objects
THEN make sure those Peak objects are correct
"""
mzxml_path = DATA_DIR / "test_metabolomics/test.mzXML"
with open(mzxml_path, "r") as infile:
mzxml_data = infile.read()
peaks = read_mzxml(mzxml_data, charge="+")
test_peak1 = peaks[0]
test_peak2 = peaks[1]
assert test_peak1.charge == "+"
assert test_peak1.mz == 100.5
assert test_peak1.r_time == 420
assert test_peak2.charge == "+"
assert test_peak2.mz == 131.0
assert test_peak2.r_time == 305
| 5,340,669
|
def generate_agency_tracking_id():
""" Generate an agency tracking ID for the transaction that has some random
component. I include the date in here too, in case that's useful. (The
current non-random tracking id has the date in it.)
@todo - make this more random"""
random = str(uuid4()).replace('-', '')
today = datetime.now().strftime("%m%d")
return 'PCOCI%s%s' % (today, random[0:6])
| 5,340,670
|
def get_enabled_gems(cmake_file: pathlib.Path) -> set:
"""
Gets a list of enabled gems from the cmake file
:param cmake_file: path to the cmake file
:return: set of gem targets found
"""
cmake_file = pathlib.Path(cmake_file).resolve()
if not cmake_file.is_file():
logger.error(f'Failed to locate cmake file {cmake_file}')
return set()
gem_target_set = set()
with cmake_file.open('r') as s:
in_gem_list = False
for line in s:
line = line.strip()
if line.startswith(enable_gem_start_marker):
# Set the flag to indicate that we are in the ENABLED_GEMS variable
in_gem_list = True
                # Skip past the 'set(ENABLED_GEMS' marker just in case there are gems declared on the same line
line = line[len(enable_gem_start_marker):]
if in_gem_list:
# Since we are inside the ENABLED_GEMS variable determine if the line has the end_marker of ')'
if line.endswith(enable_gem_end_marker):
# Strip away the line end marker
line = line[:-len(enable_gem_end_marker)]
# Set the flag to indicate that we are no longer in the ENABLED_GEMS variable after this line
in_gem_list = False
# Split the rest of the line on whitespace just in case there are multiple gems in a line
gem_name_list = list(map(lambda gem_name: gem_name.strip('"'), line.split()))
gem_target_set.update(gem_name_list)
return gem_target_set
| 5,340,671
|
def celeryAdd3(a,b):
"""This is for a specific Celery workflow
f = (a+b) * (a+b)
We'll use chord, group and chain"""
if request.method == 'GET':
# When a worker receives an expired task it will mark the task as REVOKED
res = (group(add.s(a,b), add.s(a,b)) | mul.s()).apply_async(expires=60) #https://docs.celeryproject.org/en/stable/userguide/calling.html#expiration
_ret = """ <p>result: 200</p>
<p>msg: "Added value is calculating at task ID: {0}"</p>
<p>htmlmsg: <a href="/api/v1_0/status/{0}">{0}</a></p>""".format(res.id)
# return jsonify(_ret)
return _ret
| 5,340,672
|
def print_success(text):
"""
Print a success message.
Parameters
----------
text : str
The message to display.
"""
print(colorize(text, Colors.SUCCESS))
| 5,340,673
|
def random_walk_model(
data,
ep,
basic_R_prior=None,
r_walk_noise_scale_prior=0.15,
r_walk_period=7,
n_days_seeding=7,
seeding_scale=3.0,
infection_noise_scale=5.0,
output_noise_scale_prior=5.0,
**kwargs,
):
"""
Random walk only model
:param data: PreprocessedData object
:param ep: EpidemiologicalParameters object
:param basic_R_prior: basic r prior dict
:param r_walk_noise_scale_prior: scale of random walk noise scale prior
:param r_walk_period: period of random walk
:param n_days_seeding: number of days of seeding
:param seeding_scale: scale of seeded infection prior
:param infection_noise_scale: scale of infection noise
:param output_noise_scale_prior: output noise scale prior
:param kwargs: additional kwargs (not used, but maintain function signature)
"""
for k in kwargs.keys():
print(f"{k} is not being used")
basic_R = sample_basic_R(data.nRs, basic_R_prior)
# number of 'noise points'
nNP = (
int(data.nDs / r_walk_period) - 1
) # -1 since no change for the first 2 weeks. +1 (round up) - 2 since fixed for the first 2 weeks
r_walk_noise_scale = numpyro.sample(
"r_walk_noise_scale", dist.HalfNormal(scale=r_walk_noise_scale_prior)
)
# rescaling variables by 10 for better NUTS adaptation
r_walk_noise = numpyro.sample(
"r_walk_noise",
dist.Normal(loc=jnp.zeros((data.nRs, nNP)), scale=1.0 / 10),
)
# only apply the noise every "r_walk_period" - to get full noise, repeat
expanded_r_walk_noise = jnp.repeat(
r_walk_noise_scale * 10.0 * jnp.cumsum(r_walk_noise, axis=-1),
r_walk_period,
axis=-1,
)[: data.nRs, : (data.nDs - 2 * r_walk_period)]
# except that we assume no noise for the first 3 weeks
full_log_Rt_noise = jnp.zeros((data.nRs, data.nDs))
full_log_Rt_noise = jax.ops.index_update(
full_log_Rt_noise, jax.ops.index[:, 2 * r_walk_period :], expanded_r_walk_noise
)
Rt = numpyro.deterministic(
"Rt",
jnp.exp(jnp.log(basic_R.reshape((data.nRs, 1))) + full_log_Rt_noise),
)
# collect variables in the numpyro trace
numpyro.deterministic("Rt_walk", jnp.exp(full_log_Rt_noise))
numpyro.deterministic("Rt_cm", jnp.exp(jnp.log(basic_R.reshape((data.nRs, 1)))))
# Infection Model
seeding_padding = n_days_seeding
total_padding = ep.GIv.size - 1
# note; seeding is also rescaled
init_infections, total_infections_placeholder = seed_infections(
seeding_scale, data.nRs, data.nDs, seeding_padding, total_padding
)
discrete_renewal_transition = get_discrete_renewal_transition(ep)
# we need to transpose R because jax.lax.scan scans over the first dimension.
# We want to scan over time instead of regions!
_, infections = jax.lax.scan(
discrete_renewal_transition,
init_infections,
Rt.T,
)
# corrupt infections with additive noise, adding robustness at small case and death
# counts
infection_noise = numpyro.sample(
"infection_noise",
dist.Normal(loc=0, scale=0.1 * jnp.ones((data.nRs, data.nDs))),
)
# enforce positivity!
infections = jax.nn.softplus(
infections + (infection_noise_scale * (10.0 * infection_noise.T))
)
total_infections = jax.ops.index_update(
total_infections_placeholder,
jax.ops.index[:, :seeding_padding],
init_infections[:, -seeding_padding:],
)
total_infections = numpyro.deterministic(
"total_infections",
jax.ops.index_update(
total_infections, jax.ops.index[:, seeding_padding:], infections.T
),
)
# Time constant case fatality rate (ascertainment rate assumed to be 1
# throughout the whole period).
cfr = numpyro.sample("cfr", dist.Uniform(low=1e-3, high=jnp.ones((data.nRs, 1))))
future_cases_t = numpyro.deterministic("future_cases_t", total_infections)
future_deaths_t = numpyro.deterministic(
"future_deaths_t", jnp.multiply(total_infections, cfr)
)
# collect expected cases and deaths
expected_cases = numpyro.deterministic(
"expected_cases",
jax.scipy.signal.convolve2d(future_cases_t, ep.DPC, mode="full")[
:, seeding_padding : seeding_padding + data.nDs
],
)
expected_deaths = numpyro.deterministic(
"expected_deaths",
jax.scipy.signal.convolve2d(future_deaths_t, ep.DPD, mode="full")[
:, seeding_padding : seeding_padding + data.nDs
],
)
# country specific psi cases and deaths.
# We will use the 'RC' matrix to pull the correct local area value.
psi_cases = numpyro.sample(
"psi_cases",
dist.HalfNormal(scale=output_noise_scale_prior * jnp.ones(len(data.unique_Cs))),
)
psi_deaths = numpyro.sample(
"psi_deaths",
dist.HalfNormal(scale=output_noise_scale_prior * jnp.ones(len(data.unique_Cs))),
)
# use the per country psi_cases and psi_deaths and form a nRs x nDs array
# to use for the output distribution.
cases_conc = (
(data.RC_mat @ psi_cases).reshape((data.nRs, 1)).repeat(data.nDs, axis=-1)
)
deaths_conc = (
(data.RC_mat @ psi_deaths).reshape((data.nRs, 1)).repeat(data.nDs, axis=-1)
)
with numpyro.handlers.mask(mask=jnp.logical_not(data.new_cases.mask)):
numpyro.sample(
"observed_cases",
dist.GammaPoisson(
concentration=cases_conc,
rate=cases_conc / expected_cases,
),
obs=data.new_cases.data,
)
with numpyro.handlers.mask(mask=jnp.logical_not(data.new_deaths.mask)):
numpyro.sample(
"observed_deaths",
dist.GammaPoisson(
concentration=deaths_conc,
rate=deaths_conc / expected_deaths,
),
obs=data.new_deaths.data,
)
| 5,340,674
|
def create_unmerge_cells_request(sheet_id, start, end):
"""
Create v4 API request to unmerge rows and/or columns for a
given worksheet.
"""
start = get_cell_as_tuple(start)
end = get_cell_as_tuple(end)
return {
"unmergeCells": {
"range": {
"sheetId": sheet_id,
"startRowIndex": start[ROW] - 1,
"endRowIndex": end[ROW],
"startColumnIndex": start[COL] - 1,
"endColumnIndex": end[COL],
}
}
}
| 5,340,675
|
async def status(ctx: click.Context) -> None:
"""Get on-chain staker status."""
cfg = cli_config(ctx)
account_name = ctx.obj["ACCOUNT_NAME"]
if account_name:
account = ChainedAccount.get(account_name)
else:
accounts = find_accounts(chain_id=cfg.main.chain_id)
account = accounts[0]
staker_status, date_staked = await get_staker_info(ctx, account.address)
if not staker_status:
print("Failed to retrieve staker status. Check command arguments")
return
if account_name:
print(f"Account name: {account_name}")
print(f"Status: {staker_status}")
if staker_status != "NotStaked":
print(f"Staked on {date_staked} ({date_staked.age} ago)")
| 5,340,676
|
def list_to_str(slist, seperator=None):
"""Convert list of any type to string seperated by seperator."""
if not seperator:
seperator = ','
if not slist:
return ""
slist = squash_int_range(slist)
return seperator.join([str(e) for e in slist])
| 5,340,677
|
def log_web_error(msg):
"""Take a screenshot of a web browser based error
Use this function to capture a screen shot of the web browser
when using Python's `assert` keyword to perform assertions.
"""
screenshot = selene.helpers.take_screenshot(selene.browser.driver(),)
msg = '''{original_msg}
screenshot: file://{screenshot}'''.format(original_msg=msg, screenshot=screenshot)
return msg
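# Usage sketch (hypothetical condition, not from the source): because the message
# argument of `assert` is only evaluated when the check fails, the screenshot is
# taken only for failing assertions.
#
#     assert page_title == "Dashboard", log_web_error("unexpected title: %s" % page_title)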
| 5,340,678
|
def timelines(
T,
infile,
outfile,
limit,
timeline_limit,
use_search,
hide_progress,
**kwargs,
):
"""
Fetch the timelines of every user in an input source of tweets. If
the input is a line oriented text file of user ids or usernames that will
be used instead.
The infile can be:
- A file containing one user id per line (either quoted or unquoted)
- A JSONL file containing tweets collected in the Twitter API V2 format
"""
total_count = 0
line_count = 0
seen = set()
kwargs = _process_expansions_shortcuts(kwargs)
with FileLineProgressBar(infile, outfile, disable=hide_progress) as progress:
for line in infile:
progress.update()
line_count += 1
line = line.strip()
if line == "":
log.warn("skipping blank line on line %s", line_count)
continue
users = None
try:
                # assume the line contains some tweet JSON
data = json.loads(line)
# if it parsed as a string or int assume it's a username
if isinstance(data, str) or isinstance(data, int):
users = set([line])
# otherwise try to flatten the data and get the user ids
else:
try:
users = set([t["author"]["id"] for t in ensure_flattened(data)])
except (KeyError, ValueError):
log.warn(
"ignored line %s which didn't contain users", line_count
)
continue
except json.JSONDecodeError:
# maybe it's a single user?
users = set([line])
if users is None:
click.echo(
click.style(
f"unable to find user or users on line {line_count}",
fg="red",
),
err=True,
)
break
for user in users:
# only process a given user once
if user in seen:
log.info("already processed %s, skipping", user)
continue
                # ignore anything that doesn't appear to be a username or user id,
                # since it can cause the Twitter API to throw a 400 error
if not re.match(r"^((\w{1,15})|(\d+))$", user):
log.warn(
'invalid username or user id "%s" on line %s', line, line_count
)
continue
seen.add(user)
tweets = _timeline_tweets(
T,
use_search=use_search,
user_id=user,
**kwargs,
)
timeline_count = 0
for response in tweets:
_write(response, outfile)
timeline_count += len(response["data"])
if timeline_limit != 0 and timeline_count >= timeline_limit:
break
total_count += len(response["data"])
if limit != 0 and total_count >= limit:
return
| 5,340,679
|
def parent_id_name_and_quotes_for_table(sqltable):
""" Return tuple with 2 items (nameof_field_of_parent_id, Boolean)
True - if field data type id string and must be quoted), False if else """
id_name = None
quotes = False
for colname, sqlcol in sqltable.sql_columns.iteritems():
# root table
if not sqltable.root.parent and \
sqlcol.node == sqltable.root.get_id_node():
id_name = colname
if sqlcol.typo == "STRING":
quotes = True
break
else: # nested table
if sqlcol.node.reference:
id_name = colname
if sqlcol.typo == "STRING":
quotes = True
break
return (id_name, quotes)
| 5,340,680
|
def chunks(l, n):
""" Yield successive chunks from l which are at least of length n
From http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
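# Usage sketch (Python 2, matching the xrange call above; use range on Python 3):
#
#     >>> list(chunks([1, 2, 3, 4, 5], 2))
#     [[1, 2], [3, 4], [5]]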
| 5,340,681
|
def test_transcoded_hls_video():
""" Tests that Video.transcoded_videos returns transcoded HLS videofile"""
video = VideoFactory()
videofiles = [
VideoFileFactory(
video=video, s3_object_key="original.mp4", encoding=EncodingNames.ORIGINAL
),
VideoFileFactory(
video=video, s3_object_key="video.m3u8", encoding=EncodingNames.HLS
),
]
assert len(video.transcoded_videos) == 1
assert video.transcoded_videos[0] == videofiles[1]
| 5,340,682
|
def client():
"""Returns a Flask client for the app."""
return app.test_client()
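# Usage sketch (assumes this is a pytest fixture, i.e. decorated with
# @pytest.fixture in the surrounding module; the "/" route is hypothetical):
#
#     def test_homepage(client):
#         response = client.get("/")
#         assert response.status_code == 200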
| 5,340,683
|
def pushTwitterConnections(self, twits, user, friends=True, cacheKey=False):
"""Push the Twitter connections of a given user to Neo4J.
Positional arguments:
twits -- a list of Twitter users as returned by Twython
user -- The screen_name of the user
Keyword arguments:
friends -- "twits" are the user's friends if True, (default) else they're followers
cacheKey -- a Redis key that identifies an on-going task to grab a user's friends or followers
"""
if friends:
job = ' FRIENDS'
else:
job = ' FOLLOWERS'
if twits:
rendered_twits = [renderTwitterUser(twit) for twit in twits]
pushRenderedConnections2Neo.delay(user, rendered_twits, friends=friends)
if cacheKey: # These are the last connections, tell the scraper we're done.
cache.set(cacheKey, 'done')
logger.info('*** %s: DONE WITH %s ***' % (user, job))
| 5,340,684
|
def _get_horizons_ephem(
id,
start: Time,
stop: Time,
step: str = "12H",
id_type: str = "smallbody",
location: str = "@TESS",
quantities: str = "2,3,9,19,20,43",
):
"""Returns JPL Horizons ephemeris.
    This is a simple cached wrapper around astroquery's Horizons.ephemerides.
"""
epochs = {"start": start.iso, "stop": stop.iso, "step": step}
log.debug(
f"Horizons query parameters:\n\tid={id}\n\tlocation={location}\n\tepochs={epochs}"
)
t = Horizons(id=id, id_type=id_type, location=location, epochs=epochs)
result = t.ephemerides(quantities=quantities)
log.debug(f"Received {len(result)} ephemeris results")
return result
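# Usage sketch (illustrative, not from the source): the target and date range are
# hypothetical; the call performs a network query via astroquery and returns an
# astropy Table whose columns depend on the requested Horizons quantities.
#
#     from astropy.time import Time
#     eph = _get_horizons_ephem("Ceres", Time("2019-07-25"), Time("2019-08-20"), step="1d")
#     print(len(eph), eph.colnames[:5])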
| 5,340,685
|
def show_modal_data(nat_freq, damping):
"""
Show modal data in a table-like structure.
"""
print(' Nat. f. Damping')
print(23*'-')
for i, f in enumerate(nat_freq):
print(f'{i+1}) {f:6.1f}\t{damping[i]:5.4f}')
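# Usage sketch (hypothetical values): prints one numbered row per mode, e.g.
#
#     show_modal_data([10.2, 25.7], [0.011, 0.021])
#     # 1)   10.2    0.0110
#     # 2)   25.7    0.0210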
| 5,340,686
|
def plot_map(map, affine, cut_coords=None, anat=None, anat_affine=None,
figure=None, axes=None, title=None, threshold=None,
annotate=True, draw_cross=True,
do3d=False, **kwargs):
""" Plot three cuts of a given activation map (Frontal, Axial, and Lateral)
Parameters
----------
map : 3D ndarray
The activation map, as a 3D image.
affine : 4x4 ndarray
The affine matrix going from image voxel space to MNI space.
cut_coords: 3-tuple of floats or None
The MNI coordinates of the point where the cut is performed, in
MNI coordinates and order.
        If None is given, the cut point is calculated automatically.
anat : 3D ndarray or False, optional
The anatomical image to be used as a background. If None, the
MNI152 T1 1mm template is used. If False, no anat is displayed.
anat_affine : 4x4 ndarray, optional
The affine matrix going from the anatomical image voxel space to
MNI space. This parameter is not used when the default
anatomical is used, but it is compulsory when using an
        explicit anatomical image.
figure : integer or matplotlib figure, optional
Matplotlib figure used or its number. If None is given, a
new figure is created.
    axes : matplotlib axes or 4 tuple of float: (xmin, xmax, ymin, ymax), optional
The axes, or the coordinates, in matplotlib figure space,
of the axes used to display the plot. If None, the complete
figure is used.
title : string, optional
        The title displayed on the figure.
threshold : a number, None, or 'auto'
If None is given, the maps are not thresholded.
If a number is given, it is used to threshold the maps:
values below the threshold are plotted as transparent. If
auto is given, the threshold is determined magically by
analysis of the map.
annotate: boolean, optional
If annotate is True, positions and left/right annotation
are added to the plot.
draw_cross: boolean, optional
If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
do3d: {True, False or 'interactive'}, optional
If True, Mayavi is used to plot a 3D view of the
map in addition to the slicing. If 'interactive', the
3D visualization is displayed in an additional interactive
window.
kwargs: extra keyword arguments, optional
Extra keyword arguments passed to pylab.imshow
Notes
-----
Arrays should be passed in numpy convention: (x, y, z)
ordered.
Use masked arrays to create transparency:
import numpy as np
map = np.ma.masked_less(map, 0.5)
plot_map(map, affine)
"""
map, affine = _xyz_order(map, affine)
nan_mask = np.isnan(np.asarray(map))
if np.any(nan_mask):
map = map.copy()
map[nan_mask] = 0
# Deal with automatic settings of plot parameters
if threshold == 'auto':
threshold = _fast_abs_percentile(map)
if cut_coords is None:
x_map, y_map, z_map = find_cut_coords(map,
activation_threshold=threshold)
cut_coords = coord_transform(x_map, y_map, z_map, affine)
if threshold is not None:
if threshold == 0:
map = np.ma.masked_equal(map, 0, copy=False)
else:
map = np.ma.masked_inside(map, -threshold, threshold, copy=False)
if do3d:
try:
from enthought.mayavi import version
if not int(version.version[0]) > 2:
raise ImportError
except ImportError:
warnings.warn('Mayavi > 3.x not installed, plotting only 2D')
do3d = False
# Make sure that we have a figure
if not isinstance(figure, Figure):
if do3d:
size = (10, 2.6)
else:
size = (6.6, 2.6)
fig = pl.figure(figure, figsize=size, facecolor='w')
else:
fig = figure
if isinstance(axes, Axes):
assert axes.figure is figure, ("The axes passed are not "
"in the figure")
canonical_anat = False
if anat is None:
try:
anat, anat_affine, vmax_anat = _AnatCache.get_anat()
canonical_anat = True
except OSError, e:
anat = False
warnings.warn(repr(e))
# Use Mayavi for the 3D plotting
if do3d:
from .maps_3d import plot_map_3d, m2screenshot
from enthought.tvtk.api import tvtk
version = tvtk.Version()
offscreen = True
if (version.vtk_major_version, version.vtk_minor_version) < (5, 2):
offscreen = False
if do3d == 'interactive':
offscreen = False
cmap = kwargs.get('cmap', pl.cm.cmap_d[pl.rcParams['image.cmap']])
# Computing vmin and vmax is costly in time, and is needed
# later, so we compute them now, and store them for future
# use
vmin = kwargs.get('vmin', map.min())
kwargs['vmin'] = vmin
vmax = kwargs.get('vmax', map.max())
kwargs['vmax'] = vmax
from enthought.mayavi import mlab
plot_map_3d(np.asarray(map), affine, cut_coords=cut_coords,
anat=anat, anat_affine=anat_affine,
offscreen=offscreen, cmap=cmap,
threshold=threshold,
vmin=vmin, vmax=vmax)
ax = fig.add_axes((0.001, 0, 0.29, 1))
ax.axis('off')
m2screenshot(mpl_axes=ax)
axes = (0.3, 0, .7, 1.)
if offscreen:
# Clean up, so that the offscreen engine doesn't become the
# default
mlab.clf()
engine = mlab.get_engine()
from enthought.mayavi.core.registry import registry
for key, value in registry.engines.iteritems():
if value is engine:
registry.engines.pop(key)
break
if axes is None:
axes = [0., 0., 1., 1.]
if operator.isSequenceType(axes):
axes = fig.add_axes(axes)
axes.axis('off')
ortho_slicer = OrthoSlicer(cut_coords, axes=axes)
# Check that we should indeed plot an anat: we have one, and the
# cut_coords are in its range
x, y, z = cut_coords
if (anat is not False
and np.all(
np.array(coord_transform(x, y, z, np.linalg.inv(anat_affine)))
< anat.shape)):
anat_kwargs = kwargs.copy()
anat_kwargs['cmap'] = pl.cm.gray
anat_kwargs.pop('alpha', 1.)
if canonical_anat:
# We special-case the 'canonical anat', as we don't need
# to do a few transforms to it.
anat_kwargs['vmin'] = 0
anat_kwargs['vmax'] = vmax_anat
else:
anat_kwargs.pop('vmin', None)
anat_kwargs.pop('vmax', None)
anat, anat_affine = _xyz_order(anat, anat_affine)
ortho_slicer.plot_map(anat, anat_affine, **anat_kwargs)
ortho_slicer.plot_map(map, affine, **kwargs)
if annotate:
ortho_slicer.annotate()
if draw_cross:
ortho_slicer.draw_cross(color='k')
if title is not None and not title == '':
ortho_slicer.title(title)
return ortho_slicer
| 5,340,687
|
def get_mode(h5,songidx=0):
"""
Get mode from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.mode[songidx]
| 5,340,688
|
def read_label_schema(path):
"""
Reads json file and returns deserialized LabelSchema.
"""
with open(path, encoding="UTF-8") as read_file:
serialized_label_schema = json.load(read_file)
return LabelSchemaMapper().backward(serialized_label_schema)
| 5,340,689
|
def Dump(root):
"""Return a string representing the contents of an object.
This function works only if root.ValidateExports() would pass.
Args:
root: the object to dump.
Returns:
A big string containing lines of the format:
Object.SubObject.
Object.SubObject.ParameterName = %r
"""
h = Handle(root)
out = []
for i in h.ListExports(recursive=True):
if i.endswith('.'):
out.append(' %s' % (i,))
else:
out.append(' %s = %r' % (i, h.GetExport(i)))
return '\n'.join(out)
| 5,340,690
|
def clean_profit_data(profit_data):
"""清理权益全为0的垃圾结算日"""
for i in list(range(len(profit_data)))[::-1]:
profit = profit_data[i][1] == 0
closed = profit_data[i][2] == 0
hold = profit_data[i][3] == 0
if profit and closed and hold:
profit_data.pop(i)
return profit_data
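# Usage sketch (hypothetical rows of [date, profit, closed, hold]): the first row
# is dropped because all three value columns are zero; the list is modified in
# place and also returned.
#
#     >>> clean_profit_data([["2021-01-04", 0, 0, 0], ["2021-01-05", 120.0, 30.0, 90.0]])
#     [['2021-01-05', 120.0, 30.0, 90.0]]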
| 5,340,691
|
def convert_single_example(ex_index, example, max_word_length,max_sen_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
text_sen = example.text_sen.strip().split()
text_pos=example.text_pos.strip().split()
    text_ps = example.text_ps.strip().split()  # just one element: the date
    if example.text_label is None:
        text_label = ["o"] * len(text_sen)
    else:
        text_label = example.text_label.strip().split()  # same length as the sentence for training; a single ['o'] for the test set
assert len(text_sen)==len(text_pos)
assert len(text_ps)==1
assert len(text_label)==len(text_sen)
text_word=[]
for word in text_sen:
text_word.append(tokenizer.tokenize(word))
    # text_word is a 2D list of sub-tokens, e.g. (each word split into pieces):
    # [
    #   [Xu, Hai, Ming],
    #   [xi, huan],
    #   [wan]
    # ]
    text_sen = text_word
    # Account for [SEP] with "- 1" (note: this is the sentence length)
if len(text_sen) > max_sen_length - 1:
text_sen = text_sen[0:(max_sen_length - 1)]
text_pos = text_pos[0:(max_sen_length - 1)]
text_label=text_label[0:(max_sen_length - 1)]
text_sen.append(["[SEP]"])
text_pos.append(["[SEP]"])
text_label.append("o")
    len_sen = len(text_sen)
    len_pos = len(text_pos)
    len_label = len(text_label)
while len(text_sen) < max_sen_length:
text_sen.append(["[PAD]"])
text_pos.append(["[PAD]"])
text_label.append("o")
    # Word-level processing: handle each word's sub-tokens.
    # Account for [CLS], [SEP] with "- 2" (note: this is the per-word length)
    for i, wordlist in enumerate(text_sen):
        if len(wordlist) > max_word_length - 2:
            text_sen[i] = wordlist[0:(max_word_length - 2)]
    # add [CLS] and [SEP] to every word
    segment_ids = []  # a 2D list
    len_words = []
for i,wordlist in enumerate(text_sen):
wordlist.insert(0,"[CLS]")
wordlist.append("[SEP]")
len_words.append(len(wordlist))
while len(wordlist) < max_word_length:
wordlist.append(["PAD"])
segment_ids.append([0]*len(wordlist))
text_sen[i]=wordlist
input_word_ids =[]
for tokens in text_sen:
        input_word_ids.append(tokenizer.convert_tokens_to_ids(tokens))  # a 2D list
    input_pos_ids = tokenizer.convert_pos_to_ids(text_pos)  # a flat list
    input_ps_id = tokenizer.convert_ps_to_ids(text_ps)[0]  # a single integer from 0 to 48
    input_label_ids = tokenizer.convert_label_to_ids(text_label)
    # build the sentence-level input_sen_mask
input_sen_mask = [1] * len_sen
input_pos_mask = [1] * len_pos
input_label_mask = [1]*len_label
# Zero-pad up to the sequence length.
while len(input_sen_mask) < max_sen_length:
input_sen_mask.append(0)
input_pos_mask.append(0)
input_label_mask.append(0)
    # build a mask for every word
input_words_mask=[]
for word_len in len_words:
word_mask = [1] * word_len
while len(word_mask) < max_word_length:
word_mask.append(0)
input_words_mask.append(word_mask)
    assert len(input_word_ids) == max_sen_length  # sentence length
    assert len(input_pos_ids) == max_sen_length  # sentence length
    assert len(input_label_ids) == max_sen_length
assert len(input_word_ids[0])==max_word_length
assert len(input_pos_mask) == max_sen_length
assert len(input_label_mask) == max_sen_length
assert len(input_words_mask) == max_sen_length
assert len(segment_ids) == max_sen_length
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("句子单词: %s" % " ".join(
["["+" ".join(x)+"]" for x in text_word]))
tf.logging.info("句子的ids: %s" % " ".join(
["[" + ",".join(list(map(str,word_ids)))+"]" for word_ids in input_word_ids]))
tf.logging.info("句子的mask: %s" % " ".join([str(x) for x in input_sen_mask]))
tf.logging.info("句子中每个单词的mask: %s" % " ".join(
["[" + ",".join(list(map(str,word_ids)))+"]" for word_ids in input_words_mask]))
print("\n")
tf.logging.info("input_pos_ids: %s" % " ".join(
["[" + ",".join(list(map(str, word_ids))) + "]" for word_ids in input_pos_ids]))
tf.logging.info("input_pos_ids: %s" % " ".join(
["[" + ",".join(list(map(str, word_ids))) + "]" for word_ids in input_pos_ids]))
tf.logging.info("input_label_ids: %s" % " ".join(
["[" + ",".join(list(map(str, word_ids))) + "]" for word_ids in input_label_ids]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("ps: %s (id = %d)" % (example.text_ps, input_ps_id))
feature = InputFeatures(
input_words_ids=input_word_ids,
input_pos_ids=input_pos_ids,
input_ps_id=input_ps_id,
input_label_ids=input_label_ids,
input_sen_mask=input_sen_mask,
input_words_mask=input_words_mask,
input_pos_mask=input_pos_mask,
input_label_mask=input_label_mask,
segment_ids=segment_ids,
is_real_example=True)
return feature
| 5,340,692
|
def scrub_literal(value):
"""
Scrubs control characters from the incoming values to remove
things like form feeds (\f) and line breaks (\n) which might
cause problems with Jena.
Data with these characters was found in the Backstage data.
"""
if not value:
return None
if isinstance(value, int):
return value
    text = ''.join([c for c in value if not ascii.iscntrl(c)])
text = text.replace('"', '')
text = text.replace('\ufffd', '')
text = clean_char(text)
    if isinstance(text, bytes):
        text = text.decode(errors='replace')
return text.strip()
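# Usage sketch (illustrative, not from the source): assumes `ascii` is curses.ascii
# and that clean_char (defined elsewhere in the module) passes plain ASCII through
# unchanged.
#
#     scrub_literal("Page one\fPage two\n")   # -> 'Page onePage two'
#     scrub_literal(42)                       # -> 42 (ints are returned as-is)
#     scrub_literal("")                       # -> None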
| 5,340,693
|
def mock_interface_settings_mismatch_protocol(mock_interface_settings, invalid_usb_device_protocol):
"""
Fixture that yields mock USB interface settings that is an unsupported device protocol.
"""
mock_interface_settings.getProtocol.return_value = invalid_usb_device_protocol
return mock_interface_settings
| 5,340,694
|
def run_eval(exp_name: str) -> Mapping[str, Any]:
""" """
pred_log_dir = f"{_ROOT}/test_data/eval_tracking_dummy_logs_pred"
gt_log_dir = f"{_ROOT}/test_data/eval_tracking_dummy_logs_gt"
out_fpath = f"{_ROOT}/test_data/{exp_name}.txt"
out_file = open(out_fpath, "w")
eval_tracks(
path_tracker_output_root=pred_log_dir,
path_dataset_root=gt_log_dir,
d_min=0,
d_max=100,
out_file=out_file,
centroid_method="average",
diffatt=None,
category="VEHICLE",
)
out_file.close()
with open(out_fpath, "r") as f:
result_lines = f.readlines()
result_vals = result_lines[0].strip().split(" ")
fn, num_frames, mota, motp_c, motp_o, motp_i, idf1 = result_vals[:7]
most_track, most_lost, num_fp, num_miss, num_sw, num_frag = result_vals[7:]
result_dict = {
"filename": fn,
"num_frames": int(num_frames),
"mota": float(mota),
"motp_c": float(motp_c),
"motp_o": float(motp_o),
"motp_i": float(motp_i),
"idf1": float(idf1),
"most_track": float(most_track),
"most_lost": float(most_lost),
"num_fp": int(num_fp),
"num_miss": int(num_miss),
"num_sw": int(num_sw),
"num_frag": int(num_frag),
}
shutil.rmtree(pred_log_dir)
shutil.rmtree(gt_log_dir)
return result_dict
| 5,340,695
|
def check_dir(path):
"""
检查文件夹是否存在,存在返回True;不存在则创建,返回False
"""
if not os.path.exists(path):
os.makedirs(path)
return False
return True
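# Usage sketch (hypothetical path): the first call creates the directory and
# returns False, the second call finds it and returns True.
#
#     >>> check_dir("/tmp/example_output_dir")
#     False
#     >>> check_dir("/tmp/example_output_dir")
#     True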
| 5,340,696
|
def MakeLocalSsds(messages, ssd_configs):
"""Constructs the repeated local_ssd message objects."""
if ssd_configs is None:
return []
local_ssds = []
disk_msg = (
messages.
AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk)
interface_msg = disk_msg.InterfaceValueValuesEnum
for s in ssd_configs:
if s['interface'].upper() == 'NVME':
interface = interface_msg.NVME
else:
interface = interface_msg.SCSI
m = disk_msg(
diskSizeGb=s['size'],
interface=interface)
local_ssds.append(m)
return local_ssds
| 5,340,697
|
def valid_variant(s, is_coding=True):
"""
Returns True if s is a valid coding or noncoding variant, else False.
Parameters
----------
s : `str`
Variant string to validate.
is_coding : `bool`
Indicates if the variant string represents a coding variant.
"""
_validate_str(s)
if s == WILD_TYPE_VARIANT:
return True
else:
if is_coding:
for mut in s.split(", "):
match = re_coding.match(mut)
if match is None:
return False
return True
else:
for mut in s.split(", "):
match = re_noncoding.match(mut)
if match is None:
return False
return True
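# Usage sketch (illustrative, not from the source): WILD_TYPE_VARIANT always
# validates; any other string must match the module-level re_coding /
# re_noncoding patterns, whose exact syntax (e.g. HGVS-style strings such as
# "c.76A>G") is an assumption here rather than something shown above.
#
#     valid_variant(WILD_TYPE_VARIANT)            # -> True
#     valid_variant("c.76A>G", is_coding=True)    # True only if re_coding matches it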
| 5,340,698
|
def is_icon_address_valid(address: str) -> bool:
"""Check whether address is in icon address format or not
:param address: (str) address string including prefix
:return: (bool)
"""
try:
if isinstance(address, str) and len(address) == 42:
prefix, body = split_icon_address(address)
if prefix == ICON_EOA_ADDRESS_PREFIX or \
prefix == ICON_CONTRACT_ADDRESS_PREFIX:
return is_lowercase_hex_string(body)
finally:
pass
return False
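# Usage sketch (illustrative, not from the source): assumes the EOA/contract
# prefixes are the usual ICON "hx"/"cx" and that split_icon_address splits the
# 2-character prefix from the 40-character body.
#
#     is_icon_address_valid("hx" + "a" * 40)   # -> True under those assumptions
#     is_icon_address_valid("hx1234")          # -> False (wrong length)
#     is_icon_address_valid(1234)              # -> False (not a string)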
| 5,340,699
|