Dataset columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M).
def make_mlp(dim_list, activation_list, batch_norm=False, dropout=0):
    """
    Generates MLP network:

    Parameters
    ----------
    dim_list : list, list of number for each layer
    activation_list : list, list containing activation function for each layer
    batch_norm : boolean, use batchnorm at each layer, default: False
    dropout : float [0, 1], dropout probability applied on each layer (except last layer)

    Returns
    -------
    nn.Sequential with layers
    """
    layers = []
    index = 0
    for dim_in, dim_out in zip(dim_list[:-1], dim_list[1:]):
        activation = activation_list[index]
        layers.append(nn.Linear(dim_in, dim_out))
        if batch_norm:
            layers.append(nn.BatchNorm1d(dim_out))
        if activation == 'relu':
            layers.append(nn.ReLU())
        elif activation == 'tanh':
            layers.append(nn.Tanh())
        elif activation == 'leakyrelu':
            layers.append(nn.LeakyReLU())
        elif activation == 'sigmoid':
            layers.append(nn.Sigmoid())
        if dropout > 0 and index < len(dim_list) - 2:
            layers.append(nn.Dropout(p=dropout))
        index += 1
    return nn.Sequential(*layers)
dc2677ccd1291942f474eb6fe7719103731f4cfc
3,646,900
import uuid
import os


def pddobj_video_file_path(instance, filename):
    """Generate file path for new video file"""
    ext = filename.split('.')[-1]
    filename = f'{uuid.uuid4()}.{ext}'
    return os.path.join('uploads/videos/', filename)
103f6f895983090100c245ce8845f744d4dce2aa
3,646,901
def load_and_prep_image(filename):
    """
    Reads an image from filename, detects faces with a Haar cascade, crops the
    last detected face, converts it to grayscale and resizes it to 48x48,
    returning an array of shape (1, 48, 48, 1).
    """
    image = cv2.imread(filename)
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier(haarcascade)
    faces = face_cascade.detectMultiScale(image, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = image[y:y + h, x:x + w]
    gray = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
    cropped_img = np.expand_dims(np.expand_dims(cv2.resize(gray, (48, 48)), -1), 0)
    return cropped_img
cbf3b1a840ecc931adf0dd7f84cff68e83180efe
3,646,902
def DoH(im, canvas, max_sigma=30, threshold=0.1, display=True):
    """
    Determinant of Hessian blob detector

    :param im: grayscale image
    :param max_sigma: maximum sigma of Gaussian kernel
    :param threshold: absolute lower bound; local maxima smaller than threshold are ignored
    """
    blobs = blob_doh(im, max_sigma=max_sigma, threshold=threshold)
    for blob in blobs:
        y, x, r = blob
        cv2.circle(canvas, (int(x), int(y)), int(r), (0, 0, 255), 2)
    if display:
        cv2.imshow('Determinant of Hessian', canvas)
        cv2.waitKey(0)
    return blobs
965ce92cdba24514fa9802a9e8891a96d97f1cf5
3,646,903
def _parse_class(s):
    """
    Parse a key, value pair, separated by '='

    On the command line (argparse) a declaration will typically look like:
        foo=hello
    or
        foo="hello world"
    """
    items = s.split('=')
    key = items[0].strip()  # we remove blanks around keys, as is logical
    value = None  # default when no '=value' part is given
    if len(items) > 1:
        # rejoin the rest:
        value = '='.join(items[1:])
    return (key, value)
db517a277e21448eb83ba25244a8bfa3892f18a4
3,646,904
import difflib


def getStringSimilarity(string1: str, string2: str):
    """
    This function will return a similarity of two strings.
    """
    return difflib.SequenceMatcher(None, string1, string2).quick_ratio()
292f552449569206ee83ce862c2fb49f6063dc9e
3,646,905
from typing import Optional from datetime import datetime import os import shutil import glob def model_downloader( handler_type: HandlerType, bucket_name: str, model_name: str, model_version: str, model_path: str, temp_dir: str, model_dir: str, ) -> Optional[datetime.datetime]: """ Downloads model to disk. Validates the s3 model path and the downloaded model. Args: handler_type: The handler type as implemented by the API. bucket_name: Name of the bucket where the model is stored. model_name: Name of the model. Is part of the model's local path. model_version: Version of the model. Is part of the model's local path. model_path: Model prefix of the versioned model. temp_dir: Where to temporarily store the model for validation. model_dir: The top directory of where all models are stored locally. Returns: The model's timestamp. None if the model didn't pass the validation, if it doesn't exist or if there are not enough permissions. """ logger.info( f"downloading from bucket {bucket_name}/{model_path}, model {model_name} of version {model_version}, temporarily to {temp_dir} and then finally to {model_dir}" ) client = S3(bucket_name) # validate upstream S3 model sub_paths, ts = client.search(model_path) try: validate_model_paths(sub_paths, handler_type, model_path) except CortexException: logger.info(f"failed validating model {model_name} of version {model_version}") return None # download model to temp dir temp_dest = os.path.join(temp_dir, model_name, model_version) try: client.download_dir_contents(model_path, temp_dest) except CortexException: logger.info( f"failed downloading model {model_name} of version {model_version} to temp dir {temp_dest}" ) shutil.rmtree(temp_dest) return None # validate model model_contents = glob.glob(os.path.join(temp_dest, "**"), recursive=True) model_contents = util.remove_non_empty_directory_paths(model_contents) try: validate_model_paths(model_contents, handler_type, temp_dest) except CortexException: logger.info( f"failed validating model {model_name} of version {model_version} from temp dir" ) shutil.rmtree(temp_dest) return None # move model to dest dir model_top_dir = os.path.join(model_dir, model_name) ondisk_model_version = os.path.join(model_top_dir, model_version) logger.info( f"moving model {model_name} of version {model_version} to final dir {ondisk_model_version}" ) if os.path.isdir(ondisk_model_version): shutil.rmtree(ondisk_model_version) shutil.move(temp_dest, ondisk_model_version) return max(ts)
4b7dea6f21278af92dd625f58bc5e28129f0da22
3,646,906
def calc_simcoef_distr(patfeats, labels, id_dict, simcoef):
    """
    Calculates the score distributions
    Inputs:
        - simcoef: simcoef the values are calculated with (string)
        - labels: list of strings with the scores to be calculated (e.g.: ['cited', 'random'])
        - id_dict: dictionary containing the patent ID pairs for the respective label
    Output:
        - scores: dictionary containing the scores for each label
    """
    scores = dict.fromkeys(labels)
    for label in labels:
        print(label)
        scores[label] = []
        combis = id_dict[label]
        for combi in combis:
            score = compute_sim(patfeats[combi[0]], patfeats[combi[1]], simcoef)
            scores[label].append(score)
    return scores
897bcb2e30e0587173557772c17589fe43841e60
3,646,907
def fft(signal, sampling_rate, plot=False, show_grid=True, fig_size=(10, 5)): """ Perform FFT on signal. Compute 1D Discrete Fourier Transform using Fast Fourier Transform. Optionally, plot the power spectrum of the frequency domain. Parameters ---------- signal : ndarray Input array to be transformed. sampling_rate : float Sampling rate of the input signal. plot : bool, optional Toggle to display a plot of the power spectrum. show_grid : bool, optional If creating a plot, toggle to show grid lines on the figure. fig_size : tuple, optional If plotting, set the width and height of the resulting figure. Returns ------- signal_fft : ndarray Transformation of the original input signal. """ n = len(signal) t = 1.0 / sampling_rate time = range(n) # Time vector xf = np.linspace(0.0, 1.0 / (2.0 * t), n // 2) yf = np.fft.fft(signal) / n # FFT and normalize if plot: f, axarr = plt.subplots(2, 1, figsize=fig_size) axarr[0].plot(time, signal) axarr[0].set_xlim(min(time), max(time)) axarr[0].set_xlabel("Time Steps") axarr[0].set_ylabel("Amplitude") axarr[0].grid(show_grid) axarr[1].plot(xf, abs(yf[0 : n // 2]), "r") # Plot the spectrum axarr[1].set_xlabel("Freq (Hz)") axarr[1].set_ylabel("|Y(freq)|") axarr[1].grid(show_grid) f.subplots_adjust(hspace=0.5) plt.suptitle("Power Spectrum", size=16) plt.show() return yf
70296c900e8ad7342be3c6ee18ff8b34e481ac0e
3,646,908
def pattern_count(data, **params): """ Count occurrences of a given pattern. Args: data (list): values. params (kwargs): pattern (str or list): the pattern to be sought in data (obligatory) metric (str): 'identity' counts identical positions, 'euclid' calculates the Euclidean distance (L2 norm), 'taxi' calculates the taxicab (Manhattan) distance (L1 norm). 'sup' returns maximum distance between positions, 'inf' returns minimum distance between positions. Only 'identity' can be used with non-numerical data. radius (number): the similarity cutoff (non-negative) normalized (bool): whether the number of occurrences is to be divided by the maximum number of occurrences. (default:False) Returns the number of occurrences of the pattern in the data. Invokes internal function '_pattern_common', which raises: NameError when 'pattern' is not given, TypeError if 'pattern' is neither string nor list, ValueError if 'radius' is negative or unsupported distance method used. """ pattern, patlen, radius, metric = _pattern_common(**params) normalized = params['normalized'] if 'normalized' in params else False counts = 0 for pos in range(len(data) - patlen + 1): if _list_distance(data[pos:pos + patlen], pattern, metric) <= radius: counts += 1 return counts if not normalized \ else 1.0 * counts / (len(data) - patlen + 1)
0c943554b4c5b7739a6ca16aa739b3cd614ab79d
3,646,909
import os
import logging
import sys

import glob2


def read_directory(directory):
    """
    Read file names from directory recursively

    Parameters
    ----------
    directory : string
        directory/folder name where to read the file names from

    Returns
    ---------
    files : list of strings
        list of file names
    """
    try:
        return glob2.glob(os.path.join(directory, '**', '*.*'))
    except Exception as e:
        logging.error("[{}] : {}".format(sys._getframe().f_code.co_name, e))
a3e272b437c0bf4ea59cfe7afdc9ebab6718954c
3,646,910
import requests
import signal


def do(hostname):
    """
    Performs a GET request.

    Parameters
    ----------
    hostname : str
        Target request

    Return
    ------
    The request results
    """
    try:
        return requests.get(hostname, timeout=10)
    except TimeoutException:
        print("\033[1;31mRequest timeout: test aborted\n\033[1;m")
        return None
    except requests.ConnectionError:
        print("\033[1;31mServer not found: test aborted\n\033[1;m")
        return None
    finally:
        signal.alarm(0)
7e300e4be98beecad29e28594b76230e6c19382d
3,646,911
def getAssignmentReport(assignment): """ Produces an ABET assignment report (as a markdown-formatted string) for the given assignment (which is expected to be a codepost API object) by pulling all relevant data as well as source code files (and grader comments) for randomly selected A, B and C samples """ courseId = assignment.course course = codepost.course.retrieve(id=courseId) courseName = course.name coursePeriod = course.period assignmentName = assignment.name assignmentPts = assignment.points assignmentMean = assignment.mean assignmentMedian = assignment.median summary = f""" # {courseName} - {coursePeriod} ## {assignmentName} * Points: {assignmentPts} * Mean: {assignmentMean} * Median: {assignmentMedian}\n\n""" # find ideal A, B, C samples submissions = assignment.list_submissions() aSubmission = submissions[0] bSubmission = submissions[0] cSubmission = submissions[0] # we only expect 1 submission per student since submissions are via our # scripts, but in any case, find the 3 closest to A=max%, B = 85%, C = 75% for submission in submissions: if submission.grade > aSubmission.grade: aSubmission = submission if abs(submission.grade / assignmentPts - .85) < abs(bSubmission.grade / assignmentPts - .85): bSubmission = submission if abs(submission.grade / assignmentPts - .75) < abs(cSubmission.grade / assignmentPts - .75): cSubmission = submission aSummary, aDetail = submissionToMarkdown(aSubmission,"A",assignmentPts) bSummary, bDetail = submissionToMarkdown(bSubmission,"B",assignmentPts) cSummary, cDetail = submissionToMarkdown(cSubmission,"C",assignmentPts) return summary + aSummary + bSummary + cSummary + "\n\n" + aDetail + bDetail + cDetail
fd2a49c8fa8e3a15a878e06d29ec9598912034c6
3,646,912
def start_game():
    """
    Method to start
    :return: Choice selection for new game or load game
    """
    maximize_console()
    print_title()
    print('Do you want to start a new game (enter 1) or resume an ongoing game (enter 2)?')
    choice = input('||> ')
    print()
    return choice
5780468f4239a8a519538a18feb12a0956dd4170
3,646,913
import os


def modified_files():
    """
    Gets a list of modified files in the repo.
    :return: A list of absolute paths to all changed files in the repo
    """
    repo_root_dir = repo_root()
    return [os.path.join(repo_root_dir, d.b_path)
            for d in get().head.commit.diff()
            if not (d.new_file or d.deleted_file)]
70e4aa10a754792782c3bdf76c11644730d7064d
3,646,914
import os def encode_data(dataset_path=DATASET_PATH): """Encodes the symbloc music in the dataset folder. :param dataset_path (str): Path to the dataset :return data, filenames (list): Encoded songs and their file names """ # encoded songs and their file names data = [] filenames = [] # loop through the dataset folder for dirpath, dirlist, filelist in os.walk(dataset_path): # process each file for this_file in filelist: # ensure extension is valid if os.path.splitext(this_file)[-1] not in EXTENSION: continue # parse the file filename = os.path.join(dirpath, this_file) try: score = converter.parse(filename) except: print("Warning: Failed to read \"%s\"" %filename) continue print("Parsing \"%s\"" %filename) # keep the first part (usually is the melody) of score score = score.parts[0].flat # transpose to C major/A minor score = transpose(score) # encoded song song = [] # process each note (chord) in the score for element in score.recurse(): if isinstance(element, note.Note): note_pitch = element.pitch.midi note_duration = element.quarterLength elif isinstance(element, note.Rest): note_pitch = 0 note_duration = element.quarterLength elif isinstance(element, chord.Chord): note_pitch = element.notes[-1].pitch.midi note_duration = element.quarterLength else: continue # ensure duration is valid if note_duration%0.25 == 0: # encode note note_step = int(note_duration/0.25) song += [str(note_pitch)] + ['-']*(note_step-1) else: # unacceptable duration found song = None print("Warning: Found an unacceptable duration when reading the file \"%s\"" %filename) break if song!=None: # save the encoded song and its name data.append(song) filenames.append(os.path.splitext(os.path.basename(filename))[0]) print("Successfully encoded %d songs" %(len(data))) return data, filenames
bd454a1b85906077f484f51a496668c13acc85d7
3,646,915
def get_inception_score(images, batch_size, splits=10):
    """
    Calculate the inception score of the generated images.
    `images` is a numpy array of 299x299x3 images with values in the range [0, 255].
    """
    assert (type(images) == np.ndarray)
    inception_model = inception_v3
    inception_model.eval()

    def get_softmax(x):
        x = inception_model(x)
        return tf.nn.softmax(x)

    n = len(images) // batch_size
    preds = np.zeros([len(images), 1000], dtype=np.float32)
    tfe.enable_eager_execution()
    dataloader = tf.data.Dataset.from_tensor_slices(images)
    dataloader = dataloader.batch(batch_size)
    for i, batch in enumerate(tfe.Iterator(dataloader), 0):
        batch_x = tf.Variable(batch)  # images
        # softmax
        preds[i * batch_size:(i + 1) * batch_size] = get_softmax(batch_x)
    scores = []  # IS score
    for i in range(splits):
        part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
        kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)
9e14691a5c885b6b95e6ff9ecff014db0cca119e
3,646,916
def generate_audio_testing(raw_gain, raw_freq, raw_dampings, modal_fir, reverb, impulse_profile, gains, frequencies, dampings, modal_response, noise, acceleration_scale, revc, audio_sample_rate, example_secs, scratch='controls'): """Generate DiffImpact's estimate of impact sound based on current model variables.""" # Generate impulse --> impact profile # magnitude_envelopes, taus, prediction['stdevs'] # impc = impact.get_controls(mags, stdevs, taus, 0) # needs to be 2D? # impulse_profile = impact.get_signal(impc['magnitudes'], impc['taus']) print("impulse profile shape: ", impulse_profile.shape) # force profile zero_freq = np.zeros_like(raw_freq) zero_gain = np.random.rand(1,256) #np.zeros_like(raw_gain) zero_damp = np.zeros_like(raw_dampings) # Get modal response from raw freqs, gains, and dampings irc_scratch = modal_fir.get_controls(raw_gain, raw_freq, raw_dampings) ir_scratch = modal_fir.get_signal(irc_scratch['gains'], irc_scratch['frequencies'], irc_scratch['dampings']) # Get modal response from scaled (passed through get_controls) freqs, gains, dampings ir = modal_fir.get_signal(gains, frequencies, dampings) print("ir: ", ir) print("model's output modal response: ", modal_response) #print("ir_scratch: ", ir_scratch) # Convolve together for modal vibration sounds if scratch == 'raw': audio = ddsp.core.fft_convolve(impulse_profile, ir_scratch) elif scratch == 'controls' or scratch =='control': audio = ddsp.core.fft_convolve(impulse_profile, ir) else: audio = ddsp.core.fft_convolve(impulse_profile, modal_response) print("convolved shape: ", audio.shape) # Generate and add time constant noise # Note that in the context, clips.shape[0] is batch size (which is 1 for all testing here) # clips.shape[1] is the actual clip size (like 441000 for 10 seconds of 44100 audio sampling rate) #unfiltered_noise = tf.random.uniform((clips.shape[0], int(clips.shape[1] * sample_factor)), #minval=-1.0, maxval=1.0) #noise = ddsp.core.frequency_filter(unfiltered_noise, ddsp.core.exp_sigmoid(noise_magnitudes - 4.0), 257) audio += noise print("after adding noise: ", audio.shape) # Add acceleration sound audio += impulse_profile * acceleration_scale print("after acceleration sound: ", audio.shape) # Add reverb # revc = reverb.get_controls(audio, reverb_gains, reverb_decay) audio = reverb.get_signal(audio, revc)#revc['ir']) print("after reverb: ", audio.shape) # Downsample from internal sampling rate to original recording sampling rate # audio = ddsp.core.resample(audio, clips.shape[1], 'linear') # Note that the resample function will return shape [n_timesteps], which is the second parameter print("audio sample rate: ", audio_sample_rate) audio = ddsp.core.resample(audio, int(audio_sample_rate)*example_secs, 'linear') return audio
1b7932c165c9615096b79b5d0c19859bc6dd113d
3,646,917
def dice_coef_multilabel(y_true, y_pred, numLabels=4, channel='channel_first'):
    """
    calculate channel-wise dice similarity coefficient
    :param y_true: the ground truth
    :param y_pred: the prediction
    :param numLabels: the number of classes
    :param channel: 'channel_first' or 'channel_last'
    :return: the dice score
    """
    assert channel == 'channel_first' or channel == 'channel_last', \
        r"channel has to be either 'channel_first' or 'channel_last'"
    dice = 0
    if channel == 'channel_first':
        y_true = np.moveaxis(y_true, 1, -1)
        y_pred = np.moveaxis(y_pred, 1, -1)
    for index in range(1, numLabels):
        temp = dice_coef(y_true[..., index], y_pred[..., index])
        dice += temp
    dice = dice / (numLabels - 1)
    return dice
16af1961d900add04f0f277335524ba1568feb12
3,646,918
def gaussian(sigma, fs, t=None):
    """
    return a gaussian smoothing filter

    Args:
        sigma: standard deviation of a Gaussian envelope
        fs: sampling frequency of input signals
        t: time scale
    Return:
        a Gaussian filter and corresponding time scale
    """
    if t is None:
        t = np.linspace(-sigma * 4.0, sigma * 4.0, int(sigma * 8.0 * fs))
    gss = np.exp(-0.5 * (t ** 2.0) / sigma ** 2.0)
    gss /= np.sum(gss)
    return gss, t
aba5d419bb22cd0bfe0a702346dd77735b7f0d4c
3,646,919
def score_sent(sent):
    """Returns a score btw -1 and 1"""
    sent = [e.lower() for e in sent if e.isalnum()]
    total = len(sent)
    pos = len([e for e in sent if e in positive_wds_with_negation])
    neg = len([e for e in sent if e in negative_wds_with_negation])
    if total > 0:
        return (pos - neg) / total
    else:
        return 0
cc70d035e932513ae27743bbca66ae8d870fcc91
3,646,920
import torch


def flipud(tensor):
    """
    Flips a given tensor along the first dimension (up to down)

    Parameters
    ----------
    tensor
        a tensor at least two-dimensional

    Returns
    -------
    Tensor
        the flipped tensor
    """
    return torch.flip(tensor, dims=[0])
b0fd62172b0055d9539b554a8c967c058e46b397
3,646,921
def connect():
    """Function to connect to database on Amazon Web Services"""
    try:
        engine = create_engine(
            'mysql+mysqlconnector://dublinbikesadmin:dublinbikes2018@dublinbikes.cglcinwmtg3w.eu-west-1.rds.amazonaws.com/dublinbikes')
        port = 3306
        connection = engine.connect()
        Session.configure(bind=engine)
        return engine
        # https://campus.datacamp.com/courses/introduction-to-relational-databases-in-python/advanced-sqlalchemy-queries?ex=2#skiponboarding
    except Exception as err:
        print("An error occurred when connecting to the database: ", err)
        # https://dev.mysql.com/doc/connector-python/en/connector-python-api-errors-error.html
81da870305a853b621f374b521bb680e435d852b
3,646,922
def get_file_type(filepath):
    """Returns the extension of a given filepath or url."""
    return filepath.split(".")[-1]
070a1b22508eef7ff6e6778498ba764c1858cccb
3,646,923
def calcB1grad(B2grad, W2, A2):
    """
    Calculates the gradient of the cost with respect to B1 using the chain rule
    INPUT: B2grad, [layer3Len,1] ; W2, [layer2Len, layer3Len] ; A2, [layer2len, 1]
    OUTPUT: B1grad, [layer2Len, 1]
    """
    temp1 = np.dot(W2, B2grad)  # layer2Len * 1 vector
    sigmGradient = sigmoidGradient(A2)  # layer2len * 1 vector
    B1grad = np.multiply(sigmGradient, temp1)
    return B1grad
e214f79be1377b4fc0f36690accf6072fee27884
3,646,924
def plot_3d(x, y, z, title, labels):
    """
    Returns a matplotlib figure containing the 3D T-SNE plot.

    Args:
        x, y, z: arrays
        title: string with name of the plot
        labels: list of strings with label names: [x, y, z]
    """
    plt.rcParams.update({'font.size': 30, 'legend.fontsize': 20})
    plt.rc('font', size=30)
    plt.rc('axes', titlesize=35)
    labelpad = 30
    figure = plt.figure(figsize=(12, 12))
    ax = figure.add_subplot(projection='3d')
    ax.scatter(x, y, z)
    ax.set_title(title)
    ax.set_xlabel(labels[0], labelpad=labelpad)
    ax.set_ylabel(labels[1], labelpad=labelpad)
    ax.set_zlabel(labels[2], labelpad=labelpad)
    plt.tight_layout()
    return figure
624a62f9dc941d6b7cfed06e10250fae8c8defa9
3,646,925
def is_rating_col_name(col: str) -> bool:
    """
    Checks to see if the name matches the naming convention for a rating column of data, i.e. A wrt B

    :param col: The name of the column
    :return: T/F
    """
    if col is None:
        return False
    elif isinstance(col, (float, int)) and np.isnan(col):
        return False
    elif is_pw_col_name(col):
        return False
    else:
        return __RATING_COL_REGEX.search(col) is not None
57802e888f5a75cdc521a08115ad3b74a56da43d
3,646,926
def _make_buildifier_command():
    """Returns a list starting with the buildifier executable, followed by any
    required default arguments."""
    return [
        find_data(_BUILDIFIER),
        "-add_tables={}".format(find_data(_TABLES))]
ab480ff1bc7b21685a4dd95bbc12ae5ee223bdc0
3,646,927
from typing import Union


def infer_path_type(path: str) -> Union[XPath, JSONPath]:
    """
    Infers the type of a path (XPath or JSONPath) based on its syntax.
    It performs some basic sanity checks to differentiate a JSONPath from an XPath.

    :param path: A valid XPath or JSONPath string.
    :return: An instance of JSONPath or XPath
    """
    if not path:
        raise ValueError("No path given")
    if path[0] in ['$', '@']:
        return JSONPath(path)
    else:
        if path[0] in ['.', '/']:
            return XPath(path)
        else:
            raise ValueError("Couldn't identify the path type for {}".format(path))
abeed8003b05dd5b66ada1367d0a5acf39102d60
3,646,928
def get_proximity_angles():
    """Get the angles used for the proximity sensors."""
    angles = []

    # Left-side of the agent
    angles.append(3 * pi / 4)  # 135° (counter-clockwise)
    for i in range(5):  # 90° until 10° with hops of 20° (total of 5 sensors)
        angles.append(pi / 2 - i * pi / 9)

    # Center
    angles.append(0)  # 0°

    # Right-side of the agent
    for i in range(5):  # -10° until -90° with hops of 20° (total of 5 sensors)
        angles.append(-pi / 18 - i * pi / 9)
    angles.append(-3 * pi / 4)  # -135° (clockwise)

    return angles
29c093d1aef0d10d24968af8bee06e6d050e9119
3,646,929
def delete(request, scenario_id):
    """
    Delete the scenario
    """
    # Retrieve the scenario
    session = SessionMaker()
    scenario = session.query(ManagementScenario).filter(ManagementScenario.id == scenario_id).one()

    # Delete the current scenario
    session.delete(scenario)
    session.commit()

    return redirect('parleys_creek_management:jobs')
4d9c7090d66f8cd3bd055c6f383870b4648a3828
3,646,930
import os


def get_python_list(file_path):
    """
    Find all the .py files in the directory and append them to a raw_files list.

    :params: file_path = the path to the folder where the to-be read folders are.
    :returns: raw_files : list of all files ending with '.py' in the read folder.
    """
    python_files = []
    for file in os.listdir(file_path):
        if file.endswith(".py"):
            python_files.append(file)
    print('\nThese are all the .py files inside the folder: \n')
    for i in python_files:
        print(i)
    return python_files
b771814b86c5405d5810694ad58b7a8fe80b1885
3,646,931
def angle(p1, p2, p3):
    """Returns an angle from a series of 3 points (point #2 is centroid).
    Angle is returned in degrees.

    Parameters
    ----------
    p1,p2,p3 : numpy arrays, shape = [n_points, n_dimensions]
        Triplets of points in n-dimensional space, aligned in rows.

    Returns
    -------
    angles : numpy array, shape = [n_points]
        Series of angles in degrees
    """
    v1 = p1 - p2
    v2 = p3 - p2
    return angle_2v(v1, v2)
3e57121a20f18f2ee5728eeb1ea2ffb39500db40
3,646,932
def transformer_parsing_base():
    """HParams for parsing on WSJ only."""
    hparams = transformer_base()
    hparams.attention_dropout = 0.2
    hparams.layer_prepostprocess_dropout = 0.2
    hparams.max_length = 512
    hparams.learning_rate_warmup_steps = 16000
    hparams.hidden_size = 1024
    hparams.learning_rate = 0.05
    hparams.shared_embedding_and_softmax_weights = False
    return hparams
f86b3fe446866ff3de51f02278c2d2c9d7f1b126
3,646,933
def split_protocol(urlpath):
    """Return protocol, path pair"""
    urlpath = stringify_path(urlpath)
    if "://" in urlpath:
        protocol, path = urlpath.split("://", 1)
        if len(protocol) > 1:
            # excludes Windows paths
            return protocol, path
    return None, urlpath
e9b006d976847daa9a94eb46a9a1c2f53cd9800f
3,646,934
import pprint


def createParPythonMapJob(info):
    """
    Create map job json for IGRA matchup. Example:

    job = {
        'type': 'test_map_parpython',
        'params': {
            'year': 2010,
            'month': 7
        },
        'localize_urls': [
        ]
    }
    """
    print("Info:")
    pprint.pprint(info, indent=2)

    # build params
    job = {
        'type': 'test_map_parpython',
        'name': 'test_map_parpython-%04d-%02d' % (int(info['year']), int(info['month'])),
        'params': info,
        'localize_urls': []
    }

    print("Job:")
    pprint.pprint(job, indent=2)

    return job
142afc4b4be0d77b4921e57c358494dbfc43c6ab
3,646,935
def calc_lampam_from_delta_lp_matrix(stack, constraints, delta_lampams):
    """
    returns the lamination parameters of a laminate

    INPUTS

    - ss: laminate stacking sequences
    - constraints: design and manufacturing guidelines
    - delta_lampams: ply partial lamination parameters
    """
    lampam = np.zeros((12,), float)
    for ind_ply in range(delta_lampams.shape[0]):
        lampam += delta_lampams[
            ind_ply, constraints.ind_angles_dict[stack[ind_ply]]]
    return lampam
a1d179f441368f2ebef8dc4be4e2a364d41cf84e
3,646,936
def perp(i):
    """Calculates the perpetuity to present worth factor.

    :param i: The interest rate.
    :return: The calculated factor.
    """
    return 1 / i
2fe59a039ac5ecb295eb6c443143b15e41fdfddb
3,646,937
def chebyshev(x, y):
    """chebyshev distance.

    Args:
        x: pd.Series, sample feature value.
        y: pd.Series, sample feature value.
    Returns:
        chebyshev distance value.
    """
    return np.max(np.abs(x - y))
876f0d441c48a7ab4a89b1826eb76459426ad9a3
3,646,938
def soda_url_helper(*, build_url, config, year, **_):
    """
    This helper function uses the "build_url" input from flowbyactivity.py,
    which is a base url for data imports that requires parts of the url text
    string to be replaced with info specific to the data year. This function
    does not parse the data, only modifies the urls from which data is obtained.
    :param build_url: string, base url
    :param config: dictionary, items in FBA method yaml
    :param year: year
    :return: list, urls to call, concat, parse, format into Flow-By-Activity format
    """
    url = build_url
    url = url.replace('__format__', str(config['formats'][year]))
    url = url.replace('__url_text__', str(config['url_texts'][year]))
    return [url]
b4e0f8c781a966d0291dad7d897eba02dc7a4e09
3,646,939
import argparse def generate_base_provider_parser(): """Function that generates the base provider to be used by all dns providers.""" parser = argparse.ArgumentParser(add_help=False) parser.add_argument('action', help='specify the action to take', default='list', choices=['create', 'list', 'update', 'delete']) parser.add_argument( 'domain', help='specify the domain, supports subdomains as well') parser.add_argument('type', help='specify the entry type', default='TXT', choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'LOC']) parser.add_argument('--name', help='specify the record name') parser.add_argument('--content', help='specify the record content') parser.add_argument('--ttl', type=int, help='specify the record time-to-live') parser.add_argument('--priority', help='specify the record priority') parser.add_argument( '--identifier', help='specify the record for update or delete actions') parser.add_argument('--log_level', help='specify the log level', default='ERROR', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']) parser.add_argument('--output', help=('specify the type of output: by default a formatted table (TABLE), ' 'a formatted table without header (TABLE-NO-HEADER), ' 'a JSON string (JSON) or no output (QUIET)'), default='TABLE', choices=['TABLE', 'TABLE-NO-HEADER', 'JSON', 'QUIET']) return parser
9f677188835a8bddaefcdf2ab173d0392285e630
3,646,940
from typing import Optional from datetime import datetime from typing import List def search( submitted_before: Optional[datetime] = None, submitted_after: Optional[datetime] = None, awaiting_service: Optional[str] = None, url:Optional[str] = None, token:Optional[str] = None, quiet:bool = False ) -> List[dict]: """Query metadatasets according to search critera. If datetimes are specified without a timezone, they are assumed to be local time. Note that specifying a timezone is only possible programmatically.""" config = get_config(url, token) # Converting the datetimes to UTC is done only to have any timezone # information at all. datetime objects without a timezone will be rejected # by the API as invalid ISO strings. In principle they can be submitted in # an arbitrary timezone. Applying `astimezone(utc)` to datetime objects # without a timezone annotation assumes local time. args = { 'submitted_before': _add_timezone(submitted_before), 'submitted_after': _add_timezone(submitted_after), 'awaiting_service': awaiting_service } args = { k: v for k, v in args.items() if v is not None } info("Sending query to server", quiet) with ApiClient(config) as api_client: api_instance = metadata_api.MetadataApi(api_client) api_response = api_instance.get_meta_data_sets(**args) res = [elem.to_dict() for elem in api_response] return result(res, quiet)
a2ce0d86fde2792365f27cf386e7c9ef0d4a0fa1
3,646,941
from typing import List from typing import Pattern import re from typing import Optional from typing import Match def _target_js_variable_is_used( *, var_name: str, exp_lines: List[str]) -> bool: """ Get a boolean value whether target variable is used in js expression or not. Parameters ---------- var_name : str Target variable name. exp_lines : list of str js expression lines. Returns ------- result : bool If target variable is used in js expression, True will be returned. """ var_pattern: Pattern = re.compile(pattern=rf'var ({var_name}) = ') used_pattern_1: Pattern = re.compile( pattern=rf'{var_name}[ ;\)\.}},\]\[]') used_pattern_2: Pattern = re.compile( pattern=rf'{var_name}$') for line in exp_lines: if '//' in line: continue if var_name not in line: continue match: Optional[Match] = var_pattern.search(string=line) if match is not None: continue match = used_pattern_1.search(string=line) if match is not None: return True match = used_pattern_2.search(string=line) if match is not None: return True return False
be07cb1628676717b2a02723ae7c01a7ba7364d6
3,646,942
def rnn_temporal(x, h0, Wx, Wh, b):
    """
    Run a vanilla RNN forward on an entire sequence of data. We assume an input
    sequence composed of T vectors, each of dimension D. The RNN uses a hidden
    size of H, and we work over a minibatch containing N sequences. After running
    the RNN forward, we return the hidden states for all timesteps.

    Inputs:
    - x: Input data for the entire timeseries, of shape (N, T, D).
    - h0: Initial hidden state, of shape (N, H)
    - Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
    - Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
    - b: Biases of shape (H,)

    Returns a tuple of:
    - h: Hidden states for the entire timeseries, of shape (N, T, H).
    """
    N, T, _ = x.shape
    H = h0.shape[1]
    h = np.zeros([N, 0, H])
    for t in range(T):
        h_step = rnn_step(x[:, t, :], h0 if t == 0 else h[:, t - 1, :], Wx, Wh, b).reshape(N, 1, H)
        h = np.append(h, h_step, axis=1)
    return h
794fed02ef96c97d9b4ccb6a7278fc72b81eea33
3,646,943
from typing import Union def rejection_fixed_lag_stitch(fixed_particle: np.ndarray, last_edge_fixed: np.ndarray, last_edge_fixed_length: float, new_particles: MMParticles, adjusted_weights: np.ndarray, stitch_time_interval: float, min_resample_time_indices: Union[list, np.ndarray], dist_prior_bound: float, mm_model: MapMatchingModel, max_rejections: int, break_on_zero: bool = False) -> Union[np.ndarray, None, int]: """ Attempt up to max_rejections of rejection sampling to stitch a single fixed particle :param fixed_particle: trajectory prior to stitching time :param last_edge_fixed: row of last fixed particle :param last_edge_fixed_length: length of last fixed edge (so don't have to call get_geometry) :param new_particles: particles proposed to stitching :param adjusted_weights: non-interacting stitching weights :param stitch_time_interval: time between stitching observations :param min_resample_time_indices: indices for row of min_resample_time in new_particles :param dist_prior_bound: bound on distance transition density (given positive if break_on_zero) :param mm_model: MapMatchingModel :param max_rejections: number of rejections to attempt, if none succeed return None :param break_on_zero: whether to return 0 if new_stitching_distance=0 :return: stitched particle """ n = len(new_particles) for reject_ind in range(max_rejections): new_index = np.random.choice(n, 1, p=adjusted_weights)[0] new_particle = new_particles[new_index].copy() # Reject if new_particle starts from different edge if not np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]): continue # Reject if new_particle doesn't overtake fixed_particles elif np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \ new_particle[1, 4] < last_edge_fixed[4]: continue # Calculate stitching distance first_distance_j_to_k = (new_particle[1, 4] - last_edge_fixed[4]) * last_edge_fixed_length first_distance_k = new_particle[1, -1] change_dist = np.round(first_distance_j_to_k - first_distance_k, 5) new_particle[1:(min_resample_time_indices[new_index] + 1), -1] += change_dist new_stitching_distance = new_particle[min_resample_time_indices[new_index], -1] if break_on_zero and new_stitching_distance < 1e-5: return 0 # Evaluate distance prior new_stitching_distance_prior = mm_model.distance_prior_evaluate(new_stitching_distance, stitch_time_interval) new_stitching_deviation_prior = mm_model.deviation_prior_evaluate(fixed_particle[-1, 5:7], new_particle[None, min_resample_time_indices[new_index], 5:7], new_stitching_distance) accept_prob = new_stitching_distance_prior * new_stitching_deviation_prior / dist_prior_bound if accept_prob > (1 - 1e-5) or np.random.uniform() < accept_prob: out_particle = np.append(fixed_particle, new_particle[1:], axis=0) return out_particle return None
aeec4fc1956c7a63f15812988a38d54b63234de4
3,646,944
def zip_equalize_lists(a, b):
    """
    A zip implementation which will not stop when reaching the end of the
    smallest list, but will append None's to the smaller list to fill the gap
    """
    a = list(a)
    b = list(b)

    a_len = len(a)
    b_len = len(b)
    diff = abs(a_len - b_len)

    if a_len < b_len:
        for _ in range(diff):
            a.append(None)
    if b_len < a_len:
        for _ in range(diff):
            b.append(None)

    return zip(a, b)
1cf5b9cadf4b75f6dab6c42578583585ea7abdfc
3,646,945
def cover_line(line):
    """
    This function takes a string containing a line that should potentially
    have an execution count and returns a version of that line that does have
    an execution count if deemed appropriate by the rules in validate_line().

    Basically, if there is currently no number where there should be an
    execution count (indicating this line did not make it into the compiled
    binary), a zero is placed there to indicate that this line was executed 0
    times. Test coverage viewers will interpret this to mean that the line
    could potentially have been executed.
    """
    first_bar = line.find("|")
    second_bar = line.find("|", first_bar + 1)
    if validate_line(line, second_bar) and \
            line[second_bar - 1].strip() == "":
        # If this line could have been executed but wasn't (no
        # number between first and second bars), put a zero
        # before the second bar, indicating that it was
        # executed zero times. Test coverage viewers will interpret
        # this as meaning the line should have been covered
        # but wasn't.
        return "".join([line[:second_bar - 1], "0", line[second_bar:]])

    # There's already an execution count - this
    # template must have been instantiated
    return line
612cd295b78ce9a0d960b902027827c03733f609
3,646,946
def find_period(samples_second):
    """
    # Find Period

    Args:
        samples_second (int): number of samples per second

    Returns:
        float: samples per period divided by samples per second
    """
    samples_period = 4
    return samples_period / samples_second
c4a53e1d16be9e0724275034459639183d01eeb3
3,646,947
def sqrt(x: int) -> int:
    """
    Babylonian Square root implementation
    """
    z = (x + 1) // 2
    y = x
    while z < y:
        y = z
        z = ((x // z) + z) // 2
    return y
1a91d35e5783a4984f2aca5a9b2a164296803317
3,646,948
def is_consecutive_list(list_of_integers):
    """
    # ========================================================================
    IS CONSECUTIVE LIST

    PURPOSE
    -------
    Reports if elements in a list increase in a consecutive order.

    INPUT
    -----
    [[List]] [list_of_integers]
        - A list of integers.

    Return
    ------
    [BOOLEAN]
        - Returns true if a list is consecutive or false if the same number
          appears consecutively.
    # ========================================================================
    """
    for i in range(1, len(list_of_integers)):
        if list_of_integers[i] - list_of_integers[i - 1] != 1:
            return False
    return True
3b165eb8d50cc9e0f3a13b6e4d47b7a8155736b9
3,646,949
def circles(x, y, s, c='b', vmin=None, vmax=None, **kwargs): """ See https://gist.github.com/syrte/592a062c562cd2a98a83 Make a scatter plot of circles. Similar to plt.scatter, but the size of circles are in data scale. Parameters ---------- x, y : scalar or array_like, shape (n, ) Input data s : scalar or array_like, shape (n, ) Radius of circles. c : color or sequence of color, optional, default : 'b' `c` can be a single color format string, or a sequence of color specifications of length `N`, or a sequence of `N` numbers to be mapped to colors using the `cmap` and `norm` specified via kwargs. Note that `c` should not be a single numeric RGB or RGBA sequence because that is indistinguishable from an array of values to be colormapped. (If you insist, use `color` instead.) `c` can be a 2-D array in which the rows are RGB or RGBA, however. vmin, vmax : scalar, optional, default: None `vmin` and `vmax` are used in conjunction with `norm` to normalize luminance data. If either are `None`, the min and max of the color array is used. kwargs : `~matplotlib.collections.Collection` properties Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), norm, cmap, transform, etc. Returns ------- paths : `~matplotlib.collections.PathCollection` Examples -------- a = np.arange(11) circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none') plt.colorbar() License -------- This code is under [The BSD 3-Clause License] (http://opensource.org/licenses/BSD-3-Clause) """ if np.isscalar(c): kwargs.setdefault('color', c) c = None if 'fc' in kwargs: kwargs.setdefault('facecolor', kwargs.pop('fc')) if 'ec' in kwargs: kwargs.setdefault('edgecolor', kwargs.pop('ec')) if 'ls' in kwargs: kwargs.setdefault('linestyle', kwargs.pop('ls')) if 'lw' in kwargs: kwargs.setdefault('linewidth', kwargs.pop('lw')) # You can set `facecolor` with an array for each patch, # while you can only set `facecolors` with a value for all. zipped = np.broadcast(x, y, s) patches = [Circle((x_, y_), s_) for x_, y_, s_ in zipped] collection = PatchCollection(patches, **kwargs) if c is not None: c = np.broadcast_to(c, zipped.shape).ravel() collection.set_array(c) collection.set_clim(vmin, vmax) ax = plt.gca() ax.add_collection(collection) ax.autoscale_view() plt.draw_if_interactive() if c is not None: plt.sci(collection) return collection
cb3b2c4316ec573aa29cf5d500f50fbcd64f47b5
3,646,950
from datetime import datetime
from uuid import uuid4


def generate_agency_tracking_id():
    """
    Generate an agency tracking ID for the transaction that has some random
    component. I include the date in here too, in case that's useful. (The
    current non-random tracking id has the date in it.)

    @todo - make this more random
    """
    random = str(uuid4()).replace('-', '')
    today = datetime.now().strftime("%m%d")
    return 'PCOCI%s%s' % (today, random[0:6])
d54e74c392bed6f4b3d8ec7409a6d7709a3bc8f2
3,646,951
import pathlib


def get_enabled_gems(cmake_file: pathlib.Path) -> set:
    """
    Gets a list of enabled gems from the cmake file
    :param cmake_file: path to the cmake file
    :return: set of gem targets found
    """
    cmake_file = pathlib.Path(cmake_file).resolve()
    if not cmake_file.is_file():
        logger.error(f'Failed to locate cmake file {cmake_file}')
        return set()

    gem_target_set = set()
    with cmake_file.open('r') as s:
        in_gem_list = False
        for line in s:
            line = line.strip()
            if line.startswith(enable_gem_start_marker):
                # Set the flag to indicate that we are in the ENABLED_GEMS variable
                in_gem_list = True
                # Skip past the 'set(ENABLED_GEMS' marker just in case there are gems declared on the same line
                line = line[len(enable_gem_start_marker):]
            if in_gem_list:
                # Since we are inside the ENABLED_GEMS variable determine if the line has the end_marker of ')'
                if line.endswith(enable_gem_end_marker):
                    # Strip away the line end marker
                    line = line[:-len(enable_gem_end_marker)]
                    # Set the flag to indicate that we are no longer in the ENABLED_GEMS variable after this line
                    in_gem_list = False
                # Split the rest of the line on whitespace just in case there are multiple gems in a line
                gem_name_list = list(map(lambda gem_name: gem_name.strip('"'), line.split()))
                gem_target_set.update(gem_name_list)

    return gem_target_set
0b4c8c68230b075d2c27d72b1290217864fc6888
3,646,952
from operator import add
from operator import mul


def celeryAdd3(a, b):
    """This is for a specific Celery workflow
    f = (a+b) * (a+b)
    We'll use chord, group and chain"""
    if request.method == 'GET':
        # When a worker receives an expired task it will mark the task as REVOKED
        res = (group(add.s(a, b), add.s(a, b)) | mul.s()).apply_async(expires=60)
        # https://docs.celeryproject.org/en/stable/userguide/calling.html#expiration
        _ret = """
        <p>result: 200</p>
        <p>msg: "Added value is calculating at task ID: {0}"</p>
        <p>htmlmsg: <a href="/api/v1_0/status/{0}">{0}</a></p>""".format(res.id)
        # return jsonify(_ret)
        return _ret
84599389542663207ff57a07e3b58cecc9b6427b
3,646,953
def create_unmerge_cells_request(sheet_id, start, end):
    """
    Create v4 API request to unmerge rows and/or columns for a given worksheet.
    """
    start = get_cell_as_tuple(start)
    end = get_cell_as_tuple(end)

    return {
        "unmergeCells": {
            "range": {
                "sheetId": sheet_id,
                "startRowIndex": start[ROW] - 1,
                "endRowIndex": end[ROW],
                "startColumnIndex": start[COL] - 1,
                "endColumnIndex": end[COL],
            }
        }
    }
3fd560a82522738099bacd3f606bbea948de7226
3,646,954
def list_to_str(slist, seperator=None):
    """Convert list of any type to string seperated by seperator."""
    if not seperator:
        seperator = ','
    if not slist:
        return ""
    slist = squash_int_range(slist)
    return seperator.join([str(e) for e in slist])
64d20b744a7b465e58e50caf60e0e1aaf9b0c2e7
3,646,955
def log_web_error(msg):
    """Take a screenshot of a web browser based error

    Use this function to capture a screen shot of the web browser when using
    Python's `assert` keyword to perform assertions.
    """
    screenshot = selene.helpers.take_screenshot(selene.browser.driver(),)
    msg = '''{original_msg}
    screenshot: file://{screenshot}'''.format(original_msg=msg, screenshot=screenshot)
    return msg
8f5e9f6c586e6739d6581986c66689881d812316
3,646,956
def parent_id_name_and_quotes_for_table(sqltable):
    """ Return a tuple with 2 items (name_of_parent_id_field, quotes).
    quotes is True if the field's data type is a string and must be quoted,
    False otherwise. """
    id_name = None
    quotes = False
    for colname, sqlcol in sqltable.sql_columns.items():
        # root table
        if not sqltable.root.parent and \
                sqlcol.node == sqltable.root.get_id_node():
            id_name = colname
            if sqlcol.typo == "STRING":
                quotes = True
            break
        else:
            # nested table
            if sqlcol.node.reference:
                id_name = colname
                if sqlcol.typo == "STRING":
                    quotes = True
                break
    return (id_name, quotes)
6f3319dc6ae0ea70af5d2c9eda90fb1a9fb9daac
3,646,957
def client():
    """Returns a Flask client for the app."""
    return app.test_client()
40d3cf2c330d2f82b6ae7514e833ef5d1bcb9594
3,646,958
def _get_horizons_ephem(
    id,
    start: Time,
    stop: Time,
    step: str = "12H",
    id_type: str = "smallbody",
    location: str = "@TESS",
    quantities: str = "2,3,9,19,20,43",
):
    """Returns JPL Horizons ephemeris.

    This is simple cached wrapper around astroquery's Horizons.ephemerides.
    """
    epochs = {"start": start.iso, "stop": stop.iso, "step": step}
    log.debug(
        f"Horizons query parameters:\n\tid={id}\n\tlocation={location}\n\tepochs={epochs}"
    )
    t = Horizons(id=id, id_type=id_type, location=location, epochs=epochs)
    result = t.ephemerides(quantities=quantities)
    log.debug(f"Received {len(result)} ephemeris results")
    return result
f57fae233cb16f365b1db2a78aa3de29df479aea
3,646,959
from enthought.mayavi import version from .maps_3d import plot_map_3d, m2screenshot from enthought.tvtk.api import tvtk from enthought.mayavi import mlab from enthought.mayavi.core.registry import registry def plot_map(map, affine, cut_coords=None, anat=None, anat_affine=None, figure=None, axes=None, title=None, threshold=None, annotate=True, draw_cross=True, do3d=False, **kwargs): """ Plot three cuts of a given activation map (Frontal, Axial, and Lateral) Parameters ---------- map : 3D ndarray The activation map, as a 3D image. affine : 4x4 ndarray The affine matrix going from image voxel space to MNI space. cut_coords: 3-tuple of floats or None The MNI coordinates of the point where the cut is performed, in MNI coordinates and order. If None is given, the cut point is calculated automaticaly. anat : 3D ndarray or False, optional The anatomical image to be used as a background. If None, the MNI152 T1 1mm template is used. If False, no anat is displayed. anat_affine : 4x4 ndarray, optional The affine matrix going from the anatomical image voxel space to MNI space. This parameter is not used when the default anatomical is used, but it is compulsory when using an explicite anatomical image. figure : integer or matplotlib figure, optional Matplotlib figure used or its number. If None is given, a new figure is created. axes : matplotlib axes or 4 tuple of float: (xmin, xmax, ymin, ymin), optional The axes, or the coordinates, in matplotlib figure space, of the axes used to display the plot. If None, the complete figure is used. title : string, optional The title dispayed on the figure. threshold : a number, None, or 'auto' If None is given, the maps are not thresholded. If a number is given, it is used to threshold the maps: values below the threshold are plotted as transparent. If auto is given, the threshold is determined magically by analysis of the map. annotate: boolean, optional If annotate is True, positions and left/right annotation are added to the plot. draw_cross: boolean, optional If draw_cross is True, a cross is drawn on the plot to indicate the cut plosition. do3d: {True, False or 'interactive'}, optional If True, Mayavi is used to plot a 3D view of the map in addition to the slicing. If 'interactive', the 3D visualization is displayed in an additional interactive window. kwargs: extra keyword arguments, optional Extra keyword arguments passed to pylab.imshow Notes ----- Arrays should be passed in numpy convention: (x, y, z) ordered. 
Use masked arrays to create transparency: import numpy as np map = np.ma.masked_less(map, 0.5) plot_map(map, affine) """ map, affine = _xyz_order(map, affine) nan_mask = np.isnan(np.asarray(map)) if np.any(nan_mask): map = map.copy() map[nan_mask] = 0 # Deal with automatic settings of plot parameters if threshold == 'auto': threshold = _fast_abs_percentile(map) if cut_coords is None: x_map, y_map, z_map = find_cut_coords(map, activation_threshold=threshold) cut_coords = coord_transform(x_map, y_map, z_map, affine) if threshold is not None: if threshold == 0: map = np.ma.masked_equal(map, 0, copy=False) else: map = np.ma.masked_inside(map, -threshold, threshold, copy=False) if do3d: try: if not int(version.version[0]) > 2: raise ImportError except ImportError: warnings.warn('Mayavi > 3.x not installed, plotting only 2D') do3d = False # Make sure that we have a figure if not isinstance(figure, Figure): if do3d: size = (10, 2.6) else: size = (6.6, 2.6) fig = pl.figure(figure, figsize=size, facecolor='w') else: fig = figure if isinstance(axes, Axes): assert axes.figure is figure, ("The axes passed are not " "in the figure") canonical_anat = False if anat is None: try: anat, anat_affine, vmax_anat = _AnatCache.get_anat() canonical_anat = True except OSError, e: anat = False warnings.warn(repr(e)) # Use Mayavi for the 3D plotting if do3d: version = tvtk.Version() offscreen = True if (version.vtk_major_version, version.vtk_minor_version) < (5, 2): offscreen = False if do3d == 'interactive': offscreen = False cmap = kwargs.get('cmap', pl.cm.cmap_d[pl.rcParams['image.cmap']]) # Computing vmin and vmax is costly in time, and is needed # later, so we compute them now, and store them for future # use vmin = kwargs.get('vmin', map.min()) kwargs['vmin'] = vmin vmax = kwargs.get('vmax', map.max()) kwargs['vmax'] = vmax plot_map_3d(np.asarray(map), affine, cut_coords=cut_coords, anat=anat, anat_affine=anat_affine, offscreen=offscreen, cmap=cmap, threshold=threshold, vmin=vmin, vmax=vmax) ax = fig.add_axes((0.001, 0, 0.29, 1)) ax.axis('off') m2screenshot(mpl_axes=ax) axes = (0.3, 0, .7, 1.) if offscreen: # Clean up, so that the offscreen engine doesn't become the # default mlab.clf() engine = mlab.get_engine() for key, value in registry.engines.iteritems(): if value is engine: registry.engines.pop(key) break if axes is None: axes = [0., 0., 1., 1.] if operator.isSequenceType(axes): axes = fig.add_axes(axes) axes.axis('off') ortho_slicer = OrthoSlicer(cut_coords, axes=axes) # Check that we should indeed plot an anat: we have one, and the # cut_coords are in its range x, y, z = cut_coords if (anat is not False and np.all( np.array(coord_transform(x, y, z, np.linalg.inv(anat_affine))) < anat.shape)): anat_kwargs = kwargs.copy() anat_kwargs['cmap'] = pl.cm.gray anat_kwargs.pop('alpha', 1.) if canonical_anat: # We special-case the 'canonical anat', as we don't need # to do a few transforms to it. anat_kwargs['vmin'] = 0 anat_kwargs['vmax'] = vmax_anat else: anat_kwargs.pop('vmin', None) anat_kwargs.pop('vmax', None) anat, anat_affine = _xyz_order(anat, anat_affine) ortho_slicer.plot_map(anat, anat_affine, **anat_kwargs) ortho_slicer.plot_map(map, affine, **kwargs) if annotate: ortho_slicer.annotate() if draw_cross: ortho_slicer.draw_cross(color='k') if title is not None and not title == '': ortho_slicer.title(title) return ortho_slicer
d7ef70bb98849532e94d7b975303cbd370fe8bbe
3,646,960
def get_mode(h5, songidx=0):
    """
    Get mode from a HDF5 song file, by default the first song in it
    """
    return h5.root.analysis.songs.cols.mode[songidx]
9a9eb7cfed2bc525a3b5d3c8cb251c7e170a589c
3,646,961
import json


def read_label_schema(path):
    """
    Reads json file and returns deserialized LabelSchema.
    """
    with open(path, encoding="UTF-8") as read_file:
        serialized_label_schema = json.load(read_file)
    return LabelSchemaMapper().backward(serialized_label_schema)
8f15a6e63864c6f737f465abb3011193ce136db6
3,646,962
def Dump(root):
    """Return a string representing the contents of an object.

    This function works only if root.ValidateExports() would pass.

    Args:
        root: the object to dump.
    Returns:
        A big string containing lines of the format:
            Object.SubObject.
            Object.SubObject.ParameterName = %r
    """
    h = Handle(root)
    out = []
    for i in h.ListExports(recursive=True):
        if i.endswith('.'):
            out.append('  %s' % (i,))
        else:
            out.append('  %s = %r' % (i, h.GetExport(i)))
    return '\n'.join(out)
7f6a9229f6b0c250a56324570fae249c0bf1d246
3,646,963
def clean_profit_data(profit_data):
    """Remove junk settlement days whose equity figures are all zero."""
    for i in list(range(len(profit_data)))[::-1]:
        profit = profit_data[i][1] == 0
        closed = profit_data[i][2] == 0
        hold = profit_data[i][3] == 0
        if profit and closed and hold:
            profit_data.pop(i)
    return profit_data
d1b7fe9d747a1149f04747b1b3b1e6eba363c639
3,646,964
def convert_single_example(ex_index, example, max_word_length,max_sen_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" text_sen = example.text_sen.strip().split() text_pos=example.text_pos.strip().split() text_ps = example.text_ps.strip().split() # 这里就是一个元素【日期】 if example.text_label is None: text_label=["o"*len(text_sen)] else: text_label = example.text_label.strip().split() #这里训练集的时候和句子一样长,测试集的时候为一个【'o'】 assert len(text_sen)==len(text_pos) assert len(text_ps)==1 assert len(text_label)==len(text_sen) text_word=[] for word in text_sen: text_word.append(tokenizer.tokenize(word)) #这里是二位列表 # [ # [许,海,明], # [喜 ,欢] , # [玩] # ] text_sen=text_word # Account for [SEP] with "- 1" #注意这里是句子的长度 原来的 if len(text_sen) > max_sen_length - 1: text_sen = text_sen[0:(max_sen_length - 1)] text_pos = text_pos[0:(max_sen_length - 1)] text_label=text_label[0:(max_sen_length - 1)] text_sen.append(["[SEP]"]) text_pos.append(["[SEP]"]) text_label.append("o") len_sen=len(text_word) len_pos=len(text_pos) len_label=len(text_label) while len(text_sen) < max_sen_length: text_sen.append(["[PAD]"]) text_pos.append(["[PAD]"]) text_label.append("o") ''' 处理单词级别 ''' #处理每个单词 # Account for [CLS] ,[SEP] with "- 2" #注意这里是每个单词的长度 for i,wordlist in enumerate(text_sen): if len(wordlist) > max_word_length - 2: text_word[i]=wordlist[0:(max_word_length - 2)] # 为每一个单词添加 [CLS] [SEP] segment_ids=[] #这是一个二维列表 len_words=[] for i,wordlist in enumerate(text_sen): wordlist.insert(0,"[CLS]") wordlist.append("[SEP]") len_words.append(len(wordlist)) while len(wordlist) < max_word_length: wordlist.append(["PAD"]) segment_ids.append([0]*len(wordlist)) text_sen[i]=wordlist input_word_ids =[] for tokens in text_sen: input_word_ids.append(tokenizer.convert_tokens_to_ids(tokens)) #这是一个二维 input_pos_ids = tokenizer.convert_pos_to_ids(text_pos) #这是一个list input_ps_id = tokenizer.convert_ps_to_ids(text_ps)[0] #这就是一个数字 0到48 input_label_ids= tokenizer.convert_label_to_ids(text_label) # 制作一个input_sen_mask 这是句子级别的 input_sen_mask = [1] * len_sen input_pos_mask = [1] * len_pos input_label_mask = [1]*len_label # Zero-pad up to the sequence length. 
while len(input_sen_mask) < max_sen_length: input_sen_mask.append(0) input_pos_mask.append(0) input_label_mask.append(0) #为每一个单词制作一个mask input_words_mask=[] for word_len in len_words: word_mask = [1] * word_len while len(word_mask) < max_word_length: word_mask.append(0) input_words_mask.append(word_mask) assert len(input_word_ids) == max_sen_length #句子长度 assert len(input_pos_ids) == max_sen_length #句子长度 assert len(input_label_ids)==max_sen_length assert len(input_word_ids[0])==max_word_length assert len(input_pos_mask) == max_sen_length assert len(input_label_mask) == max_sen_length assert len(input_words_mask) == max_sen_length assert len(segment_ids) == max_sen_length if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("句子单词: %s" % " ".join( ["["+" ".join(x)+"]" for x in text_word])) tf.logging.info("句子的ids: %s" % " ".join( ["[" + ",".join(list(map(str,word_ids)))+"]" for word_ids in input_word_ids])) tf.logging.info("句子的mask: %s" % " ".join([str(x) for x in input_sen_mask])) tf.logging.info("句子中每个单词的mask: %s" % " ".join( ["[" + ",".join(list(map(str,word_ids)))+"]" for word_ids in input_words_mask])) print("\n") tf.logging.info("input_pos_ids: %s" % " ".join( ["[" + ",".join(list(map(str, word_ids))) + "]" for word_ids in input_pos_ids])) tf.logging.info("input_pos_ids: %s" % " ".join( ["[" + ",".join(list(map(str, word_ids))) + "]" for word_ids in input_pos_ids])) tf.logging.info("input_label_ids: %s" % " ".join( ["[" + ",".join(list(map(str, word_ids))) + "]" for word_ids in input_label_ids])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("ps: %s (id = %d)" % (example.text_ps, input_ps_id)) feature = InputFeatures( input_words_ids=input_word_ids, input_pos_ids=input_pos_ids, input_ps_id=input_ps_id, input_label_ids=input_label_ids, input_sen_mask=input_sen_mask, input_words_mask=input_words_mask, input_pos_mask=input_pos_mask, input_label_mask=input_label_mask, segment_ids=segment_ids, is_real_example=True) return feature
faf13bd6db6a07a4546531cc968bad5443b95a12
3,646,965
from curses import ascii  # assumed source of the `ascii.iscntrl`/`ascii.isctrl` helpers used below


def scrub_literal(value):
    """
    Scrubs control characters from the incoming values to remove things like
    form feeds (\\f) and line breaks (\\n) which might cause problems with Jena.
    Data with these characters was found in the Backstage data.
    """
    if not value:
        return None
    if isinstance(value, int):
        return value
    # Drop anything flagged as a control character.
    text = ''.join([c for c in value
                    if not ascii.iscntrl(c) and not ascii.isctrl(c)])
    text = text.replace('"', '')
    text = text.replace('\ufffd', '')
    text = clean_char(text)
    if isinstance(text, bytes):
        # The original called str(text, errors='replace') on a str, which raises
        # in Python 3; decoding bytes appears to be the intended behaviour.
        text = text.decode(errors='replace')
    return text.strip()
e0e77bb0edecc810cc6fe051020936ca0ee9bf62
3,646,966
def mock_interface_settings_mismatch_protocol(mock_interface_settings, invalid_usb_device_protocol):
    """
    Fixture that yields mock USB interface settings that use an unsupported device protocol.
    """
    mock_interface_settings.getProtocol.return_value = invalid_usb_device_protocol
    return mock_interface_settings
61958439a2869d29532e50868efb39fe3da6c8b5
3,646,967
from typing import Any, Mapping
import shutil


def run_eval(exp_name: str) -> Mapping[str, Any]:
    """Run the dummy tracking evaluation and return the parsed metrics as a dict."""
    pred_log_dir = f"{_ROOT}/test_data/eval_tracking_dummy_logs_pred"
    gt_log_dir = f"{_ROOT}/test_data/eval_tracking_dummy_logs_gt"
    out_fpath = f"{_ROOT}/test_data/{exp_name}.txt"
    out_file = open(out_fpath, "w")
    eval_tracks(
        path_tracker_output_root=pred_log_dir,
        path_dataset_root=gt_log_dir,
        d_min=0,
        d_max=100,
        out_file=out_file,
        centroid_method="average",
        diffatt=None,
        category="VEHICLE",
    )
    out_file.close()

    with open(out_fpath, "r") as f:
        result_lines = f.readlines()
    result_vals = result_lines[0].strip().split(" ")

    fn, num_frames, mota, motp_c, motp_o, motp_i, idf1 = result_vals[:7]
    most_track, most_lost, num_fp, num_miss, num_sw, num_frag = result_vals[7:]

    result_dict = {
        "filename": fn,
        "num_frames": int(num_frames),
        "mota": float(mota),
        "motp_c": float(motp_c),
        "motp_o": float(motp_o),
        "motp_i": float(motp_i),
        "idf1": float(idf1),
        "most_track": float(most_track),
        "most_lost": float(most_lost),
        "num_fp": int(num_fp),
        "num_miss": int(num_miss),
        "num_sw": int(num_sw),
        "num_frag": int(num_frag),
    }
    shutil.rmtree(pred_log_dir)
    shutil.rmtree(gt_log_dir)
    return result_dict
3eaead879a39a30d2524da037d82f4d9b68d17e7
3,646,968
import os


def check_dir(path):
    """
    Check whether the directory exists; return True if it does.
    If it does not exist, create it and return False.
    """
    if not os.path.exists(path):
        os.makedirs(path)
        return False
    return True
523cca18de4be3f2359151747a86ab5b5dfad633
3,646,969
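A minimal usage sketch for check_dir above; the path is a made-up example and the function is assumed to be in scope.

if __name__ == "__main__":
    existed_before = check_dir("/tmp/example_output")   # creates the folder, returns False
    existed_after = check_dir("/tmp/example_output")    # folder now exists, returns True
    print(existed_before, existed_after)                # False True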
def MakeLocalSsds(messages, ssd_configs):
    """Constructs the repeated local_ssd message objects."""
    if ssd_configs is None:
        return []

    local_ssds = []
    disk_msg = (
        messages.
        AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk)
    interface_msg = disk_msg.InterfaceValueValuesEnum

    for s in ssd_configs:
        if s['interface'].upper() == 'NVME':
            interface = interface_msg.NVME
        else:
            interface = interface_msg.SCSI
        m = disk_msg(
            diskSizeGb=s['size'],
            interface=interface)
        local_ssds.append(m)
    return local_ssds
128e7a0358221fe3d93da4726924a7a783c65796
3,646,970
def valid_variant(s, is_coding=True):
    """
    Returns True if s is a valid coding or noncoding variant, else False.

    Parameters
    ----------
    s : `str`
        Variant string to validate.
    is_coding : `bool`
        Indicates if the variant string represents a coding variant.
    """
    _validate_str(s)
    if s == WILD_TYPE_VARIANT:
        return True
    else:
        if is_coding:
            for mut in s.split(", "):
                match = re_coding.match(mut)
                if match is None:
                    return False
            return True
        else:
            for mut in s.split(", "):
                match = re_noncoding.match(mut)
                if match is None:
                    return False
            return True
8cb6c37bed303a052a8655dfb0832bfba638f0d6
3,646,971
def is_icon_address_valid(address: str) -> bool:
    """Check whether address is in icon address format or not

    :param address: (str) address string including prefix
    :return: (bool)
    """
    try:
        if isinstance(address, str) and len(address) == 42:
            prefix, body = split_icon_address(address)
            if prefix == ICON_EOA_ADDRESS_PREFIX or \
                    prefix == ICON_CONTRACT_ADDRESS_PREFIX:
                return is_lowercase_hex_string(body)
    finally:
        pass

    return False
9666d22d04d568706356b7bafd0f202cb9178892
3,646,972
import base64


def _b64urldec(input: str) -> bytes:
    """
    Decode data from base64 urlsafe with stripped padding (as specified in the JWS RFC7515).
    """
    # The input is stripped of padding '='. These are redundant when decoding (only relevant
    # for concatenated sequences of base64 encoded data) but the decoder checks for them.
    # Appending two (the maximum number) of padding '=' is the easiest way to ensure it won't
    # choke on too little padding.
    return base64.urlsafe_b64decode(input + '==')
fb535072b560b8565916ae8ec3f32c61c41115d8
3,646,973
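A small round-trip check for _b64urldec above, assuming the function is importable in the current scope; the payload is arbitrary and chosen so its base64 form carries two padding characters, mirroring a stripped JWS segment.

import base64

payload = b"example"  # 7 bytes -> base64 form ends in two '=' characters
segment = base64.urlsafe_b64encode(payload).rstrip(b"=").decode("ascii")
assert _b64urldec(segment) == payload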
def get_sns_topic_arn(aws_creds, ec2_region):
    """
    Retrieves the sns topic arn for the account
    """
    rgt_client = ResourceGroupsTaggingClient(aws_creds, ec2_region, logger)
    sns_topic_arn = rgt_client.get_sns_topic_arn(SNS_TOPIC_TAG_KEY, SNS_TOPIC_TAG_VALUE)
    if not sns_topic_arn:
        raise SnsTopicNotFound(f"Account doesn't have the SNS topic tagged with "
                               f"key: '{SNS_TOPIC_TAG_KEY}' and value: '{SNS_TOPIC_TAG_VALUE}'")
    return sns_topic_arn
760caa77acf414eacf4bb177dd9252fe6578a505
3,646,974
import numpy as np
import scipy.interpolate


def create_bspline_basis(knots, spline_order, dt=0.02):
    """Create B-spline basis."""
    # The repeated boundary knots are appended as it is required for Cox de Boor
    # recursive algorithm. See https://math.stackexchange.com/questions/2817170/
    # what-is-the-purpose-of-having-repeated-knots-in-a-b-spline and the link
    # https://en.wikipedia.org/wiki/De_Boor%27s_algorithm.
    knots = list(knots)
    knots = [knots[0]] * spline_order + knots + [knots[-1]] * spline_order
    num_basis = len(knots) - spline_order - 1

    # Query token is in format: [knots, basis coefficients, spline order]
    # See https://docs.scipy.org/doc/scipy/reference/generated/
    # scipy.interpolate.splev.html
    query_token = [0, 0, spline_order]
    query_token[0] = np.array(knots)
    time_line = np.linspace(knots[0], knots[-1], int(np.round(knots[-1] / dt)) + 1)

    # Add column for the constant term.
    basis_matrix = np.zeros((len(time_line), num_basis + 1))
    basis_matrix[:, -1] = np.ones(len(time_line))  # Constant term.
    for basis_index in range(num_basis):
        basis_coefficients = np.zeros(num_basis)
        basis_coefficients[basis_index] = 1.0
        query_token[1] = basis_coefficients.tolist()
        base = scipy.interpolate.splev(time_line, query_token)
        basis_matrix[:, basis_index] = base
    return basis_matrix, time_line
8256b282ffc5f19a9e00d59c689e57664600b2f4
3,646,975
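A brief usage sketch for create_bspline_basis above; the knot vector and spline order are arbitrary illustration values.

knots = [0.0, 0.5, 1.0, 1.5, 2.0]
basis_matrix, time_line = create_bspline_basis(knots, spline_order=3, dt=0.02)
print(basis_matrix.shape)            # (len(time_line), num_basis + 1), last column is the constant term
print(time_line[0], time_line[-1])   # the time grid spans the knot range, here 0.0 .. 2.0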
def execute(
    device,
    commands,
    creds=None,
    incremental=None,
    with_errors=False,
    timeout=settings.DEFAULT_TIMEOUT,
    command_interval=0,
    force_cli=False
):
    """
    Connect to a ``device`` and sequentially execute all the commands in the
    iterable ``commands``.

    Returns a Twisted ``Deferred`` object, whose callback will get a sequence
    of all the results after the connection is finished.

    ``commands`` is usually just a list; however, you can also make it a
    generator, and have it and ``incremental`` share a closure to some state
    variables. This allows you to determine what commands to execute
    dynamically based on the results of previous commands. This implementation
    is experimental and it might be a better idea to have the ``incremental``
    callback determine what command to execute next; it could then be a method
    of an object that keeps state.

    BEWARE: Your generator cannot block; you must immediately decide what next
    command to execute, if any.

    Any ``None`` in the command sequence will result in a ``None`` being placed
    in the output sequence, with no command issued to the device.

    If any command returns an error, the connection is dropped immediately and
    the errback will fire with the failed command. You may set ``with_errors``
    to get the exception objects in the list instead.

    Connection failures will still fire the errback.

    `~trigger.exceptions.LoginTimeout` errors are always possible if the login
    process takes longer than expected and cannot be disabled.

    :param device: A `~trigger.netdevices.NetDevice` object
    :param commands: An iterable of commands to execute (without newlines).
    :param creds: (Optional) A 2-tuple of (username, password). If unset it
        will fetch it from ``.tacacsrc``.
    :param incremental: (Optional) A callback that will be called with an empty
        sequence upon connection and then called every time a result comes back
        from the device, with the list of all results.
    :param with_errors: (Optional) Return exceptions as results instead of
        raising them
    :param timeout: (Optional) Command response timeout in seconds. Set to
        ``None`` to disable. The default is in ``settings.DEFAULT_TIMEOUT``.
        `~trigger.exceptions.CommandTimeout` errors will result if a command
        seems to take longer to return than specified.
    :param command_interval: (Optional) Amount of time in seconds to wait
        between sending commands.
    :param force_cli: (Optional) Juniper-only: Force use of CLI instead of
        Junoscript.
    :returns: A Twisted ``Deferred`` object
    """
    execute_func = _choose_execute(device, force_cli=force_cli)
    return execute_func(device=device, commands=commands, creds=creds,
                        incremental=incremental, with_errors=with_errors,
                        timeout=timeout, command_interval=command_interval)
ead00377f7c50d8bfdb6da39a7a1fe1820d9bcc7
3,646,976
import hashlib
import sys


def _attempt_get_hash_function(hash_name, hashlib_used=hashlib, sys_used=sys):
    """Wrapper used to try to initialize the given hash function.

    If successful, returns the name of the hash function back to the user.
    Otherwise returns None.
    """
    try:
        _fetch_hash = getattr(hashlib_used, hash_name, None)
        if _fetch_hash is None:
            return None
        _attempt_init_of_python_3_9_hash_object(_fetch_hash, sys_used)
        return hash_name
    except ValueError:
        # if attempt_init_of_python_3_9 throws, this is typically due to FIPS being enabled
        # however, if we get to this point, the viable hash function check has either been
        # bypassed or otherwise failed to properly restrict the user to only the supported
        # functions. As such throw the UserError as an internal assertion-like error.
        return None
240d375ecf422c47be0366f34c5ccd9af44aa3e4
3,646,977
import requests


def get_proxy(usage: str):
    """
    Fetch a proxy through the web API.

    :param usage: target site, corresponding to a key of WEB_AVAILABLE_PROXIES
    :return: a usable proxy, or None
    """
    url = API_SERVER + "/proxy?usage={}".format(usage)
    res = requests.get(url, timeout=5)
    try:
        if res.status_code == 200:
            return res.json().get("resource").get("proxy")
        else:
            return None
    except Exception:
        return None
2c836f0a7a4dce2e5442080ee93a1b32d10dac3d
3,646,978
def column_names_get(subject: str) -> tuple:
    """ Returns column names. """
    if subject == c.SUBJECT.PLANETS:
        return c.HEADERS.PLANETS
    elif subject == c.SUBJECT.STARSHIPS:
        return c.HEADERS.STARSHIPS
    elif subject == c.SUBJECT.VEHICLES:
        return c.HEADERS.VEHICLES
    elif subject == c.SUBJECT.PEOPLE:
        return c.HEADERS.PEOPLE
    else:
        raise ValueError(f'There are no column names for the {subject} subject.')
a544574d5ac66e2ea7e045fa2bba37fe78df20f5
3,646,979
import os


def is_morepath_template_auto_reload():
    """ Returns True if auto reloading should be enabled. """
    auto_reload = os.environ.get("MOREPATH_TEMPLATE_AUTO_RELOAD", "")
    return auto_reload.lower() in {"1", "yes", "true", "on"}
72839bf2ab0a70cefe4627e294777533d8b0087f
3,646,980
def relabel_prometheus(job_config):
    """Get some prometheus configuration labels."""
    relabel = {
        'path': '__metrics_path__',
        'scheme': '__scheme__',
    }

    labels = {
        relabel[key]: value
        for key, value in job_config.items()
        if key in relabel.keys()
    }

    # parse __param_ parameters
    for param, value in job_config.get('params', {}).items():
        labels['__param_%s' % (param,)] = value

    return labels
eb08f617903fe66f462a5922f8149fd8861556ad
3,646,981
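A quick illustration of what relabel_prometheus above produces for a hypothetical scrape-job config.

job_config = {
    "path": "/metrics",
    "scheme": "https",
    "params": {"module": ["http_2xx"]},
}
print(relabel_prometheus(job_config))
# {'__metrics_path__': '/metrics', '__scheme__': 'https', '__param_module': ['http_2xx']}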
import random


def q_geography_capital():
    """Ask what the capital of a given country is."""
    question = QuestionGenerator()
    question.set_type('geography')

    # select country
    all_countries = facts.get_geography_countries_list()
    country = random.choice(all_countries)

    # formulate question (German: "What is the capital of {country}?")
    question.ask(f"Was ist die Hauptstadt von {country}")

    # answer
    capital = facts.get_geography_capital(country)
    question.set_answer(capital)

    # other options
    other_capitals = [c for c in facts.get_geography_capitals_set() if c != capital]
    for c in other_capitals:
        question.add_wrong_option(c)

    return question.create(num_options=3)
63362aae1eb0e117da3ade0c9bff22edb5504689
3,646,982
from functools import reduce


def binary_stabilizer_to_pauli_stabilizer(stabilizer_tableau):
    """
    Convert a stabilizer tableau to a list of PauliTerms

    :param stabilizer_tableau: Stabilizer tableau to turn into pauli terms
    :return: a list of PauliTerms representing the tableau
    :rtype: List of PauliTerms
    """
    stabilizer_list = []
    num_qubits = (stabilizer_tableau.shape[1] - 1) // 2
    for nn in range(stabilizer_tableau.shape[0]):  # iterate through the rows
        stabilizer_element = []
        for ii in range(num_qubits):
            if stabilizer_tableau[nn, ii] == 1 and stabilizer_tableau[nn, ii + num_qubits] == 0:
                stabilizer_element.append(sX(ii))
            elif stabilizer_tableau[nn, ii] == 0 and stabilizer_tableau[nn, ii + num_qubits] == 1:
                stabilizer_element.append(sZ(ii))
            elif stabilizer_tableau[nn, ii] == 1 and stabilizer_tableau[nn, ii + num_qubits] == 1:
                stabilizer_element.append(sY(ii))
        stabilizer_term = reduce(lambda x, y: x * y, stabilizer_element) * \
            ((-1) ** stabilizer_tableau[nn, -1])
        stabilizer_list.append(stabilizer_term)
    return stabilizer_list
78183d9ecd436267d7732ba50cb6591fea54984e
3,646,983
def checkGroup(self, group, colls):
    """
    Return the elements of ``group`` found in ``colls``, but only when every
    element of ``group`` is present; otherwise return an empty list.

    Args:
        group: candidate elements to look up.
        colls: collection to check membership against.

    Returns:
        The matched elements, or [] if any element of ``group`` is missing.
    """
    cut = []
    for elem in group:
        if elem in colls:
            cut.append(elem)
    if len(cut) == len(group):
        return cut
    else:
        return []
ca30648c536bcf26a1438d908f93a5d3dcc131c9
3,646,984
def get_node_shapes(input_graph_def, target_nodes):
    """Get shapes of target nodes from input_graph_def, shapes may be partial"""
    node_shapes = []
    for target in target_nodes:
        for node in input_graph_def.node:
            if node.name == target:
                if 'shape' not in node.attr:
                    print("Warning: Failed to get output shape of node: {}".format(node))
                node_shapes.append(
                    tensor_shape.as_shape(node.attr['shape'].shape).as_list())
    return node_shapes
0a70a81f0be826697d47b52dc2a2e63c0c73b3b4
3,646,985
import numpy as np


def calculate_cost(A3, Y):
    """
    Compute the value of the cost function.

    Args:
        A3: output of forward propagation, of shape (output size, number of examples)
        Y: "true" label vector, same shape as A3

    Return:
        cost: value of the cost function
    """
    m = Y.shape[1]
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(
        -np.log(1 - A3), 1 - Y)
    cost = 1. / m * np.nansum(logprobs)
    return cost
0a85baae5acc6f9ceec417f2942727cd3d96a34e
3,646,986
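A tiny numeric check of calculate_cost above with made-up predictions and labels.

import numpy as np

A3 = np.array([[0.9, 0.2, 0.7]])   # predicted probabilities, shape (1, 3)
Y = np.array([[1, 0, 1]])          # true labels, same shape
print(calculate_cost(A3, Y))       # binary cross-entropy averaged over 3 examples, ~0.228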
import sys


def _replace_sysarg(match):
    """Return the substitution for the $<n> syntax, e.g. $1 for the
    first command line parameter.
    """
    return sys.argv[int(match.group(1))]
efd338c537ecf2ef9113bc71d7970563ac9e5553
3,646,987
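A sketch of how _replace_sysarg above would typically be wired into re.sub; the command-line values are simulated for illustration.

import re
import sys

sys.argv = ["prog", "alpha", "beta"]   # simulate command-line arguments
template = "first=$1 second=$2"
print(re.sub(r"\$(\d+)", _replace_sysarg, template))   # first=alpha second=beta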
def qsammobilenetv2(**kwargs):
    """Constructs a QSAMMobileNetv2 model."""
    model = QSAMMobileNetV2(**kwargs)
    return model
4d0b9f23a3c40ab2386d30fe45ca70a401f41b1a
3,646,988
def get_frame_labels_fields(
    sample_collection,
    frame_labels_field=None,
    frame_labels_prefix=None,
    frame_labels_dict=None,
    dataset_exporter=None,
    required=False,
    force_dict=False,
):
    """Gets the frame label field(s) of the sample collection matching the
    specified arguments.

    Provide one of ``frame_labels_field``, ``frame_labels_prefix``,
    ``frame_labels_dict``, or ``dataset_exporter``.

    Args:
        sample_collection: a :class:`SampleCollection`
        frame_labels_field (None): the name of the frame labels field to export
        frame_labels_prefix (None): a frame labels field prefix; the returned
            labels dict will contain all frame-level fields whose name starts
            with the given prefix
        frame_labels_dict (None): a dictionary mapping frame-level label field
            names to keys
        dataset_exporter (None): a
            :class:`fiftyone.utils.data.exporters.DatasetExporter` to use to
            choose appropriate frame label field(s)
        required (False): whether at least one matching frame field must be
            found
        force_dict (False): whether to always return a labels dict rather than
            an individual label field

    Returns:
        a frame label field or dict mapping frame label fields to keys
    """
    if frame_labels_prefix is not None:
        frame_labels_dict = _get_frame_labels_dict_for_prefix(
            sample_collection, frame_labels_prefix
        )

    if frame_labels_dict is not None:
        return frame_labels_dict

    if frame_labels_field is None and dataset_exporter is not None:
        frame_labels_field = _get_default_frame_label_fields_for_exporter(
            sample_collection, dataset_exporter, required=required
        )

    if frame_labels_field is None and required:
        raise ValueError(
            "Unable to find any frame label fields matching the provided "
            "arguments"
        )

    if (
        force_dict
        and frame_labels_field is not None
        and not isinstance(frame_labels_field, dict)
    ):
        return {frame_labels_field: frame_labels_field}

    return frame_labels_field
0bdb1346f154f125b2f39f638929ae6e5d661db7
3,646,989
def _rpc_code_to_error_code(rpc_code):
    """Maps an RPC code to a platform error code."""
    return _RPC_CODE_TO_ERROR_CODE.get(rpc_code, exceptions.UNKNOWN)
7cb6ef3d7b751c915673f99a88800bdb53e81f72
3,646,990
import numpy as np


def peak_values(dataframe_x, dataframe_y, param):
    """Outputs x (potentials) and y (currents) values from data indices
    given by peak_detection function.

    Parameters
    ----------
    dataframe_x : pd.DataFrame
        should be in the form of a pandas DataFrame column. For example,
        df['potentials'] could be input as the column of x data.
    dataframe_y : pd.DataFrame
        should be in the form of a pandas DataFrame column. For example,
        df['currents'] could be input as the column of y data.
    param : dict
        Dictionary of parameters governing the CV run.

    Returns
    -------
    peak_array : np.array
        Array of coordinates at peaks in the following order: potential of
        peak on top curve, current of peak on top curve, potential of peak
        on bottom curve, current of peak on bottom curve
    """
    peak_values = []
    potential_p, potential_n = split(dataframe_x, param)
    current_p, current_n = split(dataframe_y, param)
    peak_top_index = peak_detection(current_p, 'positive')
    peak_bottom_index = peak_detection(current_n, 'negative')
    # TOPX (bottom part of curve is the first part of the DataFrame)
    peak_values.append(potential_p[(peak_top_index['peak_top'])])
    # TOPY
    peak_values.append(current_p[(peak_top_index['peak_top'])])
    # BOTTOMX
    peak_values.append(potential_n[(peak_bottom_index['peak_bottom'])])
    # BOTTOMY
    peak_values.append(current_n[(peak_bottom_index['peak_bottom'])])
    peak_array = np.array(peak_values)
    return peak_array
3e0d656d80ef5806abcd2d71e80be544eca585cb
3,646,991
def _infer_geometry(value):
    """Helper method that tries to infer the $geometry shape for a given value"""
    if isinstance(value, dict):
        if "$geometry" in value:
            return value
        elif 'coordinates' in value and 'type' in value:
            return {"$geometry": value}
        raise InvalidQueryError("Invalid $geometry dictionary should have "
                                "type and coordinates keys")
    elif isinstance(value, (list, set)):
        # TODO: shouldn't we test value[0][0][0][0] to see if it is MultiPolygon?
        # TODO: should both TypeError and IndexError be alike interpreted?
        try:
            value[0][0][0]
            return {"$geometry": {"type": "Polygon", "coordinates": value}}
        except (TypeError, IndexError):
            pass
        try:
            value[0][0]
            return {"$geometry": {"type": "LineString", "coordinates": value}}
        except (TypeError, IndexError):
            pass
        try:
            value[0]
            return {"$geometry": {"type": "Point", "coordinates": value}}
        except (TypeError, IndexError):
            pass

    raise InvalidQueryError("Invalid $geometry data. Can be either a dictionary "
                            "or (nested) lists of coordinate(s)")
b381d647881bac5122f420cb9806e90ebf56b716
3,646,992
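Examples of the shapes _infer_geometry above infers from nesting depth; the coordinates are arbitrary and only the non-error paths are exercised.

print(_infer_geometry([1.0, 2.0]))
# {'$geometry': {'type': 'Point', 'coordinates': [1.0, 2.0]}}
print(_infer_geometry([[0, 0], [1, 1]]))
# {'$geometry': {'type': 'LineString', 'coordinates': [[0, 0], [1, 1]]}}
print(_infer_geometry([[[0, 0], [0, 1], [1, 1], [0, 0]]]))
# {'$geometry': {'type': 'Polygon', 'coordinates': [[[0, 0], [0, 1], [1, 1], [0, 0]]]}}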
import typing

import numpy as np
import scipy.linalg

import vorpy.tensor


def random_rotation_operator_tensor(operand_space_shape: typing.Tuple[int, ...]) -> np.ndarray:
    """NOTE: Not a uniform distribution."""
    if vorpy.tensor.dimension_of_shape(operand_space_shape) == 0:
        raise Exception(f'invalid dimension for vector space having rotation')
    A = random_antisymmetric_operator_tensor(np.pi, operand_space_shape)
    return scipy.linalg.expm(vorpy.tensor.as_linear_operator(A)).reshape(A.shape)
fd4442bb6178824fe71c6050f44539f8af34c149
3,646,993
import platform
import sys
# The original imported OrderedDict from typing, which cannot be instantiated;
# collections.OrderedDict is the intended class.
from collections import OrderedDict

from . import highlevel


def get_system_details(backends=True):
    """Return a dictionary with information about the system
    """
    buildno, builddate = platform.python_build()
    if sys.maxunicode == 65535:
        # UCS2 build (standard)
        unitype = 'UCS2'
    else:
        # UCS4 build (most recent Linux distros)
        unitype = 'UCS4'
    bits, linkage = platform.architecture()

    d = {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'implementation': getattr(platform, 'python_implementation',
                                  lambda: 'n/a')(),
        'python': platform.python_version(),
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unitype,
        'bits': bits,
        'pyvisa': __version__,
        'backends': OrderedDict()
    }

    if backends:
        for backend in highlevel.list_backends():
            if backend.startswith('pyvisa-'):
                backend = backend[7:]

            try:
                cls = highlevel.get_wrapper_class(backend)
            except Exception as e:
                d['backends'][backend] = ['Could not instantiate backend',
                                          '-> %s' % str(e)]
                continue

            try:
                d['backends'][backend] = cls.get_debug_info()
            except Exception as e:
                d['backends'][backend] = ['Could not obtain debug info',
                                          '-> %s' % str(e)]

    return d
d33237bfe1d51c374e9e7e698cd27a9927ddf0df
3,646,994
from tensorflow import keras  # assumed source of the `keras` name used below


def get_early_stopping(callback_config: dict):
    """
    Get tf keras EarlyStopping callback.

    Args:
        callback_config: config info to build callback
    """
    return keras.callbacks.EarlyStopping(**callback_config)
6f9f5e26b69765ff817c89b6ebbb59a62bc76266
3,646,995
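A sketch of calling get_early_stopping above with a hypothetical config dict; the keys simply mirror keras.callbacks.EarlyStopping arguments.

callback_config = {
    "monitor": "val_loss",
    "patience": 5,
    "restore_best_weights": True,
}
early_stopping = get_early_stopping(callback_config)
# model.fit(..., callbacks=[early_stopping])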
def decode(rdf, hint=[]):
    """Decode ReDIF document."""
    def decode(encoding):
        rslt = rdf.decode(encoding)
        if rslt.lower().find("template-type") == -1:
            raise RuntimeError("Decoding Error")
        return rslt

    encodings = hint + ["windows-1252", "utf-8", "utf-16", "latin-1"]
    if rdf[:3] == b"\xef\xbb\xbf":
        encodings = ["utf-8-sig"] + encodings
    for enc in encodings:
        try:
            return decode(enc)
        except Exception:
            continue
    raise RuntimeError("Decoding Error")
f42eed2caaba90f4d22622643885b4d87b9df98b
3,646,996
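A small demonstration of decode above on an in-memory ReDIF fragment; the record content is invented.

rdf_bytes = "Template-Type: ReDIF-Paper 1.0\nTitle: An example\n".encode("windows-1252")
print(decode(rdf_bytes)[:13])   # 'Template-Type'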
import numpy as np


def pad_square(x):
    """ Pad image to meet square dimensions """
    r, c = x.shape
    d = (c - r) / 2
    pl, pr, pt, pb = 0, 0, 0, 0
    if d > 0:
        # The original assigned to `pd`, leaving `pb` at 0; `pb` is what np.pad uses.
        pt, pb = int(np.floor(d)), int(np.ceil(d))
    else:
        pl, pr = int(np.floor(-d)), int(np.ceil(-d))
    return np.pad(x, ((pt, pb), (pl, pr)), 'minimum')
3a0b248f9403d0cb392e1aff306af435b5a43396
3,646,997
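A quick check of pad_square above on a non-square array (using the corrected pt/pb assignment); the data is made up.

import numpy as np

x = np.arange(6, dtype=float).reshape(2, 3)   # 2 rows, 3 columns
y = pad_square(x)
print(y.shape)   # (3, 3): one row of the array minimum is added to square it up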
from typing import Any, Tuple, Union

import gym


def get_env_properties(
    env: Union[gym.Env, VecEnv], network: Union[str, Any] = "mlp"
) -> Tuple[Any, ...]:
    """
    Finds important properties of environment

    :param env: Environment that the agent is interacting with
    :type env: Gym Environment
    :param network: Type of network architecture, eg. "mlp", "cnn"
    :type network: str
    :returns: (State space dimensions, Action space dimensions,
        discreteness of action space and action limit (highest action value)
    :rtype: int, float, ...; int, float, ...; bool; int, float, ...
    """
    if network == "cnn":
        state_dim = env.framestack
    elif network == "mlp":
        state_dim = env.observation_space.shape[0]
    elif isinstance(network, (BasePolicy, BaseValue)):
        state_dim = network.state_dim
    elif isinstance(network, BaseActorCritic):
        state_dim = network.actor.state_dim
    else:
        raise TypeError

    if isinstance(env.action_space, gym.spaces.Discrete):
        action_dim = env.action_space.n
        discrete = True
        action_lim = None
    elif isinstance(env.action_space, gym.spaces.Box):
        action_dim = env.action_space.shape[0]
        action_lim = env.action_space.high[0]
        discrete = False
    else:
        raise NotImplementedError

    return state_dim, action_dim, discrete, action_lim
6a377830cb24bc215b7d1c6b09b08ed63ab383ef
3,646,998
import json
import os

import cv2


def gen_abc_json(abc_gt_dir, abc_json_path, image_dir):
    """
    Generate COCO-format json annotations from ABCNet ground-truth annotations.

    :param abc_gt_dir: directory containing the ABCNet txt ground-truth files
    :param abc_json_path: output path of the COCO-format json file
    :param image_dir: directory containing the images
    :return:
    """
    # Desktop Latin_embed.
    cV2 = [' ', '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/',
           '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@',
           'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
           'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_', '`',
           'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
           'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~']

    dataset = {
        'licenses': [],
        'info': {},
        'categories': [],
        'images': [],
        'annotations': []
    }

    with open('./classes.txt') as f:
        classes = f.read().strip().split()
    for i, cls in enumerate(classes, 1):
        dataset['categories'].append({
            'id': i,
            'name': cls,
            'supercategory': 'beverage',
            'keypoints': ['mean', 'xmin', 'x2', 'x3', 'xmax',
                          'ymin', 'y2', 'y3', 'ymax', 'cross']  # only for BDN
        })

    def get_category_id(cls):
        for category in dataset['categories']:
            if category['name'] == cls:
                return category['id']

    # Iterate over the ABCNet txt annotations.
    indexes = sorted([f.split('.')[0] for f in os.listdir(abc_gt_dir)])
    print(indexes)

    j = 1  # annotation (bounding box) id
    for index in indexes:
        # if int(index) > 3: continue
        print('Processing: ' + index)
        im = cv2.imread(os.path.join(image_dir, '{}.jpg'.format(index)))
        height, width, _ = im.shape
        dataset['images'].append({
            'coco_url': '',
            'date_captured': '',
            'file_name': index + '.jpg',
            'flickr_url': '',
            'id': int(index.split('_')[-1]),  # img_1
            'license': 0,
            'width': width,
            'height': height
        })
        anno_file = os.path.join(abc_gt_dir, '{}.txt'.format(index))
        with open(anno_file) as f:
            lines = [line for line in f.readlines() if line.strip()]

        # No usable annotations: skip this image.
        if len(lines) == 0:
            continue

        for i, line in enumerate(lines):
            pttt = line.strip().split('||||')
            parts = pttt[0].split(',')
            ct = pttt[-1].strip()
            cls = 'text'
            segs = [float(kkpart) for kkpart in parts[:16]]
            xt = [segs[ikpart] for ikpart in range(0, len(segs), 2)]
            yt = [segs[ikpart] for ikpart in range(1, len(segs), 2)]
            xmin = min([xt[0], xt[3], xt[4], xt[7]])
            ymin = min([yt[0], yt[3], yt[4], yt[7]])
            xmax = max([xt[0], xt[3], xt[4], xt[7]])
            ymax = max([yt[0], yt[3], yt[4], yt[7]])
            width = max(0, xmax - xmin + 1)
            height = max(0, ymax - ymin + 1)
            if width == 0 or height == 0:
                continue

            max_len = 100
            recs = [len(cV2) + 1 for ir in range(max_len)]
            ct = str(ct)
            print('rec', ct)
            for ix, ict in enumerate(ct):
                if ix >= max_len:
                    continue
                if ict in cV2:
                    recs[ix] = cV2.index(ict)
                else:
                    recs[ix] = len(cV2)

            dataset['annotations'].append({
                'area': width * height,
                'bbox': [xmin, ymin, width, height],
                'category_id': get_category_id(cls),
                'id': j,
                'image_id': int(index.split('_')[-1]),  # img_1
                'iscrowd': 0,
                'bezier_pts': segs,
                'rec': recs
            })
            j += 1

    # Write the json file.
    folder = os.path.dirname(abc_json_path)
    if not os.path.exists(folder):
        os.makedirs(folder)
    with open(abc_json_path, 'w') as f:
        json.dump(dataset, f)
5b565fa632eeb69313035bc14ba35021de321142
3,646,999