Columns: content (string, lengths 35–762k), sha1 (string, length 40), id (int64, 0–3.66M)
def efficientnet_b0(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """EfficientNet-B0"""
    model_name = "tf_efficientnet_b0"
    default_cfg = default_cfgs[model_name]
    # NOTE for train, drop_rate should be 0.2
    # kwargs['drop_connect_rate'] = 0.2  # set when training, TODO add as cmd arg
    model = _gen_efficientnet(
        model_name=model_name, channel_multiplier=1.0, depth_multiplier=1.0,
        num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfgs[model_name], num_classes)
    return model
b01644546a005b856ba35a9965fbff33e9bae132
3,645,900
import urllib.parse


def application(environ, start_response):
    """
    Make Passenger interpret PATH_INFO the same way that the WSGI standard does.
    """
    environ["PATH_INFO"] = urllib.parse.unquote(environ["PATH_INFO"])
    return app.app(environ, start_response)
3de57b31206a374da0378788f20e7bd8b1eca9af
3,645,901
from railrl.torch.pytorch_util import set_gpu_mode import random def run_experiment_here( experiment_function, variant=None, exp_id=0, seed=0, use_gpu=True, # Logger params: exp_prefix="default", snapshot_mode='last', snapshot_gap=1, git_infos=None, script_name=None, logger=default_logger, trial_dir_suffix=None, randomize_seed=False, **setup_logger_kwargs ): """ Run an experiment locally without any serialization. :param experiment_function: Function. `variant` will be passed in as its only argument. :param exp_prefix: Experiment prefix for the save file. :param variant: Dictionary passed in to `experiment_function`. :param exp_id: Experiment ID. Should be unique across all experiments. Note that one experiment may correspond to multiple seeds,. :param seed: Seed used for this experiment. :param use_gpu: Run with GPU. By default False. :param script_name: Name of the running script :param log_dir: If set, set the log directory to this. Otherwise, the directory will be auto-generated based on the exp_prefix. :return: """ if variant is None: variant = {} variant['exp_id'] = str(exp_id) if randomize_seed or (seed is None and 'seed' not in variant): seed = random.randint(0, 100000) variant['seed'] = str(seed) reset_execution_environment(logger=logger) actual_log_dir = setup_logger( exp_prefix=exp_prefix, variant=variant, exp_id=exp_id, seed=seed, snapshot_mode=snapshot_mode, snapshot_gap=snapshot_gap, git_infos=git_infos, script_name=script_name, logger=logger, trial_dir_suffix=trial_dir_suffix, **setup_logger_kwargs ) set_seed(seed) set_gpu_mode(use_gpu) run_experiment_here_kwargs = dict( variant=variant, exp_id=exp_id, seed=seed, use_gpu=use_gpu, exp_prefix=exp_prefix, snapshot_mode=snapshot_mode, snapshot_gap=snapshot_gap, git_infos=git_infos, script_name=script_name, **setup_logger_kwargs ) save_experiment_data( dict( run_experiment_here_kwargs=run_experiment_here_kwargs ), actual_log_dir ) return experiment_function(variant)
ee8b0f727027d8dcee804565606a7f82f2c77ca9
3,645,902
from typing import Tuple


def normalize_chunks(chunks: Tuple[Tuple[int, int]]) -> Tuple[Tuple[int, int]]:
    """
    Minimize the amount of chunks needed to describe a smaller portion of a file.

    :param chunks: A tuple with (start, end,) offsets
    :return: A tuple containing as few as possible (start, end,) offsets
    """
    out = []
    start1, end1 = chunks[0]
    if len(chunks) > 1:
        for start2, end2 in chunks[1:]:
            if start2 == end1:
                end1 = end2
            else:
                out.append((start1, end1))
                start1, end1 = start2, end2
    out.append((start1, end1))
    return tuple(out)
d49d1abed0573a86e0eeee5d2e5ed2e129f3274e
3,645,903
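A minimal usage sketch for the normalize_chunks helper above; the chunk offsets are invented for illustration:

merged = normalize_chunks(((0, 10), (10, 25), (40, 50)))
assert merged == ((0, 25), (40, 50))  # adjacent chunks merged, the gap preserved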
def learning_rate_schedule(adjusted_learning_rate, lr_warmup_init, lr_warmup_step,
                           first_lr_drop_step, second_lr_drop_step, global_step):
    """Handles linear scaling rule, gradual warmup, and LR decay."""
    # lr_warmup_init is the starting learning rate; the learning rate is linearly
    # scaled up to the full learning rate after `lr_warmup_step` before decaying.
    linear_warmup = (lr_warmup_init +
                     (tf.cast(global_step, dtype=tf.float32) / lr_warmup_step *
                      (adjusted_learning_rate - lr_warmup_init)))
    learning_rate = tf.where(global_step < lr_warmup_step,
                             linear_warmup, adjusted_learning_rate)
    lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step],
                   [0.01, second_lr_drop_step]]
    for mult, start_global_step in lr_schedule:
        learning_rate = tf.where(global_step < start_global_step, learning_rate,
                                 adjusted_learning_rate * mult)
    return learning_rate
26021c7cbdb264ddc84fad94d4a01b51913f3a72
3,645,904
import logging


def set_power_state_server(power_state: ServerPowerState) -> ServerPowerState:
    """Record the current power limit and set power limit using nvidia-smi."""
    # Record current power limits.
    current_limits = None
    if power_state.power_limit:
        cmd = "nvidia-smi --query-gpu=power.limit --format=csv,noheader,nounits"
        logging.info(f"Getting current GPU power limits: {cmd}")
        output = run_command(cmd, get_output=True, tee=False)
        current_limits = [float(line) for line in output]

        # Set power limit to the specified value.
        cmd = f"sudo nvidia-smi -pl {power_state.power_limit}"
        logging.info(f"Setting current GPU power limits: {cmd}")
        run_command(cmd)

    if power_state.cpu_freq:
        set_cpufreq(power_state.cpu_freq)

    return ServerPowerState(current_limits, None)
8a9ac60dbd58dedf5f39386a7233b88b7cc5aa79
3,645,905
from typing import Union


def score_normalization(extracted_score: Union[str, None]):
    """
    Sofa score normalization.
    If available, returns the integer value of the SOFA score.
    """
    score_range = list(range(0, 30))
    if (extracted_score is not None) and (int(extracted_score) in score_range):
        return int(extracted_score)
74501e9351296037ecc90ae647155e3c6b76ae01
3,645,906
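A short usage sketch for score_normalization above; the inputs are made up:

assert score_normalization("7") == 7
assert score_normalization(None) is None
assert score_normalization("45") is None  # outside the accepted 0-29 range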
import os

from google.auth.transport import requests
from google.oauth2 import service_account


def get_session():
    """Creates an authorized Requests Session."""
    credentials = service_account.Credentials.from_service_account_file(
        filename=os.environ["GOOGLE_APPLICATION_CREDENTIALS"],
        scopes=["https://www.googleapis.com/auth/cloud-platform"],
    )
    # Create a requests Session object with the credentials.
    session = requests.AuthorizedSession(credentials)
    return session
3afcba8e4cb9b71f384110d30f39fa737fc7a6d9
3,645,907
def wrap(wrapping_key_public, plaintext):
    """
    RSA-OAEP key wrapping.

    Args:
        wrapping_key_public: The public key of the RSA wrapping key
        plaintext: The plaintext key to wrap
    """
    rsa_cipher = PKCS1_OAEP.new(
        key=wrapping_key_public,
        hashAlgo=SHA256,
        mgfunc=lambda x, y: pss.MGF1(x, y, SHA1))
    return rsa_cipher.encrypt(plaintext)
171074a46440184138ccb1684754f328afc50efe
3,645,908
import os
from datetime import datetime


def get_output_filenames(output_path: str):
    """Returns a dict of output filenames."""
    now = datetime.now()
    now_string = now.strftime("%Y%m%d_%H%M%S")
    filenames = {
        'train': os.path.join(output_path, "train_split_" + now_string + ".csv"),
        'val': os.path.join(output_path, "val_split_" + now_string + ".csv")
    }
    write_file("/tmp/train.txt", filenames['train'])
    write_file("/tmp/val.txt", filenames['val'])
    return filenames
b943544c03009ca470aeb3aac0502baafe50b44f
3,645,909
def compute_horizontal_vessel_purchase_cost(W, D, F_M):
    """
    Return the purchase cost [Cp; in USD] of a horizontal vessel,
    including the cost of platforms and ladders.

    Parameters
    ----------
    W : float
        Weight [lb].
    D : float
        Diameter [ft].
    F_M : float
        Vessel material factor.

    Notes
    -----
    The purchase cost is given by [1]_. See source code for details.
    The purchase cost is scaled according to BioSTEAM's Chemical
    Plant Cost Index, `biosteam.CE`.

    References
    ----------
    .. [1] Seider, W. D., Lewin, D. R., Seader, J. D., Widagdo, S., Gani, R.,
        & Ng, M. K. (2017). Product and Process Design Principles. Wiley.
        Cost Accounting and Capital Cost Estimation (Chapter 16)

    """
    # C_v: Vessel cost
    # C_pl: Platforms and ladders cost
    C_v = exp(5.6336 - 0.4599*ln(W) + 0.00582*ln(W)**2)
    C_pl = 2275*D**0.20294
    return bst.CE/567 * (F_M * C_v + C_pl)
22d38ffc38dddb992d2fd7b2c20c3dc1d0ddb53d
3,645,910
def format_dev_sub_dev_id(pciIdPair):
    """
    pciIdPair (int pci device id, int pci sub device id or None)
    """
    if pciIdPair[1] is None:
        return "(0x%08X, None)" % pciIdPair[0]
    return "(0x%08X, 0x%08X)" % pciIdPair
fded71eee57f4fac60175bfb015845bf1eba58f7
3,645,911
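A quick usage sketch for format_dev_sub_dev_id above, with made-up PCI ids:

print(format_dev_sub_dev_id((0x10DE, None)))    # (0x000010DE, None)
print(format_dev_sub_dev_id((0x10DE, 0x1467)))  # (0x000010DE, 0x00001467)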
def mychats():
    """
    Show Chats where I can write
    :return: { error: 0, chats: [...Chat] }
    """
    result = {
        'error': 0,
        'chats': []
    }
    if 'user_id' in session:
        chats_rows = query_db('SELECT * FROM chats WHERE user1_id = ? OR user2_id = ?',
                              [session['user_id'], session['user_id']])
        result['chats'] = chats_rows
        # for chat in query_db('select * from chats'):
        #     print(chat['name'])
    return result
4af7cd34fb8649ed10723b258e7a864e3e12edc2
3,645,912
def polynom_prmzt(x, t, order):
    """
    Polynomial (deterministic) parameterization of fast variables (Y).

    NB: Only valid for system settings of Wilks'2005.

    Note: In order to observe an improvement in DA performance with higher
    orders, the EnKF must be reasonably tuned. There is very little
    improvement gained above order=1.
    """
    if order == 4:
        # From Wilks
        d = 0.262 + 1.45*x - 0.0121*x**2 - 0.00713*x**3 + 0.000296*x**4
    elif order == 3:
        # From Arnold
        d = 0.341 + 1.30*x - 0.0136*x**2 - 0.00235*x**3
    elif order == 1:
        # From me -- see AdInf/illust_parameterizations.py
        d = 0.74 + 0.82*x
    elif order == 0:
        # From me -- see AdInf/illust_parameterizations.py
        d = 3.82
    elif order == -1:
        # Leave as dxdt_trunc
        d = 0
    else:
        raise NotImplementedError
    return d
80d3f9563c5f8a04a65de7d2d22f5d49d35c71fe
3,645,913
import sys def new_binning(xmin, xmax, nbin=25, bin_type='lin', out_type=int, custom_bins=None): """ Define the new binning. Parameters ---------- Returns ------- array the array with the edges of the new binning """ if bin_type == 'lin' and custom_bins is None: binning_ = np.linspace(xmin, xmax, num=nbin+1, dtype=out_type) elif bin_type == 'log' and custom_bins is None: if xmin == 0: xmin = 1 binning_ = np.logspace(np.log10(xmin), np.log10(xmax), num=nbin+1, dtype=out_type) elif type(custom_bins) == list or type(custom_bins) == np.ndarray: binning_ = np.array(custom_bins) else: logger.info('ERROR: Invalid binning type. Choose lin or log, or customize it.') sys.exit() logger.info('Multipole binning:%s'%str(binning_)) return binning_
cb4eacdb4648a968ff9dfcd3571ddfe157799588
3,645,914
def decode(s):
    """
    Deserialize an EDS object from an EDS string.
    """
    lexer = _EDSLexer.lex(s.splitlines())
    return _decode_eds(lexer)
3c7eb8ac7e570aeb1297b052e35c804dd27b0f49
3,645,915
import errno
import os


def pid_exists(pid):
    """Check whether pid exists in the current process table."""
    if pid < 0:
        return False
    try:
        os.kill(pid, 0)
    except OSError as e:
        return e.errno == errno.EPERM
    else:
        return True
0ebefcc958e629aac6d06e6d79d8aaa1acf7607b
3,645,916
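A brief usage sketch for pid_exists above:

import os
print(pid_exists(os.getpid()))  # True: the current process exists
print(pid_exists(-1))           # False: negative pids are rejected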
def allowed_file(filename):
    """
    Verifies if file extension is compatible
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
75e0047eff0787e33f687e7a9b689ad8661b7501
3,645,917
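A usage sketch for allowed_file above; ALLOWED_EXTENSIONS is a module-level set assumed to look like this:

ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg"}
assert allowed_file("photo.JPG") is True
assert allowed_file("archive.tar.gz") is False  # "gz" is not in the allowed set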
def clap_convert(txt):
    """Convert a string of clap values on Medium to an actual number.

    Args:
        txt (str): claps values

    Returns:
        number of claps (int)
    """
    # Medium annotation
    if txt[-1] == "K":
        output = int(float(txt[:-1]) * 1000)
        return output
    else:
        return int(txt)
253e0e2be4f37f1994637bbfc80edfc5d72bc4e5
3,645,918
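A usage sketch for clap_convert above:

assert clap_convert("1.2K") == 1200
assert clap_convert("37") == 37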
import io def write_phase1_capsummary(inst, isStringIO=True): """ Write out a multiweek summary of capacity, demand, understaffing. :param inst: Model instance :param isStringIO: True (default) to return StringIO object, False to return string :return: capacity summary as StringIO object or a string. """ param = 'period,day,week,dmd,cap,us1,us2,ustot\n' rows = [(i, j, w, inst.dmd_staff[i, j, w], inst.cov[i, j, w].value, inst.under1[i, j, w].value, inst.under2[i, j, w].value, inst.under1[i, j, w].value + inst.under2[i, j, w].value) for i in inst.PERIODS for j in inst.DAYS for w in inst.WEEKS ] for row in rows: row = [str(r) for r in row] data_row = ','.join(row) data_row += '\n' param += data_row if isStringIO: param_out = io.StringIO() param_out.write(param) return param_out.getvalue() else: return param
6d6e7d083693b74ea27e7f10cec4899735f32541
3,645,919
def glorot_uniform(shape):
    """
    :param shape: tuple with the shape of the wanted output
        (filters_amount, depth, height, width)
    :return: array (its shape = param shape) with values initialized
        using the 'glorot uniform' initializer
    """
    fan_in, fan_out = _calc_fans(shape)
    scale = 1. / ((fan_in + fan_out) / 2.)
    limit = np.sqrt(3.0 * scale)
    return np.random.uniform(low=-limit, high=limit, size=shape)
0cec12b0342db827286248a722b32852cab2bdad
3,645,920
import platform
import os


def get_server() -> str:
    """Generate server information.

    :return: server info
    :rtype: str
    """
    uname = platform.uname()
    fmt_plat = f"OS: {uname.system} {uname.release} v{uname.version}\n"
    fmt_plat += f"CPU: {uname.processor} ({os.cpu_count()} threads)\n"
    fmt_plat += f"PID: {os.getpid()}"
    return quote(fmt_plat, True, "py")
f37ad21b3d5526878fbc5484072986b77bbeff90
3,645,921
import warnings def second_order_moments(n_components, e2, m1, alpha0): """Second-Order Moments To prevent creating 2nd order moments explicitly, we construct its decomposition with `n_components`. check reference [?] section 5.2 for details. Parameters ---------- n_components: int Number of components e2: sparse matrix, shape=(n_features, n_features) Expectation of word pairs. e2[i, j] is the expectation of word `i` and `j` in the same document. m1: array, shape=(n_features,) Expectation of each words. alpha0: double Sum of topic topic concentration parameter Returns ------- m2_vals : array, shape=(n_components,) eigen values of sencond-order moments m2_vecs : array, shape=(n_features, n_components) eigen values of sencond-order moments """ # eigen values and vectors of E2 n_features = e2.shape[0] #print("%d ; %d" % (n_features, n_components)) if n_components == n_features: # run full svd, convert e2 to dense array first e2_vecs, e2_vals, _ = LA.svd(e2.toarray()) else: #e2_vals, e2_vecs = sp.linalg.eigsh(e2, k=n_components, which='LM') e2_vecs, e2_vals, _ = sp.linalg.svds(e2, k=n_components, which='LM', return_singular_vectors=True) e2_vals *= (alpha0 + 1.) m1_p = np.dot(e2_vecs.T, m1) # section 5.2 part 1. m2_p = (-1. * alpha0) * (m1_p * m1_p[:, np.newaxis]) m2_p[np.diag_indices_from(m2_p)] += e2_vals # section 5.2 part 1. # eigen values and vectors of M2 prime try: m2p_vecs, m2p_vals, _ = LA.svd(m2_p) m2_vals = m2p_vals m2_vecs = np.dot(e2_vecs, m2p_vecs) except LA.LinAlgError: # In order to pass `check_estimator` test. # convert this error to warnings. warnings.warn("SVD in second_order_moments did not converge. " "the algorithm will not work.", ConvergenceWarning) m2_vals = np.ones(m2_p.shape[0]) m2_vecs = m2_p return (m2_vals, m2_vecs)
4b2ac9d43352d856875d86cd1975ec59ac5664c8
3,645,922
import traceback def callback_query_wrapper(func): """Create a session, handle permissions and exceptions for callback queries.""" def wrapper(update, context): user = None if context.user_data.get("ban"): return temp_ban_time = context.user_data.get("temporary-ban-time") if temp_ban_time is not None and temp_ban_time == date.today(): update.callback_query.answer(i18n.t("callback.spam")) return session = get_session() try: user, statistic = get_user(session, update.callback_query.from_user) # Cache ban value, so we don't have to lookup the value in our database if user.banned: context.user_data["ban"] = True return # Cache temporary-ban time, so we don't have to create a connection to our database if statistic.votes > config["telegram"]["max_user_votes_per_day"]: update.callback_query.answer( i18n.t("callback.spam", locale=user.locale) ) context.user_data["temporary-ban-time"] = date.today() return func(context.bot, update, session, user) session.commit() except Exception as e: if not ignore_exception(e): if config["logging"]["debug"]: traceback.print_exc() sentry.captureException() locale = "English" if user is not None: locale = user.locale update.callback_query.answer(i18n.t("callback.error", locale=locale)) finally: session.close() return wrapper
296930fbc16480689ef6f4a0da5ef385ad9cb2df
3,645,923
def remove_names(df: pd.DataFrame) -> pd.DataFrame:
    """Convert personal names to numerical values."""
    df = df.reset_index()
    df.drop(columns='Name', inplace=True)
    return df
9dab1803a153d5effd2e08b6e6ff5df30fee8407
3,645,924
import torch def handle_epoch_metrics(step_metrics, epoch_labels, epoch_predictions): """ Function that handles the metrics per epoch. Inputs: step_metrics - Dictionary containing the results of the steps of an epoch epoch_labels - List of labels from the different steps epoch_predictions - List of predictions from the different steps Outputs: epoch_merics - Dictionary containing the averaged results of an epoch """ # compute the loss loss = torch.mean(torch.stack(step_metrics['losses'], dim=0), dim=0) loss = round(loss.item(), 4) # compute the accuracy and f1 accuracy, f1 = compute_accuracy_f1(step_metrics['predictions'], step_metrics['labels']) # create a new epoch dictionary epoch_metrics = {'loss': loss, 'accuracy': accuracy, 'f1': f1} # return the epoch dictionary return epoch_metrics
a1d0180095535eec641258dd921c90808aa6858f
3,645,925
def project_disk_sed(bulge_sed, disk_sed): """Project the disk SED onto the space where it is bluer For the majority of observed galaxies, it appears that the difference between the bulge and the disk SEDs is roughly monotonic, making the disk bluer. This projection operator projects colors that are redder onto the same difference in color as the previous wavelength, similar to the way monotonicity works for the morphological `S` matrix of the model. While a single iteration of this model is unlikely to yield results that are as good as those in `project_disk_sed_mean`, after many iterations it is expected to converge to a better value. """ new_sed = disk_sed.copy() diff = bulge_sed - disk_sed for s in range(1, len(diff)-1): if diff[s]<diff[s-1]: new_sed[s] = new_sed[s] + diff[s-1] diff[s] = diff[s-1] return new_sed
5faf8f7d8d0d780f61586f7fae39f4ba04d3752d
3,645,926
import os


def load_image_url(image_url, image_size=(256, 256), preserve_aspect_ratio=True):
    """Loads and preprocesses images from a given url."""
    # Cache image file locally.
    image_path = tf.keras.utils.get_file(
        os.path.basename(image_url)[-128:], image_url)
    # Load and convert to float32 numpy array, add batch dimension,
    # and normalize to range [0, 1].
    img = plt.imread(image_path).astype(np.float32)[np.newaxis, ...]
    if img.max() > 1.0:
        img = img / 255.
    if len(img.shape) == 3:
        img = tf.stack([img, img, img], axis=-1)
    img = crop_center(img)
    img = tf.image.resize(img, image_size,
                          preserve_aspect_ratio=preserve_aspect_ratio)
    return img
cad02eaf590431b922159ac3dedbf2d418d29335
3,645,927
def qlog_numpy(q):
    """
    Applies logarithm map to q
    :param q: (4,)
    :return: (3,)
    """
    if all(q[1:] == 0):
        q = np.zeros(3)
    else:
        q = np.arccos(q[0]) * q[1:] / np.linalg.norm(q[1:])
    return q
82cf0ff2054c02e4cc3dc3a6500b1c8a0e3eb870
3,645,928
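A usage sketch for qlog_numpy above; the log map of the identity quaternion is the zero vector:

import numpy as np
print(qlog_numpy(np.array([1.0, 0.0, 0.0, 0.0])))  # [0. 0. 0.]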
def get_ML_features(df: pd.DataFrame, protease: str = 'trypsin', **kwargs) -> pd.DataFrame:
    """
    Computes additional features on the psms table for subsequent machine learning.

    Args:
        df (pd.DataFrame): psms table of search results from alphapept.
        protease (str, optional): string specifying the protease that was used
            for proteolytic digestion. Defaults to 'trypsin'.

    Returns:
        pd.DataFrame: df including additional scores for subsequent ML.
    """
    df['decoy'] = df['sequence'].str[-1].str.islower()
    df['abs_delta_m_ppm'] = np.abs(df['delta_m_ppm'])
    df['naked_sequence'] = df['sequence'].apply(
        lambda x: ''.join([_ for _ in x if _.isupper()]))
    df['n_AA'] = df['naked_sequence'].str.len()
    df['matched_ion_fraction'] = df['hits'] / (2 * df['n_AA'])
    df['n_missed'] = df['naked_sequence'].apply(
        lambda x: count_missed_cleavages(x, protease))
    df['n_internal'] = df['naked_sequence'].apply(
        lambda x: count_internal_cleavages(x, protease))
    df['x_tandem'] = get_x_tandem_score(df)
    return df
4ac4202fa5c86b78b1bda1a2b96d5ed4b8552b4f
3,645,929
def revcomp(sequence):
    """
    Find reverse complementary sequence

    :param sequence: The RNA sequence in string form

    :return: The reverse complement sequence in string form
    """
    complement = {"A": "U", "U": "A", "C": "G", "G": "C", "N": "N"}
    revcompseq = ""
    sequence_list = list(sequence)
    sequence_list.reverse()
    for letter in sequence_list:
        revcompseq += complement[letter.upper()]
    return revcompseq
c66b9ad967e612fa97f18bb2932e7eb4bbee8245
3,645,930
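A usage sketch for revcomp above:

assert revcomp("AUGC") == "GCAU"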
def J(X, mean, r):
    """Objective function of K-means (to be minimized)."""
    summation = 0.0
    for n in range(len(X)):
        temp = 0.0
        for k in range(K):
            temp += r[n, k] * np.linalg.norm(X[n] - mean[k]) ** 2
        summation += temp
    return summation
1d2dd241fc30cb5897b0224285c5c7f2f2fec675
3,645,931
def get_all_subs():
    """ Temporary function until we work out a better autocomplete for createpost """
    # TODO
    return [x.name for x in Sub.select(Sub.name)]
5a956bc743f765026f86ed5928698f58d4791338
3,645,932
import time


def timesince():
    """
    Get the amount of time since 00:00 on 1 January 1970,
    the raw date before formatting it.
    """
    return time.time()
7e6944d74172947c4ac990c0fa993524ab865e18
3,645,933
def gencoords_outside(N, d, rad=None, truncmask=False, trunctype='circ'): """ generate coordinates of all points in an NxN..xN grid with d dimensions coords in each dimension are [-N/2, N/2) N should be even""" if not truncmask: _, truncc, _ = gencoords_outside(N, d, rad, True) return truncc c = geometry.gencoords_base(N, d) if rad is not None: if trunctype == 'circ': r2 = np.sum(c**2, axis=1) trunkmask = r2 > (rad*N/2.0)**2 elif trunctype == 'square': r = np.max(np.abs(c), axis=1) trunkmask = r > (rad*N/2.0) truncc = c[trunkmask, :] else: trunkmask = np.ones((c.shape[0],), dtype=np.bool8) truncc = c return c, truncc, trunkmask
0b4f3db165cb495e5d540412cb77bd36e8a42c62
3,645,934
def map_orientation(cur_orientation, cur_count): """ . . . . . x . . . . . x . . . . . x . . . . . x . . . . . x . . . . . x """ right_edge = 34905131040 """ . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . x x x x x x """ bottom_edge = 67645734912 """ we will check if each position of the game peice is valid by investigating if it touches the right edge or the bottom edge using a logica AND (&) operation. The & will be 0 if there is no overlap and <> 0 if there is Pass in peices positioned in the upper left corner so that this check can walk right and down to checkk all conditions """ room_to_move_right = True room_to_move_down = True safe_down = True while safe_down: room_to_move_right = True safe_right = True row_start = cur_orientation while safe_right: peice_orientation_list[cur_count] = cur_orientation cur_count += 1 """ moving piece right 1 bit is the same as multiplying by 2^1 . x . . . . x x x . . . = 450 . . x . . . . x x x . . = 900 """ if room_to_move_right: cur_orientation = cur_orientation << 1 room_to_move_right = ((cur_orientation & right_edge) == 0) else: safe_right = False """ moving down is the same as shifting right 6 times or multiplying by 2^6, aka 64 . x . . . . x x x . . . = 450 . x . . . . x x x . . . = 28,800 """ if room_to_move_down: cur_orientation = row_start << 6 room_to_move_down = ((cur_orientation & bottom_edge) == 0) else: safe_down = False return cur_count
5fa5e0c386da56cab336f33e560bf9591814060c
3,645,935
import ctypes


def glGetShaderInfoLog(baseOperation, obj):
    """Retrieve the shader's error messages as a Python string

    returns string which is '' if no message
    """
    target = GLsizei()
    glGetShaderiv(obj, GL_INFO_LOG_LENGTH, target)
    length = target.value
    if length > 0:
        log = ctypes.create_string_buffer(length)
        baseOperation(obj, length, None, log)
        return log.value.strip(_NULL_8_BYTE)  # null-termination
    return ''
0173aa2cbeac8c8b2cb9072d0d56584285af2e0d
3,645,936
def _CheckGrdTranslations(grd_file, grd_lines, wanted_locales): """Check all <file> elements that correspond to an .xtb output file. Args: grd_file: Input .grd file path. grd_lines: List of input .grd lines. wanted_locales: set of wanted Chromium locale names. Returns: List of error message strings. Empty on success. """ wanted_locales = wanted_locales - set([_DEFAULT_LOCALE]) intervals = _BuildIntervalList(grd_lines, _IsTranslationGrdOutputLine) errors = [] for start, end in intervals: errors += _CheckGrdElementRangeLang(grd_lines, start, end, wanted_locales) errors += _CheckGrdTranslationElementRange(grd_lines, start, end, wanted_locales) return errors
49b73187cd2ac3c7b9796a8a139d89f9a74c91a3
3,645,937
def choose_diverging_palette(as_cmap=False): """Launch an interactive widget to choose a diverging color palette. This corresponds with the :func:`diverging_palette` function. This kind of palette is good for data that range between interesting low values and interesting high values with a meaningful midpoint. (For example, change scores relative to some baseline value). Requires IPython 2+ and must be used in the notebook. Parameters ---------- as_cmap : bool If True, the return value is a matplotlib colormap rather than a list of discrete colors. Returns ------- pal or cmap : list of colors or matplotlib colormap Object that can be passed to plotting functions. See Also -------- diverging_palette : Create a diverging color palette or colormap. choose_colorbrewer_palette : Interactively choose palettes from the colorbrewer set, including diverging palettes. """ pal = [] if as_cmap: cmap = _init_mutable_colormap() @interact def choose_diverging_palette( h_neg=IntSlider(min=0, max=359, value=220), h_pos=IntSlider(min=0, max=359, value=10), s=IntSlider(min=0, max=99, value=74), l=IntSlider(min=0, max=99, value=50), # noqa: E741 sep=IntSlider(min=1, max=50, value=10), n=(2, 16), center=["light", "dark"] ): if as_cmap: colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center) _update_lut(cmap, colors) _show_cmap(cmap) else: pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center) palplot(pal) if as_cmap: return cmap return pal
0c2ffc8710a56e643e6c4ccdd453bd00cc59e6a2
3,645,938
def get_lm_model(args, device, config):
    """Get language model(based on GPT-2) used for sequence prediction."""
    ninp = config["ninp"]
    nhead = config["nhead"]
    initrange = config["initrange"]
    dropout = config["dropout"]
    vocab_size = config["vocab_size"]
    nhid = config["nhid"]
    ndecoder = config["num_decoder_layers"]

    if args.ssd_offload:
        return transformer_lm.TransformerLM(vocab_size, ninp, nhead, nhid, dropout,
                                            initrange, ndecoder)
    else:
        return transformer_lm.TransformerLM(vocab_size, ninp, nhead, nhid, dropout,
                                            initrange, ndecoder).to(device)
071f43b9537ca8024f980271c64750e7afae864e
3,645,939
def get_meminfo():
    """ Get and format the content of /proc/meminfo """
    buf = open('/proc/meminfo').read()
    buf = ','.join([v.replace(' ', '') for v in buf.split('\n') if v])
    return buf
47a0d57d1b8c90b4907fab8e39154b8e4ad5b7ee
3,645,940
def effective_area(true_energy, reco_energy, simu_area):
    """
    Compute the effective area from a list of simulated energy and reconstructed energy

    Parameters
    ----------
    true_energy: 1d numpy array
    reco_energy: 1d numpy array
    simu_area: float - area on which events are simulated

    Returns
    -------
    float = effective area
    """
    return simu_area * len(reco_energy) / len(true_energy)
b17efa390a1ae14bb8ecb959740bad8c391b1d2e
3,645,941
def execute(queries, arglists, fetchone=False): """Execute multiple queries to the sqlite3 jobtracker database. All queries will be executed as a single transaction. Return the result of the last query, or the ID of the last INSERT, whichever is applicaple. Inputs: queries: A list of queries to be execute. arglists: A list (same length as queries). Each entry contains the paramters to be substituted into the corresponding query. fetchone: If True, fetch and return only a single row. Otherwise, fetch and return all rows. (Only applies for SELECT statements. Default: fetch all rows). Outputs: results: Single row, or list of rows (for SELECT statements), depending on 'fetchone'. Or, the ID of the last entry INSERT'ed (for INSERT statements). """ not_connected = True count = 0 while not_connected: try: db_conn = sqlite3.connect(config.background.jobtracker_db,timeout=40.0) db_conn.isolation_level = 'DEFERRED' db_conn.row_factory = sqlite3.Row db_cur = db_conn.cursor() for q, args in zip(queries, arglists): db_cur.execute(q, args) db_conn.commit() if db_cur.lastrowid: results = db_cur.lastrowid else: if fetchone: results = db_cur.fetchone() else: results = db_cur.fetchall() db_conn.close() not_connected = False except sqlite3.OperationalError, e: try: db_conn.rollback() db_conn.close() except NameError: # Connection wasn't established, 'db_conn' is not defined. pass if (count % 60) == 0: print "Couldn't connect to DB for %d seconds. Will continue trying. " \ "Error message: %s" % (count, str(e)) time.sleep(1) count+=1 return results
a64e12262150514dbc5e6e7f4c193481ab8162aa
3,645,942
def physical_cpu_mhz(vir_connection):
    """Get the CPU frequency in MHz using libvirt.

    :param vir_connection: A libvirt connection object.
    :type vir_connection: virConnect

    :return: The CPU frequency in MHz.
    :rtype: int
    """
    return vir_connection.getInfo()[3]
f6a404a6d531940fbc762f493e90355e2fc78690
3,645,943
def addstream(bot, input):
    """Add a stream to the notify list"""
    if not input.admin:
        return False
    if not input.group(2):
        return
    stream = input.group(2).lower()
    if not stream in bot.config.streams:
        bot.config.set_add('streams', stream)
        bot.reply("Added {0} to stream list".format(stream))
    else:
        bot.reply("{0} is already in the stream list".format(stream))
48465633ea58968efca31231eb5e1a47a537c979
3,645,944
def get_author(search):
    """
    Queries google scholar to find an author given a search string.
    If the number of results is != 1 it raises an error.
    """
    authors = list(scholarly.search_author(search))

    if len(authors) > 1:
        raise ValueError(
            f'Found >1 authors with search string: {search}, try something more specific')
    elif not authors:
        raise ValueError(f'Could not find authors with search string: {search}')

    return authors[0].fill(sections=['basics', 'indices', 'publications'])
8fffd75f588194db0707ddd7249823fb73324549
3,645,945
import torch


def _named_tensor_generic_operation(
        tensor: torch.Tensor,
        tensor_ops_pre: callable = dummy,
        tensor_ops_post: callable = dummy,
        name_ops: callable = dummy) -> torch.Tensor:
    """
    Generic base function used by the other named-tensor helpers.
    It first stores the names, removes them, applies the operations,
    and finally restores the (possibly modified) names.

    Args:
        tensor (): the named tensor to work on
        tensor_ops_pre (): the operation applied before the names are removed
        tensor_ops_post (): the operation applied after the names are removed
            that acts on the tensor
        name_ops (): the operation that acts on the names

    Returns:

    """
    # Save the names in names_old and then remove the names from the tensor
    tensor = tensor_ops_pre(tensor)
    names_old = tensor.names
    tensor = tensor.rename(None)

    # operations
    names_new = name_ops(names_old)   # modify the names
    tensor = tensor_ops_post(tensor)  # change the tensor accordingly

    return tensor.refine_names(*names_new)
8a343f2ab2c4aeaebcf64e8fc5e75cb3d8776241
3,645,946
def normalize_address_components(parsed_addr):
    # type: (MutableMapping[str, str]) -> MutableMapping[str, str]
    """Normalize parsed sections of address as appropriate.

    Processes parsed address through subsets of normalization rules.

    :param parsed_addr: address parsed into ordereddict per usaddress.
    :type parsed_addr: Mapping
    :return: parsed_addr with normalization processing applied to elements.
    :rtype: dict
    """
    parsed_addr = normalize_numbered_streets(parsed_addr)
    parsed_addr = normalize_directionals(parsed_addr)
    parsed_addr = normalize_street_types(parsed_addr)
    parsed_addr = normalize_occupancy_type(parsed_addr)
    return parsed_addr
48730c85ee7930b27260b97a6ad876bcecf1b5cc
3,645,947
def model_inputs():
    """
    Build the model inputs.

    Returns: inputs, targets, learning_rate, source_sequence_len,
    target_sequence_len, max_target_sequence_len, all of type tensor.
    """
    inputs = tf.placeholder(tf.int32, [None, None], name="inputs")
    targets = tf.placeholder(tf.int32, [None, None], name="targets")
    learning_rate = tf.placeholder(tf.float32, name="learning_rate")

    source_sequence_len = tf.placeholder(tf.int32, (None,), name="source_sequence_len")  # TODO is this exactly the same as [None]?
    target_sequence_len = tf.placeholder(tf.int32, (None,), name="target_sequence_len")
    max_target_sequence_len = tf.placeholder(tf.int32, (None,), name="max_target_sequence_len")

    return inputs, targets, learning_rate, source_sequence_len, target_sequence_len, max_target_sequence_len
e9bc9464c826bc8b6c4ea172af189822359735e4
3,645,948
def is_key_in_store(loc, key):
    """
    A quick check to determine whether the :class:`pandas.HDFStore`
    has data for ``key``

    :ARGS:

        loc: :class:`string` of path to :class:`pandas.HDFStore`

        key: :class:`string` of the ticker to check if currently available

    :RETURNS:

        whether ``key`` is currently a part of the data set
    """
    try:
        store = pandas.HDFStore(path=loc, mode='r')
    except IOError:
        print(loc + " is not a valid path to an HDFStore Object")
        return
    store_keys = store.keys()
    store.close()
    return key in map(lambda x: x.strip('/'), store_keys)
273bd534daa0f70831e77da88808033e4f1683eb
3,645,949
def transform_rows_nonlinear06(data, **kwargs): """ Nonlinear row transformation 06. 12 simulated data sources; Functions: 1.0, 0.5*(x+1)^2, sin(pi*x), sin(2*pi*x), cos(pi*x), cos(2*pi*x), x^5, exp2, log10(x-x.min()), boxcox(2), boxcox(4), boxcox(6). """ sources_transformers = [ 1.0, lambda x: 0.5 * np.power((x+1), 2), lambda x: np.sin(np.pi * x), lambda x: np.sin(2.0 * np.pi * x), lambda x: np.cos(np.pi * x), lambda x: np.cos(2.0 * np.pi * x), lambda x: np.power(x, 5), lambda x: np.exp2(x), lambda x: np.log10(x + (-1.0 * x.min()) + 0.01), lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 2.00), lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 4.00), lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 6.00), ] return _generic_data_transformation(data, sources_transformers, **kwargs)
dd51c838d9721fe310463fa8a0cdb0505e9c4f0f
3,645,950
def catch_parameter(opt): """Change the captured parameters names""" switch = {'-h': 'help', '-o': 'one_timestamp', '-a': 'activity', '-f': 'file_name', '-i': 'imp', '-l': 'lstm_act', '-d': 'dense_act', '-p': 'optim', '-n': 'norm_method', '-m': 'model_type', '-z': 'n_size', '-y': 'l_size', '-c': 'folder', '-b': 'model_file', '-x': 'is_single_exec', '-t': 'max_trace_size', '-e': 'splits', '-g': 'sub_group'} try: return switch[opt] except: raise Exception('Invalid option ' + opt)
ad3a25e3786b657947893f96a76e80f17eb3b0f0
3,645,951
from loopy.kernel import LoopKernel from loopy.translation_unit import make_program from loopy import CACHING_ENABLED from loopy.kernel import KernelState from loopy.preprocess import preprocess_program from loopy.type_inference import infer_unknown_types from loopy.schedule import linearize from loopy.check import pre_codegen_checks def generate_code_v2(program): """ Returns an instance of :class:`CodeGenerationResult`. :param program: An instance of :class:`loopy.TranslationUnit`. """ # {{{ cache retrieval if CACHING_ENABLED: input_program = program try: result = code_gen_cache[input_program] logger.debug(f"TranslationUnit with entrypoints {program.entrypoints}:" " code generation cache hit") return result except KeyError: pass # }}} if isinstance(program, LoopKernel): program = make_program(program) if program.state < KernelState.PREPROCESSED: # Note that we cannot have preprocessing separately for everyone. # Since, now the preprocessing of each one depends on the other. # So we check if any one of the callable kernels are not preprocesses # then, we have to do the preprocessing of every other kernel. program = preprocess_program(program) program = infer_unknown_types(program, expect_completion=True) program = linearize(program) # Why diverge? Generated code for a non-entrypoint kernel and an entrypoint # kernel isn't same for a general loopy target. For example in OpenCL, a # kernel callable from host and the one supposed to be callable from device # have different function signatures. To generate correct code, each # callable should be exclusively an entrypoint or a non-entrypoint kernel. program = diverge_callee_entrypoints(program) pre_codegen_checks(program) host_programs = {} device_programs = [] device_preambles = [] callee_fdecls = [] implemented_data_infos = {} # {{{ collect host/device programs for func_id in sorted(key for key, val in program.callables_table.items() if isinstance(val, CallableKernel)): cgr = generate_code_for_a_single_kernel(program[func_id], program.callables_table, program.target, func_id in program.entrypoints) if func_id in program.entrypoints: host_programs[func_id] = cgr.host_program implemented_data_infos[func_id] = cgr.implemented_data_info else: assert len(cgr.device_programs) == 1 callee_fdecls.append(cgr.device_programs[0].ast.fdecl) device_programs.extend(cgr.device_programs) device_preambles.extend(cgr.device_preambles) # }}} # {{{ collect preambles for clbl in program.callables_table.values(): device_preambles.extend(list(clbl.generate_preambles(program.target))) # }}} # adding the callee fdecls to the device_programs device_programs = ([device_programs[0].copy( ast=program.target.get_device_ast_builder().ast_module.Collection( callee_fdecls+[device_programs[0].ast]))] + device_programs[1:]) cgr = TranslationUnitCodeGenerationResult( host_programs=host_programs, device_programs=device_programs, device_preambles=device_preambles, implemented_data_infos=implemented_data_infos) if CACHING_ENABLED: code_gen_cache.store_if_not_present(input_program, cgr) return cgr
2f3e4a9b2dd7ea9994ec7cf1a1112db7abacf8bf
3,645,952
import sys import glob def serial_ports(): """ Lists serial port names :raises EnvironmentError: On unsupported or unknown platforms :returns: A list of the serial ports available on the system """ if sys.platform.startswith('win'): ports = ['COM%s' % (i + 1) for i in range(256)] elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'): # this excludes your current terminal "/dev/tty" ports = glob.glob('/dev/tty[A-Za-z]*') elif sys.platform.startswith('darwin'): ports = glob.glob('/dev/tty.*') else: raise EnvironmentError('Unsupported platform') result = [] for port in ports: try: s = serial.Serial(port) s.close() result.append(str(port)) except (OSError, serial.SerialException): pass return result
3693ed759d596308dc7fd817fd32a6643a3533e8
3,645,953
def get_mgr_pods(mgr_label=constants.MGR_APP_LABEL, namespace=None):
    """
    Fetches info about mgr pods in the cluster

    Args:
        mgr_label (str): label associated with mgr pods
            (default: defaults.MGR_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list : of mgr pod objects
    """
    namespace = namespace or config.ENV_DATA['cluster_namespace']
    mgrs = get_pods_having_label(mgr_label, namespace)
    mgr_pods = [Pod(**mgr) for mgr in mgrs]
    return mgr_pods
8079d291d5e2b996547ecd615fbc00a8c70aa4e9
3,645,954
from typing import Union


def normalise_architecture(architecture: Union[str, int]):
    """Convert any valid architecture alias to either 'x86_64' or 'i686'.

    Raise an error for invalid input.
    """
    for (true_name, aliases) in architecture_aliases.items():
        if architecture in aliases:
            return true_name
    raise ValueError(
        f"Invalid architecture {repr(architecture)}. "
        f"Legal 64 bit values are:\n    {architecture_aliases['x86_64']}\n"
        f"And legal 32 bit values are:\n    {architecture_aliases['i686']}\n"
    )
a7e99a2e8cc527028b82c7e628bd18f9c63c7f61
3,645,955
def process_m(filename, m, estimator):
    """Returns the list of file sizes and PSNR values for compression method m.
    """
    filesize, psnr = [], []
    for q in range(0, 101, 5):
        _size, _psnr = process_q(filename, q, m, estimator)
        filesize.append(_size / 1024)  # in kilobyte(s)
        psnr.append(_psnr)
    return filesize, psnr
7bf1bbcdf31709393788006d8d5cd1bef3bf5509
3,645,956
def fmt(n):
    """format number with a space in front if it is single digit"""
    if n < 10:
        return " " + str(n)
    else:
        return str(n)
976acc22cafd6d6bdb4e251853f49a114b63ec21
3,645,957
def test_registration(): """Test registering a magic and getting a copy of it and de-registering.""" manager.MagicManager.clear_magics() def my_magic(cell=None, line=None): """This is a magic.""" if not cell: cell = 'foo' if not line: line = 'bar' return f'{cell}{line}' my_magic.magic_name = 'magical_function' my_magic.fn = my_magic manager.MagicManager.register_magic(my_magic) magic_from_manager = manager.MagicManager.get_magic('magical_function') assert magic_from_manager() == 'foobar' my_magic.magic_name = 'other_magic' def conditional(): return False manager.MagicManager.register_magic(my_magic, conditional=conditional) magic_from_manager = manager.MagicManager.get_magic('other_magic') assert magic_from_manager is None manager.MagicManager.register_magic(my_magic) magic_from_manager = manager.MagicManager.get_magic('other_magic') assert magic_from_manager() == 'foobar' manager.MagicManager.deregister_magic('other_magic') magic_from_manager = manager.MagicManager.get_magic('other_magic') assert magic_from_manager is None manager.MagicManager.deregister_magic('magical_function') magic_from_manager = manager.MagicManager.get_magic('magical_function') assert magic_from_manager is None with pytest.raises(KeyError): manager.MagicManager.deregister_magic('does_not_exist')
899573442f12e6e6544f32dcd472fd495eb9dc3b
3,645,958
def stop(): """Stop cleaning This is using docstrings for specifications. --- definitions: stop: type: object properties: did: type: string siid: type: integer aiid: type: integer code: type: integer out: type: array items: {} security: - Bearer: [] responses: 200: description: OK schema: $ref: '#/definitions/stop' 400: description: Bad Request 401: description: Unauthorized """ consoleOutput = ( popen("miiocli dreamevacuum --ip " + creds.ip + " --token " + creds.token + " play_sound") .read() .strip() .rstrip("\n") ) # 400 if consoleOutput.find("Error") != -1: return Response(response=consoleOutput.rstrip("\n"), status=400, mimetype="text/plain") # 200 result = consoleOutput.partition("\n")[2] print(result) if result.find("{'did'") != -1: return Response(response=result.replace("'", '"'), status=200, mimetype="application/json")
e9d7558f4433e73a92229fbf79628eb48357e12b
3,645,959
import json


def save_key(access_key, output_filename=DEFAULT_ACCESS_KEY_FILE):
    """ saves access key to .yc json file """
    with open(output_filename, "w+") as f:
        f.write(json.dumps(access_key, indent=4))
    return output_filename
7f15a469ad9b74a39452d8bde46223ef214300d9
3,645,960
def PutObject(object_id: str):
    """Add/replace DRS object with a user-supplied ID.

    Args:
        object_id: Identifier of DRS object to be created/updated.

    Returns:
        Identifier of created/updated DRS object.
    """
    return register_object(
        data=request.json,
        object_id=object_id,
    )
faf0aa633ef149c34f3fe0e80d8fdcc9df68dfec
3,645,961
def get_handler_name(method: str, url_path: str, path_params: dict):
    """
    Returns the name of the handler needed for a reflexive method call.

    :param method: HTTP method
    :param url_path: URL
    :param path_params: path parameters
    :return:
    """
    handler = url_path.replace('/', '_')
    for key, value in path_params.items():
        handler = handler.replace(value, key)
    return method.lower() + handler
e8060538a6bf73e6291ecbcbec14f11997a53507
3,645,962
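A usage sketch for get_handler_name above, with a made-up route where "42" fills the user_id path parameter:

assert get_handler_name("GET", "/users/42", {"user_id": "42"}) == "get_users_user_id"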
import argparse def parse_args(): """Process input arguments""" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('genotypes', metavar='G', help="Genotype table") parser.add_argument('mutations', metavar='M', help="Mutation table") parser.add_argument('--zygosity', '-z', default='both', type=str, help="Minimum number of variant alleles required to be variant\ (het, hom or both)") parser.add_argument('--nonsense', '-n', default=0, type=float, help="Only consider nonsense variants occuring in the first\ X portion of the protein") parser.add_argument('--total', '-t', action="store_true", help="Return the count of variants in each gene") parser.add_argument('--worst', '-w', action="store_true", help="Return the neutral probability of the most impactful variant\ in a gene") parser.add_argument('--sift', '-i', action="store_true", help="Use SIFT scores to calculate P(Neutral)") parser.add_argument('--blosum', '-b', action="store_true", help="Use BLOSUM62 scores to calculate P(Neutral)") parser.add_argument('--foldx', '-f', action="store_true", help="Use FoldX ddG scores to calculate P(Neutral)") return parser.parse_args()
276aed3028ae4d9614b7afae58ece670b8d2b806
3,645,963
import json


def plot_AP(file_path: str):
    """Plot a bar chart of the per-class AP values."""
    with open(file_path, encoding='utf-8') as f:
        result = json.load(f)

    AP = []
    classes = []
    for k, v in result.items():
        if k != 'mAP':
            AP.append(v['AP'])
            classes.append(k)

    fig, ax = plt.subplots(1, 1, num='AP bar chart')
    ax.barh(range(len(AP)), AP, height=0.6, tick_label=classes)
    ax.set(xlabel='AP', title=f'mAP: {result["mAP"]:.2%}')
    return fig, ax
7d691161d07d5f4a70c2b46b8971f54c93972a7b
3,645,964
def partner_data_ingest_new_files(source, destination):
    """
    :param source: list of files to process
    :param destination: destination to copy validated files

    Check the s3 path for new files and trigger partner_data_ingest for them.
    """
    hook = S3SyncHook(aws_conn_id="aws_default", verify=True)
    diff = hook.diff(source, destination)
    return partner_data_ingest(new_files=diff, destination=destination)
49685cac11c12af7aa3e2e9ecc152dc46f1b2c5e
3,645,965
import six def _mofval(value, indent, maxline, line_pos=0, end_space=0): """ Low level function that returns the MOF representation of a non-string value (i.e. a value that cannot not be split into multiple parts, for example a numeric or boolean value). If the MOF representation of the value does not fit into the remaining space of the current line, it is put into a new line, considering the specified indentation. NOTE: This method is derived from pywbem mofval but differs in that we want to output even if we violate the maxline limit on the new line. This method favors outputting data over exceptions. Parameters: value (:term:`unicode string`): The non-string value. Must not be `None`. indent (:term:`integer`): Number of spaces to indent any new lines that are generated. maxline (:term:`integer`): Maximum line length for the generated MOF. line_pos (:term:`integer`): Length of content already on the current line. end_space (:term:`integer`): Length of space to be left free on the last line. Returns: tuple of * :term:`unicode string`: MOF string. * new line_pos """ assert isinstance(value, six.text_type) # Check for output on current line # if fits or this is first entry on the line avl_len = maxline - line_pos - end_space if len(value) <= avl_len or line_pos == 0: line_pos += len(value) return value, line_pos mof_str = u'\n' + _indent_str(indent) + value line_pos = indent + len(value) return mof_str, line_pos
964e788a228ac88305fb8d82e7e9b9a4a8cd1a2f
3,645,966
import os


def get_ammr_version(folder=None):
    """Return the AMMR version if possible.

    The function will walk up a directory tree looking for an
    AMMR.version.any file to parse.
    """
    folder = folder or os.getcwd()
    any_version_file = "AMMR.version.any"
    xml_version_file = "AMMR.version.xml"
    files = os.listdir(folder)
    if any_version_file in files:
        return ammr_any_version(os.path.join(folder, any_version_file))
    elif xml_version_file in files:
        return amm_xml_version(os.path.join(folder, xml_version_file))
    else:
        return ""
ccb5380550d9cdaec6c4073e3884ea6052b4f448
3,645,967
def to_vector(texto, model, idf):
    """Receives a sentence string along with a word embedding model and
    returns the vector representation of the sentence"""
    tokens = normalizer(texto).split()  # splits the text by space and returns a list of words
    vec = np.zeros(300)  # creates an empty vector of 300 dimensions
    for word in tokens:  # iterates over the sentence
        if (word in model) & (word in idf):  # checks if the word is both in the word embedding and the tf-idf model
            vec += model[word] * idf[word]  # adds every word embedding to the vector
    if np.linalg.norm(vec) > 0:
        return vec / np.linalg.norm(vec)  # divides the vector by its norm
    else:
        return vec
24f811110f9b6d9b0fc8a0f6ffcf2d37e1cd6feb
3,645,968
def evaluate_interval_detection(labels, predictions, event_val, def_val, seq_length, other_vals=[]): """Evaluate interval detection for sequences by calculating tp, fp, and fn. Extends the metric outlined by Kyritsis et al. (2019) in Modeling wrist micromovements to measure in-meal eating behavior from inertial sensor data https://ieeexplore.ieee.org/abstract/document/8606156/ by introducing additional possible events. Args: labels: The ground truth [batch_size, seq_length], encoding relevant sequences using the vals given in parameters. predictions: The predictions [batch_size, seq_length], encoding relevant sequences using the vals given in parameters. event_val: The value for true events. def_val: The default value for non-events. other_vals: List or 1-D tensor of vals for other events. seq_length: The sequence length. Returns: tp: True positives (number of true sequences of event_vals predicted with at least one predicting event_val) - scalar fp_1: False positives type 1 (number of excess predicting event_vals matching a true sequence of event_val in excess) - scalar fp_2: False positives type 2 (number of predicting event_vals matching def_val instead of event_val) - scalar fp_3: False positives type 3 (number of predicting event_vals matching other_vals instead of event_val) - 1D tensor with value for each element in other_vals fn: False negatives (number of true sequences of event_vals not matched by at least one predicting event_val) """ def sequence_masks(labels, event_val, def_val, batch_size, seq_length): """Generate masks [labels, max_seq_count, seq_length] for all event sequences in the labels""" # Mask non-event elements as False and event elements as True event_mask = tf.equal(labels, event_val) # Mask elements that are not equal to previous elements diff_mask = tf.not_equal(event_mask[:, 1:], event_mask[:, :-1]) prev_mask = tf.concat([tf.ones_like(labels[:, :1], tf.bool), diff_mask], axis=1) next_mask = tf.concat([diff_mask, tf.ones_like(labels[:, :1], tf.bool)], axis=1) # Test if there are no sequences empty = tf.equal(tf.reduce_sum(tf.cast(event_mask, tf.int32)), 0) # Mask sequence starts and ends seq_start_mask = tf.logical_and(prev_mask, event_mask) seq_end_mask = tf.logical_and(next_mask, event_mask) # Scatter seq_val seq_count_per_batch = tf.reduce_sum(tf.cast(seq_start_mask, tf.int32), axis=[1]) max_seq_count = tf.reduce_max(seq_count_per_batch) seq_val_idx_mask = tf.reshape(tf.sequence_mask(seq_count_per_batch, maxlen=max_seq_count), [-1]) seq_val_idx = tf.boolean_mask(tf.range(tf.size(seq_val_idx_mask)), seq_val_idx_mask) seq_vals = tf.boolean_mask(labels, seq_start_mask) seq_val = tf.scatter_nd( indices=tf.expand_dims(seq_val_idx, axis=1), updates=seq_vals, shape=tf.shape(seq_val_idx_mask)) seq_val = tf.reshape(seq_val, [batch_size, max_seq_count]) # Set elements of seq_val that are not event_val to def_val seq_val = tf.where( tf.not_equal(seq_val, tf.fill(tf.shape(seq_val), event_val)), x=tf.fill(tf.shape(seq_val), def_val), y=seq_val) # Scatter seq_start seq_start_idx = tf.where(seq_start_mask)[:,1] seq_start = tf.scatter_nd( indices=tf.expand_dims(seq_val_idx, axis=1), updates=seq_start_idx, shape=tf.shape(seq_val_idx_mask)) seq_start = tf.reshape(seq_start, [batch_size, max_seq_count]) # Scatter seq_end seq_end_idx = tf.where(seq_end_mask)[:,1] seq_end = tf.scatter_nd( indices=tf.expand_dims(seq_val_idx, axis=1), updates=seq_end_idx, shape=tf.shape(seq_val_idx_mask)) seq_end = tf.reshape(seq_end, [batch_size, max_seq_count]) def batch_seq_masks(starts, ends, 
length, vals, def_val): """Return seq masks for one batch""" def seq_mask(start, end, length, val, def_val): """Return one seq mask""" return tf.concat([ tf.fill([start], def_val), tf.fill([end-start+1], val), tf.fill([length-end-1], def_val)], axis=0) return tf.map_fn( fn=lambda x: seq_mask(x[0], x[1], length, x[2], def_val), elems=(starts, ends, vals), dtype=tf.int32) seq_masks = tf.cond(empty, lambda: tf.fill([batch_size, 0, seq_length], def_val), lambda: tf.map_fn( fn=lambda x: batch_seq_masks(x[0], x[1], seq_length, x[2], def_val), elems=(seq_start, seq_end, seq_val), dtype=tf.int32)) return seq_masks, max_seq_count labels = tf.cast(labels, dtype=tf.int32) predictions = tf.cast(predictions, dtype=tf.int32) def_val = tf.cast(def_val, dtype=tf.int32) event_val = tf.cast(event_val, dtype=tf.int32) # Dimensions batch_size = labels.get_shape()[0] # Compute whether labels are empty (no event_val sequences) event_mask = tf.equal(labels, event_val) empty = tf.equal(tf.reduce_sum(tf.cast(event_mask, tf.int32)), 0) # Derive positive ground truth mask; reshape to [n_gt_seq, seq_length] pos_mask, max_seq_count = sequence_masks(labels, event_val=event_val, def_val=def_val, batch_size=batch_size, seq_length=seq_length) pos_mask = tf.reshape(pos_mask, [-1, seq_length]) # Mask of default events def_mask = tf.equal(labels, def_val) # Masks for other events other_masks = tf.map_fn(fn=lambda x: tf.equal(labels, x), elems=tf.convert_to_tensor(other_vals, dtype=tf.int32), dtype=tf.bool) # Retain only event_val in predictions predictions = tf.where( tf.not_equal(predictions, tf.fill(tf.shape(predictions), event_val)), x=tf.fill(tf.shape(predictions), def_val), y=predictions) # Stack predictions accordingly pred_stacked = tf.reshape(tf.tile(tf.expand_dims(predictions, axis=1), [1, max_seq_count, 1]), [-1, seq_length]) # Remove empty masks and according preds keep_mask = tf.greater(tf.reduce_sum(tf.cast(tf.not_equal(pos_mask, def_val), tf.int32), axis=1), 0) pos_mask = tf.cond(empty, lambda: pos_mask, lambda: tf.boolean_mask(pos_mask, keep_mask)) pred_stacked = tf.cond(empty, lambda: pred_stacked, lambda: tf.boolean_mask(pred_stacked, keep_mask)) # Calculate number predictions per pos sequence # Reduce predictions to elements in pos_mask that equal event_val, then count them pred_sums = tf.map_fn( fn=lambda x: tf.reduce_sum(tf.cast(tf.equal(tf.boolean_mask(x[0], tf.equal(x[1], event_val)), event_val), tf.int32)), elems=(pred_stacked, pos_mask), dtype=tf.int32) # Calculate true positive, false positive and false negative count tp = tf.reduce_sum(tf.map_fn(lambda count: tf.cond(count > 0, lambda: 1, lambda: 0), pred_sums)) fn = tf.reduce_sum(tf.map_fn(lambda count: tf.cond(count > 0, lambda: 0, lambda: 1), pred_sums)) fp_1 = tf.cond(empty, lambda: 0, lambda: tf.reduce_sum(tf.map_fn(lambda count: tf.cond(count > 1, lambda: count-1, lambda: 0), pred_sums))) # False positives of type 2 are any detections on default events fp_2 = tf.reduce_sum(tf.cast(tf.equal(tf.boolean_mask(predictions, def_mask), event_val), tf.int32)) # False positives of type 3 are any detections on other events fp_3 = tf.map_fn( fn=lambda x: tf.reduce_sum(tf.cast(tf.equal(tf.boolean_mask(predictions, x), event_val), tf.int32)), elems=other_masks, dtype=tf.int32) tp = tf.cast(tp, tf.float32) fp_1 = tf.cast(fp_1, tf.float32) fp_2 = tf.cast(fp_2, tf.float32) fp_3 = tf.cast(fp_3, tf.float32) fn = tf.cast(fn, tf.float32) return tp, fp_1, fp_2, fp_3, fn
4149eaf357d28236077e9cfac7d7ed8ee113818c
3,645,969
def _div(v):
    """Pure spatial divergence"""
    return _div_id(np.vstack((v, [np.zeros_like(v[0])])), l1_ratio=0.)
706a97e4d5930067b6524210738fce5b27f407c5
3,645,970
import aiohttp
import logging


async def delete_share_handler(request: aiohttp.web.Request) -> aiohttp.web.Response:
    """Handle unshare-container endpoint query."""
    try:
        await request.app["db_conn"].delete_share(
            request.match_info["owner"],
            request.match_info["container"],
            request.query["user"].split(","),
        )
    except KeyError:
        # If can't find user from query, the client wants a bulk unshare
        return await delete_container_shares_handler(request)

    MODULE_LOGGER.log(
        logging.DEBUG,
        "Deleted following shared containers: %s",
        str(request.match_info["container"]),
    )

    return aiohttp.web.Response(status=204, body="OK")
400888b0b75e8c4c73337b1f3444304b3526f510
3,645,971
def _always_run(*args, **kwargs) -> bool:
    """
    This returns False to indicate that the step is not already completed.
    """
    return False
db31e0ac20ac0eef410fb051928308ce7414f5b6
3,645,972
def generate_urls(search):
    """Generates URLs in the correct format that lead to the Google Image search page"""
    return [(BASE_URL + quote(word) + GOOGLE_PICTURE_ID) for word in search]
4d7d13cdf15fb3e029f11bb2e3f28920cf7c2f97
3,645,973
def batch_provider(data, batch_size, processor=None, worker_count=1, queue_size=16, report_progress=True):
    """Return an object that produces a sequence of batches from input data.

    Input data is split into batches of size :attr:`batch_size` which are processed with
    function :attr:`processor`.

    Data is split and processed by separate threads and dumped into a queue, allowing
    continuous provision of data. The main purpose of this primitive is to provide an
    easy-to-use tool for parallel batch processing/generation in the background while the
    main thread runs the main algorithm. Batches are processed in parallel, allowing better
    utilization of CPU cores and disk, which may improve GPU utilization for DL tasks with a
    storage/IO bottleneck.

    This primitive can be used in various ways. For small datasets, the input :attr:`data`
    list may contain the actual dataset, while the :attr:`processor` function does little to
    no data processing. For larger datasets, the :attr:`data` list may contain just filenames
    or keys while the :attr:`processor` function reads data from disk or db.

    There are many purposes that the function :attr:`processor` can be used for, depending on
    your use case:

        - Reading data from disk or db.
        - Data decoding, e.g. from JPEG.
        - Augmenting data: flipping, rotating, adding noise, etc.
        - Concatenation of data, stacking to a single ndarray, conversion to a tensor,
          uploading to GPU.
        - Data generation.

    Note:
        Sequential order of batches is guaranteed only if the number of workers is 1
        (default); otherwise batches might be supplied out of order.

    Args:
        data (list): Input data; each entry in the list should be a separate data point.
        batch_size (int): Size of a batch. If the size of the data is not divisible by
            :attr:`batch_size`, then the last batch will have a smaller size.
        processor (Callable[[list], Any], optional): Function for processing batches.
            Receives a slice of the :attr:`data` list as input. Can return an object of
            any type. Defaults to None.
        worker_count (int, optional): Number of workers; should be greater than or equal
            to one. To process data in parallel and fully load the CPU, :attr:`worker_count`
            should be close to the number of CPU cores. Defaults to one.
        queue_size (int, optional): Maximum size of the queue, i.e. the number of batches
            to buffer. Should be larger than :attr:`worker_count`. Typically, one would want
            this to be as large as possible to amortize all disk IO and computational costs.
            The downside of a large value is increased RAM consumption. Defaults to 16.
        report_progress (bool, optional): Print a progress bar similar to `tqdm`. You may
            still use `tqdm` if you set :attr:`report_progress` to False. To use `tqdm`
            just do ::

                for x in tqdm(batch_provider(...)):
                    ...

            Defaults to True.

    Returns:
        Iterator: An object that produces a sequence of batches. The :meth:`next()` method
        of the iterator will return the object that was produced by the :attr:`processor`
        function.

    Raises:
        StopIteration: When all data has been iterated through. Stops the for loop.

    Example:
        ::

            def process(batch):
                images = [misc.imread(x[0]) for x in batch]
                images = np.asarray(images, dtype=np.float32)
                images = images.transpose((0, 3, 1, 2))
                labels = [x[1] for x in batch]
                labels = np.asarray(labels, np.int)
                return torch.from_numpy(images) / 255.0, torch.from_numpy(labels)

            data = [('some_list.jpg', 1), ('of_filenames.jpg', 2), ('etc.jpg', 4), ...]
            # filenames and labels

            batches = dlutils.batch_provider(data, 32, process)

            for images, labels in batches:
                result = model(images)
                loss = F.nll_loss(result, labels)
                loss.backward()
                optimizer.step()
    """
    class State:
        def __init__(self):
            self.current_batch = 0
            self.lock = Lock()
            self.data_len = len(data)
            self.batch_count = self.data_len // batch_size + (1 if self.data_len % batch_size != 0 else 0)
            self.quit_event = Event()
            self.queue = Queue(queue_size)
            self.batches_done_count = 0
            self.progress_bar = None
            if report_progress:
                self.progress_bar = ProgressBar(self.batch_count)

        def get_next_batch_it(self):
            try:
                self.lock.acquire()
                if self.quit_event.is_set() or self.current_batch == self.batch_count:
                    raise StopIteration
                cb = self.current_batch
                self.current_batch += 1
                return cb
            finally:
                self.lock.release()

        def push_done_batch(self, batch):
            try:
                self.lock.acquire()
                self.queue.put(batch)
                self.batches_done_count += 1
            finally:
                self.lock.release()

        def all_done(self):
            return self.batches_done_count == self.batch_count and self.queue.empty()

    if processor is None:
        def processor(x):
            return x

    def _worker(state):
        while not state.quit_event.is_set():
            try:
                cb = state.get_next_batch_it()
                data_slice = data[cb * batch_size:min((cb + 1) * batch_size, state.data_len)]
                b = processor(data_slice)
                state.push_done_batch(b)
            except StopIteration:
                break

    class Iterator:
        def __init__(self):
            self.state = State()
            self.workers = []
            for i in range(worker_count):
                worker = Thread(target=_worker, args=(self.state, ))
                worker.daemon = True
                worker.start()
                self.workers.append(worker)

        def __len__(self):
            return self.state.batch_count

        def __iter__(self):
            return self

        def __next__(self):
            if not self.state.quit_event.is_set() and not self.state.all_done():
                item = self.state.queue.get()
                self.state.queue.task_done()
                if self.state.progress_bar is not None:
                    self.state.progress_bar.increment()
                return item
            else:
                self.state.quit_event.set()
                raise StopIteration

        def __del__(self):
            self.state.quit_event.set()
            while not self.state.queue.empty():
                self.state.queue.get(False)
                self.state.queue.task_done()
            for worker in self.workers:
                worker.join()

    return Iterator()
2760e9bc9977f4fcdc07624bf896d6b48ce1276d
3,645,974
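A self-contained usage sketch for batch_provider() above, kept deliberately small. The import path is an assumption taken from the docstring example (dlutils.batch_provider), and the progress bar is disabled so no extra dependencies are needed.

from dlutils import batch_provider  # assumed import path, taken from the docstring example

data = list(range(10))

# Each batch is a slice of `data`; sum() stands in for a real processing function.
for batch_sum in batch_provider(data, batch_size=3, processor=sum,
                                worker_count=1, report_progress=False):
    print(batch_sum)  # 3, 12, 21, 9 -- sequential because worker_count is 1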
from jax import random


def simplex(key, log_L_constraint, live_points_U,
            loglikelihood_from_constrained,
            prior_transform, sampler_state, replace_id):
    """
    Samples from the prior restricted to the likelihood constraint.

    This undoes the shrinkage at each step to approximate a bound on the contours.
    First it does a scaling on each dimension.

    Args:
        key:
        log_L_constraint:
        live_points_U:
        loglikelihood_from_constrained:

    Returns:

    """
    N, D = live_points_U.shape
    key, width_key = random.split(key, 2)

    def body(state):
        (key, i, u_test, x_test, log_L_test) = state
        key, sample_key, select_key, R_key = random.split(key, 4)
        i = random.randint(select_key, shape=(), minval=0, maxval=N + 1)
        # M,M
        R = random_ortho_matrix(R_key, D)
        # initial L, R for each direction
        # t_R[i] = max_(k) (points[k,j] - spawn_point_U[j]) @ R[j,i]
        # t_L[i] = max_(k) (points[k,j] - spawn_point_U[j]) @ -R[j,i]
        # N, M
        dx = live_points_U[sampler_state.knn_indices[i, :], :] - live_points_U[i, :]
        # [N, M]
        t = dx @ R
        # [M]
        t_R = jnp.maximum(jnp.max(t, axis=0), 0.)
        t_L = jnp.minimum(jnp.min(t, axis=0), 0.)
        u_test = live_points_U[i, :] + R @ random.uniform(sample_key, shape=[D], minval=t_L, maxval=t_R)
        u_test = jnp.clip(u_test, 0., 1.)
        x_test = prior_transform(u_test)
        log_L_test = loglikelihood_from_constrained(**x_test)
        return (key, i + 1, u_test, x_test, log_L_test)

    (key, num_likelihood_evaluations, u_new, x_new, log_L_new) = while_loop(
        lambda state: state[-1] <= log_L_constraint,
        body,
        (key, 0, live_points_U[0, :], prior_transform(live_points_U[0, :]), log_L_constraint))

    new_dist = jnp.linalg.norm(u_new - dynamic_update_slice(live_points_U, u_new[None, :], [replace_id, 0]), axis=1)
    new_dist = jnp.where(new_dist == 0., jnp.inf, new_dist)
    new_indices = jnp.argsort(new_dist)[:D + 1]
    knn_indices = dynamic_update_slice(sampler_state.knn_indices, new_indices[None, :], [replace_id, 0])
    sampler_state = sampler_state._replace(knn_indices=knn_indices)

    CubesResults = namedtuple('CubesResults',
                              ['key', 'num_likelihood_evaluations', 'u_new', 'x_new', 'log_L_new', 'sampler_state'])
    return CubesResults(key, num_likelihood_evaluations, u_new, x_new, log_L_new, sampler_state)
f88038eca201b87fdc8f4b4722357a4eafd0366e
3,645,975
def has_anonymous_link(node, auth):
    """check if the node is anonymous to the user

    :param Node node: Node which the user wants to visit
    :param str link: any view-only link in the current url
    :return bool anonymous: Whether the node is anonymous to the user or not
    """
    if auth.private_link:
        return auth.private_link.anonymous
    return False
c5941bce3f0110dfcd5e9bbb19bae0682c5e731f
3,645,976
def lstm(c_prev, x):
    """Long Short-Term Memory units as an activation function.

    This function implements LSTM units with forget gates. Let the previous
    cell state :math:`c_{\\text{prev}}` and the incoming signal :math:`x`.

    First, the incoming signal :math:`x` is split into four arrays
    :math:`a, i, f, o` of the same shapes along the second axis. It means that
    :math:`x` 's second axis must have 4 times the length of
    :math:`c_{\\text{prev}}`.

    The split input signals correspond to:

        - :math:`a` : sources of cell input
        - :math:`i` : sources of input gate
        - :math:`f` : sources of forget gate
        - :math:`o` : sources of output gate

    Second, it computes outputs as:

    .. math::

        c &= \\tanh(a) \\text{sigmoid}(i)
             + c_{\\text{prev}} \\text{sigmoid}(f), \\\\
        h &= \\tanh(c) \\text{sigmoid}(o).

    These are returned as a tuple of two variables.

    Args:
        c_prev (~chainer.Variable): Variable that holds the previous cell
            state. The cell state should be a zero array or the output of the
            previous call of LSTM.
        x (~chainer.Variable): Variable that holds the incoming signal. It must
            have the second dimension four times that of the cell state.

    Returns:
        tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``. ``c`` is
            the updated cell state. ``h`` indicates the outgoing signal.

    See the original paper proposing LSTM with forget gates:
    `Long Short-Term Memory in Recurrent Neural Networks
    <http://www.felixgers.de/papers/phd.pdf>`_.

    .. admonition:: Example

        Assuming ``y`` is the current input signal, ``c`` is the previous cell
        state, and ``h`` is the previous output signal from an ``lstm``
        function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
        Most typical preparation of ``x`` is:

        >>> model = FunctionSet(w=F.Linear(n_units, 4 * n_units),
        ...                     v=F.Linear(n_units, 4 * n_units),
        ...                     ...)
        >>> x = model.w(y) + model.v(h)
        >>> c, h = F.lstm(c, x)

        It corresponds to calculating the input sources :math:`a, i, f, o`
        from the current input ``y`` and the previous output ``h``. Different
        parameters are used for different kinds of input sources.

    """
    return LSTM()(c_prev, x)
795fb92554c04be29a75f770fe0fb88d4224f94a
3,645,977
import torch
from time import time


def hals(video, video_factorization, maxiter_hals=30, nnt=False,
         verbose=False, indent='', device='cuda', **kwargs):
    """Perform maxiter HALS updates to temporal & spatial components

    Parameters:
        video: LowRankVideo class object
        video_factorization: localized NMF factors
        maxiter_hals: maximum number of iterations to tune hals
        nnt: whether or not temporal components should be constrained to be nonnegative
        verbose: whether or not to print status updates
        indent: previous indentation for printing status updates
        device: computation device
        **kwargs: optional additional input arguments

    Return:
        hals iteration counter
    """
    for itr in range(maxiter_hals):
        if verbose:
            if device == 'cuda':
                torch.cuda.synchronize()
            print(indent + '|--v HALS Iteration {:g}'.format(itr + 1))
            itr_t0 = time()
            step_t0 = itr_t0

        # Spatial Update Step
        video_factorization.update_spatial(video)
        if verbose:
            if device == 'cuda':
                torch.cuda.synchronize()
            print(indent + '| |--> Spatial update took {:g} seconds'.format(time() - step_t0))
            step_t0 = itr_t0

        # Remove Empty Components
        video_factorization.prune_empty_components()
        video_factorization.normalize_spatial()
        if verbose:
            if device == 'cuda':
                torch.cuda.synchronize()
            print(indent + '| |--> Component prune after spatial update took {:g} seconds'.format(time() - step_t0))
            step_t0 = itr_t0

        # Temporal Update Step
        video_factorization.update_temporal(video, nonnegative=nnt)
        if verbose:
            if device == 'cuda':
                torch.cuda.synchronize()
            print(indent + '| |--> Temporal update took {:g} seconds'.format(time() - step_t0))
            print(indent + '| \'-total : {:g} seconds'.format(time() - itr_t0))

        # Remove Empty Components
        video_factorization.prune_empty_components()
        if verbose:
            if device == 'cuda':
                torch.cuda.synchronize()
            print(indent + '| |--> Component prune after temporal update took {:g} seconds'.format(time() - step_t0))
            step_t0 = itr_t0

    return itr + 1
a993f9e434c3196110c0569cb124d9edbb794dec
3,645,978
def generate_random_sd(error, seq=None):
    """ generates random sd with error% error rate
    If seq is specified, random sd is generated from a substring of it."""
    if seq is None:
        seq1 = randSeq(rand(minLen, maxLen))
    else:
        length = rand(minLen, maxLen)
        start = rand(0, len(seq) - length - 1)
        seq1 = seq[start:start + length]
    sED = rand(max(0, error - maxLED), min(maxSED, error))
    seq2 = makeSmall(seq1, sED)[0]
    seq2 = makeLarge(seq2, error - sED)[0]
    return seq1, seq2, sED
34694895cd37e5714666c3f9f80ae5a010310d3c
3,645,979
def is_successful(gsm_log):
    """
    Success is defined as having converged to a transition state.
    """
    with open(gsm_log) as f:
        for line in reversed(f.readlines()):
            if '-XTS-' in line or '-TS-' in line:
                return True
    return False
9bab6837c8e6b818cceb025c5df9aed78074edcf
3,645,980
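A quick sketch exercising is_successful() above on a throwaway log file; the '-TS-' marker string is taken directly from the function body.

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".log", delete=False) as f:
    f.write("gsm iteration 1\n")
    f.write("gsm iteration 2\n")
    f.write(" -TS- converged\n")   # marker line the function scans for
    log_path = f.name

print(is_successful(log_path))  # True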
def indicator(function_array_to_be_indicated, its_domain, barrier):
    """the indicator influences the function argument, not value.
    So here it iterates through the x-domain and cuts any values of the function
    with an argument less than H"""
    indicated = []
    for index in range(len(its_domain)):
        if its_domain[index] > barrier:
            indicated.append(function_array_to_be_indicated[index])
        else:
            indicated.append(0)
    return indicated
440f423b7b25b0d152bc691acd3d7dea6c785aed
3,645,981
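A worked example for indicator() above: function values whose domain point does not exceed the barrier are replaced by 0.

xs = [0.0, 0.5, 1.0, 1.5, 2.0]   # domain
fx = [10, 20, 30, 40, 50]        # function values on that domain

print(indicator(fx, xs, barrier=1.0))
# [0, 0, 0, 40, 50] -- only points with x > 1.0 keep their value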
def list_calendars(service):
    """
    Given a google 'service' object, return a list of calendars.
    Each calendar is represented by a dict, so that it can be stored in the
    session object and converted to json for cookies.
    The returned list is sorted to have the primary calendar first, and
    selected (that is, displayed in Google Calendars web app) calendars before
    unselected calendars.
    """
    app.logger.debug("Entering list_calendars")
    calendar_list = service.calendarList().list().execute()["items"]
    result = []
    for cal in calendar_list:
        kind = cal["kind"]
        id = cal["id"]
        if "description" in cal:
            desc = cal["description"]
        else:
            desc = "(no description)"
        summary = cal["summary"]
        # Optional binary attributes with False as default
        selected = ("selected" in cal) and cal["selected"]
        primary = ("primary" in cal) and cal["primary"]

        result.append(
            {"kind": kind,
             "id": id,
             "summary": summary,
             "selected": selected,
             "primary": primary
             })
    return sorted(result, key=cal_sort_key)
c84efde3699a2c7b3e88dc0a0c799a0da6b4bebb
3,645,982
def _causes_name_clash(candidate, path_list, allowed_occurences=1):
    """Determine if candidate leads to a name clash.

    Args:
        candidate (tuple): Tuple with parts of a path.
        path_list (list): List of pathlib.Paths.
        allowed_occurences (int): How often a name can occur before we call
            it a clash.

    Returns:
        bool

    """
    duplicate_counter = -allowed_occurences
    for path in path_list:
        parts = tuple(reversed(path.parts))
        if len(parts) >= len(candidate) and parts[: len(candidate)] == candidate:
            duplicate_counter += 1
    return duplicate_counter > 0
3b874e4ea6d8780483100e464e3325321c82689e
3,645,983
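A usage sketch for _causes_name_clash() above. The candidate tuple is given with its parts reversed (deepest component first), matching the reversed(path.parts) comparison inside the function.

from pathlib import Path

paths = [Path("project/models/config.py"), Path("project/tasks/config.py")]

# ("config.py",) matches both paths -> more than the single allowed occurrence.
print(_causes_name_clash(("config.py",), paths))            # True
# ("config.py", "models") matches only the first path -> no clash.
print(_causes_name_clash(("config.py", "models"), paths))   # False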
def run_eqm(results: Results, options: Options, state: PromisedObject) -> dict:
    """Run the eqm jobs."""
    # set user-defined values
    results['job_opts_eqm'] = edit_calculator_options(
        options, ['eqm', 'xtpdft', 'esp2multipole'])

    cmd_eqm_write = create_promise_command(
        "xtp_parallel -e eqm -o {} -f {} -s 0 -j write",
        results['job_opts_eqm']['eqm'], state)

    results['job_setup_eqm'] = call_xtp_cmd(
        cmd_eqm_write, options.scratch_dir, expected_output={"eqm_jobs": "eqm.jobs"})

    # Select the number of jobs to run based on the input provided by the user
    results['job_select_eqm_jobs'] = edit_jobs_file(
        results['job_setup_eqm']['eqm_jobs'], options.eqm_jobs)

    jobs_eqm = distribute_eqm_jobs(results, options, state)

    # Finally move all the OR_FILES to the same folder in the scratch_dir
    names = ('molecule_orb', 'dft_orb', 'mps_file')
    return move_results_to_workdir(jobs_eqm, names, options.scratch_dir)
941d51a0de22dd4d66f5de68fc315bf318d112cf
3,645,984
def is_within_boundary(boundary_right_most_x, boundary_top_most_y,
                       boundary_left_most_x, boundary_bottom_most_y, cursor):
    """
    Checks if cursor is within given boundary
    :param boundary_right_most_x:
    :param boundary_top_most_y:
    :param boundary_left_most_x:
    :param boundary_bottom_most_y:
    :param cursor:
    :return: boolean
    """
    if cursor.y < boundary_top_most_y:
        return False
    elif cursor.y > boundary_bottom_most_y:
        return False
    elif cursor.x < boundary_left_most_x:
        return False
    elif cursor.x > boundary_right_most_x:
        return False
    return True
d53106c9d525eb1bb51cfe4c30bc7e143ac6a517
3,645,985
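A minimal sketch for is_within_boundary() above; the cursor only needs .x and .y attributes, so a namedtuple stands in for whatever cursor class the original code uses. The boundary values below are arbitrary.

from collections import namedtuple

Cursor = namedtuple("Cursor", ["x", "y"])  # stand-in for the original cursor object

# Boundary: x in [10, 100], y in [20, 200] (screen coordinates, top < bottom).
print(is_within_boundary(100, 20, 10, 200, Cursor(x=50, y=50)))   # True
print(is_within_boundary(100, 20, 10, 200, Cursor(x=5, y=50)))    # False (left of boundary)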
import os


def mixrng(numbytes, port='COM4'):
    """Returns bitwise xor of an inbuilt and hardware CSRNG"""
    internal = os.urandom(numbytes)
    external = extrng(numbytes, port)
    return xorbytes(internal, external)
0b8eaa516c4afeb697474649f13e3e9d6c3f6867
3,645,986
def save_matchmaking_auth_key(auth_key: str) -> bool:
    """Register a new matchmaking auth key.

    !This will overwrite the existing matchmaking key for this chain!

    Args:
        auth_key: auth_key to add for matchmaking

    Returns:
        Boolean if successful
    """
    try:
        redis.set_sync(MATCHMAKING_KEY_LOCATION, auth_key)
        return True
    except Exception:
        return False
5f18614f2b2950a942e7a98773911b7f58aabd74
3,645,987
import requests
from bs4 import BeautifulSoup


def get_game_page(url):
    """
    Get the HTML for a given URL, where the URL is a game's page in the Xbox Store
    """
    try:
        response = requests.get(url)
    except (requests.exceptions.MissingSchema, ConnectionError):
        return None
    game_page = BeautifulSoup(response.content, "html.parser")
    return game_page
40d578ce8cda0b5139515e03f8308911169e0442
3,645,988
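A usage sketch for get_game_page() above; the URL is a placeholder and a live network connection is needed for the request to succeed.

page = get_game_page("https://www.xbox.com/en-US/games/store/some-game/XXXXXXXXX")  # placeholder URL
if page is not None:
    # `page` is a BeautifulSoup object, so the usual query helpers are available.
    title_tag = page.find("title")
    print(title_tag.get_text() if title_tag else "no <title> tag found")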
from typing import List
import logging
import itertools
import asyncio


async def generate_spam_round_tx_xdrs(pool, prioritizers: List[Keypair],
                                      prioritized_builders, unprioritized_builders, rnd):
    """Generate transaction XDRs for a single spam round (ledger) according to given builders.

    Some of the generated transactions are prioritized using given prioritizer seeds,
    and some are unprioritized and not signed by a prioritizer account.

    All prioritized transactions are expected to be included in the next ledger.
    Only one out of all unprioritized transactions is expected to be included in the next ledger.

    Return a metadata dictionary with the generated XDRs along with additional information.
    """
    logging.info('generating transaction xdrs for round %d', rnd)

    # make a cyclic list of builders.
    # we will use this list to fetch a destination address for each payment tx,
    # making all builders send a tx to the next builder right after them
    # in line in a cyclic manner. this is done in order to cycle through
    # destination addresses instead of sending all txs to a single destination
    # account.
    cycl = itertools.cycle(unprioritized_builders)
    next(cycl)  # make sure the next cycle call will return the next builder after the current one

    # generate unprioritized payment transactions.
    # we generate them first, thus will submit them first,
    # because we want to test if prioritized transactions actually get priority over them
    loop = asyncio.get_running_loop()
    futurs = []
    for builder in unprioritized_builders:
        dest_address = next(cycl).keypair.address().decode()
        f = loop.run_in_executor(
            pool, build_and_sign, builder, dest_address, PAYMENT_AMOUNT, None)
        futurs.append(f)
    if not futurs:
        raise RuntimeError('no futures to gather')

    tx_metadata = {}
    for tx_hash, tx_xdr in await asyncio.gather(*futurs):
        tx_metadata[tx_hash] = {'round': rnd, 'prioritized': False, 'xdr': tx_xdr}

    # generate prioritized transactions
    futurs = []
    cycl = itertools.cycle(prioritized_builders)
    for builder, prioritizer in zip(prioritized_builders, prioritizers):
        dest_address = next(cycl).keypair.address().decode()
        f = loop.run_in_executor(
            pool, build_and_sign, builder, dest_address, PAYMENT_AMOUNT, prioritizer.secret_seed)
        futurs.append(f)
    if not futurs:
        raise RuntimeError('no futures to gather')

    for tx_hash, tx_xdr in await asyncio.gather(*futurs):
        tx_metadata[tx_hash] = {'round': rnd, 'prioritized': True, 'xdr': tx_xdr}

    return tx_metadata
7e33455718f6c99ccb9fc1ad6a7c3de47964ec98
3,645,989
import collections
import datetime


def json_custom_parser(obj):
    """
    A custom json parser to handle json.dumps calls properly for Decimal and
    Datetime data types.
    """
    if isinstance(obj, Decimal):
        return float(obj)
    elif not isinstance(obj, basestring) and isinstance(obj, collections.Iterable):
        return list(obj)
    elif isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date):
        dot_ix = 19  # 'YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM'.find('.')
        return obj.isoformat()[:dot_ix]
    else:
        raise TypeError(obj)
fb5d14b4416df4540ed3091dcf229aa7b037003d
3,645,990
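json_custom_parser() above is meant to be passed to json.dumps via its default= hook, which is only called for objects json cannot serialize on its own. Note the snippet targets Python 2 (basestring, collections.Iterable), so this call pattern assumes a Python 2 interpreter; the payload values below are made up.

import json
import datetime
from decimal import Decimal

payload = {
    "price": Decimal("19.99"),
    "created": datetime.datetime(2020, 5, 17, 12, 30, 45),
    "tags": {"python"},  # a set is not JSON-serializable by default, so the Iterable branch handles it
}

print(json.dumps(payload, default=json_custom_parser, sort_keys=True))
# -> {"created": "2020-05-17T12:30:45", "price": 19.99, "tags": ["python"]}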
import sys


def process(arguments: list = None) -> str:
    """
    Run the process

    Parameters
    ----------
    arguments : list
        Injectable arguments to execute

    Returns
    -------
    str
        The name of the library command to execute
    """
    if not arguments:
        arguments = []

    __prepare()
    args = __parse_arguments(arguments if arguments else sys.argv[1:])
    if not __validate_arguments(args):
        print_error("Argument combination not valid!")
        sys.exit(1)
    __check_devices(args)
    return __dispatch_command(args)
9216d8db0f806d7d097343f9e0aa906e031414d7
3,645,991
def fs_open(path, flag, mode=default_file_mode):
    """ Open a file, potentially creating it.
    Return the new fd's id or else -1 if the file can not be opened
    (or potentially created) """

    # Check if file should be created if it doesn't exist
    O_CREAT = 64
    create = flag & 64

    # If requested, try to create the file
    if create:
        try:
            filesys.add_file(path, mode, 0)
        except AlreadyExistsError:
            # File may already exist, which is ok with O_CREAT
            pass
        except Exception:
            return -1

    # Call the virtual fs to open the file
    try:
        inodeid = filesys.open_file(path)
    except DoesNotExistError:
        return -1

    # Add an fd for this file to the open files state
    return fstate.create_fd(inodeid)
218940a6fc14c47f7a3df6d9a4e1bbc971b6b0b5
3,645,992
def new_user():
    """
    Create an instance of the User class to be used by the module
    """
    user_details = ['Daudi', 'Jesee', 'dj@mail.com', 'password']
    user = Users(user_details)
    return user
4d5b2c4cad858113fceef150143b9688488000f4
3,645,993
import math


def normalise_angle(angle: float) -> float:
    """Normalises the angle in the range (-pi, pi].

    args:
        angle (rad): The angle to normalise.

    return:
        angle (rad): The normalised angle.
    """
    while angle > math.pi:
        angle -= 2 * math.pi
    while angle <= -math.pi:
        angle += 2 * math.pi
    return angle
0a4cfa6e9da58bfdbb6cd4a04e7a742e8c432002
3,645,994
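A few worked calls for normalise_angle() above, confirming the half-open (-pi, pi] convention.

import math

print(normalise_angle(3 * math.pi))   # pi            (3*pi - 2*pi)
print(normalise_angle(-math.pi))      # pi            (-pi is excluded, so it wraps to +pi)
print(normalise_angle(math.pi / 4))   # 0.785398...   (already in range, unchanged)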
def get_hdf_len(*path):
    """
    Returns the number of rows in an hdf file as an int.
    """
    path = construct_path(*path)
    with pd.HDFStore(path) as store:
        numrows = store.get_storer('data').nrows
    return numrows
ad188b2733612e7ed1950a2df0ef5164f9cda021
3,645,995
def matmul(A, B, transpose_A=False, transpose_B=False, master='/gpu:0'):
    """ distributed matrix multiplication. A: DistMat, B: single tensor or a list of tensors.
    Note: returns a single tensor or a list of tensors, not a DistMat.
    """
    if isinstance(A, tf.Tensor) or isinstance(A, tf.Variable):
        if isinstance(B, tf.Tensor) or isinstance(B, tf.Variable):
            return tf.matmul(A, B)
        else:
            raise NotImplementedError
    if transpose_B:
        raise NotImplementedError
    else:
        if transpose_A:
            # distributed dim is inner axis
            if isinstance(B, tf.Tensor) or isinstance(B, tf.Variable):
                # broadcast
                partial_sums = []
                for i, t in enumerate(A.tensors):
                    with tf.device(t.device):
                        partial_sums.append(
                            tf.matmul(t, B[A.partition[i]:A.partition[i + 1], :], transpose_a=True))
                with tf.device(master):
                    return tf.add_n(partial_sums)
            else:
                partial_sums = []
                for t_A, t_B in zip(A.tensors, B.tensors):
                    # print(t_A.device)
                    # print(t_B.device)
                    # assert t_A.device == t_B.device
                    with tf.device(t_A.device):
                        partial_sums.append(tf.matmul(t_A, t_B, transpose_a=True))
                with tf.device(master):
                    return tf.add_n(partial_sums)
                # distributed computation necessary
                # return tf.add_n([tf.matmul(Apart, Bpart) for Apart, Bpart in zip(A.tensors, B.tensors)])
        else:
            # non-distributed dim is inner axis. merely broadcast B.
            if isinstance(B, tf.Tensor) or isinstance(B, tf.Variable):
                slices = []
                for t in A.tensors:
                    with tf.device(t.device):
                        slices.append(tf.matmul(t, B))
                return distmat.DistMat(slices)
            else:
                raise NotImplementedError
268068cd73b56ef747142ebc2df839d124d406d5
3,645,996
def celerybeat_started():
    """
    Returns true/false depending on whether the celerybeat service is started or not
    """
    if is_systemd():
        running = 'active' in fabric.api.sudo('systemctl is-active %s' % celerybeat_service_name())
        return running
    return fabtools.service.is_running(celerybeat_service_name())
b3578b6dbe91b9a16342c53c488fe01fc37275cd
3,645,997
def highest_greedy_score(board, disks):
    """
    Compute the highest possible score that can be obtained by dropping each of
    the given disks on the given board in a greedy way.
    - The disks must be dropped in the order in which they appear in the given
      list of disks. Each disk is dropped in the best column as computed by the
      function best_drop_for_disk.
    - Upon exit from the function, the board reflects the state obtained from
      dropping the disks. If not all the given disks can be dropped because the
      board gets completely filled, the function only drops the disks it can drop.
    - The function returns a tuple of (1) the highest score followed by (2) a
      tuple of columns in which the successive disks have been dropped.
    - Upon return, the given list of disks only stores disks that have not been
      dropped on the board.
    - The function will not take into account possible raises of level while
      dropping disks, i.e. the resulting score only reflects scores obtained
      from dropping disks as computed by the function drop_disk_at.
    - This function must be implemented in a RECURSIVE way.
    ASSUMPTIONS
    - The given board is a playable board, and each of the given disks is a
      proper disk for the given board.
    - None of the given disks is cracked.
    """
    score = 0
    columns = ()
    if len(disks) == 0 or Board.is_full(board):
        # No more disks to drop
        return score, columns
    else:
        disk_to_drop = disks[0]
        column_best_drop, score_best_drop = best_drop_for_disk(board, disk_to_drop)
        del disks[0]
        score, columns = highest_greedy_score(board, disks)
        columns = (column_best_drop,) + columns
        score += score_best_drop
        return score, columns
22fc70db81d6051158bdb9bf80a42d81a215dba1
3,645,998
def normalize_and_discard(df: pd.DataFrame) -> pd.DataFrame:
    """
    Normalize numeric values between 0 and 1 and discard records that are out of bounds.
    """
    # ## 2. Discard values out of range of x and y
    df_cleaned = df[(df.x >= 0) & (df.x <= 120) & (df.y >= 0) & (df.y <= (160 / 3))]
    print(f'Shape difference {df.shape[0] - df_cleaned.shape[0]}')

    # ## 3. Normalize x, y, s, a, dis, o, dir on scale 0-1
    # thresholds are determined by examining data from all weeks
    df_cleaned.x = df_cleaned.x / df.x.max()
    df_cleaned.y = df_cleaned.y / df.y.max()
    df_cleaned.s = df_cleaned.s / SPEED_MAX_THRESHOLD
    df_cleaned.a = df_cleaned.a / ACCELERATION_MAX_THRESHOLD
    df_cleaned.dis = df_cleaned.dis / DISTANCE_MAX_THRESHOLD
    df_cleaned.o = df_cleaned.o / 360
    df_cleaned.dir = df_cleaned.dir / 360

    df_n2 = df_cleaned[[
        'time', 'x', 'y', 's', 'a', 'dis', 'o', 'dir', 'event', 'frameId', 'team',
        'gameId', 'playId', 'quarter', 'homeHasPossession', 'down', 'playType',
        'defendersInTheBox', 'numberOfPassRushers', 'passResult', 'isDefensivePI'
    ]]
    df_n2.quarter /= 5.0                 # max quarters
    df_n2.down /= 4.0                    # max downs
    df_n2.defendersInTheBox /= 11.0
    df_n2.numberOfPassRushers /= 11.0
    return df_n2
0b1a1e6ed76c72797cf7b3f65058592c6ec95b03
3,645,999