Columns: content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
def _insert_text_func(s, readline):
    """Creates a function to insert text via readline."""
    def inserter():
        readline.insert_text(s)
        readline.redisplay()
    return inserter
06532be051cb69b92fa79ef339edb733b8f31c15
3,647,300
def dumps(ndb_model, **kwargs):
    """Custom json dumps using the custom encoder above."""
    return NdbEncoder(**kwargs).encode(ndb_model)
0f0a74cdaedd81a95874f745ad1bc24881d1fb73
3,647,301
def classifier_on_rbm_scores(models, dataset_train, dataset_test, clfs=None):
    """
    TODO: store progress on hierarchical clustering; that is the slowest step
    clfs: list of classifiers
    """
    if clfs is None:
        clfs = [CLASSIFIER]
    features_order = list(models.keys())
    feature_dim = len(features_order)

    def get_X_y_features(dataset):
        X = np.zeros((len(dataset), feature_dim))
        y = np.zeros(len(dataset), dtype=int)
        for idx, pair in enumerate(dataset):
            elem_arr, elem_label = pair
            preprocessed_input = binarize_image_data(image_data_collapse(elem_arr),
                                                     threshold=MNIST_BINARIZATION_CUTOFF)
            features = np.array([score_1digit_model(models[key], preprocessed_input)
                                 for key in features_order])
            #features = np.array([score_1digit_model(models[idx], preprocessed_input) for idx in range(10)])
            X[idx, :] = features
            y[idx] = elem_label
        return X, y

    print("[classifier_on_rbm_features] Step 1: get features for training")
    X_train_reduced, y_train = get_X_y_features(dataset_train)
    print("\tTraining data dimension", X_train_reduced.shape, y_train.shape)

    print("[classifier_on_rbm_features] Step 2: train classifier layer")
    for clf in clfs:
        print('fitting...')
        clf.fit(X_train_reduced, y_train)  # fit data

    print("[classifier_on_rbm_features] Step 3: get features for testing")
    X_test_reduced, y_test = get_X_y_features(dataset_test)

    print("[classifier_on_rbm_features] Step 4: classification metrics and confusion matrix")
    cms = [0] * len(clfs)
    accs = [0] * len(clfs)
    for idx, clf in enumerate(clfs):
        print('predicting...')
        predictions = clf.predict(X_test_reduced).astype(int)
        confusion_matrix, matches = confusion_matrix_from_pred(predictions, y_test)
        acc = float(matches.count(True) / len(matches))
        cms[idx] = confusion_matrix
        accs[idx] = acc
        print("Successful test cases: %d/%d (%.3f)" % (matches.count(True), len(matches), acc))
    return cms, accs
acea7cdb174dfc61e2ab5f04d295024d6b58d2f4
3,647,302
import inspect def _evolve_no_collapse_psi_out(config): """ Calculates state vectors at times tlist if no collapse AND no expectation values are given. """ global _cy_rhs_func global _cy_col_spmv_func, _cy_col_expect_func global _cy_col_spmv_call_func, _cy_col_expect_call_func num_times = len(config.tlist) psi_out = np.array([None] * num_times) expect_out = [] for i in range(config.e_num): if config.e_ops_isherm[i]: # preallocate real array of zeros expect_out.append(np.zeros(num_times, dtype=float)) else: # preallocate complex array of zeros expect_out.append(np.zeros(num_times, dtype=complex)) expect_out[i][0] = \ cy_expect_psi_csr(config.e_ops_data[i], config.e_ops_ind[i], config.e_ops_ptr[i], config.psi0, config.e_ops_isherm[i]) if debug: print(inspect.stack()[0][3]) if not _cy_rhs_func: _mc_func_load(config) opt = config.options if config.tflag in [1, 10, 11]: ODE = ode(_cy_rhs_func) code = compile('ODE.set_f_params(' + config.string + ')', '<string>', 'exec') exec(code) elif config.tflag == 2: ODE = ode(_cRHStd) ODE.set_f_params(config) elif config.tflag in [20, 22]: if config.options.rhs_with_state: ODE = ode(_tdRHStd_with_state) else: ODE = ode(_tdRHStd) ODE.set_f_params(config) elif config.tflag == 3: if config.options.rhs_with_state: ODE = ode(_pyRHSc_with_state) else: ODE = ode(_pyRHSc) ODE.set_f_params(config) else: ODE = ode(cy_ode_rhs) ODE.set_f_params(config.h_data, config.h_ind, config.h_ptr) # initialize ODE solver for RHS ODE.set_integrator('zvode', method=opt.method, order=opt.order, atol=opt.atol, rtol=opt.rtol, nsteps=opt.nsteps, first_step=opt.first_step, min_step=opt.min_step, max_step=opt.max_step) # set initial conditions ODE.set_initial_value(config.psi0, config.tlist[0]) psi_out[0] = Qobj(config.psi0, config.psi0_dims, config.psi0_shape) for k in range(1, num_times): ODE.integrate(config.tlist[k], step=0) # integrate up to tlist[k] if ODE.successful(): state = ODE.y / dznrm2(ODE.y) psi_out[k] = Qobj(state, config.psi0_dims, config.psi0_shape) for jj in range(config.e_num): expect_out[jj][k] = cy_expect_psi_csr( config.e_ops_data[jj], config.e_ops_ind[jj], config.e_ops_ptr[jj], state, config.e_ops_isherm[jj]) else: raise ValueError('Error in ODE solver') return expect_out, psi_out
a87fe7c21b0bc31599d94845d41384714c0084da
3,647,303
import os


def get_version_from_package() -> str:
    """Read the package version from the source without importing it."""
    path = os.path.join(os.path.dirname(__file__), "arcpy2foss/__init__.py")
    path = os.path.normpath(os.path.abspath(path))
    with open(path) as f:
        for line in f:
            if line.startswith("__version__"):
                _, version = line.split(" = ", 1)
                version = version.replace('"', "").strip()
                return version
67b0374794ede6c13560265ab3e6ebf3c5b2aef3
3,647,304
def create_critic_train_op(hparams, critic_loss, global_step):
    """Create Discriminator train op."""
    with tf.name_scope('train_critic'):
        critic_optimizer = tf.train.AdamOptimizer(hparams.critic_learning_rate)
        output_vars = [
            v for v in tf.trainable_variables() if v.op.name.startswith('critic')
        ]
        if FLAGS.critic_update_dis_vars:
            if FLAGS.discriminator_model == 'bidirectional_vd':
                critic_vars = [
                    v for v in tf.trainable_variables()
                    if v.op.name.startswith('dis/rnn')
                ]
            elif FLAGS.discriminator_model == 'seq2seq_vd':
                critic_vars = [
                    v for v in tf.trainable_variables()
                    if v.op.name.startswith('dis/decoder/rnn/multi_rnn_cell')
                ]
            critic_vars.extend(output_vars)
        else:
            critic_vars = output_vars
        print('\nOptimizing Critic vars:')
        for v in critic_vars:
            print(v)
        critic_grads = tf.gradients(critic_loss, critic_vars)
        critic_grads_clipped, _ = tf.clip_by_global_norm(critic_grads,
                                                         FLAGS.grad_clipping)
        critic_train_op = critic_optimizer.apply_gradients(
            zip(critic_grads_clipped, critic_vars), global_step=global_step)
        return critic_train_op, critic_grads_clipped, critic_vars
4c33ba79dacebff375971c123e5bbafd34f5ab91
3,647,305
def interpolate(data, tstep):
    """Interpolate limit order data.

    Uses left-hand interpolation, and assumes that the data is indexed by timestamp.
    """
    T, N = data.shape
    timestamps = data.index
    t0 = timestamps[0] - (timestamps[0] % tstep)  # 34200
    tN = timestamps[-1] - (timestamps[-1] % tstep) + tstep  # 57600
    timestamps_new = np.arange(t0 + tstep, tN + tstep, tstep)  # [34200, ..., 57600]
    X = np.zeros((len(timestamps_new), N))  # np.array
    X[-1, :] = data.values[-1, :]
    t = timestamps_new[0]  # keeps track of time in NEW sampling frequency
    for i in np.arange(0, T):  # observations in data...
        if timestamps[i] > t:
            s = timestamps[i] - (timestamps[i] % tstep)
            tidx = int((t - t0) / tstep - 1)
            sidx = int((s - t0) / tstep)  # plus one for python indexing (below)
            X[tidx:sidx, :] = data.values[i - 1, :]
            t = s + tstep
        else:
            pass
    return pd.DataFrame(X, index=timestamps_new, columns=data.columns)
ae1fde01529a4d11ea864b0f5757c9cde096a142
3,647,306
def doColorTransfer(org_content, output, raw_data, with_color_match=False):
    """
    org_content        path or 0-1 np array
    output             path or 0-1 np array
    raw_data           boolean | toggles input
    """
    if not raw_data:
        org_content = imageio.imread(org_content, pilmode="RGB").astype(float) / 256
        output = imageio.imread(output, pilmode="RGB").astype(float) / 256
    org_content = skimage.transform.resize(org_content, output.shape)
    if with_color_match:
        output = match_color(output, org_content)
    org_content = rgb2luv(org_content)
    org_content[:, :, 0] = output.mean(2)
    output = luv2rgb(org_content)
    output[output < 0] = 0
    output[output > 1] = 1
    return output
8af9d8dc199fb4b111da619e4dbc935b2514c5b7
3,647,307
def tb_filename(tb):
    """Helper to get filename from traceback"""
    return tb.tb_frame.f_code.co_filename
75ac527b928d605f1dfc2b5034da6ab7e193fb82
3,647,308
async def fetch_symbol(symbol: str):
    """ get symbol info """
    db = SessionLocal()
    s = db.query(SymbolSchema).filter(SymbolSchema.symbol == symbol).first()
    res = {"symbol": s.symbol, "name": s.name}
    return res
6e6a1e4e92ba7796f1893d3144cbdcc75bee6bbe
3,647,309
from typing import List


def all_live_response_sessions(cb: CbResponseAPI) -> List:
    """List all LR sessions still in server memory."""
    return [sesh for sesh in cb.get_object(f"{CBLR_BASE}/session")]
e4ea25c8d38e90e8048f6a5c220e5f413ce59da6
3,647,310
def unserialize_model_params(bin: bytes):
    """Unserializes model or checkpoint or diff stored in db to list of tensors"""
    state = StatePB()
    state.ParseFromString(bin)
    worker = sy.VirtualWorker(hook=None)
    state = protobuf.serde._unbufferize(worker, state)
    model_params = state.tensors()
    return model_params
a1cad2172029b7e622486d50de73a7c87aaca9c0
3,647,311
def config(clazz):
    """Decorator that transforms a python object into a configuration file, and vice versa

    :param clazz: class to decorate
    :return: the decorated class
    """
    return deserialize(serialize(dataclass(clazz)))
a5386d53c596b77355ee8e5067a0e1c8b4efb89e
3,647,312
import functools


def wrap_method_once(func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]:
    """manage Runnable state for given method"""
    # we don't re-wrap methods that had the state management wrapper
    if hasattr(func, 'handler_wrapped'):
        return func

    @functools.wraps(func)
    def wrapped_runnable_method(*args, **kw):
        # check whether the first arg is self; otherwise call the wrapped function directly
        # (we might have wrapped a callable that is not a method)
        if args and isinstance(args[0], Runnable):
            self, args = args[0], args[1:]
            return self._call_wrapped_method(func, *args, **kw)
        else:
            return func(*args, **kw)

    wrapped_runnable_method.handler_wrapped = True
    return wrapped_runnable_method
cf26f642f09fe5d4b54073eeda4ab7beb03ed538
3,647,313
import typing as ty

T = ty.TypeVar("T")  # item type used in the annotation below (assumed)


async def all(iterable: ty.AsyncIterator[T]) -> bool:
    """Return ``True`` if **all** elements of the iterable are true (or if the iterable is empty).

    :param iterable: The asynchronous iterable to be checked.
    :type iterable: ~typing.AsyncIterator
    :returns: Whether all elements of the iterable are true or if the iterable is empty.
    :rtype: bool
    """
    async for x in iter(iterable):
        if not x:
            return False
    return True
92c8c20d75318a0b0353ede9641c59d9337c60f6
3,647,314
def _safe_isnan(x):
    """Wrapper for isnan() so it won't fail on non-numeric values."""
    try:
        return isnan(x)
    except TypeError:
        return False
7f4cb2e2f4c3e4ee9d66e6d8dbb73dcac5f343f0
3,647,315
def get_system( context, system_id = None ): """ Finds a system matching the given identifier and returns its resource Args: context: The Redfish client object with an open session system_id: The system to locate; if None, perform on the only system Returns: The system resource """ system_uri_pattern = "/redfish/v1/Systems/{}" avail_systems = None # If given an identifier, get the system directly if system_id is not None: system = context.get( system_uri_pattern.format( system_id ) ) # No identifier given; see if there's exactly one member else: avail_systems = get_system_ids( context ) if len( avail_systems ) == 1: system = context.get( system_uri_pattern.format( avail_systems[0] ) ) else: raise RedfishSystemNotFoundError( "Service does not contain exactly one system; a target system needs to be specified: {}".format( ", ".join( avail_systems ) ) ) # Check the response and return the system if the response is good try: verify_response( system ) except: if avail_systems is None: avail_systems = get_system_ids( context ) raise RedfishSystemNotFoundError( "Service does not contain a system called {}; valid systems: {}".format( system_id, ", ".join( avail_systems ) ) ) from None return system
7127a5cf0df89ba5832b7328746d29ef34c12c3e
3,647,316
import math def cartesian_to_polar(x, y, xorigin=0.0, yorigin=0.0): """ Helper function to convert Cartesian coordinates to polar coordinates (centred at a defined origin). In the polar coordinates, theta is an angle measured clockwise from the Y axis. :Parameters: x: float X coordinate of point y: float Y coordinate of point xorigin: float (optional) X coordinate of origin (if not zero) yorigin: float (optional) Y coordinate of origin (if not zero) :Returns: (r, theta): tuple of 2 floats Polar coordinates of point. NOTE: theta is in radians. """ xdiff = float(x) - float(xorigin) ydiff = float(y) - float(yorigin) distsq = (xdiff * xdiff) + (ydiff * ydiff) r = math.sqrt(distsq) theta = PIBY2 - math.atan2(ydiff, xdiff) # Adjust theta to be in the range 0 - 2*PI while theta < 0.0: theta += PI2 while theta > PI2: theta -= PI2 return (r, theta)
3fcbb0fc18b9c8d07bc1394995600fd354ee3c75
3,647,317
def keypoint_dict_to_struct(keypoint_dict):
    """ parse a keypoint dictionary form into a keypoint info structure """
    keypoint_info_struct = KeypointInfo()
    if 'keypoints' in keypoint_dict:
        for k in keypoint_dict['keypoints']:
            keypoint = keypoint_info_struct.keypoints.add()
            keypoint.xloc = k[0]
            keypoint.yloc = k[1]
    if 'jacobians' in keypoint_dict:
        for j in keypoint_dict['jacobians']:
            jacobian = keypoint_info_struct.jacobians.add()
            jacobian.d11 = j[0][0]
            jacobian.d12 = j[0][1]
            jacobian.d21 = j[1][0]
            jacobian.d22 = j[1][1]
    keypoint_info_struct.pts = keypoint_dict['pts']
    keypoint_info_struct.index = keypoint_dict['index']
    return keypoint_info_struct
d805e6fbac9df7a3a8d7d7f73a03f63a2a4149d1
3,647,318
def softmax_regression(img):
    """
    Define the softmax classifier:
    a single fully connected layer with softmax as the activation function
    is enough to produce the classification result.

    Args:
        img -- the raw input image data
    Return:
        predict -- the classification result
    """
    predict = paddle.layer.fc(
        input=img, size=10, act=paddle.activation.Softmax())
    return predict
0c522589a2130bbba512d557695f432eea50ef48
3,647,319
from typing import Optional from typing import Union from typing import Tuple def incremental_quality( wavelength: ndarray, flux: ndarray, *, mask: Optional[Union[Quantity, ndarray]] = None, percent: Union[int, float] = 10, **kwargs, ) -> Tuple[ndarray, ndarray]: """Determine spectral quality in incremental sections. Parameters ---------- wavelength: array-like or Quantity Wavelength of spectrum. flux: array-like or Quantity Flux of spectrum. mask: array-like, Quantity or None Pixel weight mask. percent: Union[int, float] (default=10) The percent size of chunk around each wavelength position. kwargs: Extra arguments passed onto quality() (including mask). Returns ------- x: ndarray Central wavelength values of each section. q: ndarray Spectral quality for each section. """ positions = log_chunks(wavelength, percent) qualities = [] for pos1, pos2 in zip(positions[:-1], positions[1:]): pos_mask = (wavelength >= pos1) & (wavelength < pos2) if np.sum(pos_mask) <= 1: # 1 or less points in this section continue x = wavelength[pos_mask] y = flux[pos_mask] if mask is not None: z = mask[pos_mask] else: z = mask # None try: q = quality(x, y, mask=z, **kwargs) except: q = np.nan qualities.append([np.nanmean(x), q]) x, q = np.asarray(qualities).T return x, q
69f7966af1bec5cde40d62e42346a28475c120f6
3,647,320
import platform
from collections import defaultdict


def get_platform():
    """Gets the users operating system.

    Returns:
        An `int` representing the users operating system.

        0: Windows x86 (32 bit)
        1: Windows x64 (64 bit)
        2: Mac OS
        3: Linux

        If the operating system is unknown, -1 will be returned.
    """
    return defaultdict(lambda: -1, {
        "Windows": 1 if platform.machine().endswith("64") else 0,
        "Darwin": 2,
        "Linux": 3,
    })[platform.system()]
2d77b76e7a7010dc1f104e83a7a042b7b7542954
3,647,321
def count_number_of_digits_with_unique_segment_numbers(displays: str) -> int:
    """Counts the number of 1, 4, 7 or 8s in the displays."""
    displays = [Display.from_string(d) for d in displays.splitlines()]
    num_digits = 0
    for display in displays:
        num_digits += sum(len(c) in (2, 3, 4, 7) for c in display.output_value)
    return num_digits
df9731371066cc89445fd5eeb94f40624cf24a2f
3,647,322
def svo_filter_url(telescope, photofilter, zeropoint='AB'):
    """
    Returns the URL where the filter transmission curve is hiding.

    Requires arguments:
    telescope:    SVO-like name of Telescope/Source of photometric system.
    photofilter:  SVO-like name of photometric filter.

    Optional:
    zeropoint:    String. Either 'AB', 'Vega', or 'ST'.

    Output:
    url:          URL of the relevant file.
    """
    url = 'http://svo2.cab.inta-csic.es/theory/fps3/fps.php?' + \
        'PhotCalID=' + telescope + '/' + photofilter + '/' + zeropoint
    return url
e3cbe6a3192fcc890fb15df8fc3c02620a7c69fb
3,647,323
def calculate_sampling_rate(timestamps):
    """
    Parameters
    ----------
    timestamps : array_like of timestamps, float (unit second)

    Returns
    -------
    float : sampling rate
    """
    if isinstance(timestamps[0], float):
        timestamps_second = timestamps
    else:
        try:
            v_parse_datetime = np.vectorize(parse_datetime)
            timestamps = v_parse_datetime(timestamps)
            timestamps_second = []
            timestamps_second.append(0)
            for i in range(1, len(timestamps)):
                timestamps_second.append((timestamps[i] - timestamps[i - 1]).total_seconds())
        except Exception:
            sampling_rate = None
            pass
    steps = np.diff(timestamps_second)
    sampling_rate = round(1 / np.min(steps[steps != 0]))
    return sampling_rate
0554866b55eb9310c64399819d8d3978108d5e1a
3,647,324
def compute_ss0(y, folds):
    """
    Compute the sum of squares based on null models (i.e., always
    predict the average `y` of the training data).

    Parameters
    ----------
    y : ndarray
    folds : list of ndarray
        Each element is an ndarray of integers, which are the indices
        of members of the fold.

    Returns
    -------
    ss0 : float
        The sum of squares based on null models.
    """
    yhat0 = np.zeros_like(y)
    for test_idx in folds:
        m = np.ones_like(y, dtype=bool)
        m[test_idx] = False
        yhat0[test_idx] = y[m].mean()
    ss0 = np.sum((y - yhat0)**2)
    return ss0
e546404eaa2637cc8073ddc0654f42987a07d972
3,647,325
import glob


def read_logging_data(folder_path):
    """
    Description:\n
    This function reads all csv files in the folder_path folder into one dataframe.
    The files should be in csv format and the name of each file should start with 'yrt'
    and contain '_food_data' in the middle. For example, yrt1999_food_data123.csv is a
    valid file name that would be read into the dataframe if it exists in the
    folder_path folder.

    Input:\n
    - folder_path(string) : path to the folder that contain the data.

    Output:\n
    - a dataframe contains all the csv files in the folder given.
    """
    data_lst = glob.glob('{}/yrt*_food_data*.csv'.format(folder_path))
    dfs = []
    for x in data_lst:
        dfs.append(pd.read_csv(x))
    df = pd.concat(dfs)
    return df.reset_index(drop=True)
964ad379eb70083b6a696655b22c77a2c61d101f
3,647,326
def get_user_strlist_options(*args):
    """
    get_user_strlist_options(out)
    """
    return _ida_kernwin.get_user_strlist_options(*args)
71c637c1d663d685ffd7e5fd5bba8cba04b18535
3,647,327
import torch


def coord_sampler(img, coords):
    """
    Sample img batch at integer (x,y) coords
    img: [B,C,H,W], coords: [B,2,N]
    returns: [B,C,N] points
    """
    B, C, H, W = img.shape
    N = coords.shape[2]
    batch_ref = torch.meshgrid(torch.arange(B), torch.arange(N))[0]
    out = img[batch_ref, :, coords[:, 1, :], coords[:, 0, :]]
    return out.permute(0, 2, 1)
d4a1ac6125d11381933d59190074f33bd9a7e774
3,647,328
import os def directory(name, *args): """ Returns the directory with the specified name, as an absolute path. :param name: The name of the directory. One of "textures" or "models". :args Elements that will be appended to the named directory. :return: The full path of the named directory. """ top = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) dirs = { 'bin': 'bin', 'config': 'bin', 'top': '', 'textures': 'core/assets/minecraft/textures', 'blockstates': 'core/assets/minecraft/blockstates', 'models': "core/assets/minecraft/models", 'minecraft': "core/assets/minecraft", 'site': 'site', 'core': 'core', } is_defaults = name == 'defaults' if is_defaults: name = args[0] args = args[1:] path = dirs[name] if is_defaults: path = path.replace('core/', 'default_resourcepack/') path = os.path.join(top, path) for arg in args: for part in arg.split('/'): path = os.path.join(path, part) return path
61a8b4014f11fa449d455b941f032b415c393186
3,647,329
def adapt_ListOfX(adapt_X): """This will create a multi-column adapter for a particular type. Note that the type must itself need to be in array form. Therefore this function serves to seaprate out individual lists into multiple big lists. E.g. if the X adapter produces array (a,b,c) then this adapter will take an list of Xs and produce a master array: ((a1,a2,a3),(b1,b2,b3),(c1,c2,c3)) Takes as its argument the adapter for the type which must produce an SQL array string. Note that you should NOT put the AsIs in the adapt_X function. The need for this function arises from the fact that we may want to actually handle list-creating types differently if they themselves are in a list, as in the example above, we cannot simply adopt a recursive strategy. Note that master_list is the list representing the array. Each element in the list will represent a subarray (column). If there is only one subarray following processing then the outer {} are stripped to give a 1 dimensional array. """ def adapter_function(param): if not AsIs: raise ImportError('There was a problem importing psycopg2.') param = param.value result_list = [] for element in param: # Where param will be a list of X's result_list.append(adapt_X(element)) test_element = result_list[0] num_items = len(test_element.split(",")) master_list = [] for x in range(num_items): master_list.append("") for element in result_list: element = element.strip("{").strip("}") element = element.split(",") for x in range(num_items): master_list[x] = master_list[x] + element[x] + "," if num_items > 1: master_sql_string = "{" else: master_sql_string = "" for x in range(num_items): # Remove trailing comma master_list[x] = master_list[x].strip(",") master_list[x] = "{" + master_list[x] + "}" master_sql_string = master_sql_string + master_list[x] + "," master_sql_string = master_sql_string.strip(",") if num_items > 1: master_sql_string = master_sql_string + "}" return AsIs("'{}'".format(master_sql_string)) return adapter_function
15a6ada60c3b78110097c29222c245225a2669b9
3,647,330
def add_sighting(pokemon_name): """Add new sighting to a user's Pokédex.""" user_id = session.get('user_id') user = User.query.get_or_404(user_id) pokemon = Pokemon.query.filter_by(name=pokemon_name).first_or_404() # 16% chance logging a sighting of a Pokémon with ditto_chance = True will # instead be logged as a Ditto # Through manual spamming I tested this, and it does work! if pokemon.chance_of_ditto(): pokemon = Pokemon.query.filter_by(name='Ditto').first_or_404() pokemon_id = pokemon.pokemon_id user_sighting = Sighting.query.filter((Sighting.user_id == user_id) & (Sighting.pokemon_id == pokemon_id)).one_or_none() # Ensuring unique Pokémon only in a user's sightings if user_sighting is None: new_sighting = Sighting(user_id=user_id, pokemon_id=pokemon_id) new_sighting.save() flash('Professor Willow: Wonderful! Your work is impeccable. Keep up the good work!') return redirect(f'/user/{user_id}') else: flash('Professor Willow: You\'ve already seen this Pokémon!') return redirect(f'/user/{user_id}')
cbc078b71d31bb40f582731341aefd89e23f26dc
3,647,331
def edit_colors_names_group(colors, names): """ idx map to colors and its names. names index is 1 increment up based on new indexes (only 0 - 7) """ # wall colors[0] = np.array([120, 120, 120], dtype = np.uint8) names[1] = 'wall' # floor colors[1] = np.array([80, 50, 50], dtype = np.uint8) names[2] = 'floor' # plant colors[2] = np.array([4, 200, 3], dtype = np.uint8) names[3] = 'plant' # ceiling colors[3] = np.array([120, 120, 80], dtype = np.uint8) names[4] = 'ceiling' # furniture colors[4] = np.array([204, 5, 255], dtype = np.uint8) names[5] = 'furniture' # person colors[5] = np.array([150, 5, 61], dtype = np.uint8) names[6] = 'person' # door colors[6] = np.array([8, 255, 51], dtype = np.uint8) names[7] = 'door' # objects colors[7] = np.array([6, 230, 230], dtype = np.uint8) names[8] = 'objects' return colors, names
45e6977b2ff4339417900d31d8bd22e9c5d934d8
3,647,332
def semidoc_mass_dataset(params, file_names, num_hosts, num_core_per_host, seq_len, num_predict, is_training, use_bfloat16=False, num_threads=64, record_shuffle_size=256, sequence_shuffle_size=2048): # pylint: disable=g-doc-args """Get semi-doc level mass dataset. Notes: - Each sequence comes from the same document (except for boundary cases). This is different from the standard sent-level mass dataset. - No consecutivity is ensured across batches, which is different from the standard doc-level mass dataset. - Effectively, semi-doc dataset maintains short range (seq_len) dependency, which is more random than doc-level and less random than sent-level. Returns: a tf.data.Dataset """ # pylint: enable=g-doc-args bsz_per_core = params["batch_size"] if num_hosts > 1: host_id = params["context"].current_host else: host_id = 0 ##### Split input files across hosts if len(file_names) >= num_hosts: file_paths = file_names[host_id::num_hosts] else: file_paths = file_names tf.logging.info("Host %d handles %d files:", host_id, len(file_paths)) ##### Parse records dataset = tf.data.Dataset.from_tensor_slices(file_paths) dataset = parse_record(dataset=dataset, parser=get_record_parser(), is_training=is_training, num_threads=num_threads, file_shuffle_size=len(file_paths), record_shuffle_size=record_shuffle_size) # process dataset dataset = mass_process(dataset, seq_len, num_predict, use_bfloat16) # Sequence level shuffle if is_training and sequence_shuffle_size: tf.logging.info("Seqeunce level shuffle with size %d", sequence_shuffle_size) dataset = dataset.shuffle(buffer_size=sequence_shuffle_size) # batching dataset = dataset.batch(bsz_per_core, drop_remainder=True) # Prefetch dataset = dataset.prefetch(num_core_per_host) return dataset
9c5809268931c1235bdd32c40f89781b05f387ae
3,647,333
def ListChrootSnapshots(buildroot):
    """Wrapper around cros_sdk --snapshot-list."""
    cmd = ['cros_sdk', '--snapshot-list']
    cmd_snapshots = RunBuildScript(buildroot, cmd, chromite_cmd=True, stdout=True)
    return cmd_snapshots.output.splitlines()
5ae25b11dd0dba39834411a05c095fd38ea70494
3,647,334
import httpx import asyncio async def post_github_webhook( request: Request, logger: BoundLogger = Depends(logger_dependency), http_client: httpx.AsyncClient = Depends(http_client_dependency), arq_queue: ArqQueue = Depends(arq_dependency), ) -> Response: """Process GitHub webhook events.""" if not config.enable_github_app: return Response( "GitHub App is not enabled", status_code=status.HTTP_501_NOT_IMPLEMENTED, ) body = await request.body() if config.github_webhook_secret is None: return Response( "The webhook secret is not configured", status_code=status.HTTP_501_NOT_IMPLEMENTED, ) webhook_secret = config.github_webhook_secret.get_secret_value() event = Event.from_http(request.headers, body, secret=webhook_secret) # Bind the X-GitHub-Delivery header to the logger context; this identifies # the webhook request in GitHub's API and UI for diagnostics logger = logger.bind(github_delivery=event.delivery_id) logger.debug("Received GitHub webhook", payload=event.data) # Give GitHub some time to reach internal consistency. await asyncio.sleep(1) await webhook_router.dispatch(event, logger, arq_queue) return Response(status_code=status.HTTP_202_ACCEPTED)
56326d3084f1b525776a923bc76a1a1f1959b6f6
3,647,335
import os import stat import json def _load_cache(b_cache_path): """ Loads the cache file requested if possible. The file must not be world writable. """ cache_version = 1 if not os.path.isfile(b_cache_path): display.vvvv("Creating Galaxy API response cache file at '%s'" % to_text(b_cache_path)) with open(b_cache_path, 'w'): os.chmod(b_cache_path, 0o600) cache_mode = os.stat(b_cache_path).st_mode if cache_mode & stat.S_IWOTH: display.warning("Galaxy cache has world writable access (%s), ignoring it as a cache source." % to_text(b_cache_path)) return with open(b_cache_path, mode='rb') as fd: json_val = to_text(fd.read(), errors='surrogate_or_strict') try: cache = json.loads(json_val) except ValueError: cache = None if not isinstance(cache, dict) or cache.get('version', None) != cache_version: display.vvvv("Galaxy cache file at '%s' has an invalid version, clearing" % to_text(b_cache_path)) cache = {'version': cache_version} # Set the cache after we've cleared the existing entries with open(b_cache_path, mode='wb') as fd: fd.write(to_bytes(json.dumps(cache), errors='surrogate_or_strict')) return cache
17496359e4958e3059ef7a4b4fcaeaf474037fdc
3,647,336
def nmad_filter(
    dh_array: np.ndarray,
    inlier_mask: np.ndarray,
    nmad_factor: float = 5,
    max_iter: int = 20,
    verbose: bool = False,
) -> np.ndarray:
    """
    Iteratively remove pixels where the elevation difference (dh_array) in stable terrain (inlier_mask)
    is larger than nmad_factor * NMAD.

    Iterations will stop either when the NMAD change is less than 0.1, or after max_iter iterations.

    :param dh_array: 2D array of elevation difference.
    :param inlier_mask: 2D boolean array of areas to include in the analysis (inliers=True).
    :param nmad_factor: The factor by which the stable dh NMAD has to be multiplied to calculate the outlier threshold
    :param max_iter: Maximum number of iterations (normally not reached, just for safety)
    :param verbose: set to True to print some statistics to screen.

    :returns: 2D boolean array with updated inliers set to True
    """
    # Mask unstable terrain
    dh_stable = dh_array.copy()
    dh_stable.mask[~inlier_mask] = True
    nmad_before = xdem.spatialstats.nmad(dh_stable)
    if verbose:
        print(f"NMAD before: {nmad_before:.2f}")
        print("Iteratively remove large outliers")

    # Iteratively remove large outliers
    for i in range(max_iter):
        outlier_threshold = nmad_factor * nmad_before
        dh_stable.mask[np.abs(dh_stable) > outlier_threshold] = True
        nmad_after = xdem.spatialstats.nmad(dh_stable)
        if verbose:
            print(f"Remove pixels where abs(value) > {outlier_threshold:.2f} -> New NMAD: {nmad_after:.2f}")

        # If the NMAD change is lower than a set threshold, stop iterating, otherwise stop after max_iter
        if nmad_before - nmad_after < 0.1:
            break
        nmad_before = nmad_after

    return ~dh_stable.mask
74275be5223531bc7cfc3f788cb562104514996c
3,647,337
def setup_output_vcf(outname, t_vcf):
    """
    Create an output vcf.Writer given the input vcf file as a template.

    Writes the full header and adds info fields: sizeCat, MEF.

    Returns a file handler and a dict with {individual_id: column in vcf}
    """
    out = open(outname, 'w')
    line = t_vcf.readline()
    samp_columns = {}
    while not line.startswith("#CHROM"):
        out.write(line)
        line = t_vcf.readline()

    # edit the header
    out.write('##INFO=<ID=sizeCat,Number=A,Type=String,Description="Size category of variant">\n')
    out.write('##INFO=<ID=MEF,Number=.,Type=String,Description="Names of families that contain mendelian error">\n')
    out.write(line)

    for pos, iid in enumerate(line.strip().split('\t')[9:]):
        samp_columns[iid] = pos + 9
    return out, samp_columns
82870c9c8d46dbe3161c434a87fac9108ed644b2
3,647,338
import os
import json


def load_json_file():
    """loads the json file"""
    dirname = os.path.dirname(os.path.abspath(__file__))
    json_filepath = os.path.join(dirname, "rps101_data.json")
    with open(json_filepath) as f:
        json_load = json.load(f)
    return json_load
9bd4b77167af6fc84d05b1766470a165d3ee6bd1
3,647,339
def validate_processing_hooks():
    """Validate the enabled processing hooks.

    :raises: MissingHookError on missing or failed to load hooks
    :raises: RuntimeError on validation failure
    :returns: the list of hooks passed validation
    """
    hooks = [ext for ext in processing_hooks_manager()]
    enabled = set()
    errors = []
    for hook in hooks:
        deps = getattr(hook.obj, 'dependencies', ())
        missing = [d for d in deps if d not in enabled]
        if missing:
            errors.append('Hook %(hook)s requires the following hooks to be '
                          'enabled before it: %(deps)s. The following hooks '
                          'are missing: %(missing)s.' %
                          {'hook': hook.name,
                           'deps': ', '.join(deps),
                           'missing': ', '.join(missing)})
        enabled.add(hook.name)

    if errors:
        raise RuntimeError("Some hooks failed to load due to dependency "
                           "problems:\n%s" % "\n".join(errors))

    return hooks
2d76fb003a3e960c86e342f057ee5a9ea0a77def
3,647,340
def rnn_stability_loss(rnn_output, beta):
    """
    REGULARIZING RNNS BY STABILIZING ACTIVATIONS
    https://arxiv.org/pdf/1511.08400.pdf

    :param rnn_output: [time, batch, features]
    :return: loss value
    """
    if beta == 0.0:
        return 0.0
    # [time, batch, features] -> [time, batch]
    l2 = tf.sqrt(tf.reduce_sum(tf.square(rnn_output), axis=-1))
    # [time, batch] -> []
    return beta * tf.reduce_mean(tf.square(l2[1:] - l2[:-1]))
55dcc6461c7dcc683ae5a3b2d642ed9172616c8b
3,647,341
import os import pprint import google def import_csv_to_calendar_api(): """The main route of the Project.\ If requested with a GET: renders the page of the import operation. If requested with POST: Starts importing events in the file to be uploaded present in the POST data. """ if request.method == "GET": return make_response(render_template('import.html.j2'), 200) elif request.method == "POST": max_events = int(request.form["max_events"]) if max_events == 0: max_events = None events_freq = int(request.form["events_freq"]) if 'file' not in request.files: flash(("No file part"), category='danger') return redirect(request.url) file = request.files['file'] if file.filename == "": flash("No file selected for upload", category='danger') return redirect(request.url) if file and allowed_file(file.filename): filename = secure_filename(file.filename) file__path = os.path.join(app.config["UPLOAD_FOLDER"], filename) file.save(file__path) operation_now = import_oprtation(number_events=max_events, filename=filename) db.session.add(operation_now) try: op_id = db.session.query(import_oprtation).order_by( import_oprtation.id.desc()).first().id except Exception as e: print(e) db.session.commit() all_events = csv_tt_to_json_events(file__path, dates, max_events=max_events, events_freq=events_freq) for event in all_events: resource = None teachers = [] std_sets_emails = [] if "resource" in event["attendees"][-1].keys(): resource = event["attendees"][-1] event["attendees"].remove(resource) for ev_att in event["attendees"]: if Teacher.query.filter_by( teacher_email=ev_att["email"]).first(): teachers.append(ev_att) else: std_sets_emails.append(ev_att) event["attendees"].clear() event["attendees"].extend(teachers) if resource is not None: event["attendees"].append(resource) for std_mail in std_sets_emails: cal_rec = Calendar.query.filter_by( std_email=std_mail["email"]).first() if cal_rec: calendar_id = cal_rec.calendar_id_google else: print("Calendar does not exist") pprint(event) print("_________") continue resp = google.post( "/calendar/v3/calendars/{}/events".format(calendar_id), json=event, params={"sendUpates": "none"}) if resp.status_code == 200: gevent_id = resp.json()["id"] try: event_logged = events__log( gevent_id=gevent_id, gcalendar_id=calendar_id, import_id=op_id) except Exception as e: print(e) print(event) try: db.session.add(event_logged) db.session.commit() except Exception as e: print(e) else: print("Could not insert event") print(resp.text) flash(("Added {} events to calendar".format(len(all_events))), category='success') return redirect(url_for('import_csv_to_calendar_api')) else: flash('Allowed file types are: csv', category="info") return redirect(request.url)
3205b303ec0c17b303fd6da5993880e3515564bb
3,647,342
def xproto_fields(m, table): """ Generate the full list of models for the xproto message `m` including fields from the classes it inherits. Inserts the special field "id" at the very beginning. Each time we descend a new level of inheritance, increment the offset field numbers by 100. The base class's fields will be numbered from 1-99, the first descendant will be number 100-199, the second descdendant numbered from 200-299, and so on. This assumes any particular model as at most 100 fields. """ model_fields = [x.copy() for x in m["fields"]] for field in model_fields: field["accessor"] = m["fqn"] fields = xproto_base_fields(m, table) + model_fields # The "id" field is a special field. Every model has one. Put it up front and pretend it's part of the if not fields: raise Exception( "Model %s has no fields. Check for missing base class." % m["name"] ) id_field = { "type": "int32", "name": "id", "options": {}, "id": "1", "accessor": fields[0]["accessor"], } fields = [id_field] + fields # Walk through the list of fields. They will be in depth-first search order from the base model forward. Each time # the model changes, offset the protobuf field numbers by 100. offset = 0 last_accessor = fields[0]["accessor"] for field in fields: if field["accessor"] != last_accessor: last_accessor = field["accessor"] offset += 100 field_id = int(field["id"]) if (field_id < 1) or (field_id >= 100): raise Exception( "Only field numbers from 1 to 99 are permitted, field %s in model %s" % (field["name"], field["accessor"]) ) field["id"] = int(field["id"]) + offset # Check for duplicates fields_by_number = {} for field in fields: id = field["id"] dup = fields_by_number.get(id) if dup: raise Exception( "Field %s has duplicate number %d with field %s in model %s" % (field["name"], id, dup["name"], field["accessor"]) ) fields_by_number[id] = field return fields
fffcbb99cf7ff851dde77fee3f16046e0e71582d
3,647,343
def create_decomp_expand_fn(custom_decomps, dev, decomp_depth=10): """Creates a custom expansion function for a device that applies a set of specified custom decompositions. Args: custom_decomps (Dict[Union(str, qml.operation.Operation), Callable]): Custom decompositions to be applied by the device at runtime. dev (qml.Device): A quantum device. decomp_depth: The maximum depth of the expansion. Returns: Callable: A custom expansion function that a device can call to expand its tapes within a context manager that applies custom decompositions. **Example** Suppose we would like a custom expansion function that decomposes all CNOTs into CZs. We first define a decomposition function: .. code-block:: python def custom_cnot(wires): return [ qml.Hadamard(wires=wires[1]), qml.CZ(wires=[wires[0], wires[1]]), qml.Hadamard(wires=wires[1]) ] We then create the custom function (passing a device, in order to pick up any additional stopping criteria the expansion should have), and then register the result as a custom function of the device: >>> custom_decomps = {qml.CNOT : custom_cnot} >>> expand_fn = qml.transforms.create_decomp_expand_fn(custom_decomps, dev) >>> dev.custom_expand(expand_fn) """ custom_op_names = [op if isinstance(op, str) else op.__name__ for op in custom_decomps.keys()] # Create a new expansion function; stop at things that do not have # custom decompositions, or that satisfy the regular device stopping criteria custom_fn = qml.transforms.create_expand_fn( decomp_depth, stop_at=qml.BooleanFn(lambda obj: obj.name not in custom_op_names), device=dev, ) # Finally, we set the device's custom_expand_fn to a new one that # runs in a context where the decompositions have been replaced. def custom_decomp_expand(self, circuit, max_expansion=decomp_depth): with _custom_decomp_context(custom_decomps): return custom_fn(circuit, max_expansion=max_expansion) return custom_decomp_expand
f7a4682bae3b520dcce87e715834439311e7d8b6
3,647,344
from typing import Dict
from typing import List


def get_lats_map(floor: float, ceil: float) -> Dict[int, List[float]]:
    """
    Get map of lats in full minutes with quarter minutes as keys.

    Series considers lat range is [-70;69] and how objects are stored in s3.
    """
    full = full_minutes([floor, ceil])
    out = {d: [d + dd for dd in OBJ_COORD_PARTS] for d in full[1:-1]}
    start = full[0]
    out[start] = [start + d for d in OBJ_COORD_PARTS if start + d >= floor]
    end = full[-1]
    out[end] = [end + d for d in OBJ_COORD_PARTS if end + d <= ceil]
    return {k: d for k, d in out.items() if -70 <= k < 70}
e2bc82e509372d0ed20483a7ac97c5bbfbc9e2d5
3,647,345
from typing import Dict
from typing import Any


def format_dict(body: Dict[Any, Any]) -> str:
    """
    Formats a dictionary into a multi-line bulleted string of key-value pairs.
    """
    return "\n".join(
        [f" - {k} = {getattr(v, 'value', v)}" for k, v in body.items()]
    )
b3f66d086284772e6783b8281f4d46c3dd6c237d
3,647,346
def block3(x, filters, kernel_size=3, stride=1, groups=32,
           conv_shortcut=True, name=None):
    """A residual block.

    # Arguments
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the first layer.
        groups: default 32, group size for grouped convolution.
        conv_shortcut: default True, use convolution shortcut if True,
            otherwise identity shortcut.
        name: string, block label.

    # Returns
        Output tensor for the residual block.
    """
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    if conv_shortcut is True:
        shortcut = Conv2D((64 // groups) * filters, 1, strides=stride,
                          use_bias=False, name=name + '_0_conv')(x)
        shortcut = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                      name=name + '_0_bn')(shortcut)
    else:
        shortcut = x

    x = Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                           name=name + '_1_bn')(x)
    x = Activation('relu', name=name + '_1_relu')(x)

    c = filters // groups
    x = ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
    x = DepthwiseConv2D(kernel_size, strides=stride, depth_multiplier=c,
                        use_bias=False, name=name + '_2_conv')(x)
    x_shape = backend.int_shape(x)[1:-1]
    x = Reshape(x_shape + (groups, c, c))(x)
    output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None
    x = Lambda(lambda x: sum([x[:, :, :, :, i] for i in range(c)]),
               output_shape=output_shape, name=name + '_2_reduce')(x)
    x = Reshape(x_shape + (filters,))(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                           name=name + '_2_bn')(x)
    x = Activation('relu', name=name + '_2_relu')(x)

    x = Conv2D((64 // groups) * filters, 1, use_bias=False, name=name + '_3_conv')(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                           name=name + '_3_bn')(x)

    x = Add(name=name + '_add')([shortcut, x])
    x = Activation('relu', name=name + '_out')(x)
    return x
e04c2bd5662991203f5b3d86652d2469e06bd346
3,647,347
import time


def random_sleep(n):
    """io"""
    time.sleep(2)
    return n
f626a2e64a266084f33a78dd9be4e4528ff88934
3,647,348
def yolo2_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    """Create YOLO_V2 MobileNet model CNN body in Keras."""
    # Keras layers/models (MobileNet, Concatenate, Lambda, Model) and the Darknet
    # helpers are assumed to be imported at module level.
    mobilenet = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)

    # input: 416 x 416 x 3
    # mobilenet.output : 13 x 13 x (1024*alpha)
    # conv_pw_11_relu(layers[73]) : 26 x 26 x (512*alpha)
    conv_head1 = compose(
        DarknetConv2D_BN_Leaky(int(1024*alpha), (3, 3)),
        DarknetConv2D_BN_Leaky(int(1024*alpha), (3, 3)))(mobilenet.output)

    # conv_pw_11_relu output shape: 26 x 26 x (512*alpha)
    conv_pw_11_relu = mobilenet.layers[73].output
    conv_head2 = DarknetConv2D_BN_Leaky(int(64*alpha), (1, 1))(conv_pw_11_relu)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv_head2_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv_head2)

    x = Concatenate()([conv_head2_reshaped, conv_head1])
    x = DarknetConv2D_BN_Leaky(int(1024*alpha), (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1), name='predict_conv')(x)
    return Model(inputs, x)
8938a226857ada5fe621c7206d99b32fcd36ee0f
3,647,349
def LeastSquare_nonlinearFit_general( X, Y, func, func_derv, guess, weights=None, maxRelativeError=1.0e-5, maxIteratitions=100, ): """Computes a non-linear fit using the least square method following: http://ned.ipac.caltech.edu/level5/Stetson/Stetson2_2_1.html It takes the following arguments: X - array of x-parameters Y - y-array func - the function f(X,parameters) func_derv - a function that returns a 2D array giving along the columns the derivatives of the function 'f' with respect to each fit parameter ( df /dp_i ) guess - a first guess for the fit parameters weights - the weights associated to each point maxRelativeError - stop the iteration once the error in each parameter is below this threshold maxIterations - stop the iteration after this many steps Returns: parameters, fit_error, chi_square, noPoints, succes (True if successful) """ functionName = "'analysis.LeastSquare_nonlinearFit_general'" if weights is None: weights = Y.copy() weights[:] = 1 noPoints = Y.size # number of data points used for the fit noParams = len(guess) # number of fit parameters # iterate starting with the initial guess until finding the best fit parameters a = guess iteration, notConverged = 0, True while iteration < maxIteratitions and notConverged: tempX = func_derv( X, a ) # the derivatives for the current parameter values tempDiff = Y - func( X, a ) # the difference between function values and Y-values std = (weights * tempDiff ** 2).sum() # the current sum of the squares step = np.linalg.lstsq(tempX, tempDiff)[0] while True: a2 = a + step tempStd = ( weights * (Y - func(X, a2)) ** 2 ).sum() # the sum of the squares for the new parameter values if tempStd > std: step /= ( 2.0 ) # wrong estimate for the step since it increase the deviation from Y values; decrease step by factor of 2 else: a += step break if (np.abs(step / a) < maxRelativeError).all(): notConverged = False # the iteration has converged iteration += 1 print(iteration, a, step, std, tempStd) # compute the standard deviation for the best fit parameters derivatives = func_derv(X, a) M = np.zeros((noParams, noParams), np.float64) for i in range(noParams): for j in range(i, noParams): M[i, j] = (weights * derivatives[:, i] * derivatives[:, j]).sum() M[j, i] = M[i, j] Minv = np.linalg.inv(M) chiSquare = (weights * (Y - func(X, a)) ** 2).sum() / ( noPoints - noParams ) # fit residuals a_error = np.zeros(noParams, np.float64) for i in range(noParams): a_error[i] = (chiSquare * Minv[i, i]) ** 0.5 return a, a_error, chiSquare, noPoints, iteration < maxIteratitions
255c5d11607aa13d1ab7d7ea28fbcd95396669e0
3,647,350
def _get_extracted_csv_table(relevant_subnets, tablename, input_path, sep=";"):
    """ Returns extracted csv data of the requested SimBench grid. """
    csv_table = read_csv_data(input_path, sep=sep, tablename=tablename)
    if tablename == "Switch":
        node_table = read_csv_data(input_path, sep=sep, tablename="Node")
        bus_bus_switches = set(get_bus_bus_switch_indices_from_csv(csv_table, node_table))
    else:
        bus_bus_switches = {}
    extracted_csv_table = _extract_csv_table_by_subnet(csv_table, tablename, relevant_subnets,
                                                       bus_bus_switches=bus_bus_switches)
    return extracted_csv_table
81368b79bb737c24d4f614ebd8aff76ff9efcac0
3,647,351
def has_prefix(sub_s):
    """
    :param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid.
    :return: (bool) Whether there are any words with the prefix stored in sub_s.
    """
    for word in vocabulary_list:
        if word.strip().startswith(sub_s):
            return True
    # Check all vocabularies in dictionary.txt; if no word starts with sub_s, return False.
    return False
4a99a3437a954173a7f4fe87ccd8970a59632aa8
3,647,352
def work_out_entity(context, entity):
    """
    One of Arkestra's core functions
    """
    # first, try to get the entity from the context
    entity = context.get('entity', None)
    if not entity:
        # otherwise, see if we can get it from a cms page
        request = context['request']
        if request.current_page:
            entity = entity_for_page(request.current_page)
        else:
            # we must be in a plugin, either in the page or in admin
            page = context['plugin'].get("page", None)
            if page:
                entity = entity_for_page(page)
            else:
                entity = None
    return entity
8cc1039d8611aa03d8e2d1f708339d87845d0381
3,647,353
def rectify(link: str, parent: str, path: str):
    """Check a link and decide whether it should be captured; for example, any external URL
    is blocked. Also ensures that all URLs are properly formatted.

    Args:
        **link (str)**: the link to rectify.
        **parent (str)**: the complete url of the page from which the link was found.
        **path (str)**: the path (after the domain) of the page from which the link was found.

    Returns:
        **str**: the properly formatted link.
    """
    if (link.startswith("#")) or (":" in link) or ("../" in link):
        return path
    if not link.startswith("/"):
        if parent.endswith("/"):
            if not path.endswith("/"):
                path += "/"
            return path + link
        else:
            path = "/".join(path.split("/")[:-1]) + "/"
            return path + link
    return link
6ca5771fcbbb35fe6d99bab65082d447299bb93a
3,647,354
def nested_pids_and_relations(app, db): """Fixture for a nested PIDs and the expected serialized relations.""" # Create some PIDs and connect them into different nested PID relations pids = {} for idx in range(1, 12): pid_value = str(idx) p = PersistentIdentifier.create('recid', pid_value, object_type='rec', status=PIDStatus.REGISTERED) pids[idx] = p VERSION = resolve_relation_type_config('version').id # 1 (Version) # / | \ # 2 3 4 PIDRelation.create(pids[1], pids[2], VERSION, 0) PIDRelation.create(pids[1], pids[3], VERSION, 1) PIDRelation.create(pids[1], pids[4], VERSION, 2) # Define the expected PID relation tree for of the PIDs expected_relations = {} expected_relations[4] = { u'relations': { 'version': [ {u'children': [{u'pid_type': u'recid', u'pid_value': u'2'}, {u'pid_type': u'recid', u'pid_value': u'3'}, {u'pid_type': u'recid', u'pid_value': u'4'}], u'index': 2, u'is_child': True, u'previous': {'pid_type': 'recid', 'pid_value': '3'}, u'next': None, u'is_last': True, u'is_parent': False, u'parent': {u'pid_type': u'recid', u'pid_value': u'1'}, u'type': 'version' } ], } } return pids, expected_relations
b8039318ffa96f5f42cc7398398b761febf06349
3,647,355
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    expline = line.expandtabs()
    return len(expline) - len(expline.lstrip())
c1f307adfeb2c1ec51c5e926a0b87dd3841e1aff
3,647,356
import os


def reprocess_subtitle_file(path, max_chars=30, max_stretch_time=3, max_oldest_start=10):
    """Combine subtitles across a time period"""
    file_name, ext = os.path.splitext(path)
    compressed_subtitle_file = file_name + '.ass'
    subs = pysubs2.load(path)
    compressed_subs = compress(subs, max_chars, max_stretch_time, max_oldest_start)
    compressed_subs.save(compressed_subtitle_file)
    logger.info(f'Combined {len(subs)} subtitles from {path} to {len(compressed_subs)} in {compressed_subtitle_file}')
    return compressed_subtitle_file, subs[len(subs) - 1].end
241454ffc83564a402635d2d47c18c8c8df07561
3,647,357
import os def load(dataFormat,path,ignoreEntities=[],ignoreComplexRelations=True): """ Load a corpus from a variety of formats. If path is a directory, it will try to load all files of the corresponding data type. For standoff format, it will use any associated annotations files (with suffixes .ann, .a1 or .a2) :param dataFormat: Format of the data files to load ('standoff','biocxml','pubannotation','simpletag') :param path: Path to data. Can be directory or an individual file. Should be the txt file for standoff. :param ignoreEntities: List of entity types to ignore while loading :param ignoreComplexRelations: Whether to filter out relations where one argument is another relation (must be True as kindred doesn't currently support complex relations) :type dataFormat: str :type path: str :type ignoreEntities: list :type ignoreComplexRelations: bool :return: Corpus of loaded documents :rtype: kindred.Corpus """ assert dataFormat in ['standoff','biocxml','pubannotation','simpletag'] assert ignoreComplexRelations == True, "ignoreComplexRelations must be True as kindred doesn't currently support complex relations" corpus = kindred.Corpus() if os.path.isdir(path): directory = path filenames = sorted(list(os.listdir(directory))) for filename in filenames: if dataFormat == 'standoff' and filename.endswith('.txt'): absPath = os.path.join(directory, filename) doc = loadDataFromStandoff(absPath,ignoreEntities=ignoreEntities) corpus.addDocument(doc) elif dataFormat == 'biocxml' and filename.endswith('.bioc.xml'): absPath = os.path.join(directory, filename) tempCorpus = loadDataFromBioC(absPath,ignoreEntities=ignoreEntities) corpus.documents += tempCorpus.documents elif dataFormat == 'pubannotation' and filename.endswith('.json'): absPath = os.path.join(directory, filename) doc = loadDataFromPubAnnotationJSON(absPath,ignoreEntities=ignoreEntities) corpus.addDocument(doc) elif dataFormat == 'simpletag' and filename.endswith('.simple'): absPath = os.path.join(directory, filename) with open(absPath,'r') as f: filecontents = f.read().strip() doc = parseSimpleTag(filecontents,ignoreEntities=ignoreEntities) doc.sourceFilename = filename corpus.addDocument(doc) if len(corpus.documents) == 0: raise RuntimeError("No documents loaded from directory (%s). Are you sure this directory contains the corpus (format: %s)" % (path,dataFormat)) elif dataFormat == 'standoff': doc = loadDataFromStandoff(path,ignoreEntities=ignoreEntities) corpus.addDocument(doc) elif dataFormat == 'biocxml': corpus = loadDataFromBioC(path,ignoreEntities=ignoreEntities) elif dataFormat == 'pubannotation': doc = loadDataFromPubAnnotationJSON(path,ignoreEntities=ignoreEntities) corpus.addDocument(doc) elif dataFormat == 'simpletag': with open(path,'r') as f: filecontents = f.read().strip() doc = parseSimpleTag(filecontents,ignoreEntities=ignoreEntities) doc.sourceFilename = os.path.basename(path) corpus.addDocument(doc) return corpus
dc60c4c91e34a6693877aa086824dd5c44d0b957
3,647,358
import os
import codecs


def read(*parts):
    """
    Build an absolute path from *parts* and return the contents of the
    resulting file. Assumes UTF-8 encoding.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(here, *parts), "rb", "utf-8") as f:
        return f.read()
d19885ee8e1c9889ac6c3c0daff50dac8ce00aa6
3,647,359
import warnings def read_idl_catalog(filename_sav, expand_extended=True): """ Read in an FHD-readable IDL .sav file catalog. Deprecated. Use `SkyModel.read_fhd_catalog` instead. Parameters ---------- filename_sav: str Path to IDL .sav file. expand_extended: bool If True, return extended source components. Default: True Returns ------- :class:`pyradiosky.SkyModel` """ warnings.warn( "This function is deprecated, use `SkyModel.read_fhd_catalog` instead. " "This function will be removed in version 0.2.0.", category=DeprecationWarning, ) skyobj = SkyModel() skyobj.read_fhd_catalog( filename_sav, expand_extended=expand_extended, ) return skyobj
77680e111c9f67b628cd1da5eac26afcb036374b
3,647,360
import struct


def guid2bytes(s):
    """Converts a GUID to the serialized bytes representation"""
    assert isinstance(s, str)
    assert len(s) == 36

    p = struct.pack
    return b"".join([
        p("<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16)),
        p(">H", int(s[19:23], 16)),
        p(">Q", int(s[24:], 16))[2:],
    ])
f298497173f9011392b671267cb47f081d25a9da
3,647,361
import functools def config(config_class=None, name="config", group=None): """ Class decorator that registers a custom configuration class with `Hydra's ConfigStore API <https://hydra.cc/docs/tutorials/structured_config/config_store>`_. If defining your own custom configuration class, your class must do the following: * Register with `Hydra's ConfigStore API <https://hydra.cc/docs/tutorials/structured_config/config_store>`_ (which this decorator does for you). * Register as a `@dataclass <https://docs.python.org/3/library/dataclasses.html>`_. Example:: @config(name="db") @dataclass class DBConfig(BaseConfig): host: str = "localhost" .. note:: Make sure @dataclass comes after @config. This also supports `Hydra Config Groups <https://hydra.cc/docs/tutorials/structured_config/config_groups>`_, example:: @config @dataclass class Config(BaseConfig): db: Any = MISSING @config(name="mysql", group="db") @dataclass class MySqlConfig: host: str = "mysql://localhost" @config(name="postgres", group="db") @dataclass class PostgresConfig: host: str = "postgres://localhost" postgres_specific_data: str = "some special data" Then when running the job you can do the following:: $ python3 -m project_package db=mysql :param name: Name of the configuration, used to locate overrides. :type name: str :param group: Group name to support Hydra Config Groups. :type group: str """ @functools.wraps(config_class) def wrapper(config_class, name, group): cs = ConfigStore.instance() cs.store(name=name, node=config_class, group=group) return config_class if config_class: return wrapper(config_class, name, group) def recursive_wrapper(config_class): return config(config_class, name, group) return recursive_wrapper
6e3828c51835f8a9a9ed3afc4b5d9606d8ba9e50
3,647,362
def truncate_words(text, max_chars, break_words=False, padding=0):
    """ Truncate a string to max_chars, optionally truncating words """
    if break_words:
        return text[:-abs(max_chars - len(text)) - padding]
    words = []
    for word in text.split():
        length = sum(map(len, words)) + len(word) + len(words) - 1 + padding
        if length >= max_chars:
            break
        words.append(word)
    return ' '.join(words)
b239822fd1cef6c2e3f7425a0ad5cbc32b8b1325
3,647,363
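A minimal usage sketch for the `truncate_words` record above; the sample string and expected outputs are illustrative assumptions, not part of the original entry.

# Hypothetical check of both truncation modes (word-aware vs. hard character cut).
sample = "the quick brown fox jumps"
print(truncate_words(sample, 15))                    # 'the quick brown' (stops before 'fox')
print(truncate_words(sample, 15, break_words=True))  # 'the quick brown' (plain slice to 15 chars)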
def interpolate_mean_tpr(FPRs=None, TPRs=None, df_list=None):
    """
    mean_fpr, mean_tpr = interpolate_mean_tpr(FPRs=None, TPRs=None, df_list=None)

    FPRs: False positive rates (list of n arrays)
    TPRs: True positive rates (list of n arrays)
    df_list: DataFrames with TPR, FPR columns (list of n DataFrames)
    """
    # seed empty linspace
    mean_tpr, mean_fpr = 0, np.linspace(0, 1, 101)

    if TPRs and FPRs:
        for idx, PRs in enumerate(zip(FPRs, TPRs)):
            mean_tpr += np.interp(mean_fpr, PRs[0], PRs[1])
    elif df_list:
        for idx, df_ in enumerate(df_list):
            mean_tpr += np.interp(mean_fpr, df_.FPR, df_.TPR)
    else:
        print("Please give valid inputs.")
        return None, None

    # normalize by length of inputs (# indices looped over)
    mean_tpr /= (idx + 1)

    # add origin point
    mean_fpr = np.insert(mean_fpr, 0, 0)
    mean_tpr = np.insert(mean_tpr, 0, 0)

    return mean_fpr, mean_tpr
47f53c0f5010620bf54119a0f35eff13143fa960
3,647,364
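A small usage sketch for the `interpolate_mean_tpr` record above, assuming `numpy` is available as `np` as the record's body implies; the toy ROC points are made up for illustration.

import numpy as np

# Two toy ROC curves; the function averages them on a common 101-point FPR grid.
fprs = [np.array([0.0, 0.5, 1.0]), np.array([0.0, 0.2, 1.0])]
tprs = [np.array([0.0, 0.7, 1.0]), np.array([0.0, 0.6, 1.0])]
mean_fpr, mean_tpr = interpolate_mean_tpr(FPRs=fprs, TPRs=tprs)
print(mean_fpr.shape, mean_tpr.shape)  # (102,) (102,) once the origin point is prepended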
def _generate_copy_from_codegen_rule(plugin, target_name, thrift_src, file):
    """Generates a rule that copies a generated file from the plugin codegen
    output directory out into its own target. Returns the name of the
    generated rule.
    """
    invoke_codegen_rule_name = _invoke_codegen_rule_name(
        plugin,
        target_name,
        thrift_src,
    )
    plugin_path_prefix = "gen-cpp2-{}".format(plugin.name)
    rule_name = _copy_from_codegen_rule_name(plugin, target_name, thrift_src, file)
    cmd = " && ".join(
        [
            "mkdir `dirname $OUT`",
            "cp $(location :{})/{} $OUT".format(invoke_codegen_rule_name, file),
        ],
    )
    fb_native.genrule(
        name = rule_name,
        out = "{}/{}".format(plugin_path_prefix, file),
        cmd = cmd,
    )
    return rule_name
652fad4701299d031fd6258ce4d2d8b097af3406
3,647,365
def mir_right(data):
    """ Append Mirror to right """
    return np.append(data[..., ::-1], data, axis=-1)
7daa6b4c70e80fcbeda894b7ff358b44149edf22
3,647,366
def mask_nms(masks, bbox_scores, instances_confidence_threshold=0.5, overlap_threshold=0.7):
    """
    NMS-like procedure used in Panoptic Segmentation
    Remove the overlap areas of different instances in Instance Segmentation
    """
    panoptic_seg = np.zeros(masks.shape[:2], dtype=np.uint8)
    sorted_inds = list(range(len(bbox_scores)))
    current_segment_id = 0
    segments_score = []

    for inst_id in sorted_inds:
        score = bbox_scores[inst_id]
        if score < instances_confidence_threshold:
            break
        mask = masks[:, :, inst_id]
        mask_area = mask.sum()
        if mask_area == 0:
            continue

        intersect = (mask > 0) & (panoptic_seg > 0)
        intersect_area = intersect.sum()
        if intersect_area * 1.0 / mask_area > overlap_threshold:
            continue
        if intersect_area > 0:
            mask = mask & (panoptic_seg == 0)

        current_segment_id += 1
        # panoptic_seg[np.where(mask==1)] = current_segment_id
        # panoptic_seg = panoptic_seg + current_segment_id*mask
        panoptic_seg = np.where(mask == 0, panoptic_seg, current_segment_id)
        segments_score.append(score)

    # print(np.unique(panoptic_seg))
    return panoptic_seg, segments_score
92465752e741fffb04ea1a243dd465799daf6598
3,647,367
def table_to_lookup(table):
    """Converts the contents of a dynamodb table to a dictionary for reference.

    Uses dump_table to download the contents of a specified table, then
    creates a route lookup dictionary where each key is (route id, express
    code) and contains elements for avg_speed, and historic_speeds.

    Args:
        table: A boto3 Table object from which all data will be read into
            memory and returned.

    Returns:
        A dictionary with (route id, segment id) keys and average speed (num),
        historic speeds (list), and local express code (str) data.
    """
    # Put the data in a dictionary to reference when adding speeds to geojson
    items = dump_table(table)
    route_lookup = {}
    for item in items:
        if 'avg_speed_m_s' in item.keys():
            route_id = int(item['route_id'])
            local_express_code = item['local_express_code']
            hist_speeds = [float(i) for i in item['historic_speeds']]
            route_lookup[(route_id, local_express_code)] = {
                'avg_speed_m_s': float(item['avg_speed_m_s']),
                'historic_speeds': hist_speeds
            }
    return route_lookup
97a95a2e06e81907d2d1091e2ebf41a26808653a
3,647,368
def _initialize_weights(variable_scope, n_head, n_input, n_hidden):
    """ initialize the weight of Encoder with multiple heads, and Decoder """
    with tf.variable_scope(variable_scope, reuse=tf.AUTO_REUSE):
        all_weights = dict()

        ## forward, each head has the same dimension, n_hidden/n_head
        n_head_hidden = n_hidden // n_head  # integer division so the TF shape entry stays an int
        # n_head_hidden = n_hidden
        for i in range(n_head):
            index = i + 1
            weight_key = 'w1_%d' % index
            bias_key = 'b1_%d' % index
            all_weights[weight_key] = tf.get_variable(
                weight_key,
                shape=[n_input, n_head_hidden],
                initializer=tf.contrib.layers.xavier_initializer())
            all_weights[bias_key] = tf.Variable(
                tf.zeros([n_head_hidden], dtype=tf.float32), name=bias_key)

        ## reconstruct
        all_weights['w2'] = tf.Variable(
            tf.zeros([n_hidden, n_input], dtype=tf.float32), name='w2')
        all_weights['b2'] = tf.Variable(
            tf.zeros([n_input], dtype=tf.float32), name='b2')

        ## DEBUG:
        for key in all_weights.keys():
            tensor = all_weights[key]
            print("DEBUG: Shape of Weight %s" % key)
            print(tensor.shape)

        return all_weights
d7950218d9b76c12fd3dbad54bf30286ec2d3b94
3,647,369
import time

def sh(cmd, stdin="", sleep=False):
    """ run a command, send stdin and capture stdout and exit status"""
    if sleep:
        time.sleep(0.5)
    # process = Popen(cmd.split(), stdin=PIPE, stdout=PIPE)
    process = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE)
    process.stdin.write(bytes(stdin, "utf-8"))
    stdout = process.communicate()[0].decode('utf-8').strip()
    process.stdin.close()
    returncode = process.returncode
    return returncode, stdout
22db066b2e4b429b1756bf00d4592359c79939cd
3,647,370
def array_to_slide(arr: np.ndarray) -> openslide.OpenSlide:
    """converts a numpy array to an openslide.OpenSlide object

    Args:
        arr (np.ndarray): input image array

    Returns:
        openslide.OpenSlide: a slide object from openslide
    """
    assert isinstance(arr, np.ndarray)
    slide = openslide.ImageSlide(Image.fromarray(arr))
    return slide
83a5d2def7a0626a24396b933bd984a6de7ae634
3,647,371
from typing import Sequence
import copy

def baseline_replacement_by_blur(arr: np.array,
                                 patch_slice: Sequence,
                                 blur_kernel_size: int = 15,
                                 **kwargs) -> np.array:
    """
    Replace a single patch in an array by a blurred version.
    Blur is performed via a 2D convolution.
    blur_kernel_size controls the kernel-size of that convolution (Default is 15).
    Assumes unbatched channel first format.
    """
    nr_channels = arr.shape[0]

    # Create blurred array.
    blur_kernel_size = (1, *([blur_kernel_size] * (arr.ndim - 1)))
    kernel = np.ones(blur_kernel_size, dtype=arr.dtype)
    kernel *= 1.0 / np.prod(blur_kernel_size)
    kernel = np.tile(kernel, (nr_channels, 1, *([1] * (arr.ndim - 1))))

    if arr.ndim == 3:
        arr_avg = conv2D_numpy(
            x=arr,
            kernel=kernel,
            stride=1,
            padding=0,
            groups=nr_channels,
            pad_output=True,
        )
    elif arr.ndim == 2:
        raise NotImplementedError("1d support not implemented yet")
    else:
        raise ValueError("Blur supports only 2d inputs")

    # Perturb array.
    arr_perturbed = copy.copy(arr)
    arr_perturbed[patch_slice] = arr_avg[patch_slice]
    return arr_perturbed
5e5f25eeff3caebbdd8c9612fb26d23a11e3f80e
3,647,372
def admit_dir(file):
    """ create the admit directory name from a filename

    This filename can be a FITS file (usually with a .fits extension)
    or a directory, which would be assumed to be a CASA image or
    a MIRIAD image.

    It can be an absolute or relative address.
    """
    loc = file.rfind('.')
    ext = '.admit'
    if loc < 0:
        return file + ext
    else:
        if file[loc:] == ext:
            print("Warning: assuming a re-run on existing", file)
            return file
        return file[:loc] + ext
16d4b32b3994a3a260556b984e86e6507cad581b
3,647,373
def grad(values: list[int], /) -> list[int]:
    """Compute the gradient of a sequence of values."""
    return [v2 - v1 for v1, v2 in zip(values, values[1:])]
a1df1dffb27028dc408b00ec8ac26b6f68d9c923
3,647,374
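A one-line illustration of the `grad` record above (the `list[int]` annotation requires Python 3.9+); the input values are illustrative.

print(grad([1, 4, 9, 16]))  # [3, 5, 7]: successive differences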
from typing import Union

def subtract(
    num1: Union[int, float], num2: Union[int, float], *args
) -> Union[int, float]:
    """Subtracts given numbers"""
    sub: Union[int, float] = num1 - num2
    for num in args:
        sub -= num
    return sub
55db772fdcdc9aa24fd61069a3adcd6bc7abe468
3,647,375
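A brief usage sketch for the `subtract` record above; the values are illustrative.

print(subtract(10, 3))        # 7
print(subtract(10, 3, 2, 1))  # 4: extra positional args keep subtracting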
import pkgutil
import importlib

def get_submodules(module):
    """
    Attempts to find all submodules of a given module object
    """
    _skip = [
        "numpy.f2py",
        "numpy.f2py.__main__",
        "numpy.testing.print_coercion_tables",
    ]
    try:
        path = module.__path__
    except Exception:
        path = [getfile(module)]

    modules = {_name(module): module}
    for importer, modname, ispkg in pkgutil.walk_packages(
        path=path,
        prefix=_name(module) + ".",
        onerror=lambda x: None,
    ):
        # Some known packages cause issues
        if modname in _skip:
            continue
        try:
            modules[modname] = importlib.import_module(modname)
        except Exception:
            pass
    return modules
214b2aa0551e59a11bd853791ec64e9a238fd155
3,647,376
import inspect
import hashlib

def ucr_context_cache(vary_on=()):
    """
    Decorator which caches calculations performed during a UCR EvaluationContext

    The decorated function or method must have a parameter called 'context'
    which will be used by this decorator to store the cache.
    """
    def decorator(fn):
        assert 'context' in fn.__code__.co_varnames
        assert isinstance(vary_on, tuple)

        @wraps(fn)
        def _inner(*args, **kwargs):
            # shamelessly stolen from quickcache
            callargs = inspect.getcallargs(fn, *args, **kwargs)
            context = callargs['context']
            prefix = '{}.{}'.format(
                fn.__name__[:40] + (fn.__name__[40:] and '..'),
                hashlib.md5(inspect.getsource(fn).encode('utf-8')).hexdigest()[-8:]
            )
            cache_key = (prefix,) + tuple(callargs[arg_name] for arg_name in vary_on)

            if context.exists_in_cache(cache_key):
                return context.get_cache_value(cache_key)

            res = fn(*args, **kwargs)
            context.set_cache_value(cache_key, res)
            return res
        return _inner
    return decorator
85c6b511761eddb74c8a856f9d2c2f1029cba8b2
3,647,377
def extract_value(item, key):
    """Get the value for the given key or return an empty string if no value
    is found."""
    value = item.find(key).text
    if value is None:
        value = ""
    else:
        value = sanitize_value(value)
    return value
228115b77b0298bb808dcbb0dbd89a0938f0feef
3,647,378
def create_train_test_split(data: pd.DataFrame, split_size: float = .8, seed: int = 42) -> list:
    """ takes the final data set and splits it into random train and test subsets.
    Returns a list containing train-test split of inputs

    Args:
        data: dataset to be split into train/test
        split_size: the size of the train dataset (default .8)
        seed: pass an int for reproducibility purposes

    Returns:
        A list containing train-test split of inputs
    """
    # assert split size between 0 and 1
    assert 0 <= split_size <= 1, "split_size out of bounds"
    assert isinstance(data, pd.DataFrame), "no DataFrame provided"
    assert isinstance(seed, int), "provided seed is no integer"

    # split into features and target
    # features = data.drop('target', axis=1)
    # target = data['target']

    # stratify by the target to ensure equal distribution
    return train_test_split(data, train_size=split_size, random_state=seed, shuffle=True)
49a5a07e5f232106d502eaf2f9d9021644f37e27
3,647,379
def get_vec(text, model, stopwords):
    """
    Transform text pandas series in array with the vector representation
    of the sentence using fasttext model
    """
    array_fasttext = np.array([sent2vec(x, model, stopwords) for x in text])
    return array_fasttext
9c28cf8cd2c6acea3f81a5290d6f6c9fd027a6ca
3,647,380
from typing import Tuple
import math

def rytz_axis_construction(d1: Vector, d2: Vector) -> Tuple[Vector, Vector, float]:
    """
    The Rytz’s axis construction is a basic method of descriptive Geometry
    to find the axes, the semi-major axis and semi-minor axis, starting from
    two conjugated half-diameters.

    Source: `Wikipedia <https://en.m.wikipedia.org/wiki/Rytz%27s_construction>`_

    Given conjugated diameter `d1` is the vector from center C to point P and
    the given conjugated diameter `d2` is the vector from center C to point Q.
    Center of ellipse is always ``(0, 0, 0)``.

    This algorithm works for 2D/3D vectors.

    Args:
        d1: conjugated semi-major axis as :class:`Vector`
        d2: conjugated semi-minor axis as :class:`Vector`

    Returns:
        Tuple of (major axis, minor axis, ratio)
    """
    Q = Vector(d1)  # vector CQ
    P1 = Vector(d2).orthogonal(ccw=False)  # vector CP', location P'
    D = P1.lerp(Q)  # vector CD, location D, midpoint of P'Q
    radius = D.magnitude
    radius_vector = (Q - P1).normalize(radius)  # direction vector P'Q
    A = D - radius_vector  # vector CA, location A
    B = D + radius_vector  # vector CB, location B
    if A.isclose(NULLVEC) or B.isclose(NULLVEC):
        raise ArithmeticError('Conjugated axis required, invalid source data.')
    major_axis_length = (A - Q).magnitude
    minor_axis_length = (B - Q).magnitude
    if math.isclose(major_axis_length, 0.) or math.isclose(minor_axis_length, 0.):
        raise ArithmeticError('Conjugated axis required, invalid source data.')
    ratio = minor_axis_length / major_axis_length
    major_axis = B.normalize(major_axis_length)
    minor_axis = A.normalize(minor_axis_length)
    return major_axis, minor_axis, ratio
14fec51fa1591e74553e6918187dfb850ae4b6f0
3,647,381
def _join_ljust(words, width=9):
    """join list of str to fixed width, left just"""
    return ' '.join(map(lambda s: s.ljust(width), words)).strip()
6096f5be960fb0ae2fe942c9b55924802094db16
3,647,382
def construct_dictionaries(color, marker, size, scatter_ecolor='k', alpha=1.0,
                           fill_scatter=False, elinewidth=1, capsize=0):
    """
    Example usage:
    halo_kws = construct_dictionaries('k', 'o', 20, alpha=.3)
    pltabund.plot_XFe_XFe(ax, 'K', 'Mg', roed, plot_xlimit=True, plot_ylimit=True,
                          label="Halo", **halo_kws)
    """
    e_kws = {'ecolor': color, 'elinewidth': elinewidth, 'capsize': capsize}
    if fill_scatter:
        ulkws = {'arrow_length': 0.25,
                 'scatter_kws': {'marker': marker, 's': size, 'facecolor': color, 'alpha': alpha},
                 'arrow_kws': {'color': color, 'head_length': 0.15, 'head_width': 0.05}
                 }
    else:
        ulkws = {'arrow_length': 0.25,
                 'scatter_kws': {'marker': marker, 's': size, 'facecolor': 'none',
                                 'linewidths': 1, 'edgecolors': color, 'alpha': alpha},
                 'arrow_kws': {'color': color, 'head_length': 0.15, 'head_width': 0.05}
                 }
    kws = {'color': color, 'edgecolors': scatter_ecolor, 'marker': marker, 's': size, 'alpha': alpha,
           'e_kws': e_kws, 'ulkws': ulkws}
    return kws
6741dd1ffacc3a10953f86ccdaf53d1d3504a77c
3,647,383
from datetime import datetime
import os

def to_file(exp_dir, chain):
    """Write results to file

    :param exp_dir: directory to export
    :param chain: Chain object
    :raise RoutineErr: on file I/O error
    :return True: on success
    """
    # use datetime.today() directly, matching the `from datetime import datetime` import
    now = datetime.today().strftime('%Y%m%d-%H%M%S-%f')
    file_name = os.path.join(exp_dir, 'chains-{}.txt'.format(now))
    try:
        out = open(file_name, 'wt')
    except OSError:
        raise RoutineErr('Could not open file: {}'.format(file_name))

    if chain.dna1:
        out.write('>{}-DNA1\n'.format(chain.info))
        out.write('{}\n'.format(chain.dna1))
        out.write('\n')
    if chain.dna2:
        out.write('>{}-DNA2\n'.format(chain.info))
        out.write('{}\n'.format(chain.dna2))
        out.write('\n')
    if chain.rna:
        out.write('>{}-RNA\n'.format(chain.info))
        out.write('{}\n'.format(chain.rna))
        out.write('\n')
    if chain.protein:
        out.write('>{}-protein\n'.format(chain.info))
        out.write('{}\n'.format(chain.protein))
        out.write('\n')

    out.close()
    return True
782c452d1a17e6b63a443e7f388923602fbef12b
3,647,384
def arrows(m) -> str:
    """One or more arrows separated by a space"""
    return m.arrow_list
3fb43d9d753f667148bb9bb18eb026f7385d6264
3,647,385
def hex2int(hex_str):
    """
    Convert 2 hex characters (e.g. "23") to int (35)
    :param hex_str: hex character string
    :return: int integer
    """
    return int(hex_str, 16)
0640cffd6f7558f4dfd1bc74e20510e7d2051ca3
3,647,386
def lat_2_R(lat):
    """Takes a geodetic latitude and puts out the distance from the center of
    an ellipsoid Earth to the surface

    Arguments:
        lat (float): Geodetic Latitude angle [degrees].

    Returns:
        (float): Geocentric distance [km]
    """
    R_polar = 6356.752314245
    R_eq = 6378.137

    lat = lat / 180 * pi
    R = sqrt(
        ((R_eq ** 2 * cos(lat)) ** 2 + (R_polar ** 2 * sin(lat)) ** 2)
        / ((R_eq * cos(lat)) ** 2 + (R_polar * sin(lat)) ** 2)
    )

    # e = sqrt(1-R_polar**2/R_eq**2)
    # R = R_eq/sqrt(1-e**2*sin(lat/180*pi)**2)
    # R = sqrt( ( (R_eq**2*cos(lat))**2 + (R_polar**2*sin(lat))**2 ) / ( (R_eq*cos(lat))**2 + (R_polar*sin(lat))**2 ) )

    return R
aa4f24ccb8a82bad1e6d9fc49f7e2984874ee78b
3,647,387
def compare_records(old_list, new_list):
    """Compare two lists of SeqRecord objects."""
    assert isinstance(old_list, list)
    assert isinstance(new_list, list)
    assert len(old_list) == len(new_list)
    for old_r, new_r in zip(old_list, new_list):
        if not compare_record(old_r, new_r):
            return False
    return True
6b70115f9db52898849fa065eff6ed827eab1e16
3,647,388
def embed_owners_wallet(address, asset_name) -> Embed:
    """Return discord embed of wallet owner"""
    title = f"{asset_name} is owned by"
    description = f"`{address}`"
    color = Colour.blurple()
    embed = Embed(title=title, description=description, color=color)
    name = "This address belongs to wallet..."
    value = f"{POOL_PM_URL}/{address}/0e14267a"
    embed.add_field(name=name, value=value, inline=False)
    embed.set_footer(text=f"Data comes from {POOL_PM_URL}")
    return embed
016039acbdf264e5389d3e4f94153345226d2549
3,647,389
def Qdist_H_lm_jk_FGH(a, b, c, d, N_x, N_y, h, par, model):
    """
    Parameters
    ----------
    a : TYPE
        left end point of the interval in x coordinate chosen for solving the
        time independent schrodinger equation.
    b : TYPE
        right end point of the interval in x coordinate chosen for solving the
        time independent schrodinger equation.
    c : TYPE
        left end point of the interval in y coordinate chosen for solving the
        time independent schrodinger equation.
    d : TYPE
        right end point of the interval in y coordinate chosen for solving the
        time independent schrodinger equation.
    N_x : TYPE
        number of grid points in x coordinate, must be an odd integer value.
    N_y : TYPE
        number of grid points in y coordinate, must be an odd integer value.
    h : TYPE
        reduced planck's constant of the system.
    par : TYPE
        parameters of the potential energy function.

    Returns
    -------
    H_lm_jk : TYPE
        Hamiltonian matrix which is a discretisation of the Hamiltonian
        operator, computed using the FGH method.
    """
    dx = (b - a) / (N_x - 1)
    dpx = 2 * np.pi / (N_x - 1) / dx
    dy = (d - c) / (N_y - 1)
    dpy = 2 * np.pi / (N_y - 1) / dy

    H_lm_jk = np.zeros((N_x * N_y, N_x * N_y))
    for i1 in range(N_x * N_y):
        # print(i1)
        for i2 in range(i1, N_x * N_y):
            k = (i2 // N_x)
            j = (i2 % N_x)
            m = (i1 // N_x)
            l = (i1 % N_x)

            sum_rhalf = sum(np.cos(2 * np.pi * np.arange(1, int((N_x + 1) / 2)) * (j - l) / (N_x - 1)))
            sum_r = 2 * sum_rhalf + 1
            sum_shalf = sum(np.cos(2 * np.pi * np.arange(1, int((N_y + 1) / 2)) * (k - m) / (N_y - 1)))
            sum_s = 2 * sum_shalf + 1

            if j == l and k == m:
                H_lm_jk[m*N_x+l, k*N_x+j] = H_lm_jk[m*N_x+l, k*N_x+j] \
                    + 2 * 1/(N_x-1)/(N_y-1) * h**2 * sum((np.arange(1, int((N_x+1)/2)) * dpx)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_x+1)/2)) * (j-l)/(N_x-1))) * sum_s \
                    + 2 * 1/(N_x-1)/(N_y-1) * h**2 * sum((np.arange(1, int((N_y+1)/2)) * dpy)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_y+1)/2)) * (k-m)/(N_y-1))) * sum_r
                H_lm_jk[m*N_x+l, k*N_x+j] = H_lm_jk[m*N_x+l, k*N_x+j] + potential_energy_2dof((a + j*dx), (c + k*dy), par, model)
                H_lm_jk[k*N_x+j, m*N_x+l] = H_lm_jk[m*N_x+l, k*N_x+j]
            else:
                H_lm_jk[m*N_x+l, k*N_x+j] = H_lm_jk[m*N_x+l, k*N_x+j] \
                    + 2 * 1/(N_x-1)/(N_y-1) * h**2 * sum((np.arange(1, int((N_x+1)/2)) * dpx)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_x+1)/2)) * (j-l)/(N_x-1))) * sum_s \
                    + 2 * 1/(N_x-1)/(N_y-1) * h**2 * sum((np.arange(1, int((N_y+1)/2)) * dpy)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_y+1)/2)) * (k-m)/(N_y-1))) * sum_r
                H_lm_jk[k*N_x+j, m*N_x+l] = H_lm_jk[m*N_x+l, k*N_x+j]

    return H_lm_jk
f67c369f5eadd273afa3a2af7cf9d9b274d7a994
3,647,390
from django.core.exceptions import ObjectDoesNotExist
from physical.models import Host
from backup.models import LogConfiguration

def get_log_configuration_retention_backup_log_days(host_id):
    """Return LOG_CONFIGURATION_RETENTION_BACKUP_LOG_DAYS"""
    try:
        host = Host.objects.get(id=host_id)
        databaseinfra = host.instances.all()[0].databaseinfra
    except Exception as e:
        LOG.warn("Error on get_log_configuration_retention_backup_log_days. Host id: {} - error: {}".format(host_id, e))
        return None

    try:
        log_configuration = LogConfiguration.objects.get(
            environment=databaseinfra.environment,
            engine_type=databaseinfra.engine.engine_type)
    except ObjectDoesNotExist:
        return None

    return log_configuration.retention_days
02ed8a863fd383c15eb25c37553a7b521a2b27fe
3,647,391
def createLabelsAndWeightsFromRois(image, roiimage):
    """Create class labels and instance labels.

    Args:
        image: input image
        roiimage: input mask image

    Returns:
        classlabelsdata, instancelabelsdata, total_instances
    """
    logger.info("Creating class and instance labels ...")
    W = image.shape[1]
    H = image.shape[0]
    logger.info("H, W = {},{}".format(H, W))
    classlabelsdata = np.ones([H * W])
    instancelabelsdata = np.zeros([H * W])
    roi_val = np.unique(roiimage)
    total_instances = 1
    roiimage = roiimage.reshape(-1)
    for j in range(roi_val.shape[0]):
        if roi_val[j] > 0:
            indices = np.where(roiimage == roi_val[j])
            for ind in indices:
                classlabelsdata[ind] = 2
                instancelabelsdata[ind] = total_instances
            total_instances += 1
    return classlabelsdata, instancelabelsdata, total_instances
747a99c9a1e27666dfc5f6aa73394d86ee4e7a19
3,647,392
def dup_inner_refine_complex_root(f, x, y, dx, dy, F, K):
    """One bisection step of complex root refinement algorithm. """
    hx, hy = dx/2, dy/2
    cx, cy = x + hx, y + hy

    F1, F2, F3, F4 = F

    Fx = _dup_inner_sturm(f, K.one, K.zero, cx, cy, K)
    Fy = _dup_inner_sturm(f, K.zero, K.one, cx, cy, K)

    # Quadrant #1: ++
    F11 = Fx
    F12 = _dup_sturm_shift(F2, hx, K)
    F13 = F3
    F14 = _dup_sturm_mirror(_dup_sturm_shift(Fy, hy, K), K)

    k1 = _dup_inner_zeros(F11, F12, F13, F14, hx, hy, K)

    if k1 == 1:
        return (cx, cy, hx, hy, (F11, F12, F13, F14))

    # Quadrant #2: -+
    F21 = _dup_sturm_shift(Fx, -hx, K)
    F22 = Fy
    F23 = _dup_sturm_shift(F3, hx, K)
    F24 = F4

    k2 = _dup_inner_zeros(F21, F22, F23, F24, hx, hy, K)

    if k2 == 1:
        return (x, cy, hx, hy, (F21, F22, F23, F24))

    # Quadrant #3: --
    F31 = F1
    F32 = _dup_sturm_shift(Fy, -hy, K)
    F33 = _dup_sturm_mirror(Fx, K)
    F34 = _dup_sturm_shift(F4, hy, K)

    k3 = _dup_inner_zeros(F31, F32, F33, F34, hx, hy, K)

    if k3 == 1:
        return (x, y, hx, hy, (F31, F32, F33, F34))

    # Quadrant #4: +-
    F41 = _dup_sturm_shift(F1, hx, K)
    F42 = F2
    F43 = _dup_sturm_mirror(_dup_sturm_shift(Fx, hx, K), K)
    F44 = _dup_sturm_mirror(Fy, K)

    k4 = _dup_inner_zeros(F41, F42, F43, F44, hx, hy, K)

    if k4 == 1:
        return (cx, y, hx, hy, (F41, F42, F43, F44))

    raise RefinementFailed("no roots in (%s, %s) x (%s, %s) rectangle" % (x, y, x+dx, y+dy))
c45e1deba1ad68fb9a85a2e46ca4e3c7eddb072d
3,647,393
def print_dataset_info(superclasses, subclass_splits, label_map, label_map_sub):
    """
    Obtain a dataframe with information about the superclasses/subclasses
    included in the dataset.

    Args:
        superclasses (list): WordNet IDs of superclasses
        subclass_splits (tuple): Tuple entries correspond to the source and
            target domains respectively. A tuple entry is a list, where each
            element is a list of subclasses to be included in a given
            superclass in that domain. If split is None, the second tuple
            element is empty.
        label_map (dict): Map from (super)class number to superclass name
        label_map_sub (dict): Map from subclass number to subclass name
            (equivalent to label map for original dataset)

    Returns:
        dataDf (pandas DataFrame): Columns contain relevant information about
            the dataset
    """
    def print_names(class_idx):
        return [f'{label_map_sub[r].split(",")[0]} ({r})' for r in class_idx]

    data = {'superclass': []}
    contains_split = len(subclass_splits[1])
    if contains_split:
        data.update({'subclasses (source)': [], 'subclasses (target)': []})
    else:
        data.update({'subclasses': []})

    for i, (k, v) in enumerate(label_map.items()):
        data['superclass'].append(f'{v}')
        if contains_split:
            data['subclasses (source)'].append(print_names(subclass_splits[0][i]))
            data['subclasses (target)'].append(print_names(subclass_splits[1][i]))
        else:
            data['subclasses'].append(print_names(subclass_splits[0][i]))

    dataDf = pd.DataFrame(data)
    return dataDf
6885eecfaa38f609957b6ccfa594701f77ea1b30
3,647,394
def mock_sync_cavatica_account(mocker):
    """ Mocks out sync Cavatica account functions """
    sync_cavatica_account = mocker.patch(
        "creator.projects.cavatica.sync_cavatica_account"
    )
    sync_cavatica_account.return_value = [], [], []
    return sync_cavatica_account
27a0a8abee2c025fe17ba4fa4a939bcf04fc9c63
3,647,395
def check_illegal(s):
    """
    :param s: (String) user input
    :return: (Bool) check user input is illegal or not
    """
    check = 0
    for ch in s:
        if len(ch) > 1:
            check = 1
    if check == 0:
        return True
    else:
        print("Illegal input")
        return False
6c028f03ae6f317e7fea020e2da1f35b93d3bcd7
3,647,396
def phasefold(time, rv, err, period):
    """Phasefold an rv timeseries with a given period.

    Parameters
    ----------
    time : array_like
        An array containing the times of measurements.
    rv : array_like
        An array containing the radial-velocities.
    err : array_like
        An array containing the radial-velocity uncertainties.
    period : float
        The period with which to phase fold.

    Returns
    -------
    time_phased : array_like
        The phased timestamps.
    rv_phased : array_like
        The phased RVs.
    err_phased : array_like
        The phased RV uncertainties.
    """
    phases = (time / period) % 1
    sortIndi = sp.argsort(phases)  # sorts the points
    # gets the indices so we sort the RVs correspondingly(?)
    time_phased = phases[sortIndi]
    rv_phased = rv[sortIndi]
    err_phased = err[sortIndi]
    return time_phased, rv_phased, err_phased
c0076fd49fa9c97fa90419f6f2d42d78c09f0f2e
3,647,397
def get_att_mats(translate_model):
    """
    Gets the tensors representing the attentions from a built model.

    The attentions are stored in a dict on the Transformer object while
    building the graph.

    :param translate_model: Transformer object to fetch the attention weights from.
    :return:
    """
    encdec_atts = []

    prefix = 'transformer/body/'
    postfix = '/multihead_attention/dot_product_attention'

    for i in range(1, translate_model.hparams.num_hidden_layers):
        encdec_att = translate_model.attention_weights[
            '%sdecoder/layer_%i/encdec_attention%s' % (prefix, i, postfix)]
        encdec_atts.append(encdec_att)

    encdec_att_mats = [tf.squeeze(tf.reduce_sum(mat, axis=1)) for mat in encdec_atts]

    return encdec_att_mats
d6c7f0d214c444250210c926b2407405e8e34807
3,647,398
def get_resource_string(package, resource):
    """Return a string containing the contents of the specified resource.

    If the pathname is absolute it is retrieved starting at the path of
    the importer for 'fullname'.  Otherwise, it is retrieved relative to
    the module within the loader.
    """
    provider, resource = NormalizeResource(package, resource)
    return provider.get_resource_string(_resource_manager, resource)
e0e76f912e02d30645b646e076c10cbd5016a600
3,647,399