content: string (lengths 22 to 815k)
id: int64 (values 0 to 4.91M)
def fancy_vector(v):
    """
    Returns a given 3-vector or array in a cute way on the shell,
    if you use 'print' on the return value.
    """
    return "\n / %5.2F \\\n" % (v[0]) + \
           " | %5.2F |\n" % (v[1]) + \
           " \\ %5.2F /\n" % (v[2])
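A minimal usage sketch for the formatter above; the vector values are hypothetical and it assumes fancy_vector is defined in the current scope.

# Hypothetical 3-vector; printing the return value shows a bracketed column.
v = [1.0, -2.5, 3.25]
print(fancy_vector(v))
# Output (approximately):
#  /  1.00 \
#  | -2.50 |
#  \  3.25 /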
20,200
def findpath_split(seq, ss1, ss2, md, th=5, w=None):
    """ Calculate findpath barriers for smaller components.

    Args:
        seq: RNA sequence.
        ss1: Structure 1.
        ss2: Structure 2.
        md: ViennaRNA model details.
        th: Threshold of how many basepairs must change for an
            independent findpath run. Defaults to 5.
        w: Findpath width. Defaults to None.

    Returns:
        path, barrier: The folding path and the barrier height.
            WARNING: If path splitting actually took place, then energy
            values given in the path data are only relative to the
            starting structure.
    """
    pt1 = make_pair_table(ss1, base=0, chars=list('.x'))
    pt2 = make_pair_table(ss2, base=0, chars=list('.x'))
    mindiff = None
    recurse = None
    for ij in chain(common_exterior_bases(pt1, pt2),
                    common_basepairs(pt1, pt2)):
        (i, j) = ij if isinstance(ij, tuple) else (ij, None)
        st1O, st1I = split_struct(ss1, i, j, spacer='...')
        st2O, st2I = split_struct(ss2, i, j, spacer='...')
        do = RNA.bp_distance(st1O, st2O)
        if do < th:
            continue
        di = RNA.bp_distance(st1I, st2I)
        if di < th:
            continue
        diff = abs(di - do)
        if mindiff is None or diff < mindiff:
            mindiff = diff
            seqO, seqI = split_struct(seq, i, j, spacer='NNN')
            recurse = ((i, j), (seqO, st1O, st2O), (seqI, st1I, st2I))
        elif mindiff is not None and diff > mindiff:
            # No need to check the rest if we are getting worse.
            break
    if mindiff is not None:
        pathO, _ = findpath_split(*recurse[1], md, th, w)
        pathI, _ = findpath_split(*recurse[2], md, th, w)
        return findpath_merge(pathO, pathI, *recurse[0])
    else:
        fpw = 4 * RNA.bp_distance(ss1, ss2) if w is None else w
        return call_findpath(seq, ss1, ss2, md, w=fpw)
20,201
def get_root_relative_url(url_path):
    """Remove the root page slug from the URL path"""
    return _clean_rel_url('/'.join(url_path.split('/')[2:]))
20,202
def test_policy_om_random_mdp():
    """Test that optimal policy occupancy measure ("om") for a random MDP is sane."""
    mdp = gym.make("imitation/Random-v0")
    V, Q, pi = mce_partition_fh(mdp)
    assert np.all(np.isfinite(V))
    assert np.all(np.isfinite(Q))
    assert np.all(np.isfinite(pi))
    # Check it is a probability distribution along the last axis
    assert np.all(pi >= 0)
    assert np.allclose(np.sum(pi, axis=-1), 1)

    Dt, D = mce_occupancy_measures(mdp, pi=pi)
    assert np.all(np.isfinite(D))
    assert np.any(D > 0)
    # expected number of state visits (over all states) should be equal to the
    # horizon
    assert np.allclose(np.sum(D), mdp.horizon)
20,203
def run_command(command, split=False, include_errors=False, cwd=None,
                shell=False, env=None):
    """Run command in subprocess and return exit code and output"""
    sub_env = os.environ.copy()
    if env is not None:
        sub_env.update(env)

    if include_errors:
        error_pipe = subprocess.STDOUT
    else:
        error_pipe = subprocess.PIPE

    process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=error_pipe,
        shell=shell,
        universal_newlines=True,
        cwd=cwd,
        env=sub_env
    )
    if split:
        output = process.stdout.readlines()
    else:
        output = process.stdout.read()
    return_code = process.wait()
    logger.debug('subprocess %s returned %d, output: %s',
                 command, return_code, output)
    return return_code, output
20,204
def exclusion_windows_matching(match_peaks):
    """
    Discard the occurrences of matching and non-matching ions when they are found
    in the window (+-losses_window_removal) around M-xx or free bases ions
    """
    output_dic = match_peaks
    for key in match_peaks:
        if match_peaks[key]:
            for t in match_peaks[key]:
                mass_losses_list, new_list = find_losses_freebases(match_peaks[key][t][7:]), []
                for ion in match_peaks[key][t][7:]:
                    # Keep ion losses and free bases matched in the MS2_matches list
                    if 'M-' not in ion[1] and len(ion[1].split('(')[0]) != 1:
                        flag, mz_ion = 1, np.float64(ion[2])
                        for mass_loss in mass_losses_list:
                            # Add the MS2 offset
                            mass_loss_offseted = mass_loss + ppm_range(mass_loss, MS2_ppm_offset)
                            # Check and discard any sequencing ion found in the M-xx exclusion window
                            if mass_loss_offseted - args.losses_window_removal <= \
                                    mz_ion <= mass_loss_offseted + args.losses_window_removal:
                                flag = 0
                                break
                        if flag == 1:
                            new_list.append(ion)
                    else:
                        new_list.append(ion)
                output_dic[key].update({t: match_peaks[key][t][:7] + new_list})
    return output_dic
20,205
def any_of(elements):
    """
    Check to see if the argument is contained in a list of possible elements.

    :param elements: The elements to check the argument against in the predicate.
    :return: A predicate to check if the argument is a constituent element.
    """
    def predicate(argument):
        return argument in elements

    return predicate
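A short usage sketch for the predicate factory above; the example values are hypothetical and it assumes any_of is in scope.

# Build a membership predicate once, then reuse it.
is_vowel = any_of("aeiou")
print(is_vowel("a"))  # True
print(is_vowel("z"))  # False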
20,206
def find_year(films_lst: list, year: int):
    """
    Filter list of films by given year
    """
    filtered_films_lst = [line for line in films_lst if line[1] == str(year)]
    return filtered_films_lst
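A quick usage sketch with hypothetical film data; it assumes find_year is in scope and that each record stores the year as a string in position 1.

films = [("Arrival", "2016"), ("Dune", "2021"), ("Moonlight", "2016")]
print(find_year(films, 2016))  # [('Arrival', '2016'), ('Moonlight', '2016')]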
20,207
def func_BarPS(HA_Open, HA_Close, HA_PS_Lookback,
               PS_pct_level=[0.35, 0.5, 0.95, 0.97], combine=False):
    """
    0. This function calculates the price trend number of an HA bar by looking back
       HA_PS_Lookback HA bars and, according to the previous bars' distribution,
       finding the range (i.e. -4,-3,-2,-1,0,1,2,3,4) of the current bar.
    1. This function has 5 arguments (two optional) and returns 1 DataFrame as output.
    2. Input arguments:
       (1) HA_Open: DataFrame
       (2) HA_Close: DataFrame
       (3) HA_PS_Lookback: int, number of bars to look back.
       (4) PS_pct_level: list, optional, default value is [0.35, 0.5, 0.95, 0.97]
       (5) combine: boolean, optional, default value is False, calculating the up bars
           and down bars separately, while combine=True calculates them combined.
    3. Output is 1 DataFrame:
       (1) HA_PS: shown as -4,-3,-2,-1,0,1,2,3,4, indicating the size of HA bars.
    """
    # Initialize:
    HA_num = len(HA_Open)
    HA_PS = np.zeros_like(HA_Open)
    HA_Open = HA_Open.values
    HA_Close = HA_Close.values

    # Main:
    for i in range(HA_PS_Lookback, HA_num):
        HA_Open_lb = HA_Open[i - HA_PS_Lookback:i]
        HA_Close_1b = HA_Close[i - HA_PS_Lookback:i]
        HA_PS_positive_level, HA_PS_negative_level = func_PS_Level(
            HA_Open_lb, HA_Close_1b, PS_pct_level, combine)
        HA_range = HA_Close[i] - HA_Open[i]

        if HA_range > 0:
            HA_PS_temp = np.where(HA_range <= HA_PS_positive_level)[0] + 1
            if len(HA_PS_temp) != 0:
                HA_PS[i] = HA_PS_temp[0] - 1
            else:
                HA_PS[i] = len(HA_PS_positive_level)  # -1

        if HA_range < 0:
            HA_PS_temp = np.where(HA_range >= HA_PS_negative_level)[0] + 1
            if len(HA_PS_temp) != 0:
                HA_PS[i] = -HA_PS_temp[0] + 1
            else:
                HA_PS[i] = -len(HA_PS_negative_level)  # +1

    HA_PS_df = pd.DataFrame(HA_PS, columns=['PS'])

    return HA_PS_df
20,208
def filter_whitespace(stream: List[Part]) -> List[Part]:
    """Remove whitespace tokens"""
    return flu(stream).filter(lambda x: x.token != Token.WHITESPACE).collect()
20,209
def iterellipse(x: int, y: int, b: int, a: int):
    """Yields (int, int) 2D vertices along a path defined by major and minor axes
    b and a as it traces an ellipse with origin set at (x, y)

    Args:
        b, a: major and minor axes

    Yields:
        [x: int, y: int]
    """
    for p in iterellipsepart(b, a):
        yield from mirror1stquad(x, y, p)
20,210
def merge_partial_dicts(interfaces_dict, partials_dict):
    """Merges partial interfaces into non-partial interfaces.

    Args:
        interfaces_dict: A dict of the non-partial interfaces.
        partials_dict: A dict of partial interfaces.

    Returns:
        A merged dictionary of |interfaces_dict| with |partials_dict|.
    """
    for interface_name, partial in partials_dict.iteritems():
        interface = interfaces_dict.get(interface_name)
        if not interface:
            raise Exception('There is a partial interface, but the corresponding '
                            'non-partial interface was not found.')
        for member in _MEMBERS:
            interface[member].extend(partial.get(member))
        interface.setdefault(_PARTIAL_FILEPATH, []).append(partial[_FILEPATH])
    return interfaces_dict
20,211
def gen_code_def_part(metadata):
    """Generate the class-definition part of the code."""
    class_def_dict = validate(metadata)
    class_def_list = list(class_def_dict.values())
    code = templates.t_def_all_class.render(class_def_list=class_def_list)
    return code
20,212
def clang_plusplus_frontend(input_file, args):
    """Generate LLVM IR from C++ language source(s)."""
    compile_command = default_clang_compile_command(args)
    compile_command[0] = llvm_exact_bin('clang++')
    return compile_to_bc(input_file, compile_command, args)
20,213
def create_forward_many_to_many_manager(superclass, rel, reverse): """ Create a manager for the either side of a many-to-many relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-many relations. """ class ManyRelatedManager(superclass): def __init__(self, instance=None): super().__init__() self.instance = instance if not reverse: self.model = rel.model self.query_field_name = rel.field.related_query_name() self.prefetch_cache_name = rel.field.name self.source_field_name = rel.field.m2m_field_name() self.target_field_name = rel.field.m2m_reverse_field_name() self.symmetrical = rel.symmetrical else: self.model = rel.related_model self.query_field_name = rel.field.name self.prefetch_cache_name = rel.field.related_query_name() self.source_field_name = rel.field.m2m_reverse_field_name() self.target_field_name = rel.field.m2m_field_name() self.symmetrical = False self.through = rel.through self.reverse = reverse self.source_field = self.through._meta.get_field(self.source_field_name) self.target_field = self.through._meta.get_field(self.target_field_name) self.core_filters = {} self.pk_field_names = {} for lh_field, rh_field in self.source_field.related_fields: core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name) self.core_filters[core_filter_key] = getattr(instance, rh_field.attname) self.pk_field_names[lh_field.name] = rh_field.name self.related_val = self.source_field.get_foreign_related_value(instance) if None in self.related_val: raise ValueError('"%r" needs to have a value for field "%s" before ' 'this many-to-many relationship can be used.' % (instance, self.pk_field_names[self.source_field_name])) # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if instance.pk is None: raise ValueError("%r instance needs to have a primary key value before " "a many-to-many relationship can be used." % instance.__class__.__name__) def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse) return manager_class(instance=self.instance) do_not_call_in_templates = True def _build_remove_filters(self, removed_vals): filters = Q(**{self.source_field_name: self.related_val}) # No need to add a subquery condition if removed_vals is a QuerySet without # filters. removed_vals_filters = (not isinstance(removed_vals, QuerySet) or removed_vals._has_filters()) if removed_vals_filters: filters &= Q(**{'%s__in' % self.target_field_name: removed_vals}) if self.symmetrical: symmetrical_filters = Q(**{self.target_field_name: self.related_val}) if removed_vals_filters: symmetrical_filters &= Q( **{'%s__in' % self.source_field_name: removed_vals}) filters |= symmetrical_filters return filters def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. 
""" queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) return queryset._next_is_sticky().filter(**self.core_filters) def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) query = {'%s__in' % self.query_field_name: instances} queryset = queryset._next_is_sticky().filter(**query) # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that # there will already be a join on the join table, so we can just add # the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. fk = self.through._meta.get_field(self.source_field_name) join_table = fk.model._meta.db_table connection = connections[queryset.db] qn = connection.ops.quote_name queryset = queryset.extra(select={ '_prefetch_related_val_%s' % f.attname: '%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields}) return ( queryset, lambda result: tuple( getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields ), lambda inst: tuple( f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields ), False, self.prefetch_cache_name, False, ) def add(self, *objs, through_defaults=None): self._remove_prefetched_objects() db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): self._add_items( self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults, ) # If this is a symmetrical m2m relation to self, add the mirror # entry in the m2m table. `through_defaults` aren't used here # because of the system check error fields.E332: Many-to-many # fields with intermediate tables must not be symmetrical. if self.symmetrical: self._add_items(self.target_field_name, self.source_field_name, *objs) add.alters_data = True def remove(self, *objs): self._remove_prefetched_objects() self._remove_items(self.source_field_name, self.target_field_name, *objs) remove.alters_data = True def clear(self): db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): signals.m2m_changed.send( sender=self.through, action="pre_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) self._remove_prefetched_objects() filters = self._build_remove_filters(super().get_queryset().using(db)) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) clear.alters_data = True def set(self, objs, *, clear=False, through_defaults=None): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. 
objs = tuple(objs) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, through_defaults=through_defaults) else: old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True)) new_objs = [] for obj in objs: fk_val = ( self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else obj ) if fk_val in old_ids: old_ids.remove(fk_val) else: new_objs.append(obj) self.remove(*old_ids) self.add(*new_objs, through_defaults=through_defaults) set.alters_data = True def create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) self.add(new_obj, through_defaults=through_defaults) return new_obj create.alters_data = True def get_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created get_or_create.alters_data = True def update_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created update_or_create.alters_data = True def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None): # source_field_name: the PK fieldname in join table for the source object # target_field_name: the PK fieldname in join table for the target object # *objs - objects to add. Either object instances, or primary keys of object instances. through_defaults = through_defaults or {} # If there aren't any objects, there is nothing to do. from django.db.models import Model if objs: new_ids = set() for obj in objs: if isinstance(obj, self.model): if not router.allow_relation(obj, self.instance): raise ValueError( 'Cannot add "%r": instance is on database "%s", value is on database "%s"' % (obj, self.instance._state.db, obj._state.db) ) fk_val = self.through._meta.get_field( target_field_name).get_foreign_related_value(obj)[0] if fk_val is None: raise ValueError( 'Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name) ) new_ids.add(fk_val) elif isinstance(obj, Model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) else: new_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) vals = (self.through._default_manager.using(db) .values_list(target_field_name, flat=True) .filter(**{ source_field_name: self.related_val[0], '%s__in' % target_field_name: new_ids, })) new_ids.difference_update(vals) with transaction.atomic(using=db, savepoint=False): if self.reverse or source_field_name == self.source_field_name: # Don't send the signal when we are inserting the # duplicate data row for symmetrical reverse entries. 
signals.m2m_changed.send( sender=self.through, action='pre_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=new_ids, using=db, ) # Add the ones that aren't there already self.through._default_manager.using(db).bulk_create([ self.through(**through_defaults, **{ '%s_id' % source_field_name: self.related_val[0], '%s_id' % target_field_name: obj_id, }) for obj_id in new_ids ]) if self.reverse or source_field_name == self.source_field_name: # Don't send the signal when we are inserting the # duplicate data row for symmetrical reverse entries. signals.m2m_changed.send( sender=self.through, action='post_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=new_ids, using=db, ) def _remove_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK colname in join table for the source object # target_field_name: the PK colname in join table for the target object # *objs - objects to remove. Either object instances, or primary # keys of object instances. if not objs: return # Check that all the objects are of the right type old_ids = set() for obj in objs: if isinstance(obj, self.model): fk_val = self.target_field.get_foreign_related_value(obj)[0] old_ids.add(fk_val) else: old_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): # Send a signal to the other end if need be. signals.m2m_changed.send( sender=self.through, action="pre_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) target_model_qs = super().get_queryset() if target_model_qs._has_filters(): old_vals = target_model_qs.using(db).filter(**{ '%s__in' % self.target_field.target_field.attname: old_ids}) else: old_vals = old_ids filters = self._build_remove_filters(old_vals) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) return ManyRelatedManager
20,214
def grp_render_dashboard_module(context, module, index=None, subindex=None):
    """
    Template tag that renders a given dashboard module. It takes a
    ``DashboardModule`` instance as first parameter and an integer ``index`` as
    second parameter, which is the index of the module in the dashboard.
    """
    module.init_with_context(context)
    context.update({
        'template': module.template,
        'module': module,
        'index': index,
        'subindex': subindex,
        'admin_url': reverse('%s:index' % get_admin_site_name(context)),
    })
    return context
20,215
def serialize_routing(value, explicit_type=None):
    """Custom logic to find the matching serialize implementation and return its
    unique registration string key.

    :param value: instance to serialize
    :param explicit_type: explicit serialization type for value
    :return: str key to find proper serialize implementation
    """
    value_type = data_type(value, explicit_type)
    if DICT_DATA_TYPE.match(value_type):
        return "dict"
    if LIST_DATA_TYPE.match(value_type):
        return "list"
    if TUPLE_DATA_TYPE.match(value_type):
        return "tuple"
    return value_type
20,216
def get_projection_matricies(az, el, distance_ratio, roll = 0, focal_length=35, img_w=137, img_h=137): """ Calculate 4x3 3D to 2D projection matrix given viewpoint parameters. Code from "https://github.com/Xharlie/DISN" """ F_MM = focal_length # Focal length SENSOR_SIZE_MM = 32. PIXEL_ASPECT_RATIO = 1. # pixel_aspect_x / pixel_aspect_y RESOLUTION_PCT = 100. SKEW = 0. CAM_MAX_DIST = 1.75 CAM_ROT = np.asarray([[1.910685676922942e-15, 4.371138828673793e-08, 1.0], [1.0, -4.371138828673793e-08, -0.0], [4.371138828673793e-08, 1.0, -4.371138828673793e-08]]) # Calculate intrinsic matrix. scale = RESOLUTION_PCT / 100 # print('scale', scale) f_u = F_MM * img_w * scale / SENSOR_SIZE_MM f_v = F_MM * img_h * scale * PIXEL_ASPECT_RATIO / SENSOR_SIZE_MM # print('f_u', f_u, 'f_v', f_v) u_0 = img_w * scale / 2 v_0 = img_h * scale / 2 K = np.matrix(((f_u, SKEW, u_0), (0, f_v, v_0), (0, 0, 1))) # Calculate rotation and translation matrices. # Step 1: World coordinate to object coordinate. sa = np.sin(np.radians(-az)) ca = np.cos(np.radians(-az)) se = np.sin(np.radians(-el)) ce = np.cos(np.radians(-el)) R_world2obj = np.transpose(np.matrix(((ca * ce, -sa, ca * se), (sa * ce, ca, sa * se), (-se, 0, ce)))) # Step 2: Object coordinate to camera coordinate. R_obj2cam = np.transpose(np.matrix(CAM_ROT)) R_world2cam = R_obj2cam * R_world2obj cam_location = np.transpose(np.matrix((distance_ratio * CAM_MAX_DIST, 0, 0))) T_world2cam = -1 * R_obj2cam * cam_location # Step 3: Fix blender camera's y and z axis direction. R_camfix = np.matrix(((1, 0, 0), (0, -1, 0), (0, 0, -1))) R_world2cam = R_camfix * R_world2cam T_world2cam = R_camfix * T_world2cam RT = np.hstack((R_world2cam, T_world2cam)) # finally, consider roll cr = np.cos(np.radians(roll)) sr = np.sin(np.radians(roll)) R_z = np.matrix(((cr, -sr, 0), (sr, cr, 0), (0, 0, 1))) rot_mat = get_rotate_matrix(-np.pi / 2) return K, R_z@RT@rot_mat
20,217
def test_apply_3d():
    """test that apply correctly applies a simple function across 3d volumes of a Stack"""
    stack = synthetic_stack()
    assert np.all(stack.xarray == 1)
    stack.apply(divide, in_place=True, value=4,
                group_by={Axes.ROUND, Axes.CH})
    assert (stack.xarray == 0.25).all()
20,218
def pix_to_coord(edges, pix, interp="lin"):
    """Convert pixel coordinates to grid coordinates using the chosen interpolation scheme."""
    scale = interpolation_scale(interp)
    interp_fn = interp1d(
        np.arange(len(edges), dtype=float),
        scale(edges),
        fill_value="extrapolate",
    )
    return scale.inverse(interp_fn(pix))
20,219
def create_action_urls(actions, model=None, **url_args):
    """
    Creates a list of URLs for the given actions.
    """
    urls = {}
    if len(actions) > 0:
        # Resolve the url_args values as attributes from the model
        values = {}
        for arg in url_args:
            values[arg] = getattr(model, url_args[arg])
        # Generate the URL for every action
        for action in actions:
            urls[action] = flask.url_for(actions[action], **values)
    return urls
20,220
def check_vfvx(x0, fx, fx_args, dfx, dfx_args=None, delta=1e-5):
    """
    Check derivatives of a (vectorized) vector or scalar function of a vector
    variable.
    """
    if x0.ndim != 2:
        raise ValueError('The variable must have two dimensions!')

    if dfx_args is None:
        dfx_args = fx_args

    dfx_a = dfx(x0, *dfx_args)

    dfx_d = nm.zeros_like(dfx_a)
    for ic in range(x0.shape[1]):
        x = x0.copy()
        x[:, ic] += delta
        f1 = fx(x, *fx_args)

        x = x0.copy()
        x[:, ic] -= delta
        f2 = fx(x, *fx_args)

        dfx_d[:, ic] = 0.5 * (f1 - f2) / delta

    error = nm.linalg.norm((dfx_a - dfx_d).ravel(), nm.inf)
    print('analytical:', dfx_a)
    print('difference:', dfx_d)
    print('error:', error)

    return dfx_a, dfx_d, error
20,221
def process(
    save_data_path: Path, annotations_df: pd.DataFrame, vat_image_directory: Path
) -> None:
    """
    bbox format is [xmin, ymin, xmax, ymax]
    """
    image_path = save_data_path / "imgs"
    annotations_path = save_data_path / "annotations"
    create_directory(save_data_path, overwrite=True)
    create_directory(image_path)
    create_directory(annotations_path)

    annotations_df.bbox = annotations_df.bbox.apply(literal_eval).apply(np.array)
    failed_downloads = []
    cdlis = set(annotations_df.tablet_CDLI)
    print("Number of images: ", len(cdlis), "\n")

    for cdli in cdlis:
        cdli_annotations = annotations_df[annotations_df.tablet_CDLI == cdli]
        bboxes = cdli_annotations["bbox"].to_numpy()
        bounding_boxes = BoundingBoxes.from_two_vertices(cdli, bboxes)
        bounding_boxes.create_ground_truth_txt(annotations_path)

        download_path = f"https://cdli.ucla.edu/dl/photo/{cdli}.jpg"
        if "VAT" not in cdli:  # VAT images are copied locally, not downloaded from cdli
            try:
                im = Image.open(urlopen(download_path))
                im.save(f"{image_path}/{cdli}.jpg")
            except HTTPError:
                failed_downloads.append(cdli)
                print(f"Failed: {cdli}")
                continue
            print(f"Success: {cdli}")

    for vat_image in vat_image_directory.iterdir():
        shutil.copy(vat_image, image_path / vat_image.name)

    print("---------------Failed Downloads-------------------------")
    print("\n".join(failed_downloads))

    delete_corrupt_images_and_annotations(save_data_path, failed_downloads)
    is_valid_data(save_data_path)
20,222
def test_feature_list_pattern_features(mk_creoson_post_dict, mk_getactivefile):
    """Test list_group_features."""
    c = creopyson.Client()
    result = c.feature_list_pattern_features(
        "pattern_name",
        file_="file",
        type_="type",
    )
    assert isinstance(result, (list))
    result = c.feature_list_pattern_features("pattern_name")
    assert isinstance(result, (list))
20,223
def add_hovertool(p1, cr_traj, traj_src, sat_src, traj_df):
    """Adds a hovertool to the top panel of the data visualization tool plot."""
    # Create the JS callback for vertical line on radar plots.
    callback_htool = CustomJS(args={'traj_src': traj_src, 'sat_src': sat_src}, code="""
        const indices = cb_data.index["1d"].indices[0];
        var data_traj = traj_src.data
        var t_traj = data_traj['t']
        const t_val = t_traj[indices]

        var data_sat = sat_src.data;
        var t_sat = data_sat['t']
        t_sat[0] = t_val
        t_sat[1] = t_val
        sat_src.change.emit();
        """)

    # Add the hovertool for the satellite trajectory points on top panel, which are
    # linked to the vertical line on the bottom panel.
    htool_mode = ('vline' if max(traj_df['y']) - min(traj_df['y'])
                  <= (max(traj_df['x']) - min(traj_df['x'])) else 'hline')
    tooltips1 = [("lat", "@lat"), ("lon", "@lon"), ('time', '@t_str')]
    p1.add_tools(HoverTool(renderers=[cr_traj], callback=callback_htool,
                           mode=htool_mode, tooltips=tooltips1))

    return p1
20,224
def backpage_url_to_sitekey(url):
    """http://longisland.backpage.com/FemaleEscorts/s-mny-oo-chics-but-oo-nn-lik-oo-me-19/40317377"""
    (scheme, netloc, path, params, query, fragment) = urlparse(url)
    sitekey = netloc.split('.')[0]
    return sitekey
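A usage sketch with a made-up URL of the same shape as the one in the docstring; it assumes backpage_url_to_sitekey is in scope and that urlparse comes from urllib.parse on Python 3.

# The sitekey is the first label of the host name.
url = "http://longisland.backpage.com/FemaleEscorts/some-listing/40317377"
print(backpage_url_to_sitekey(url))  # 'longisland'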
20,225
def batch_eye_like(X: torch.Tensor):
    """Return batch of identity matrices like given batch of matrices `X`."""
    return torch.eye(*X.shape[1:], out=torch.empty_like(X))[None, :, :].repeat(X.size(0), 1, 1)
20,226
def cal_occurence(correspoding_text_number_list):
    """
    Calculate each occurrence of a number in a list.
    """
    di = dict()
    for i in correspoding_text_number_list:
        i = str(i)
        s = di.get(i, 0)
        if s == 0:
            di[i] = 1
        else:
            di[i] = di[i] + 1
    return di
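A small usage sketch with hypothetical numbers; it assumes cal_occurence is in scope. Note that the keys are stringified.

print(cal_occurence([3, 5, 3, 3, 7]))  # {'3': 3, '5': 1, '7': 1}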
20,227
def subtract(v: Vector, w: Vector) -> Vector:
    """simple vector subtraction"""
    assert len(v) == len(w), 'Vectors need to have the same length'
    return [vi - wi for vi, wi in zip(v, w)]
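A one-line usage sketch with hypothetical vectors; it assumes subtract and the Vector alias are in scope.

print(subtract([5, 7, 9], [4, 5, 6]))  # [1, 2, 3]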
20,228
def upgradeExplicitOid(store):
    """
    Upgrade a store to use explicit oid columns.

    This allows VACUUMing the database without corrupting it. This requires
    copying all of axiom_objects and axiom_types, as well as all item tables
    that have not yet been upgraded. Consider VACUUMing the database afterwards
    to reclaim space.
    """
    upgradeSystemOid(store)
    for typename, version in store.querySchemaSQL(LATEST_TYPES):
        cls = _typeNameToMostRecentClass[typename]
        if cls.schemaVersion != version:
            remaining = store.querySQL(
                'SELECT oid FROM {} LIMIT 1'.format(
                    store._tableNameFor(typename, version)))
            if len(remaining) == 0:
                # Nothing to upgrade
                continue
            else:
                raise RuntimeError(
                    '{}:{} not fully upgraded to {}'.format(
                        typename, version, cls.schemaVersion))
        store.transact(
            _upgradeTableOid,
            store,
            store._tableNameOnlyFor(typename, version),
            lambda: store._justCreateTable(cls),
            lambda: store._createIndexesFor(cls, []))
20,229
def test_int_init_from_boxfile(): """Test mech init from .box file.""" test_dir = "tests/int/init_from_boxfile" utils.cleanup_dir_and_vms_from_dir(test_dir) ubuntu = "ubuntu-18.04" box_file = "/tmp/{}.box".format(ubuntu) # download the file if we don't have it already # that way we "cache" the file commands = """ if ! [ -f "{box_file}" ]; then wget -O "{box_file}" "https://vagrantcloud.com/bento/\ boxes/{ubuntu}/versions/201912.04.0/providers/vmware_desktop.box" fi """.format(box_file=box_file, ubuntu=ubuntu) results = subprocess.run(commands, cwd=test_dir, shell=True, capture_output=True) stdout = results.stdout.decode('utf-8') stderr = results.stderr.decode('utf-8') assert results.returncode == 0 # init from boxfile command = "mech init --box bento/{} file:{}".format(ubuntu, box_file) expected_lines = ["Initializing", "has been init"] results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True) stdout = results.stdout.decode('utf-8') stderr = results.stderr.decode('utf-8') assert stderr == '' assert results.returncode == 0 for line in expected_lines: print(line) assert re.search(line, stdout, re.MULTILINE) # should start command = "mech up" expected_lines = ["Extracting", "Added network", "Bringing machine", "Getting IP", "Sharing folders", "started", "Provisioning"] results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True) stdout = results.stdout.decode('utf-8') stderr = results.stderr.decode('utf-8') assert stderr == '' assert results.returncode == 0 for line in expected_lines: print(line) assert re.search(line, stdout, re.MULTILINE) # should be able to destroy command = "mech destroy -f" expected = "Deleting" results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True) stdout = results.stdout.decode('utf-8') stderr = results.stderr.decode('utf-8') assert stderr == '' assert results.returncode == 0 assert re.search(expected, stdout)
20,230
def f1():
    """
    Filtering 1D.
    """
    # Get center of the filter
    c = int((size - 1) / 2)
    # Pad the flattened (1D array) image with wrapping
    If = np.pad(I.flatten(), (c), 'wrap')
    # Initialize the resulting image
    Ir = np.zeros(If.shape)
    # Apply 1D convolution in the image
    for x in range(c, Ir.shape[0] - c):
        Ir[x] = conv_point1d(If, filter, x, c)
    # Remove padding
    Ir = Ir[c:-c]
    # Return the resulting image with original shape
    return Ir.reshape(I.shape)
20,231
def _unify_data_and_user_kwargs( data: 'LayerData', kwargs: Optional[dict] = None, layer_type: Optional[str] = None, fallback_name: str = None, ) -> 'FullLayerData': """Merge data returned from plugins with options specified by user. If ``data == (_data, _meta, _type)``. Then: - ``kwargs`` will be used to update ``_meta`` - ``layer_type`` will replace ``_type`` and, if provided, ``_meta`` keys will be pruned to layer_type-appropriate kwargs - ``fallback_name`` is used if ``not _meta.get('name')`` .. note: If a user specified both layer_type and additional keyword arguments to viewer.open(), it is their responsibility to make sure the kwargs match the layer_type. Parameters ---------- data : LayerData 1-, 2-, or 3-tuple with (data, meta, layer_type) returned from plugin. kwargs : dict, optional User-supplied keyword arguments, to override those in ``meta`` supplied by plugins. layer_type : str, optional A user-supplied layer_type string, to override the ``layer_type`` declared by the plugin. fallback_name : str, optional A name for the layer, to override any name in ``meta`` supplied by the plugin. Returns ------- FullLayerData Fully qualified LayerData tuple with user-provided overrides. """ _data, _meta, _type = _normalize_layer_data(data) if layer_type: # the user has explicitly requested this be a certain layer type # strip any kwargs from the plugin that are no longer relevant _meta = prune_kwargs(_meta, layer_type) _type = layer_type if kwargs: # if user provided kwargs, use to override any meta dict values that # were returned by the plugin. We only prune kwargs if the user did # *not* specify the layer_type. This means that if a user specified # both layer_type and additional keyword arguments to viewer.open(), # it is their responsibility to make sure the kwargs match the # layer_type. _meta.update(prune_kwargs(kwargs, _type) if not layer_type else kwargs) if not _meta.get('name') and fallback_name: _meta['name'] = fallback_name return (_data, _meta, _type)
20,232
def get_sentence_embeddings(data):
    """
    data -> list: list of text
    """
    features = temb.batch_tokenize(data, tokenizer)
    dataset = temb.prepare_dataset(features)
    embeddings = temb.compute_embeddings(dataset, model)
    return embeddings
20,233
def sample_user(phone="+989123456789", full_name="testname"):
    """ Create a sample user """
    return get_user_model().objects.create_user(phone=phone, full_name=full_name)
20,234
async def metoo(hahayes):
    """ Haha yes """
    if not hahayes.text[0].isalpha() and hahayes.text[0] not in ("/", "#", "@", "!"):
        await hahayes.edit(random.choice(RENDISTR))
20,235
async def test_import_duplicate_yaml(hass: HomeAssistant) -> None:
    """Test that the yaml import works."""
    MockConfigEntry(
        domain=DOMAIN,
        data={"host": "192.168.1.123"},
        source=config_entries.SOURCE_IMPORT,
        unique_id="uuid",
    ).add_to_hass(hass)

    with patch(
        "pyoctoprintapi.OctoprintClient.get_discovery_info",
        return_value=DiscoverySettings({"upnpUuid": "uuid"}),
    ), patch(
        "pyoctoprintapi.OctoprintClient.request_app_key", return_value="test-key"
    ) as request_app_key:
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                "host": "1.1.1.1",
                "api_key": "test-key",
                "name": "Printer",
                "port": 81,
                "ssl": True,
                "path": "/",
            },
        )
        await hass.async_block_till_done()
        assert len(request_app_key.mock_calls) == 0

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
20,236
def get_word_counts(filepath: str) -> Dict[str, int]:
    """
    Return a dictionary of key-value pairs where keys are words from the given file
    and values are their counts. If there is no such file, return an empty dictionary.

    :param filepath: path to the file
    :return: a dictionary of word counts

    >>> get_word_counts(Path('scripts/The Invisible Man 1933.txt'))['snow']
    6
    >>> get_word_counts(Path('scripts/The Time Machine 2002.txt'))['high']
    10
    """
    filepath = Path(__file__).parent / filepath
    if not path.exists(filepath):
        return {}
    with open(filepath, 'r') as file:
        words = list(map(lambda word: word.strip('.,!?;:-').lower(),
                         file.readline().split(' ')))
        word_counts = dict(Counter(words))
    return word_counts
20,237
def getVanHoveDistances(positions, displacements, L):
    """
    Compute van Hove distances between particles of a system of size `L',
    with `positions' and `displacements'.

    Parameters
    ----------
    positions : (*, 2) float array-like
        Positions of the particles.
    displacements : (*, 2) float array-like
        Displacements of the particles.
    L : float
        Size of the system box.

    Returns
    -------
    distances : (*^2,) float Numpy array
        Van Hove distances.
    """
    positions = np.array(positions, dtype=np.double)
    N = len(positions)
    assert positions.shape == (N, 2)
    displacements = np.array(displacements, dtype=np.double)
    assert displacements.shape == (N, 2)

    distances = np.empty((N**2,), dtype=np.double)
    _pycpp.getVanHoveDistances.argtypes = [
        ctypes.c_int,
        ctypes.c_double,
        np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
        np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
        np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
        np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
        np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS')]
    _pycpp.getVanHoveDistances(
        N,
        L,
        np.ascontiguousarray(positions[:, 0]),
        np.ascontiguousarray(positions[:, 1]),
        np.ascontiguousarray(displacements[:, 0]),
        np.ascontiguousarray(displacements[:, 1]),
        np.ascontiguousarray(distances))

    return distances
20,238
def plot_local_coordinate_system_matplotlib( lcs, axes: plt.Axes.axes = None, color: Any = None, label: str = None, time: Union[pd.DatetimeIndex, pd.TimedeltaIndex, List[pd.Timestamp]] = None, time_ref: pd.Timestamp = None, time_index: int = None, show_origin: bool = True, show_trace: bool = True, show_vectors: bool = True, ) -> plt.Axes.axes: """Visualize a `weldx.transformations.LocalCoordinateSystem` using matplotlib. Parameters ---------- lcs : weldx.transformations.LocalCoordinateSystem The coordinate system that should be visualized axes : matplotlib.axes.Axes The target matplotlib axes. If `None` is provided, a new one will be created color : Any An arbitrary color. The data type must be compatible with matplotlib. label : str Name of the coordinate system time : pandas.DatetimeIndex, pandas.TimedeltaIndex, List[pandas.Timestamp], or \ LocalCoordinateSystem The time steps that should be plotted time_ref : pandas.Timestamp A reference timestamp that can be provided if the ``time`` parameter is a `pandas.TimedeltaIndex` time_index : int Index of a specific time step that should be plotted show_origin : bool If `True`, the origin of the coordinate system will be highlighted in the color passed as another parameter show_trace : If `True`, the trace of a time dependent coordinate system will be visualized in the color passed as another parameter show_vectors : bool If `True`, the the coordinate axes of the coordinate system are visualized Returns ------- matplotlib.axes.Axes : The axes object that was used as canvas for the plot """ if axes is None: _, axes = plt.subplots(subplot_kw={"projection": "3d", "proj_type": "ortho"}) if lcs.is_time_dependent and time is not None: lcs = lcs.interp_time(time, time_ref) if lcs.is_time_dependent and time_index is None: for i, _ in enumerate(lcs.time): draw_coordinate_system_matplotlib( lcs, axes, color=color, label=label, time_idx=i, show_origin=show_origin, show_vectors=show_vectors, ) label = None else: draw_coordinate_system_matplotlib( lcs, axes, color=color, label=label, time_idx=time_index, show_origin=show_origin, show_vectors=show_vectors, ) if show_trace and lcs.coordinates.values.ndim > 1: coords = lcs.coordinates.values if color is None: color = "k" axes.plot(coords[:, 0], coords[:, 1], coords[:, 2], ":", color=color) return axes
20,239
def saved_searches_list(request):
    """
    Renders the saved_searches_list html
    """
    args = get_saved_searches_list(request.user)
    return render('saved_searches_list.html', args, request)
20,240
def _blob(x, y, area, colour, val, textcolor="black"):
    """
    Draws a square-shaped blob with the given area (< 1) at the given coordinates.
    """
    hs = np.sqrt(area) / 2
    xcorners = np.array([x - hs, x + hs, x + hs, x - hs])
    ycorners = np.array([y - hs, y - hs, y + hs, y + hs])
    plt.fill(xcorners, ycorners, colour)
    if val < 0.05:
        plt.text(x - 0.25, y - 0.2, "⁎", fontsize=5, color=textcolor, fontweight="bold")
20,241
def upload(): """Upload files. This endpoint is used to upload "trusted" files; E.i. files created by CERT-EU E.g. CITAR, CIMBL, IDS signatures, etc. **Example request**: .. sourcecode:: http POST /api/1.0/upload HTTP/1.1 Host: do.cert.europa.eu Accept: application/json Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryklDA9 ------WebKitFormBoundaryklDA94BtcALil3R2 Content-Disposition: form-data; name="files[0]"; filename="test.gz" Content-Type: application/x-gzip ------WebKitFormBoundaryklDA94BtcALil3R2-- **Example response**: .. sourcecode:: http HTTP/1.0 201 CREATED Content-Type: application/json { "files": [ "test.gz" ], "message": "Files uploaded" } :reqheader Accept: Content type(s) accepted by the client :reqheader Content-Type: multipart/form-data required :resheader Content-Type: this depends on `Accept` header or request :>json array files: List of files saved to disk :>json string message: Status message :statuscode 201: Files successfully saved """ uploaded_files = [] for idx, file in request.files.items(): filename = secure_filename(file.filename) file.save(os.path.join(current_app.config['APP_UPLOADS'], filename)) uploaded_files.append(filename) return ApiResponse({ 'message': 'Files uploaded', 'files': uploaded_files }, 201)
20,242
def get_active_loan_by_item_pid(item_pid):
    """Return any active loans for the given item."""
    return search_by_pid(
        item_pid=item_pid,
        filter_states=current_app.config.get(
            "CIRCULATION_STATES_LOAN_ACTIVE", []
        ),
    )
20,243
def _get_szymkiewicz_simpson_coefficient(a: Set[X], b: Set[X]) -> float:
    """Calculate the Szymkiewicz–Simpson coefficient.

    .. seealso:: https://en.wikipedia.org/wiki/Overlap_coefficient
    """
    if a and b:
        return len(a.intersection(b)) / min(len(a), len(b))
    return 0.0
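A quick numerical check of the overlap coefficient above, using hypothetical sets; it assumes the function is in scope.

a = {1, 2, 3, 4}
b = {3, 4, 5}
# |{3, 4}| / min(4, 3) = 2 / 3
print(_get_szymkiewicz_simpson_coefficient(a, b))  # 0.666...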
20,244
def data_splitter(
    input: pd.DataFrame,
) -> Output(train=pd.DataFrame, test=pd.DataFrame,):
    """Splits the input dataset into train and test slices."""
    train, test = train_test_split(input, test_size=0.1, random_state=13)
    return train, test
20,245
def reload(module, exclude=('sys', 'os.path', 'builtins', '__main__', 'numpy', 'numpy._globals')):
    """Recursively reload all modules used in the given module.

    Optionally takes a list of modules to exclude from reloading. The default
    exclude list contains sys, __main__, and __builtin__, to prevent, e.g.,
    resetting display, exception, and io hooks.
    """
    global found_now
    for i in exclude:
        found_now[i] = 1
    try:
        with replace_import_hook(deep_import_hook):
            return deep_reload_hook(module)
    finally:
        found_now = {}
20,246
def get_words_for_board(words, board_size, packing_constant=1.1):
    """Pick a cutoff which is just beyond the limit of the board size."""
    # Order the words by length. It's easier to pack shorter words, so prioritize them.
    # This is SUPER hacky, should have a Word class that handles these representational differences.
    words = sorted(words, key=lambda w: len(w.replace(" ", "").replace("-", "")))
    cum_len = np.cumsum([len(word.replace(" ", "").replace("-", "")) for word in words])

    num_words = None
    for word_idx, cum_letters in enumerate(cum_len):
        # Try to pack in slightly more letters than would fit on the board without overlaps,
        # as governed by the packing constant.
        if cum_letters > packing_constant * board_size**2:
            num_words = word_idx
            break
    if not num_words:
        raise ValueError(f"Too few semantic neighbor words to pack a {board_size}x{board_size} board.")

    return words[:num_words]
20,247
async def test_device_setup_broadlink_exception(hass):
    """Test we handle a Broadlink exception."""
    device = get_device("Office")
    mock_api = device.get_mock_api()
    mock_api.auth.side_effect = blke.BroadlinkException()

    with patch.object(
        hass.config_entries, "async_forward_entry_setup"
    ) as mock_forward, patch.object(
        hass.config_entries.flow, "async_init"
    ) as mock_init:
        mock_api, mock_entry = await device.setup_entry(hass, mock_api=mock_api)

    assert mock_entry.state == ENTRY_STATE_SETUP_ERROR
    assert mock_api.auth.call_count == 1
    assert mock_forward.call_count == 0
    assert mock_init.call_count == 0
20,248
def write_cells_from_component(
    component: Component, dirpath: Optional[PathType] = None
) -> None:
    """Writes all Component cells.

    Args:
        component:
        dirpath: directory path to write GDS (defaults to CWD)
    """
    dirpath = dirpath or pathlib.Path.cwd()

    if component.references:
        for ref in component.references:
            component = ref.parent
            component.write_gds(f"{pathlib.Path(dirpath)/component.name}.gds")
            write_cells_from_component(component=component, dirpath=dirpath)
    else:
        component.write_gds(f"{pathlib.Path(dirpath)/component.name}.gds")
20,249
def booleans(key, val):
    """returns ucsc formatted boolean"""
    if val in (1, True, "on", "On", "ON"):
        val = "on"
    else:
        val = "off"
    return val
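A usage sketch showing the mapping to the UCSC on/off convention; the key and values are hypothetical and it assumes booleans is in scope.

print(booleans("visibility", "On"))  # 'on'
print(booleans("visibility", 0))     # 'off'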
20,250
def feature_kstest_histograms(dat, covars, batch_list, filepath): """ Plots kernel density plots and computes KS test p-values separated by batch effect groups for a dataset (intended to assess differences in distribution to all batch effects in batch_list following harmonization with NestedComBat *Note that this is differs from the version in GMMComBat only by file destination naming Arguments --------- data : DataFrame of original data with shape (samples, features) output: DataFrame of harmonized data with shape (samples, features) covars : DataFrame with shape (samples, covariates) corresponding to original data. All variables should be label- encoded (i.e. strings converted to integer designations) batch_list : list of strings indicating batch effect column names within covars (i.e. ['Manufacturer', 'CE'...]) filepath : write destination for kernel density plots """ print('Plotting final feature histograms...') p_df = pd.DataFrame() for batch_col in batch_list: p = [] split_col = covars[batch_col] filepath2 = filepath + 'feature_histograms/' + batch_col + '/' if not os.path.exists(filepath2): os.makedirs(filepath2) for feature in dat: plt.figure() dat[feature][split_col == 0].plot.kde() dat[feature][split_col == 1].plot.kde() plt.xlabel(feature) filename = filepath2 + feature + '.png' plt.savefig(filename, bbox_inches='tight') plt.close() p_value = ks_2samp(dat[feature][split_col == 0], dat[feature][split_col == 1]) p.append(p_value.pvalue) p_df[batch_col] = p p_df.index = dat.keys() p_df.to_csv(filepath + 'final_nested_ks_values.csv')
20,251
def run_check_handler_sync(
    check: Check, handler: CheckHandler, *args, **kwargs
) -> None:
    """Run a check handler and record the result into a Check object.

    Args:
        check: The check to record execution results.
        handler: A callable handler to perform the check.
        args: A list of positional arguments to pass to the handler.
        kwargs: A dictionary of keyword arguments to pass to the handler.

    Raises:
        ValueError: Raised if an invalid value is returned by the handler.
    """
    try:
        check.run_at = datetime.datetime.now()
        _set_check_result(check, handler(*args, **kwargs))
    except Exception as error:
        _set_check_result(check, error)
    finally:
        check.runtime = Duration(datetime.datetime.now() - check.run_at)
20,252
def merge_param_classes(*cls_list, merge_positional_params: bool = True) -> type(Params):
    """Merge multiple Params classes into a single merged params class and return
    the merged class. Note that this will not flatten the nested classes.

    :param cls_list: A list of Params subclasses or classes to merge into a single Params class
    :param merge_positional_params: Whether or not to merge the positional params in the classes
    """
    if len(cls_list) == 1:
        return cls_list[0]

    class MergedParams(Params):
        __doc__ = f'A Combination of {len(cls_list)} Params Classes:\n'

    append_params_attributes(MergedParams, *cls_list)
    for params_cls in cls_list:
        MergedParams.__doc__ += f'\n\t {params_cls.__name__} - {params_cls.__doc__}'

    # resolve positional arguments:
    if merge_positional_params:
        params_to_delete, positional_param = _merge_positional_params(
            [(k, v) for k, v in MergedParams.__dict__.items() if not k.startswith('_')])
        if positional_param is None and params_to_delete == []:
            return MergedParams
        setattr(MergedParams, 'positionals', positional_param)
        positional_param.__set_name__(MergedParams, 'positionals')
        for k in params_to_delete:
            delattr(MergedParams, k)

    return MergedParams
20,253
def make_pin_list(eff_cnt):
    """Generates a pin list with an effective pin count given by eff_cnt."""
    cards = [1] * eff_cnt
    cards.extend([0] * (131 - len(cards)))
    random.shuffle(cards)
    deck = collections.deque(cards)

    pin_list = []
    for letters, _ in KEY_WHEEL_DATA:
        pins = [c for c in letters if deck.pop()]
        pin_list.append(''.join(pins))
    return pin_list
20,254
def drop(cols, stmt):
    """
    Function: Drops columns from the statement.
    Input: List of columns to drop.
    Output: Statement with columns that are not dropped.
    """
    col_dict = column_dict(stmt)
    col_names = [c for c in col_dict.keys()]
    colintention = [c.evaluate(stmt).name if isinstance(c, Intention) else c for c in cols]
    new_cols = list(filter(lambda c: c not in colintention, col_names))
    undrop = select(new_cols, stmt)
    return undrop
20,255
def ConvertVolumeSizeString(volume_size_gb):
    """Converts the volume size defined in the schema to an int."""
    volume_sizes = {
        "500 GB (128 GB PD SSD x 4)": 500,
        "1000 GB (256 GB PD SSD x 4)": 1000,
    }
    return volume_sizes[volume_size_gb]
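A usage sketch with one of the labels listed in the function; it assumes ConvertVolumeSizeString is in scope.

print(ConvertVolumeSizeString("500 GB (128 GB PD SSD x 4)"))  # 500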
20,256
def _check_dims(matrix):
    """Raise a ValueError if the input matrix is not a two-dimensional square array.

    Parameters
    ----------
    matrix : numpy.ndarray
        Input array.
    """
    if matrix.ndim != 2:
        raise ValueError('Expected a square matrix, got array of shape'
                         ' {0}.'.format(matrix.shape))
20,257
def render_template_with_system_context(value):
    """
    Render the provided template with a default system context.

    :param value: Template string.
    :type value: ``str``
    """
    context = {
        SYSTEM_KV_PREFIX: KeyValueLookup(),
    }
    rendered = render_template(value=value, context=context)
    return rendered
20,258
def ToTranslation(tree, placeholders):
    """Converts the tree back to a translation, substituting the placeholders
    back in as required.
    """
    text = tree.ToString()
    assert text.count(PLACEHOLDER_STRING) == len(placeholders)
    transl = tclib.Translation()
    for placeholder in placeholders:
        index = text.find(PLACEHOLDER_STRING)
        if index > 0:
            transl.AppendText(text[:index])
        text = text[index + len(PLACEHOLDER_STRING):]
        transl.AppendPlaceholder(placeholder)
    if text:
        transl.AppendText(text)
    return transl
20,259
def complex(real, imag):
    """Return a 'complex' tensor.

    - If the `fft` module is present, returns a proper complex tensor
    - Otherwise, stacks the real and imaginary components along the last dimension.

    Parameters
    ----------
    real : tensor
    imag : tensor

    Returns
    -------
    complex : tensor
    """
    if _torch_has_complex:
        return torch.complex(real, imag)
    else:
        return torch.stack([real, imag], -1)
20,260
def _load_images_from_param_file(
    param_filename: str,
    delete: bool,
) -> Iterator[Tuple[np.ndarray, np.ndarray, scene_parameters.SceneParameters]]:
    """Yields tuples of image, mask, and scene parameters.

    Args:
        param_filename: read images from this jsonl parameter file.
        delete: delete images after reading them
    """
    with open(param_filename) as f:
        for line in f.readlines():
            params = scene_parameters.SceneParameters.load(json.loads(line))
            dirname = os.path.dirname(param_filename)
            img_fname = os.path.join(dirname, params.filename)
            mask_fname = os.path.join(dirname, params.mask_filename)
            img = imageio.imread(img_fname)
            mask = imageio.imread(mask_fname)
            yield img, mask, params
            if delete:
                os.remove(img_fname)
20,261
def get_snps(x: str) -> tuple:
    """Parse a SNP line and return name, chromosome, position."""
    snp, loc = x.split(' ')
    chrom, position = loc.strip('()').split(':')
    return snp, chrom, int(position)
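A usage sketch with a made-up SNP line in the "name (chrom:position)" format the parser expects; it assumes get_snps is in scope.

print(get_snps("rs12345 (7:117559590)"))  # ('rs12345', '7', 117559590)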
20,262
def qr_match(event, context, user=None):
    """
    Function used to associate a given QR code with the given email
    """
    user_coll = coll('users')
    result = user_coll.update_one({'email': event["link_email"]},
                                  {'$push': {'qrcode': event["qr_code"]}})
    if result.matched_count == 1:
        return {"statusCode": 200, "body": "success"}
    else:
        return {"statusCode": 404, "body": "User not found"}
20,263
def plot_smallnorb(path, is_train=False, samples_per_class=5): """Plot examples from the smallNORB dataset. Execute this command in a Jupyter Notebook. Author: Ashley Gritzman 18/04/2019 Args: is_train: True for the training dataset, False for the test dataset samples_per_class: number of samples images per class Returns: None """ # To plot pretty figures import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' CLASSES = ['animal', 'human', 'airplane', 'truck', 'car'] # Get batch from data queue. Batch size is FLAGS.batch_size, which is then # divided across multiple GPUs path = os.path.join(path, 'smallNORB') dataset = input_fn(path, is_train) img_bch, lab_bch, cat_bch, elv_bch, azi_bch, lit_bch = next(iter(dataset)) img_bch = img_bch.numpy() lab_bch = lab_bch.numpy() cat_bch = cat_bch.numpy() elv_bch = elv_bch.numpy() azi_bch = azi_bch.numpy() lit_bch = lit_bch.numpy() num_classes = len(CLASSES) fig = plt.figure(figsize=(num_classes * 2, samples_per_class * 2)) fig.suptitle("category, elevation, azimuth, lighting") for y, cls in enumerate(CLASSES): idxs = np.flatnonzero(lab_bch == y) idxs = np.random.choice(idxs, samples_per_class, replace=False) for i, idx in enumerate(idxs): plt_idx = i * num_classes + y + 1 plt.subplot(samples_per_class, num_classes, plt_idx) # plt.imshow(img_bch[idx].astype('uint8').squeeze()) plt.imshow(np.squeeze(img_bch[idx])) plt.xticks([], []) plt.yticks([], []) plt.xlabel("{}, {}, {},{}".format(cat_bch[idx], elv_bch[idx], azi_bch[idx], lit_bch[idx])) # plt.axis('off') if i == 0: plt.title(cls) plt.show()
20,264
def update_has_started(epoch, settings):
    """
    Tells whether update has started or not

    :param epoch: epoch number
    :param settings: settings dictionary
    :return: True if the update has started, False otherwise
    """
    return is_baseline_with_update(settings['baseline']) and epoch >= settings['update']['start_epoch']
20,265
def compute_i_th_moment_batches(input, i):
    """
    compute the i-th moment for every feature map in the batch

    :param input: tensor
    :param i: the moment to be computed
    :return:
    """
    n, c, h, w = input.size()
    input = input.view(n, c, -1)
    mean = torch.mean(input, dim=2).view(n, c, 1, 1)
    eps = 1e-5
    var = torch.var(input, dim=2).view(n, c, 1, 1) + eps
    std = torch.sqrt(var)
    if i == 1:
        return mean
    elif i == 2:
        return std
    else:
        sol = ((input.view(n, c, h, w) - mean.expand(n, c, h, w)) / std).pow(i)
        sol = torch.mean(sol.view(n, c, -1), dim=2).view(n, c, 1, 1)
        return sol
20,266
def hs_online_check(onion, put_url):
    """Online check for hidden service."""
    try:
        print onion
        return hs_http_checker(onion, put_url)
    except Exception as error:
        print "Returned nothing."
        print error
        return ""
20,267
def plotfile(fname, cols=(0,), plotfuncs=None, comments='#', skiprows=0, checkrows=5, delimiter=',', names=None, subplots=True, newfig=True, **kwargs): """ Plot the data in a file. *cols* is a sequence of column identifiers to plot. An identifier is either an int or a string. If it is an int, it indicates the column number. If it is a string, it indicates the column header. matplotlib will make column headers lower case, replace spaces with underscores, and remove all illegal characters; so ``'Adj Close*'`` will have name ``'adj_close'``. - If len(*cols*) == 1, only that column will be plotted on the *y* axis. - If len(*cols*) > 1, the first element will be an identifier for data for the *x* axis and the remaining elements will be the column indexes for multiple subplots if *subplots* is *True* (the default), or for lines in a single subplot if *subplots* is *False*. *plotfuncs*, if not *None*, is a dictionary mapping identifier to an :class:`~matplotlib.axes.Axes` plotting function as a string. Default is 'plot', other choices are 'semilogy', 'fill', 'bar', etc. You must use the same type of identifier in the *cols* vector as you use in the *plotfuncs* dictionary, e.g., integer column numbers in both or column names in both. If *subplots* is *False*, then including any function such as 'semilogy' that changes the axis scaling will set the scaling for all columns. - *comments*: the character used to indicate the start of a comment in the file, or *None* to switch off the removal of comments - *skiprows*: is the number of rows from the top to skip - *checkrows*: is the number of rows to check to validate the column data type. When set to zero all rows are validated. - *delimiter*: is the character(s) separating row items - *names*: if not None, is a list of header names. In this case, no header will be read from the file If *newfig* is *True*, the plot always will be made in a new figure; if *False*, it will be made in the current figure if one exists, else in a new figure. kwargs are passed on to plotting functions. Example usage:: # plot the 2nd and 4th column against the 1st in two subplots plotfile(fname, (0,1,3)) # plot using column names; specify an alternate plot type for volume plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'}) Note: plotfile is intended as a convenience for quickly plotting data from flat files; it is not intended as an alternative interface to general plotting with pyplot or matplotlib. 
""" if newfig: fig = figure() else: fig = gcf() if len(cols) < 1: raise ValueError('must have at least one column of data') if plotfuncs is None: plotfuncs = {} with cbook._suppress_matplotlib_deprecation_warning(): r = mlab._csv2rec(fname, comments=comments, skiprows=skiprows, checkrows=checkrows, delimiter=delimiter, names=names) def getname_val(identifier): 'return the name and column data for identifier' if isinstance(identifier, str): return identifier, r[identifier] elif isinstance(identifier, Number): name = r.dtype.names[int(identifier)] return name, r[name] else: raise TypeError('identifier must be a string or integer') xname, x = getname_val(cols[0]) ynamelist = [] if len(cols) == 1: ax1 = fig.add_subplot(1, 1, 1) funcname = plotfuncs.get(cols[0], 'plot') func = getattr(ax1, funcname) func(x, **kwargs) ax1.set_ylabel(xname) else: N = len(cols) for i in range(1, N): if subplots: if i == 1: ax = ax1 = fig.add_subplot(N - 1, 1, i) else: ax = fig.add_subplot(N - 1, 1, i, sharex=ax1) elif i == 1: ax = fig.add_subplot(1, 1, 1) yname, y = getname_val(cols[i]) ynamelist.append(yname) funcname = plotfuncs.get(cols[i], 'plot') func = getattr(ax, funcname) func(x, y, **kwargs) if subplots: ax.set_ylabel(yname) if ax.is_last_row(): ax.set_xlabel(xname) else: ax.set_xlabel('') if not subplots: ax.legend(ynamelist) if xname == 'date': fig.autofmt_xdate()
20,268
def test_integer():
    """Cut a small graph with integer unary and edge weights and check the labelling."""
    unary = np.array([[2, 8, 8],
                      [7, 3, 7],
                      [8, 8, 2],
                      [6, 4, 6]])
    edges = np.array([[0, 1],
                      [1, 2],
                      [2, 3]])
    edge_weight = np.array([3, 10, 1])
    smooth = 1 - np.eye(3)
    labels = pygco.cut_general_graph(edges, edge_weight, unary, smooth, n_iter=1)
    # The original test computed np.array_equal without asserting on the result,
    # so a wrong labelling could never be reported.
    assert np.array_equal(labels, np.array([0, 2, 2, 1]))
20,269
def parse_options():
    """Parses and checks the command-line options.

    Returns:
        The parsed command-line options (an argparse.Namespace).
    """
    usage = 'Usage: ./update_mapping.py [options]'
    desc = ('Example: ./update_mapping.py -o mapping.json.\n'
            'This script generates and stores a file that gives the\n'
            'mapping between phone serial numbers and BattOr serial numbers\n'
            'Mapping is based on which physical ports on the USB hubs the\n'
            'devices are plugged in to. For instance, if there are two hubs,\n'
            'the phone connected to port N on the first hub is mapped to the\n'
            'BattOr connected to port N on the second hub, for each N.')
    parser = argparse.ArgumentParser(usage=usage, description=desc)
    parser.add_argument('-o', '--output', dest='out_file',
                        default='mapping.json', type=str, action='store',
                        help='mapping file name')
    parser.add_argument('-u', '--hub', dest='hub_types', action='append',
                        choices=['plugable_7port'], help='USB hub types.')
    options = parser.parse_args()
    if not options.hub_types:
        options.hub_types = ['plugable_7port']
    return options
20,270
def test_module(proxies): """ This is the call made when pressing the integration test button. """ # a sample microsoft logo png content = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\xd8\x00\x00\x00.\x08\x03\x00\x00\x00}5\'\xdb\x00' \ b'\x00\x01\x11PLTE\x00\x00\x00sssssssssssssss\xf3P"sssssssssssssssssssssssssssssssssssswxj\x00\xa4' \ b'\xefl\xca\x00\x7f\xba\x00\xff\xb9\x00\x00\xa2\xff\xfaN\x1c\xffD%\x00\xa4\xf0\xff\xb9\x00sss\x00' \ b'\xae\xff\xff\xba\x00\xff\xbc\x00sssz\xbb\x00\x7f\xba' \ b'\x00ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss\xfbI\x1f\xf8N' \ b'\x1d\x00\xaf\xff{\xba\x00\x7f\xba\x00\xf2P"\x7f\xba\x00\xff\xbe\x00\x7f\xba\x00\xff\xb9\x00\xff' \ b'\xb9\x00\xff\xbc\x00\xf2P"\x00\xae\xffm\xca\x00\xff\xab\x00\x7f\xba\x00\xff\xb9\x00{' \ b'\xba\x00\x00\xa8\xffsss\xff\xb9\x00\x7f\xba\x00\x00\xa4\xef\xf2P"\xffL\x18y\xba\x00\x00\xa4\xf4' \ b'\xf4N#\x0b5v\xab\x00\x00\x00RtRNS\x00\xf2\x0c\xf8\xd8\x14\xc9\xfc\x1acS\xb4\xac\x9e\'\x80\xca\xd3' \ b'\xa5\x05\xc9\xaeCC\x1e\x1c\x1e\xf3\xf3\xe4\xb4\xb4\xad\\\x1c\xc9\xc3j+\x97<\xec\xb0N1 ' \ b'\x0f\x91\x86B6\xb9v\xceG\xe8\xde\xbe{o\x8d\xa1 ' \ b'\xcf\xc5\x1f\xea\xe4\xdf\xd1\xcd\xc8\xb7\xa4\x9f\x8e\x89z50"\x16\xfb\x12E\xa6\x00\x00\x05' \ b'\x12IDATh\xde\xed\x98\xe7\x96\x93@\x18\x86?\x081\xa6S\xec\x8a\x8a\xa6\x90\x04H\xd9\x90d\x93\xd8{' \ b'\xd7\xb1{\xff\x17"\xd3!\xc0\x0f\x8f\xe0QO\x9e\x1f\xbb\xb3\xb3d\x98g\xca;\x10x\xf9\xe8L\x8a{' \ b'g.\xc3\xeb\x87\xb7S<}\xfc\x16\xfe\x19\xce|\xc9\xe0\xc7]8\xff)\x8b\x0b\xf0\xcf\x90-v\xe5(' \ b'\xf6\xd7r\x14;\x8a\xfd%\xfc\'b\x8b\xe9\xb89\xee\xffA1u\xe2A\xf9\xf8=\x84\xd9\x14$\xa6\xde\x19\x9b' \ b'\xa696\xab\x90\xa4{\x1aU7\xdb*\x00\xb8\nB-(' \ b'\x1b\xaf\x86\n\x15\xabV\x10\xc19\xb8\r\xadU\xb0\x98\x86K[(' \ b'\x17\xd5F\xe5\x88\x9d@\x82\x19\x17\xe3W\xb4\xa1\\\xb6(\xa2\xb6\xf3\x1a[' \ b'<\x96g\xf7\xf3\xa2\xc4\xd0"^k\xad\xa5\x18\x18\xb8\xb4\x84r!\x1b\xacA\xbb4\xdb\xe0bQb\xfbx\xad' \ b'\x83bb\x9e\x82\xd0\x1d(\r\xd9\r\x83\x96\xeb\x08\x15)f[\xc9\xf1\x93b\xa0.|(' \ b'\x99\xb38:\xc6\x85\x8b\x8d*\xa4)\x81\x8bEm*V:R\xacU\xb8\xd8\xb0G\xc6K0\xc4u\xe3\x1c1k\xb0j4\xe6g' \ b'\x93\x99\xe6-\xaa\xa2\xb8\x9a\xac\x16\x01\xc4\t\xdcFc5\xb0\xe2\xb7uW\x13Q#\xc5\xe4>\x98\x14"\xe6' \ b'\x90A\x1a\x88~(8L\x9a\\l;\xd2\x8d\x90/F\x7f\xcarY\xc7c\xba_\xeb\xdai\xf4\xab\xc27\xc8dL\x97ve' \ b'<\x07N\xc7\xa4u5\x1e@V\x9d-\xf5\xd1\xb0C\x9e86zH\xfe\xec\xe9\x11\xa7\x1b\x92\xfa\xa7\xb8\xe5\xdf' \ b'\x16\xb3PD\x97\xd7\xf5q\xbfA\xe7b\xed\x98\xf5\x161\xa8\xc9\t^\xb3\x01\xfe\x894<\xee:\x924-\xbe' \ b'\xb28=\xb6\xd0GH2\x03\x809\xcaF\xf9m\xb1%I\x0b-\x1e\x1d}\xd0\x12b\x1d\x00R\x94\x84\x00\x80gU\x1f' \ b'"&\xe6+(\x8eN\xcc\x1a\xe8@l\x82\x12\x0c\xcb\x14\xeb\x92IB+\x19\x1d\x8a\x95%\xd6E\xd4\xc1\x1c\x9e(' \ b'\xc8`b\x15\xacS\xc3b6\x8d\xd7\xa6\xd9\xc3\x8d\xb2=c\x10\xa5\xba3\xdbP1\x97\xb5\xd12CZ\xdaEU\x8aB' \ b'\xd6\xaaB \x1f\xaeDU6\xdc\xfb\x9aE$\xf6-\x8b,' \ b'1\x15\x9f\xc8&\xad2i1-\xe6\xd1.\xd1U9\xa5b\x84e5\x98\xcc`O\xcau\xd6\x06fEf\x83\x9f\x82\x8d)D\x90' \ b'\xd5\xba^\x90\xc40\x88B\x15\x02\xe8\xd0\xb8W#\xe8 ;\xa0\x06*\\\xbe{\xf9\x90\xa8\xca\x82[' \ b'\xefo\xa5x\xf0\xc0J\x8b\xd1\xde\xd7|r;\\3\xcf\x12\x1b\x93\x05&>iq\xb1\t\xd0\xcf\xb1\x98\x96fM\x80' \ b'\xba\x94\x95\x0bQ\xf1Y\x8a\xd0\xc7\xd0\xecT\xfc-\xa4\x98GF\x9e\xe7\x83\x06)1vs%\x96\xf3L\xac-#G' \ b'\xbe\x05\xf8\xf8\xe2\xdaYp\xa8\xa0\xa0-\xee#6`/\xff\x1c{' \ b'u\xffz\x8a\xfb\xd7?\xc0\x9b\'7R<\x7f\xf6.-\xc6\x02.\xe4\x9bb\x99%\xe6\xc8U%\xc5\xe4$\x99\xc9\x81' \ b'>\xa5\xff\x1a\xd0\x1c\x11G\x89F}\x19\xea\x08\xafK+W\xec\xdc\xe7\x0c\xbe_\x82\x1b\x1f\xb3\xb8\x98' \ 
b'\x16\xe3\xcd\xb9t\xad(\xd5,' \ b'\xb1)Y\xfbi\xb1N\xec1\xccO\x1c\xf2h\x87\x07\x8c`8V\xec\x99P\x10\x92\x06~Y\xec\xe6/\x88\xc1\x86fo' \ b'\x93DGZ\x8cM\x89\x97\x14\xe3\xd7\xc8\xeb9]\xb6}\xce\xf2Sk\x8dCw\x81K\xa7\x90\x9c\xd7y\x99btBF\x10' \ b'\xd0;\xa5\xc5Xv\x0c~U\x0c\xe8\xf9\xcd\xe6\xcf\xfd\xf3bt7,' \ b'\xea$:\xf2gl\x9e+\x16\x8a\xdd#\x97b\x9f\x14\xdd\xa6x\xe7\x1b\xc4\x82U\x84\xbf_\xae\x18\xb4\xf0' \ b'`\xea4:r\xf7\xd8,W\xec$\xf9\xba\xda\x8cgI\xb5\xcd\x9e2\xac\x11^\x17b^-\x1b\x9f\xe8Y\xa98)P\xccA' \ b'\x8cN\x96\x18\xcbf#G\x8cyO\x81\x11\x10\x83\x008;\x16\xecz"\x81\\\xfa\xadD\x893&TD\xfbi\xb1`\x94x' \ b'\xd3V-!&;Y\xeb\x00e\xc6\xce\x86@\x8d\x05\xbb\xce\x04\r\x80\xd8,' \ b'\xf7\xb3\xc4\xb6E\x8a\xcd\xa8\xd8<SL\xfc\xff\x0e\x9b\xa009cl2\x14\x8fM\x10[' \ b'\x98KV\xe1\x93\x9e\xf3\xe7\x93\x9e*\x8f\xbe\xb5u ' \ b'\xb6\xe2\xeeVQb\xbe\xfc\xfe+-&\xa7t\xd4\x9au\xcd52\x12b\xe2\xed\xa3v2\xebN\rR\x9c\xd2\xe1\x0f\xf7' \ b'\x8d\xd5Rc\xb1\x08KD\xfc\xa36\xda6b\x8bN\x8a\xc9n\x18\xdd\xa1^\x94\x18\xdc!\xcf;\xf9b\x1d\x05' \ b'\tRbX"A\xeb\xb0\xce\x90q)Y\xc2\xa1\x18\x8c\x11E)Ll\xc2\x15\xd2b\x03z\xb5\x96z\xd1\x94b$^$\xc3CY' \ b'\xfb\xacX\xf1\x92z\xfa\xcb\x1c\xf0\x8a\x10\x0bB[C}\x91rJe(' \ b'\xcb\xda:TqGj\x9a\xbd\xf1\xf9\xac0\xb5Jo\x8eG\xbfB\xae\x91\xad\xedm\xd6\xa71;\xc9;\'k\xa6\xb5\x05' \ b'\xce\xa0\xa5\xb0\xbai\x95\x8f\xafak5\x11\xa9\x0bz\x13\xe3\x97\xc5~\x0b\xdf\xe9\xef\xea+\x15r\x188' \ b'\xfd~\xdd\xb5@\xe2E5\xce\x00\x12\xb8uR\x97\x8b\xd7\xdf\xf5\xdd_O\xc5\x7f\x86\xa3\xd8Q\xec/\xe1(' \ b'v\x14\xfbK8\xf7\xf9j\x8a\xcfW/\xc1\x8b\x8f\xd7\xd2\xfcCb?\x01\xc7\xf5]\n\x11\xa0Y\x98\x00\x00\x00' \ b'\x00IEND\xaeB`\x82 ' response = perform_logo_detection_service_request(content, proxies) logo_found = response.get('responses', [])[0].get('logoAnnotations', [])[0].get('description', '') if 'microsoft' in logo_found.lower(): demisto.results('ok') else: return_error(str(response))
20,271
def retrieve(object_type, **kwargs): """Get objects from the Metatlas object database. This will automatically select only objects created by the current user unless `username` is provided. Use `username='*'` to search against all users. Parameters ---------- object_type: string The type of object to search for (i.e. "Groups"). **kwargs Specific search queries (i.e. name="Sargasso"). Use '%' for wildcard patterns (i.e. description='Hello%'). If you want to match a '%' character, use '%%'. Returns ------- objects: list List of Metatlas Objects meeting the criteria. Will return the latest version of each object. """ workspace = Workspace.get_instance() out = workspace.retrieve(object_type, **kwargs) workspace.close_connection() return out
20,272
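A usage sketch for retrieve, following the query patterns described in its own docstring; the object type and field values are hypothetical examples, not values from the source.

# Latest versions of the current user's groups whose names start with "Sargasso".
groups = retrieve("Groups", name="Sargasso%")

# The same query across all users, with an extra wildcard match on the description.
all_groups = retrieve("Groups", name="Sargasso%", username="*",
                      description="Hello%")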
def read_plot_pars():
    """
    Read the plot parameters from 'input.txt'.

    Parameters are (in this order):
        Minimum box width, Maximum box width, Box width iterations,
        Minimum box length, Maximum box length, Box length iterations,
        Voltage difference
    """
    def extract_parameter_from_string(string):
        # Return the part of the string after the ':' sign, without the trailing newline.
        start_index = string.find(':')
        return string[start_index + 1:].strip()

    pars = []
    with open("input.txt", "r") as f:
        for line_counter, line in enumerate(f):
            # Lines 1..7 of the file hold the seven parameters listed above.
            if 0 < line_counter < 8:
                pars.append(extract_parameter_from_string(line))
    return pars
20,273
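For illustration, a hypothetical input.txt in the layout read_plot_pars expects: a header line followed by one "name: value" line per parameter. The exact labels and values are assumptions.

# input.txt (hypothetical contents):
#   Plot parameters
#   Minimum box width: 1.0
#   Maximum box width: 5.0
#   Box width iterations: 10
#   Minimum box length: 1.0
#   Maximum box length: 5.0
#   Box length iterations: 10
#   Voltage difference: 0.5

pars = read_plot_pars()
# -> the seven parameter values as strings, e.g. ['1.0', '5.0', '10', '1.0', '5.0', '10', '0.5']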
def _configure(yew): """Prompt user for settings necessary for remote operations. Store in user prefs. Skip secret things like tokens and passwords. """ # the preferences need to be in the form: #  location.default.username for pref in settings.USER_PREFERENCES: if "token" in pref or "password" in pref: continue d = yew.store.prefs.get_user_pref(pref) p = pref.split(".") i = p[2] value = click.prompt("Enter %s" % i, default=d, type=str) click.echo(pref + "==" + value) yew.store.prefs.put_user_pref(pref, value)
20,274
def install_custom_css(destdir, cssfile, resource=PKGNAME):
    """
    Add the kernel CSS to custom.css
    """
    ensure_dir_exists(destdir)
    custom = os.path.join(destdir, 'custom.css')
    prefix = css_frame_prefix(resource)

    # Check if custom.css already includes it. If so, let's remove it first
    exists = False
    if os.path.exists(custom):
        with io.open(custom) as f:
            for line in f:
                if line.find(prefix) >= 0:
                    exists = True
                    break
    if exists:
        remove_custom_css(destdir, resource)

    # Fetch the CSS file
    cssfile += '.css'
    data = pkgutil.get_data(resource, os.path.join('resources', cssfile))
    # get_data() delivers encoded data, str (Python2) or bytes (Python3)

    # Add the CSS at the beginning of custom.css
    # io.open uses unicode strings (unicode in Python2, str in Python3)
    with io.open(custom + '-new', 'wt', encoding='utf-8') as fout:
        fout.write(u'{}START ======================== */\n'.format(prefix))
        fout.write(data.decode('utf-8'))
        fout.write(u'{}END ======================== */\n'.format(prefix))
        if os.path.exists(custom):
            with io.open(custom, 'rt', encoding='utf-8') as fin:
                for line in fin:
                    # io.open already yields text, so the Python2-only unicode() call is not needed
                    fout.write(line)
    os.rename(custom + '-new', custom)
20,275
def create(subscribe: typing.Subscription) -> Observable: """Creates an observable sequence object from the specified subscription function. .. marble:: :alt: create [ create(a) ] ---1---2---3---4---| Args: subscribe: Subscription function. Returns: An observable sequence that can be subscribed to via the given subscription function. """ return Observable(subscribe)
20,276
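A small usage sketch for create, assuming the surrounding RxPY-style API in which a subscription function receives an observer (and optionally a scheduler) and pushes notifications onto it.

def push_three_values(observer, scheduler=None):
    # Emit three values and complete; no teardown is needed for this source.
    observer.on_next(1)
    observer.on_next(2)
    observer.on_next(3)
    observer.on_completed()

source = create(push_three_values)
source.subscribe(
    on_next=lambda value: print("got", value),
    on_completed=lambda: print("done"),
)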
def tocl(d): """Generate TOC, in-page links to the IDs we're going to define below""" anchors = sorted(d.keys(), key=_lower) return TemplateData(t='All The Things', e=[a for a in anchors])
20,277
def assert_pd_equal(left, right, **kwargs): """Assert equality of two pandas objects.""" if left is None: return method = { pd.Series: pd.testing.assert_series_equal, pd.DataFrame: pd.testing.assert_frame_equal, np.ndarray: np.testing.assert_array_equal, }[left.__class__] method(left, right, **kwargs)
20,278
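A brief usage sketch for assert_pd_equal, showing the type-based dispatch; it only assumes pandas and numpy are imported as in the function itself.

import numpy as np
import pandas as pd

assert_pd_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3]))
assert_pd_equal(pd.DataFrame({"a": [1, 2]}), pd.DataFrame({"a": [1, 2]}))
assert_pd_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0]))

# Extra keyword arguments are forwarded to the underlying assertion,
# e.g. to relax dtype checking on Series:
assert_pd_equal(pd.Series([1, 2]), pd.Series([1.0, 2.0]), check_dtype=False)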
def usage(): """simple usage statement to prompt user when issue occur""" print('\nKML from CSV Points -- usage:\n') print('This program takes up to three flags (all optional).') print(' -i sets the path to the input CSV file.') print(' -o sets the path to the output KML file.') print(' -p sets the path to the icon used to identify your points.\n') print('To geolocate points, the input CSV file will need columns labeled:') print(' latitude or lat (case insensitive)') print(' longitude, lon, or long (case insensitive)\n') print('Optionally, to locate points in time, the CSV will need a column:') print(' date_time (case insensitive)\n') sys.exit()
20,279
def main( filenames: Iterable[PathLike], recursive: bool = False, verbose: int = 0, ): """ Augment Flake8 noqa comments with PyLint comments. """ # stdlib import functools import glob from itertools import chain # 3rd party from domdf_python_tools.paths import PathPlus # this package from flake2lint import process_file ret = 0 glob_func = functools.partial(glob.iglob, recursive=recursive) for file in map(PathPlus, chain.from_iterable(map(glob_func, filenames))): if ".git" in file.parts or "venv" in file.parts or ".tox" in file.parts: continue ret_for_file = process_file(file) if ret_for_file and verbose: click.echo(f"Rewriting '{file!s}'") ret |= ret_for_file sys.exit(ret)
20,280
def get_upper_parentwidget(widget, parent_position: int):
    """This function replaces this:
        self.parentWidget().parentWidget().parentWidget()
    with this:
        get_upper_parentwidget(self, 3)
    :param widget: QWidget
    :param parent_position: Which parent
    :return: Wanted parent widget
    """
    # A plain loop with a final return is equivalent to the original
    # while/else construct, which could never be left via break.
    for _ in range(parent_position):
        widget = widget.parentWidget()
    return widget
20,281
def DirectorySizeAsString(directory): """Returns size of directory as a string.""" return SizeAsString(DirectorySize(directory))
20,282
def test_datetime_pendulum_negative_non_dst(): """ datetime.datetime negative UTC non-DST """ assert ( ormsgpack.packb( [ datetime.datetime( 2018, 12, 1, 2, 3, 4, 0, tzinfo=pendulum.timezone("America/New_York"), ) ] ) == msgpack.packb(["2018-12-01T02:03:04-05:00"]) )
20,283
def patch_is_tty(value): """ Wrapped test function will have peltak.core.shell.is_tty set to *value*. """ def decorator(fn): # pylint: disable=missing-docstring @wraps(fn) def wrapper(*args, **kw): # pylint: disable=missing-docstring is_tty = shell.is_tty shell.is_tty = value try: return fn(*args, **kw) finally: shell.is_tty = is_tty return wrapper return decorator
20,284
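A usage sketch for the patch_is_tty decorator; the test bodies are illustrative and assume only that peltak's shell module is imported as in the decorator itself.

@patch_is_tty(False)
def test_behaviour_without_tty():
    # Inside this test, peltak.core.shell.is_tty is temporarily False,
    # so code under test should take its non-interactive path.
    assert shell.is_tty is False

@patch_is_tty(True)
def test_behaviour_with_tty():
    assert shell.is_tty is True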
def zones_analytics(self): """ API core commands for Cloudflare API""" base = self._base branch = self.zones setattr(branch, "analytics", self._add_unused(base, "zones", "analytics")) branch = self.zones.analytics setattr(branch, "colos", self._add_with_auth(base, "zones", "analytics/colos")) setattr(branch, "dashboard", self._add_with_auth(base, "zones", "analytics/dashboard"))
20,285
def make_bed_from_multi_eland_stream( instream, outstream, name, description, chr_prefix='chr', max_reads=255): """ read a multi eland result file from instream and write the bedfile to outstream :Parameters: - `instream`: stream containing the output from eland - `outstream`: stream to write the bed file too - `name`: name of bed-file (must be unique) - `description`: longer description of the bed file - `chromosome_prefix`: restrict output lines to fasta records that start with this pattern - `max_reads`: maximum number of reads to write to bed stream """ for lane in make_bed_from_multi_eland_generator(instream, name, description, chr_prefix, max_reads): outstream.write(lane)
20,286
def uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = "linear"): """ An initaliser which preserves output variance for approximately gaussian distributed inputs. This boils down to initialising layers using a uniform distribution in the range `(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)`, where `dim[0]` is equal to the input dimension of the parameter and the `scale` is a constant scaling factor which depends on the non-linearity used. See `Random Walk Initialisation for Training Very Deep Feedforward Networks <https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_ for more information. # Parameters tensor : `torch.Tensor`, required. The tensor to initialise. nonlinearity : `str`, optional (default = `"linear"`) The non-linearity which is performed after the projection that this tensor is involved in. This must be the name of a function contained in the `torch.nn.functional` package. # Returns The initialised tensor. """ size = 1.0 # Estimate the input size. This won't work perfectly, # but it covers almost all use cases where this initialiser # would be expected to be useful, i.e in large linear and # convolutional layers, as the last dimension will almost # always be the output size. for dimension in list(tensor.size())[:-1]: size *= dimension activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor) max_value = math.sqrt(3 / size) * activation_scaling return tensor.data.uniform_(-max_value, max_value)
20,287
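A minimal sketch of applying uniform_unit_scaling to a projection weight, assuming only PyTorch; the (input_dim, output_dim) layout and sizes are arbitrary choices for illustration.

import torch

# Weight laid out as (input_dim, output_dim), matching the assumption that
# the last dimension is the output size, so scaling is computed from 128.
weight = torch.empty(128, 64)
uniform_unit_scaling(weight, nonlinearity="linear")

x = torch.randn(32, 128)
y = x @ weight
print(y.var().item())  # should be roughly 1 for gaussian inputs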
def save_level1b_fits(outModel, obs_params, save_dir=None, **kwargs): """Save Level1bModel to FITS and update headers""" # Check if save directory specified in obs_params if save_dir is None: save_dir = obs_params.get('save_dir') file_path = outModel.meta.filename if save_dir is not None: # Create directory and intermediates if they don't exist os.makedirs(save_dir, exist_ok=True) file_path = os.path.join(save_dir, file_path) # Save model to DMS FITS file print(f' Saving: {file_path}') outModel.save(file_path) # Update header information update_dms_headers(file_path, obs_params) update_headers_pynrc_info(file_path, obs_params, **kwargs)
20,288
def get_results(elfFile): """Converts and returns collected data.""" staticSizes = parseElf(elfFile) romSize = sum([size for key, size in staticSizes.items() if key.startswith("rom_")]) ramSize = sum([size for key, size in staticSizes.items() if key.startswith("ram_")]) results = { "rom": romSize, "rom_rodata": staticSizes["rom_rodata"], "rom_code": staticSizes["rom_code"], "rom_misc": staticSizes["rom_misc"], "ram": ramSize, "ram_data": staticSizes["ram_data"], "ram_zdata": staticSizes["ram_zdata"], } return results
20,289
def path_element_to_dict(pb): """datastore.entity_pb.Path_Element converter.""" return { 'type': pb.type(), 'id': pb.id(), 'name': pb.name(), }
20,290
def copy_to_tmp(in_file): """Copies a file to a tempfile. The point of this is to copy small files from CNS to tempdirs on the client when using code that's that hasn't been Google-ified yet. Examples of files are the vocab and config files of the Hugging Face tokenizer. Arguments: in_file: Path to the object to be copied, likely in CNS Returns: Path where the object ended up (inside of the tempdir). """ # We just want to use Python's safe tempfile name generation algorithm with tempfile.NamedTemporaryFile(delete=False) as f_out: target_path = os.path.join(tempfile.gettempdir(), f_out.name) gfile.Copy(in_file, target_path, overwrite=True) return target_path
20,291
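A usage sketch for copy_to_tmp; the source path is hypothetical and gfile is assumed to be the TensorFlow file-system wrapper already imported by the surrounding module.

# Hypothetical remote path; anything gfile can read works the same way.
local_vocab_path = copy_to_tmp("gs://some-bucket/tokenizer/vocab.json")

with open(local_vocab_path) as f:
    vocab = f.read()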
def segment_sylls_from_songs(audio_dirs, song_seg_dirs, syll_seg_dirs, p, \ shoulder=0.05, img_fn='temp.pdf', verbose=True): """ Split song renditions into syllables, write segments. Enter quantiles to determine where to split the song motif. Entering the same quantile twice will remove it. Note ---- * All the song segments must be the same duration! Parameters ---------- audio_dirs : list of str Audio directories. song_seg_dirs : list of str Directories containing song segments. syll_seg_dirs : list of str Directories where syllable segments are written. p : dict Segmenting parameters. shoulder : float, optional Duration of padding on either side of song segments, in seconds. img_fn : str, optional Image filename. Defaults to ``'temp.pdf'``. verbose : bool, optional Defaults to `True`. """ # Read segments. song_segs = read_segment_decisions(audio_dirs, song_seg_dirs) # Collect spectrograms. empty_audio_files = [] specs, fns, song_onsets = [], [], [] for audio_fn in song_segs: with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=WavFileWarning) fs, audio = wavfile.read(audio_fn) for seg in song_segs[audio_fn].reshape(-1,2): # Make spectrogram. onset, offset = seg[0] - shoulder, seg[1] + shoulder i1, i2 = int(fs*onset), int(fs*offset) spec, dt = _get_spec(fs, audio[max(i1,0):i2], p) # Pad spectrogram if it's near the edge of the file. if i1 < 0 or i2 > len(audio): pre_bins = max(0, int(np.round(-i1/fs/dt))) post_bins = max(0, int(np.round((i2 - len(audio))/fs/dt))) new_spec = np.mean(spec) * \ np.ones((spec.shape[0], spec.shape[1]+pre_bins+post_bins)) if post_bins == 0: post_bins = -new_spec.shape[1] new_spec[pre_bins:-post_bins] spec = new_spec specs.append(spec) fns.append(audio_fn) song_onsets.append(onset) if len(song_segs[audio_fn]) == 0: empty_audio_files.append(audio_fn) assert len(specs) > 0, "Found no spectrograms!" # Calculate and smooth amplitude traces. amp_traces = [] for spec in specs: amps = np.sum(spec, axis=0) amps -= np.mean(amps) amps /= np.std(amps) + EPSILON amp_traces.append(amps) # Truncate the amplitude traces if they aren't exactly the same length. min_time_bins = min(len(amp_trace) for amp_trace in amp_traces) max_time_bins = max(len(amp_trace) for amp_trace in amp_traces) if verbose and (min_time_bins != max_time_bins): print("Found different numbers of time bins in segments!") print("\tmin:" + str(min_time_bins) + ", max:", max_time_bins) print("\tTruncating to minimum number of time bins.") if min_time_bins != max_time_bins: amp_traces = [amp_trace[:min_time_bins] for amp_trace in amp_traces] amp_traces = np.array(amp_traces) # Warp the amplitude traces. max_t = amp_traces.shape[1]*dt*1e3 num_time_bins = amp_traces.shape[1] model = ShiftWarping(maxlag=0.2, smoothness_reg_scale=10.0) model.fit(amp_traces[:,:,np.newaxis], iterations=50) aligned = model.predict().squeeze() max_raw_val = np.max(amp_traces) max_aligned_val = np.max(aligned) shifts = model.shifts quantiles = [] break_flag = False while True: # Plot. 
_, axarr = plt.subplots(3,1, sharex=True) axarr[0].imshow(specs[np.random.randint(len(specs))], origin='lower', \ aspect='auto', extent=[0,max_t,p['min_freq']/1e3, \ p['max_freq']/1e3]) temp = np.copy(amp_traces) for q in quantiles: for i in range(len(temp)): try: temp[i,int(round(q*num_time_bins))+shifts[i]] = max_raw_val except IndexError: pass axarr[1].imshow(temp, origin='lower', aspect='auto', \ extent=[0,max_t,0,len(amp_traces)]) temp = np.copy(aligned) for q in quantiles: for i in range(len(temp)): temp[i,int(round(q*num_time_bins))] = max_aligned_val axarr[2].imshow(temp, origin='lower', aspect='auto', \ extent=[0,max_t,0,len(amp_traces)]) axarr[0].set_ylabel("Frequency (kHz)") axarr[1].set_ylabel('Amplitude') axarr[2].set_ylabel('Shifted') axarr[0].set_title('Enter segmenting quantiles:') axarr[2].set_xlabel('Time (ms)') plt.savefig(img_fn) plt.close('all') # Ask for segmenting decisions. while True: temp = input("Add or delete quantile or [s]top: ") if temp == 's': break_flag = True break try: temp = float(temp) assert 0.0 < temp and temp < 1.0 if temp in quantiles: quantiles.remove(temp) else: quantiles.append(temp) break except: print("Invalid input!") print("Must be \'s\' or a float between 0 and 1.") continue if break_flag: break # Write syllable segments. if verbose: print("Writing syllable segments...") duration = num_time_bins * dt quantiles = np.array(quantiles) quantiles.sort() files_encountered = {} for i, (fn, song_onset) in enumerate(zip(fns, song_onsets)): # Unshifted onsets and offsets. onsets = song_onset + duration * quantiles[:-1] offsets = song_onset + duration * quantiles[1:] # Apply shifts. onsets += shifts[i] * dt offsets += shifts[i] * dt # Save. index = audio_dirs.index(os.path.split(fn)[0]) write_fn = os.path.join(syll_seg_dirs[index], os.path.split(fn)[-1]) write_fn = write_fn[:-4] + '.txt' if not os.path.exists(os.path.split(write_fn)[0]): os.makedirs(os.path.split(write_fn)[0]) segs = np.stack([onsets, offsets]).reshape(2,-1).T header, mode = "", 'ab' if fn not in files_encountered: files_encountered[fn] = 1 mode = 'wb' header += "Syllables from song: " + fn + "\n" header += "Song onset: "+str(song_onset) with open(write_fn, mode) as f: np.savetxt(f, segs, fmt='%.5f', header=header) # Write empty files corresponding to audio files without song. for fn in empty_audio_files: index = audio_dirs.index(os.path.split(fn)[0]) write_fn = os.path.join(syll_seg_dirs[index], os.path.split(fn)[-1]) write_fn = write_fn[:-4] + '.txt' if not os.path.exists(os.path.split(write_fn)[0]): os.makedirs(os.path.split(write_fn)[0]) header = "Syllables from song: " + fn np.savetxt(write_fn, np.array([]), header=header)
20,292
def get_orr_tensor(struct): """ Gets orientation of all molecules in the struct """ molecule_list = get_molecules(struct) orr_tensor = np.zeros((len(molecule_list),3,3)) for i,molecule_struct in enumerate(molecule_list): orr_tensor[i,:,:] = get_molecule_orientation(molecule_struct) return orr_tensor
20,293
def make_request_for_quotation(supplier_data=None): """ :param supplier_data: List containing supplier data """ supplier_data = supplier_data if supplier_data else get_supplier_data() rfq = frappe.new_doc('Request for Quotation') rfq.transaction_date = nowdate() rfq.status = 'Draft' rfq.company = '_Test Company' rfq.message_for_supplier = 'Please supply the specified items at the best possible rates.' for data in supplier_data: rfq.append('suppliers', data) rfq.append("items", { "item_code": "_Test Item", "description": "_Test Item", "uom": "_Test UOM", "qty": 5, "warehouse": "_Test Warehouse - _TC", "schedule_date": nowdate() }) rfq.submit() return rfq
20,294
def testModule(m): """ Test all the docstrings for classes/functions/methods in a module :param m: Module to test :type m: module """ for name,obj in inspect.getmembers(m): if inspect.isclass(obj): testClass(obj) elif inspect.isfunction(obj): testFunction(obj)
20,295
def mlrPredict(W, data): """ mlrObjFunction predicts the label of data given the data and parameter W of Logistic Regression Input: W: the matrix of weight of size (D + 1) x 10. Each column is the weight vector of a Logistic Regression classifier. X: the data matrix of size N x D Output: label: vector of size N x 1 representing the predicted label of corresponding feature vector given in data matrix """ label = np.zeros((data.shape[0], 1)) row = data.shape[0] ################## # YOUR CODE HERE # ################## # HINT: Do not forget to add the bias term to your input data # Adding biases biases = np.full((row,1),1) X = np.concatenate((biases,data), axis=1) t = np.sum(np.exp(np.dot(X,W)),axis=1) t = t.reshape(t.shape[0],1) theta_value = np.exp(np.dot(X,W))/t label = np.argmax(theta_value,axis=1) label = label.reshape(row,1) return label
20,296
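A small end-to-end sketch for mlrPredict with random data, assuming only NumPy; the dimensions (D = 4 features, 10 classes) are illustrative.

import numpy as np

rng = np.random.default_rng(0)
N, D = 6, 4
data = rng.normal(size=(N, D))
W = rng.normal(size=(D + 1, 10))  # +1 row for the bias term added inside mlrPredict

labels = mlrPredict(W, data)
print(labels.shape)  # (6, 1), each entry an integer class index in [0, 9]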
def make_vehicle_random_drive(vehicles): """a thread make vehicle random drive 3s and autopilot 10s""" vehicle = vehicles[0] while True: time.sleep(10) logger.info('Start random drive...') vehicle.set_autopilot(False) steer = random.uniform(-0.2,0.2) throttle = random.uniform(0,1) vehicle.apply_control(carla.VehicleControl(throttle=throttle, steer=steer)) time.sleep(3) logger.info('Start autopilot...') vehicle.set_autopilot(True)
20,297
def write_urdb_rate_data(urdb_rate_data, urdb_filepath = './', overwrite_identical=True): """ Takes Pandas DataFrame containing URDB rate data and stores as .csv at urdb_filepath. The 'overwrite_identical' variable indicates whether 'urdb_rate_data' should be compared to previous URDB download and replace it if they are found to be identical. This avoids data duplication when it is unnecessary. Function returns True if data previously exists, False if it is unique, and None if the comparison was never performed (overwrite_identical == False). """ todays_date = helpers.todays_date() new_urdb_file = urdb_filepath+'usurdb_{}.csv'.format(todays_date) urdb_rate_data.to_csv(new_urdb_file, index=False) # Check if db has changed since last download if overwrite_identical: prev_urdb_files = glob.glob(urdb_filepath+'*.csv') if len(prev_urdb_files)>1: prev_urdb_dates = [fp.split('usurdb_')[1].split('.csv')[0] for fp in prev_urdb_files] prev_urdb_dates.remove(todays_date) most_recent_date = pd.Series(prev_urdb_dates).map(int).max() most_recent_urdb_file = urdb_filepath+'usurdb_{}.csv'.format(most_recent_date) if filecmp.cmp(new_urdb_file, most_recent_urdb_file, shallow=True): subprocess.run('rm {}'.format(most_recent_urdb_file), shell=True) prev_exists = True else: prev_exists = False else: prev_exists = False else: prev_exists = None return prev_exists
20,298
def get_batches_from_generator(iterable, n): """ Batch elements of an iterable into fixed-length chunks or blocks. """ it = iter(iterable) x = tuple(islice(it, n)) while x: yield x x = tuple(islice(it, n))
20,299
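A quick usage sketch for get_batches_from_generator, using a plain range as the iterable.

for batch in get_batches_from_generator(range(10), 4):
    print(batch)
# (0, 1, 2, 3)
# (4, 5, 6, 7)
# (8, 9)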