Columns: content (string, length 22 to 815k characters), id (int64, 0 to 4.91M)
def analyze(request):
    """Analyze SQL using SOAR.

    :param request:
    :return:
    """
    text = request.POST.get('text')
    instance_name = request.POST.get('instance_name')
    db_name = request.POST.get('db_name')
    if not text:
        result = {"total": 0, "rows": []}
    else:
        soar = Soar()
        if instance_name != '' and db_name != '':
            soar_test_dsn = SysConfig().get('soar_test_dsn')
            # Fetch the instance connection information
            instance_info = Instance.objects.get(instance_name=instance_name)
            online_dsn = "{user}:{pwd}@{host}:{port}/{db}".format(user=instance_info.user,
                                                                  pwd=instance_info.raw_password,
                                                                  host=instance_info.host,
                                                                  port=instance_info.port,
                                                                  db=db_name)
        else:
            online_dsn = ''
            soar_test_dsn = ''
        args = {"report-type": "markdown",
                "query": '',
                "online-dsn": online_dsn,
                "test-dsn": soar_test_dsn,
                "allow-online-as-test": "false"}
        rows = generate_sql(text)
        for row in rows:
            args['query'] = row['sql'].replace('"', '\\"').replace('`', '').replace('\n', ' ')
            cmd_args = soar.generate_args2cmd(args=args, shell=True)
            stdout, stderr = soar.execute_cmd(cmd_args, shell=True).communicate()
            row['report'] = stdout if stdout else stderr
        result = {"total": len(rows), "rows": rows}
    return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
                        content_type='application/json')
15,700
def test_1_6_4_systemd_coredump_package(host):
    """
    CIS Ubuntu 20.04 v1.0.0 - Rule # 1.6.4
    Tests if systemd-coredump package is installed
    """
    assert host.package('systemd-coredump').is_installed
15,701
def sin(c):
    """sin(a+x) = sin(a) cos(x) + cos(a) sin(x)"""
    if not isinstance(c, pol):
        return math.sin(c)
    a0, p = c.separate()
    lst = [math.sin(a0), math.cos(a0)]
    for n in range(2, c.order + 1):
        lst.append(-lst[-2] / n / (n - 1))
    return phorner(lst, p)
15,702
def _calc_metadata() -> str:
    """
    Build metadata MAY be denoted by appending a plus sign and a series of dot
    separated identifiers immediately following the patch or pre-release version.
    Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
    """
    if not is_appveyor:
        return "local-build"
    is_pr = PR_NUM in env
    assert (PR_NUM in env) == (PR_BRANCH in env)
    assert VER in env
    if is_pr:
        return "{VER}.pr{PR_NUM}-{PR_BRANCH}".format(**env)
    else:
        if env[BRANCH] != "master":
            # Shouldn't happen, since side branches are not built.
            return "{VER}.{BRANCH}".format(**env)
        else:
            return "{VER}".format(**env)
15,703
def authorization_required(func):
    """Returns 401 response if user is not logged-in when requesting URL
    with user ndb.Key in it or Returns 403 response if logged-in user's
    ndb.Key is different from ndb.Key given in requested URL.
    """
    @functools.wraps(func)
    def decorated_function(*pa, **ka):  # pylint: disable=missing-docstring
        if auth.is_authorized(ndb.Key(urlsafe=ka['key'])):
            return func(*pa, **ka)
        if not auth.is_logged_in():
            return abort(401)
        return abort(403)
    return decorated_function
15,704
def pad_col(input, val=0, where='end'):
    """Adds a column of `val` at the start or end of `input`."""
    if len(input.shape) != 2:
        raise ValueError(f"Only works for `phi` tensor that is 2-D.")
    pad = torch.zeros_like(input[:, :1])
    if val != 0:
        pad = pad + val
    if where == 'end':
        return torch.cat([input, pad], dim=1)
    elif where == 'start':
        return torch.cat([pad, input], dim=1)
    raise ValueError(f"Need `where` to be 'start' or 'end', got {where}")
15,705
def seats_found_ignoring_floor(data: List[List[str]], row: int, col: int) -> int:
    """
    Search each cardinal direction until we hit a wall or a seat.
    If a seat is hit, determine if it's occupied.
    """
    total_seats_occupied = 0
    cardinal_direction_operations = itertools.product([-1, 0, 1], repeat=2)
    for row_modifier, col_modifier in cardinal_direction_operations:
        if row_modifier or col_modifier:
            total_seats_occupied += next_seat_on_path_occupied(
                data, row, col, row_modifier, col_modifier
            )
    return total_seats_occupied
15,706
def main(module, dry_run=False, *arguments):
    """Load module to run

    module: module path
    dry_run: only parse input arguments and print them
    arguments: arguments of the imported module
    """
    module = load_module(module)
15,707
def test_set_sample(fake_session):
    """Set value should find the second AllowableFieldType."""
    fake_fv = fake_session.FieldValue.load(
        {
            "id": 200,
            "child_item_id": None,
            "allowable_field_type_id": None,
            "allowable_field_type": None,
            "parent_class": "Operation",
            "role": "input",
            "object_type": None,
            "field_type": {
                "id": 100,
                "allowable_field_types": [
                    {"id": 1, "sample_type_id": 2},
                    {"id": 2, "sample_type_id": 3},  # should find this
                ],
            },
        }
    )
    fake_sample = fake_session.Sample.load(
        {"id": 300, "sample_type_id": 3, "sample_type": {"id": 3}}
    )
    fake_fv.set_value(sample=fake_sample)
    assert fake_fv.allowable_field_type_id == 2
    assert fake_fv.allowable_field_type.id == 2
    assert fake_fv.child_sample_id == fake_sample.id
    assert fake_fv.sample == fake_sample
15,708
def product_design_space() -> ProductDesignSpace:
    """Build a ProductDesignSpace for testing."""
    alpha = RealDescriptor('alpha', lower_bound=0, upper_bound=100, units="")
    beta = RealDescriptor('beta', lower_bound=0, upper_bound=100, units="")
    gamma = CategoricalDescriptor('gamma', categories=['a', 'b', 'c'])
    dimensions = [
        ContinuousDimension(alpha, lower_bound=0, upper_bound=10),
        ContinuousDimension(beta, lower_bound=0, upper_bound=10),
        EnumeratedDimension(gamma, values=['a', 'c'])
    ]
    return ProductDesignSpace(name='my design space', description='does some things',
                              dimensions=dimensions)
15,709
def compute_percents_of_labels(label):
    """
    Compute the ratio/percentage size of the labels in a labeled image

    :param label: the labeled 2D image
    :type label: numpy.ndarray
    :return: An array of relative sizes of the labels in the image. Indices of the sizes in the array \
        correspond to the labels in the labeled image. E.g. output [0.2, 0.5, 0.3] means label 0's size \
        is 0.2 of the labeled image, label 1's size is 0.5 of the labeled image, and label 2's size is 0.3 of \
        the labeled image.
    :rtype: numpy.ndarray
    """
    # Get the bins of the histogram. Since the last bin of the histogram is [label, label+1],
    # we add 1 to the number of different labels in the labeled image when generating bins
    num_labels = np.arange(0, len(np.unique(label)) + 1)
    # Histogramize the label image and get the frequency array percent_of_dominance
    (percent_of_dominance, _) = np.histogram(label, bins=num_labels)
    # Convert the dtype of the frequency array to float
    percent_of_dominance = percent_of_dominance.astype("float")
    # Normalize by the sum of frequencies (number of pixels in the labeled image)
    percent_of_dominance /= percent_of_dominance.sum()
    return percent_of_dominance
15,710
def removepara(H, M, Hmin='1/2', Hmax='max', output=-1, kwlc={}):
    """
    Retrieve the linear contribution to the cycle and remove it from the cycle.

    **H** and **M** correspond to the entire cycle (two branches), i.e. **H**
    starts and ends at the same value (or an approximate value).

    The M vs H cycle is split into its two branches, H1,M1 and H2,M2, defined by::

        H1,M1: curve with dH/dt < 0. The field decreases with time.
        H2,M2: curve with dH/dt > 0. The field increases with time.

    With the global variable FIGS = True, shows intermediate states of the
    process of determining and removing the linear contribution.
    The figure shows the **Hmin** and **Hmax** positions in the cycle.

    output: kind of output, (0 or -1) out.params or (1) out. (v 0.210304)
        Note: output is set to -1 as default to achieve backward compatibility,
        but it should be changed to 1 in the future.
    kwlc: dictionary with kwargs to be passed to linealcontribution.

    Returns:
        if output = -1: H1, M1, H2, M2, [slope, jump, offset]
        if output = 1:  a plain object with the previous attributes and others.
    """
    if PRINT:
        print('**********************************************************')
        print('removepara ')
        print('**********************************************************')
    if Hmax == 'max':
        Hmax = max(abs(H))
    if Hmin == '1/2':
        Hmin = 0.5 * max(abs(H))

    H1, M1, H2, M2 = splitcycle(H, M)

    o1 = linealcontribution(H1, M1, [Hmax, Hmin], label='dH/dt < 0', output=output, **kwlc)
    o2 = linealcontribution(H2, M2, [Hmax, Hmin], label='dH/dt > 0', output=output, **kwlc)
    if output == 1:
        p1 = o1.params
        p2 = o2.params
    elif output == -1:
        p1 = o1
        p2 = o2
    Ms = (p1['Ms'].value + p2['Ms'].value) * 0.5
    if p1['Ms'].stderr is None or p2['Ms'].stderr is None:
        eMs = None
    else:
        eMs = (p1['Ms'].stderr + p2['Ms'].stderr) * 0.5
    # End of fitting
    if PRINT:
        print('slope 1:', p1['Xi'])
        print('slope 2:', p2['Xi'])
        print('Ms 1 :', p1['Ms'])
        print('Ms 2 :', p2['Ms'])
        print('Ms :%s +/- %s' % (Ms, eMs))
        print('offset 1 :', p1['offset'])
        print('offset 2 :', p2['offset'])
        print('a 1 :', p1['a'])
        print('a 2 :', p2['a'])
        print('b 1 :', p1['b'])
        print('b 2 :', p2['b'])

    # Build an average slope from the one obtained for each branch, and
    # correct both branches by removing this slope.
    pend = (p1['Xi'] + p2['Xi']) / 2.
    salto = (p1['Ms'] + p2['Ms']) / 2.
    desp = (p1['offset'] + p2['offset']) / 2.

    M1 = (M1 - H1 * pend)
    M2 = (M2 - H2 * pend)

    if FIGS:
        __newfig__()
        pyp.plot(H1, M1, 'b.-', label='dH/dt < 0')
        pyp.plot(H2, M2, 'r.-', label='dH/dt > 0')
        pyp.axhline(salto, color='k', alpha=0.5)
        pyp.axhline(-salto, color='k', alpha=0.5)
        pyp.legend(loc=0)

    if output == 1:
        out = ReturnClass()
        out.H1 = H1
        out.H2 = H2
        out.M1 = M1
        out.M2 = M2
        out.pend = pend
        out.desp = desp
        out.salto = salto
        out.o1 = o1
        out.o2 = o2
        return out
    else:
        return H1, M1, H2, M2, [pend, salto, desp]
15,711
def flush():
    """
    Remove all mine contents of minion.

    :rtype: bool
    :return: True on success

    CLI Example:

    .. code-block:: bash

        salt '*' mine.flush
    """
    if __opts__["file_client"] == "local":
        return __salt__["data.update"]("mine_cache", {})
    load = {
        "cmd": "_mine_flush",
        "id": __opts__["id"],
    }
    return _mine_send(load, __opts__)
15,712
def polygon_to_shapely_polygon_wkt_compat(polygon):
    """
    Convert a Polygon to its Shapely Polygon representation but with
    WKT compatible coordinates.
    """
    shapely_points = []
    for location in polygon.locations():
        shapely_points.append(location_to_shapely_point_wkt_compat(location))
    return shapely.geometry.Polygon(shapely.geometry.LineString(shapely_points))
15,713
def main():
    """
    Program entry point: initialization, network definition, training, and printing.

    Args:
    Return:
    """
    # Initialize; configure whether to use the GPU and the trainer count
    paddle.init(use_gpu=False, trainer_count=1)

    # Configure the network structure and parameters
    cost, parameters, optimizer, feeding = network_config()

    # Record the training costs
    costs = []

    # Build the trainer with three arguments (cost, parameters, update_equation),
    # i.e. the cost function, the parameters, and the update rule.
    trainer = paddle.trainer.SGD(
        cost=cost, parameters=parameters, update_equation=optimizer)

    # Event handling
    def event_handler(event):
        """
        Event handler: react to information emitted during training.

        Args:
            event: event object containing event.pass_id, event.batch_id, event.cost, etc.
        Return:
        """
        if isinstance(event, paddle.event.EndIteration):
            if event.pass_id % 100 == 0:
                print("Pass %d, Batch %d, Cost %f" % (
                    event.pass_id, event.batch_id, event.cost))
                costs.append(event.cost)
        if isinstance(event, paddle.event.EndPass):
            result = trainer.test(
                reader=paddle.batch(test(), batch_size=2), feeding=feeding)
            print("Test %d, Cost %f" % (event.pass_id, result.cost))

    # Model training
    # paddle.reader.shuffle(train(), buf_size=500):
    #   the trainer reads buf_size=500 samples from the train() reader and shuffles them
    # paddle.batch(reader(), batch_size=256):
    #   takes batch_size=256 samples from the shuffled data for one training iteration
    # feeding: uses the previously defined feeding indices to feed data layers x and y to the trainer
    # event_handler: event mechanism; a custom event_handler can react to event information
    # num_passes: number of training passes
    trainer.train(
        reader=paddle.batch(
            paddle.reader.shuffle(train(), buf_size=500), batch_size=256),
        feeding=feeding,
        event_handler=event_handler,
        num_passes=300)

    # Print the resulting parameters
    print_parameters(parameters)

    # Plot the learning curve
    plot_costs(costs)
15,714
def test_device_code_grant(
    requests_mock,
    oauth2client,
    token_endpoint,
    device_code,
    client_id,
    client_credential,
    public_jwk,
    client_auth_method_handler,
    device_code_grant_validator,
    public_app_auth_validator,
    client_secret_basic_auth_validator,
    client_secret_post_auth_validator,
    client_secret_jwt_auth_validator,
    private_key_jwt_auth_validator,
):
    """.device_code() sends a request to the Token Endpoint using the Device Code grant."""
    new_access_token = secrets.token_urlsafe()
    new_refresh_token = secrets.token_urlsafe()
    requests_mock.post(
        token_endpoint,
        json={
            "access_token": new_access_token,
            "refresh_token": new_refresh_token,
            "token_type": "Bearer",
            "expires_in": 3600,
        },
    )

    token_resp = oauth2client.device_code(device_code)
    assert requests_mock.called_once
    assert not token_resp.is_expired()
    assert token_resp.access_token == new_access_token
    assert token_resp.refresh_token == new_refresh_token

    device_code_grant_validator(requests_mock.last_request, device_code=device_code)

    if client_auth_method_handler == PublicApp:
        public_app_auth_validator(requests_mock.last_request, client_id=client_id)
    elif client_auth_method_handler == ClientSecretPost:
        client_secret_post_auth_validator(
            requests_mock.last_request,
            client_id=client_id,
            client_secret=client_credential,
        )
    elif client_auth_method_handler == ClientSecretBasic:
        client_secret_basic_auth_validator(
            requests_mock.last_request,
            client_id=client_id,
            client_secret=client_credential,
        )
    elif client_auth_method_handler == ClientSecretJWT:
        client_secret_jwt_auth_validator(
            requests_mock.last_request,
            client_id=client_id,
            client_secret=client_credential,
            endpoint=token_endpoint,
        )
    elif client_auth_method_handler == PrivateKeyJWT:
        private_key_jwt_auth_validator(
            requests_mock.last_request,
            client_id=client_id,
            endpoint=token_endpoint,
            public_jwk=public_jwk,
        )
15,715
def ccd_process(ccd, oscan=None, trim=None, error=False, masterbias=None,
                bad_pixel_mask=None, gain=None, rdnoise=None,
                oscan_median=True, oscan_model=None):
    """Perform basic processing on ccd data.

    The following steps can be included:
      * overscan correction
      * trimming of the image
      * create deviation frame
      * gain correction
      * add a mask to the data
      * subtraction of master bias

    The task returns a processed `ccdproc.CCDData` object.

    Parameters
    ----------
    ccd: `ccdproc.CCDData`
        Frame to be reduced

    oscan: None, str, or `~ccdproc.ccddata.CCDData`
        For no overscan correction, set to None. Otherwise provide a region
        of `ccd` from which the overscan is extracted, using the FITS
        conventions for index order and index start, or a slice from `ccd`
        that contains the overscan.

    trim: None or str
        For no trim correction, set to None. Otherwise provide a region
        of `ccd` from which the image should be trimmed, using the FITS
        conventions for index order and index start.

    error: boolean
        If True, create an uncertainty array for ccd

    masterbias: None, `~numpy.ndarray`, or `~ccdproc.CCDData`
        A master bias frame to be subtracted from ccd.

    bad_pixel_mask: None or `~numpy.ndarray`
        A bad pixel mask for the data. The bad pixel mask should be given
        such that bad pixels have a value of 1 and good pixels a value of 0.

    gain: None or `~astropy.Quantity`
        Gain value to multiply the image by to convert to electrons

    rdnoise: None or `~astropy.Quantity`
        Read noise for the observations. The read noise should be in
        `~astropy.units.electron`

    oscan_median: bool, optional
        If true, takes the median of each line. Otherwise, uses the mean.

    oscan_model: `~astropy.modeling.Model`, optional
        Model to fit to the data. If None, returns the values calculated
        by the median or the mean.

    Returns
    -------
    ccd: `ccdproc.CCDData`
        Reduced ccd
    """
    # make a copy of the object
    nccd = ccd.copy()

    # apply the overscan correction
    if isinstance(oscan, ccdproc.CCDData):
        nccd = ccdproc.subtract_overscan(nccd, overscan=oscan,
                                         median=oscan_median,
                                         model=oscan_model)
    elif isinstance(oscan, six.string_types):
        nccd = ccdproc.subtract_overscan(nccd, fits_section=oscan,
                                         median=oscan_median,
                                         model=oscan_model)
    elif oscan is None:
        pass
    else:
        raise TypeError('oscan is not None, a string, or CCDData object')

    # apply the trim correction
    if isinstance(trim, six.string_types):
        nccd = ccdproc.trim_image(nccd, fits_section=trim)
    elif trim is None:
        pass
    else:
        raise TypeError('trim is not None or a string')

    # create the error frame
    if error and gain is not None and rdnoise is not None:
        nccd = ccdproc.create_deviation(nccd, gain=gain, rdnoise=rdnoise)
    elif error and (gain is None or rdnoise is None):
        raise ValueError(
            'gain and rdnoise must be specified to create error frame')

    # apply the bad pixel mask
    if isinstance(bad_pixel_mask, np.ndarray):
        nccd.mask = bad_pixel_mask
    elif bad_pixel_mask is None:
        pass
    else:
        raise TypeError('bad_pixel_mask is not None or numpy.ndarray')

    # apply the gain correction
    if isinstance(gain, u.quantity.Quantity):
        nccd = ccdproc.gain_correct(nccd, gain)
    elif gain is None:
        pass
    else:
        raise TypeError('gain is not None or astropy.Quantity')

    # subtract the master bias
    if isinstance(masterbias, ccdproc.CCDData):
        nccd = nccd.subtract(masterbias)
    elif isinstance(masterbias, np.ndarray):
        nccd.data = nccd.data - masterbias
    elif masterbias is None:
        pass
    else:
        raise TypeError(
            'masterbias is not None, numpy.ndarray, or a CCDData object')

    return nccd
15,716
def get_role_keyids(rolename):
    """
    <Purpose>
      Return a list of the keyids associated with 'rolename'. Keyids are used
      as identifiers for keys (e.g., rsa key). A list of keyids are associated
      with each rolename. Signing a metadata file, such as 'root.json' (Root
      role), involves signing or verifying the file with a list of keys
      identified by keyid.

    <Arguments>
      rolename:
        An object representing the role's name, conformant to
        'ROLENAME_SCHEMA' (e.g., 'root', 'snapshot', 'timestamp').

    <Exceptions>
      tuf.FormatError, if 'rolename' does not have the correct object format.

      tuf.UnknownRoleError, if 'rolename' cannot be found in the role database.

      tuf.InvalidNameError, if 'rolename' is incorrectly formatted.

    <Side Effects>
      None.

    <Returns>
      A list of keyids.
    """
    # Raises tuf.FormatError, tuf.UnknownRoleError, or tuf.InvalidNameError.
    _check_rolename(rolename)

    roleinfo = _roledb_dict[rolename]

    return roleinfo['keyids']
15,717
def _DX(X):
    """Computes the X finite derivative along y and x.

    Arguments
    ---------
    X: (m, n, l) numpy array
        The data to derivate.

    Returns
    -------
    tuple
        Tuple of length 2 (Dy(X), Dx(X)).

    Note
    ----
    DX[0], the derivative along y, has shape (m-1, n, l).
    DX[1], the derivative along x, has shape (m, n-1, l).
    """
    return (X[1:, :, :] - X[:-1, :, :],     # D along y
            X[:, 1:, :] - X[:, 0:-1, :])    # D along x
15,718
def load_spectra_from_dataframe(df):
    """
    :param df: pandas dataframe
    :return:
    """
    total_flux = df.total_flux.values[0]
    spectrum_file = df.spectrum_filename.values[0]
    pink_stride = df.spectrum_stride.values[0]
    spec = load_spectra_file(spectrum_file, total_flux=total_flux,
                             pinkstride=pink_stride, as_spectrum=True)
    return spec
15,719
def included_element(include_predicates, exclude_predicates, element):
    """Return whether an index element should be included."""
    return (not any(evaluate_predicate(element, ep) for ep in exclude_predicates)
            and (include_predicates == []
                 or any(evaluate_predicate(element, ip) for ip in include_predicates)))
15,720
def ele_clear_input(context, selector=None, param2=None):
    """
    Empty the selector element param1 and enter the value param2

    :param context: step context
    :param selector: locator string for selector element (or None).
    :param param2: string to be input
    """
    g_Context.step.ele_clear_input(context, selector, param2)
15,721
def tfm_setup(self: CameraProperties, more_setup: Callable[[CameraProperties], None] = None,
              dtype: Union[np.int32, np.float32] = np.int32):
    """Setup for transforms"""
    # for fast smile correction
    self.smiled_size = (np.ptp(self.settings["row_slice"]),
                        self.settings["resolution"][1] - np.max(self.calibration["smile_shifts"]))
    self.line_buff = CircArrayBuffer(self.smiled_size, axis=0, dtype=dtype)

    # for collapsing spectral pixels into bands
    self.byte_sz = dtype(0).nbytes
    self.width = np.uint16(self.settings["fwhm_nm"] * self.settings["resolution"][1] /
                           np.ptp(self.calibration["wavelengths_linear"]))
    self.bin_rows = np.ptp(self.settings["row_slice"])
    self.bin_cols = self.settings["resolution"][1] - np.max(self.calibration["smile_shifts"])
    self.reduced_shape = (self.bin_rows, self.bin_cols // self.width, self.width)

    # update the wavelengths for fast binning
    self.binned_wavelengths = self.calibration["wavelengths_linear"].astype(np.float32)
    self.binned_wavelengths = np.lib.stride_tricks.as_strided(
        self.binned_wavelengths,
        strides=(self.width * 4, 4),  # assumed np.float32
        shape=(len(self.binned_wavelengths) // self.width, self.width))
    self.binned_wavelengths = np.around(self.binned_wavelengths.mean(axis=1), decimals=1)

    # update the wavelengths for slow binning
    n_bands = int(np.ptp(self.calibration["wavelengths"]) // self.settings["fwhm_nm"])
    # jump by `fwhm_nm` and find closest array index, then let the wavelengths be in the middle between jumps
    self.λs = np.around(np.array([np.min(self.calibration["wavelengths"]) + i * self.settings["fwhm_nm"]
                                  for i in range(n_bands + 1)]), decimals=1)
    self.bin_idxs = [np.argmin(np.abs(self.calibration["wavelengths"] - λ)) for λ in self.λs]
    self.λs += self.settings["fwhm_nm"] // 2
    # self.bin_buff = CircArrayBuffer((np.ptp(self.settings["row_slice"]), n_bands), axis=1, dtype=dtype)

    # precompute some reference data for converting digital number to radiance
    self.nearest_exposure = self.calibration["rad_ref"].sel(
        exposure=self.settings["exposure_ms"], method="nearest").exposure
    self.dark_current = np.array(
        self.settings["exposure_ms"] / self.nearest_exposure *
        self.calibration["rad_ref"].sel(exposure=self.nearest_exposure, luminance=0).isel(luminance=0))
    self.ref_luminance = np.array(
        self.settings["exposure_ms"] / self.nearest_exposure *
        self.calibration["rad_ref"].sel(exposure=self.nearest_exposure, luminance=self.settings["luminance"]) -
        self.dark_current)
    self.spec_rad_ref = np.float32(self.calibration["sfit"](self.calibration["wavelengths"]))

    # prep for converting radiance to reflectance
    self.rad_6SV = np.float32(self.calibration["rad_fit"](self.calibration["wavelengths"]))

    if more_setup is not None:
        more_setup(self)
15,722
def _insertstatushints(x):
    """Insert hint nodes where status should be calculated (first path)

    This works in bottom-up way, summing up status names and inserting
    hint nodes at 'and' and 'or' as needed. Thus redundant hint nodes
    may be left.

    Returns (status-names, new-tree) at the given subtree, where status-names
    is a sum of status names referenced in the given subtree.
    """
    if x is None:
        return (), x

    op = x[0]
    if op in {'string', 'symbol', 'kindpat'}:
        return (), x
    if op == 'not':
        h, t = _insertstatushints(x[1])
        return h, (op, t)
    if op == 'and':
        ha, ta = _insertstatushints(x[1])
        hb, tb = _insertstatushints(x[2])
        hr = ha + hb
        if ha and hb:
            return hr, ('withstatus', (op, ta, tb), ('string', ' '.join(hr)))
        return hr, (op, ta, tb)
    if op == 'or':
        hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
        hr = sum(hs, ())
        if sum(bool(h) for h in hs) > 1:
            return hr, ('withstatus', (op,) + ts, ('string', ' '.join(hr)))
        return hr, (op,) + ts
    if op == 'list':
        hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
        return sum(hs, ()), (op,) + ts
    if op == 'func':
        f = getsymbol(x[1])
        # don't propagate 'ha' crossing a function boundary
        ha, ta = _insertstatushints(x[2])
        if getattr(symbols.get(f), '_callstatus', False):
            return (f,), ('withstatus', (op, x[1], ta), ('string', f))
        return (), (op, x[1], ta)
    raise error.ProgrammingError('invalid operator %r' % op)
15,723
def make_sine(freq: float, duration: float, sr=SAMPLE_RATE):
    """Return sine wave based on freq in Hz and duration in seconds"""
    N = int(duration * sr)  # Number of samples
    return np.sin(np.pi * 2. * freq * np.arange(N) / sr)
15,724
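A quick usage sketch for make_sine above (not part of the original dataset row; it assumes numpy is imported as np and a SAMPLE_RATE constant such as 44100 is defined in the same module):

# one second of a 440 Hz tone, suitable for plotting or writing to a WAV file
tone = make_sine(440.0, 1.0, sr=44100)
print(tone.shape)  # (44100,)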
def _widget_abbrev(o):
    """Make widgets from abbreviations: single values, lists or tuples."""
    float_or_int = (float, int)
    if isinstance(o, (list, tuple)):
        if o and all(isinstance(x, string_types) for x in o):
            return DropdownWidget(values=[unicode_type(k) for k in o])
        elif _matches(o, (float_or_int, float_or_int)):
            min, max, value = _get_min_max_value(o[0], o[1])
            if all(isinstance(_, int) for _ in o):
                cls = IntSliderWidget
            else:
                cls = FloatSliderWidget
            return cls(value=value, min=min, max=max)
        elif _matches(o, (float_or_int, float_or_int, float_or_int)):
            step = o[2]
            if step <= 0:
                raise ValueError("step must be >= 0, not %r" % step)
            min, max, value = _get_min_max_value(o[0], o[1], step=step)
            if all(isinstance(_, int) for _ in o):
                cls = IntSliderWidget
            else:
                cls = FloatSliderWidget
            return cls(value=value, min=min, max=max, step=step)
    else:
        return _widget_abbrev_single_value(o)
15,725
def test_default_max_reads(device, scaling):
    """
    Test read_measured_value_buffer() without passing the "max_reads" parameter.
    """
    result = device.read_measured_value_buffer(scaling)
    assert type(result) is Sfc5xxxReadBufferResponse
    assert result.scaling == scaling
    assert result.read_count >= 1
    assert result.lost_values >= 0
    assert result.remaining_values >= 0
    assert result.sampling_time >= 0.0
    assert len(result.values) >= 0
15,726
def get_conditions():
    """
    List of conditions
    """
    return [
        'blinded', 'charmed', 'deafened', 'fatigued', 'frightened',
        'grappled', 'incapacitated', 'invisible', 'paralyzed', 'petrified',
        'poisoned', 'prone', 'restrained', 'stunned', 'unconscious',
        'exhaustion'
    ]
15,727
def negative_predictive_value(y_true: np.array, y_score: np.array) -> float:
    """
    Calculate the negative predictive value (duplicated in :func:`precision_score`).

    Args:
        y_true (array-like): An N x 1 array of ground truth values.
        y_score (array-like): An N x 1 array of predicted values.

    Returns:
        npv (float): The negative predictive value.
    """
    tn = true_negative(y_true, y_score)
    fn = false_negative(y_true, y_score)
    npv = tn / (tn + fn)
    return npv
15,728
def flat_list(*alist):
    """
    Flatten a tuple, list, single value or list of lists into a flat list

    e.g.

    >>> flat_list(1,2,3)
    [1, 2, 3]
    >>> flat_list(1)
    [1]
    >>> flat_list([1,2,3])
    [1, 2, 3]
    >>> flat_list([None])
    []
    """
    a = []
    for x in alist:
        if x is None:
            continue
        if isinstance(x, (tuple, list)):
            a.extend([i for i in x if i is not None])
        else:
            a.append(x)
    return a
15,729
def do_regression(X_cols: List[str], y_col: str, df: pd.DataFrame,
                  solver='liblinear', penalty='l1', C=0.2) -> LogisticRegression:
    """
    Performs regression.

    :param X_cols: Independent variables.
    :param y_col: Dependent variable.
    :param df: Data frame.
    :param solver: Solver. Default is liblinear.
    :param penalty: Penalty. Default is ``l1``.
    :param C: Strength of regularization. Default is ``0.2``.
    :return: Logistic regression model.
    """
    X = df[X_cols]
    y = df[y_col]

    model = LogisticRegression(penalty=penalty, solver=solver, C=C)
    model.fit(X, y)

    return model
15,730
def describe_chap_credentials(TargetARN=None):
    """
    Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials
    information for a specified iSCSI target, one for each target-initiator pair.

    See also: AWS API Documentation

    Examples
    Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials
    information for a specified iSCSI target, one for each target-initiator pair.
    Expected Output:

    :example: response = client.describe_chap_credentials(
        TargetARN='string'
    )

    :type TargetARN: string
    :param TargetARN: [REQUIRED]
        The Amazon Resource Name (ARN) of the iSCSI volume target. Use the
        DescribeStorediSCSIVolumes operation to return to retrieve the TargetARN
        for specified VolumeARN.

    :rtype: dict
    :return: {
        'ChapCredentials': [
            {
                'TargetARN': 'string',
                'SecretToAuthenticateInitiator': 'string',
                'InitiatorName': 'string',
                'SecretToAuthenticateTarget': 'string'
            },
        ]
    }
    """
    pass
15,731
def gaussian_smooth(var, sigma):
    """Apply a filter, along the time dimension.

    Applies a gaussian filter to the data along the time dimension. If the time
    dimension is missing, raises an exception. The DataArray that is returned is
    shortened along the time dimension by sigma, half of sigma on each end.

    The width of the window is 2 x sigma + 1.
    """
    if type(var) is not xr.DataArray:
        raise TypeError("First argument must be an Xarray DataArray.")
    if 'time' not in var.dims:
        raise IndexError("Time coordinate not found.")

    # The convolution window must have the same number of dimensions as the
    # variable. The length of every dimension is one, except time, which is
    # 2 x sigma + 1.
    var_dimensions = np.ones(len(var.coords), dtype=np.int)
    timepos = var.dims.index('time')
    var_dimensions[timepos] = 2 * sigma + 1

    # Use a normalized gaussian so the average of the variable does not change.
    gausswin = gaussian(2 * sigma + 1, sigma)
    gausswin = gausswin / np.sum(gausswin)

    # The window series used in the convolve operation is the gaussian for the
    # time dimension and a singleton zero for the other dimensions. This way
    # the multidimensional convolve is:
    #
    #   g(m,n,...) = \sum_k \sum_l ... f[k,l,...] h[k-m] \delta_l0 ...
    #
    timeslice_specification = [0 for x in range(len(var.coords))]
    timeslice_specification[timepos] = slice(None)
    win = np.zeros(var_dimensions)
    win[timeslice_specification] = gausswin

    # The third parameter 'same' specifies a return array of the same shape as var.
    out = convolve(var, win, 'same')
    outda = xr.DataArray(out, name=var.name, coords=var.coords, dims=var.dims)
    outda.attrs = var.attrs

    # # Append "(Gaussian filtered: sigma = ###)" to the end of the variable name.
    # newname = "{0} (Gaussian filtered: sigma = {1})".format(var.name, sigma)
    # outda.name = newname

    return outda
15,732
def make_ood_dataset(ood_dataset_cls: _BaseDatasetClass) -> _BaseDatasetClass:
    """Generate a BaseDataset with in/out distribution labels."""

    class _OodBaseDataset(ood_dataset_cls):
        """Combine two datasets to form one with in/out of distribution labels."""

        def __init__(self,
                     in_distribution_dataset: BaseDataset,
                     shuffle_datasets: bool = False,
                     **kwargs):
            super().__init__(**kwargs)
            # This should be the builder for whatever split will be considered
            # in-distribution (usually the test split).
            self._in_distribution_dataset = in_distribution_dataset
            self._shuffle_datasets = shuffle_datasets

        def load(self,
                 *,
                 preprocess_fn=None,
                 batch_size: int = -1) -> tf.data.Dataset:
            # Set up the in-distribution dataset using the provided dataset builder.
            if preprocess_fn:
                dataset_preprocess_fn = preprocess_fn
            else:
                dataset_preprocess_fn = (
                    self._in_distribution_dataset._create_process_example_fn())  # pylint: disable=protected-access
            dataset_preprocess_fn = ops.compose(
                dataset_preprocess_fn, _create_ood_label_fn(True))
            dataset = self._in_distribution_dataset.load(
                preprocess_fn=dataset_preprocess_fn, batch_size=batch_size)

            # Set up the OOD dataset using this class.
            if preprocess_fn:
                ood_dataset_preprocess_fn = preprocess_fn
            else:
                ood_dataset_preprocess_fn = super()._create_process_example_fn()
            ood_dataset_preprocess_fn = ops.compose(
                ood_dataset_preprocess_fn, _create_ood_label_fn(False))
            ood_dataset = super().load(
                preprocess_fn=ood_dataset_preprocess_fn, batch_size=batch_size)
            # We keep the fingerprint id in both dataset and ood_dataset

            # Combine the two datasets.
            try:
                combined_dataset = dataset.concatenate(ood_dataset)
            except TypeError:
                logging.info(
                    'Two datasets have different types, concat feature and label only')

                def clean_keys(example):
                    # only keep features and labels, remove the rest
                    return {
                        'features': example['features'],
                        'labels': example['labels'],
                        'is_in_distribution': example['is_in_distribution']
                    }

                combined_dataset = dataset.map(clean_keys).concatenate(
                    ood_dataset.map(clean_keys))
            if self._shuffle_datasets:
                combined_dataset = combined_dataset.shuffle(self._shuffle_buffer_size)
            return combined_dataset

        @property
        def num_examples(self):
            return (
                self._in_distribution_dataset.num_examples + super().num_examples)

    return _OodBaseDataset
15,733
def _title_case(value):
    """
    Return the string with only its first letter capitalized; the rest of the
    string is unaffected.
    """
    return value[0].upper() + value[1:]
15,734
def test_vector_laplace_cart(ndim):
    """test different vector laplace operators"""
    bcs = _get_random_grid_bcs(ndim, dx="uniform", periodic="random", rank=1)
    print(bcs)
    field = VectorField.random_uniform(bcs.grid)
    res1 = field.laplace(bcs, backend="scipy").data
    res2 = field.laplace(bcs, backend="numba").data
    assert res1.shape == (ndim,) + bcs.grid.shape
    np.testing.assert_allclose(res1, res2)
15,735
def test_json_schema(runner):
    """Tests that the json schema is in sync with this code."""
    schema_dir = os.path.dirname(os.path.realpath(__file__))
    fname = os.path.join(schema_dir, f'schemas/pxm-manifest-{version}.json')
    with open(fname) as f:
        schema = json.load(f)

    result = runner.invoke(create_manifest, manifest_args)
    doc = json.loads(result.output)

    assert result.exit_code == 0
    # if an exception is raised by validate then the test fails
    validate(doc, schema, format_checker=FormatChecker())
15,736
def zoom_api_call(user, verb, url, *args, **kwargs):
    """
    Perform an API call to Zoom with various checks.

    If the call returns a token expired event, refresh the token and try
    the call one more time.
    """
    if not settings.SOCIAL_AUTH_ZOOM_OAUTH2_KEY:
        raise DRFValidationError(
            "Server is not configured with Zoom OAuth2 credentials."
        )

    if not user.is_authenticated:
        raise DRFValidationError("You are not authenticated.")

    social = user.social_auth.filter(provider="zoom-oauth2").first()
    if social is None:
        raise DRFValidationError("You have not linked your Zoom account yet.")

    is_retry = "retry" in kwargs
    if is_retry:
        del kwargs["retry"]

    out = requests.request(
        verb,
        url.format(uid=social.uid),
        *args,
        headers={"Authorization": f"Bearer {social.get_access_token(load_strategy())}"},
        **kwargs,
    )

    if out.status_code == 204:
        return out

    # check for token expired event
    data = out.json()
    if data.get("code") == 124 and not is_retry:
        social.refresh_token(load_strategy())
        kwargs["retry"] = True
        return zoom_api_call(user, verb, url, *args, **kwargs)

    return out
15,737
def update_search_grammar(extra_consts, in_file, out_file):
    """Let the user provide constants for the synthesis target grammar."""
    current_grammar = None
    with open(in_file, "r") as f:
        current_grammar = f.read()
    if extra_consts:
        consts = "enum SmallStr {{\n {0} \n}}".format(",".join(['"{}"'.format(x) for x in extra_consts]))
        extra = '''
        func mutateCustom: Table r -> Table a, BoolFunc b, ColInt c, SmallStr d {
            row(r) == row(a);
            col(r) == col(a) + 1;
        }
        func filter: Table r -> Table a, BoolFunc b, ColInt c, SmallStr d {
            row(r) < row(a);
            col(r) == col(a);
        }'''
        new_grammar = consts + "\n" + current_grammar + "\n" + extra
    else:
        new_grammar = current_grammar
    with open(out_file, "w") as g:
        g.write(new_grammar)
15,738
def copy_javascript(name):
    """Return the contents of javascript resource file."""
    # TODO use importlib_resources to access javascript file content
    folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "js")
    with open(os.path.join(folder, name + ".js")) as fobj:
        content = fobj.read()
    return content
15,739
def addHtmlImgTagExtension(notionPyRendererCls):
    """A decorator that adds the image tag extension to the argument list.

    The decorator pattern allows us to chain multiple extensions. For example,
    we can create a renderer with extensions A, B, C by writing:
        addAExtension(addBExtension(addCExtension(notionPyRendererCls)))
    """
    def newNotionPyRendererCls(*extraExtensions):
        new_extension = [HTMLBlock, HTMLSpan]
        return notionPyRendererCls(*chain(new_extension, extraExtensions))
    return newNotionPyRendererCls
15,740
def mechaber(mechaber_name):
    """Route function for visualizing and exploring Mechabrim."""
    mechaber = Mechaber.query.filter_by(mechaber_name=mechaber_name).first_or_404()
    # page = request.args.get("page", 1, type=int)
    # mekorot = sefer.mekorot.order_by(Makor.ref).paginate(
    #     page, current_app.config["ELEMENTS_PER_PAGE"], False
    # )
    # next_url = (
    #     url_for("main.sefer", sefername=sefer.name(), page=mekorot.next_num)
    #     if mekorot.has_next
    #     else None
    # )
    # prev_url = (
    #     url_for("main.sefer", sefername=sefer.name(), page=mekorot.prev_num)
    #     if mekorot.has_prev
    #     else None
    # )
    # return render_template('elements/mechaber.html', mechaber=mechaber)
    return render_template("todo.html", mechaber=mechaber)
15,741
def get_symmetry_projectors(character_table, conjugacy_classes, print_results=False):
    """
    :param character_table: each row gives the characters of a different irreducible rep.
        Each column corresponds to a different conjugacy class
    :param conjugacy_classes: List of lists of conjugacy class elements
    :param print_results:
    :return projs:
    """
    if not validate_char_table(character_table, conjugacy_classes):
        raise Exception("invalid character table/conjugacy class combination")

    # columns (or rows, since orthogonal mat) represent basis states that can be
    # transformed into one another by symmetries
    states_related_by_symm = sum([sum([np.abs(g) for g in cc]) for cc in conjugacy_classes])

    # only need sums over conjugacy classes to build projectors
    class_sums = [sum(cc) for cc in conjugacy_classes]

    projs = [reduce_symm_projector(
        sum([np.conj(ch) * cs for ch, cs in zip(chars, class_sums)]),
        chars[0], states_related_by_symm, print_results=print_results)
        for chars in character_table]

    # test projector size
    proj_to_dims = np.asarray([p.shape[0] for p in projs]).sum()
    proj_from_dims = projs[0].shape[1]
    if proj_to_dims != proj_from_dims:
        raise Exception("total span of all projectors was %d, but expected %d." % (proj_to_dims, proj_from_dims))

    return projs
15,742
def check_market_open(data, sheet, row):
    """Exits program if the date from the webpage is the same as the last entry
    in the spreadsheet

    :param data: dict (str:str)
        Contains values scraped from website with keys matching the column titles
        which are found in the titles list
    :param sheet: dict (str:any)
        Contains the details of every cell in the spreadsheet, can call '.value'
        to get specific cell's contents
    :param row: int
        Row number of the first empty row from the top
    """
    if sheet.cell(row=row - 1, column=1).value == data['date']:
        print('Markets are closed today. No update.')
        exit()
15,743
def is_permutation_matrix(matrix: List[List[bool]]) -> bool:
    """Returns whether the given boolean matrix is a permutation matrix."""
    return (all(sum(v) == 1 for v in matrix) and
            sum(any(v) for v in matrix) == len(matrix))
15,744
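A brief illustration of is_permutation_matrix above (not part of the original dataset row):

identity_2x2 = [[True, False], [False, True]]
doubled_row = [[True, True], [False, False]]
print(is_permutation_matrix(identity_2x2))  # True
print(is_permutation_matrix(doubled_row))   # False (first row has two True entries)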
def DPT_Hybrid(pretrained=True, **kwargs):
    """# This docstring shows up in hub.help()
    MiDaS DPT-Hybrid model for monocular depth estimation
    pretrained (bool): load pretrained weights into model
    """
    model = DPTDepthModel(
        path=None,
        backbone="vitb_rn50_384",
        non_negative=True,
    )

    if pretrained:
        checkpoint = (
            "https://github.com/intel-isl/MiDaS/releases/download/v3/dpt_hybrid-midas-501f0c75.pt"
        )
        state_dict = torch.hub.load_state_dict_from_url(
            checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
        )
        model.load_state_dict(state_dict)

    return model
15,745
def show_counts(input_dict):
    """Format dictionary count information into a string

    Args:
        input_dict (dictionary): input keys and their counts

    Return:
        string: formatted output string
    """
    out_s = ''

    in_dict_sorted = {k: v for k, v in sorted(input_dict.items(), key=lambda item: item[1], reverse=True)}
    for idx, (k, v) in enumerate(in_dict_sorted.items()):
        out_s += '\t{}:\t{} ({})\n'.format(idx, k, v)
    out_s += '\n'

    return out_s
15,746
def encipher_railfence(message, rails):
    """
    Performs Railfence Encryption on plaintext and returns ciphertext

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_railfence
    >>> message = "hello world"
    >>> encipher_railfence(message,3)
    'horel ollwd'

    Parameters
    ==========

    message : string, the message to encrypt.
    rails : int, the number of rails.

    Returns
    =======

    The Encrypted string message.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Rail_fence_cipher
    """
    r = list(range(rails))
    p = cycle(r + r[-2:0:-1])
    return ''.join(sorted(message, key=lambda i: next(p)))
15,747
def format_signature(name: str, signature: inspect.Signature) -> str:
    """Formats a function signature as if it were source code.

    Does not yet handle / and * markers.
    """
    params = ', '.join(
        format_parameter(arg) for arg in signature.parameters.values())

    if signature.return_annotation is signature.empty:
        return_annotation = ''
    else:
        return_annotation = ' -> ' + _annotation_name(signature.return_annotation)

    return f'{name}({params}){return_annotation}'
15,748
def extract_ratios_from_ddf(ddf):
    """The same as the df version, but works with dask dataframes instead."""
    # we basically abuse map_partitions' ability to expand indexes for lack of
    # a working groupby(level) in dask
    return ddf.map_partitions(extract_ratios_from_df,
                              meta={'path': str, 'ratio': str, 'url': str}).clear_divisions()
15,749
def check_if_prime(number):
    """Checks if a number is prime.

    Args:
        number (int):

    Raises:
        TypeError: if number is of type float

    Returns:
        [bool]: True if the number is prime, otherwise False
    """
    if type(number) == float:
        raise TypeError("TypeError: entered float type")

    if number > 1:
        for i in range(2, int(number / 2) + 1):
            if number % i == 0:
                return False
        return True
    else:
        return False
15,750
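A quick illustration of check_if_prime above (not part of the original dataset row):

print(check_if_prime(13))  # True
print(check_if_prime(15))  # False (divisible by 3)
print(check_if_prime(1))   # False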
def get_signatures() -> {}:
    """
    Helper method used to identify the valid arguments that can be passed
    to any of the pandas IO functions used by the program

    :return: Returns a dictionary containing the available arguments for each pandas IO method
    """
    # Creates an empty dictionary to collect the function names and signatures
    sigreturn = {}

    # Loops over the functions that are used for IO operations
    for io in PANDAS_IO:
        # Gets the name of the function in question
        funcname = io.__name__

        # Gets the list of arguments that the function can take
        args = list(inspect.signature(io).parameters.keys())

        # Adds the arguments to the dictionary with the function name as the key
        sigreturn[funcname] = args

    # Returns the dictionary object
    return sigreturn
15,751
def test_neoxargs_load_arguments_6_7B_local_setup():
    """
    verify 6-7B.yml can be loaded without raising validation errors
    """
    run_neox_args_load_test(["6-7B.yml", "local_setup.yml"])
15,752
def _load_flags():
    """Load flag definitions.

    It will first attempt to load the file at TINYFLAGS environment variable.
    If that does not exist, it will then load the default flags file bundled
    with this library.

    :returns list: Flag definitions to use.
    """
    path = os.getenv('TINYFLAGS')
    if path and os.path.exists(path) and not os.path.isdir(path):
        try:
            with open(path, 'r') as f:
                return json.load(f)
        except:
            pass
    return []
    # with open(resource_filename('tinyenv', 'config/flags.json'), 'r') as f:
    #     return json.load(f)
15,753
def _get_indentation_option(explicit: Optional[Union[str, int]] = None) -> Optional[str]:
    """Get the value for the ``indentation`` option.

    Args:
        explicit (Optional[Union[str, int]]): the value explicitly specified by user,
            :data:`None` if not specified

    Returns:
        Optional[str]: the value for the ``indentation`` option;
        :data:`None` means *auto detection* at runtime

    :Environment Variables:
        :envvar:`F2FORMAT_INDENTATION` -- the value in environment variable

    See Also:
        :data:`_default_indentation`
    """
    return parse_indentation(explicit or os.getenv('F2FORMAT_INDENTATION') or _default_indentation)
15,754
def batch_answer_same_context(questions: List[str], context: str) -> List[str]:
    """Answers the questions with the given context.

    :param questions: The questions to answer.
    :type questions: List[str]
    :param context: The context to answer the questions with.
    :type context: str
    :return: The answers.
    :rtype: List[str]
    """
    return _batch_answer_same_context[get_mode()](questions, context)
15,755
def test_container_count(dockerc):
    """Verify the test composition and container."""
    # stopped parameter allows non-running containers in results
    assert (
        len(dockerc.containers(stopped=True)) == 1
    ), "Wrong number of containers were started."
15,756
def complex_multiplication(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Multiplies two complex-valued tensors. Assumes the tensor has a named dimension "complex".

    Parameters
    ----------
    x : torch.Tensor
        Input data
    y : torch.Tensor
        Input data

    Returns
    -------
    torch.Tensor
    """
    assert_complex(x, enforce_named=True, complex_last=True)
    assert_complex(y, enforce_named=True, complex_last=True)

    # multiplication = torch.view_as_complex(x.rename(None)) * torch.view_as_complex(
    #     y.rename(None)
    # )
    # return torch.view_as_real(multiplication).refine_names(*x.names)

    # TODO: Unsqueezing is not yet supported for named tensors, fix when it is.
    complex_index = x.names.index("complex")

    real_part = x.select("complex", 0) * y.select("complex", 0) - x.select("complex", 1) * y.select("complex", 1)
    imaginary_part = x.select("complex", 0) * y.select("complex", 1) + x.select("complex", 1) * y.select("complex", 0)

    real_part = real_part.rename(None)
    imaginary_part = imaginary_part.rename(None)

    multiplication = torch.cat(
        [
            real_part.unsqueeze(dim=complex_index),
            imaginary_part.unsqueeze(dim=complex_index),
        ],
        dim=complex_index,
    )

    return multiplication.refine_names(*x.names)
15,757
def dynamic_embedding_lookup(keys: tf.Tensor,
                             config: de_config_pb2.DynamicEmbeddingConfig,
                             var_name: typing.Text,
                             service_address: typing.Text = "",
                             skip_gradient_update: bool = False,
                             timeout_ms: int = -1) -> tf.Tensor:
    """Returns the embeddings of the given keys.

    Args:
        keys: A string `Tensor` of shape [batch_size] or
            [batch_size, max_sequence_length] where an empty string would be
            mapped to an all zero embedding.
        config: A DynamicEmbeddingConfig proto that configures the embedding.
        var_name: A unique name for the given embedding.
        service_address: The address of a knowledge bank service. If empty, the
            value passed from --kbs_address flag will be used instead.
        skip_gradient_update: A boolean indicating if gradient update is needed.
        timeout_ms: Timeout milliseconds for the connection. If negative, never
            times out.

    Returns:
        A `Tensor` with one of the shapes below:
        - [batch_size, config.embedding_dimension] if the input Tensor is 1D, or
        - [batch_size, max_sequence_length, config.embedding_dimension] if the
          input is 2D.

    Raises:
        ValueError: If name is not specified.
    """
    if not var_name:
        raise ValueError("Must specify a valid var_name.")

    # If skip_gradient_update is true, create a dummy variable so that the
    # gradients can be passed in.
    if skip_gradient_update:
        grad_placeholder = tf.constant(0.0)
    else:
        grad_placeholder = tf.Variable(0.0)

    context.add_to_collection(var_name, config)
    resource = gen_carls_ops.dynamic_embedding_manager_resource(
        config.SerializeToString(), var_name, service_address, timeout_ms)

    return gen_carls_ops.dynamic_embedding_lookup(keys, grad_placeholder, resource,
                                                  config.embedding_dimension)
15,758
def register_unary_op(registered_name, operation):
    """Creates a `Transform` that wraps a unary tensorflow operation.

    If `registered_name` is specified, the `Transform` is registered as a member
    function of `Series`.

    Args:
        registered_name: the name of the member function of `Series` corresponding
            to the returned `Transform`.
        operation: a unary TensorFlow operation.
    """
    doc = DOC_FORMAT_STRING.format(operation.__name__, operation.__doc__)

    @property
    def name(self):
        return operation.__name__

    @property
    def input_valency(self):
        return 1

    @property
    def _output_names(self):
        return "output"

    def _apply_transform(self, input_tensors):
        input_tensor = input_tensors[0]
        if isinstance(input_tensor, ops.SparseTensor):
            result = ops.SparseTensor(input_tensor.indices,
                                      operation(input_tensor.values),
                                      input_tensor.shape)
        else:
            result = operation(input_tensor)  # pylint: disable=not-callable
        return self.return_type(result)

    cls = type(operation.__name__,
               (transform.Transform,),
               {"name": name,
                "__doc__": doc,
                "input_valency": input_valency,
                "_output_names": _output_names,
                "_apply_transform": _apply_transform})

    series.Series.register_unary_op(registered_name)(cls)
15,759
def add_climatology(data, clim):
    """Add 12-month climatology to a data array with more times.

    Suppose you have anomalies data and you want to add back its
    climatology to it. In this sense, this function does the opposite
    of `get_anomalies`. Though in this case there is no way to obtain
    the climatology so it has to be provided.

    Parameters
    ----------
    data: xarray.DataArray
        Input must have a named `time` coordinate.
    clim: xarray.DataArray
        The climatology must have the same spatial dimensions as
        `data`. Naturally, the time dimension can differ. The values
        of this array will be replicated as many times as `data` has.

    Returns
    -------
    xarray.DataArray with both fields added.
    """  # noqa
    # make sure shapes are correct
    ddims = len(data.dims)
    cdims = len(clim.dims)
    if ddims != cdims:
        msg = 'both data arrays must have same dimensions'
        raise ValueError(msg)

    # get number of years in dataarray
    years = np.unique(data.time.dt.year)
    nyear = years.size

    # get tiled shape
    tshape = np.ones(ddims, dtype=int)
    tshape[0] = nyear

    # create tiled climatology
    tclim = np.tile(clim.values, tshape)

    # add climatology to data array
    new = data.copy()
    new.values = np.array(data.values) + tclim

    return new
15,760
def broadcast(name, message):
    """ Send a message to all users from the given name. """
    print(message)
    for to_name, conn in users.items():
        if to_name != name:
            try:
                conn.send(message + "\n")
            except socket.error:
                pass
15,761
def test(session):
    """Run tests."""
    tests = session.posargs or [TESTS_DIR]
    session.install("-v", ".[tests]", silent=True)
    session.run("python", "-m", "coverage", "erase")
    session.run(
        "python",
        "-m",
        "pytest",
        "--numprocesses=auto",
        "--cov",
        PACKAGE_DIR,
        "--cov-append",
        "--cov-report=",
        *tests,
    )
15,762
def already_exists(statement: str, lines: List[str]) -> bool:
    """
    Check if statement is in lines
    """
    return any(statement in line.strip() for line in lines)
15,763
def uniform(lower_list, upper_list, dimensions):
    """Return a list of values drawn uniformly at random between the given bounds,
    element-wise if the bounds are iterable, otherwise one value per dimension."""
    if hasattr(lower_list, '__iter__'):
        return [random.uniform(lower, upper)
                for lower, upper in zip(lower_list, upper_list)]
    else:
        return [random.uniform(lower_list, upper_list)
                for _ in range(dimensions)]
15,764
def prepare_data(files, voxel_size, device='cuda'):
    """
    Loads the data and prepares the input for the pairwise registration demo.

    Args:
        files (list): paths to the point cloud files
    """
    feats = []
    xyz = []
    coords = []
    n_pts = []

    for pc_file in files:
        pcd0 = o3d.io.read_point_cloud(pc_file)
        xyz0 = np.array(pcd0.points)

        # Voxelization
        sel0 = ME.utils.sparse_quantize(xyz0 / voxel_size, return_index=True)

        # Make point clouds using voxelized points
        xyz0 = xyz0[sel0[1], :]

        # Get features
        npts0 = xyz0.shape[0]

        xyz.append(to_tensor(xyz0))
        n_pts.append(npts0)
        feats.append(np.ones((npts0, 1)))
        coords.append(np.floor(xyz0 / voxel_size))

    coords_batch0, feats_batch0 = ME.utils.sparse_collate(coords, feats)

    data = {'pcd0': torch.cat(xyz, 0).float(),
            'sinput0_C': coords_batch0,
            'sinput0_F': feats_batch0.float(),
            'pts_list': torch.tensor(n_pts)}

    return data
15,765
def test_select_via_env_var_implicit(env_var, tmpdir):
    """Config file selection can leverage default environment variables."""
    conf_file = tmpdir.join("test-refgenconf-conf.yaml").strpath
    assert not os.path.exists(conf_file)
    with open(conf_file, "w"):
        pass
    assert os.path.isfile(conf_file)
    with TmpEnv(overwrite=True, **{env_var: conf_file}):
        assert conf_file == select_genome_config(None)
15,766
def save_model(model, model_filepath):
    """
    Function: Save a pickle file of the model

    Input:
        model: the classification model
        model_filepath (str): the path of pickle file
    """
    with open(model_filepath, 'wb') as f:
        pickle.dump(model, f)
15,767
def reshape(box, new_size):
    """
    box: (N, 4) in y1x1y2x2 format
    new_size: (N, 2) stack of (h, w)
    """
    box[:, :2] = new_size * box[:, :2]
    box[:, 2:] = new_size * box[:, 2:]
    return box
15,768
def print_voxels_size(path: Path):
    """
    Prints size of voxels in millimeters

    :param path: path to folder containing masks
    :return:
    """
    for scan_path in path.iterdir():
        if scan_path.name.endswith('mask.nii.gz'):
            print(nib.load(str(scan_path)).header.get_zooms())
15,769
def sort_actions(request):
    """Sorts actions after drag 'n drop."""
    action_list = request.POST.get("objs", "").split('&')
    if len(action_list) > 0:
        pos = 10
        for action_str in action_list:
            action_id = action_str.split('=')[1]
            action_obj = Action.objects.get(pk=action_id)
            action_obj.position = pos
            action_obj.save()
            pos = pos + 10

    result = json.dumps({
        "message": _(u"The actions have been sorted."),
    }, cls=LazyEncoder)

    return HttpResponse(result, content_type='application/json')
15,770
def fetch_file(parsed_url, config):
    """
    Fetch a file from Github.
    """
    if parsed_url.scheme != 'github':
        raise ValueError(f'URL scheme must be "github" but is "{parsed_url.scheme}"')

    ghcfg = config.get('github')
    if not ghcfg:
        raise BuildRunnerConfigurationError('Missing configuration for github in buildrunner.yaml')

    nlcfg = ghcfg.get(parsed_url.netloc)
    if not nlcfg:
        gh_cfgs = ', '.join(ghcfg.keys())
        raise BuildRunnerConfigurationError(
            f'Missing github configuration for {parsed_url.netloc} in buildrunner.yaml'
            f' - known github configurations: {gh_cfgs}'
        )

    ver = nlcfg.get('version')
    # NOTE: potentially the v3_fetch_file() works for other github API versions.
    if ver == 'v3':
        contents = v3_fetch_file(parsed_url, nlcfg)
    else:
        raise NotImplementedError(f'No version support for github API version {ver}')

    return contents
15,771
def _build_and_test_cobalt_locally(git_revision):
    """
    Assumes that the current working directory is a Cobalt repo. Checks out
    Cobalt at the given |git_revision| and then builds and tests Cobalt.
    Throws an exception if any step fails.
    """
    subprocess.check_call(['git', 'checkout', git_revision])
    _cobaltb('setup')
    _cobaltb('clean', '--full')
    _cobaltb('build')
    _cobaltb('test')
15,772
def number_of_days(year: int, month: int) -> int:
    """
    Gets the number of days in a given year and month

    :param year:
    :type year:
    :param month:
    :type month:
    :return:
    :rtype:
    """
    assert isinstance(year, int) and 0 <= year
    assert isinstance(month, int) and 0 < month <= 12
    c = calendar.Calendar()
    days = c.itermonthdays(year, month)
    days = set(days)
    # itermonthdays pads with 0 for days outside the month; discard() avoids a
    # KeyError for months that exactly fill whole weeks and have no padding.
    days.discard(0)
    return len(days)
15,773
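A quick illustration of number_of_days above (not part of the original dataset row; assumes the calendar module is imported):

print(number_of_days(2024, 2))  # 29 (leap year)
print(number_of_days(2023, 4))  # 30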
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming str using `incoming` if they're not already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid values
        http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, six.string_types):
        raise TypeError("%s can't be decoded" % type(text))

    if isinstance(text, six.text_type):
        return text

    if not incoming:
        incoming = (sys.stdin.encoding or sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)
15,774
def medstddev(data, mask=None, medi=False, axis=0):
    """
    This function computes the stddev of an n-dimensional ndarray with
    respect to the median along a given axis.

    Parameters:
    -----------
    data: ndarray
        An n-dimensional array from which to calculate the median standard
        deviation.
    mask: ndarray
        Mask indicating the good and bad values of data.
    medi: boolean
        If True return a tuple with (stddev, median) of data.
    axis: int
        The axis along which the median std deviation is calculated.

    Examples:
    --------
    >>> import medstddev as m
    >>> b = np.array([[1, 3, 4,  5, 6,  7, 7],
                      [4, 3, 4, 15, 6, 17, 7],
                      [9, 8, 7,  6, 5,  4, 3]])
    >>> c = np.array([b, 1-b, 2+b])
    >>> std, med = m.medstddev(c, medi=True, axis=2)
    >>> print(median(c, axis=2))
    [[ 5.  6.  6.]
     [-4. -5. -5.]
     [ 7.  8.  8.]]
    >>> print(med)
    [[ 5.  6.  6.]
     [-4. -5. -5.]
     [ 7.  8.  8.]]
    >>> print(std)
    [[ 2.23606798  6.05530071  2.1602469 ]
     [ 2.23606798  6.05530071  2.1602469 ]
     [ 2.23606798  6.05530071  2.1602469 ]]
    >>> # take a look at the first element of std
    >>> d = c[0,0,:]
    >>> print(d)
    [1, 3, 4, 5, 6, 7, 7]
    >>> print(m.medstddev1d(d))
    2.2360679775
    >>> # See medstddev1d for masked examples

    Modification history:
    ---------------------
    2010-11-05  patricio  Written by Patricio Cubillos
                          pcubillos@fulbrightmail.org
    """
    # flag to return median value
    retmed = medi

    # get shape
    shape = np.shape(data)
    # default mask, all good.
    if mask is None:
        mask = np.ones(shape)

    # base case: 1D
    if len(shape) == 1:
        return medstddev1d(data, mask, retmed)

    newshape = np.delete(shape, axis)
    # results
    std = np.zeros(newshape)
    medi = np.zeros(newshape)

    # reduce dimensions until 1D case
    reduce(medstddev1d, data, mask, std, medi, axis)

    # return statement:
    if retmed:
        return (std, medi)
    return std
15,775
def load_npz(filename: FileLike) -> JaggedArray:
    """Load a jagged array in numpy's `npz` format from disk.

    Args:
        filename: The file to read.

    See Also:
        save_npz
    """
    with np.load(filename) as f:
        try:
            data = f["data"]
            shape = f["shape"]
            return JaggedArray(data, shape)
        except KeyError:
            msg = "The file {!r} does not contain a valid jagged array".format(filename)
            raise RuntimeError(msg)
15,776
def _egg_link_name(raw_name: str) -> str:
    """
    Convert a Name metadata value to a .egg-link name, by applying
    the same substitution as pkg_resources's safe_name function.
    Note: we cannot use canonicalize_name because it has a different logic.
    """
    return re.sub("[^A-Za-z0-9.]+", "-", raw_name) + ".egg-link"
15,777
def my_view(request):
    """Displays info details from nabuco user"""
    owner, c = User.objects.get_or_create(username='nabuco')

    # Owner of the object has full permissions, otherwise check RBAC
    if request.user != owner:
        # Get roles
        roles = get_user_roles(request.user, owner)
        # Get operation
        op, c = RBACOperation.objects.get_or_create(name='display')

        # Per-model permission:
        # Has user permission to display groups that nabuco belongs to?
        if not RBACGenericPermission.objects.get_permission(owner, Group, op, roles):
            return HttpResponseForbidden("Sorry, you are not allowed to see nabuco groups")

        # Per-object permission:
        # Has user permission to see this group which nabuco belongs to?
        group_inst = get_object_or_404(Group, name='punks')
        if not RBACPermission.objects.get_permission(owner, group_inst, op, roles):
            return HttpResponseForbidden("Sorry, you are not allowed to see this group details")

    return render_to_response("base.html",
                              {'owner': owner,
                               'model': Group,
                               'model_inst': owner,
                               'operation': op,
                               'roles': roles},
                              context_instance=RequestContext(request))
15,778
def handler500(request): """ Custom 500 view :param request: :return: """ return server_error(request, template_name='base/500.html')
15,779
def get_badpixel_mask(shape, bins):
    """Get the mask of bad pixels and columns.

    Args:
        shape (tuple): Shape of image.
        bins (tuple): CCD bins.

    Returns:
        :class:`numpy.ndarray`: 2D binary mask, where bad pixels are marked
            with *True*, others *False*.

    The bad pixels are found *empirically*.
    """
    mask = np.zeros(shape, dtype=np.bool_)
    if bins == (1, 1) and shape == (4136, 4096):
        ny, nx = shape

        mask[349:352, 627:630]     = True
        mask[349:ny//2, 628]       = True
        mask[1604:ny//2, 2452]     = True
        mask[280:284, 3701]        = True
        mask[274:ny//2, 3702]      = True
        mask[272:ny//2, 3703]      = True
        mask[274:282, 3704]        = True
        mask[1720:1722, 3532:3535] = True
        mask[1720, 3535]           = True
        mask[1722, 3532]           = True
        mask[1720:ny//2, 3533]     = True
        mask[347:349, 4082:4084]   = True
        mask[347:ny//2, 4083]      = True
        mask[ny//2:2631, 1909]     = True
    else:
        raise ValueError('No bad pixel information for this CCD size.')

    return mask
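# Minimal usage sketch: the mask is only defined empirically for the unbinned
# 4136 x 4096 layout, so any other shape/bins combination raises ValueError.
mask = get_badpixel_mask((4136, 4096), bins=(1, 1))
print(mask.shape, mask.dtype, int(mask.sum()))  # (4136, 4096) bool <count of flagged pixels>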
15,780
def maTotalObjectMemory(): """__NATIVE__ /* Wrapper generated for: */ /* int maTotalObjectMemory(void); */ PmReturn_t retval = PM_RET_OK; int func_retval; pPmObj_t p_func_retval = C_NULL; /* If wrong number of args, raise TypeError */ if (NATIVE_GET_NUM_ARGS() != 0) { PM_RAISE(retval, PM_RET_EX_TYPE); return retval; } func_retval = maTotalObjectMemory(); retval = int_new(func_retval, &p_func_retval); NATIVE_SET_TOS(p_func_retval); return retval; """ pass
15,781
def main(argv=None):
    """Entry point for the CLI interface
    """
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--fullness", nargs='?', const="", type=str,
                       help="path to lfs_df text file; summarize fullness of OSTs ")
    group.add_argument("--failure", nargs='?', const="", type=str,
                       help="path to ost_map text file; summarize failure state of OSSes and OSTs")
    parser.add_argument("-o", "--output", type=str, default=None, help="output file")
    parser.add_argument("filesystem", help="logical file system name (e.g., cscratch)")
    parser.add_argument("datetime", help="date and time of interest in YYYY-MM-DDTHH:MM:SS format")
    args = parser.parse_args(argv)

    target_datetime = datetime.datetime.strptime(args.datetime, "%Y-%m-%dT%H:%M:%S")

    if args.failure is not None:
        results = lfsstatus.get_failures(
            args.filesystem,
            target_datetime,
            cache_file=args.failure if args.failure != "" else None)
    elif args.fullness is not None:
        results = lfsstatus.get_fullness(
            args.filesystem,
            target_datetime,
            cache_file=args.fullness if args.fullness != "" else None)
    else:
        raise Exception('Neither --fullness nor --failure were specified')

    # Serialize the object
    cache_file = args.output
    if cache_file is None:
        print(json.dumps(results, indent=4, sort_keys=True))
    else:
        print("Caching to %s" % cache_file)
        with open(cache_file, 'w') as output_file:
            json.dump(results, output_file)
15,782
def model_fn(nn_last_layer, correct_label, learning_rate, num_classes):
    """
    Build the TensorFlow loss and optimizer operations.
    :param nn_last_layer: TF Tensor of the last layer in the neural network
    :param correct_label: TF Placeholder for the correct label image
    :param learning_rate: TF Placeholder for the learning rate
    :param num_classes: Number of classes to classify
    :return: Tuple of (logits, train_op, cross_entropy_loss)
    """
    # Flatten predictions and labels into per-pixel class rows.
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    correct_label = tf.reshape(correct_label, (-1, num_classes))
    # Create the loss function.
    cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
    # Define the optimizer. Adam in this case to have a variable learning rate.
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    # Apply the optimizer to the loss function.
    train_op = optimizer.minimize(cross_entropy_loss)

    return logits, train_op, cross_entropy_loss
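# Hypothetical wiring sketch (TF 1.x API assumed, matching the code above;
# placeholder shapes follow the usual FCN semantic-segmentation setup and are
# not taken from the original source):
num_classes = 2
nn_last_layer = tf.placeholder(tf.float32, (None, None, None, num_classes))
correct_label = tf.placeholder(tf.float32, (None, None, None, num_classes))
learning_rate = tf.placeholder(tf.float32)
logits, train_op, cross_entropy_loss = model_fn(
    nn_last_layer, correct_label, learning_rate, num_classes)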
15,783
def maybe_load_yaml(item):
    """Parses `item` only if it is a string. If `item` is a dictionary
    it is returned as-is.

    Args:
        item:

    Returns: A dictionary.

    Raises:
        ValueError: if unknown type of `item`.
    """
    if isinstance(item, six.string_types):
        # safe_load avoids constructing arbitrary Python objects from YAML.
        return yaml.safe_load(item)
    elif isinstance(item, dict):
        return item
    else:
        raise ValueError("Got {}, expected string or dict".format(type(item)))
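# Quick usage sketch: YAML strings are parsed, dicts pass through unchanged.
assert maybe_load_yaml("learning_rate: 0.01\nbatch_size: 32") == {
    "learning_rate": 0.01, "batch_size": 32}
assert maybe_load_yaml({"already": "parsed"}) == {"already": "parsed"}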
15,784
def histeq(im, nbr_bins=256):
    """Histogram-equalize an image."""
    # get image histogram
    im = np.abs(im)
    imhist, bins = np.histogram(im.flatten(), nbr_bins, density=True)
    cdf = imhist.cumsum()       # cumulative distribution function
    cdf = 255 * cdf / cdf[-1]   # normalize

    # use linear interpolation of cdf to find new pixel values
    im2 = np.interp(im.flatten(), bins[:-1], cdf)

    return im2.reshape(im.shape)
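# Usage sketch: a synthetic low-contrast image gets stretched toward the full
# 0-255 range (numpy assumed imported as np, as histeq itself requires).
rng = np.random.default_rng(0)
im = rng.normal(loc=100.0, scale=5.0, size=(64, 64))   # narrow intensity spread
im_eq = histeq(im)
print(im.min(), im.max())        # narrow range before equalization
print(im_eq.min(), im_eq.max())  # close to 0 and 255 afterwards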
15,785
def txgamma(v, t, gamma, H0):
    """
    Takes in:
        v = values at z=0;
        t = list of redshifts to integrate over;
        gamma = interaction term.

    Returns a function f = [dt/dz, d(a)/dz,
    d(e'_m)/dz, d(e'_de)/dz,
    d(z)/dz,
    d(dl)/dz]
    """
    (t, a, ombar_m, ombar_de, z, dl) = v
    Hz = H0 * (ombar_m + ombar_de)**(1/2)

    if np.isnan(Hz):
        print('txgamma')
        print('z = %s, Hz = %s, gamma = %s, ombar_m = %s, ombar_de = %s'
              %(z, Hz, gamma, ombar_m, ombar_de))

    irate = (gamma/(-t+0.0001))*(1-ombar_de/(ombar_de+ombar_m)) /(1+z)/Hz

    # first derivatives of functions I want to find:
    f = [# dt/dz (= f.d wrt z of time)
        -1/((1+z) * Hz),

        # d(a)/dz (= f.d wrt z of scale factor)
         -(1+z)**(-2),

        # d(ombar_m)/dz   (= f.d wrt z of density_m(t) / crit density(t0))
         3*ombar_m /(1+z) - irate,

        # d(ombar_de)/dz (= f.d wrt z of density_de(t) / crit density(t0))
         irate,

        # d(z)/dz (= f.d wrt z of redshift)
         1,

        # d(dl)/dz (= f.d wrt z of luminosity distance)
         1/Hz] # H + Hdz*(1+z)

    return f
15,786
def save_file(data, path, verbose=False):
    """Creates intermediate directories if they don't exist."""
    dirname = os.path.dirname(path)
    if dirname and not os.path.isdir(dirname):
        os.makedirs(dirname)
    if verbose:
        print(f"Saving: {path}")
    _, ext = os.path.splitext(path)
    if ext == ".pkl":
        with open(path, "wb") as f:
            pickle.dump(data, f, protocol=2)
    elif ext == ".json":
        with open(path, "w") as f:
            json.dump(data, f, indent=4, separators=(",", ": "), sort_keys=True)
            f.write("\n")
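# Usage sketch (paths are made up for illustration): nested directories are
# created on demand and the serializer is picked from the file extension.
save_file({"accuracy": 0.93, "epoch": 12}, "results/run_01/metrics.json", verbose=True)
save_file([1, 2, 3], "results/run_01/raw.pkl")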
15,787
def process_song_file(cur, filepath): """ Function Purpose: Open and process data from song data file to insert into {songs, artists} table Inputs: - filepath: the filepath where the JSON song data file is stored - cur: cursor Outputs: - 'song_data': Insert [song_id, title, artist_id, year, duration] ==> into songs table - 'artist_data': Insert [artist_id, name, location, latitude, longitude] ==> into artists table """ # open song file df = pd.read_json(filepath, lines=True) # ********1- Insert Into song_table******** # insert song record song_data = df.values[0][[7, 8, 0, 9, 5]].tolist() cur.execute(song_table_insert, song_data) # ********2- Insert Into artist_table******** # insert artist record artist_data = df.values[0][[0, 4, 2, 1, 3]].tolist() cur.execute(artist_table_insert, artist_data)
15,788
def text_pb(tag, data, description=None):
    """Create a text tf.Summary protobuf.

    Arguments:
      tag: String tag for the summary.
      data: A Python bytestring (of type bytes), a Unicode string, or
        a numpy data array of those types.
      description: Optional long-form description for this summary, as a
        `str`. Markdown is supported. Defaults to empty.

    Raises:
      TypeError: If the type of the data is unsupported.

    Returns:
      A `tf.Summary` protobuf object.
    """
    try:
        tensor = tensor_util.make_tensor_proto(data, dtype=np.object_)
    except TypeError as e:
        raise TypeError("tensor must be of type string", e)
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description
    )
    summary = summary_pb2.Summary()
    summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor)
    return summary
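# Usage sketch (relies on the TensorBoard summary modules imported by the
# surrounding code; the tag and text below are illustrative only):
summary = text_pb("notes", "hello **markdown**", description="free-form notes")
print(summary.value[0].tag)  # "notes"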
15,789
def sanitize_input(args: dict) -> dict: """ Gets a dictionary for url params and makes sure it doesn't contain any illegal keywords. :param args: :return: """ if "mode" in args: del args["mode"] # the mode should always be detailed trans = str.maketrans(ILLEGAL_CHARS, ' ' * len(ILLEGAL_CHARS)) for k, v in args.copy().items(): if isinstance(v, str): # we only need to verify v because k will never be entered by a user args[k] = v.translate(trans) return args
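# Hedged example: ILLEGAL_CHARS is defined elsewhere in this module, so the
# exact substitutions depend on it; assuming it contains "<" and ">", those
# characters become spaces and the forced "mode" key is dropped.
params = {"mode": "basic", "user": "alice<b>", "limit": 10}
print(sanitize_input(params))  # e.g. {'user': 'alice b ', 'limit': 10}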
15,790
def test_prep_file(text, expected):
    """
    Writes text to a temporary file, processes it with prep_file (which writes
    to an output file), then loads the output file and compares it to the
    expected result.
    :param text:
    :param expected:
    :return:
    """
    infile, infile_filename = tempfile.mkstemp()
    outfile, outfile_filename = tempfile.mkstemp()

    with open(infile_filename, 'w', encoding='ascii') as f:
        f.write(text)

    prep_file(infile_filename, outfile_filename)

    with open(outfile_filename, 'r') as f:
        line = f.readline().strip()

    assert line == expected

    # cleanup
    os.close(infile)
    os.close(outfile)
    os.remove(infile_filename)
    os.remove(outfile_filename)
15,791
def add_notebook(args, library_db): """add a notebook to sqlite database""" import os from src.praxxis.library import sync_library root = (os.path.sep).join(os.path.abspath(args.path).split(os.path.sep)[:-1]) notebook_name = args.path.split(os.path.sep)[-1] print(root) relative_path = "" sync_library.load_notebook(notebook_name, root, library_db, "none", relative_path)
15,792
def sum_to(containers, goal, values_in_goal=0): """ Find all sets of containers which sum to goal, store the number of containers used to reach the goal in the sizes variable. """ if len(containers) == 0: return 0 first = containers[0] remain = containers[1:] if first > goal: with_first = 0 elif first == goal: sizes.append(values_in_goal + 1) with_first = 1 else: with_first = sum_to(remain, goal-first, values_in_goal + 1) return with_first + sum_to(remain, goal, values_in_goal)
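# Small illustration (Advent of Code 2015 day 17 style). `sizes` is a
# module-level list that the function appends to, so it must exist beforehand.
sizes = []
containers = [20, 15, 10, 5, 5]
print(sum_to(containers, 25))  # -> 4 combinations reach exactly 25
print(sizes)                   # -> [2, 2, 2, 3], containers used per combination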
15,793
def Daq_DeleteProbe(label: str) -> None: """Removes probe compensation information from database Parameters ---------- label : str Compensation identifier """ CTS3Exception._check_error(_MPuLib.Daq_DeleteProbe( label.encode('ascii')))
15,794
def rt2add_enc_v1(rt, grid): """ :param rt: n, k, 2 | log[d, tau] for each ped (n,) to each vic (k,) modifies rt during clipping to grid :param grid: (lx, ly, dx, dy, nx, ny) lx, ly | lower bounds for x and y coordinates of the n*k (2,) in rt dx, dy | step sizes of the regular grid nx, ny | number of grid points in each coordinate (so nx*ny total) :return: n, m | m = nx*ny, encoding for each ped uses row-major indexing for the flattened (2d) indices for nx 'rows' and ny 'columns' """ n, k = rt.shape[:2] nx, ny = np.array(grid[-2:]).astype(np.int32) m = nx * ny Z = np.zeros((n, m), dtype=np.float32) clip2grid(rt, grid) # n, k a_x = np.empty((n, k), dtype=np.int32) r_x = np.empty((n, k), dtype=np.float32) np.divmod(rt[..., 0] - grid[0], grid[2], a_x, r_x, casting='unsafe') th_x = 1 - r_x / grid[2] a_y = np.empty((n, k), dtype=np.int32) r_y = np.empty((n, k), dtype=np.float32) np.divmod(rt[..., 1] - grid[1], grid[3], a_y, r_y, casting='unsafe') th_y = 1 - r_y / grid[3] # 1d inds for m, | n, k c_x = ny * a_x + a_y offsets = np.array([0, ny, 1, ny+1], dtype=np.int32) # n, k, 4 inds = c_x[..., np.newaxis] + offsets[np.newaxis, :] vals = np.dstack((th_x*th_y, (1-th_x)*th_y, th_x*(1-th_y), (1-th_x)*(1-th_y))) row_inds = np.repeat(np.arange(n, dtype=np.int32), 4*k) np.add.at(Z, (row_inds, inds.ravel()), vals.ravel()) return Z
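# Illustrative sketch only: clip2grid is defined elsewhere in this module, so
# this runs in that context. Each of the k entries spreads bilinear weights
# over 4 neighbouring cells, so every row of Z sums to k.
grid = (0.0, 0.0, 1.0, 1.0, 5, 5)        # lx, ly, dx, dy, nx, ny
rt = np.random.rand(3, 2, 2) * 4.0       # n=3 peds, k=2 vics, (x, y) pairs
Z = rt2add_enc_v1(rt, grid)
print(Z.shape)        # (3, 25)
print(Z.sum(axis=1))  # ~[2. 2. 2.]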
15,795
def warning(*tokens: Token, **kwargs: Any) -> None: """Print a warning message""" tokens = [brown, "Warning:"] + list(tokens) # type: ignore kwargs["fileobj"] = sys.stderr message(*tokens, **kwargs)
15,796
def i_print_g(text=""): """ prints indented green text on terminal :param text: :return: """ print(Fore.GREEN + Style.BRIGHT + indent(text, prefix=" "))
15,797
def run_standard_p4_command(command, args): """Runs a standard p4 command. Uses exec, so this will be the last function you ever call. Used to transfer control to stock p4 for non-custom commands. """ if command: args = [command] + args os.execvp('p4', ['p4'] + args)
15,798
def draw_with_replacement(heap): """Return ticket drawn with replacement from given heap of tickets. Args: heap (list): an array of Tickets, arranged into a heap using heapq. Such a heap is also known as a 'priority queue'. Returns: the Ticket with the least ticket number in the heap. Side-effects: the heap maintains its size, as the drawn ticket is replaced by the next ticket for that id. Example: >>> x = Ticket('0.234', 'x', 2) >>> y = Ticket('0.354', 'y', 1) >>> z = Ticket('0.666', 'z', 2) >>> heap = [] >>> heapq.heappush(heap, x) >>> heapq.heappush(heap, y) >>> heapq.heappush(heap, z) >>> heap [Ticket(ticket_number='0.234', id='x', generation=2), Ticket(ticket_number='0.354', id='y', generation=1), Ticket(ticket_number='0.666', id='z', generation=2)] >>> draw_with_replacement(heap) Ticket(ticket_number='0.234', id='x', generation=2) >>> heap [Ticket(ticket_number='0.354', id='y', generation=1), Ticket(ticket_number='0.666', id='z', generation=2), Ticket(ticket_number='0.54783080274940261636464668679572\ 2512609112766306951592422621788875312684400211', id='x', generation=3)] """ ticket = heapq.heappop(heap) heapq.heappush(heap, next_ticket(ticket)) return ticket
15,799