Dataset columns: content (string, lengths 22 to 815k) · id (int64, 0 to 4.91M)
def convert_string(inpt):
    """Return the string value of the input.

    >>> convert_string(1)
    '1'
    """
    if PY2:
        return str(inpt).decode()
    else:
        return str(inpt)
5,338,800
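The PY2 flag used above is not defined in this sample; a minimal sketch of how such a flag is commonly derived (an assumption, not taken from the original source):

import sys

# Hypothetical definition of the PY2 flag assumed by convert_string above.
PY2 = sys.version_info[0] == 2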
def get_ipc_kernel(imdark, tint, boxsize=5, nchans=4, bg_remove=True, hotcut=[5000,50000], calc_ppc=False, same_scan_direction=False, reverse_scan_direction=False): """ Derive IPC/PPC Convolution Kernels Find the IPC and PPC kernels used to convolve detector pixel data. Finds all hot pixels within hotcut parameters and measures the average relative flux within adjacent pixels. Parameters ========== Keyword Parameters ================== boxsize : int Size of the box. Should be odd, but if even, then adds +1. bg_remove : bool Remove the average dark current values for each hot pixel cut-out. Only works if boxsize>3. hotcut : array-like Min and max values of hot pixels (above bg and bias) to cosider. calc_ppc : bool Calculate and return post-pixel coupling? same_scan_direction : bool Are all the output channels read in the same direction? By default fast-scan readout direction is ``[-->,<--,-->,<--]`` If ``same_scan_direction``, then all ``-->`` reverse_scan_direction : bool If ``reverse_scan_direction``, then ``[<--,-->,<--,-->]`` or all ``<--`` """ ny, nx = imdark.shape chsize = int(ny / nchans) imtemp = imdark * tint boxhalf = int(boxsize/2) boxsize = int(2*boxhalf + 1) distmin = np.ceil(np.sqrt(2.0) * boxhalf) # Get rid of pixels around border pixmask = ((imtemp>hotcut[0]) & (imtemp<hotcut[1])) pixmask[0:4+boxhalf, :] = False pixmask[-4-boxhalf:, :] = False pixmask[:, 0:4+boxhalf] = False pixmask[:, -4-boxhalf:] = False # Ignore borders between amplifiers for ch in range(1, nchans): x1 = ch*chsize - boxhalf x2 = x1 + 2*boxhalf pixmask[:, x1:x2] = False indy, indx = np.where(pixmask) nhot = len(indy) if nhot < 2: print("No hot pixels found!") return None # Only want isolated pixels # Get distances for every pixel # If too close, then set equal to 0 for i in range(nhot): d = np.sqrt((indx-indx[i])**2 + (indy-indy[i])**2) ind_close = np.where((d>0) & (d<distmin))[0] if len(ind_close)>0: pixmask[indy[i], indx[i]] = 0 indy, indx = np.where(pixmask) nhot = len(indy) if nhot < 2: print("No hot pixels found!") return None # Stack all hot pixels in a cube hot_all = [] for iy, ix in zip(indy, indx): x1, y1 = np.array([ix,iy]) - boxhalf x2, y2 = np.array([x1,y1]) + boxsize sub = imtemp[y1:y2, x1:x2] # Flip channels along x-axis for PPC if calc_ppc: # Check if an even or odd channel (index 0) for ch in np.arange(0,nchans,2): even = True if (ix > ch*chsize) and (ix < (ch+1)*chsize-1) else False if same_scan_direction: flip = True if reverse_scan_direction else False elif even: flip = True if reverse_scan_direction else False else: flip = False if reverse_scan_direction else True if flip: sub = sub[:,::-1] hot_all.append(sub) hot_all = np.array(hot_all) # Remove average dark current values if boxsize>3 and bg_remove==True: for im in hot_all: im -= np.median([im[0,:], im[:,0], im[-1,:], im[:,-1]]) # Normalize by sum in 3x3 region norm_all = hot_all.copy() for im in norm_all: im /= im[boxhalf-1:boxhalf+2, boxhalf-1:boxhalf+2].sum() # Take average of normalized stack ipc_im_avg = np.median(norm_all, axis=0) # ipc_im_sig = robust.medabsdev(norm_all, axis=0) corner_val = (ipc_im_avg[boxhalf-1,boxhalf-1] + ipc_im_avg[boxhalf+1,boxhalf+1] + ipc_im_avg[boxhalf+1,boxhalf-1] + ipc_im_avg[boxhalf-1,boxhalf+1]) / 4 if corner_val<0: corner_val = 0 # Determine post-pixel coupling value? 
if calc_ppc: ipc_val = (ipc_im_avg[boxhalf-1,boxhalf] + \ ipc_im_avg[boxhalf,boxhalf-1] + \ ipc_im_avg[boxhalf+1,boxhalf]) / 3 if ipc_val<0: ipc_val = 0 ppc_val = ipc_im_avg[boxhalf,boxhalf+1] - ipc_val if ppc_val<0: ppc_val = 0 k_ipc = np.array([[corner_val, ipc_val, corner_val], [ipc_val, 1-4*ipc_val, ipc_val], [corner_val, ipc_val, corner_val]]) k_ppc = np.zeros([3,3]) k_ppc[1,1] = 1 - ppc_val k_ppc[1,2] = ppc_val return (k_ipc, k_ppc) # Just determine IPC else: ipc_val = (ipc_im_avg[boxhalf-1,boxhalf] + ipc_im_avg[boxhalf,boxhalf-1] + ipc_im_avg[boxhalf,boxhalf+1] + ipc_im_avg[boxhalf+1,boxhalf]) / 4 if ipc_val<0: ipc_val = 0 kernel = np.array([[corner_val, ipc_val, corner_val], [ipc_val, 1-4*ipc_val, ipc_val], [corner_val, ipc_val, corner_val]]) return kernel
5,338,801
def magma_finalize():
    """
    Finalize MAGMA.
    """
    status = _libmagma.magma_finalize()
    magmaCheckStatus(status)
5,338,802
def get_description(): """ Return a dict describing how to call this plotter """ desc = dict() desc['data'] = True desc['cache'] = 86400 desc['description'] = """This plot presents the trailing X number of days temperature or precipitation departure from long term average. You can express this departure either in Absolute Departure or as a Standard Deviation. The Standard Deviation option along with precipitation is typically called the "Standardized Precipitation Index". <p>The plot also contains an underlay with the weekly US Drought Monitor that is valid for the station location. If you plot a climate district station, you get the US Drought Monitor valid for the district centroid. If you plot a statewide average, you get no USDM included. """ today = datetime.date.today() sts = today - datetime.timedelta(days=720) desc['arguments'] = [ dict(type='station', name='station', default='IA0200', label='Select Station:', network='IACLIMATE'), dict(type='int', name='p1', default=31, label='First Period of Days'), dict(type='int', name='p2', default=91, label='Second Period of Days'), dict(type='int', name='p3', default=365, label='Third Period of Days'), dict(type='date', name='sdate', default=sts.strftime("%Y/%m/%d"), min='1893/01/01', label='Start Date of Plot'), dict(type='date', name='edate', default=today.strftime("%Y/%m/%d"), min='1893/01/01', label='End Date of Plot'), dict(type='select', name='pvar', default='precip', options=PDICT, label='Which variable to plot?'), dict(type='select', name='how', default='diff', options=PDICT2, label='How to Express Departure?'), ] return desc
5,338,803
def SpringH(z, m, k):
    """Total energy (kinetic + potential) of the spring system; ``z`` has shape (bs, 2nd)."""
    D = z.shape[-1]  # number of ODE dims, 2*num_particles*space_dim
    q = z[:, :D//2].reshape(*m.shape, -1)
    p = z[:, D//2:].reshape(*m.shape, -1)
    return EuclideanK(p, m) + SpringV(q, k)
5,338,804
def write_sushi_input_files(lhafile): """ Add SusHi-related blocks to LHA file """ outfiles = {} for higgsname, higgstype in {'H': 12, 'A': 21}.iteritems(): lha = LHA(lhafile) sushi = Block('SUSHI', comment='SusHi specific') sushi.add(Entry([1, 2], comment='Select 2HDM')) sushi.add(Entry([2, higgstype], comment='h / H / A')) sushi.add(Entry([3, 0], comment='p-p collisions')) sushi.add(Entry([4, 13000], comment='E_cm')) sushi.add(Entry([5, 2], comment='ggH at NNLO')) sushi.add(Entry([6, 2], comment='bbH at NNLO')) sushi.add(Entry([7, 2], comment='SM EW content')) sushi.add(Entry([19, 1], comment='Verbosity')) sushi.add(Entry([20, 0], comment='All processes')) lha.add_block(sushi) thdm = Block('2HDM', '2HDM parameters') #thdm.add(Entry([1], comment='Type I')) #thdm.add(Entry([2], comment='Type II')) thdm.add(Entry([4], comment='Type IV')) lha.add_block(thdm) distrib = Block('DISTRIB', comment='Kinematic requirements') distrib.add(Entry([1, 0], comment='Sigma total')) distrib.add(Entry([2, 0], comment='Disable pT cut')) #distrib.add(Entry([21, GENER_SETTINGS['higgs_pt_min']], comment='Min higgs pT')) distrib.add(Entry([3, 0], comment='Disable eta cut')) #distrib.add(Entry([32, GENER_SETTINGS['higgs_eta_max']], comment='Max eta')) distrib.add(Entry([4, 1], comment='Use eta, not y')) lha.add_block(distrib) pdfspec = Block('PDFSPEC') pdfspec.add(Entry([1, 'MMHT2014lo68cl.LHgrid'], comment='Name of pdf (lo)')) pdfspec.add(Entry([2, 'MMHT2014nlo68cl.LHgrid'], comment='Name of pdf (nlo)')) pdfspec.add(Entry([3, 'MMHT2014nnlo68cl.LHgrid'], comment='Name of pdf (nnlo)')) pdfspec.add(Entry([4, 'MMHT2014nnlo68cl.LHgrid'], comment='Name of pdf (n3lo)')) pdfspec.add(Entry([10, 0], comment='Set number')) lha.add_block(pdfspec) lha.get_block('SMINPUTS').add(Entry([8, 1.275], comment='m_c')) # Write output suffix = '_%s_sushi.in' % higgsname outname = lhafile.replace('.lha', suffix) lha.write(outname) outfiles[higgsname] = outname return outfiles
5,338,805
def test_fetch_market_trade_data_dataframe(): """Tests downloading of market and trade/order data from dataframe """ from tcapy.data.databasesource import DatabaseSourceCSV ### Get market data market_loader = Mediator.get_tca_market_trade_loader() market_data_store = DatabaseSourceCSV(market_data_database_csv=csv_market_data_store).fetch_market_data( ticker=ticker, start_date=start_date, finish_date=finish_date) dataframe_trade_order_mapping = OrderedDict() for k in csv_trade_order_mapping.keys(): dataframe_trade_order_mapping[k] = DatabaseSourceCSV(trade_data_database_csv=csv_trade_order_mapping[k]).fetch_trade_order_data( ticker=ticker, start_date=start_date, finish_date=finish_date) # for a high level trade data request, we need to use TCA request, because it usually involves some # market data download (we are assuming that the market data is being downloaded from our arctic database) # eg. for converting notionals to reporting currency tca_request = TCARequest( start_date=start_date, finish_date=finish_date, ticker=ticker, trade_data_store='dataframe', market_data_store=market_data_store, trade_order_mapping=dataframe_trade_order_mapping ) for t in trade_order_list: trade_order_df = market_loader.get_trade_order_data(tca_request, t) try: trade_order_df = Mediator.get_volatile_cache().get_dataframe_handle(trade_order_df) except: pass assert not trade_order_df.empty \ and trade_order_df.index[0] >= pd.Timestamp(start_date).tz_localize('utc') \ and trade_order_df.index[-1] <= pd.Timestamp(finish_date).tz_localize('utc') ### Test using DataFactory and DatabaseSource from tcapy.data.datafactory import DataFactory data_factory = DataFactory() for t in trade_order_list: ### Test using DataFactory trade_request = TradeRequest(start_date=start_date, finish_date=finish_date, ticker=ticker, data_store='dataframe', trade_order_mapping=dataframe_trade_order_mapping, trade_order_type=t) trade_order_df = data_factory.fetch_table(trade_request) assert not trade_order_df.empty \ and trade_order_df.index[0] >= pd.Timestamp(start_date).tz_localize('utc') \ and trade_order_df.index[-1] <= pd.Timestamp(finish_date).tz_localize('utc') ### Test using DatabaseSourceDataFrame from tcapy.data.databasesource import DatabaseSourceDataFrame database_source = DatabaseSourceDataFrame() trade_order_df = database_source.fetch_trade_order_data(start_date, finish_date, ticker, table_name=dataframe_trade_order_mapping[t]) assert not trade_order_df.empty \ and trade_order_df.index[0] >= pd.Timestamp(start_date).tz_localize('utc') \ and trade_order_df.index[-1] <= pd.Timestamp(finish_date).tz_localize('utc')
5,338,806
def pytest_collection(session): # pylint: disable=unused-argument """Monkey patch lru_cache, before any module imports occur.""" # Gotta hold on to this before we patch it away old_lru_cache = functools.lru_cache @wraps(functools.lru_cache) def lru_cache_wrapper(*args, **kwargs): """Wrap lru_cache decorator, to track which functions are decorated.""" # Apply lru_cache params (maxsize, typed) decorated_function = old_lru_cache(*args, **kwargs) # Mimicking lru_cache: https://github.com/python/cpython/blob/v3.7.2/Lib/functools.py#L476-L478 @wraps(decorated_function) def decorating_function(user_function): """Wraps the user function, which is what everyone is actually using. Including us.""" wrapper = decorated_function(user_function) CACHED_FUNCTIONS.append(wrapper) return wrapper return decorating_function # Monkey patch the wrapped lru_cache decorator functools.lru_cache = lru_cache_wrapper yield # Be a good citizen and undo our monkeying functools.lru_cache = old_lru_cache
5,338,807
def __graph_laplacian(mtx):
    r"""
    Compute the graph Laplacian of the matrix.

    .. math:: L = \mathrm{diag}\Big(\sum_i A_{ij}\Big) - A
    """
    L = np.diag(np.sum(mtx, 0)) - mtx
    return L
5,338,808
def moon_illumination(phase: float) -> float:
    """Calculate the percentage of the moon that is illuminated.

    Currently this value increases approximately linearly in time from new
    moon to full, and then linearly back down until the next new moon.

    Args:
        phase: float
            The phase angle of the Moon, in degrees.

    Returns:
        illumination: float
            The percentage of the Moon that is illuminated.
    """
    return 100 * (1 - np.abs(phase - 180) / 180)
5,338,809
def integ_test(gateway_host=None, test_host=None, destroy_vm="True"):
    """
    Run the integration tests. This defaults to running on local vagrant
    machines, but can also be pointed to an arbitrary host (e.g. amazon) by
    passing "address:port" as arguments

    gateway_host: The ssh address string of the machine to run the gateway
        services on. Formatted as "host:port". If not specified, defaults to
        the `cwag` vagrant box.

    test_host: The ssh address string of the machine to run the tests on.
        Formatted as "host:port". If not specified, defaults to the
        `cwag_test` vagrant box.
    """
    destroy_vm = bool(strtobool(destroy_vm))

    # Setup the gateway: use the provided gateway if given, else default to
    # the vagrant machine
    if not gateway_host:
        vagrant_setup("cwag", destroy_vm)
    else:
        ansible_setup(gateway_host, "cwag", "cwag_dev.yml")

    execute(_copy_config)
    execute(_start_gateway)

    # Run the tests: use the provided test machine if given, else default to
    # the vagrant machine
    if not test_host:
        vagrant_setup("cwag_test", destroy_vm)
    else:
        ansible_setup(test_host, "cwag_test", "cwag_test.yml")

    execute(_start_ue_simulator)
    execute(_run_unit_tests)
    execute(_run_integ_tests)
5,338,810
def convex_hull_mask_iou(points_uv, im_shape, gt_hull_mask): """Computes masks by calculating a convex hull from points. Creates two masks (if possible), one for the estimated foreground pixels and one for the estimated background pixels. Args: points_uv: (2, N) Points in u, v coordinates im_shape: image shape [image_height, im_width] gt_hull_mask: mask created by calculating convex hull Returns: best_iou: best mask iou calculated from the calculated hull masks and the ground truth hull mask """ im_height, im_width = im_shape # Segment the points into background and foreground if len(set(points_uv[0])) > 1: thresh = filters.threshold_li(points_uv[0]) pred_seg_1 = points_uv[0] > thresh pred_seg_2 = points_uv[0] < thresh segs = [pred_seg_1, pred_seg_2] else: # There is only one unique point so a threshold cannot be made segs = [np.full(points_uv[0].shape, True, dtype=bool)] mask_list = [] # Loop over both segments since it is uncertain which segment is foreground or background for seg in segs: # Obtain the coordinates of the pixels pred_u = np.int32(points_uv[0][seg]) pred_v = np.int32(points_uv[1][seg]) # Remove duplicate coordinates by forming a set coords = set(zip(pred_u, pred_v)) # Convex hull calculation requires a numpy array coords = np.array(list(coords)) # Need at least 3 points to create convex hull if len(coords) < 3: continue # Points must not lie along a single line in order to create convex hull elif any(np.all(coords == coords[0, :], axis=0)): continue else: hull = ConvexHull(coords) img = Image.new('L', (im_width, im_height), 0) vertices = list(zip(coords[hull.vertices, 0], coords[hull.vertices, 1])) ImageDraw.Draw(img).polygon(vertices, outline=1, fill=1) mask = np.array(img) mask_list.append(mask) best_iou = 0 for mask in mask_list: iou = evaluation.mask_iou(mask, gt_hull_mask) if iou > best_iou: best_iou = iou return best_iou
5,338,811
def remove_dataset_from_disk(interval_list_dataset, version=None, dest_path=CACHE_PATH):
    """
    Remove the full-seq dataset from the disk.

    Parameters:
        interval_list_dataset (str or Path): Either a path or a name of a dataset included in this package.
        version (int): Version of the dataset.
        dest_path (str or Path): Folder where the full-seq dataset is stored.
    """
    interval_list_dataset = _guess_location(interval_list_dataset)
    metadata = _check_dataset_existence(interval_list_dataset, version)
    dataset_name = _get_dataset_name(interval_list_dataset)
    path = Path(dest_path) / dataset_name
    if path.exists():
        shutil.rmtree(path)
5,338,812
def run(text, base_dir, debug_filename, symbols = set()): """Rudimentary resolver for the following preprocessor commands: // #include <some-file> (no check for cyclic includes!) // #ifdef | #if <symbol> // <contents> // [ #elif // <alt-contents> ]* // [ #else // <alt-contents> ] // #endif """ out = [] stack = [] lines = text.split('\n') l_iter = iter(zip(range(1, len(lines)+1),lines)) push_line = None nline = -1 def error(msg): raise Exception(msg + ' @ ' + debug_filename + ':' + str(nline)) while True: try: nline, line = push_line or next(l_iter) push_line = None except StopIteration: break match = line_re.match(line) if match: skip_branch = False cmd = match.group(1) if cmd == 'include': name = match.group(2).strip('<>"\'') fpath = os.path.join(base_dir, name) print 'handling js #include: ' + fpath with open( fpath, 'rt' ) as inp: out.append(run(inp.read(), os.path.split(fpath)[1], name, symbols)) elif cmd in ['if', 'ifdef', 'ifndef']: val = eval_conditional(match.group(2), symbols) if cmd == 'ifndef': val = not val print('eval: ' + cmd + ' ' + match.group(2) + ' as ' + str(val)) skip_branch = not val stack.append(val) elif cmd in ['else', 'elif']: if not stack: error('syntax error, unexpected ' + cmd) # has been handled before? if stack[-1]: skip_branch = True elif cmd != 'elif' or eval_conditional(match.group(2), symbols): stack[-1] = True else: skip_branch = True elif cmd == 'endif': if not stack: error('syntax error, unexpected endif') continue stack.pop() else: error('define/ifdef/endif/else currently ignored') if skip_branch: # skip everything up to the next elif/else/endif at the same nesting level nesting = 1 while True: try: nline, line = next(l_iter) match = line_re.match(line) if match: done = False cmd = match.group(1) if cmd in ['if', 'ifdef']: nesting += 1 elif cmd == 'endif': nesting -= 1 if nesting == 0: done = True if cmd in ['else', 'elif'] and nesting == 1: done = True if done: push_line = nline, line break except StopIteration: error('syntax error, unexpected EOF') return else: out.append(line) return '\n'.join(out)
5,338,813
def load_callback(module: ModuleType, event: Event) -> Callable[..., Awaitable[None]]: """ Load the callback function from the handler module """ callback = getattr(module, "handler") if not inspect.iscoroutinefunction(callback): raise TypeError( f"expected 'coroutine function' for 'handler', got {type(callback).__name__!r}" ) signature = inspect.signature(callback) params = dict(signature.parameters) # Construct the model from the callback for manual events if isinstance(event, ManualEvent): expect_returns(signature, None, Response, allow_unannotated=True) event.model = build_model_from_params(params) # Ensure the signature is passed the same parameters as the event sends elif isinstance(event, AutomatedEvent): expect_returns(signature, None, allow_unannotated=True) # Get the model parameters model_signature = inspect.signature(event.input_validator) model_params = dict(model_signature.parameters) validate_automated_signature(params, model_params) return callback
5,338,814
def read_config_key(fname='', existing_dict=None, delim=None): """ Read a configuration key. """ # Check file existence if os.path.isfile(fname) is False: logger.error("I tried to read key "+fname+" but it does not exist.") return(existing_dict) logger.info("Reading: "+fname) # Expected Format expected_words = 3 expected_format = "config_type config_name params_as_dict" # Open File infile = open(fname, 'r') # Initialize the dictionary if existing_dict is None: out_dict = {} else: out_dict = existing_dict # Loop over the lines lines_read = 0 while True: line = infile.readline() if len(line) == 0: break if skip_line(line, expected_words=expected_words, delim=delim, expected_format=expected_format): continue this_type, this_value, this_params = parse_one_line(line, delim=delim) # Check if the type of entry is new if (this_type in out_dict.keys()) == False: out_dict[this_type] = {} # Initialize a configuration on the first entry - configs can have several lines if (this_value not in out_dict[this_type].keys()): out_dict[this_type][this_value] = {} # Parse the parameters as a literal try: this_params_dict = ast.literal_eval(this_params) except: logger.error("Could not parse parameters as a dictionary. Line is: ") logger.error(line) continue # Now read in parameters. To do this, define templates for # expected fields and data types for each type of # configuration. Check to match these. if this_type == "array_tag": expected_params = { 'timebin':'0s', } if this_type == "interf_config": expected_params = { 'array_tags':[], 'res_min_arcsec':0.0, 'res_max_arcsec':0.0, 'res_min_pc':0.0, 'res_max_pc':0.0, 'res_step_factor':1.0, 'res_list':[], 'clean_scales_arcsec':[] } if this_type == "feather_config": expected_params = { 'interf_config':'', 'res_min_arcsec':0.0, 'res_max_arcsec':0.0, 'res_step_factor':1.0, 'res_min_pc':0.0, 'res_max_pc':0.0, 'res_list':[] } if this_type == "line_product": expected_params = { 'line_tag':'', 'channel_kms':0.0, 'statwt_edge_kms':50.0, 'fitorder':0, 'combinespw':False, 'lines_to_flag':[], } if this_type == "cont_product": expected_params = { 'freq_ranges_ghz':[], 'channel_ghz':0.0, 'lines_to_flag':[] } # Check configs for expected name and data type for this_key in this_params_dict.keys(): if this_key not in expected_params.keys(): logger.error('Got an unexpected parameter key. Line is:') logger.error(line) continue if type(this_params_dict[this_key]) != type(expected_params[this_key]): logger.error('Got an unexpected parameter type for parameter '+str(this_key)+'. Line is:') logger.error(line) continue if this_key in out_dict[this_type][this_value].keys(): logger.debug("Got a repeat parameter definition for "+this_type+" "+this_value) logger.debug("Parameter "+this_key+" repeats. Using the latest value.") out_dict[this_type][this_value][this_key] = this_params_dict[this_key] lines_read += 1 infile.close() logger.info("Read "+str(lines_read)+" lines into a configuration definition dictionary.") return(out_dict)
5,338,815
def animeuser_auto_logical_delete():
    """Logically delete AnimeUser records that have survived past the allowed number of days."""
    logical_divide_day: str = os.getenv("LOGICAL_DIVIDE_DAY", default="3")
    logical_divide_day_int: int = int(logical_divide_day)
    divide_datetime: datetime.datetime = datetime.datetime.now() - datetime.timedelta(
        days=logical_divide_day_int
    )
    animeroom_queryset = AnimeUser.objects.alive().filter(
        updated_at__lte=divide_datetime
    )
    # QuerySet.delete() performs the (soft) delete directly; querysets have no save() method.
    animeroom_queryset.delete()
5,338,816
def start_app() -> None:
    """Start Experiment Registry."""
    bcipy_gui = app(sys.argv)
    ex = ExperimentRegistry(
        title='Experiment Registry',
        height=700,
        width=600,
        background_color='black')
    sys.exit(bcipy_gui.exec_())
5,338,817
def launch_lambdas(total_count, lambda_arn, lambda_args, dlq_arn, cubes_arn, downsample_queue_url, receipt_handle): """Launch lambdas to process all of the target cubes to downsample Launches an initial set of lambdas and monitors the cubes SQS queue to understand the current status. If the count in the queue doesn't change for UNCHANGING_LAUNCH cycles then it will calculate how many more lambdas to launch and launch them. If the queue count doesn't change after launching more lambdas an exception will eventually be raised so the activity is not hanging forever. Args: total_count (int): The initial number of lambdas to launch lambda_arn (str): Name or ARN of the lambda function to invoke lambda_args (str): The lambda payload to pass when invoking dlq_arn (str): ARN of the SQS DLQ to monitor for error messages cubes_arn (str): ARN of the input cubes SQS queue to monitor for completion of the downsample downsample_queue_url (str): URL of downsample job queue receipt_handle (str): Handle of message from downsample queue """ per_lambda = ceildiv(total_count, POOL_SIZE) d,m = divmod(total_count, per_lambda) counts = [per_lambda] * d if m > 0: counts += [m] assert sum(counts) == total_count, "Didn't calculate counts per lambda correctly" log.debug("Launching {} lambdas in chunks of {} using {} processes".format(total_count, per_lambda, POOL_SIZE)) args = ((count, lambda_arn, lambda_args, dlq_arn) for count in counts) start = datetime.now() with Pool(POOL_SIZE) as pool: pool.starmap(invoke_lambdas, args) stop = datetime.now() log.info("Launched {} lambdas in {}".format(total_count, stop - start)) # Finished launching lambdas, need to wait for all to finish log.info("Finished launching lambdas") polling_start = datetime.now() previous_count = 0 count_count = 1 zero_count = 0 while True: if check_queue(dlq_arn) > 0: raise FailedLambdaError() count = check_queue(cubes_arn) log.debug("Status polling - count {}".format(count)) log.debug("Throttling count {}".format(lambda_throttle_count(lambda_arn))) if count == previous_count: count_count += 1 if count_count == UNCHANGING_MAX: raise ResolutionHierarchyError("Status polling stuck at {} items for {}".format(count, polling_start - datetime.now())) if count_count == UNCHANGING_THROTTLE: # If the throttle count is increasing -> Sleep # If the throttle count is decreasing # If the cubes queue count has changed -> Continue regular polling # If the cubes queue count has not changed -> Sleep # If the throttle count is zero -> Continue regular polling # # This means that this loop will block until throttle has stopped / cubes # in the queue have been processed. 
# # If throttling stops and no cubes have been processed the UNCHANGING_MAX # threashold is the last guard so the activity doesn't hang prev_throttle = 0 while True: throttle = lambda_throttle_count(lambda_arn) if throttle < prev_throttle and check_queue(cubes_arn) != count: # If the throttle count is decreasing and the queue count has # changed continue the regular polling cycle break if throttle == 0: # No throttling happening break if throttle > 0: # Don't update count is there was an error getting the current count prev_throttle = throttle # Tell SQS we're still alive update_visibility_timeout(downsample_queue_url, receipt_handle) time.sleep(MAX_LAMBDA_TIME.seconds) if check_queue(dlq_arn) > 0: raise FailedLambdaError() if count_count == UNCHANGING_LAUNCH: # We have noticed that the last few messages are spread across multiple AWS queue servers and # A single lambda requesting 10 messages will only get messages from a single queue server. So we # pad the number of lambdas by EXTRAS_LAMBDAS to avoid extra looping cycles. needed = ceildiv(count, BUCKET_SIZE) if needed > 0: log.debug("Launching {} more lambdas".format(needed)) start = datetime.now() invoke_lambdas(needed + EXTRA_LAMBDAS, lambda_arn, lambda_args, dlq_arn) stop = datetime.now() log.debug("Launched {} lambdas with {} extra in {}".format(needed, EXTRA_LAMBDAS, stop - start)) else: previous_count = count count_count = 1 if count == 0: zero_count += 1 if zero_count == ZERO_COUNT: log.info("Finished polling for lambda completion") break else: log.info("Zero cubes left, waiting to make sure lambda finishes") else: zero_count = 0 # Tell SQS we're still alive update_visibility_timeout(downsample_queue_url, receipt_handle) time.sleep(MAX_LAMBDA_TIME.seconds)
5,338,818
def send_result_mail(adress, link):
    """Create and send a mail with the download link to the given address."""
    # Parse address list (comma, semicolon or space separated)
    if "," in adress:
        splitchar = ","
    elif ";" in adress:
        splitchar = ";"
    else:
        splitchar = " "
    toadress = adress.split(splitchar)
    toadress = [i.strip() for i in toadress]

    server = "localhost"
    fromadress = "sb-sparv@svenska.gu.se"
    subject = "Your corpus is done!"
    txt = "Dear Sparv User,\n\n"
    txt += "You can download the annotated corpus by clicking on the following link:\n\n" + link
    txt += "\n\nPlease note that the corpus will be removed after seven days."
    txt += "\n\nYours,\nSparv\nhttp://spraakbanken.gu.se/sparv\nsb-sparv@svenska.gu.se"

    # Prepare actual message (the original "\From:" contained a stray backslash that would corrupt the header)
    message = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (fromadress, ", ".join(toadress), subject, txt)

    # Send the mail
    server = smtplib.SMTP(server)
    server.sendmail(fromadress, toadress, message)
    server.quit()
5,338,819
def examples(conf, concept, positives, vocab, neg_count=None): """ Builds positive and negative examples. """ if neg_count is None: neg_count = conf.getint('sample','neg_count') while True: for (chosen_idx, idces), e_token_indices in positives: if len(chosen_idx) ==1: # FIXME: only taking into account those that have exactly one gold concept c_token_indices = concept.vectorize[chosen_idx[0]] import random negative_token_indices = [concept.vectorize[i] for i in random.sample(list(set([*range(len(concept.names))])-set(idces)),neg_count)] entity_inputs = np.tile(pad_sequences([e_token_indices], padding='post', maxlen=conf.getint('embedding','length')), (len(negative_token_indices)+1, 1)) # Repeat the same entity for all concepts concept_inputs = pad_sequences([c_token_indices]+negative_token_indices, padding='post', maxlen=conf.getint('embedding','length')) # concept_inputs = np.asarray([[concept_dict[cid]] for cid in [concept_id]+negative_concepts]) # import pdb; pdb.set_trace() distances = [1] + [0]*len(negative_token_indices) data = { 'inp_mentions': entity_inputs, 'inp_candidates': concept_inputs, 'prediction_layer': np.asarray(distances), } yield data, data
5,338,820
def molmer_sorensen(theta, N=None, targets=[0, 1]): """ Quantum object of a Mølmer–Sørensen gate. Parameters ---------- theta: float The duration of the interaction pulse. N: int Number of qubits in the system. target: int The indices of the target qubits. Returns ------- molmer_sorensen_gate: :class:`qutip.Qobj` Quantum object representation of the Mølmer–Sørensen gate. """ if targets != [0, 1] and N is None: N = 2 if N is not None: return expand_operator(molmer_sorensen(theta), N, targets=targets) return Qobj( [ [np.cos(theta/2.), 0, 0, -1.j*np.sin(theta/2.)], [0, np.cos(theta/2.), -1.j*np.sin(theta/2.), 0], [0, -1.j*np.sin(theta/2.), np.cos(theta/2.), 0], [-1.j*np.sin(theta/2.), 0, 0, np.cos(theta/2.)] ], dims=[[2, 2], [2, 2]])
5,338,821
def verify_path(path):
    """Check if the project path is correct."""
    if not os.path.exists(path) or not os.path.isdir(path):
        error('Path specified for project creation does not exist or is not a directory')
5,338,822
def get_pixel_dist(pixel, red, green, blue):
    """
    Returns the color distance between a pixel and the mean RGB value.

    Input:
        pixel (Pixel): pixel with RGB values to be compared
        red (int): average red value across all images
        green (int): average green value across all images
        blue (int): average blue value across all images
    Returns:
        color_distance (float): Euclidean color distance between the pixel and the
        average red, green, and blue values
    """
    color_distance = math.sqrt((pixel.red - red)**2 + (pixel.green - green)**2 + (pixel.blue - blue)**2)
    return color_distance
5,338,823
def test_capture_log(allured_testdir, logging): """ >>> import logging >>> import pytest >>> import allure >>> logger = logging.getLogger(__name__) >>> @pytest.fixture ... def fixture(request): ... logger.info("Start fixture") ... def finalizer(): ... logger.info("Stop fixture") ... request.addfinalizer(finalizer) >>> def test_capture_log_example(fixture): ... logger.info("Start test") ... with allure.step("Step"): ... logger.info("Start step") """ allured_testdir.parse_docstring_source() params = [] if logging else ["-p", "no:logging"] if_logging_ = is_ if logging else is_not allured_testdir.run_with_allure("--log-cli-level=INFO", *params) assert_that(allured_testdir.allure_report, has_property("attachments", all_of( if_logging_(has_value(contains_string("Start fixture"))), if_logging_(has_value(contains_string("Stop fixture"))), if_logging_(has_value(contains_string("Start test"))), if_logging_(has_value(contains_string("Start step"))) ) ) )
5,338,824
def list_children_shapes(node, all_hierarchy=True, full_path=True):
    """
    Returns a list of children shapes of the given node
    :param node: node to query for child shapes
    :param all_hierarchy: whether to include shapes from the full hierarchy below the node
    :param full_path: whether to return full node paths
    :return: list of child shape nodes
    """
    raise NotImplementedError()
5,338,825
def structure_pmu(array: np.ndarray) -> np.ndarray: """Helper function to convert 4 column array into structured array representing 4-momenta of particles. Parameters ---------- array : numpy ndarray of floats, with shape (num particles, 4) The 4-momenta of the particles, arranged in columns. Columns must be in order (x, y, z, e). See also -------- structure_pmu_components : structured array from seperate 1d arrays of momentum components. Notes ----- As the data-type of the input needs to be recast, the output is a copy of the original data, not a view on it. Therefore it uses additional memory, so later changes to the original will not affect the returned array, and vice versa. """ if array.dtype != _types.pmu: struc_array = array.astype(_types.pmu[0][1]) struc_array = struc_array.view(dtype=_types.pmu, type=np.ndarray) struc_pmu = struc_array.copy().squeeze() else: struc_pmu = array return struc_pmu
5,338,826
def _log_from_checkpoint(args):
    """Infer logging directory from checkpoint file."""
    int_dir, checkpoint_name = os.path.split(args.checkpoint)
    logdir = os.path.dirname(int_dir)
    checkpoint_num = int(checkpoint_name.split('_')[1])
    _log_args(logdir, args, modified_iter=checkpoint_num)
    return logdir, checkpoint_num
5,338,827
def url_query_parameter(url, parameter, default=None, keep_blank_values=0):
    """Return the value of a url parameter, given the url and parameter name

    General case:

    >>> import w3lib.url
    >>> w3lib.url.url_query_parameter("product.html?id=200&foo=bar", "id")
    '200'
    >>>

    Return a default value if the parameter is not found:

    >>> w3lib.url.url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault")
    'mydefault'
    >>>

    Returns None if `keep_blank_values` not set or 0 (default):

    >>> w3lib.url.url_query_parameter("product.html?id=", "id")
    >>>

    Returns an empty string if `keep_blank_values` set to 1:

    >>> w3lib.url.url_query_parameter("product.html?id=", "id", keep_blank_values=1)
    ''
    >>>

    """
    queryparams = parse_qs(
        urlsplit(str(url))[3],
        keep_blank_values=keep_blank_values
    )
    return queryparams.get(parameter, [default])[0]
5,338,828
def read_ground_stations_extended(filename_ground_stations_extended):
    """
    Reads ground stations from the input file.

    :param filename_ground_stations_extended: Filename of the extended ground stations file
                                              (typically /path/to/ground_stations.txt)

    :return: List of ground stations
    """
    ground_stations_extended = []
    gid = 0
    with open(filename_ground_stations_extended, 'r') as f:
        for line in f:
            split = line.split(',')
            if len(split) != 8:
                raise ValueError("Extended ground station file must have 8 columns: " + line)
            if int(split[0]) != gid:
                raise ValueError("Ground station id must increment each line")
            ground_station_basic = {
                "gid": gid,
                "name": split[1],
                "latitude_degrees_str": split[2],
                "longitude_degrees_str": split[3],
                "elevation_m_float": float(split[4]),
                "cartesian_x": float(split[5]),
                "cartesian_y": float(split[6]),
                "cartesian_z": float(split[7]),
            }
            ground_stations_extended.append(ground_station_basic)
            gid += 1
    return ground_stations_extended
5,338,829
def _stdin_yaml_arg(): """ @return: iterator for next set of service args on stdin. Iterator returns a list of args for each call. @rtype: iterator """ import yaml import select loaded = None poll = select.poll() poll.register(sys.stdin, select.POLLIN) try: arg = 'x' while not rospy.is_shutdown() and arg != '\n': buff = '' while arg != '\n' and arg.strip() != '---': val = poll.poll(1.0) if not val: continue arg = sys.stdin.readline() + '\n' if arg.startswith('... logging'): # temporary, until we fix rospy logging continue elif arg.strip() != '---': buff = buff + arg try: loaded = yaml.load(buff.rstrip()) except Exception as e: print("Invalid YAML: %s"%str(e), file=sys.stderr) if loaded is not None: yield loaded else: # EOF reached, this will raise GeneratorExit return # reset arg arg = 'x' except select.error: return
5,338,830
def main():
    """
    Converts characters to uppercase, then outputs the complementary sequence
    through the build_complement() helper function.
    """
    dna = input('Please give me a DNA strand and I\'ll find the complement: ')
    # Converts characters to uppercase (note the call: .upper(), not the bare attribute .upper)
    dna = dna.upper()
    ans = build_complement(dna)
    print('The complement of ' + str(dna) + ' is ' + str(ans))
5,338,831
def quiet_py4j():
    """Suppress spark logging for the test context."""
    logger = logging.getLogger('py4j')
    logger.setLevel(logging.WARN)
5,338,832
def send_email(destination, code): """ Send the validation email. """ if 'CLOUD' not in os.environ: # If the application is running locally, use config.ini anf if not, set environment variables config = configparser.ConfigParser() config.read_file(open('config.ini')) # Sender email and account password sender = config['SENDER']['from'] password = config['SENDER_PASSWORD']['psw'] else: sender = os.environ['SENDER'] password = os.environ['SENDER_PASSWORD'] ret = False try: text = "Code: {}".format(code) message = """\ From: %s To: %s Subject: %s %s """ % (sender, destination, 'Agnes', text) # TODO Improve the email format. Let it more Readable # Log in to server using secure context and send email context = ssl.create_default_context() with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server: server.login(sender, password) server.sendmail(sender, destination, message) logger.debug('Sending email to {}'.format(destination)) ret = True except Exception as e: logger.exception(e, exc_info=False) finally: return ret
5,338,833
def shortPrescID(): """Create R2 (short format) Prescription ID Build the prescription ID and add the required checkdigit. Checkdigit is selected from the PRESCRIPTION_CHECKDIGIT_VALUES constant """ _PRESC_CHECKDIGIT_VALUES = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ+' hexString = str(uuid.uuid1()).replace('-', '').upper() prescriptionID = hexString[:6] + '-Z' + hexString[6:11] + '-' + hexString[12:17] prscID = prescriptionID.replace('-', '') prscIDLength = len(prscID) runningTotal = 0 for stringPosition in range(prscIDLength): runningTotal = runningTotal + int(prscID[stringPosition], 36) * (2 ** (prscIDLength - stringPosition)) checkValue = (38 - runningTotal % 37) % 37 checkValue = _PRESC_CHECKDIGIT_VALUES[checkValue] prescriptionID += checkValue return prescriptionID
5,338,834
def rmse(predictions, verbose=True):
    """Compute RMSE (Root Mean Squared Error).

    .. math::
        \\text{RMSE} = \\sqrt{\\frac{1}{|\\hat{R}|} \\sum_{\\hat{r}_{ui} \\in
        \\hat{R}}(r_{ui} - \\hat{r}_{ui})^2}.

    Args:
        predictions (:obj:`list` of :obj:`Prediction\\
            <surprise.prediction_algorithms.predictions.Prediction>`):
            A list of predictions, as returned by the :meth:`test()
            <surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
        verbose: If True, will print computed value. Default is ``True``.

    Returns:
        The Root Mean Squared Error of predictions.

    Raises:
        ValueError: When ``predictions`` is empty.
    """
    if not predictions:
        raise ValueError('Prediction list is empty.')

    mse = np.mean([float((true_r - est)**2)
                   for (_, _, true_r, est, _) in predictions])
    rmse_ = np.sqrt(mse)

    if verbose:
        print('RMSE: {0:1.4f}'.format(rmse_))

    return rmse_
5,338,835
def test_cbv_proxyidmixin_should_succeed(quantity: int, client) -> None: """Tests if CBVs using ProxydMixin can retrieve objects correctly""" int_persons = baker.make("appmock.PersonIntegerPK", _quantity=quantity) uuid_persons = baker.make("appmock.PersonUUIDPK", _quantity=quantity) for person in int_persons: url = reverse("class-person-int-detail", kwargs={"pk": person.id_}) res = client.get(url) assert decode(person.id_) == res.context["person"].pk for person in uuid_persons: url = reverse("class-person-uuid-detail", kwargs={"pk": person.id_}) res = client.get(url) assert decode(person.id_) == res.context["person"].pk
5,338,836
def join_csvs(column, csvs_in, csv_out, encoding_in='utf-8', encoding_out='utf-8'):
    """Outer join a comma-delimited list of csvs on a given column.

    Common encodings include: utf-8, cp1252.
    """
    dfs = [read_csv(csv_in, encoding_in) for csv_in in csvs_in.split(',')]
    df = pd_outer_join(dfs, column)
    df.to_csv(csv_out, encoding=encoding_out)
5,338,837
def get_module_docstring(path):
    """Get a .py file docstring, without actually executing the file."""
    with open(path) as f:
        return ast.get_docstring(ast.parse(f.read()))
5,338,838
def get_authenticate_kwargs(oauth_credentials=None, http_=None): """Returns a dictionary with keyword arguments for use with discovery Prioritizes oauth_credentials or a http client provided by the user If none provided, falls back to default credentials provided by google's command line utilities. If that also fails, tries using httplib2.Http() Used by `gcs.GCSClient` and `bigquery.BigQueryClient` to initiate the API Client """ if oauth_credentials: authenticate_kwargs = { "credentials": oauth_credentials } elif http_: authenticate_kwargs = { "http": http_ } else: # neither http_ or credentials provided try: # try default credentials oauth_credentials = oauth2client.client.GoogleCredentials.get_application_default() authenticate_kwargs = { "credentials": oauth_credentials } except oauth2client.client.GoogleCredentials.ApplicationDefaultCredentialsError: # try http using httplib2 authenticate_kwargs = { "http": httplib2.Http() } return authenticate_kwargs
5,338,839
def marginal_expectation(distribution: Tensor, axes: AxesLike, integrals: Union[Callable, Sequence[Callable]],
                         *args, **kwargs):
    """
    Computes expectations along the ``axes`` according to ``integrals`` independently.

    ``args`` and ``kwargs`` are passed to ``integral`` as additional arguments.
    """
    axes = np.core.numeric.normalize_axis_tuple(axes, distribution.ndim, 'axes')
    if callable(integrals):
        integrals = [integrals]
    if len(integrals) == 1:
        integrals = [integrals[0]] * len(axes)

    for axis, integral in zip_equal(axes, integrals):
        # sum over other axes, but allow for reduction of `axis`
        other_axes = list(axes)
        other_axes.remove(axis)
        other_axes = np.array(other_axes)
        other_axes[other_axes > axis] -= 1
        yield expectation(distribution, axis, integral, *args, **kwargs).sum(tuple(other_axes))
5,338,840
def get_transforms(size=128, mobilenet=False): """ Gets all the torchvision transforms we will be applying to the dataset. """ # These are the transformations that we will do to our dataset # For X transforms, let's do some of the usual suspects and convert to tensor. # Don't forget to normalize to [0.0, 1.0], FP32 # and don't forget to resize to the same size every time. x_transforms = [ T.Resize((size, size)), T.RandomApply([ T.RandomAffine(degrees=20, translate=(0.1, 0.1)), T.RandomHorizontalFlip(p=0.5), T.RandomRotation(degrees=(-30, 30)), T.RandomVerticalFlip(p=0.5), ], p=0.5), T.ColorJitter(brightness=0.5), T.ToTensor(), # Converts to FP32 [0.0, 1.0], Tensor type ] # Pretrained MobileNetV2 requires normalizing like this: if mobilenet: x_transforms.append(T.Normalize(mean=MOBILENET_MEAN, std=MOBILENET_STD)) # For Y transforms, we need to make sure that we do the same thing to the ground truth, # since we are trying to recreate the image. y_transforms = [ T.Resize((size, size), interpolation=Image.NEAREST), # Make sure we don't corrupt the labels T.RandomApply([ T.RandomAffine(degrees=20, translate=(0.1, 0.1)), T.RandomHorizontalFlip(p=0.5), T.RandomRotation(degrees=(-30, 30)), T.RandomVerticalFlip(p=0.5), ], p=0.5), ] return x_transforms, y_transforms
5,338,841
def transform(f, a, b, c, d):
    """
    Transform a given function linearly.

    If f(t) is the original function, and a, b, c, and d are the parameters in
    order, then the return value is the function F(t) = a*f(c*t + d) + b
    """
    return lambda x: a * f(c * x + d) + b
5,338,842
def delete_rules(request):
    """
    Deletes the rules with the given primary key.
    """
    if request.method == 'POST':
        rules_id = strip_tags(request.POST['post_id'])
        post = HouseRules.objects.get(pk=rules_id)
        post.filepath.delete()  # Delete actual file
        post.delete()
    return redirect('archive-rules')
5,338,843
def display_word(word, secret_word, word_to_guess): """Function to edit the word to display and the word to guess (word to display is the test word with its colored letter and the word to guess is the word with spaces in it, for each missing letter). Args: word (str): the input word secret_word (str): the secret word that the user have to find word_to_guess (str): the word with spaces for each missing letter Returns: str: the word to guess, to update it at each try """ word_to_display = "" indexes = [] # We need to do the dictio at each input because we need to edit it for # each test word. It will be needed to not display several yellow letter # when there should be only one. dictio = letters_dict(secret_word) # For each letter in the word for letter_index in range(len(word)): word_letter = word[letter_index] # If the letter is the same at the same place in the secret_word if word_letter==secret_word[letter_index]: # Colors the letter in green word_to_display += colored(word_letter, "green") # Adds the index to a list indexes.append(letter_index) dictio[word_letter] -= 1 # If the letter is not the same at the same place in the secret word # but is in the word anyway elif word_letter in secret_word: if dictio[word_letter]>0: # Colors the letter in yellow and substract 1 to the dictionary # of letters, if it's not 0 word_to_display += colored(word_letter, "yellow") dictio[word_letter] -= 1 else: # If there's 0 for the letter in the dictionary, it's because we # already encountered them all, so we don't color it word_to_display += word_letter else: word_to_display += word_letter # Transforms the word to guess as a list, within each letter is one element word_to_guess_list = list(word_to_guess) for index in range(len(secret_word)): if index in indexes: # If the user have found a letter, replaces the space (_) by it word_to_guess_list[index] = secret_word[index] # Reforms the word word_to_guess = "".join(word_to_guess_list) return word_to_display, word_to_guess
5,338,844
def test_append_new_event(init_context):
    """ Append new event """
    pc = init_context
    pc.append_event("test", Arguments("test", a=1))
    pc.append_event("test1", Arguments("test1", a=2))
    pc.append_event("test2", Arguments("test2", a=3))
    e1 = pc.event_queue.get()
    e2 = pc.event_queue.get()
    e3 = pc.event_queue.get()
    assert e1.args.a == 1 and e2.args.a == 2 and e3.args.a == 3, "Unexpected event arguments"
5,338,845
def fixPythonImportPath():
    """ Add main.py's folder to Python's import paths.

        We need this because by default Macros and UNO components can only import files located in
        the `pythonpath` folder, which must be in the extension's root folder. This requires extra
        configuration in the IDE and the project structure becomes somewhat ugly.

        TODO Py3.5: use pathlib
    """
    import sys
    from inspect import getsourcefile
    from os.path import dirname, join, abspath, pardir

    # a hack to get this file's location, because `__file__` and `sys.argv` are not defined inside a macro
    thisFilePath = getsourcefile(lambda: 0)

    # relative path to parent dir like `<path to py macros or extension>\writer2wiki-ext\writer2wiki\..`
    parentDir = join(dirname(thisFilePath), pardir)
    parentDirAbs = abspath(parentDir)

    if parentDirAbs not in sys.path:
        log.debug('appending dir: ' + parentDirAbs)
        sys.path.append(parentDirAbs)
    else:
        log.debug('NOT appending ' + parentDirAbs)
5,338,846
def plot_imfs(signal, time_samples, imfs, fignum=None): """Visualize decomposed signals. :param signal: Analyzed signal :param time_samples: time instants :param imfs: intrinsic mode functions of the signal :param fignum: (optional) number of the figure to display :type signal: array-like :type time_samples: array-like :type imfs: array-like of shape (n_imfs, length_of_signal) :type fignum: int :return: None :Example: >>> plot_imfs(signal) .. plot:: ../../docs/examples/emd_fmsin.py """ n_imfs = imfs.shape[0] plt.figure(num=fignum) axis_extent = max(np.max(np.abs(imfs[:-1, :]), axis=0)) # Plot original signal ax = plt.subplot(n_imfs, 1, 1) ax.plot(time_samples, signal) ax.axis([time_samples[0], time_samples[-1], signal.min(), signal.max()]) ax.tick_params(which='both', left=False, bottom=False, labelleft=False, labelbottom=False) ax.grid(False) ax.set_ylabel('Signal') ax.set_title('Empirical Mode Decomposition') # Plot the IMFs for i in range(n_imfs - 1): ax = plt.subplot(n_imfs, 1, i + 2) ax.plot(time_samples, imfs[i, :]) ax.axis([time_samples[0], time_samples[-1], -axis_extent, axis_extent]) ax.tick_params(which='both', left=False, bottom=False, labelleft=False, labelbottom=False) ax.grid(False) ax.set_ylabel('imf' + str(i + 1)) # Plot the residue ax = plt.subplot(n_imfs + 1, 1, n_imfs + 1) ax.plot(time_samples, imfs[-1, :], 'r') ax.axis('tight') ax.tick_params(which='both', left=False, bottom=False, labelleft=False, labelbottom=False) ax.grid(False) ax.set_ylabel('res.') plt.show()
5,338,847
def get_config_cache(course_pk: 'int') -> dict:
    """Fetch the config from the cache. If it is not cached, build a new cache entry, store it, and return the config."""
    cache_key = f"course-config-{course_pk}"
    cached_config = cache.get(cache_key, None)
    if cached_config is None:
        config = Config.objects.filter(course_id=course_pk).first()
        cached_config = set_config_from_instance(config)
    return cached_config
5,338,848
def log_command(func):
    """
    Logging decorator for logging bot commands and info
    """
    def log_command(*args, **kwargs):
        slack, command, event = args
        user = slack.user_info(event["user"])
        log_line = 'USER: %s | CHANNEL ID: %s | COMMAND: %s | TEXT: %s'
        command_info = log_line % (user["user"]["name"], event["channel"],
                                   command, event["text"])
        logging.info(command_info)
        command = func(*args, **kwargs)
        return command
    return log_command
5,338,849
def expand_home_folder(path):
    """Checks if path starts with ~ and expands it to the actual home folder."""
    if path.startswith("~"):
        return os.environ.get('HOME') + path[1:]
    return path
5,338,850
def calc_stats(scores_summ, curr_lines, curr_idx, CI=0.95, ext_test=None, stats="mean", shuffle=False): """ calc_stats(scores_summ, curr_lines, curr_idx) Calculates statistics on scores from runs with specific analysis criteria and records them in the summary scores dataframe. Required args: - scores_summ (pd DataFrame): DataFrame containing scores summary - curr_lines (pd DataFrame) : DataFrame lines corresponding to specific analysis criteria - curr_idx (int) : Current row in the scores summary DataFrame Optional args: - CI (num) : Confidence interval around which to collect percentile values default: 0.95 - extra_test (str): Name of extra test set, if any (None if none) default: None - stats (str) : stats to take, i.e., "mean" or "median" default: "mean" - shuffle (bool) : If True, data is for shuffled, and will be averaged across runs before taking stats default: False Returns: - scores_summ (pd DataFrame): Updated DataFrame containing scores, as well as epoch_n, runs_total, runs_nan summaries """ scores_summ = copy.deepcopy(scores_summ) # score labels to perform statistics on sc_labs = ["epoch_n"] + logreg_util.get_sc_labs( True, ext_test_name=ext_test) # avoids accidental nuisance dropping by pandas curr_lines["epoch_n"] = curr_lines["epoch_n"].astype(float) if shuffle: # group runs and take mean or median across scores_summ.loc[curr_idx, "mouse_n"] = -1 keep_lines = \ [col for col in curr_lines.columns if col in sc_labs] + ["run_n"] grped_lines = curr_lines[keep_lines].groupby("run_n", as_index=False) if stats == "mean": curr_lines = grped_lines.mean() # automatically skips NaNs elif stats == "median": curr_lines = grped_lines.median() # automatically skips NaNs else: gen_util.accepted_values_error("stats", stats, ["mean", "median"]) # calculate n_runs (without nans and with) scores_summ.loc[curr_idx, "runs_total"] = len(curr_lines) scores_summ.loc[curr_idx, "runs_nan"] = curr_lines["epoch_n"].isna().sum() # percentiles to record ps, p_names = math_util.get_percentiles(CI) for sc_lab in sc_labs: if sc_lab in curr_lines.keys(): cols = [] vals = [] data = curr_lines[sc_lab].astype(float) for stat in ["mean", "median"]: cols.extend([stat]) vals.extend( [math_util.mean_med(data, stats=stat, nanpol="omit")]) for error in ["std", "sem"]: cols.extend([error]) vals.extend([math_util.error_stat( data, stats="mean", error=error, nanpol="omit")]) # get 25th and 75th quartiles cols.extend(["q25", "q75"]) vals.extend(math_util.error_stat( data, stats="median", error="std", nanpol="omit")) # get other percentiles (for CI) cols.extend(p_names) vals.extend(math_util.error_stat( data, stats="median", error="std", nanpol="omit", qu=ps)) # get MAD cols.extend(["mad"]) vals.extend([math_util.error_stat( data, stats="median", error="sem", nanpol="omit")]) # plug in values cols = [f"{sc_lab}_{name}" for name in cols] gen_util.set_df_vals(scores_summ, curr_idx, cols, vals) return scores_summ
5,338,851
def report_date_time() -> str:
    """Return the report date requested as query parameter."""
    report_date_string = dict(bottle.request.query).get("report_date")
    return str(report_date_string).replace("Z", "+00:00") if report_date_string else iso_timestamp()
5,338,852
def assign_colour_label_data(catl): """ Assign colour label to data Parameters ---------- catl: pandas Dataframe Data catalog Returns --------- catl: pandas Dataframe Data catalog with colour label assigned as new column """ logmstar_arr = catl.logmstar.values u_r_arr = catl.modelu_rcorr.values colour_label_arr = np.empty(len(catl), dtype='str') for idx, value in enumerate(logmstar_arr): # Divisions taken from Moffett et al. 2015 equation 1 if value <= 9.1: if u_r_arr[idx] > 1.457: colour_label = 'R' else: colour_label = 'B' if value > 9.1 and value < 10.1: divider = 0.24 * value - 0.7 if u_r_arr[idx] > divider: colour_label = 'R' else: colour_label = 'B' if value >= 10.1: if u_r_arr[idx] > 1.7: colour_label = 'R' else: colour_label = 'B' colour_label_arr[idx] = colour_label catl['colour_label'] = colour_label_arr return catl
5,338,853
def get_policy(arn):
    """Get info about a policy."""
    client = get_client("iam")
    response = client.get_policy(PolicyArn=arn)
    return response
5,338,854
def get_file_xml(filename):
    """
    :param filename: the filename, without the .xml suffix, in the tests/xml directory
    :return: returns the specified file's xml
    """
    file = os.path.join(XML_DIR, filename + '.xml')
    with open(file, 'r') as f:
        xml = f.read()
    return xml
5,338,855
def _write_roadways(roadway_feature_class, condition): """Writes roadway feature class to STAMINA syntax Arguments: roads_feature_class {String} -- Path to feature class condition {String} -- Existing, NoBuild, or Build. Determines fields to use from geospatial template Returns: [string] -- [roadways] """ roadway_count = len([row for row in shapefile.Reader(roadway_feature_class)]) with shapefile.Reader(roadway_feature_class) as roadways: roadway_string = "2,{}\n".format(roadway_count) flds = validate_roadway_field(condition) for row in roadways.shapeRecords(): road = row.record["road_name"] speed = row.record["speed"] auto = round(row.record[flds[0]], 0) medium = round(row.record[flds[1]], 0) heavy = round(row.record[flds[2]], 0) roadway_string += "{}\n".format(road) roadway_string += "CARS {} {}\n".format(auto, speed) roadway_string += "MT {} {}\n".format(medium, speed) roadway_string += "HT {} {}\n".format(heavy, speed) roadway_string += _write_roadway_points(row.shape) roadway_string += roadway_separator() return roadway_string
5,338,856
def detect_backends() -> tuple:
    """
    Registers all available backends and returns them.
    This includes only backends for which the minimal requirements are fulfilled.

    Returns:
        `tuple` of `phi.math.backend.Backend`
    """
    try:
        from .tf import TF_BACKEND
    except ImportError:
        pass
    try:
        from .torch import TORCH_BACKEND
    except ImportError:
        pass
    try:
        from .jax import JAX_BACKEND
    except ImportError:
        pass
    from .math.backend import BACKENDS
    return tuple(BACKENDS)
5,338,857
def WriteMobileRankings(IncludedTitles, TxtFile, TitleMin = DefaultTitleMin, SortedBy = DefaultSort, SortedByTie = DefaultSortTie, LinesBetween = DefaultLines): """Writes a TxtFile for the titles in IncludedTitles, in a mobile friendly format. IncludedTitles: a list of string(s) in Titles; the Title(s) whose rankings are to be written. TxtFile: a string; the name of the file to be written. TitleMin: the number of titles if the Title is 'Overall'. SortedBy: a string in Sortings; the primary method of sorting. SortedByTie: a string in Sortings; the method of sorting in the event of a tie. Example: WriteMobileRankings(['Melee', 'Sm4sh'], 'MeleeSm4shRankingsMobile', TitleMin = 2, SortedBy = 'Low', SortedByTie = 'Middle', LinesBetween = 2)""" TxtFile = Addtxt(TxtFile) f = open(TxtFile, 'w') if type(IncludedTitles) != list: IncludedTitles = [IncludedTitles] f.write('Place - Tag / Name: Games Played\n(Best Title(s) if Overall)\nLow, Middle, High Estimates\n\n') for Title in IncludedTitles: f.write(Title + '\n') Rankings = RankingList(Title, TitleMin, SortedBy, SortedByTie) if Title == 'Overall': Dict = OverallPersonDict(TitleMin, SortedBy, SortedByTie) TitleTotal = 0 for i in range(len(Rankings)): Person = Rankings[i] f.write(str(Person[0]) + ' - ' + \ Person[4] + ' / ' + \ Person[5] + ': ' + \ str(Person[7]) + '\n' + \ str(Person[6])[1:-1].replace("'", '') + '\n' + \ format(Person[1], Rounding) + ', ' + \ format(Person[2], Rounding) + ', ' + \ format(Person[3], Rounding) + '\n\n') TitleTotal += Rankings[i][7] else: Dict = TitleDict(Title) TitleTotal = 0 for i in range(len(Rankings)): Person = Rankings[i] f.write(str(Person[0]) + ' - ' + \ Person[4] + ' / ' + \ Person[5] + ': ' + \ str(Person[6]) + '\n' + \ format(Person[1], Rounding) + ', ' + \ format(Person[2], Rounding) + ', ' + \ format(Person[3], Rounding) + '\n\n') TitleTotal += Rankings[i][6] f.write('Total Games: ' + str(TitleTotal)) if Title != (IncludedTitles)[-1]: f.write('\n'*(LinesBetween + 1)) f = open(TxtFile, 'r+') f.close()
5,338,858
def mix_audio(word_path=None, bg_path=None, word_vol=1.0, bg_vol=1.0, sample_time=1.0, sample_rate=16000):
    """
    Read in a wav file and background noise file. Resample, pad/truncate to
    sample_time, and mix the two at the requested volumes.
    """
    # If no word file is given, start from silence at the target sample rate
    if word_path is None:
        waveform = np.zeros(int(sample_time * sample_rate))
        fs = sample_rate
    else:
        # Open wav file, resample, mix to mono
        waveform, fs = librosa.load(word_path, sr=sample_rate, mono=True)

        # Pad 0s on the end if not long enough
        if len(waveform) < sample_time * sample_rate:
            waveform = np.append(waveform, np.zeros(int((sample_time * sample_rate) -
                                                        len(waveform))))

        # Truncate if too long
        waveform = waveform[:int(sample_time * sample_rate)]

    # If no background noise is given, just return the waveform
    if bg_path is None:
        return waveform

    # Open background noise file
    bg_waveform, fs = librosa.load(bg_path, sr=fs)

    # Pick a random starting point in background file
    max_end = len(bg_waveform) - int(sample_time * sample_rate)
    start_point = random.randint(0, max_end)
    end_point = start_point + int(sample_time * sample_rate)

    # Mix the two sound samples, scaled by their respective volumes
    waveform = (0.5 * word_vol * waveform) + \
               (0.5 * bg_vol * bg_waveform[start_point:end_point])

    return waveform
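A minimal usage sketch, assuming mix_audio above is in scope with its module-level imports (librosa, numpy as np, random); the wav paths below are hypothetical placeholders and the noise file is assumed to be at least one second long.

# Hypothetical paths -- substitute real wav files.
word_wav = "data/words/yes_001.wav"
noise_wav = "data/noise/cafe.wav"

# One second of the spoken word mixed with background noise at half volume.
mixed = mix_audio(word_path=word_wav, bg_path=noise_wav,
                  word_vol=1.0, bg_vol=0.5,
                  sample_time=1.0, sample_rate=16000)
print(len(mixed))  # 16000 samples

# Pure background noise (no word file given).
noise_only = mix_audio(bg_path=noise_wav, sample_time=1.0, sample_rate=16000)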
5,338,859
def analytical_pulse_width(ekev): """ Estimate analytical_pulse_width (FWHM) from radiation energy (assumes symmetrical beam) :param ekev: radiation energy [keV] :return sig: Radiation pulse width (FWHM) [m] """ sig = np.log((7.4e03/ekev))*6 return sig/1e6
5,338,860
def progress_timeout(progress_bar):
    """
    Update the progress of the timer on a timeout tick.

    Parameters
    ----------
    progress_bar : ProgressBar
        The UI progress bar object

    Returns
    -------
    bool
        True if continuing timer, False if done.
    """
    global time_remaining, time_total
    time_remaining -= 1
    new_val = 1 - (time_remaining / time_total)
    if new_val >= 1:
        progress_bar.pb.set_text("Coffee extraction done.")
        play_endsound()
        return False
    progress_bar.pb.set_fraction(new_val)
    # Integer-divide the remaining seconds so the ':01d' format spec gets an int.
    progress_bar.pb.set_text("{0:.1f} % Brewed ({1:01d}:{2:02d} Remaining)"
                             .format(new_val * 100, time_remaining // 60,
                                     time_remaining % 60))
    return True
5,338,861
def socket_file(module_name): """ Get the absolute path to the socket file for the named module. """ module_name = realname(module_name) return join(sockets_directory(), module_name + '.sock')
5,338,862
def test_create_kernel(tmpdir): """Creates a new directory '3-kernel' and all its input files.""" dirname = '3-kernel' d = tmpdir.join(dirname) expected_dir = os.path.join(fixtures_dir, dirname) bgw.create_kernel(config, tmpdir.realpath()) with open(os.path.join(expected_dir, 'kernel.inp.expected'), 'r') as f: assert d.join('kernel.inp').read() == f.read() with open(os.path.join(expected_dir, 'clean.expected'), 'r') as f: assert d.join('clean').read() == f.read()
5,338,863
def create_notification_entry(testcase_id, user_email):
    """Create an entry log for a sent notification."""
    notification = data_types.Notification()
    notification.testcase_id = testcase_id
    notification.user_email = user_email
    notification.put()
5,338,864
def postBuild(id: str): """Register a new build. Args: id: Identifier of Repository for which build is to be registered. Returns: build_id: Identifier of Build created. """ return register_builds( id, request.headers["X-Project-Access-Token"], request.json )
5,338,865
def submit(g_nocaptcha_response_value, secret_key, remoteip):
    """
    Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
    for the request.

    g_nocaptcha_response_value -- The value of the g-recaptcha-response field from the form
    secret_key -- your reCAPTCHA private key
    remoteip -- the user's ip address
    """

    if not (g_nocaptcha_response_value and len(g_nocaptcha_response_value)):
        return RecaptchaResponse(
            is_valid=False,
            error_codes=['incorrect-captcha-sol']
        )

    params = urlencode({
        'secret': want_bytes(secret_key),
        'remoteip': want_bytes(remoteip),
        'response': want_bytes(g_nocaptcha_response_value),
    })

    if not PY2:
        params = params.encode('utf-8')

    req = Request(
        url=VERIFY_URL, data=params,
        headers={
            'Content-type': 'application/x-www-form-urlencoded',
            'User-agent': 'noReCAPTCHA Python'
        }
    )

    httpresp = urlopen(req)
    try:
        res = force_text(httpresp.read())
        return_values = json.loads(res)
    except (ValueError, TypeError):
        return RecaptchaResponse(
            is_valid=False,
            error_codes=['json-read-issue']
        )
    except Exception:
        return RecaptchaResponse(
            is_valid=False,
            error_codes=['unknown-network-issue']
        )
    finally:
        httpresp.close()

    return_code = return_values.get("success", False)
    error_codes = return_values.get('error-codes', [])
    logger.debug("%s - %s" % (return_code, error_codes))

    if return_code is True:
        return RecaptchaResponse(is_valid=True)
    else:
        return RecaptchaResponse(is_valid=False, error_codes=error_codes)
5,338,866
def boundary(shape, n_size, n): """ Shape boundaries & their neighborhoods @param shape 2D_bool_numpy_array: True if pixel in shape @return {index: neighborhood} index: 2D_int_tuple = index of neighborhood center in shape neighborhood: 2D_bool_numpy_array of size n_size Boundaries are shape pixels inside the shape having 1 or more 4-neighbors outside the shape. """ return {i: shape[n(i)] for i in np.ndindex(shape.shape) if is_boundary_pixel(shape,i,n_size)}
5,338,867
def centered_mols(self, labels, return_trans=False):
    """
    Return the molecules translated at the origin with a corresponding cell

    Parameters
    ----------
    labels : int or list of ints
        The labels of the atoms to select
    return_trans : bool
        If True, also return the translation vector (the negated centroid)
    Returns
    -------
    mol : Mol object
        The selected molecules with their centroid at the origin
    mod_cell : Mol object
        The new confined cell corresponding to the now translated molecules

    """
    mol, mod_cell = self.complete_mol(labels)
    centro = mol.centroid()
    mol.translate(-centro)
    mod_cell.translate(-centro)
    mod_cell = mod_cell.confined()

    if return_trans:
        return mol, mod_cell, -centro
    else:
        return mol, mod_cell
5,338,868
def binary_accuracy(output: torch.Tensor, target: torch.Tensor) -> float:
    """Computes the accuracy (in percent) for binary classification"""
    with torch.no_grad():
        batch_size = target.size(0)

        pred = (output >= 0.5).float().t().view(-1)
        correct = pred.eq(target.view(-1)).float().sum()
        correct.mul_(100.0 / batch_size)
        # Return a Python float to match the annotated return type.
        return correct.item()
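A quick sanity check, assuming binary_accuracy above is in scope and torch is installed.

import torch

# Sigmoid-style outputs for 4 samples; thresholded at 0.5.
output = torch.tensor([0.9, 0.2, 0.7, 0.4])
target = torch.tensor([1.0, 0.0, 0.0, 0.0])

# 3 of 4 thresholded predictions match the targets.
print(binary_accuracy(output, target))  # 75.0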
5,338,869
def arp(ipaddress): """Clear IP ARP table""" if ipaddress is not None: command = 'sudo ip -4 neigh show {}'.format(ipaddress) (out, err) = run_command(command, return_output=True) if not err and 'dev' in out: outputList = out.split() dev = outputList[outputList.index('dev') + 1] command = 'sudo ip -4 neigh del {} dev {}'.format(ipaddress, dev) else: click.echo("Neighbor {} not found".format(ipaddress)) return else: command = "sudo ip -4 -s -s neigh flush all" run_command(command)
5,338,870
def prepare_config(config): """ Prepares a dictionary to be stored as a json. Converts all numpy arrays to regular arrays Args: config: The config with numpy arrays Returns: The numpy free config """ c = {} for key, value in config.items(): if isinstance(value, np.ndarray): value = value.tolist() c[key] = value return c
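A small usage sketch, assuming prepare_config above is in scope; it shows that numpy arrays become plain lists so the dict can be serialized with the standard json module.

import json
import numpy as np

config = {"weights": np.array([0.1, 0.9]), "lr": 1e-3, "name": "run-42"}

# Roughly: {"weights": [0.1, 0.9], "lr": 0.001, "name": "run-42"}
print(json.dumps(prepare_config(config)))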
5,338,871
def load_config(path='config.json'):
    """
    Loads configuration from the config.json file.
    Returns station mac address, interval, and units for data request
    """
    # Open config JSON
    with open(path) as f:
        # Load JSON file to dictionary
        config = json.load(f)

    # Return mac address, interval, and units
    return (config['station_max_address'], int(config['interval']), config['units'])
5,338,872
def log_px_z(pred_logits, outcome): """ Returns Bernoulli log probability. :param pred_logits: logits for outcome 1 :param outcome: datapoint :return: log Bernoulli probability of outcome given logits in pred_logits """ pred = pred_logits.view(pred_logits.size(0), -1) y = outcome.view(outcome.size(0), -1) return -torch.sum(torch.max(pred, torch.tensor(0., device=pred.device)) - pred * y + torch.log(1 + torch.exp(-torch.abs(pred))), 1)
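As a sanity check (assuming log_px_z above is in scope and torch is installed), the numerically stable formulation should equal the negative of PyTorch's binary_cross_entropy_with_logits summed per sample.

import torch
import torch.nn.functional as F

logits = torch.randn(8, 1, 28, 28)
x = torch.randint(0, 2, (8, 1, 28, 28)).float()

manual = log_px_z(logits, x)
reference = -F.binary_cross_entropy_with_logits(
    logits.view(8, -1), x.view(8, -1), reduction="none").sum(dim=1)

print(torch.allclose(manual, reference, atol=1e-5))  # True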
5,338,873
def test_one_epoch(sess, ops, data_input): """ ops: dict mapping from string to tf ops """ is_training = False loss_sum = 0 num_batches = data_input.num_test // BATCH_SIZE acc_a_sum = [0] * 5 acc_s_sum = [0] * 5 preds = [] labels_total = [] acc_a = [0] * 5 acc_s = [0] * 5 for batch_idx in range(num_batches): if "_io" in MODEL_FILE: imgs, labels = data_input.load_one_batch(BATCH_SIZE, reader_type="io") if "resnet" in MODEL_FILE or "inception" in MODEL_FILE or "densenet" in MODEL_FILE: imgs = MODEL.resize(imgs) feed_dict = {ops['imgs_pl']: imgs, ops['labels_pl']: labels, ops['is_training_pl']: is_training} else: imgs, others, labels = data_input.load_one_batch(BATCH_SIZE) if "resnet" in MODEL_FILE or "inception" in MODEL_FILE or "densenet" in MODEL_FILE: imgs = MODEL.resize(imgs) feed_dict = {ops['imgs_pl'][0]: imgs, ops['imgs_pl'][1]: others, ops['labels_pl']: labels, ops['is_training_pl']: is_training} loss_val, pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict) preds.append(pred_val) labels_total.append(labels) loss_sum += np.mean(np.square(np.subtract(pred_val, labels))) for i in range(5): acc_a[i] = np.mean(np.abs(np.subtract(pred_val[:, 1], labels[:, 1])) < (1.0 * (i+1) / 180 * scipy.pi)) acc_a_sum[i] += acc_a[i] acc_s[i] = np.mean(np.abs(np.subtract(pred_val[:, 0], labels[:, 0])) < (1.0 * (i+1) / 20)) acc_s_sum[i] += acc_s[i] log_string('test mean loss: %f' % (loss_sum / float(num_batches))) for i in range(5): log_string('test accuracy (angle-%d): %f' % (float(i+1), (acc_a_sum[i] / float(num_batches)))) log_string('test accuracy (speed-%d): %f' % (float(i+1), (acc_s_sum[i] / float(num_batches)))) preds = np.vstack(preds) labels = np.vstack(labels_total) a_error, s_error = mean_max_error(preds, labels, dicts=get_dicts()) log_string('test error (mean-max): angle:%.2f speed:%.2f' % (a_error / scipy.pi * 180, s_error * 20)) a_error, s_error = max_error(preds, labels) log_string('test error (max): angle:%.2f speed:%.2f' % (a_error / scipy.pi * 180, s_error * 20)) a_error, s_error = mean_topk_error(preds, labels, 5) log_string('test error (mean-top5): angle:%.2f speed:%.2f' % (a_error / scipy.pi * 180, s_error * 20)) a_error, s_error = mean_error(preds, labels) log_string('test error (mean): angle:%.2f speed:%.2f' % (a_error / scipy.pi * 180, s_error * 20)) print (preds.shape, labels.shape) np.savetxt(os.path.join(TEST_RESULT_DIR, "preds_val.txt"), preds) np.savetxt(os.path.join(TEST_RESULT_DIR, "labels_val.txt"), labels) # plot_acc(preds, labels)
5,338,874
def main(): """ Main function for handling user arguments """ parser = argparse.ArgumentParser(description='Check windows hashdumps against http://cracker.offensive-security.com') parser.add_argument('priority_code', help='Priority code provided by PWK course console') parser.add_argument('hash_dump', default='-', nargs='?', help='LM/NTLM hash to be sent to cracker; default reads from STDIN') args = parser.parse_args() if args.hash_dump == "-": for line in sys.stdin.readlines(): crack_input(args.priority_code, line.strip()) else: crack_input(args.priority_code, args.hash_dump)
5,338,875
@contextlib.contextmanager
def stderr_redirector(stream: typing.BinaryIO):
    """A context manager that redirects Python stderr and C stderr to the given binary I/O stream.

    Requires `contextlib` to be imported at module level for the decorator.
    """
    def _redirect_stderr(to_fd):
        """Redirect stderr to the given file descriptor."""
        # Flush the C-level stderr buffer
        libc.fflush(c_stderr)
        # Flush and close sys.stderr - also closes the file descriptor (fd)
        sys.stderr.close()
        # Make original_stderr_fd point to the same file as to_fd
        os.dup2(to_fd, original_stderr_fd)
        # Create a new sys.stderr that points to the redirected fd
        sys.stderr = io.TextIOWrapper(os.fdopen(original_stderr_fd, 'wb'))

    # The original fd stderr points to. Usually 2 on POSIX systems.
    original_stderr_fd = sys.stderr.fileno()
    # Save a copy of the original stderr fd in saved_stderr_fd
    saved_stderr_fd = os.dup(original_stderr_fd)
    # Create a temporary file and redirect stderr to it
    tfile = tempfile.TemporaryFile(mode='w+b')
    try:
        _redirect_stderr(tfile.fileno())
        # Yield to caller, then redirect stderr back to the saved fd
        yield
        _redirect_stderr(saved_stderr_fd)
        # Copy contents of temporary file to the given stream
        tfile.flush()
        tfile.seek(0, io.SEEK_SET)
        stream.write(tfile.read())
    finally:
        tfile.close()
        os.close(saved_stderr_fd)
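A usage sketch, assuming stderr_redirector above is in scope, it is run as a plain script (so fd 2 is the real stderr), and the surrounding module defines the libc / c_stderr handles it relies on; the ctypes setup shown here is an assumption for Linux/glibc (on macOS the symbol is '__stderrp').

import ctypes
import io
import os

# Assumed module-level handles used by stderr_redirector (Linux/glibc):
libc = ctypes.CDLL(None)
c_stderr = ctypes.c_void_p.in_dll(libc, 'stderr')

captured = io.BytesIO()
with stderr_redirector(captured):
    # Writes that bypass sys.stderr (e.g. from a C library) land on fd 2
    # and are captured the same way as this direct write.
    os.write(2, b"written straight to fd 2\n")
print(captured.getvalue())  # b'written straight to fd 2\n'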
5,338,876
def _sort_rows(matrix, num_rows): """Sort matrix rows by the last column. Args: matrix: a matrix of values (row,col). num_rows: (int) number of sorted rows to return from the matrix. Returns: Tensor (num_rows, col) of the sorted matrix top K rows. """ tmatrix = tf.transpose(a=matrix, perm=[1, 0]) sorted_tmatrix = tf.nn.top_k(tmatrix, num_rows)[0] return tf.transpose(a=sorted_tmatrix, perm=[1, 0])
5,338,877
def partial_at(func, indices, *args): """Partial function application for arguments at given indices.""" @functools.wraps(func) def wrapper(*fargs, **fkwargs): nargs = len(args) + len(fargs) iargs = iter(args) ifargs = iter(fargs) posargs = (next((ifargs, iargs)[i in indices]) for i in range(nargs)) # posargs = list( posargs ) # print( 'posargs', posargs ) return func(*posargs, **fkwargs) return wrapper
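A short usage example, assuming partial_at above is in scope (it needs functools imported at module level). Positions listed in `indices` are pinned to the pre-supplied arguments, in order; the remaining positions are filled from the call-time arguments.

def describe(name, age, city):
    return "{} ({}) from {}".format(name, age, city)

# Pin positions 0 and 2; position 1 is supplied at call time.
partial_describe = partial_at(describe, {0, 2}, "Ada", "London")

print(partial_describe(36))  # Ada (36) from London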
5,338,878
def try_load_module(module_name):
    """
    Import a module by name, print the version info and file name.
    Return None on failure.
    """
    try:
        import importlib
        mod = importlib.import_module(module_name)
        print green("%s %s:" % (module_name, mod.__version__)), mod.__file__
        return mod
    except ImportError:
        print yellow("Could not find %s" % module_name)
        return None
5,338,879
def test_VaultFile_load( testcase, vault_yaml, password, server_schema, exp_data, exp_encrypted): """ Test function for VaultFile._load_vault_file() """ with TempDirectory() as tmp_dir: # Create the vault file filename = 'tmp_vault.yml' filepath = os.path.join(tmp_dir.path, filename) if isinstance(vault_yaml, six.text_type): vault_yaml = vault_yaml.encode('utf-8') tmp_dir.write(filename, vault_yaml) if password: vault = easy_vault.EasyVault(filepath, password) vault.encrypt() del vault # The code to be tested act_data, act_encrypted = _load_vault_file( filepath, password, use_keyring=False, use_prompting=False, verbose=False, server_schema=server_schema) # Ensure that exceptions raised in the remainder of this function # are not mistaken as expected exceptions assert testcase.exp_exc_types is None, \ "Expected exception not raised: {}". \ format(testcase.exp_exc_types) assert act_data == exp_data assert act_encrypted == exp_encrypted
5,338,880
def project_image(request, uid):
    """
    GET request : return project image
    PUT request : change project image
    """
    project = Project.objects.filter(uid=uid).first()
    imgpath = project.image.path if project.image else get_thumbnail()

    if request.method == "PUT":
        file_object = request.data.get("file")
        imgpath = change_image(obj=project, file_object=file_object)

    with open(imgpath, "rb") as img:
        data = img.read()

    return HttpResponse(content=data, content_type="image/jpeg")
5,338,881
def validate(prefix: str, identifier: str) -> Optional[bool]: """Validate the identifier against the prefix's pattern, if it exists. :param prefix: The prefix in the CURIE :param identifier: The identifier in the CURIE :return: Whether this identifier passes validation, after normalization >>> validate("chebi", "1234") True >>> validate("chebi", "CHEBI:12345") True >>> validate("chebi", "CHEBI:ABCD") False """ resource = get_resource(prefix) if resource is None: return None return resource.validate_identifier(identifier)
5,338,882
def test_correct_config(): """Test whether config parser properly parses configuration file""" flexmock(builtins, open=StringIO(correct_config)) res_key, res_secret = twitter.parse_configuration("some_path") assert res_key == key assert res_secret == secret
5,338,883
def laplacian_positional_encoding(g, pos_enc_dim): """ Graph positional encoding v/ Laplacian eigenvectors """ # Laplacian A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float) N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) L = sp.eye(g.number_of_nodes()) - N * A * N # Eigenvectors with scipy #EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR') EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR', tol=1e-2) EigVec = EigVec[:, EigVal.argsort()] # increasing order out = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float() return out
5,338,884
async def replace_chain():
    """ replaces the current chain with the most recent and longest chain """
    blockchain.replace_chain()
    is_valid = blockchain.is_chain_valid(chain=blockchain.chain)
    return {'message': 'chain has been updated and is valid' if is_valid
                       else 'chain has been updated but failed validation',
            'longest chain': blockchain.chain}
5,338,885
def ucb(bufferx, objective_weights, regression_models, param_space, scalarization_method, objective_limits, iteration_number, model_type, classification_model=None, number_of_cpus=0):
    """
    Multi-objective ucb acquisition function as detailed in https://arxiv.org/abs/1805.12168.
    The mean and variance of the predictions are computed as defined by Hutter et al.: https://arxiv.org/pdf/1211.0906.pdf
    :param bufferx: a list of tuples containing the points to predict and scalarize.
    :param objective_weights: a list containing the weights for each objective.
    :param regression_models: the surrogate models used to evaluate points.
    :param param_space: a space object containing the search space.
    :param scalarization_method: a string indicating which scalarization method to use.
    :param objective_limits: a dictionary with estimated minimum and maximum values for each objective.
    :param iteration_number: an integer for the current iteration number, used to compute the beta
    :param model_type: a string indicating which type of surrogate model is used.
    :param classification_model: the surrogate model used to evaluate feasibility constraints
    :param number_of_cpus: an integer for the number of cpus to be used in parallel.
    :return: a list of scalarized values for each point in bufferx.
    """
    beta = np.sqrt(0.125*np.log(2*iteration_number + 1))
    augmentation_constant = 0.05
    number_of_predictions = len(bufferx)
    tmp_objective_limits = copy.deepcopy(objective_limits)

    prediction_means, prediction_variances = models.compute_model_mean_and_uncertainty(bufferx, regression_models, model_type, param_space, var=True)

    if classification_model is not None:
        classification_prediction_results = models.model_probabilities(bufferx, classification_model, param_space)
        feasible_parameter = param_space.get_feasible_parameter()[0]
        true_value_index = classification_model[feasible_parameter].classes_.tolist().index(True)
        feasibility_indicator = classification_prediction_results[feasible_parameter][:, true_value_index]
    else:
        feasibility_indicator = [1]*number_of_predictions  # if no classification model is used, then all points are feasible

    # Compute scalarization
    if (scalarization_method == "linear"):
        scalarized_predictions = np.zeros(number_of_predictions)
        beta_factor = 0
        for objective in regression_models:
            scalarized_predictions += objective_weights[objective]*prediction_means[objective]
            beta_factor += objective_weights[objective]*prediction_variances[objective]
        scalarized_predictions -= beta*np.sqrt(beta_factor)
        scalarized_predictions = scalarized_predictions*feasibility_indicator
    # The paper does not propose this; I applied their methodology to the original tchebyshev to get the approach below.
    # Important: since this was not proposed in the paper, their proofs and bounds for the modified_tchebyshev may not be valid here.
elif(scalarization_method == "tchebyshev"): scalarized_predictions = np.zeros(number_of_predictions) total_values = np.zeros(number_of_predictions) for objective in regression_models: scalarized_values = objective_weights[objective] * np.absolute(prediction_means[objective] - beta*np.sqrt(prediction_variances[objective])) total_values += scalarized_values scalarized_predictions = np.maximum(scalarized_values, scalarized_predictions) scalarized_predictions += augmentation_constant*total_values scalarized_predictions = scalarized_predictions*feasibility_indicator elif(scalarization_method == "modified_tchebyshev"): scalarized_predictions = np.full((number_of_predictions), float("inf")) reciprocated_weights = reciprocate_weights(objective_weights) for objective in regression_models: scalarized_value = reciprocated_weights[objective] * (prediction_means[objective] - beta*np.sqrt(prediction_variances[objective])) scalarized_predictions = np.minimum(scalarized_value, scalarized_predictions) scalarized_predictions = scalarized_predictions*feasibility_indicator scalarized_predictions = -scalarized_predictions # We will minimize later, but we want to maximize instead, so we invert the sign else: print("Error: unrecognized scalarization method:", scalarization_method) raise SystemExit return scalarized_predictions, tmp_objective_limits
5,338,886
def np_array_to_binary_vector(np_arr): """ Converts a NumPy array to the RDKit ExplicitBitVector type. """ binary_vector = DataStructs.ExplicitBitVect(len(np_arr)) binary_vector.SetBitsFromList(np.where(np_arr)[0].tolist()) return binary_vector
5,338,887
def augment_features(data, feature_augmentation): """ Augment features for a given data matrix. :param data: Data matrix. :param feature_augmentation: Function applied to augment the features. :return: Augmented data matrix. """ if data is not None and feature_augmentation is not None: if isinstance(feature_augmentation, list): for augmentation_function in feature_augmentation: data = augmentation_function(data) else: data = feature_augmentation(data) return data
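A small usage sketch, assuming augment_features above is in scope; passing a list applies the augmentation functions in order.

import numpy as np

def add_squares(data):
    """Append element-wise squares as extra feature columns."""
    return np.hstack([data, data ** 2])

def add_bias(data):
    """Append a constant bias column."""
    return np.hstack([data, np.ones((data.shape[0], 1))])

X = np.array([[1.0, 2.0], [3.0, 4.0]])

# 2 columns -> 4 after add_squares -> 5 after add_bias.
X_aug = augment_features(X, [add_squares, add_bias])
print(X_aug.shape)  # (2, 5)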
5,338,888
def _get_data_from_empty_list(source, fields='*', first_row=0, count=-1, schema=None): """ Helper function for _get_data that handles empty lists. """ fields = get_field_list(fields, schema) return {'cols': _get_cols(fields, schema), 'rows': []}, 0
5,338,889
def setup(bot: Monty) -> None: """Load the TokenRemover cog.""" bot.add_cog(TokenRemover(bot))
5,338,890
def copy_keys_except(dic, *keys): """Return a copy of the dict without the specified items. """ ret = dic.copy() for key in keys: try: del ret[key] except KeyError: pass return ret
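A tiny usage example, assuming copy_keys_except above is in scope; the original dict is left untouched and keys that are absent are simply ignored.

settings = {"host": "localhost", "port": 5432, "password": "secret"}

public = copy_keys_except(settings, "password", "token")
print(public)    # {'host': 'localhost', 'port': 5432}
print(settings)  # still contains 'password'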
5,338,891
def get_params(img, scale, ratio): """Get parameters for ``crop`` for a random sized crop. Args: img (PIL Image): Image to be cropped. scale (tuple): range of size of the origin size cropped ratio (tuple): range of aspect ratio of the origin aspect ratio cropped Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for a random sized crop. """ area = img.size[0] * img.size[1] for attempt in range(10): target_area = random.uniform(*scale) * area log_ratio = (math.log(ratio[0]), math.log(ratio[1])) aspect_ratio = math.exp(random.uniform(*log_ratio)) w = int(round(math.sqrt(target_area * aspect_ratio))) h = int(round(math.sqrt(target_area / aspect_ratio))) if w <= img.size[0] and h <= img.size[1]: i = random.randint(0, img.size[1] - h) j = random.randint(0, img.size[0] - w) return i, j, h, w # Fallback to central crop in_ratio = img.size[0] / img.size[1] if in_ratio < min(ratio): w = img.size[0] h = int(round(w / min(ratio))) elif in_ratio > max(ratio): h = img.size[1] w = int(round(h * max(ratio))) else: # whole image w = img.size[0] h = img.size[1] i = (img.size[1] - h) // 2 j = (img.size[0] - w) // 2 return i, j, h, w
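A usage sketch, assuming get_params above is in scope together with the random and math modules it relies on; it mirrors torchvision's RandomResizedCrop.get_params, so a blank PIL image is enough to exercise it.

import random
from PIL import Image

random.seed(0)
img = Image.new("RGB", (640, 480))

# Sample a crop covering 8%-100% of the area with aspect ratio in [3/4, 4/3].
i, j, h, w = get_params(img, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3))
crop = img.crop((j, i, j + w, i + h))  # PIL box is (left, upper, right, lower)
print((i, j, h, w), crop.size)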
5,338,892
async def start_time() -> Any: """ Returns the contest start time. """ return schemas.Timestamp(timestamp=settings.EVENT_START_TIME)
5,338,893
def reshape(v, shape): """Implement `reshape`.""" return np.reshape(v, shape)
5,338,894
def generate_html_from_module(module):
    """
    Extracts a module's documentation from a module object into an HTML string.
    Callables without an __annotations__ attribute (e.g. built-in functions)
    are handled gracefully.
    :param module: Module object to extract documentation from
    :return: String representation of an HTML file
    """
    html_content = f"<html><head><title>{module.__name__} Doc</title></head><body><h1>Module {module.__name__}:</h1>"
    html_content += f"<p>{module.__doc__}</p>"
    for function in module.__dict__:
        if callable(getattr(module, function)):
            html_content += f"<h2>Function {function}:</h2>"
            html_content += f"{getattr(module, function).__doc__}"
            html_content += f"<h3>Annotations:</h3>"
            for annotation in getattr(getattr(module, function), "__annotations__", {}).keys():
                html_content += f"{annotation} <br>"
    html_content += "</body></html>"
    return html_content
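A small usage sketch, assuming generate_html_from_module above is in scope; a throwaway module is built with types.ModuleType so the example does not depend on any particular installed package.

import types

demo = types.ModuleType("demo", "A tiny demo module.")

def greet(name: str) -> str:
    """Return a greeting for `name`."""
    return "Hello, " + name

demo.greet = greet

html = generate_html_from_module(demo)
print(html[:80])  # starts with the <html><head><title>demo Doc ... header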
5,338,895
def _phi(r, order):
    """Coordinate-wise nonlinearity used to define the order of the interpolation.

    See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.

    Args:
      r: input op
      order: interpolation order

    Returns:
      phi_k evaluated coordinate-wise on r, for k = order
    """
    # using EPSILON prevents log(0), sqrt(0), etc.
    # sqrt(0) is well-defined, but its gradient is not
    with tf.name_scope("phi"):
        if order == 1:
            r = tf.maximum(r, EPSILON)
            r = tf.sqrt(r)
            return r
        elif order == 2:
            return 0.5 * r * tf.math.log(tf.maximum(r, EPSILON))
        elif order == 4:
            return 0.5 * tf.square(r) * tf.math.log(tf.maximum(r, EPSILON))
        elif order % 2 == 0:
            r = tf.maximum(r, EPSILON)
            return 0.5 * tf.pow(r, 0.5 * order) * tf.math.log(r)
        else:
            r = tf.maximum(r, EPSILON)
            return tf.pow(r, 0.5 * order)
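A small numeric check, assuming _phi above is in scope, TensorFlow 2 is imported as tf, and EPSILON is the module-level constant it references (1e-10 here is an assumed value). For order 2 the kernel is the thin-plate spline 0.5 * r * log(r).

import numpy as np
import tensorflow as tf

EPSILON = 1e-10  # assumed module-level constant used by _phi

r = tf.constant([0.0, 1.0, np.e], dtype=tf.float32)
# Roughly [0.0, 0.0, 1.359]: 0.5 * e * ln(e) = e / 2
print(_phi(r, order=2).numpy())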
5,338,896
def updated_topology_description(topology_description, server_description): """Return an updated copy of a TopologyDescription. :Parameters: - `topology_description`: the current TopologyDescription - `server_description`: a new ServerDescription that resulted from a hello call Called after attempting (successfully or not) to call hello on the server at server_description.address. Does not modify topology_description. """ address = server_description.address # These values will be updated, if necessary, to form the new # TopologyDescription. topology_type = topology_description.topology_type set_name = topology_description.replica_set_name max_set_version = topology_description.max_set_version max_election_id = topology_description.max_election_id server_type = server_description.server_type # Don't mutate the original dict of server descriptions; copy it. sds = topology_description.server_descriptions() # Replace this server's description with the new one. sds[address] = server_description if topology_type == TOPOLOGY_TYPE.Single: # Set server type to Unknown if replica set name does not match. if (set_name is not None and set_name != server_description.replica_set_name): error = ConfigurationError( "client is configured to connect to a replica set named " "'%s' but this node belongs to a set named '%s'" % ( set_name, server_description.replica_set_name)) sds[address] = server_description.to_unknown(error=error) # Single type never changes. return TopologyDescription( TOPOLOGY_TYPE.Single, sds, set_name, max_set_version, max_election_id, topology_description._topology_settings) if topology_type == TOPOLOGY_TYPE.Unknown: if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer): if len(topology_description._topology_settings.seeds) == 1: topology_type = TOPOLOGY_TYPE.Single else: # Remove standalone from Topology when given multiple seeds. sds.pop(address) elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost): topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type] if topology_type == TOPOLOGY_TYPE.Sharded: if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown): sds.pop(address) elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary: if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): sds.pop(address) elif server_type == SERVER_TYPE.RSPrimary: (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary(sds, set_name, server_description, max_set_version, max_election_id) elif server_type in ( SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): topology_type, set_name = _update_rs_no_primary_from_member( sds, set_name, server_description) elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): sds.pop(address) topology_type = _check_has_primary(sds) elif server_type == SERVER_TYPE.RSPrimary: (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary(sds, set_name, server_description, max_set_version, max_election_id) elif server_type in ( SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): topology_type = _update_rs_with_primary_from_member( sds, set_name, server_description) else: # Server type is Unknown or RSGhost: did we just lose the primary? topology_type = _check_has_primary(sds) # Return updated copy. return TopologyDescription(topology_type, sds, set_name, max_set_version, max_election_id, topology_description._topology_settings)
5,338,897
def test_success(database): """ Testing valid program activity name for the corresponding TAS/TAFS as defined in Section 82 of OMB Circular A-11. """ populate_publish_status(database) af_1 = AwardFinancialFactory(row_number=1, agency_identifier='test', submission_id=1, main_account_code='test', program_activity_name='test', program_activity_code='test') af_2 = AwardFinancialFactory(row_number=2, agency_identifier='test', submission_id=1, main_account_code='test', program_activity_name='test', program_activity_code='test') pa = ProgramActivityFactory(fiscal_year_quarter='FY17Q1', agency_id='test', allocation_transfer_id='test', account_number='test', program_activity_name='test', program_activity_code='test') submission = SubmissionFactory(submission_id=1, reporting_fiscal_year='2017', reporting_fiscal_period=3, publish_status_id=PUBLISH_STATUS_DICT['unpublished']) assert number_of_errors(_FILE, database, models=[af_1, af_2, pa], submission=submission) == 0
5,338,898
def get_absolute_filepath(filepath: str) -> str: """Returns absolute filepath of the file/folder from the given `filepath` (along with the extension, if any)""" absolute_filepath = os.path.realpath(path=filepath) return absolute_filepath
5,338,899