content: string (lengths 22 – 815k)
id: int64 (0 – 4.91M)
import random

def generate_id():
    """Generate a 32-character hexadecimal id."""
    return "%032x" % random.randrange(16 ** 32)
5,327,500
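A minimal usage sketch for the snippet above, assuming generate_id and its random import are in scope; it checks the fixed 32-character width and that every character is hexadecimal:

uid = generate_id()
assert len(uid) == 32
int(uid, 16)  # raises ValueError if any character is not a hex digit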
def show_final_report(found_files: int, optimized_files: int, src_size: int,
                      bytes_saved: int, time_passed: float):
    """
    Show a final report with the time spent and filesize savings

    :param found_files: number of found image files
    :param optimized_files: number of image files that were processed
    :param src_size: original sum of file sizes
    :param bytes_saved: savings in file sizes (sum)
    :param time_passed: specify -1 in order to hide this (watch directory)
    """
    fps = found_files / time_passed
    if bytes_saved:
        average = bytes_saved / optimized_files
        percent = bytes_saved / src_size * 100
    else:
        average = 0
        percent = 0

    report = f"\n{40 * '-'}\n"
    if time_passed == -1:
        report += f"\n Processed {found_files} files ({human(src_size)})."
    else:
        report += f"\n Processed {found_files} files ({human(src_size)}) in " \
                  f"{time_passed:.1f}s ({fps:.1f} f/s)."
    report += f"\n Optimized {optimized_files} files." \
              f"\n Average savings: {human(average)} per optimized file" \
              f"\n Total space saved: {human(bytes_saved)} / {percent:.1f}%\n"
    print(report)
5,327,501
def __charge_to_sdf(charge):
    """Translate RDKit charge to the SDF language.

    Args:
        charge (int): Numerical atom charge.

    Returns:
        str: Str representation of a charge in the sdf language
    """
    if charge == -3:
        return "7"
    elif charge == -2:
        return "6"
    elif charge == -1:
        return "5"
    elif charge == 0:
        return "0"
    elif charge == 1:
        return "+1"
    elif charge == 2:
        return "+2"
    elif charge == 3:
        return "+4"
    else:
        return "0"
5,327,502
def validate(epoch, model, gmm, ema=None):
    """
    - Deploys the color normalization on test image dataset
    - Evaluates NMI / CV / SD
    # Evaluates the cross entropy between p_data and p_model.
    """
    print("Starting Validation")
    model = parallelize(model)
    gmm = parallelize(gmm)
    model.to(device)
    gmm.to(device)
    bpd_meter = utils.AverageMeter()
    ce_meter = utils.AverageMeter()

    if ema is not None:
        ema.swap()
    update_lipschitz(model)
    model.eval()
    gmm.eval()

    mu_tmpl = 0
    std_tmpl = 0
    N = 0
    print(f"Deploying on {len(train_loader)} batches of {args.batchsize} templates...")
    idx = 0
    for x, y in tqdm(train_loader):
        x = x.to(device)
        ### TEMPLATES ###
        D = x[:, 0, ...].unsqueeze(1)
        D = rescale(D)  # Scale to [0,1] interval
        D = D.repeat(1, args.nclusters, 1, 1)
        with torch.no_grad():
            if isinstance(model, torch.nn.DataParallel):
                z_logp = model.module(D.view(-1, *input_size[1:]), 0, classify=False)
            else:
                z_logp = model(D.view(-1, *input_size[1:]), 0, classify=False)
            z, delta_logp = z_logp
            if isinstance(gmm, torch.nn.DataParallel):
                logpz, params = gmm.module(z.view(-1, args.nclusters, args.imagesize, args.imagesize), x.permute(0, 2, 3, 1))
            else:
                logpz, params = gmm(z.view(-1, args.nclusters, args.imagesize, args.imagesize), x.permute(0, 2, 3, 1))
        mu, std, gamma = params
        mu = mu.cpu().numpy()
        std = std.cpu().numpy()
        gamma = gamma.cpu().numpy()
        mu = mu[..., np.newaxis]
        std = std[..., np.newaxis]
        mu = np.swapaxes(mu, 0, 1)    # (3,4,1) -> (4,3,1)
        mu = np.swapaxes(mu, 1, 2)    # (4,3,1) -> (4,1,3)
        std = np.swapaxes(std, 0, 1)  # (3,4,1) -> (4,3,1)
        std = np.swapaxes(std, 1, 2)  # (4,3,1) -> (4,1,3)
        N = N + 1
        mu_tmpl = (N - 1) / N * mu_tmpl + 1 / N * mu
        std_tmpl = (N - 1) / N * std_tmpl + 1 / N * std
        if idx == len(train_loader) - 1:
            break
        idx += 1

    print("Estimated Mu for template(s):")
    print(mu_tmpl)
    print("Estimated Sigma for template(s):")
    print(std_tmpl)

    metrics = dict()
    for tc in range(1, args.nclusters + 1):
        metrics[f'mean_{tc}'] = []
        metrics[f'median_{tc}'] = []
        metrics[f'perc_95_{tc}'] = []
        metrics[f'nmi_{tc}'] = []
        metrics[f'sd_{tc}'] = []
        metrics[f'cv_{tc}'] = []

    print(f"Predicting on {len(test_loader)} batches of {args.val_batchsize} templates...")
    idx = 0
    for x_test, y_test in tqdm(test_loader):
        x_test = x_test.to(device)
        ### DEPLOY ###
        D = x_test[:, 0, ...].unsqueeze(1)
        D = rescale(D)  # Scale to [0,1] interval
        D = D.repeat(1, args.nclusters, 1, 1)
        with torch.no_grad():
            if isinstance(model, torch.nn.DataParallel):
                z_logp = model.module(D.view(-1, *input_size[1:]), 0, classify=False)
            else:
                z_logp = model(D.view(-1, *input_size[1:]), 0, classify=False)
            z, delta_logp = z_logp
            if isinstance(gmm, torch.nn.DataParallel):
                logpz, params = gmm.module(z.view(-1, args.nclusters, args.imagesize, args.imagesize), x_test.permute(0, 2, 3, 1))
            else:
                logpz, params = gmm(z.view(-1, args.nclusters, args.imagesize, args.imagesize), x_test.permute(0, 2, 3, 1))
        mu, std, pi = params
        mu = mu.cpu().numpy()
        std = std.cpu().numpy()
        pi = pi.cpu().numpy()
        mu = mu[..., np.newaxis]
        std = std[..., np.newaxis]
        mu = np.swapaxes(mu, 0, 1)    # (3,4,1) -> (4,3,1)
        mu = np.swapaxes(mu, 1, 2)    # (4,3,1) -> (4,1,3)
        std = np.swapaxes(std, 0, 1)  # (3,4,1) -> (4,3,1)
        std = np.swapaxes(std, 1, 2)  # (4,3,1) -> (4,1,3)
        X_hsd = np.swapaxes(x_test.cpu().numpy(), 1, 2)
        X_hsd = np.swapaxes(X_hsd, 2, 3)
        X_conv = imgtf.image_dist_transform(X_hsd, mu, std, pi, mu_tmpl, std_tmpl, args)
        ClsLbl = np.argmax(np.asarray(pi), axis=-1)
        ClsLbl = ClsLbl.astype('int32')
        mean_rgb = np.mean(X_conv, axis=-1)
        for tc in range(1, args.nclusters + 1):
            msk = ClsLbl == tc
            if not msk.any():
                continue  # skip metric if no class labels are found
            ma = mean_rgb[msk]
            mean = np.mean(ma)
            median = np.median(ma)
            perc = np.percentile(ma, 95)
            nmi = median / perc
            metrics[f'mean_{tc}'].append(mean)
            metrics[f'median_{tc}'].append(median)
            metrics[f'perc_95_{tc}'].append(perc)
            metrics[f'nmi_{tc}'].append(nmi)
        if idx == len(test_loader) - 1:
            break
        idx += 1

    av_sd = []
    av_cv = []
    for tc in range(1, args.nclusters + 1):
        if len(metrics[f'mean_{tc}']) == 0:
            continue
        metrics[f'sd_{tc}'] = np.array(metrics[f'nmi_{tc}']).std()
        metrics[f'cv_{tc}'] = np.array(metrics[f'nmi_{tc}']).std() / np.array(metrics[f'nmi_{tc}']).mean()
        print(f'sd_{tc}:', metrics[f'sd_{tc}'])
        print(f'cv_{tc}:', metrics[f'cv_{tc}'])
        av_sd.append(metrics[f'sd_{tc}'])
        av_cv.append(metrics[f'cv_{tc}'])
    print(f"Average sd = {np.array(av_sd).mean()}")
    print(f"Average cv = {np.array(av_cv).mean()}")

    import csv
    file = open(f"metrics-{args.train_centers[0]}-{args.val_centers[0]}.csv", "w")
    writer = csv.writer(file)
    for key, value in metrics.items():
        writer.writerow([key, value])
    file.close()

    # correct = 0
    # total = 0
    # start = time.time()
    # with torch.no_grad():
    #     for i, (x, y) in enumerate(tqdm(test_loader)):
    #         x = x.to(device)
    #         bpd, logits, _, _ = compute_loss(x, model)
    #         bpd_meter.update(bpd.item(), x.size(0))
    # val_time = time.time() - start
    # if ema is not None:
    #     ema.swap()
    # s = 'Epoch: [{0}]\tTime {1:.2f} | Test bits/dim {bpd_meter.avg:.4f}'.format(epoch, val_time, bpd_meter=bpd_meter)
    # if args.task in ['classification', 'hybrid']:
    #     s += ' | CE {:.4f} | Acc {:.2f}'.format(ce_meter.avg, 100 * correct / total)
    # logger.info(s)
    # return bpd_meter.avg
    return
5,327,503
async def fail_detect(state):
    """
    Async method to detect a failure in a node (forced a failure to detect for testing)
    Accepts: state which is an empty object from the udp_broadcast class
    Returns: nothing, it pops nodes from dictionary if node fails and notifies
             if popped node is coordinator, coordinator failed
    """
    failed_key = None
    while True:
        # set each key to false, indicates has failed
        for k in state.nodes:
            state.nodes[k] = False
        await asyncio.sleep(10)
        for k in state.nodes:
            # check if false, indicates failure
            if not state.nodes[k]:
                failed_key = k
        # if key has failed, remove the key from nodes with pop
        if failed_key:
            state.nodes.pop(failed_key)
            failed_key = None
        # the coordinator is not in nodes, then it has failed, could have been popped
        if not (state.coordinator in state.nodes):
            print("Coordinator failed... Starting election...")
            state.coordinator = None
            # call election to determine new coordinator
            asyncio.create_task(election(state))
5,327,504
def print_process_acro_found_help():
    """Prints acronym handling user commands help"""
    print(_(" y: Accept    - Saves the acronym with the information shown and updates the database."))
    print(_(" n: Skip      - Discards the acronym and moves on to the next one."))
    print(_(" e: Edit      - Modifies a definition. 'Main' holds the definition in the source language."))
    print(_("                'Translation' holds the definition in Spanish. Leave empty if not needed."))
    print(_(" a: Add       - Adds an additional definition."))
    print(_(" s: Select    - Selects the definitions to use. Deselected ones (in grey) will not be included"))
    print(_("                in the output acronym table. Unlike delete, the definition is kept in the database."))
    print(_(" b: Blacklist - Toggles the acronym's blacklist status. Blacklisted acronyms are skipped"))
    print(_("                automatically when processing in auto or semi-automatic mode."))
    print(_(" d: Delete    - Removes the acronym or one of its definitions from the database."))
    print(_(" z: Undo      - Goes back to the previous acronym."))
    print(_(" m: Mode      - Toggles between manual and semi-automatic processing modes."))
    print(_(" h: Help      - Shows this information."))
5,327,505
def show_diff_popup(git_gutter, **kwargs):
    """Show the diff popup.

    Arguments:
        git_gutter (GitGutterCommand):
            The main command object, which represents GitGutter.
        kwargs (dict):
            The arguments passed from GitGutterDiffPopupCommand
            to GitGutterCommand.
    """
    if not git_gutter.git_handler.in_repo():
        return
    # validate highlighting argument
    highlight_diff = kwargs.get('highlight_diff')
    if highlight_diff is None:
        mode = git_gutter.settings.get('diff_popup_default_mode', '')
        highlight_diff = mode == 'diff'
    # validate point argument
    point = kwargs.get('point')
    if point is None:
        selection = git_gutter.view.sel()
        if not selection:
            return
        point = selection[0].end()
    # get line number from text point
    line = git_gutter.view.rowcol(point)[0] + 1
    # create popup asynchronously in case it takes several 100ms
    _show_diff_popup_impl(
        git_gutter, line, highlight_diff, kwargs.get('flags', 0),
        git_gutter.git_handler.diff_line_change(line))
5,327,506
def gctx2gct_main(args):
    """ Separate from main() in order to make command-line tool. """
    in_gctoo = parse_gctx.parse(args.filename, convert_neg_666=False)

    if args.output_filepath is None:
        basename = os.path.basename(args.filename)
        out_name = os.path.splitext(basename)[0] + ".gct"
    else:
        out_name = args.output_filepath

    write_gct.write(in_gctoo, out_name)
5,327,507
def save_str(self=str(''), filename=str('output.txt'), permissions=str('w')):
    """Save a given string to disk using a given file name.

    Args:
        self(str): String to save to disk. (default str(''))
        filename(str): File name to use when saving to disk. (default str('output.txt'))
        permissions(str): Permissions to use when opening file. (default str('w'))

    Returns:
        dict: Input parameters specified."""
    # Convert variables
    if self is not None:
        string = str(self)
    if filename is not None:
        filename = str(filename)
    if permissions is not None:
        permissions = str(permissions)
    # Open file on disk, write to it, and close it again.
    with open(filename, permissions) as file_on_disk:
        file_on_disk.write(string)
    # Create dict of specified parameters.
    params = dict({'string': [string],
                   'filename': [filename],
                   'permissions': [permissions]})
    # Return dict of specified parameters.
    return params
5,327,508
def balanced(banked_chemicals):
    """Return True if all non-ore chemicals have non-negative amounts."""
    def _enough(chemical):
        return chemical == "ORE" or banked_chemicals[chemical] >= 0
    return all(map(_enough, banked_chemicals))
5,327,509
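A small worked example for balanced above, assuming it is in scope; the bank dict below is hypothetical test data. ORE is exempt from the non-negativity check, so only the overdrawn non-ore chemical trips it:

bank = {"ORE": -30, "FUEL": 1, "A": 0}
assert balanced(bank)       # ORE may go negative
bank["A"] = -2
assert not balanced(bank)   # a non-ore chemical is overdrawn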
def add_log(log, logfile):
    """It sets the formatter for the handler and adds that handler to the logger.

    Args:
        log(Logging.logger): The logger object used for logging.
        logfile(str): path for the log file.

    Returns:
        None
    """
    dir = os.path.dirname(logfile)
    if not os.path.exists(dir) and dir != "":
        os.makedirs(dir)
    handler = logging.FileHandler(logfile)
    formatter = PopperFormatter(colors=False)
    # Set the formatter on the handler and attach it to the logger.
    handler.setFormatter(formatter)
    log.addHandler(handler)
5,327,510
def test_default_params(frames: Tuple[Request, Response]) -> None:
    """Test frame attributes."""
    for frame in frames:
        assert frame.recipient == BROADCAST_ADDRESS
        assert frame.message == b""
        assert frame.sender == ECONET_ADDRESS
        assert frame.sender_type == ECONET_TYPE
        assert frame.econet_version == ECONET_VERSION
5,327,511
import bz2
import json

def dict2json(thedict, json_it=False, compress_it=False):
    """If json_it, convert thedict to json;
    if compress_it, do a bzip2 compression on the json."""
    if compress_it:
        return bz2.compress(json.dumps(thedict).encode())
    elif json_it:
        return json.dumps(thedict)
    else:
        return thedict
5,327,512
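A round-trip sketch for dict2json above, assuming the function and its bz2/json imports are in scope; it exercises each of the three return paths:

payload = {"a": 1, "b": [2, 3]}
blob = dict2json(payload, compress_it=True)
assert json.loads(bz2.decompress(blob)) == payload              # compressed path
assert dict2json(payload, json_it=True) == json.dumps(payload)  # json path
assert dict2json(payload) is payload                            # no flags: passthrough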
def parse_arguments():
    """
    Parse command line arguments.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="Python script for minimizing unit cell."
    )
    subparser = parser.add_subparsers(dest='command')
    subparser.required = True
    yaml_parse = subparser.add_parser("yaml")
    xml_parse = subparser.add_parser("xml")

    yaml_parse.add_argument(
        '--input', "-i", type=str, help="Input yaml file", required=True
    )

    xml_parse.add_argument(
        '--nvt', dest='nvt', action='store_true', default=False,
        required=False, help="Perform md in nvt only."
    )
    xml_parse.add_argument(
        '--input', "-i", type=str, help="Input xml file", required=True
    )
    xml_parse.add_argument(
        '--pdb', "-p", type=str, help="Input pdb file", required=True
    )
    xml_parse.add_argument(
        '--prefix', "-pre", type=str,
        help="Output prefix for csv and dcd files.",
        default="xtal_md", required=False
    )
    xml_parse.add_argument(
        '--nanoseconds', "-ns", type=int,
        help="Production length in nanoseconds.",
        required=False, default=100
    )
    xml_parse.add_argument(
        '--replicates', "-r", type=int,
        help="Number of replicates to generate.",
        required=False, default=10
    )
    xml_parse.add_argument(
        '--temperature', "-t", type=float,
        help="Target temperature in md run.",
        required=False, default=298.15
    )

    return parser.parse_args()
5,327,513
def get_image(roidb, config):
    """
    preprocess image and return processed roidb
    :param roidb: a list of roidb
    :return: list of img as in mxnet format
             roidb add new item['im_info']
             0 --- x (width, second dim of im)
             |
             y (height, first dim of im)
    """
    num_images = len(roidb)
    processed_ims = []
    processed_roidb = []
    for i in range(num_images):
        roi_rec = roidb[i]
        assert os.path.exists(roi_rec['image']), '%s does not exist' % roi_rec['image']
        im = cv2.imread(roi_rec['image'], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        new_rec = roi_rec.copy()
        scale_ind = random.randrange(len(config.SCALES))
        target_size = config.SCALES[scale_ind][0]
        max_size = config.SCALES[scale_ind][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        processed_ims.append(im_tensor)
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        new_rec['boxes'] = clip_boxes(np.round(roi_rec['boxes'].copy() * im_scale), im_info[:2])
        new_rec['im_info'] = im_info
        processed_roidb.append(new_rec)
    return processed_ims, processed_roidb
5,327,514
def compoundedInterest(fv, p):
    """Compounded interest

    Returns:
        Interest value

    Input values:
        fv : Future value
        p  : Principal
    """
    i = fv - p
    return i
5,327,515
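A one-line worked example, assuming compoundedInterest is in scope. The helper only subtracts principal from future value, so any compounding must already be baked into fv, e.g. fv = p * (1 + r) ** n:

p = 1000.0
fv = p * (1 + 0.05) ** 2            # two years at 5% compounded annually -> 1102.5
assert abs(compoundedInterest(fv, p) - 102.5) < 1e-9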
def get_volumetric_scene(self, data_key="total", isolvl=0.5, step_size=3, **kwargs):
    """Get the Scene object which contains a structure and an isosurface component

    Args:
        data_key (str, optional): Use the volumetric data from self.data[data_key].
            Defaults to 'total'.
        isolvl (float, optional): The cutoff for the isosurface, using the same
            units as VESTA (e/bohr) and kept grid-size independent.
        step_size (int, optional): step_size parameter for marching_cubes_lewiner.
            Defaults to 3.
        **kwargs: kwargs for the Structure.get_scene function

    Returns:
        Scene: the structure scene with the isosurface scene appended to its contents
    """
    struct_scene = self.structure.get_scene(**kwargs)
    iso_scene = self.get_isosurface_scene(
        data_key=data_key,
        isolvl=isolvl,
        step_size=step_size,
        origin=struct_scene.origin,
    )
    struct_scene.contents.append(iso_scene)
    return struct_scene
5,327,516
def intercept(on=True):
    """Enable or disable tracking/intercepting of matplotlib calls."""
    global _intercept, _intercept_user
    _intercept = on
    _intercept_user = on
5,327,517
def test_generic_command_positive_value_key(requests_mock, client):
    """
    Given:
        - API resource /applications
    When:
        - Running the generic command
    Then:
        - Ensure outputs are as expected
    """
    applications_res = load_test_data('applications.json')
    requests_mock.get(
        'https://graph.microsoft.com/v1.0/applications?$top=10',
        json=applications_res,
    )
    args = {
        'resource': '/applications',
        'odata': '$top=10',
    }
    res = generic_command(client, args)
    assert res.outputs == applications_res.get('value')
5,327,518
def encode_labels(x, features):
    """
    Maps strings to integers
    """
    from numpy import concatenate
    from sklearn.compose import ColumnTransformer
    from sklearn.preprocessing import OrdinalEncoder

    encoder = ColumnTransformer([("", OrdinalEncoder(), features)], n_jobs=-1)
    x[:, features] = encoder.fit_transform(x)
    return x
5,327,519
def get_jogframe(
    conx: Connection, idx: int, group: int = 1, include_comment: bool = False
) -> t.Tuple[Position_t, t.Optional[str]]:
    """Return the jog frame at index 'idx'.

    :param idx: Numeric ID of the jog frame.
    :type idx: int
    :param group: Numeric ID of the motion group the jog frame is associated with.
    :type group: int
    :returns: A tuple containing the jog frame and associated comment (if requested)
    :rtype: tuple(Position_t, str)
    """
    if group < 1 or group > 8:
        raise ValueError(
            f"Requested group id invalid (must be between 1 and 8, got: {group})"
        )
    if idx < 1 or idx > 5:
        raise ValueError(
            f"Requested jog frame idx invalid (must be between 1 and 5, got: {idx})"
        )
    varname = f'[TPFDEF]JOGFRAMES[{group},{idx}]'
    frame = _get_frame_var(conx, varname)
    cmt = None
    if include_comment:
        JOGFRAME = 2
        cmt = _get_frame_comment(conx, frame_type=JOGFRAME, group=group, idx=idx)
    return (frame, cmt)
5,327,520
import numpy as np

def rotate(arr, bins):
    """
    Return an array rotated by 'bins' places to the left

    :param list arr: Input data
    :param int bins: Number of bins to rotate by
    """
    bins = bins % len(arr)
    if bins == 0:
        return arr
    else:
        return np.concatenate((arr[bins:], arr[:bins]))
5,327,521
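A quick demonstration of rotate above, assuming it and its numpy import are in scope; bins is taken modulo len(arr), so over-rotation wraps around:

arr = np.array([1, 2, 3, 4, 5])
print(rotate(arr, 2))   # [3 4 5 1 2]
print(rotate(arr, 7))   # [3 4 5 1 2]  (7 % 5 == 2)
print(rotate(arr, 0))   # [1 2 3 4 5]  (returned unchanged)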
def clear_list(items: Iterable[Optional[Typed]]) -> List[Typed]:
    """ return unique items in order of first occurrence """
    return list(OrderedDict.fromkeys(i for i in items if i is not None))
5,327,522
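A short example for clear_list above, assuming it and its OrderedDict/typing imports are in scope; None entries are dropped and first-occurrence order is preserved:

assert clear_list(["b", "a", None, "b", "c", None, "a"]) == ["b", "a", "c"]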
def chords(labels):
    """
    Transform a list of chord labels into an array of internal numeric
    representations.

    Parameters
    ----------
    labels : list
        List of chord labels (str).

    Returns
    -------
    chords : numpy.array
        Structured array with columns 'root', 'bass', and 'intervals',
        containing a numeric representation of chords (`CHORD_DTYPE`).
    """
    crds = np.zeros(len(labels), dtype=CHORD_DTYPE)
    cache = {}
    for i, lbl in enumerate(labels):
        cv = cache.get(lbl, None)
        if cv is None:
            cv = chord(lbl)
            cache[lbl] = cv
        crds[i] = cv
    return crds
5,327,523
def reduce_aet_if_dry(aet, wat_lev, fc):
    """ Reduce actual evapotranspiration if the soil is dry. If the water
        level in a cell is less than 0.7*fc, the rate of evapo-transpiration
        is reduced by a factor. This factor is 1 when wat_lev = 0.7*fc and
        decreases linearly to reach 0 when wat_lev = 0 i.e. where
        wat_lev < 0.7*fc, apply a correction factor of wat_lev/(0.7*fc) to
        the aet grid.

    Args:
        aet:     "Raw" actual evapotranspiration grid.
        wat_lev: Water level grid.
        fc:      Soil field capacity grid.

    Returns:
        Array (modified AET grid with AET reduced where necessary).
    """
    # Get a boolean array showing which cells need correcting
    bool_array = wat_lev < (0.7*fc)

    # Calculate a correction factor for all cells, but subtract 1 from answer
    cor_facts_minus1 = (wat_lev / (0.7*fc)) - 1

    # Multiplying bool_array by cor_facts_minus1 gives a grid with values of
    # (cor_fact - 1) for cells which need correcting and zero otherwise. Add 1
    # to this to get a grid of cor_facts and ones
    cor_facts = (bool_array * cor_facts_minus1) + 1

    return aet*cor_facts
5,327,524
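A small numeric demo of reduce_aet_if_dry above, assuming it and numpy are in scope. With fc = 10 the dryness threshold 0.7*fc is 7, so the wet cell is untouched, the half-dry cell is halved, and the fully dry cell goes to zero:

import numpy as np

aet = np.array([5.0, 5.0, 5.0])
fc = np.array([10.0, 10.0, 10.0])
wat_lev = np.array([8.0, 3.5, 0.0])
print(reduce_aet_if_dry(aet, wat_lev, fc))  # [5.  2.5 0. ]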
def merge_sort(array):
    """
    Sort array via merge sort algorithm

    Args:
        array: list of elements to be sorted

    Returns:
        Sorted list of elements

    Examples:
        >>> merge_sort([1, -10, 21, 3, 5])
        [-10, 1, 3, 5, 21]
    """
    if len(array) <= 1:  # also covers the empty list, which would otherwise recurse forever
        return array[:]
    mid = len(array) // 2
    left = merge_sort(array[:mid])
    right = merge_sort(array[mid:])
    sort_result = _merge(left, right)
    return sort_result
5,327,525
def inline_query(update: Update, _):
    """Translate LaTeX to unicode, then send it using the inline mode."""
    result = replace(update.inline_query.query).strip()
    if not result:
        return
    update.inline_query.answer(
        [
            InlineQueryResultArticle(
                id="result",
                title=result[:15] + "..." if len(result) > 15 else result,
                input_message_content=InputTextMessageContent(result),
            )
        ]
    )
5,327,526
def cmd_update(package):
    """Updates a package"""
    curpath = os.getcwd()
    with cd("build/tools/%s" % package.subdir):
        update(package)
        build_and_install(package, curpath)
5,327,527
def create_monitored_session(target: tf.train.Server, task_index: int,
                             checkpoint_dir: str, save_checkpoint_secs: int,
                             config: tf.ConfigProto = None) -> tf.Session:
    """
    Create a monitored session for the worker
    :param target: the target string for the tf.Session
    :param task_index: the task index of the worker
    :param checkpoint_dir: a directory path where the checkpoints will be stored
    :param save_checkpoint_secs: number of seconds between checkpoints storing
    :param config: the tensorflow configuration (optional)
    :return: the session to use for the run
    """
    # we choose the first task to be the chief
    is_chief = task_index == 0

    # Create the monitored session
    sess = tf.train.MonitoredTrainingSession(
        master=target,
        is_chief=is_chief,
        hooks=[],
        checkpoint_dir=checkpoint_dir,
        save_checkpoint_secs=save_checkpoint_secs,
        config=config
    )
    return sess
5,327,528
def sun_rise_set_times(datetime_index, coords):
    """
    Return sunrise and set times for the given datetime_index and coords,
    as a Series indexed by date (days, resampled from the datetime_index).
    """
    obs = ephem.Observer()
    obs.lat = str(coords[0])
    obs.lon = str(coords[1])

    # Ensure datetime_index is daily
    dtindex = pd.DatetimeIndex(
        datetime_index.to_series().map(pd.Timestamp.date).unique()
    )

    return pd.Series(_sun_rise_set(dtindex, obs), index=dtindex)
5,327,529
def running_mean(iterable, kind='arithmetic'):
    """
    Compute the running mean of an iterable. After `n` items, the mean of the
    first `n` items is yielded.

    Parameters
    ----------
    iterable : iterable
        An iterable object.
    kind : str
        The type of mean to compute, either `arithmetic` or `geometric`.

    Algorithmic details
    -------------------
    Memory: O(1)
    Time: O(n) where n is the length of the sequence.

    Examples
    --------
    >>> sequence = iter([1, 2, 3])
    >>> list(running_mean(sequence))
    [1.0, 1.5, 2.0]
    >>> sequence = iter([1, 2, 3])
    >>> list(running_mean(sequence, kind='geometric'))
    [1.0, 1.4142135623730951, 1.8171205928321397]
    >>> sequence = iter([1, 0, 5])
    >>> list(running_mean(sequence))
    [1.0, 0.5, 2.0]
    """
    iterable = iter(iterable)

    # Two kinds of means are implemented
    if kind == 'arithmetic':
        bin_op = operator.add
        bin_op_repeated = operator.mul
    elif kind == 'geometric':
        bin_op = operator.mul
        bin_op_repeated = operator.pow
    else:
        error_msg = 'Argument `kind` must be `arithmetic` or `geometric`.'
        raise ValueError(error_msg)

    # Iterate over all the elements
    generator = enumerate(itertools.accumulate(iterable, bin_op), start=1)
    for term, accumulated in generator:
        yield bin_op_repeated(accumulated, 1 / term)
5,327,530
def get_n1_event_format():
    """
    Define the format for the events in a neurone recording.

    Arguments: None.

    Returns:
    - A Struct (from the construct library) describing the event format.
    """
    # Define the data format of the events
    # noinspection PyUnresolvedReferences
    return Struct(
        "Revision" / Int32sl,
        "RFU1" / Int32sl,
        "Type" / Int32sl,
        "SourcePort" / Int32sl,
        "ChannelNumber" / Int32sl,
        "Code" / Int32sl,
        "StartSampleIndex" / Int64ul,
        "StopSampleIndex" / Int64ul,
        "DescriptionLength" / Int64ul,
        "DescriptionOffset" / Int64ul,
        "DataLength" / Int64ul,
        "DataOffset" / Int64ul,
        "RFU2" / Int32sl,
        "RFU3" / Int32sl,
        "RFU4" / Int32sl,
        "RFU5" / Int32sl
    )
5,327,531
def test_writer_validate_be():
    """System endianness and shb endianness should match"""
    shb = define_testdata().valid_shb_le
    _sysle = Writer._Writer__le
    Writer._Writer__le = False
    try:
        writer = Writer(fobj, shb=shb)  # noqa
    except Exception as e:
        assert isinstance(e, ValueError)
        assert str(e) == 'shb: expecting class SectionHeaderBlock on a big-endian system'
    Writer._Writer__le = _sysle
5,327,532
def idzp_rid(eps, m, n, matveca):
    """
    Compute ID of a complex matrix to a specified relative precision using
    random matrix-vector multiplication.

    :param eps: Relative precision.
    :type eps: float
    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param matveca: Function to apply the matrix adjoint to a vector, with call
        signature `y = matveca(x)`, where `x` and `y` are the input and output
        vectors, respectively.
    :type matveca: function

    :return: Rank of ID.
    :rtype: int
    :return: Column index array.
    :rtype: :class:`numpy.ndarray`
    :return: Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    proj = np.empty(m + 1 + 2*n*(min(m, n) + 1), dtype=np.complex128, order='F')
    k, idx, proj, ier = _id.idzp_rid(eps, m, n, matveca, proj)
    if ier:
        raise _RETCODE_ERROR
    proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
    return k, idx, proj
5,327,533
def update_position(request, space_url):
    """
    This view saves the new note position in the debate board. Instead of
    reloading the whole note form with all the data, we use the partial form
    "UpdateNotePosition" which only handles the column and row of the note.
    """
    place = get_object_or_404(Space, url=space_url)

    if request.method == "POST" and request.is_ajax():
        note = get_object_or_404(Note, pk=request.POST['noteid'])
        debate = get_object_or_404(Debate, pk=note.debate.id)
        position_form = UpdateNotePosition(request.POST or None, instance=note)

        if (request.user.has_perm('admin_space', place) or
                request.user.has_perm('mod_space', place) or
                request.user.has_perm('admin_debate', debate) or
                request.user.has_perm('mod_debate', debate) or
                request.user == note.author):
            if position_form.is_valid():
                position_form_uncommited = position_form.save(commit=False)
                position_form_uncommited.column = get_object_or_404(Column, pk=request.POST['column'])
                position_form_uncommited.row = get_object_or_404(Row, pk=request.POST['row'])
                position_form_uncommited.save()
                return HttpResponse(_("Note updated"))
            else:
                return HttpResponseBadRequest(_("There has been an error validating the form."))
        else:
            raise PermissionDenied
    else:
        return HttpResponseBadRequest(_("The request was not POST."))
5,327,534
def main(X_train_path, X_test_path, y_train_path, y_test_path):
    """
    Transforms train data and performs evaluation of multiple models,
    selecting the one model with the best scores.

    Parameters:
    ----------
    X_train_path : path for csv file with X_train data
    X_test_path  : path for csv file with X_test data
    y_train_path : path for csv file with y_train data
    y_test_path  : path for csv file with y_test data

    Returns:
    --------
    csv files with results of cross-validation and parameters of the best
    model, stored in:
        'results/cv_scores_for_alternative_methods.csv'
        'results/final_results.csv'
    """
    X_train = pd.read_csv(X_train_path)
    X_test = pd.read_csv(X_test_path)
    y_train = pd.read_csv(y_train_path)
    y_test = pd.read_csv(y_test_path)

    # Limiting amount of data to 100 rows only. Used for debugging or making
    # any modifications:
    # X_train = X_train.head(100)
    # X_test = X_test.head(100)
    # y_train = y_train.head(100)
    # y_test = y_test.head(100)

    preprocessor = transform_with_pipe(X_train, y_train)
    evalute_alternative_methods(X_train, y_train, preprocessor)
    tune_hyperparameters(X_train, X_test, y_train, y_test, preprocessor)
5,327,535
def _initialized():
    """ Initialization """
    ts = bpy.types.Scene
    ts.temp_align_objects_object = bpy.props.PointerProperty(
        name="Target Object", description="Target Object", type=bpy.types.Object)
    ts.temp_align_objects_object_subtarget = bpy.props.StringProperty(
        name="Target Object's Sub Target", description="Target Object's Sub Target", default="")
    ts.temp_align_objects_position_all = bpy.props.BoolProperty(
        name="Position All", default=True, update=_update_position_all)
    ts.temp_align_objects_rotation_all = bpy.props.BoolProperty(
        name="Rotation All", default=True, update=_update_rotation_all)
    ts.temp_align_objects_scale_all = bpy.props.BoolProperty(
        name="Scale All", default=True, update=_update_scale_all)
    ts.temp_align_objects_position_flags = bpy.props.BoolVectorProperty(
        name="Position (World)", subtype="XYZ", default=(True, True, True))
    ts.temp_align_objects_rotation_flags = bpy.props.BoolVectorProperty(
        name="Rotation (Local)", subtype="XYZ", default=(True, True, True))
    ts.temp_align_objects_scale_flags = bpy.props.BoolVectorProperty(
        name="Scale", subtype="XYZ", default=(True, True, True))
    ts.temp_align_objects_bone_length = bpy.props.BoolProperty(
        name="Length", default=False)
5,327,536
def LOG_WARN_NOPUSH(msg):
    """
    Log a warning, don't push it to the list of warnings to be echoed at the
    end of compilation.

    :param msg: Text to log
    :type msg: str
    """
    LOG_MSG(terminfo.TermColor.YELLOW, "WARN", msg, 3)
5,327,537
def tan(x):
    """Return the tangent of *x* radians."""
    return 0.0
5,327,538
def init_wandb(cfg) -> None:
    """
    Initialize project on Weights & Biases

    Args:
        cfg (Dict): Configuration file
    """
    wandb.init(
        name=cfg["LOGGING"]["NAME"],
        config=cfg,
        project=cfg["LOGGING"]["PROJECT"],
        resume="allow",
        id=cfg["LOGGING"]["ID"]
    )
5,327,539
def entropy(p):
    """
    Calculates the Shannon entropy for a marginal distribution.

    Args:
        p (np.ndarray): the marginal distribution.

    Returns:
        (float): the entropy of p
    """
    # Since zeros do not contribute to the Shannon entropy by definition, we
    # ignore them to avoid any errors/warnings.
    p = p[p != 0]
    H = -np.dot(p, np.log(p))
    # Filter against machine epsilon
    return _eps_filter(H)
5,327,540
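A tiny check of entropy above, assuming the function and its _eps_filter helper are in scope; a uniform distribution attains the maximum entropy ln(4), and a point mass has zero entropy:

import numpy as np

print(entropy(np.full(4, 0.25)))   # ~1.3863 == np.log(4)
print(entropy(np.array([1.0])))    # 0.0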
def test_sens_dc():
    """Sensitivity analysis axes (DC)."""
    solutions = run(
        dedent_multiline(
            """
            Test simulation
            R1 n1 0 1k
            V1 n1 0 DC 1
            .sens V(n1, n2)
            .end
            """
        )
    )
    assert list(solutions.keys()) == ["sens1"]
    sens1 = solutions["sens1"]
    assert sens1.xvector is None
    assert list(sens1.yvectors.keys()) == [
        "r1",
        "r1:bv_max",
        "r1:ef",
        "r1:lf",
        "r1:wf",
        "r1_bv_max",
        "r1_l",
        "r1_m",
        "r1_scale",
        "r1_w",
        "v1"
    ]
5,327,541
def format_to_str(*a, **kwargs):
    """ Formats the given objects to str. """
    result = ""
    # Default: keep newlines unless the caller says otherwise.
    if 'keepNewlines' not in kwargs:
        kwargs['keepNewlines'] = True
    for x in range(0, len(a)):
        tempItem = a[x]
        if type(tempItem) is str:
            result += tempItem
        elif type(tempItem) in [list, dict, tuple]:
            result += str(tempItem)  # pformat(tempItem)
        elif hasattr(tempItem, "itemType"):
            result += "<" + tempItem.itemType + ":" + tempItem.itemModelPointer + ">"
        else:
            result += str(tempItem)
        if x < len(a) - 1:
            result += " "
    if not kwargs['keepNewlines']:
        result = result.replace("\n", "*nl*")
    return result
5,327,542
def get_all_files_in_tree_with_regex(basedir: str, regex_str: str) -> List[str]:
    """
    Returns a list of paths such that each path is to a file whose name
    matches the provided regex. Walks the entire tree of basedir.
    """
    r = re.compile(regex_str)
    data_files = []
    for root, dirs, files in os.walk(basedir):
        for f in files:
            if r.match(f):
                logger.info("f: %s -- matches regex: %s" % (f, regex_str))
                data_files.append(os.path.join(root, f))
    return data_files
5,327,543
def zero_corrected_countless(data):
    """
    Vectorized implementation of downsampling a 2D image by 2 on each side
    using the COUNTLESS algorithm.

    data is a 2D numpy array with even dimensions.
    """
    # allows us to prevent losing 1/2 a bit of information
    # at the top end by using a bigger type. Without this 255 is handled incorrectly.
    data = data + 1  # don't use +=, it will affect the original data.

    sections = []

    # This loop splits the 2D array apart into four arrays that are
    # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
    # and (1,1) representing the A, B, C, and D positions from Figure 1.
    factor = (2, 2)
    for offset in np.ndindex(factor):
        part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
        sections.append(part)

    a, b, c, d = sections

    ab = a * (a == b)  # PICK(A,B)
    ac = a * (a == c)  # PICK(A,C)
    bc = b * (b == c)  # PICK(B,C)

    a = ab | ac | bc  # Bitwise OR, safe b/c non-matches are zeroed

    result = a + (a == 0) * d - 1  # a or d - 1

    return result
5,327,544
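A small demo of zero_corrected_countless above, assuming it and numpy are in scope. Each 2x2 block collapses to its majority label, and the +1/-1 correction keeps the 0 label in the first block from being mishandled:

import numpy as np

labels = np.array([
    [1, 1, 2, 2],
    [1, 0, 2, 2],
    [3, 3, 4, 4],
    [3, 3, 4, 4],
], dtype=np.uint8)
print(zero_corrected_countless(labels))
# [[1 2]
#  [3 4]]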
def test_bidirectionallstm(backend: Backend):
    """Test forward rewrite of bidirectionallstm."""
    check_backend(backend)
    bilstm = get_bidirectionallstm_model()
    bilstm.cpu().eval()

    deploy_cfg = mmcv.Config(
        dict(
            backend_config=dict(type=backend.value),
            onnx_config=dict(output_names=['output'], input_shape=None),
            codebase_config=dict(
                type='mmocr',
                task='TextRecognition',
            )))

    input = torch.rand(1, 1, 32)

    # to get outputs of pytorch model
    model_inputs = {
        'input': input,
    }
    model_outputs = get_model_outputs(bilstm, 'forward', model_inputs)

    # to get outputs of onnx model after rewrite
    wrapped_model = WrapModel(bilstm, 'forward')
    rewrite_inputs = {'input': input}
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg,
        run_with_backend=True)

    if is_backend_output:
        model_output = model_outputs.cpu().numpy()
        rewrite_output = rewrite_outputs[0].cpu().numpy()
        assert np.allclose(model_output, rewrite_output, rtol=1e-03, atol=1e-05)
    else:
        assert rewrite_outputs is not None
5,327,545
def update_custom_field(dev_name, os_version):
    """
    Update a custom field in NetBox
    """
    # Connect to NetBox environment
    # Disable warnings for self-signed certs if using HTTPS with NetBox
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    session = requests.Session()
    session.verify = False
    netbox = pynetbox.api(NB_URL, token=NB_TOKEN)
    # Used with HTTPS
    netbox.http_session = session

    # Get device record from NetBox
    device = netbox.dcim.devices.get(name=dev_name)

    if NB_CUSTOM_FIELD in device.custom_fields.keys():
        # Custom field exists
        try:
            if device.update({"custom_fields": {NB_CUSTOM_FIELD: os_version}}):
                print(
                    f'{dev_name}: NetBox "{NB_CUSTOM_FIELD}" updated with "{os_version}"'
                )
            else:
                print(f'{dev_name}: NetBox "{NB_CUSTOM_FIELD}" not updated')
        except pynetbox.RequestError as err:
            print("Error processing request", err.error)
            sys.exit(1)
    else:
        # Custom field does not exist, need to first create it
        try:
            netbox.extras.custom_fields.create(
                [
                    {
                        "content_types": ["dcim.device"],
                        "type": "text",
                        "name": NB_CUSTOM_FIELD,
                        "label": "",
                        "description": "",
                        "required": False,
                        "filter_logic": "loose",
                        "default": "",
                        "weight": 100,
                        "validation_minimum": None,
                        "validation_maximum": None,
                        "validation_regex": "",
                        "choices": [],
                    }
                ]
            )
            # Then, update field
            device.update({"custom_fields": {NB_CUSTOM_FIELD: os_version}})
            print(f'{dev_name}: NetBox "{NB_CUSTOM_FIELD}" updated with "{os_version}"')
        except pynetbox.RequestError as err:
            print("Error processing request", err.error)
            sys.exit(1)
5,327,546
def mnn_synthetic_data(
    n_samples: int = 1000,
    n_features: int = 100,
    n_batches: int = 2,
    n_latent: int = 2,
    n_classes: int = 3,
    proportions: np.ndarray = None,
    sparsity: float = 1.0,
    scale: Union[int, float] = 5,
    batch_scale: float = 0.1,
    bio_batch_angle: Union[float, None] = None,
    seed: int = 2018,
):
    """
    :param n_samples: number of samples (cells) per batch
    :param n_features: number of features (genes)
    :param n_batches: number of batches
    :param n_latent: size of the latent space used to generate data
    :param n_classes: number of classes shared across batches
    :param proportions: proportion of cells from each class in each batch.
        If shape is (n_classes,) same proportions used each time. If shape is
        (n_batches, n_classes) then each row is a different batch. Default is
        equal representation.
    :param sparsity: sparsity of class weightings
    :param scale: scaling factor for generating data
    :param batch_scale: batch effect relative to data
    :param bio_batch_angle: angle of batch effect w/ bio subspace
    :param seed: seed for random number generator
    :return: real-valued expression data with batch effect and metadata
    """
    if proportions is None:
        proportions = np.ones((n_batches, n_classes)) / n_classes
    else:
        proportions = np.broadcast_to(proportions, (n_batches, n_classes))

    if seed:
        np.random.seed(seed)

    class_centers = latent.gen_classes(n_latent, n_classes, sparsity, scale)
    batches = np.repeat(np.arange(n_batches), n_samples)

    latent_exp = []
    classes = []
    for b in range(n_batches):
        b_latent, b_classes = latent.sample_classes(
            n_samples, class_centers, proportions[b, :]
        )
        latent_exp.append(b_latent)
        classes.append(b_classes)

    latent_exp = np.vstack(latent_exp)
    classes = np.hstack(classes)

    programs = latent.gen_programs(n_latent, n_features, 1.0, 1.0)
    expression = np.dot(latent_exp, programs)

    projection_to_bio = np.dot(np.linalg.pinv(programs), programs)
    expression_w_batch = batch.add_batch_vectors(
        expression, batches, batch_scale, bio_batch_angle, projection_to_bio, copy=True
    )

    adata = util.arrays_to_anndata(
        expression_w_batch, batches, classes, X_latent=latent_exp, X_gt=expression
    )

    return adata
5,327,547
def then_response(result, response_json: dict):
    """ Check entire response """
    if response_json is not None:
        print('actual:\n{}'.format(json.dumps(result['response'].json(), indent=4)))
        print('expected:\n{}'.format(json.dumps(response_json, indent=4)))
        assert result['response'].json() == response_json, result['filename']
5,327,548
def send_command(rtr2, cmd):
    """ take the cmd and print output """
    rtr2.send(cmd)
    time.sleep(2)
    output = rtr2.recv(MAX_BUFFER)
    print output
5,327,549
def create_labeled_pair(img, gt_center, prop_center, gt_radius, scale):
    """
    Given a crater proposal and ground truth label, this function creates a
    labeled pair. Returns X, Y, where X is an image, and Y is a set of
    ground truths.

    img: an array
    gt_center: the known ground-truth center point (x, y) (floats)
    prop_center: the crater proposal center (x, y) (ints)
    gt_radius: the known ground-truth crater radius in pixels (float)
    scale: one of [32, 64, 128, 256] (very rough size of crater)
        follows the scheme:
            0<r<8   --> scale=32
            8<r<16  --> scale=64
            16<r<32 --> scale=128
            32<r    --> scale=256
        (craters bigger than r=64 not supported)
    """
    permitted_scales = [32, 64, 128, 256]
    if scale not in permitted_scales:
        msg = f"scale {scale} not permitted. Please use one of: "
        msg += str(permitted_scales)
        raise Exception(msg)

    scale_factor = scale//32
    x_offset = (gt_center[0] - prop_center[0])/scale_factor
    y_offset = (gt_center[1] - prop_center[1])/scale_factor
    r_scaled = gt_radius/scale

    Y = (x_offset, y_offset, r_scaled)
    X = extract_proposal(img, prop_center, scale)
    return X, Y
5,327,550
def loader_to_dask(loader_array):
    """
    Map a call to `dask.array.from_array` onto all the elements in
    ``loader_array``.

    This is done so that an explicit ``meta=`` argument can be provided to
    prevent loading data from disk.
    """
    if len(loader_array.shape) != 1:
        raise ValueError("Can only be used on one dimensional arrays")

    # The meta argument to from array is used to determine properties of the
    # array, such as dtype. We explicitly specify it here to prevent dask
    # trying to auto calculate it by reading from the actual array on disk.
    meta = np.zeros((0,), dtype=loader_array[0].dtype)

    to_array = partial(da.from_array, meta=meta)

    return map(to_array, loader_array)
5,327,551
def list_to_sentences(string):
    """ Splits text at newlines and puts it back together after stripping
    newlines and enumeration symbols, joined by a period.
    """
    if string is None:
        return None

    lines = string.splitlines()
    curr = ''
    processed = []
    for line in lines:
        stripped = line.strip()

        # empty line
        if 0 == len(stripped):
            if curr:
                processed.append(re.sub(r'\.\s*$', '', curr))
            curr = ''

        # beginning a new fragment
        elif not curr or 0 == len(curr):
            curr = re.sub(r'^[-\d\.\(\)]+\s*', '', stripped)

        # new line item? true when it starts with "-", "1." or "1)" (with
        # optional dash) or if the indent level is less than before (simple
        # whitespace count) (NO LONGER IMPLEMENTED)
        elif re.match(r'^-\s+', stripped) \
                or re.match(r'^\d+\.\s+', stripped) \
                or re.match(r'^(-\s*)?\d+\)\s+', stripped):
            if curr:
                processed.append(re.sub(r'\.\s*$', '', curr))
            curr = re.sub(r'^(-|(\d+\.)|((-\s*)?\d+\)))\s*', '', stripped)

        # append to previous fragment
        else:
            curr = '%s %s' % (curr, stripped)

    if curr:
        processed.append(re.sub(r'\.\s*$', '', curr))

    sentences = '. '.join(processed) if len(processed) > 0 else ''
    if len(sentences) > 0:
        sentences += '.'
    return sentences
5,327,552
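A worked example for list_to_sentences above, assuming it and the re import are in scope; bullet and numbered markers are stripped and the fragments are joined with periods:

text = "- first item\n- second item.\n\n2. third item"
print(list_to_sentences(text))  # first item. second item. third item.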
def decode_locations_one_layer(anchors_one_layer, offset_bboxes):
    """decode the offset bboxes into center bboxes

    Args:
        anchors_one_layer: ndarray representing all anchor coordinates in one
            layer, encoded as [y,x,h,w]
        offset_bboxes: A tensor with any shape; the size of the lowest axis
            must be 4, i.e. the offset values in [y,x,h,w]
    Return:
        the locations of bboxes encoded as [y,x,h,w]
    """
    shape = offset_bboxes.get_shape().as_list()
    try:
        i = shape.index(None)
        shape[i] = -1
    except ValueError:
        pass
    offset_bboxes = tf.reshape(offset_bboxes, shape=tf.stack([shape[0], -1, shape[-1]]))
    yref, xref, href, wref = anchors_one_layer
    ymin = yref - href / 2.
    xmin = xref - wref / 2.
    ymax = yref + href / 2.
    xmax = xref + wref / 2.

    anchor_ymin = np.float32(ymin)
    anchor_xmin = np.float32(xmin)
    anchor_ymax = np.float32(ymax)
    anchor_xmax = np.float32(xmax)

    # Transform to center / size.
    anchor_cy = (anchor_ymax + anchor_ymin) / 2.
    anchor_cx = (anchor_xmax + anchor_xmin) / 2.
    anchor_h = anchor_ymax - anchor_ymin
    anchor_w = anchor_xmax - anchor_xmin

    ## reshape to -1 ##
    anchor_cy = np.reshape(anchor_cy, [-1])
    anchor_cx = np.reshape(anchor_cx, [-1])
    anchor_h = np.reshape(anchor_h, [-1])
    anchor_w = np.reshape(anchor_w, [-1])

    bboxes_cy = offset_bboxes[:, :, 0] * anchor_h + anchor_cy
    bboxes_cx = offset_bboxes[:, :, 1] * anchor_w + anchor_cx
    bboxes_h = tf.exp(offset_bboxes[:, :, 2]) * anchor_h
    bboxes_w = tf.exp(offset_bboxes[:, :, 3]) * anchor_w

    cbboxes_out = tf.stack([bboxes_cy, bboxes_cx, bboxes_h, bboxes_w], axis=-1)
    cbboxes_out = tf.reshape(cbboxes_out, shape=shape)
    return cbboxes_out
5,327,553
def test_select_subset():
    """Test SelectSubset widget events."""
    src_items = ["one", "two", "three"]
    sel_subs = nbw.SelectSubset(source_items=src_items, default_selected=["one"])
    check.equal(sel_subs._select_list.options, ("one",))

    sel_subs._w_filter.value = "t"
    for item in sel_subs._source_list.options:
        check.is_in(item, ("two", "three"))
    sel_subs._w_filter.value = ""

    sel_subs._select_list.value = ["one"]
    sel_subs._b_del.click()
    check.is_false(sel_subs._select_list.options)

    sel_subs._source_list.value = ["two"]
    sel_subs._b_add.click()
    check.equal(sel_subs._select_list.options, ("two",))

    sel_subs._b_add_all.click()
    check.equal(len(sel_subs._select_list.options), len(src_items))
    for opt in src_items:
        check.is_in(opt, sel_subs._select_list.options)

    sel_subs._b_del_all.click()
    check.is_false(sel_subs._select_list.options)

    sel_subs._ipython_display_()
5,327,554
def FormatReserved(enum_or_msg_proto):
    """Format reserved values/names in a [Enum]DescriptorProto.

    Args:
        enum_or_msg_proto: [Enum]DescriptorProto message.

    Returns:
        Formatted enum_or_msg_proto as a string.
    """
    reserved_fields = FormatBlock('reserved %s;\n' % ','.join(
        map(str,
            sum([list(range(rr.start, rr.end))
                 for rr in enum_or_msg_proto.reserved_range],
                [])))) if enum_or_msg_proto.reserved_range else ''
    if enum_or_msg_proto.reserved_name:
        reserved_fields += FormatBlock('reserved %s;\n' % ', '.join(
            '"%s"' % n for n in enum_or_msg_proto.reserved_name))
    return reserved_fields
5,327,555
def upgrade_input_dir(inputs_dir):
    """
    Upgrade an input directory.
    """
    def rename_file(old_name, new_name, optional_file=True):
        old_path = os.path.join(inputs_dir, old_name)
        new_path = os.path.join(inputs_dir, new_name)
        if optional_file and not os.path.isfile(old_path):
            return
        shutil.move(old_path, new_path)

    def rename_column(file_name, old_col_name, new_col_name, optional_file=True):
        path = os.path.join(inputs_dir, file_name)
        if optional_file and not os.path.isfile(path):
            return
        df = pandas.read_csv(path, na_values=['.'], sep='\t')
        df.rename(columns={old_col_name: new_col_name}, inplace=True)
        df.to_csv(path, sep='\t', na_rep='.', index=False)

    old_new_column_names_in_file = {
        'gen_inc_heat_rates.tab': [('project', 'GENERATION_PROJECT')]
    }

    for fname, old_new_pairs in old_new_column_names_in_file.iteritems():
        for old, new in old_new_pairs:
            rename_column(fname, old_col_name=old, new_col_name=new)

    # merge trans_optional_params.tab with transmission_lines.tab
    trans_lines_path = os.path.join(inputs_dir, 'transmission_lines.tab')
    trans_opt_path = os.path.join(inputs_dir, 'trans_optional_params.tab')
    if os.path.isfile(trans_lines_path):  # the original checked this same path twice
        trans_lines = pandas.read_csv(trans_lines_path, na_values=['.'], sep='\t')
        if os.path.isfile(trans_opt_path):
            trans_opt = pandas.read_csv(trans_opt_path, na_values=['.'], sep='\t')
            trans_lines = trans_lines.merge(trans_opt, on='TRANSMISSION_LINE', how='left')
        trans_lines.to_csv(trans_lines_path, sep='\t', na_rep='.', index=False)
    if os.path.isfile(trans_opt_path):
        os.remove(trans_opt_path)

    # Write a new version text file.
    switch_model.upgrade._write_input_version(inputs_dir, upgrades_to)
5,327,556
def test_install_package_with_root():
    """
    Test installing a package using pip install --root
    """
    env = reset_env()
    root_dir = env.scratch_path/'root'
    result = run_pip('install', '--root', root_dir, '-f', find_links,
                     '--no-index', 'simple==1.0')
    normal_install_path = env.root_path / env.site_packages / 'simple-1.0-py%s.egg-info' % pyversion
    # use distutils to change the root exactly how the --root option does it
    from distutils.util import change_root
    root_path = change_root(os.path.join(env.scratch, 'root'), normal_install_path)
    assert root_path in result.files_created, str(result)
5,327,557
def download_pepper(load=True):  # pragma: no cover
    """Download scan of a pepper (capsicum).

    Originally obtained from Laser Design.

    Parameters
    ----------
    load : bool, optional
        Load the dataset after downloading it when ``True``. Set this
        to ``False`` and only the filename will be returned.

    Returns
    -------
    pyvista.PolyData or str
        DataSet or filename depending on ``load``.

    Examples
    --------
    >>> from pyvista import examples
    >>> dataset = examples.download_pepper()
    >>> dataset.plot()
    """
    return _download_and_read('pepper.ply', load=load)
5,327,558
def getAllSerial():
    """get all device serials found by command adb devices"""
    _, msgs = shell_command("adb devices")
    devices = [line for line in msgs if "\tdevice\n" in line]
    serials = sorted([dev.split()[0] for dev in devices], key=len)
    return serials
5,327,559
def generate_labgraph_monitor(graph: lg.Graph) -> None:
    """
    A function that serializes the graph topology and sends it to the
    LabGraphMonitor front-end using the LabGraph WebSockets API.

    @params:
        graph: An instance of the computational graph
    """
    # Local variables
    nodes: List[LabgraphMonitorNode] = []

    # Identify graph nodes
    nodes = identify_graph_nodes(graph)

    # Connect graph edges
    nodes = connect_to_upstream(nodes, graph.__streams__.values())

    # Serialize the graph topology
    serialized_graph = serialize_graph(type(graph).__name__, nodes)

    # Send the serialized graph to the front-end
    # using the LabGraph WebSockets API
    run_server(serialized_graph)
5,327,560
def create_directory(list_path_proj: list, dir_name: str):
    """
    :return: list of directories created, e.g. at
        c:\\users\\$Env:USER\\projects\\automate-ssas-build\\examples/bi-project_name-olap/queries/
        c:\\users\\$Env:USER\\projects\\automate-ssas-build\\examples/bi-two-olap/queries/
    """
    list_path_proj_with_dir = []
    print('\nDirectory created at')
    for path_olap in list_path_proj:
        dir = os.path.join(path_olap + dir_name)
        Path(dir).mkdir(parents=True, exist_ok=True)
        list_path_proj_with_dir.append(dir)
        print(dir)
    return list_path_proj_with_dir
5,327,561
def plot_points(points):
    """Generate a plot with a varying number of randomly generated points

    Args:
        points (int): a number of points to plot

    Returns:
        An svg plot with <points> data points
    """
    # data for plotting
    data = np.random.rand(points, 2)
    fig = Figure()
    FigureCanvas(fig)
    ax = fig.add_subplot(111)
    ax.scatter(data[:, 0], data[:, 1])
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title(f'There are {points} data points!')
    ax.grid(True)
    img = io.StringIO()
    fig.savefig(img, format='svg')
    # clip off the xml headers from the image
    svg_img = '<svg' + img.getvalue().split('<svg')[1]
    return svg_img
5,327,562
def bisect_status():
    """Reproduce the status line git-bisect prints after each step."""
    return "Bisecting: {} revisions left to test after this (roughly {} steps).".format(
        ceil((bisect_revisions() - 1) / 2),
        bisect_steps_remaining() - 1,
    )
5,327,563
def echo_view():
    """Call echo() with the Flask request."""
    return echo(flask.request)
5,327,564
def read_bytes_offset_file(f, n_bytes, v=0):
    """
    Used to skip some offset when reading a binary file.

    Parameters
    ----------
    f : file handler (sys.stdin).
    n_bytes : int
        number of bytes to read and discard.
    v : int [0 by default]
        verbose mode if 1.
    """
    words_array = []
    try:
        words_array = np.fromfile(file=f, dtype=np.uint8, count=n_bytes)
        if v == 1:
            print("vdif - Read " + str(n_bytes))
    except EOFError:
        if v == 1:
            print("vdif - Tried to read " + str(n_bytes))
        return []
    return []
5,327,565
def person_relationship_dates(node):
    """Find the nearest start/end dates related to a node's person."""
    person = node.people.single()
    rel = node.people.relationship(person)
    if rel.start_date is not None:
        return {'start_date': rel.start_date, 'end_date': rel.end_date}
    elif rel.start_date is None:
        # Look at all roles associated with the project,
        # and determine the date range based on those roles.
        if isinstance(node, Project):
            start_dates = []
            end_dates = []
            for role in node.roles:
                rel = role.people.relationship(person)
                if rel.start_date is not None:
                    start_dates.append(rel.start_date)
                if rel.end_date is not None:
                    end_dates.append(rel.end_date)
            dates = {
                'start_date': min(start_dates) if start_dates else None,
                'end_date': max(end_dates) if end_dates else None,
            }
            if start_dates and not end_dates:
                dates['end_date'] = datetime.date.today()
            return dates
        else:
            return {'start_date': None, 'end_date': None}
5,327,566
def oops():
    """Lazy way to return an oops response."""
    return make_response('oops', 400)
5,327,567
def test_single_missing_atom_line_invalid_atom_indices(
    single_missing_atom_line_v3000_sdf: Pathy,
) -> None:
    """An sdf block with a missing atom line has invalid indices.

    That means that the number of atoms is fewer than the counts line.

    Args:
        single_missing_atom_line_v3000_sdf: pytest fixture of a Path to the sdf
    """
    mol: SDBlock = next(parse_sdf(single_missing_atom_line_v3000_sdf))
    ctable = mol.ctable()
    with pytest.raises(IndicesMismatchError, match="fewer atom lines than count line"):
        ctable.valid_atom_indices()
5,327,568
def test_nested_cache(session):
    """Tests whether a sample pulled from the cache is always the same object."""
    browser = Browser(session)
    browser.use_cache = True

    st_from_session = session.SampleType.find(1)
    sample_from_session = st_from_session.samples[0]

    sample = browser.find(sample_from_session.id, "Sample")
    st = browser.find(st_from_session.id, "SampleType")

    sample_from_cache = browser.find(sample_from_session.id, "Sample")
    st_from_cache = browser.find(st_from_session.id, "SampleType")

    # check if sample type pulled from cache is always the same
    assert st_from_session is not st
    assert st_from_cache is st

    # check if sample pulled from cache is the same
    assert sample_from_session is not sample
    assert sample_from_cache is sample

    assert (
        st_from_cache.samples[0].id == sample.id
    ), "The first indexed sample should be the same sample found by the browser"
    # assert st_from_cache.samples[0] is sample, "The browser should have preferentially found the previously cached sample."
5,327,569
def get_lowest_energy_conformer(
    name,
    mol,
    gfn_exec=None,
    settings=None,
):
    """
    Get lowest energy conformer of molecule.

    Method:
        1) ETKDG conformer search on molecule
        2) xTB `normal` optimisation of each conformer
        3) xTB `opt_level` optimisation of lowest energy conformer
        4) save file
    """
    if settings is None:
        settings = {
            'conf_opt_level': 'normal',
            'final_opt_level': 'extreme',
            'charge': 0,
            'no_unpaired_e': 0,
            'max_runs': 1,
            'calc_hessian': False,
            'solvent': None,
            'N': 100
        }

    # Check for missing settings.
    req_settings = [
        'N', 'final_opt_level', 'charge', 'no_unpaired_e', 'max_runs',
        'calc_hessian', 'solvent', 'conf_opt_level'
    ]
    for i in req_settings:
        if i not in settings:
            raise MissingSettingError(
                f'Settings missing {i}. Has {settings.keys()}.'
            )

    # Run ETKDG on molecule.
    print(f'....running ETKDG on {name}')
    cids, confs = build_conformers(mol, N=settings['N'])

    # Optimize all conformers at normal level with xTB.
    low_e_conf_id = -100
    low_e = 10E20
    for cid in cids:
        name_ = f'{name}_confs/c_{cid}'
        ey_file = f'{name}_confs/c_{cid}_eyout'
        mol = update_from_rdkit_conf(mol, confs, conf_id=cid)
        mol.write(f'{name}_confs/c_{cid}.mol')
        # Optimize.
        opt_mol = optimize_conformer(
            name=name_,
            mol=mol,
            gfn_exec=gfn_exec,
            opt_level=settings['conf_opt_level'],
            charge=settings['charge'],
            no_unpaired_e=settings['no_unpaired_e'],
            max_runs=settings['max_runs'],
            calc_hessian=settings['calc_hessian'],
            solvent=settings['solvent']
        )
        opt_mol.write(f'{name}_confs/c_{cid}_opt.mol')
        # Get energy.
        calculate_energy(
            name=name_,
            mol=opt_mol,
            gfn_exec=gfn_exec,
            ey_file=ey_file,
            charge=settings['charge'],
            no_unpaired_e=settings['no_unpaired_e'],
            solvent=settings['solvent']
        )
        ey = read_gfnx2xtb_eyfile(ey_file)
        if ey < low_e:
            print(
                'lowest energy conformer updated with energy: '
                f'{ey}, id: {cid}'
            )
            low_e_conf_id = cid
            low_e = ey

    # Get lowest energy conformer.
    low_e_conf = stk.BuildingBlock.init_from_file(
        f'{name}_confs/c_{low_e_conf_id}_opt.mol'
    )
    low_e_conf.write(f'{name}_confs/low_e_unopt.mol')

    # Optimize lowest energy conformer at opt_level.
    low_e_conf = optimize_conformer(
        name=name + 'low_e_opt',
        mol=low_e_conf,
        gfn_exec=gfn_exec,
        opt_level=settings['final_opt_level'],
        charge=settings['charge'],
        no_unpaired_e=settings['no_unpaired_e'],
        max_runs=settings['max_runs'],
        calc_hessian=settings['calc_hessian'],
        solvent=settings['solvent']
    )
    low_e_conf.write(f'{name}_confs/low_e_opt.mol')

    # Return molecule.
    return low_e_conf
5,327,570
def apply_settings(app, settings):
    """
    From a dict of settings, save them locally and set up any necessary child
    systems from those settings

    :param app: aiohttp Application to add settings to
    :param settings: Dict of settings data
    """
    settings["base_path"] = pathlib.Path(settings["base_path"]).expanduser()
    app['settings'] = settings

    if "twitch_id" in settings:
        from .. import twitch
        cid = settings["twitch_id"]
        secret = settings["twitch_secret"]
        redirect = settings["twitch_redirect"]
        app['twitch_app'] = twitch.TwitchApp(cid=cid, secret=secret, redirect=redirect)
5,327,571
def set_deal_products(deal_id, payload, product_list):
    """
    Sets (creates or updates) the deal's product rows. From the Bitrix24
    manual: product rows that exist before crm.deal.productrows.set is
    called will be replaced by the new ones.

    returns : None
    rtype : None
    """
    products = []
    for product in product_list:
        products.append(
            {
                product["NAME"]: {
                    "ID": product["ID"],
                    "PRICE": product["PRICE"],
                    "CURRENCY_ID": product["CURRENCY_ID"]
                }
            }
        )
    print("Adding products to the deal.")
    rows = []
    for item in payload["crm.deal"]["deal_products"]:
        for product in products:
            if product.get(item):
                rows.append({
                    "PRODUCT_ID": product[item]["ID"],
                    "PRICE": product[item]["PRICE"],
                    "CURRENCY_ID": product[item]["CURRENCY_ID"],
                })
    b.call(
        "crm.deal.productrows.set",
        {
            "id": deal_id,
            "rows": rows
        }
    )
5,327,572
def dictize_params(params):
    """ Parse parameters into a normal dictionary """
    param_dict = dict()
    for key, value in params.iteritems():
        param_dict[key] = value
    return param_dict
5,327,573
def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    if sparse:
        res = tf.sparse_tensor_dense_matmul(x, y)
    else:
        res = tf.matmul(x, y)
    return res
5,327,574
def test_Archive_get_status_code(db, client, oauth2):
    """Test the Archive's get method with error on Archivematica."""
    sip = SIP.create()
    ark = Archive.create(sip=sip, accession_id='id', archivematica_id=uuid.uuid4())
    ark.status = ArchiveStatus.WAITING
    db.session.commit()

    mock_response = Response()
    mock_response.status_code = 404
    with patch('requests.get', return_value=mock_response):
        response = client.get(
            url_for(
                'invenio_archivematica_api.archive_api',
                accession_id=ark.accession_id,
                access_token=oauth2.token),
            data=json.dumps({'realStatus': True}),
            content_type='application/json')
        assert response.status_code == mock_response.status_code
5,327,575
def get_python3_status(classifiers):
    """
    Search through list of classifiers for a Python 3 classifier.
    """
    status = False
    for classifier in classifiers:
        if classifier.find('Programming Language :: Python :: 3') == 0:
            status = True
    return status
5,327,576
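A quick usage sketch for get_python3_status above, assuming it is in scope; the strings are standard PyPI trove classifiers:

classifiers = [
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.8",
]
assert get_python3_status(classifiers) is True
assert get_python3_status(classifiers[:1]) is False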
def drawBeta(s, w, size=1):
    """Draw beta from its distribution (Eq.9 Rasmussen 2000) using ARS.
    Make it robust with an expanding range in case of failure."""
    # nd = w.shape[0]  # for multi-dimensional data
    lb = 0.0
    flag = True
    cnt = 0
    while flag:
        xi = lb + np.logspace(-3 - cnt, 1 + cnt, 200)  # update range if needed
        flag = False
        try:
            ars = ARS(fbeta, fbetaprima, xi=xi, lb=0.0, ub=np.inf, s=s, w=w)
        except:
            cnt += 1
            flag = True
    # draw beta
    return ars.draw(size)
5,327,577
def get_unique_map_to_pullback(p, p_a, p_b, z_a, z_b):
    """Find the unique map from Z to the pullback P induced by z_a and z_b."""
    z_p = dict()
    for value in p:
        z_keys_from_a = set()
        if value in p_a.keys():
            a_value = p_a[value]
            z_keys_from_a = set(keys_by_value(z_a, a_value))
        z_keys_from_b = set()
        if value in p_b.keys():
            b_value = p_b[value]
            z_keys_from_b.update(keys_by_value(z_b, b_value))
        z_keys = z_keys_from_a.intersection(z_keys_from_b)
        for z_key in z_keys:
            z_p[z_key] = value
    return z_p
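A toy illustration; `keys_by_value` is not defined in this snippet, so a minimal stand-in (assumed to return every key of a dict that maps to the given value) is included here.

# Stand-in for the helper the function relies on (an assumption about its
# semantics: all keys of `d` whose value equals `value`).
def keys_by_value(d, value):
    return [k for k, v in d.items() if v == value]

p = ['p1', 'p2']                 # elements of the pullback object P
p_a = {'p1': 'a1', 'p2': 'a2'}   # projection P -> A
p_b = {'p1': 'b1', 'p2': 'b2'}   # projection P -> B
z_a = {'z1': 'a1', 'z2': 'a2'}   # map Z -> A
z_b = {'z1': 'b1', 'z2': 'b2'}   # map Z -> B
print(get_unique_map_to_pullback(p, p_a, p_b, z_a, z_b))
# {'z1': 'p1', 'z2': 'p2'}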
5,327,578
def f1_score(
        pred: torch.Tensor,
        target: torch.Tensor,
        num_classes: Optional[int] = None,
        class_reduction: str = 'micro',
) -> torch.Tensor:
    """
    Computes the F1-score (a.k.a F-measure), which is the harmonic
    mean of the precision and recall. It ranges between 0 and 1,
    where 1 is perfect and 0 is the worst value.

    Args:
        pred: estimated probabilities
        target: ground-truth labels
        num_classes: number of classes
        class_reduction: method to reduce metric score over labels

            - ``'micro'``: calculate metrics globally (default)
            - ``'macro'``: calculate metrics for each label, and find their
              unweighted mean.
            - ``'weighted'``: calculate metrics for each label, and find their
              weighted mean.
            - ``'none'``: returns calculated metric per class

    Return:
        Tensor containing F1-score

    Example:

        >>> x = torch.tensor([0, 1, 2, 3])
        >>> y = torch.tensor([0, 1, 2, 2])
        >>> f1_score(x, y)
        tensor(0.7500)
    """
    return fbeta_score(pred=pred, target=target, beta=1.,
                       num_classes=num_classes,
                       class_reduction=class_reduction)
5,327,579
def PyramidPoolingModule(inputs, feature_map_shape): """ Build the Pyramid Pooling Module. """ interp_block1 = InterpBlock(inputs, 1, feature_map_shape) interp_block2 = InterpBlock(inputs, 2, feature_map_shape) interp_block3 = InterpBlock(inputs, 3, feature_map_shape) interp_block6 = InterpBlock(inputs, 6, feature_map_shape) res = tf.concat([inputs, interp_block6, interp_block3, interp_block2, interp_block1], axis=-1) return res
5,327,580
def order(ord):
    """
    `order` is a decorator used to order the pipeline classes.

    This decorator attaches a property named "order" to the member function so
    that the property can be used to order the member functions. This `order`
    function can be combined with the decorator `with_transforms`, which
    orders the member functions.

    ```python
    class AGoodClass:
        def __init__(self):
            self.size = 0

        @order(1)
        def first_good_member(self, new):
            return "first good member"

        @order(2)
        def second_good_member(self, new):
            return "second good member"
    ```
    """
    return attributes(order=ord)
5,327,581
def single_node_test(ctx, config): """ - ceph-deploy.single_node_test: null #rhbuild testing - ceph-deploy.single_node_test: rhbuild: 1.2.3 """ log.info("Testing ceph-deploy on single node") if config is None: config = {} overrides = ctx.config.get('overrides', {}) teuthology.deep_merge(config, overrides.get('ceph-deploy', {})) if config.get('rhbuild'): log.info("RH Build, Skip Download") with contextutil.nested( lambda: cli_test(ctx=ctx, config=config), ): yield else: with contextutil.nested( lambda: install_fn.ship_utilities(ctx=ctx, config=None), lambda: download_ceph_deploy(ctx=ctx, config=config), lambda: cli_test(ctx=ctx, config=config), ): yield
5,327,582
def collisionIndicator(egoPose, egoPoly, objPose, objPoly):
    """
    Indicator function for collision between the ego vehicle and a moving
    object.
    Param:
        egoPose: pose of the ego vehicle
        egoPoly: polygon of the ego vehicle
        objPose: pose of the moving object
        objPoly: polygon of the moving object
    Return:
        col_indicator: (float) collision indicator between the two objects
    """
    dMean = np.array([egoPose.x_m-objPose.x_m,
                      egoPose.y_m-objPose.y_m])
    dCov = egoPose.covUtm + objPose.covUtm
    diff_yaw = abs(egoPose.yaw_rad-objPose.yaw_rad)
    col_indicator = 0

    # handle parallel and orthogonal case
    if abs(math.remainder(diff_yaw, np.pi/2)) < param._COLLISION_ORTHO_THRES:
        poly, bound = gaussian.minkowskiSumOrthogonal(egoPoly, objPoly)
        col_indicator = collisionIndicatorComputeSimple(bound, dMean, dCov)
    # handle general case
    else:
        poly, bound = gaussian.minkowskiSum(egoPoly, objPoly)
        col_indicator = collisionIndicatorCompute(
            poly=poly, bound=bound, dMean=dMean, dCov=dCov)
    return col_indicator
5,327,583
def test_version_dataset(temp_config, dataset_name, dataset_version, dataset_metadata, entity): """ GIVEN: A mocked dataset folder to track, a dataset version to track, metadata to track, and a backend to use WHEN: This dataset is registered or versioned on W&B THEN: The dataset shows up as a run on the portal """ # need to mock out all the wandb calls and test just the inputs to them _, config = temp_config with patch("wicker.plugins.wandb.wandb.init") as patched_wandb_init: with patch("wicker.plugins.wandb.wandb.Artifact") as patched_artifact: # version the dataset with the patched functions/classes version_dataset(dataset_name, dataset_version, entity, dataset_metadata) # establish the expected calls expected_artifact_calls = [ call(f"{dataset_name}_{dataset_version}", type="dataset"), call().add_reference( f"{config['aws_s3_config']['s3_datasets_path']}{dataset_name}/{dataset_version}/assets", name="dataset", ), call().metadata.__setitem__("version", dataset_version), call().metadata.__setitem__( "s3_uri", f"{config['aws_s3_config']['s3_datasets_path']}{dataset_name}/{dataset_version}/assets" ), ] for key, value in dataset_metadata.items(): expected_artifact_calls.append(call().metadata.__setitem__(key, value)) expected_run_calls = [ call(project="dataset_curation", name=f"{dataset_name}_{dataset_version}", entity=entity), call().log_artifact(patched_artifact()), ] # assert that these are properly called patched_artifact.assert_has_calls(expected_artifact_calls, any_order=True) patched_wandb_init.assert_has_calls(expected_run_calls, any_order=True)
5,327,584
def lookup_listener(param):
    """
    Flags a method as a @lookup_listener. The method will be notified of
    changes to the lookup. The lookup changes when values are registered in
    the lookup or during service activation.
    @param param: the lookup being listened to
    @return: the decorated function
    """
    def decor(func):
        if not hasattr(func, "lookup_decor"):
            func.lookup_decor = [param]
        else:
            func.lookup_decor.append(param)
        return func

    return decor
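A small illustration: the decorator only records metadata on the function, which the surrounding lookup framework is assumed to consume later.

# The decorator just annotates the function; reading lookup_decor back shows
# the recorded parameter (the framework that consumes it is an assumption).
class Component:
    @lookup_listener("my.lookup")
    def on_lookup_changed(self, event):
        pass

print(Component.on_lookup_changed.lookup_decor)  # ['my.lookup']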
5,327,585
async def read_multi_analog_inputs(app, addr):
    """
    Execute a single request using `ReadPropertyMultipleRequest`.

    This will read the present values of the first 10 analog inputs from the
    remote device.

    :param app: An app instance
    :param addr: The network address of the remote device
    :return:
    """
    read_access_specs = []
    for i in range(10):
        read_access_specs.append(
            ReadAccessSpecification(
                objectIdentifier=('analogInput', i),
                listOfPropertyReferences=[
                    PropertyReference(propertyIdentifier='presentValue')
                ],
            )
        )
    return await app.execute_request(
        ReadPropertyMultipleRequest(
            listOfReadAccessSpecs=read_access_specs,
            destination=Address(addr)
        ),
    )
5,327,586
def lookupBlock(blockName):
    """
    Look up a block name string in the name list.

    A data value (e.g. color) override may be appended to the end,
    e.g. stained_hardened_clay_10.

    Note: block name lookup is case insensitive
    """
    blockName = blockName.upper()
    try:
        try:
            name, data = blockName.rsplit('_', 1)
        except ValueError:
            return Blocks[blockName]
        else:
            try:
                data = int(data)
            except ValueError:
                return Blocks[blockName]
            return Block(Blocks[name].id, data)
    except KeyError:
        print('Invalid block name:', blockName)
        sys.exit()
5,327,587
def deploy_on_host(shell, migrate, collectstatic, dependencies, deploy_django,
                   deploy_react, django_branch, react_branch, meta=None):
    """
    Args:
        shell: Connection object to connect to the shell.
        migrate: bool. Whether migrations have to be run or not.
        collectstatic: bool. Whether static files have to be collected or not.
        dependencies: bool. Whether to install dependencies for python or not.
        deploy_django: bool. Whether to deploy django or not.
        deploy_react: bool. Whether to deploy react or not.
        django_branch: str. Branch name to be deployed.
        react_branch: str. Branch name to be deployed.
        meta: dict. meta contains context, to transport common data such as
            settings.

    Returns:
        None
    """
    # Avoid a shared mutable default argument.
    if meta is None:
        meta = {}
    if deploy_django:
        print("Deploying Django..")
        _deploy_django(
            shell,
            migrate=migrate,
            collectstatic=collectstatic,
            dependencies=dependencies,
            django_branch=django_branch,
            meta=meta
        )
    if deploy_react:
        print("Deploying React..")
        _deploy_react(
            shell,
            react_branch,
            meta=meta
        )
5,327,588
def factorize(values, sort=False, na_sentinel=-1, size_hint=None): """Encode the input values as integer labels Parameters ---------- values: Series, Index, or CuPy array The data to be factorized. na_sentinel : number, default -1 Value to indicate missing category. Returns ------- (labels, cats) : (cupy.ndarray, cupy.ndarray or Index) - *labels* contains the encoded values - *cats* contains the categories in order that the N-th item corresponds to the (N-1) code. Examples -------- >>> import cudf >>> data = cudf.Series(['a', 'c', 'c']) >>> codes, uniques = cudf.factorize(data) >>> codes array([0, 1, 1], dtype=int8) >>> uniques StringIndex(['a' 'c'], dtype='object') See Also -------- cudf.Series.factorize : Encode the input values of Series. """ if sort: raise NotImplementedError( "Sorting not yet supported during factorization." ) if na_sentinel is None: raise NotImplementedError("na_sentinel can not be None.") if size_hint: warn("size_hint is not applicable for cudf.factorize") return_cupy_array = isinstance(values, cp.ndarray) values = Series(values) cats = values._column.dropna().unique().astype(values.dtype) name = values.name # label_encoding mutates self.name labels = values._label_encoding(cats=cats, na_sentinel=na_sentinel).values values.name = name return labels, cats.values if return_cupy_array else Index(cats)
5,327,589
def make_proc(code, variables, path, *, use_async=False):  # pylint: disable=redefined-builtin
    """Compile this code block to a procedure.

    Args:
        code: the code block to execute. Text, will be indented.
        variables: variable names to pass into the code
        path: the location where the code is stored
        use_async: False if sync code, True if async, None if in thread
    Returns:
        the procedure to call. All keyval arguments will be in the local dict.
    """
    hdr = f"""\
def _proc({ ",".join(variables) }):
    """
    if use_async:
        hdr = "async " + hdr
    code = hdr + code.replace("\n", "\n    ")
    code = compile(code, str(path), "exec")
    return partial(_call_proc, code, variables)
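A usage sketch; invoking the returned partial goes through `_call_proc`, which is not shown here and is assumed to exec the compiled body with the keyword arguments as locals.

# Build a two-variable procedure; the calling convention proc(a=..., b=...)
# is an assumption about the (not shown) _call_proc helper.
proc = make_proc("print(a + b)", ["a", "b"], "/tmp/example.py")
# proc(a=1, b=2)  # would print 3 under the assumed _call_proc semantics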
5,327,590
def ratings_std(df):
    """Calculate the standard deviation of ratings from the given dataframe

    Parameters
    ----------
    df (pandas dataframe): a dataframe containing all ratings

    Returns
    -------
    standard deviation (float): standard deviation of the ratings,
        rounded to 4 decimal places
    """
    std_value = df['ratings'].std()
    std_value = round(std_value, 4)
    return std_value
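A quick illustration with a toy dataframe:

# Toy dataframe with a 'ratings' column, matching what the function expects.
import pandas as pd

df = pd.DataFrame({'ratings': [3.0, 4.5, 5.0, 2.5]})
print(ratings_std(df))  # 1.1902 (sample standard deviation, 4 decimals)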
5,327,591
def save(objct, fileoutput, binary=True):
    """
    Save 3D object to file. (same as `write()`).

    Possible extensions are:
        - vtk, vti, ply, obj, stl, byu, vtp, xyz, tif, mhd, png, bmp.
    """
    return write(objct, fileoutput, binary)
5,327,592
def read_pfm(fname):
    """
    Load a pfm file as a numpy array

    Args:
        fname: path to the file to be loaded

    Returns:
        content of the file as a numpy array, plus the scale factor
    """
    with open(fname, 'rb') as file:
        header = file.readline().rstrip()
        if header == b'PF':
            color = True
        elif header == b'Pf':
            color = False
        else:
            # header is bytes; decode before concatenating with str
            raise Exception('Not a PFM file! header: '
                            + header.decode('ascii', 'replace'))

        dims = file.readline()
        try:
            width, height = list(map(int, dims.split()))
        except ValueError:
            raise Exception('Malformed PFM header.')

        scale = float(file.readline().rstrip())
        if scale < 0:  # little-endian
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian

        data = np.fromfile(file, endian + 'f')

    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    data = np.flipud(data)
    return data, scale
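A round-trip sketch: it writes a minimal little-endian grayscale PFM by hand (header, dims, scale, then bottom-up float rows, per the PFM layout the reader parses) and reads it back; the file name is arbitrary.

# Write a 2x3 grayscale PFM by hand, then read it back. Layout: 'Pf' header,
# 'width height' line, negative scale (=> little-endian), bottom-up rows.
import numpy as np

arr = np.arange(6, dtype='<f4').reshape(2, 3)   # height=2, width=3
with open('tiny.pfm', 'wb') as f:
    f.write(b'Pf\n3 2\n-1.0\n')
    np.flipud(arr).astype('<f4').tofile(f)      # rows stored bottom-up

data, scale = read_pfm('tiny.pfm')
assert np.array_equal(data, arr) and scale == 1.0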
5,327,593
def broaden_spectrum(spect, sigma):
    """
    Broadens a peak defined in spect by the sigma factor and returns the
    x and y data to plot.

    Args:
    ----
        spect (np.ndarray) -- input array containing the peak info
            ([position, intensity]) for the individual peak to be broadened.
        sigma (float) -- gaussian broadening term for the peak given.

    Returns:
    --------
        plot_vals (list) -- a 2D array containing the x and y values for
            plotting.
    """
    # Assertions (disabled for now)
    # assert isinstance(spect, (np.ndarray, list)), \
    #     'Input must be a list or a numpy array.'
    # assert isinstance(sigma, float), \
    #     'sigma value must be a float'

    # min of the spectrum **FUTURE FEATURE** (multi-peak input):
    # min_x = min(spect[0]) - 50.
    min_x = spect[0] - 50
    # max of the spectrum **FUTURE FEATURE** (multi-peak input):
    # max_x = max(spect[0]) + 50.
    max_x = spect[0] + 50

    x = np.linspace(start=min_x, stop=max_x, num=10000)
    y = [0. for k in range(len(x))]
    for i in range(len(x)):
        # **FUTURE FEATURE** (multi-peak input):
        # for j in range(len(spect[0])):
        #     y[i] += spect[1][j] * math.exp(-0.5 * (((x[i] - spect[0][j])
        #         ** 2) / sigma ** 2))
        y[i] += spect[1] * math.exp(-0.5 * (((x[i] - spect[0]) ** 2) /
                                            sigma ** 2))
    plot_vals = [x, y]
    return plot_vals
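A short illustration: a single peak at position 1500 with unit intensity, broadened with sigma = 10; the Gaussian maximum lands on the peak position (the numbers are illustrative only).

# Single peak at x = 1500 with intensity 1.0, broadened with sigma = 10.
import numpy as np

x, y = broaden_spectrum(np.array([1500.0, 1.0]), 10.0)
print(x[int(np.argmax(y))])  # ~1500.0 (maximum sits at the peak position)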
5,327,594
def mv_audio(serial_id, audio_setting):
    """
    This function will change the audio recording setting to {audio_setting}
    in the Meraki dashboard for the MV camera with the given {serial_id}

    :param serial_id: the serial id for the meraki mv camera
    :param audio_setting: 'true' to turn on audio recording, 'false' to turn
        off audio recording
    :return: api response status code
    """
    url = f"https://api.meraki.com/api/v1/devices/{serial_id}/camera/qualityAndRetention"
    payload = f'''{{
        "audioRecordingEnabled": {audio_setting}
    }}'''
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "X-Cisco-Meraki-API-Key": API_KEY
    }
    response = requests.request('PUT', url, headers=headers, data=payload)
    if response.status_code == 200:  # a successful PUT returns 200, not 403
        print(f'Camera serial id {serial_id}: audio recording has been '
              f'changed to {audio_setting}')
    return response.status_code
5,327,595
def lsp_text_edits(changed_file: ChangedFile) -> List[TextEdit]: """Take a jedi `ChangedFile` and convert to list of text edits. Handles inserts, replaces, and deletions within a text file """ old_code = ( changed_file._module_node.get_code() # pylint: disable=protected-access ) new_code = changed_file.get_new_code() opcode_position_lookup_old = get_opcode_position_lookup(old_code) text_edits = [] for opcode in get_opcodes(old_code, new_code): if opcode.op in _OPCODES_CHANGE: start = opcode_position_lookup_old[opcode.old_start] end = opcode_position_lookup_old[opcode.old_end] start_char = opcode.old_start - start.range_start end_char = opcode.old_end - end.range_start new_text = new_code[opcode.new_start : opcode.new_end] text_edits.append( TextEdit( range=Range( start=Position(line=start.line, character=start_char), end=Position(line=end.line, character=end_char), ), new_text=new_text, ) ) return text_edits
5,327,596
def _hash(file_name, hash_function=hashlib.sha256): """compute hash of file `file_name`""" with open(file_name, 'rb') as file_: return hash_function(file_.read()).hexdigest()
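A quick demonstration: hash a tiny file written on the fly (the file name is arbitrary).

# Hash a small throwaway file; the sha256 of b'hello' is well known.
with open('example.txt', 'wb') as f:
    f.write(b'hello')

print(_hash('example.txt'))
# 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824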
5,327,597
def prepare_cmf(observer='1931_2deg'): """Safely returns the color matching function dictionary for the specified observer. Parameters ---------- observer : `str`, {'1931_2deg', '1964_10deg'} the observer to return Returns ------- `dict` cmf dict Raises ------ ValueError observer not 1931 2 degree or 1964 10 degree """ if observer.lower() == '1931_2deg': return prepare_cie_1931_2deg_observer() elif observer.lower() == '1964_10deg': return prepare_cie_1964_10deg_observer() else: raise ValueError('observer must be 1931_2deg or 1964_10deg')
5,327,598
def plotAssemblyTypes(
    blueprints,
    coreName,
    assems=None,
    plotNumber=1,
    maxAssems=None,
    showBlockAxMesh=True,
):
    """
    Generate a plot showing the axial block and enrichment distributions of
    each assembly type in the core.

    Parameters
    ----------
    blueprints: Blueprints
        The blueprints to plot assembly types of.

    coreName: str
        The name of the core, used in the plot title and output file name.

    assems: list
        list of assembly objects to be plotted.

    plotNumber: integer
        number to uniquely identify the assembly plot from others and to
        prevent plots from being overwritten.

    maxAssems: integer
        maximum number of assemblies to plot in the assems list.

    showBlockAxMesh: bool
        if true, the axial mesh information will be displayed on the right
        side of the assembly plot.

    Returns
    -------
    figName: str
        The file name of the saved figure.
    """

    if assems is None:
        assems = list(blueprints.assemblies.values())
    if not isinstance(assems, (list, set, tuple)):
        assems = [assems]
    if not isinstance(plotNumber, int):
        raise TypeError("Plot number should be an integer")
    if maxAssems is not None and not isinstance(maxAssems, int):
        raise TypeError("Maximum assemblies should be an integer")

    numAssems = len(assems)
    if maxAssems is None:
        maxAssems = numAssems

    # Set assembly/block size constants
    yBlockHeights = []
    yBlockAxMesh = OrderedSet()
    assemWidth = 5.0
    assemSeparation = 0.3
    xAssemLoc = 0.5
    xAssemEndLoc = numAssems * (assemWidth + assemSeparation) + assemSeparation

    # Setup figure
    fig, ax = plt.subplots(figsize=(15, 15), dpi=300)
    for index, assem in enumerate(assems):
        isLastAssem = index == numAssems - 1
        (xBlockLoc, yBlockHeights, yBlockAxMesh) = _plotBlocksInAssembly(
            ax,
            assem,
            isLastAssem,
            yBlockHeights,
            yBlockAxMesh,
            xAssemLoc,
            xAssemEndLoc,
            showBlockAxMesh,
        )
        xAxisLabel = re.sub(" ", "\n", assem.getType().upper())
        ax.text(
            xBlockLoc + assemWidth / 2.0,
            -5,
            xAxisLabel,
            fontsize=13,
            ha="center",
            va="top",
        )
        xAssemLoc += assemWidth + assemSeparation

    # Set up plot layout
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.yaxis.set_ticks_position("left")
    yBlockHeights.insert(0, 0.0)
    yBlockHeights.sort()
    yBlockHeightDiffs = numpy.diff(
        yBlockHeights
    )  # Compute differential heights between each block
    ax.set_yticks([0.0] + list(set(numpy.cumsum(yBlockHeightDiffs))))
    ax.xaxis.set_visible(False)

    ax.set_title("Assembly Designs for {}".format(coreName), y=1.03)
    ax.set_ylabel("Thermally Expanded Axial Heights (cm)".upper(), labelpad=20)
    ax.set_xlim([0.0, 0.5 + maxAssems * (assemWidth + assemSeparation)])

    # Plot and save figure
    ax.plot()
    figName = coreName + "AssemblyTypes{}.png".format(plotNumber)
    runLog.debug("Writing assem layout {} in {}".format(figName, os.getcwd()))
    fig.savefig(figName)
    plt.close(fig)
    return figName
5,327,599