content
stringlengths
22
815k
id
int64
0
4.91M
def plot_sleep_per_weekday(title, ylabel, keys, values, counter, color_list):
    """
    Creates a bar chart for the given per-weekday sleep data.

    :param title: title to display
    :param ylabel: ylabel text
    :param keys: names for the bars (weekday labels)
    :param values: values to create bars for
    :param counter: per-weekday night counts; their sum is shown in the title
    :param color_list: one color per bar
    :return: None (shows the figure)
    """
    fig = plt.figure(title)
    ax = fig.add_subplot(111)
    ## necessary variables
    ind = np.arange(len(values))
    width = 0.5
    try:
        # vertical offset so value labels sit just above each bar
        offset = max(values) / 20
        ## the bars
        bars = ax.bar(ind, values, width, color=color_list)
        for rect in bars:
            height = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2., offset + height,
                    '%.2f' % height, ha='center', va='bottom')
        # axes and labels
        ax.set_xlim(-width, len(ind) + width)
        ax.set_ylim(0, max(values) + max(values) / 10)
        ax.set_ylabel(ylabel)
        ax.set_title(title + "\n (based on " + str(sum(counter)) + " nights of sleep data)")
        xTickMarks = keys
        ax.set_xticks(ind + (width / 2))
        xtickNames = ax.set_xticklabels(xTickMarks)
        plt.setp(xtickNames, rotation=-45, fontsize=10)
        # BUG FIX: legend label previously read "Weeekends" (typo)
        plt.legend((bars[0], bars[-1]), ('Weeknights', 'Weekends'), loc=4)
        plt.show()
    except ValueError:
        # max() raises ValueError when `values` is empty
        print("no data was found")
5,326,300
def binary_seg_loss(loss):
    """
    Select the binary segmentation loss matching the given loss name.

    :param loss: loss name; 'focal' selects the binary focal loss, anything
                 else falls back to binary cross-entropy
    :return: a Keras-compatible loss object
    """
    if loss == 'focal':
        return BinaryFocalLoss()
    # default: plain binary cross-entropy
    return tf.keras.losses.BinaryCrossentropy()
5,326,301
def time_test_csv(
    ID, CSV_TIMES, SCALE_PARAM, GTNX, GRAPH_TYPE, graph, path_costs,
    cost_sum, dist, time_pipeline, notes
):
    """
    Append one row of timing data for the current run to the time-log CSV.

    :param CSV_TIMES: output file
    :params: all remaining parameters are the values saved in the csv row
    """
    # scale factor: fall back to 1 when the graph defines none
    factor = getattr(graph, "factor", 1)
    # number of pixels whose mean cost across layers is nonzero
    n_pixels = np.sum(np.mean(graph.cost_rest, axis=0) > 0)
    # --> csv columns:
    # scale,graphtool,graphtype,n_nodes,n_edges,add_nodes_time,add_edge_time,
    # shortest_path_time, notes
    row = [
        ID, SCALE_PARAM, GTNX, GRAPH_TYPE, factor, dist, n_pixels,
        graph.n_nodes, graph.n_edges,
        graph.time_logs["add_nodes"],
        graph.time_logs["add_all_edges"],
        graph.time_logs["shortest_path"],
        path_costs, cost_sum, time_pipeline, notes,
    ]
    append_to_csv(CSV_TIMES, row)
5,326,302
def get_config_file():
    """
    Return the loaded config file if one exists.

    Searches known directories for a dagobahd config; if none can be read,
    creates a default config at ~/dagobahd.yml, loads it, and returns it.
    """
    # config will be created here if we can't find one
    new_config_path = os.path.expanduser('~/dagobahd.yml')

    config_dirs = ['/etc', os.path.expanduser('~/dagobah/dagobah/daemon/')]
    config_filenames = ['dagobahd.yml', 'dagobahd.yaml',
                        '.dagobahd.yml', '.dagobahd.yaml']

    for directory in config_dirs:
        for filename in config_filenames:
            candidate = os.path.join(directory, filename)
            try:
                if os.path.isfile(candidate):
                    # BUG FIX: use a context manager (the handle was leaked on
                    # parse errors) and safe_load (yaml.load without a Loader
                    # is unsafe and deprecated)
                    with open(candidate) as to_load:
                        config = yaml.safe_load(to_load.read())
                    replace_nones(config)
                    return config
            except (IOError, OSError, yaml.YAMLError):
                # unreadable or malformed candidate: try the next location
                # (was a bare `except:`, which also swallowed KeyboardInterrupt)
                pass

    # if we made it to here, need to create a config file
    # double up on notifications here to make sure first-time user sees it
    # BUG FIX: Python 2 print statements -> print() calls; removed the stray
    # debug line that printed 'sometrhins'
    print('Creating new config file in home directory')
    logging.info('Creating new config file in home directory')

    with open(new_config_path, 'w') as new_config:
        new_config.write(return_standard_conf())
    with open(new_config_path, 'r') as new_config:
        config = yaml.safe_load(new_config.read())
    replace_nones(config)
    return config
5,326,303
def bostock_cat_colors(color_sets=None):
    """
    Get almost as many categorical colors as you please.

    Get more than one of the color brewer sets with ['set1', 'set2'].

    Parameters
    ----------
    color_sets : list, optional
        list of color sets to return; valid options are
        (set1, set2, set3, pastel1, pastel2, paired, dark, accent,
        category10). Defaults to ["set3"].

    Returns
    -------
    categorical_colors : list
        list of strings (e.g. ["#e41a1c",...])

    Examples
    --------
    >>> bostock_cat_colors(['set3'])[:5]
    ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3']

    >>> bostock_cat_colors(['category10'])[:5]
    ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd']

    Notes
    -----
    list of hex colors can be found here:
    https://observablehq.com/@d3/color-schemes
    """
    # IDIOM FIX: was `color_sets=["set3"]` — a mutable default argument
    if color_sets is None:
        color_sets = ["set3"]

    bostock = {
        "set1": ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
                 "#ff7f00", "#ffff33", "#a65628", "#f781bf",
                 "#999999"],
        "set2": ["#66c2a5", "#fc8d62", "#8da0cb", "#e78ac3",
                 "#a6d854", "#ffd92f", "#e5c494", "#b3b3b3"],
        "set3": ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072",
                 "#80b1d3", "#fdb462", "#b3de69", "#fccde5",
                 "#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f"],
        "pastel1": ["#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4",
                    "#fed9a6", "#ffffcc", "#e5d8bd", "#fddaec",
                    "#f2f2f2"],
        "pastel2": ["#b3e2cd", "#fdcdac", "#cbd5e8", "#f4cae4",
                    "#e6f5c9", "#fff2ae", "#f1e2cc", "#cccccc"],
        "paired": ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c",
                   "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00",
                   "#cab2d6", "#6a3d9a", "#ffff99", "#b15928"],
        "dark": ["#1b9e77", "#d95f02", "#7570b3", "#e7298a",
                 "#66a61e", "#e6ab02", "#a6761d", "#666666"],
        "accent": ["#7fc97f", "#beaed4", "#fdc086", "#ffff99",
                   "#386cb0", "#f0027f", "#bf5b17", "#666666"],
        "category10": ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728",
                       "#9467bd", "#8c564b", "#e377c2", "#7f7f7f",
                       "#bcbd22", "#17becf"],
    }
    # flatten the requested sets in order
    return [color for set_name in color_sets for color in bostock[set_name]]
5,326,304
def SaveClose():
    """Close every open scene: matplotlib figures first, then mlab windows."""
    # delegate to the toolkit-specific teardown helpers
    SaveCloseMatplotlib()
    SaveCloseMlab()
5,326,305
def run_docstyle_check(repository):
    """Run PyDocsStyle checker against the selected repository.

    The repo-local ``check-docstyle.sh`` script is executed inside the
    repository directory and its report is written one level up as
    ``<repo>.pydocstyle.txt``.
    """
    shell_command = (
        "pushd {repo};./check-docstyle.sh > ../{repo}.pydocstyle.txt;popd"
        .format(repo=repository)
    )
    os.system(shell_command)
5,326,306
def PruneXTraceJSONFiles(request_types):
    """
    Prune a directory of X-Trace JSON files, moving aside garbage-collection
    traces and any others that do not correspond to the allowed request types.

    @params request_types: the allowable request types for this set of traces.
    """
    # start statistics
    start_time = time.time()

    # make the pruned directory if it does not exist
    if not os.path.exists('jsons/xtrace/pruned-traces'):
        os.mkdir('jsons/xtrace/pruned-traces')

    # expect the xtrace files to be in this directory
    trace_paths = sorted(glob.glob('jsons/xtrace/*.json'))

    # start every request type at zero observed traces
    traces_per_request_type = {request_type: 0 for request_type in request_types}

    for trace_path in trace_paths:
        # read this xtrace json file
        with open(trace_path, 'r') as handle:
            data = json.load(handle)

        # the base id names the pruned file if we end up moving this trace
        base_id = data['id']

        # consider all of the nodes in the list of reports
        reports = data['reports']
        nnodes = len(reports)

        tags = set()
        # a trace is pruned when any report carries a multi-component tag
        # ('GarbageCollection', 'NameNode', and 'DataNode' have two components)
        remove_trace = False
        for report in reports:
            # skip the initial Tag of 'FsShell'
            if 'Tag' in report and not report['Tag'][0] == 'FsShell':
                if not len(report['Tag']) == 1:
                    remove_trace = True
                else:
                    tags.add(report['Tag'][0])

        if remove_trace:
            # removable traces must not have collected any single-component tags
            assert (not len(tags))
            pruned_path = 'jsons/xtrace/pruned-traces/{}.json'.format(base_id)
            os.rename(trace_path, pruned_path)
        else:
            # exactly one tag identifies the request for kept traces
            assert (len(tags) == 1)
            request = list(tags)[0]
            request_type = request.split(' ')[0].strip('-')
            assert (request_type in request_types)
            traces_per_request_type[request_type] += 1

    print('Pruned XTrace files in {:0.2f} seconds.'.format(time.time() - start_time))
    for request_type in traces_per_request_type:
        print(' {}: {}'.format(request_type, traces_per_request_type[request_type]))
5,326,307
def bbpssw_gates_and_measurement_bob(q1, q2):
    """
    Performs the gates and measurements for Bob's side of the BBPSSW protocol.

    :param q1: Bob's qubit from the first entangled pair (CNOT control)
    :param q2: Bob's qubit from the second entangled pair (CNOT target, measured)
    :return: Integer 0/1 indicating Bob's measurement outcome
    """
    # CNOT with q1 as control and q2 as target, then measure the target
    q1.cnot(q2)
    return q2.measure()
5,326,308
def fiber_array(
    n: int = 8,
    pitch: float = 127.0,
    core_diameter: float = 10,
    cladding_diameter: float = 125,
    layer_core: Tuple[int, int] = gf.LAYER.WG,
    layer_cladding: Tuple[int, int] = gf.LAYER.WGCLAD,
) -> Component:
    """Returns a fiber array component: `n` core/cladding circle pairs laid
    out on a `pitch` spacing, with one port per fiber named ``F{i}``.

    :param n: number of fibers
    :param pitch: center-to-center spacing between fibers
    :param core_diameter: diameter of each fiber core circle
    :param cladding_diameter: diameter of each fiber cladding circle
    :param layer_core: layer for the core circles
    :param layer_cladding: layer for the cladding circles
    """
    component = Component()
    for index in range(n):
        x_offset = index * pitch
        fiber_core = component.add_ref(
            circle(radius=core_diameter / 2, layer=layer_core))
        fiber_cladding = component.add_ref(
            circle(radius=cladding_diameter / 2, layer=layer_cladding))
        fiber_core.movex(x_offset)
        fiber_cladding.movex(x_offset)
        component.add_port(name=f"F{index}", width=core_diameter, orientation=0)
    return component
5,326,309
def check_flags():
    """Additional logic to make sure flags are set appropriately."""
    if FLAGS.customized_model is not None:
        # a TF checkpoint consists of three files; all of them must exist
        if (not tf.compat.v1.gfile.Exists(FLAGS.customized_model +
                                          '.data-00000-of-00001')
                or not tf.compat.v1.gfile.Exists(FLAGS.customized_model + '.index')
                or not tf.compat.v1.gfile.Exists(FLAGS.customized_model + '.meta')):
            raise RuntimeError('The model files {}* do not exist. Potentially '
                               'relevant issue: '
                               'https://github.com/google/deepvariant/blob/r1.3/docs/'
                               'FAQ.md#why-cant-it-find-one-of-the-input-files-eg-'
                               'could-not-open'.format(FLAGS.customized_model))
        # tell the user the default model for their model_type is overridden
        logging.info(
            'You set --customized_model. Instead of using the default '
            'model for %s, `call_variants` step will load %s* '
            'instead.', FLAGS.model_type, FLAGS.customized_model)
    # haplotype information is only supported by the PacBio model
    if FLAGS.use_hp_information and FLAGS.model_type != 'PACBIO':
        raise ValueError('--use_hp_information can only be used with '
                         '--model_type="PACBIO"')
5,326,310
def delete(movie_id):
    """
    Delete the movie with the given id from the database.

    :param movie_id: id of the movie to delete
    :return: redirect to the index page
    """
    # fetch the Movie row itself (the old local name suggested it held an id)
    movie = Movie.query.get(movie_id)
    db_session.delete(movie)
    db_session.commit()
    return redirect(url_for('home'))
5,326,311
def one_vector_block_diagonal(num_blocks: int, vector_length: int) -> Tensor:
    """Computes a block diagonal matrix with column vectors of ones as blocks.

    Associated with the mathematical symbol :math:`E`.

    Example:
        :: one_vector_block_diagonal(3, 2) == tensor([
            [1., 0., 0.],
            [1., 0., 0.],
            [0., 1., 0.],
            [0., 1., 0.],
            [0., 0., 1.],
            [0., 0., 1.]]).

    Args:
        num_blocks: number of columns.
        vector_length: number of ones in each matrix diagonal block.

    Returns:
        ``(n * vector_length, n)`` 0-1 tensor.
    """
    # pylint: disable=E1103
    # tiling the identity horizontally and re-folding the rows stretches each
    # diagonal 1 into a column vector of ones
    identity = torch.eye(num_blocks)
    tiled = identity.repeat(1, vector_length)
    return tiled.reshape(num_blocks * vector_length, num_blocks)
5,326,312
def get_cli_args():
    """
    Build and parse this wrapper's command-line arguments.

    :return: dict with command-line arguments from user (vars() of the
             parsed argparse.Namespace)
    """
    # reuse the main pipeline's arguments, minus the ones this wrapper
    # replaces with its own (--output, --ses, --subject, --task, wrapper loc)
    args = get_main_pipeline_arg_names().difference({
        'output', 'ses', 'subject', 'task', WRAPPER_LOC[2:].replace('-', '_')
    })
    tasks = ('SST', 'MID', 'nback')
    parser = get_pipeline_cli_argparser(arg_names=args)
    parser.add_argument('-all-events', '--all-events', type=valid_readable_dir,
                        help=('Valid path to an existing directory which has '
                              '1 folder per subject, with the folder structure '
                              '(--all-events)/(subject ID)/(session name)/'
                              'level-1/events.'))
    parser.add_argument('-all-outputs', '--all-outputs', type=valid_output_dir,
                        help=('Valid path to your output directory root. In '
                              'other words, the "--output" argument for each '
                              'command in the --script-list file will be '
                              'subject- and session-specific subdirectories '
                              'of this --all-outputs directory.'))
    # --output is optional here; per-run outputs live under --all-outputs
    parser.add_argument('-output', '--output', type=valid_output_dir,
                        required=False)
    parser.add_argument('-script', '--script', type=valid_readable_file)
    parser.add_argument('-script-list', '--script-list', required=True)
    parser.add_argument('-slurm', '--slurm', action='store_true')
    parser.add_argument('-sourcedata', '--sourcedata', type=valid_readable_dir)
    # NOTE(review): `choices=tasks` was deliberately left commented out, so
    # arbitrary task names are accepted — confirm that is intended
    parser.add_argument('-tasks', '--tasks', nargs='+', default=tasks)  # choices=tasks,
    parser.add_argument(WRAPPER_LOC, type=valid_readable_dir,
                        default=SCRIPT_DIR)  # , dest='loc')
    return vars(parser.parse_args())
5,326,313
def list_all_assets(organization_id):
    """Demonstrate listing and printing all assets.

    :param organization_id: numeric ID of the organization
    :return: index of the last asset printed (0 when the iterator is empty)
    """
    i = 0
    # [START securitycenter_list_all_assets]
    from google.cloud import securitycenter

    client = securitycenter.SecurityCenterClient()

    # organization_id is the numeric ID of the organization.
    # organization_id = "1234567777"
    org_name = "organizations/{org_id}".format(org_id=organization_id)

    # Call the API and print results.
    asset_iterator = client.list_assets(request={"parent": org_name})
    for i, asset_result in enumerate(asset_iterator):
        print(i, asset_result)
    # [END securitycenter_list_all_assets]
    # NOTE(review): this returns the last enumerate index, i.e. count - 1
    # when assets exist — confirm callers expect an index rather than a count
    return i
5,326,314
def _0_to_empty_str(dataframe: pd.DataFrame, column_data_type: dict): """ 데이터가 str인 column에 들어있는 0을 '' 로 바꾸어 준다. column_data_type 에서 value가 'str' 인 column 만 바꾸어 준다. """ for column, datatype in column_data_type.items(): if datatype == "str": dataframe[column].replace("0", "", inplace=True) return dataframe
5,326,315
async def several_static_pools(fixt_controller, count: int = 5):
    """Create several static pools (async fixture: yields their ids, then
    deletes them on teardown)."""
    pool_main_resources = await get_resources_for_pool_test()
    controller_id = pool_main_resources["controller_id"]
    resource_pool_id = pool_main_resources["resource_pool_id"]
    context = await get_auth_context()
    pools_list = list()
    # Create the pools
    # NOTE(review): range(count + 1) creates count + 1 pools, one more than
    # the `count` parameter suggests — confirm whether this is intended
    for _ in range(count + 1):
        vm_id = uuid.uuid4()
        qu = """ mutation{addStaticPool( verbose_name: "%s", controller_id: "%s", resource_pool_id: "%s", vms:[ {id: "%s", verbose_name: "test_2"} ], connection_types: [SPICE, RDP], ){ pool { pool_id } ok } }""" % (
            get_test_pool_name(),
            controller_id,
            resource_pool_id,
            vm_id,
        )
        # id of the created pool
        pool_create_res = await execute_scheme(pool_schema, qu, context=context)
        pools_list.append(pool_create_res["addStaticPool"]["pool"]["pool_id"])
    # Pools created
    yield {"pools": pools_list}
    # Delete the created pools
    for pool_id in pools_list:
        qu = (
            """ mutation { removePool(pool_id: "%s", full: true) { ok } } """
            % pool_id
        )
        await execute_scheme(pool_schema, qu, context=context)
5,326,316
def set_proxies():
    """Set proxies to allow downloading of external URLs.

    Sets both the upper- and lower-case environment variable variants,
    since different tools read different ones.
    """
    proxy_url = "http://fwdproxy:8080"
    # CONSISTENCY FIX: the lower-case variants previously omitted the
    # "http://" scheme; use the same fully-qualified URL for all four.
    for variable in ("HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"):
        os.environ[variable] = proxy_url
5,326,317
def _predict(rel):
    """
    Predict the betrayal probabilities for `rel`.

    :return: an inference.Output object with the predictions
    """
    # thin wrapper so callers in this module never touch `inference` directly
    return inference.predict(rel)
5,326,318
def evaluate_exams(request, exam_id):
    """
    Evaluate a student's submitted exam solutions and persist the result.

    Request-Methods: POST
    Request-Headers: Authorization Token
    Request-Body: Student-Solution -> JSON
    Response: "student_name" -> str, "teacher_name" -> str, "batch" -> str,
              "marks" -> str, "exam_start_date_time" -> str,
              "total_marks" -> str, "grade" -> str
    """
    student_solutions = request.data.get("student_solutions")
    # resolve the student from the auth token (raises on an invalid token)
    student = check_token_and_get_student(request)
    # the exam must exist and belong to the student's batch
    if not Exam.objects.filter(Q(id=exam_id) & Q(batch=student.batch)).exists():
        raise NotFoundException("Exam not found")
    exam = Exam.objects.get(Q(id=exam_id) & Q(batch=student.batch))
    # reconstruct the start time by subtracting the exam duration from "now"
    # NOTE(review): only `exam_period.hour` is used — any minutes component of
    # the exam period is ignored; confirm whether that is intended
    exam_start_date_time = datetime.now(tz=timezone.utc) - timedelta(
        hours=exam.exam_period.hour
    )
    # each student may submit a given exam only once
    if Result.objects.filter(Q(exam=exam) & Q(student=student)).exists():
        raise AlreadyExistsException("Already Submitted the exam")
    score = evaluate_exam_score(exam.questions_and_solutions, student_solutions)[
        "total_score"
    ]
    grade = evaluate_exam_grade(score, exam.total_marks)
    result = Result(
        exam_start_date_time=(exam_start_date_time),
        exam=exam,
        student=student,
        teacher=exam.teacher,
        student_solutions=student_solutions,
        total_marks=float(exam.total_marks),
        score=score,
        grade=grade,
    )
    result.save()
    response = {
        "student_name": student.name,
        "teacher_name": exam.teacher.name,
        "batch": exam.batch,
        "score": score,
        "exam_start_date_time": (exam_start_date_time),
        "total_marks": float(exam.total_marks),
        "grade": grade,
    }
    return JsonResponse(data=response, status=200)
5,326,319
def _accumulated_penalty_energy_fw(energy_to_track, penalty_matrix, parallel):
    """Calculates acummulated penalty in forward direction (t=0...end).

    `energy_to_track`: squared abs time-frequency transform
    `penalty_matrix`: pre-calculated penalty for all potential jumps between
    two frequencies

    # Returns:
        `penalized_energy`: new energy with added forward penalty
        `ridge_idxs`: calculated initial ridge with only forward penalty
    """
    # work on a copy; the penalty kernels mutate their first argument
    penalized_energy = energy_to_track.copy()
    if parallel:
        __accumulated_penalty_energy_fwp(penalized_energy, penalty_matrix)
    else:
        __accumulated_penalty_energy_fw(penalized_energy, penalty_matrix)
    # frequency index of the minimum penalized energy at each time step
    ridge_idxs = np.unravel_index(
        np.argmin(penalized_energy, axis=0), penalized_energy.shape)[1]
    return penalized_energy, ridge_idxs
5,326,320
def rot90(m, k=1, axis=2):
    """Rotate a 3D array k*90 degrees counter-clockwise around the given axis.

    This differs from ``np.rot90`` (which rotates in the first two axes)
    because it supports rotation about an arbitrary axis of a 3D array.
    """
    # move the rotation axis into position 2, rotate the leading plane,
    # then move it back
    swapped = np.swapaxes(m, 2, axis)
    rotated = np.rot90(swapped, k)
    return np.swapaxes(rotated, 2, axis)
5,326,321
def get_pads(onnx_node):  # type: (NodeWrapper) -> Tuple[int, int, int]
    """
    Get padding values for the operation described by an ONNX node.

    If `auto_pad` attribute is specified as SAME_UPPER or SAME_LOWER, or VALID
    values are calculated. Otherwise values are taken from the `pads` attribute.

    `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...]

    :param onnx_node: wrapped ONNX node for Conv or Pool operation
    :return: tuple of numbers of pixels to pad (height, width, depth)
    """
    auto_pad = onnx_node.get_attribute_value('auto_pad')
    pads = onnx_node.get_attribute_value('pads', ())  # Padding along each axis
    kernel_shape = onnx_node.get_attribute_value('kernel_shape')

    # Attribute 'auto_pad' is deprecated, but is currently used by CNTK
    if auto_pad:
        if auto_pad == 'VALID':
            pads = [0, 0] * len(kernel_shape)
        else:
            # SAME_UPPER or SAME_LOWER mean pad the input so that the output size
            # match the input. In case of odd number add the extra padding at the
            # end for SAME_UPPER and at the beginning for SAME_LOWER.
            def pad_value(kernel_dim):  # type: (int) -> float
                # half of the kernel overlap on each side (may be fractional)
                return (kernel_dim - 1.0) / 2.0

            pads_starts = [floor(pad_value(dim)) if auto_pad == 'SAME_UPPER'
                           else ceil(pad_value(dim)) for dim in kernel_shape]
            pads_ends = [ceil(pad_value(dim)) if auto_pad == 'SAME_UPPER'
                         else floor(pad_value(dim)) for dim in kernel_shape]
            pads = pads_starts + pads_ends

    # NOTE(review): the original indentation was lost in transit; this call is
    # placed at function level (verifying whatever `pads` ended up as) —
    # confirm it was not meant to run only inside the SAME_* branch
    verify_symmetric_padding(onnx_node, pads)

    pad_h, pad_w, pad_d = 0, 0, 0
    if pads and len(pads) == 2:  # ONNX input axes NCHW
        pad_h, pad_w = pads
    if pads and len(pads) == 3:  # ONNX input axes NCHWD
        pad_h, pad_w, pad_d = pads
    if pads and len(pads) == 4:  # ONNX input axes NCHW
        pad_h, pad_w, _, _ = pads
    elif pads and len(pads) == 6:  # ONNX input axes NCHWD
        pad_h, pad_w, pad_d, _, _, _ = pads
    return pad_h, pad_w, pad_d
5,326,322
def get_changelog_version() -> Optional[str]:
    """
    Return the latest version found in the repo's changelog.txt, or None
    when no version string matches.
    """
    root_dir = hgit.get_client_root(super_module=False)
    changelog_file = os.path.join(root_dir, "changelog.txt")
    hdbg.dassert_file_exists(changelog_file)
    changelog = hio.from_file(changelog_file)
    match = re.search(_CHANGELOG_VERSION_RE, changelog)
    # first regex hit wins; None when the changelog has no version string
    version: Optional[str] = match.group() if match else None
    return version
5,326,323
def from_dict(transforms):
    """Deserializes the transformations stored in a dict.

    Supports deserialization of Streams only.

    Parameters
    ----------
    transforms : dict
        Transforms

    Returns
    -------
    out : solt.core.Stream
        An instance of solt.core.Stream.
    """
    if not isinstance(transforms, dict):
        raise TypeError("Transforms must be a dict!")
    # a serialized Stream dict holds exactly one top-level entry; the first
    # entry is deserialized and returned
    for name, params in transforms.items():
        # recursively rebuild any nested transform lists first
        if "transforms" in params:
            params["transforms"] = [from_dict(x) for x in params["transforms"]]
        if "affine_transforms" in params:
            params["affine_transforms"] = from_dict(params["affine_transforms"])
        if name not in Serializable.registry:
            raise ValueError(f"Could not find {name} in the registry!")
        return Serializable.registry[name](**params)
5,326,324
def create_mp_pool(nproc=None):
    """Creates a multiprocessing pool of processes.

    Arguments
    ---------
    nproc : int, optional
        number of processors to use. Defaults to the number of available
        CPUs minus 2 (but at least 1).

    Raises
    ------
    ValueError
        If `nproc` exceeds the number of available CPUs.
    """
    n_cpu = pathos.multiprocessing.cpu_count()
    if nproc is None:
        # leave some headroom, but never request fewer than one worker
        # (the old `n_cpu - 2` default was <= 0 on 1- and 2-core machines)
        nproc = max(n_cpu - 2, 1)
    # BUG FIX: `assert` is stripped under `python -O`; validate explicitly
    if nproc > n_cpu:
        raise ValueError(
            f'Cannot allocate more processes than existing CPUs: {nproc} > {n_cpu}')
    return ProcessingPool(nproc)
5,326,325
def subpixel_edges(img, threshold, iters, order):
    """
    Detects subpixel features for each pixel belonging to an edge in `img`.

    The subpixel edge detection uses the method published in:
    "Accurate Subpixel Edge Location Based on Partial Area Effect"
    http://www.sciencedirect.com/science/article/pii/S0262885612001850

    Parameters
    ----------
    img: ndarray
        A grayscale image.
    threshold: int or float
        Minimum difference of intensity at both sides of a pixel to be
        considered an edge.
    iters: int
        How many smoothing iterations are needed to find the final edges:
        0: noise-free images, detection on the raw values (paper section 3).
        1: low-noise images, detection after a 3x3 smoothing (sections 4-5).
        >1: high-noise images, repeated smoothing + detection + synthetic
        image creation (section 6); a few iterations are normally enough.
    order: int
        Order of the edges to find: 1 for straight lines, 2 for second order.

    Returns
    -------
    An instance of EdgePixel
    """
    if iters == 0:
        return main_iter0(img, threshold, iters, order)
    if iters == 1:
        return main_iter1(img, threshold, iters, order)
    if iters > 1:
        # each pass refines the edges on a progressively smoothed image
        for _ in range(iters):
            edge_pixels, img = main_iterN(img, threshold, iters, order)
        return edge_pixels
5,326,326
def arrays(hyperchunks, array_count):
    """Iterate over the arrays in a set of hyperchunks.

    :param hyperchunks: parsed hyperchunk expressions to expand
    :param array_count: total number of arrays available, used to resolve
        negative indices, slices, and Ellipsis into concrete indices
    :yields: one `Array` wrapper per concrete array index
    """
    class Attribute(object):
        # Wraps one attribute expression together with its hyperslices.
        def __init__(self, expression, hyperslices):
            self._expression = expression
            self._hyperslices = hyperslices

        @property
        def expression(self):
            return self._expression

        @property
        def hyperslice_count(self):
            return 0 if self._hyperslices is None else len(self._hyperslices)

        def hyperslices(self):
            """Iterate over the hyperslices in a hyperchunk."""
            if self._hyperslices is not None:
                for hyperslice in self._hyperslices:
                    yield tuple(hyperslice)

    class Array(object):
        # Wraps one concrete array index with its attributes, order, and
        # hyperslices.
        def __init__(self, index, attributes, order, hyperslices):
            self._index = index
            self._attributes = attributes
            self._order = order
            self._hyperslices = hyperslices

        @property
        def index(self):
            return self._index

        @property
        def attribute_count(self):
            return 0 if self._attributes is None else len(self._attributes)

        @property
        def order(self):
            return self._order

        def attributes(self, attribute_count):
            """Iterate over the attributes in a hyperchunk."""
            if self._attributes is not None:
                for attributes in self._attributes:
                    # integers / Ellipsis / slices are normalized to a slice,
                    # then expanded to one Attribute per concrete index
                    if isinstance(attributes, (numbers.Integral, type(Ellipsis), slice)):
                        if isinstance(attributes, numbers.Integral):
                            if attributes < 0:
                                # negative index counts back from the end
                                attributes = slice(attribute_count + attributes, attribute_count + attributes + 1)
                            else:
                                attributes = slice(attributes, attributes + 1)
                        elif isinstance(attributes, type(Ellipsis)):
                            attributes = slice(0, attribute_count)
                        start, stop, step = attributes.indices(attribute_count)
                        for index in numpy.arange(start, stop, step):
                            yield Attribute(slycat.hyperchunks.grammar.AttributeIndex(index), self._hyperslices)
                    else:
                        # anything else is a full attribute expression
                        yield Attribute(attributes, self._hyperslices)

    for hyperchunk in hyperchunks:
        # NOTE: the loop variable shadows this function's name inside the loop
        for arrays in hyperchunk.arrays:
            if isinstance(arrays, (numbers.Integral, type(Ellipsis), slice)):
                if isinstance(arrays, numbers.Integral):
                    if arrays < 0:
                        # negative index counts back from the end
                        arrays = slice(array_count + arrays, array_count + arrays + 1)
                    else:
                        arrays = slice(arrays, arrays + 1)
                elif isinstance(arrays, type(Ellipsis)):
                    arrays = slice(0, array_count)
                start, stop, step = arrays.indices(array_count)
                for index in numpy.arange(start, stop, step):
                    yield Array(index, hyperchunk.attributes, hyperchunk.order, hyperchunk.hyperslices)
            else:
                cherrypy.log.error("hyperchunks.__init__.py", "Unexpected array: %r" % arrays)
                raise ValueError("Unexpected array: %r" % arrays)
5,326,327
def ne(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) -> DNDarray:
    """
    Element-wise rich comparison of non-equality between values from two
    operands, commutative; returns a boolean
    :class:`~heat.core.dndarray.DNDarray`.

    Parameters
    ----------
    x: DNDarray or scalar
        The first operand involved in the comparison
    y: DNDarray or scalar
        The second operand involved in the comparison

    Examples
    ---------
    >>> import heat as ht
    >>> x = ht.float32([[1, 2],[3, 4]])
    >>> ht.ne(x, 3.0)
    DNDarray([[ True,  True],
              [False,  True]], dtype=ht.bool, device=cpu:0, split=None)
    >>> y = ht.float32([[2, 2], [2, 2]])
    >>> ht.ne(x, y)
    DNDarray([[ True, False],
              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
    """
    result = _operations.__binary_op(torch.ne, x, y)
    # coerce the result into a boolean DNDarray if the backend produced
    # any other dtype
    if result.dtype != types.bool:
        result = dndarray.DNDarray(
            result.larray.type(torch.bool),
            result.gshape,
            types.bool,
            result.split,
            result.device,
            result.comm,
            result.balanced,
        )
    return result
5,326,328
def iter_paired_end_filenames(sample_name_delims, sample_names, patterns, filenames):
    """Print paired end files to stdout, one pair per line.

    Files that are single-ended, unpaired, or do not match any pattern are
    reported on stderr instead.
    """
    pairs = {}
    for raw_name in filenames:
        filename = raw_name.strip()
        try:
            sample_name, end = get_sample_name_and_end(
                filename, patterns=patterns,
                sample_name_delims=sample_name_delims)
        except AssertionError:
            print('[UNKNOWN PATTERN]\t(unknown)', file=stderr)
            continue
        if not end:
            print('[SINGLE ENDED]\t(unknown)', file=stderr)
            continue
        # slot the filename into position 0 (R1) or 1 (R2) for its sample
        pairs.setdefault(sample_name, [None, None])[end - 1] = filename

    for sample_name, (r1, r2) in pairs.items():
        if r1 is None or r2 is None:
            print(f'[UNPAIRED SAMPLE]\t{sample_name}\t{r2 if r1 is None else r1}', file=stderr)
            continue
        out_str = f'{r1}\t{r2}'
        if sample_names:
            out_str = f'{sample_name}\t{out_str}'
        print(out_str)
5,326,329
def main(samples = 1195191, cores = -1): """ This function loads data as a pandas dataframe, calculates mean and variance of the sentiment scores per week and month. Lastly, it saves the figures. Inputs: samples, default: 50000: description: int, a number of titles to be sampled (max 1 milion) cores, default: -1 : descritption int, the number of cpu cores use for the text processing. Uses all available cores as a default. Output: plots of mean and variance per week and month saved in reports/figures/ """ # load data as a pandas dataframe data_path = os.path.join("..", "data", "raw", "abcnews-date-text.csv") #path to data data = pd.read_csv(data_path) # read data print("Succesfully loaded data") # set a column to a date type and sample data frame data['publish_date'] = pd.to_datetime(data.publish_date, format="%Y%m%d") #convert variable to datetime format data = data.sample(samples) # sample x random headlines # sort data data = data.sort_values("publish_date") # empty list for sentiment scores senti_list = [] print(f"Processing texts using {cores} cores") # a loop to extract sentiment score for every headline for doc in tqdm(nlp.pipe(data.headline_text, disable = ["ner"], n_process = cores)): #for each headline... 
#doc = nlp(headline) #process headline score = doc._.polarity #extract sentiment score senti_list.append(score) #append to list # appending list with the sentiment score into pandas dataframe data["sentiment"] = senti_list # calculating and plotting mean score per week data_week_mean = data.resample("W",on ="publish_date").mean() sentiment_plot(data_week_mean, y_lim = (-0.175,0.175), title = "Mean of Weekly Sentiment Scores", filename = "week_plot_mean",samples = samples) # calculating and plotting mean score per month data_month_mean = data.resample("M",on ="publish_date").mean() sentiment_plot(data_month_mean, y_lim = (-0.175,0.175), title = "Mean of Monthly Sentiment Scores", filename = "month_plot_mean",samples = samples) # calculating and plotting variance per week data_week_var = data.resample("W",on ="publish_date").var() sentiment_plot(data_week_var, y_lim = (0,0.125), title = "Variance of Weekly Sentiment Scores", filename ="week_plot_variance",samples = samples) # calculating and plotting variance per month data_month_var = data.resample("M",on ="publish_date").var() sentiment_plot(data_month_var, y_lim = (0,0.125), title = "Variance of Monthly Sentiment Scores", filename = "month_plot_variance",samples= samples) print(f"Done :-) \n Plots are available at {os.path.join('..', 'reports', 'figures')}")
5,326,330
def _quote_embedded_quotes(text): """ Replace any embedded quotes with two quotes. :param text: the text to quote :return: the quoted text """ result = text if '\'' in text: result = result.replace('\'', '\'\'') if '"' in text: result = result.replace('"', '""') return result
5,326,331
def exp2(x):
    """Return 2 raised to the power ``x``."""
    base = 2
    return base ** x
5,326,332
def test_update_metadata_family_does_not_exist(db_session, make_workspace, upload_file, mocker):
    """Updating metadata of a family not present in a workspace should fail"""
    # bypass permission checks so the test exercises only the family lookup
    mocker.patch('flask_principal.Permission.can', return_value=True)

    # the workspace only knows the 'base' and 'existing' families
    workspace = make_workspace(families={'base': 0, 'existing': 0})
    file_id = upload_file(workspace=workspace)
    # 'unknown' is not a family of this workspace, so the update must fail
    new_metadata = {
        'metadata': {
            'unknown': {
                'key': 'value',
            }
        }
    }
    with pytest.raises(APIException):
        update_metadata(wid=workspace.id, uuid=file_id, body=new_metadata)
5,326,333
def play():
    """Play page.

    Fetches the visitor's ticket for the current game from the
    'ticket_name' cookie, creating a fresh ticket when there is no cookie,
    the cookie points at a missing ticket, or the ticket belongs to a
    previous game.
    """
    ticket_name = request.cookies.get('ticket_name')
    ticket = None
    game = get_game()
    if ticket_name:
        ticket = Ticket.get_by_name(ticket_name)
    # BUG FIX: previously a cookie naming a missing ticket left `ticket` as
    # None without creating a replacement, crashing on `ticket.data` below.
    # A new ticket is needed when none exists or it is from another game.
    if ticket is None or ticket.game != game.id:
        ticket = Ticket.create(
            name=get_name(),
            game=game.id,
            data=json.dumps(Generator().get_ticket())
        )
    resp = make_response(render_template("public/play.html", card=ticket,
                                         data=json.loads(ticket.data)))
    resp.set_cookie('ticket_name', ticket.name)
    return resp
5,326,334
def count_search_results(idx, typ, query, date_range, exclude_distributions,
                         exclude_article_types):
    """Count the number of results for a query."""
    body = create_query(query, date_range, exclude_distributions,
                        exclude_article_types)
    return _es().count(index=idx, doc_type=typ, body=body)
5,326,335
def ifft(data: np.ndarray) -> np.ndarray:
    """
    Perform inverse discrete Fast Fourier transform of data by conjugating
    the signal: ifft(x) = conj(fft(conj(x))) / n.

    Arguments:
        data: frequency data to be transformed
              (np.array, shape=(n,), dtype='float64')
    Return:
        result: Inverse transformed data
    """
    n = len(data)
    # BUG FIX: the conjugation identity for the inverse transform requires a
    # 1/n scaling; `n` was previously computed but never applied, leaving the
    # result n times too large.
    result = np.conjugate(fft(np.conjugate(data))) / n
    return result
5,326,336
def archived_minute(dataSet, year, month, day, hour, minute):
    """
    Look up the rides recorded at one specific minute.

    Input: a dataset keyed by 'M/D/YYYY HH:MM:00' timestamps plus the parts
    of the minute to look up (hour and minute are zero-padded to 2 digits).
    Output: a list of ride details at that minute, or -1 if no ride occurred
    during that minute.
    """
    stamp = '{}/{}/{} {:02d}:{:02d}:00'.format(month, day, year, hour, minute)
    return dataSet.get(stamp, -1)
5,326,337
def Rq(theta, vect):
    """Returns a 3x3 matrix representing a rotation of angle theta about
    vect axis, via the Rodrigues formula
    R = I + sin(theta)*Q + (1 - cos(theta))*Q^2, where Q is the
    skew-symmetric cross-product matrix of `vect`.

    Parameters
    ----------
    theta: float, rotation angle in radian
    vect: list of float or array, vector about which the rotation happens
    """
    identity = np.matrix(np.identity(3))
    # skew-symmetric cross-product matrix of vect
    skew = np.matrix([
        [0.0, -vect[2], vect[1]],
        [vect[2], 0.0, -vect[0]],
        [-vect[1], vect[0], 0.0],
    ])
    return identity + np.sin(theta) * skew + (1 - np.cos(theta)) * skew ** 2
5,326,338
def ExpsMaintPol():
    """Maintenance expense per policy

    Looks up the assumption-table entry matching this space's
    product/policy-type/generation keys.
    """
    # NOTE(review): relies on `asmp`, `prod`, `polt`, `gen` being injected
    # as model-space globals by the surrounding model — confirm
    return asmp.ExpsMaintPol.match(prod, polt, gen).value
5,326,339
def inputRead(c, inps):
    """
    Reads the tokens in the input channels (Queues) given by the list inps
    using the token rates defined by the list c. It outputs a list where
    each element is a list of the read tokens.

    Parameters
    ----------
    c : [int]
        List of token consumption rates.
    inps : [Queue]
        List of channels.

    Returns
    ----------
    inputs: [List]
        List of token lists.
    """
    if len(c) != len(inps):
        raise Exception("Token consumption list and Queue list have different sizes")
    # For each channel, consume exactly its rate's worth of tokens.
    return [[channel.get() for _ in range(rate)]
            for rate, channel in zip(c, inps)]
5,326,340
def _split_schema_abstract(s):
    """
    split the schema abstract into fields

    >>> _split_schema_abstract("a b c")
    ['a', 'b', 'c']
    >>> _split_schema_abstract("a(a b)")
    ['a(a b)']
    >>> _split_schema_abstract("a b[] c{a b}")
    ['a', 'b[]', 'c{a b}']
    >>> _split_schema_abstract(" ")
    []
    """
    fields = []
    current = ''
    stack = []
    for ch in s:
        if ch == ' ' and not stack:
            # Top-level whitespace terminates the current field.
            if current:
                fields.append(current)
                current = ''
            continue
        current += ch
        if ch in _BRACKETS:
            stack.append(ch)
        elif ch in _BRACKETS.values():
            # A closer must match the most recently opened bracket.
            if not stack or ch != _BRACKETS[stack.pop()]:
                raise ValueError("unexpected " + ch)
    if stack:
        raise ValueError("brackets not closed: %s" % stack)
    if current:
        fields.append(current)
    return fields
5,326,341
def myfn(n):
    """Print "hello world!" n times, recursively.

    :param n: number of times to print; values <= 0 print nothing
        (previously n <= 0 caused infinite recursion, because the only
        base case was ``n == 1``).

    NOTE(review): the original (Chinese) docstring claimed a one-second
    delay between prints, but no delay was ever implemented; that claim
    has been dropped rather than adding a behavior-changing sleep.
    """
    if n <= 0:
        return
    print("hello world!")
    return myfn(n - 1)
5,326,342
def name(ea, **flags):
    """Return the name defined at the address specified by `ea`.

    If `flags` is specified, then use the specified value as the flags.

    :param ea: effective address to look up (clamped/validated via
        ``interface.address.inside``)
    :param flags: optional ``flags=...`` keyword forwarded to
        ``idaapi.get_ea_name`` (defaults to ``idaapi.GN_LOCAL``);
        only consulted on IDA >= 6.8
    :return: the demangled/decoded name string, or ``None`` when the
        address has no (real) name
    """
    ea = interface.address.inside(ea)

    # figure out what default flags to use
    fn = idaapi.get_func(ea)

    # figure out which name function to call
    if idaapi.__version__ < 6.8:
        # if get_true_name is going to return the function's name instead of a real one, then leave it as unnamed.
        if fn and interface.range.start(fn) == ea and not flags:
            return None

        aname = idaapi.get_true_name(ea) or idaapi.get_true_name(ea, ea)
    else:
        aname = idaapi.get_ea_name(ea, flags.get('flags', idaapi.GN_LOCAL))

    # return the name at the specified address or not
    return utils.string.of(aname) or None
5,326,343
def estimate_distance(
    row: pd.DataFrame,
    agent_x: float,
    agent_y: float
):
    """
    Compute the Euclidean distance from the AGENT to the vehicle described
    by ``row`` and store it in the row's "distance" field.  Intended to be
    applied row-by-row (e.g. via ``DataFrame.apply``).

    Args:
        row: (pd.DataFrame) row with "center_x" / "center_y" coordinates
        agent_x: (float) x coordinate of agent
        agent_y: (float) y coordinate of agent

    Returns:
        (pd.DataFrame) the same row, with "distance" filled in
    """
    dx = row["center_x"] - agent_x
    dy = row["center_y"] - agent_y
    row["distance"] = np.sqrt(dx ** 2 + dy ** 2)
    return row
5,326,344
def spinpol_bands(kpath, eigenvalues_up, eigenvalues_dn, backend=None, data=None, **kwargs):
    """
    Plot the provided data for a bandstructure (spin-polarized)

    Non-weighted, weighted, as a line plot or scatter plot, color-mapped or
    fixed colors are all possible options

    :param kpath: data for the kpoints path (flattened to 1D)
    :param eigenvalues_up: data for the eigenvalues for spin-up
    :param eigenvalues_dn: data for the eigenvalues for spin-down
    :param data: source for the data of the plot (optional) (pandas Dataframe for example)
    :param backend: name of the backend to use (uses a default if None is given)

    Kwargs are passed on to the backend plotting functions:
        - ``matplotlib``: :py:func:`~masci_tools.vis.plot_methods.plot_spinpol_bands()`
        - ``bokeh``: :py:func:`~masci_tools.vis.bokeh_plots.bokeh_spinpol_bands()`

    :returns: Figure object for the used plotting backend
    """
    # Local imports avoid a hard dependency on both plotting backends at
    # module import time.
    from .plot_methods import plot_spinpol_bands
    from .bokeh_plots import bokeh_spinpol_bands

    # Dispatch table: one plotting implementation per supported backend.
    plot_funcs = {PlotBackend.mpl: plot_spinpol_bands, PlotBackend.bokeh: bokeh_spinpol_bands}

    # Normalizes None / string input to a PlotBackend enum member.
    backend = PlotBackend.from_str(backend)

    return plot_funcs[backend](kpath, eigenvalues_up, eigenvalues_dn, data=data, **kwargs)
5,326,345
def test_init_method():
    """Constructor should store name, position and inventory unchanged."""
    expected_name = 'Hallway'
    expected_position = 'Entrance'
    expected_inventory = ['Paper', 'Cage', 'Freshman Badge']

    hero = Character(expected_name, expected_position, expected_inventory)

    assert hero.name == expected_name
    assert hero.position == expected_position
    assert hero.inventory == expected_inventory
5,326,346
def WriteSurfaceElectrostaticsView(Mode, OutFH, SelectionObjectName, ElectrostaticsGroupName, ElectrostaticsGroupMembers, DisplayAs = None, SurfaceCavityMode = 2):
    """Write out PML for viewing surface electrostatics.

    Creates a chain object from the selection, generates a vacuum
    electrostatics contact-potential surface for it (plus map, legend and an
    optional volume object), and groups the resulting objects.

    Arguments:
        Mode: view mode; "Cavity" (case-insensitive) additionally applies
            ``SurfaceCavityMode`` to the contact-potential object
        OutFH: open output file handle the PML commands are written to
        SelectionObjectName: PyMOL selection the chain object is created from
        ElectrostaticsGroupName: name of the PyMOL group to set up
        ElectrostaticsGroupMembers: list of 4 or 5 member object names
            (Name, ContactPotential, Map, Legend[, Volume]); the first entry
            is consumed (popped) by this function
        DisplayAs: optional representation to show for the contact potential
        SurfaceCavityMode: PyMOL ``surface_cavity_mode`` value used in
            Cavity mode (skipped when None)
    """
    if len(ElectrostaticsGroupMembers) == 5:
        Name, ContactPotentialName, MapName, LegendName, VolumeName = ElectrostaticsGroupMembers
    else:
        Name, ContactPotentialName, MapName, LegendName = ElectrostaticsGroupMembers
        VolumeName = None

    PMLCmds = []

    # Setup chain...
    PMLCmds.append("""cmd.create("%s", "(%s)")""" % (Name, SelectionObjectName))

    # Setup vacuum electrostatics surface along with associated objects...
    PMLCmds.append("""util.protein_vacuum_esp("%s", mode=2, quiet=0, _self=cmd)""" % (Name))

    PMLCmds.append("""cmd.set_name("%s_e_chg", "%s")""" % (Name, ContactPotentialName))
    if DisplayAs is not None:
        PMLCmds.append("""cmd.show("%s", "(%s)")""" % (DisplayAs, ContactPotentialName))

    # Cavity mode: tweak the surface cavity rendering of the contact potential.
    if re.match("^Cavity$", Mode, re.I):
        if SurfaceCavityMode is not None:
            PMLCmds.append("""cmd.set("surface_cavity_mode", %d, "%s")\n""" % (SurfaceCavityMode, ContactPotentialName))

    PMLCmds.append(PyMOLUtil.SetupPMLForEnableDisable(ContactPotentialName, Enable = True))

    PMLCmds.append("""cmd.set_name("%s_e_map", "%s")""" % (Name, MapName))
    PMLCmds.append(PyMOLUtil.SetupPMLForEnableDisable(MapName, Enable = False))

    PMLCmds.append("""cmd.set_name("%s_e_pot", "%s")""" % (Name, LegendName))
    PMLCmds.append(PyMOLUtil.SetupPMLForEnableDisable(LegendName, Enable = False))

    if VolumeName is not None:
        PMLCmds.append("""cmd.volume("%s", "%s", "%s", "(%s)")""" % (VolumeName, MapName, "esp", Name))
        PMLCmds.append(PyMOLUtil.SetupPMLForEnableDisable(VolumeName, Enable = False))

    # Delete name and take it out from the group membership. It is
    # is already part of ContactPotential object.
    PMLCmds.append("""cmd.delete("%s")""" % (Name))
    ElectrostaticsGroupMembers.pop(0)

    PML = "\n".join(PMLCmds)

    OutFH.write("\n%s\n" % PML)

    # Setup group...
    GenerateAndWritePMLForGroup(OutFH, ElectrostaticsGroupName, ElectrostaticsGroupMembers, False, "close")
5,326,347
def test_access_db_type_url_down():
    """Deploy-plan access API should reject a db-type data source.

    Mocks admin user/app permissions and data-id creation/lookup with
    httpretty, then posts a deploy plan and asserts the call fails
    (``result`` is falsy).
    """
    httpretty.enable()
    httpretty.reset()
    # Stub out permission checks and upstream data-id endpoints.
    conftest.mock_user_perm("admin")
    conftest.mock_app_perm("bk_dataweb")
    conftest.mock_create_data_id()
    conftest.mock_get_data_id("db")

    res = post("/v3/access/deploy_plan/", param)
    assert not res["result"]
5,326,348
def test_markov_word_chain_builder():
    """Test typical use of the WordChainBuilder markov chain class.

    Feeds a fixed word sequence into the builder and checks that
    ``normalize()`` returns per-leader transition probabilities equal to
    the observed follower counts divided by the leader's total count.
    """
    input_sequence = (
        'PIKARD', 'Q', 'PIKARD', 'DORF', 'PIKARD', 'Q', 'ROKER', 'Q', 'PIKARD'
    )
    # Expected follower probabilities per leader word, derived by hand from
    # the sequence above (e.g. PIKARD is followed by Q twice, DORF once).
    expected_probabilities = {
        'PIKARD': [('Q', 2.0 / 3), ('DORF', 1.0 / 3)],
        'Q': [('PIKARD', 2.0 / 3), ('ROKER', 1.0 / 3)],
        'DORF': [('PIKARD', 1.0)],
        'ROKER': [('Q', 1.0)],
    }
    builder = markov.WordChainBuilder()
    for word in input_sequence:
        builder.add_next(word)
    chain = builder.normalize()
    # Order of the probability pairs is not guaranteed, so compare sorted.
    for leader, probs in expected_probabilities.items():
        assert leader in chain
        assert sort_probs(probs) == sort_probs(chain[leader])
5,326,349
def _verify_metadata(metadata, args):
    """
    <Purpose>
      Internal method to verify link or layout signatures.

    <Arguments>
      metadata:
              Metablock object (contains Link or Layout object)
      args:
              see argparser; exactly one of ``args.key`` (public key files)
              or ``args.gpg`` (gpg keyids) is expected to be set

    <Exceptions>
      SystemExit(0) if verification passes
      SystemExit(1) if verification fails
      SystemExit(2) if any exception occurs
    """
    try:
        # Load pubkeys from disk ....
        if args.key is not None:
            pub_key_dict = util.import_public_keys_from_files_as_dict(args.key,
                                                                      args.key_type)

        # ... or from gpg keyring
        elif args.gpg is not None:  # pragma: no branch
            pub_key_dict = util.import_gpg_public_keys_from_keyring_as_dict(
                args.gpg, args.gpg_home)

        # Every provided key must produce a valid signature on the metadata.
        for keyid, verification_key in six.iteritems(pub_key_dict):
            metadata.verify_signature(verification_key)
            LOG.info("Signature verification passed for keyid '{}'"
                     .format(keyid))

        sys.exit(0)

    except exceptions.SignatureVerificationError as e:
        LOG.error("Signature verification failed: {}".format(e))
        sys.exit(1)

    except Exception as e:
        LOG.error("The following error occurred while verifying signatures: "
                  "{}".format(e))
        sys.exit(2)
5,326,350
def valid_config_and_get_dates():
    """
    Validate the config file's parameters and return the configured
    vaccination appointment dates.

    :return: list of valid appointment dates (each must be >= today)
    :raises Exception: when the cookie or date config is missing, or no
        valid dates remain after filtering
    """
    if config.global_config.getConfigSection("cookie") == "":
        raise Exception("请先配置登陆后的 cookie,查看方式请查看 README.MD")
    if config.global_config.getConfigSection("date") == "":
        raise Exception("请先配置登陆后的 预约日期")
    valid_dates = get_dates()
    if len(valid_dates) == 0:
        raise Exception("预约日期未配置或配置不正确(预约日期需要大于等于今天),请重新配置预约日期")
    return valid_dates
5,326,351
def check_internet_connection():
    """Check whether there is a working internet connection.

    Issues a GET request to google.com with a 5 second timeout.

    :return: True when the request succeeds, False on any request failure
        (connection errors, and — unlike before — timeouts, which
        previously propagated as uncaught ``requests.Timeout``).
    """
    url = 'http://www.google.com/'
    timeout = 5
    try:
        requests.get(url, timeout=timeout)
        return True
    except requests.RequestException:
        # RequestException is the base of ConnectionError, Timeout, etc.
        # The original also had an unreachable trailing `return False`
        # after this except clause; it has been removed.
        return False
5,326,352
def wall_filter(points, img):
    """
    Filters away points that are inside walls.

    A point is considered to be inside a wall when the refractive-index
    image value at its (row, col) position differs from 1.
    """
    inside_wall = img[points[:, 0], points[:, 1]] != 1
    return points[np.logical_not(inside_wall)]
5,326,353
def navigateResults(results):
    """Navigate all links, returning a list containing the URLs and
    corresponding pages.

    results:[String] - List with links to be visited
    Return: {list}[{tuple}({String}url, {String}content)]"""
    global BASE_ADDR
    pages = []
    for link in results:
        url = "%s%s" % (BASE_ADDR, link)
        # TLS verification intentionally disabled, as in the original.
        response = requests.get(url, verify=False)
        pages.append([url, response.text])
    return pages
5,326,354
def _get_matching_signature(oper, args):
    """
    Search the first operation signature matched by a list of arguments

    Args:
        oper: Operation where searching signature
        args: Candidate list of argument expressions

    Returns:
        Matching signature, None if not found
    """
    # Linear scan: return the first signature the arguments satisfy.
    for signature in oper.signatures:
        if _is_matching_arguments(signature, args):
            return signature
    return None
5,326,355
def format_size(num: int) -> str:
    """Format byte-sizes as a human-readable string.

    :param num: Size given as number of bytes.

    .. seealso:: http://stackoverflow.com/a/1094933
    """
    for unit in ('bytes', 'KB', 'MB', 'GB'):
        # Stop dividing once the magnitude fits below 1024.
        if -1024.0 < num < 1024.0:
            return "%3.1f%s" % (num, unit)
        num /= 1024.0
    # Anything >= 1024 GB is reported in terabytes.
    return "%3.1f%s" % (num, 'TB')
5,326,356
def annotate_task(task_id, note):
    """Annotate task with task_id with note string.

    Resolves the task's UUID via the `task` (taskwarrior) CLI, attaches an
    "email: Notes" annotation to the task, and writes the note body to
    ``<notes_dir>/<uuid>.txt``.

    :param task_id: taskwarrior task id, passed verbatim to the `task` CLI
    :param note: note text to store alongside the task
    """
    # Ask taskwarrior for the task's stable UUID (ids can be renumbered).
    uuid = Popen(['task', task_id, 'uuids'], stdout=PIPE).stdout.read().strip().decode('utf-8')
    call(['task', task_id, 'annotate', '--', 'email:', 'Notes'])
    notes_dir = get_notes_dir()
    # Note file is keyed by UUID so it survives task id renumbering.
    note_file = os.path.join(notes_dir, '{}.txt'.format(uuid))
    with open(note_file, 'w') as note_f:
        note_f.write(note)
5,326,357
def __sort_merge(arr: [], start: int, end: int):
    """
    Two-way merge sort over arr[start:end): recursively split the range
    in half, sort each half, then merge the two sorted runs in place
    via __merge_array.
    """
    # A range of length <= 1 is already sorted; stop splitting.
    if end - start <= 1:
        return
    middle = (start + end) // 2
    __sort_merge(arr, start, middle)
    __sort_merge(arr, middle, end)
    # Merge the two sorted halves back together.
    __merge_array(arr, start, middle, end)
5,326,358
def hub_payload(hub):
    """Create response payload for a hub.

    Hubs tied to a library section are keyed by their section id; other
    (server-level) hubs use the literal "server" marker.  The result is a
    non-playable, expandable directory entry.

    :param hub: Plex hub object (plexapi); must expose ``hubIdentifier``,
        ``title`` and ``type``
    :return: a BrowseMedia directory entry for the hub
    """
    if hasattr(hub, "librarySectionID"):
        media_content_id = f"{HUB_PREFIX}{hub.librarySectionID}:{hub.hubIdentifier}"
    else:
        media_content_id = f"{HUB_PREFIX}server:{hub.hubIdentifier}"
    payload = {
        "title": hub.title,
        "media_class": MEDIA_CLASS_DIRECTORY,
        "media_content_id": PLEX_URI_SCHEME + media_content_id,
        "media_content_type": hub.type,
        # Hubs are containers: they can be expanded but not played directly.
        "can_play": False,
        "can_expand": True,
    }
    return BrowseMedia(**payload)
5,326,359
def _decrypt(secret_key, data):
    """Decrypt data using the secret_key and print the plaintext to stdout.

    Note: the result is written to stdout, not returned.
    """
    print(Encryptor(secret_key).decrypt_string(data))
5,326,360
def roq_transform(pressure, loading):
    """Rouquerol transform: loading * (1 - pressure)."""
    return (1 - pressure) * loading
5,326,361
def test_report_properties():
    """``Report`` objects have statistically evaluated properties.

    A single GET request is added and every statistical accessor
    (counters, list stats, and per-path ordered dicts) is checked.
    """
    report = Report(verbs=['GET', 'POST'], status_codes=['20', 404])
    report.add(
        path='/foo/bar', verb='GET', status=205, time=0.1,
        upstream_time=0.09, body_bytes=255)
    assert report.requests == 1
    # counter attributes evaluate to Counter.most_common()
    assert report.verbs == [('GET', 1), ('POST', 0)]
    # status 205 is matched against the '20' prefix bucket
    assert report.status == [('20', 1), ('404', 0)]
    assert report.path_requests == [('/foo/bar', 1)]
    # list attributes return a ListStats instance
    times = report.times
    assert isinstance(times, ListStats)
    assert times.mean == 0.1
    assert times.median == 0.1
    upstream_times = report.upstream_times
    assert isinstance(upstream_times, ListStats)
    assert upstream_times.mean == 0.09
    assert upstream_times.median == 0.09
    body_bytes = report.body_bytes
    assert isinstance(body_bytes, ListStats)
    assert body_bytes.mean == 255
    assert body_bytes.median == 255
    # per path attributes are ordered dictionaries with path keys
    path_verbs = report.path_verbs
    assert isinstance(path_verbs, OrderedDict)
    assert [key for key in path_verbs.keys()] == ['/foo/bar']
    assert path_verbs['/foo/bar'] == [('GET', 1), ('POST', 0)]
    path_status = report.path_status
    assert isinstance(path_status, OrderedDict)
    assert [key for key in path_status.keys()] == ['/foo/bar']
    assert path_status['/foo/bar'] == [('20', 1), ('404', 0)]
    path_times = report.path_times
    assert isinstance(path_times, OrderedDict)
    assert [key for key in path_times.keys()] == ['/foo/bar']
    assert isinstance(path_times['/foo/bar'], ListStats)
    assert path_times['/foo/bar'].mean == 0.1
    assert path_times['/foo/bar'].median == 0.1
    path_upstream_times = report.path_upstream_times
    assert isinstance(path_upstream_times, OrderedDict)
    assert [key for key in path_upstream_times.keys()] == ['/foo/bar']
    assert isinstance(path_upstream_times['/foo/bar'], ListStats)
    assert path_upstream_times['/foo/bar'].mean == 0.09
    assert path_upstream_times['/foo/bar'].median == 0.09
    path_body_bytes = report.path_body_bytes
    assert isinstance(path_body_bytes, OrderedDict)
    assert [key for key in path_body_bytes.keys()] == ['/foo/bar']
    assert isinstance(path_body_bytes['/foo/bar'], ListStats)
    assert path_body_bytes['/foo/bar'].mean == 255
    assert path_body_bytes['/foo/bar'].median == 255
5,326,362
def _cms_inmem(file_names):
    """
    Computes mean and standard deviation in an offline fashion. This is
    possible only when the whole dataset can be allocated in memory.

    Parameters
    ----------
    file_names: np.ndarray of String
        Array of image file names of the dataset (``.size`` is used, so a
        numpy array is expected rather than a plain list)

    Returns
    -------
    mean : np.ndarray
        Per-channel (RGB) mean over all pixels of all images, scaled to [0, 1]
    std : np.ndarray
        Per-channel (RGB) standard deviation, scaled to [0, 1]

    Note: all images are assumed to share the shape of the first one;
    they are stacked into a single (N, H, W, 3) array.
    """
    img = np.zeros([file_names.size] + list(np.array(Image.open(file_names[0]).convert('RGB')).shape))

    # Load all samples
    for i, sample in enumerate(file_names):
        img[i] = np.array(Image.open(sample).convert('RGB'))

    # Statistics are computed in 0-255 space, then scaled to [0, 1].
    mean = np.array([np.mean(img[:, :, :, 0]), np.mean(img[:, :, :, 1]), np.mean(img[:, :, :, 2])]) / 255.0
    std = np.array([np.std(img[:, :, :, 0]), np.std(img[:, :, :, 1]), np.std(img[:, :, :, 2])]) / 255.0
    return mean, std
5,326,363
def ddphi_spherical_zm(dd, ps_zm, r_e, lat, time_chunk=None):
    """
    Calculate the gradient in the meridional direction in a spherical
    system.  Takes and returns xarray.DataArrays.

    inputs:
        dd          data xarray.DataArray with (latitude, time, level) or
                    (latitude, time), or combinations thereof
        ps_zm       xr.DataArray, surface pressure with dimensions matching
                    dd (no copying to additional dimensions needed); 2nd
                    dimension should be latitude if more than 2 dims.
                    May be None, in which case no pressure weighting is done.
        r_e         earth radius used in the spherical gradient
        lat         latitude values in degree, same size as dd.latitude
        time_chunk  optional dask chunk size for the 'time' dimension

    returns:
        xr.DataArray with the same dimensions as dd
    """
    # ensure correct chunks: one chunk per dimension, except optionally time
    rechunk_dic = dict()
    for k in dd.dims:
        rechunk_dic[k] = dd[k].size
    if time_chunk is not None:
        rechunk_dic['time'] = time_chunk
    dd = dd.chunk(rechunk_dic)

    lat_radiens = lat * np.pi / 180.0
    cos_phi = np.cos(lat_radiens)

    if ps_zm is None:
        print('no ps weight lat gradient')
        # unit dummy weight with dd's layout (pressure weighting disabled)
        ps_dummy = dd.isel(level=1) * 0 + 1
        grad_matrix = ps_dummy * r_e * cos_phi**2 * dd
    else:
        print('ps weight lat gradient')
        rechunk_dic = dict()
        for k in ps_zm.dims:
            # BUGFIX: previously indexed an undefined variable
            # (`uzm_vzm_rep`); the chunk sizes must come from ps_zm itself.
            rechunk_dic[k] = ps_zm[k].size
        if time_chunk is not None:
            rechunk_dic['time'] = time_chunk
        ps_zm = ps_zm.chunk(rechunk_dic)
        grad_matrix = ps_zm * r_e * cos_phi**2 * dd

    # make sure latitude is the second dimension before differentiating
    if lat.size != grad_matrix.shape[1]:
        grad_matrix = grad_matrix.T
    if lat.size != grad_matrix.shape[1]:
        raise ValueError(
            'the 2nd dimension it not the same size as the latitude. '
            'make sure the input arrays as the cooriantes like '
            '(time, latitude, level) or (time, latitude)')

    grad_matrix_dphi = (- grad_matrix.differentiate('latitude', edge_order=2)
                        / (4.0 * lat_radiens.diff('latitude').mean()))

    # undo the weighting applied before differentiation
    if ps_zm is None:
        factor = r_e**2 * cos_phi**2
    else:
        factor = ps_zm * r_e**2 * cos_phi**2

    dd_return = grad_matrix_dphi / factor
    return dd_return
5,326,364
def kill_port(port: int):
    """Kill any process listening on the given TCP port (via ``fuser -k``).

    :param port: TCP port number whose listener should be killed
    """
    # int() guards against shell injection if a non-int value slips
    # through at runtime, since the value is interpolated into a shell
    # command.  (The redundant trailing `pass` was removed.)
    os.system('fuser -k {}/tcp'.format(int(port)))
5,326,365
def process_group_magmom_comp(
    name=None,
    group=None,
    write_atoms_objects=False,
    verbose=False,
    ):
    """Compare magnetic moments across all *O/*OH/*bare job triplets in a group.

    For every viable (o, oh, bare) triplet of completed jobs in ``group``
    it computes pair-wise normalized absolute magmom differences (reusing
    previously saved comparison data for ``name`` when available) and sums
    them per triplet.

    Args:
        name: identifier used to read previously saved magmom comparison
            data; must not be None
        group: dataframe of jobs (multi-index with compenv/slab_id/
            active_site; must provide a ``job_id_max`` column)
        write_atoms_objects: if True, also write each job's atoms object to
            a .traj file under ``__temp__/<compenv>__<slab_id>__<site>/``
        verbose: if True, print progress information

    Returns:
        dict with keys ``df_magmoms_comp`` (per-triplet sums),
        ``good_triplet_comb``, ``pair_wise_magmom_comp_data`` and
        ``job_ids``.
    """
    #| - process_group_magmom_comp
    # #####################################################
    group_w_o = group

    # #####################################################
    out_dict = dict()
    out_dict["df_magmoms_comp"] = None
    out_dict["good_triplet_comb"] = None
    out_dict["job_ids"] = None

    job_ids_list = list(set(group.job_id_max.tolist()))

    #| - Reading data
    # #########################################################
    df_jobs = get_df_jobs()

    # #########################################################
    df_atoms_sorted_ind = get_df_atoms_sorted_ind()
    df_atoms_sorted_ind = df_atoms_sorted_ind.set_index("job_id")

    # #########################################################
    df_job_ids = get_df_job_ids()
    df_job_ids = df_job_ids.set_index("job_id")

    from methods import read_magmom_comp_data

    assert name != None, "Must pass name to read previous data"

    # Previously computed pair-wise comparisons are reused when available.
    magmom_comp_data_prev = read_magmom_comp_data(name=name)
    if magmom_comp_data_prev is not None:
        pair_wise_magmom_comp_data_prev = \
            magmom_comp_data_prev["pair_wise_magmom_comp_data"]
    #__|

    if write_atoms_objects:
        #| - Write atoms objects
        df_i = pd.concat([
            df_job_ids,
            df_atoms_sorted_ind.loc[
                group_w_o.job_id_max.tolist()
                ]
            ], axis=1, join="inner")

        # #########################################################
        df_index_i = group_w_o.index.to_frame()

        compenv_i = df_index_i.compenv.unique()[0]
        slab_id_i = df_index_i.slab_id.unique()[0]

        active_sites = [i for i in df_index_i.active_site.unique() if i != "NaN"]
        active_site_i = active_sites[0]

        folder_name = compenv_i + "__" + slab_id_i + "__" + str(int(active_site_i))

        # #########################################################
        for job_id_i, row_i in df_i.iterrows():
            tmp = 42

            job_id = row_i.name
            atoms = row_i.atoms_sorted_good
            ads = row_i.ads

            file_name = ads + "_" + job_id + ".traj"

            print("Is this saving to the right place d9sf")
            root_file_path = os.path.join("__temp__", folder_name)
            print(os.getcwd(), root_file_path)
            if not os.path.exists(root_file_path):
                os.makedirs(root_file_path)

            file_path = os.path.join(root_file_path, file_name)
            atoms.write(file_path)
        #__|

    # #####################################################
    #| - Getting good triplet combinations
    all_triplet_comb = list(itertools.combinations(
        group_w_o.job_id_max.tolist(), 3))

    good_triplet_comb = []
    for tri_i in all_triplet_comb:
        df_jobs_i = df_jobs.loc[list(tri_i)]

        # Triplet must not contain duplicate ads
        # Must strictly be a *O, *OH, and *bare triplet
        ads_freq_dict = CountFrequency(df_jobs_i.ads.tolist())

        tmp_list = list(ads_freq_dict.values())
        any_repeat_ads = [True if i > 1 else False for i in tmp_list]

        if not any(any_repeat_ads):
            good_triplet_comb.append(tri_i)
    #__|

    # #####################################################
    #| - MAIN LOOP
    if verbose:
        print(
            "Number of viable triplet combinations:",
            len(good_triplet_comb)
            )

    data_dict_list = []
    pair_wise_magmom_comp_data = dict()
    for tri_i in good_triplet_comb:
        #| - Process triplets
        data_dict_i = dict()

        if verbose:
            print("tri_i:", tri_i)

        all_pairs = list(itertools.combinations(tri_i, 2))

        df_jobs_i = df_jobs.loc[list(tri_i)]

        sum_norm_abs_magmom_diff = 0.
        for pair_i in all_pairs:
            # Reuse previously saved comparison data for this pair if present.
            if (magmom_comp_data_prev is not None) and \
               (pair_i in list(pair_wise_magmom_comp_data_prev.keys())):
                magmom_data_out = pair_wise_magmom_comp_data_prev[pair_i]
            else:
                #| - Process pairs
                row_jobs_0 = df_jobs.loc[pair_i[0]]
                row_jobs_1 = df_jobs.loc[pair_i[1]]

                ads_0 = row_jobs_0.ads
                ads_1 = row_jobs_1.ads

                # #############################################
                # Orient each pair so job_id_0 is the "slab" side and
                # job_id_1 the "ads" side of the comparison.
                if set([ads_0, ads_1]) == set(["o", "oh"]):
                    job_id_0 = df_jobs_i[df_jobs_i.ads == "o"].iloc[0].job_id
                    job_id_1 = df_jobs_i[df_jobs_i.ads == "oh"].iloc[0].job_id
                elif set([ads_0, ads_1]) == set(["o", "bare"]):
                    job_id_0 = df_jobs_i[df_jobs_i.ads == "bare"].iloc[0].job_id
                    job_id_1 = df_jobs_i[df_jobs_i.ads == "o"].iloc[0].job_id
                elif set([ads_0, ads_1]) == set(["oh", "bare"]):
                    job_id_0 = df_jobs_i[df_jobs_i.ads == "bare"].iloc[0].job_id
                    job_id_1 = df_jobs_i[df_jobs_i.ads == "oh"].iloc[0].job_id
                else:
                    print("Woops something went wrong here")

                # #############################################
                row_atoms_i = df_atoms_sorted_ind.loc[job_id_0]
                # #############################################
                atoms_0 = row_atoms_i.atoms_sorted_good
                magmoms_sorted_good_0 = row_atoms_i.magmoms_sorted_good
                was_sorted_0 = row_atoms_i.was_sorted
                # #############################################

                # #############################################
                row_atoms_i = df_atoms_sorted_ind.loc[job_id_1]
                # #############################################
                atoms_1 = row_atoms_i.atoms_sorted_good
                magmoms_sorted_good_1 = row_atoms_i.magmoms_sorted_good
                was_sorted_1 = row_atoms_i.was_sorted
                # #############################################

                # #############################################
                magmom_data_out = get_magmom_diff_data(
                    ads_atoms=atoms_1,
                    slab_atoms=atoms_0,
                    ads_magmoms=magmoms_sorted_good_1,
                    slab_magmoms=magmoms_sorted_good_0,
                    )
                #__|

            pair_wise_magmom_comp_data[pair_i] = magmom_data_out

            tot_abs_magmom_diff = magmom_data_out["tot_abs_magmom_diff"]
            norm_abs_magmom_diff = magmom_data_out["norm_abs_magmom_diff"]

            if verbose:
                print(
                    "    ", "pair_i: ", pair_i, ": ",
                    np.round(norm_abs_magmom_diff, 3),
                    sep="")

            sum_norm_abs_magmom_diff += norm_abs_magmom_diff

        # #################################################
        data_dict_i["job_ids_tri"] = set(tri_i)
        data_dict_i["sum_norm_abs_magmom_diff"] = sum_norm_abs_magmom_diff
        # #################################################
        data_dict_list.append(data_dict_i)
        # #################################################
        #__|
    #__|

    # #####################################################
    df_magmoms_i = pd.DataFrame(data_dict_list)

    # #####################################################
    out_dict["df_magmoms_comp"] = df_magmoms_i
    out_dict["good_triplet_comb"] = good_triplet_comb
    out_dict["pair_wise_magmom_comp_data"] = pair_wise_magmom_comp_data
    out_dict["job_ids"] = job_ids_list
    # #####################################################

    return(out_dict)
    #__|
5,326,366
def precompute_dgmatrix(set_gm_minmax, res=0.1, adopt=True):
    """Precompute the MODIT grid matrix for normalized gammaL.

    Args:
        set_gm_minmax: set of gm_minmax for different parameters
            [Nsample, Nlayers, 2], where 2 = (min, max)
        res: grid resolution; res=0.1 (default) means one grid point per digit
        adopt: if True, the min/max grid points sit exactly at the min and
            max values of x, so the grid width need not be exactly ``res``.

    Returns:
        grid for DIT (Nlayer x NDITgrid)
    """
    set_gm_minmax = np.array(set_gm_minmax)
    lower = np.min(set_gm_minmax[:, :, 0], axis=0)  # per-layer minimum (log)
    upper = np.max(set_gm_minmax[:, :, 1], axis=0)  # per-layer maximum (log)
    # Number of points chosen from the widest per-layer log-span.
    span = np.max(upper - lower)
    n_grid = (span / res).astype(int) + 2
    layer_grids = []
    for lo, hi in zip(lower, upper):
        if adopt:
            grid = np.logspace(lo, hi, n_grid)
        else:
            grid = np.logspace(lo, lo + (n_grid - 1) * res, n_grid)
        layer_grids.append(grid)
    return np.array(layer_grids)
5,326,367
def get_image_urls(ids):
    """Map image ids to their local-server image URLs."""
    base = "http://127.0.0.1:8000/"
    return [base + str(image_id) for image_id in ids]
5,326,368
def partition(n: int) -> int:
    """Pure Python partition function, ported to Python from SageMath.

    Computes p(n), the number of integer partitions of n (OEIS A000041),
    via Euler's pentagonal number recurrence, memoized in the module-level
    ``_p`` cache.

    A000041 implemented by Peter Luschny.

    @CachedFunction
    def A000041(n):
        if n == 0: return 1
        S = 0; J = n-1; k = 2
        while 0 <= J:
            T = A000041(J)
            S = S+T if is_odd(k//2) else S-T
            J -= k if is_odd(k) else k//2
            k += 1
        return S
    """
    # Memoized result available?
    if n in _p.keys():
        return _p[n]
    if not n:
        return 1
    # Euler's recurrence: p(n) = sum over generalized pentagonal numbers
    # g_k of (-1)^(k-1) * p(n - g_k).  The parity of k//2 selects the
    # sign; the alternating step (k vs k//2) walks the g_k sequence.
    sum, j, k = EMPTY_SUM, dec(n), 2
    while j >= 0:
        t = partition(j)
        if k//2 % 2:
            sum += t
        else:
            sum -= t
        if k % 2:
            j -= k
        else:
            j -= k//2
        k += 1
    _p[n] = sum
    return sum
5,326,369
def port_to_host_int(port: int) -> int:
    """Convert a port from network byte order to host (little endian) order.

    Args:
        port (int): the big endian port to be converted

    Returns:
        int: the little endian representation of the port
    """
    host_order_port = ntohs(port)
    return host_order_port
5,326,370
def inv_median(a):
    """
    Inverse of the median of array a.

    This can be used as the `scale` argument of ccdproc.combine when
    combining flat frames.  See CCD Data Reduction Guide Sect. 4.3.1
    """
    median_value = np.median(a)
    return 1 / median_value
5,326,371
def mpdisted(dask_client, T_A, T_B, m, percentage=0.05, k=None, normalize=True):
    """
    Compute the z-normalized matrix profile distance (MPdist) measure between
    any two time series with a distributed dask cluster

    The MPdist distance measure considers two time series to be similar if they
    share many subsequences, regardless of the order of matching subsequences.
    MPdist concatenates and sorts the output of an AB-join and a BA-join and
    returns the value of the `k`th smallest number as the reported distance.
    Note that MPdist is a measure and not a metric. Therefore, it does not obey
    the triangular inequality but the method is highly scalable.

    Parameters
    ----------
    dask_client : client
        A Dask Distributed client that is connected to a Dask scheduler and
        Dask workers. Setting up a Dask distributed cluster is beyond the
        scope of this library. Please refer to the Dask Distributed
        documentation.

    T_A : ndarray
        The first time series or sequence for which to compute the matrix
        profile

    T_B : ndarray
        The second time series or sequence for which to compute the matrix
        profile

    m : int
        Window size

    percentage : float, default 0.05
        The percentage of distances that will be used to report `mpdist`. The
        value is between 0.0 and 1.0. This parameter is ignored when `k` is
        not `None`.

    k : int
        Specify the `k`th value in the concatenated matrix profiles to
        return. When `k` is not `None`, then the `percentage` parameter is
        ignored.

    normalize : bool, default True
        When set to `True`, this z-normalizes subsequences prior to computing
        distances. Otherwise, this function gets re-routed to its
        complementary non-normalized equivalent set in the
        `@core.non_normalized` function decorator.

    Returns
    -------
    MPdist : float
        The matrix profile distance

    Notes
    -----
    `DOI: 10.1109/ICDM.2018.00119 \
    <https://www.cs.ucr.edu/~eamonn/MPdist_Expanded.pdf>`__

    See Section III
    """
    # Thin wrapper: delegates to the shared _mpdist implementation with the
    # distributed `stumped` matrix-profile backend and the given client.
    return _mpdist(T_A, T_B, m, percentage, k, dask_client=dask_client, mp_func=stumped)
5,326,372
def validate_input(helper, definition):
    """Implement your own validation logic to validate the input stanza
    configurations.

    Logs an error and exits with status 1 when no ``client_account``
    parameter is present in the stanza definition.
    """
    # This example accesses the modular input variable
    # client_account = definition.parameters.get('client_account', None)
    client_account = definition.parameters.get('client_account', None)
    if client_account is None:
        helper.log_error("[ZPA-E-NO_CLIENT_ACCOUNT] No client account was provided")
        sys.exit(1)
5,326,373
def test_describe_enclaves(init_resources):  # pylint: disable=unused-argument
    """Test describe enclaves does what we expect it to do.

    Verifies that describe returns an empty list before any enclave is
    running, reports the launched enclave's id while it runs, and is empty
    again after termination.
    """
    # No enclaves running yet: describe must be empty.
    result = describe_enclaves_ok()
    result_json = json.loads(result.stdout.decode('UTF-8'))
    assert not result_json

    result = run_enclave_ok(SAMPLE_EIF, "1028", "2")
    result_json = json.loads(result.stdout.decode('UTF-8'))
    enclave_id = result_json["EnclaveID"]

    # The running enclave must be reported with its id.
    result = describe_enclaves_ok()
    result_json = json.loads(result.stdout.decode('UTF-8'))
    assert result_json[0]["EnclaveID"] == enclave_id

    terminate_enclave_ok(enclave_id)

    # After termination, describe must be empty again.
    result = describe_enclaves_ok()
    result_json = json.loads(result.stdout.decode('UTF-8'))
    assert not result_json
5,326,374
def naive_pipeline_2() -> Pipeline:
    """Generate pipeline with NaiveModel(2)."""
    return Pipeline(model=NaiveModel(2), transforms=[], horizon=7)
5,326,375
def caption_image_batch(encoder, decoder, images, word_map, device, max_length):
    """
    Caption a batch of images with greedy (argmax) decoding.

    NOTE(review): the original docstring claimed beam search and a
    ``beam_size`` parameter; neither exists here — at every step the single
    highest-scoring word is taken for each image in the batch.

    :param encoder: encoder model
    :param decoder: decoder model
    :param images: batch of images
    :param word_map: word map (token -> index; must contain '<start>'
        and '<end>')
    :param device: torch device the embeddings are computed on
    :param max_length: maximum number of decode steps before giving up
    :return: list of caption lengths (index of the first '<end>' token
        per sequence)
    """
    # Encode
    encoder_out = encoder(images)  # (1, enc_image_size, enc_image_size, encoder_dim)
    batch_size = encoder_out.size(0)
    encoder_dim = encoder_out.size(3)

    # Flatten encoding
    encoder_out = encoder_out.view(batch_size, -1, encoder_dim)  # (1, num_pixels, encoder_dim)

    # Tensor to store top k previous words at each step; now they're just <start>
    k_prev_words = torch.LongTensor([[word_map['<start>']]] * batch_size)  # (k, 1)

    # Tensor to store top k sequences; now they're just <start>
    seqs = k_prev_words  # (k, 1)

    # Lists to store completed sequences, their alphas and scores
    complete_seqs = set()

    # Start decoding
    step = 1
    h, c = decoder.init_hidden_state(encoder_out)

    # s is a number less than or equal to k, because sequences are removed from this process once they hit <end>
    while len(complete_seqs) < batch_size:

        embeddings = decoder.embedding(k_prev_words.to(device)).squeeze(1)  # (s, embed_dim)

        awe, alpha = decoder.attention(encoder_out, h)  # (s, encoder_dim), (s, num_pixels)

        gate = decoder.sigmoid(decoder.f_beta(h))  # gating scalar, (s, encoder_dim)
        awe = gate * awe

        h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))  # (s, decoder_dim)

        scores = decoder.fc(h)  # (s, vocab_size)
        # Greedy step: pick the argmax word for every sequence.
        _, next_word_inds = scores.max(1)
        next_word_inds = next_word_inds.cpu()

        # Add new words to sequences, alphas
        seqs = torch.cat([seqs, next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)

        # Which sequences are incomplete (didn't reach <end>)?
        incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if
                           next_word != word_map['<end>']]
        complete_inds = set(range(batch_size)) - set(incomplete_inds)

        complete_seqs.update(complete_inds)

        k_prev_words = next_word_inds.unsqueeze(1)

        # Break if things have been going on too long
        if step > max_length:
            break
        step += 1

    # Append a final <end> so every sequence has one, guaranteeing the
    # index() lookup below succeeds even when max_length was reached.
    k_end_words = torch.LongTensor([[word_map['<end>']]] * batch_size)  # (k, 1)
    seqs = torch.cat([seqs, k_end_words], dim=1)  # (s, step+1)
    seq_length = [s.tolist().index(word_map['<end>']) for s in seqs]
    return seq_length
5,326,376
def wasserstein_distance(p, q, C):
    """Compute the Wasserstein distance between two distributions via LP.

    Solves the optimal transport problem min <T, C> subject to the
    marginal constraints (rows of T sum to p, columns to q).

    :param p: source distribution, shape (m,), must sum to 1
    :param q: target distribution, shape (n,), must sum to 1
    :param C: cost matrix, shape (m, n)
    :return: the optimal transport cost (float)
    """
    p = np.array(p)
    q = np.array(q)
    # Build the equality constraints: one row-sum constraint per entry of
    # p, one column-sum constraint per entry of q, over the flattened plan.
    A_eq = []
    for i in range(len(p)):
        A = np.zeros_like(C)
        A[i, :] = 1.0
        A_eq.append(A.reshape((-1,)))
    for i in range(len(q)):
        A = np.zeros_like(C)
        A[:, i] = 1.0
        A_eq.append(A.reshape((-1,)))
    A_eq = np.array(A_eq)
    b_eq = np.concatenate([p, q], axis=0)
    C = np.array(C).reshape((-1,))
    # The last constraint is linearly dependent on the others (both
    # marginals sum to 1), so it is dropped to keep A_eq full rank.
    # BUGFIX: method="interior-point" was deprecated in SciPy 1.9 and
    # removed in 1.11; "highs" is the recommended solver and its
    # solver-specific options (cholesky/sym_pos) are no longer needed.
    return optimize.linprog(
        c=C,
        A_eq=A_eq[:-1],
        b_eq=b_eq[:-1],
        method="highs",
    ).fun
5,326,377
def median_cutoff_points(ventricular_rate, ponset, toffset): """Calculate the median cutoff start and end points""" ponset = 0 if np.isnan(ponset) else int(ponset) toffset = 600 if np.isnan(toffset) else int(toffset) # limit the onset and offset to be in the range of 0-600 # take some margin of 10ms (5*2) on the start and end indices margin = 5 ponset = max(ponset - margin, 0) toffset = min(toffset, 600) if np.isnan(ventricular_rate): end = 600 else: # calculate the average number of points between the QRS complexes rr_interval = (1 * 60 * 1000 / 2) / ventricular_rate # say that the end of a beat would be around the onset of the P wave # plus the avg. duration of one beat end = min(ponset + margin + rr_interval, 600) # if the GE measured T wave offset is larger than our calculated beat # endpoint, take the measured T wave offset end = max(end, toffset) if not np.isinf(end): end = int(end) return ponset, end
5,326,378
def make_colormap(color_palette, N=256, gamma=1.0): """ Create a linear colormap from a color palette. Parameters ---------- color_palette : str, list, or dict A color string, list of color strings, or color palette dict Returns ------- cmap : LinearSegmentedColormap A colormap object based on color_palette using linear segments. """ colors = extract_palette(color_palette) rgb = map(hex2rgb, colors) return LinearSegmentedColormap.from_list('custom', list(rgb), N=N, gamma=1.0)
5,326,379
def frac_correct(t): """Compute fraction correct and confidence interval of trials t """ assert np.all(t.outcome.values<2) frac = t.outcome.mean() conf = confidence(frac, len(t)) return frac, conf
5,326,380
def run_location(tokens, description): """Identifies the indices of matching text in the lines. Arguments: tokens (list): A list of strings, serialized from the GUI. description (CourseDescription): The course to be matched against. Returns: list: List of list of index positions. """ indices = run_text(tokens[1], description) result = [] if tokens[0] == 'before': for line_indices in indices: result.append([start for start, end in line_indices]) elif tokens[0] == 'after': for line_indices in indices: result.append([end for start, end in line_indices]) return result
5,326,381
def SUM(r, expression = lambda trx:None): #todo None ok? cause error = good? """Sum expression""" if expression.func_code.co_consts==(None,) and len(r._heading)==1: #if no expression but 1 attribute, use it expression = lambda trx:trx[r._heading[0]] return reduce(lambda x,y: x + y, (expression(tr) for tr in r._scan()), 0)
5,326,382
def domean(data, start, end, calculation_type):
    """
    Gets average direction using Fisher or principal component analysis (line
    or plane) methods

    Parameters
    ----------
    data : nest list of data: [[treatment,dec,inc,int,quality],...]
        quality ('g'ood / 'b'ad) is appended as 'g' when missing
    start : step being used as start of fit (often temperature minimum)
    end : step being used as end of fit (often temperature maximum)
    calculation_type : string describing type of calculation to be made
        'DE-BFL' (line), 'DE-BFL-A' (line-anchored), 'DE-BFL-O'
        (line-with-origin), 'DE-BFP' (plane), 'DE-FM' (Fisher mean)

    Returns
    -------
    mpars : dictionary with the keys "specimen_n","measurement_step_min",
        "measurement_step_max","specimen_mad","specimen_dec","specimen_inc"
        (plus "specimen_alpha95"/"specimen_r" for 'DE-FM', "center_of_mass",
        "specimen_dang", "specimen_direction_type" and "calculation_type")
    """
    mpars = {}
    datablock = []
    start0, end0 = start, end
    # indata = [rec.append('g') if len(rec)<6 else rec for rec in indata]
    # # this statement doesn't work!
    # Normalize records: append a 'g'ood quality flag when the record has none.
    indata = []
    for rec in data:
        if len(rec) < 6:
            rec.append('g')
        indata.append(rec)
    if indata[start0][5] == 'b':
        print("Can't select 'bad' point as start for PCA")
    # Shift the start/end indices to account for 'b'ad records that are
    # filtered out of the working datablock below.
    flags = [x[5] for x in indata]
    bad_before_start = flags[:start0].count('b')
    bad_in_mean = flags[start0:end0 + 1].count('b')
    start = start0 - bad_before_start
    end = end0 - bad_before_start - bad_in_mean
    datablock = [x for x in indata if x[5] == 'g']
    # Sanity checks: the shifted indices must still point at the same records.
    if indata[start0] != datablock[start]:
        print('problem removing bad data in pmag.domean start of datablock shifted:\noriginal: %d\nafter removal: %d' % (
            start0, indata.index(datablock[start])))
    if indata[end0] != datablock[end]:
        print('problem removing bad data in pmag.domean end of datablock shifted:\noriginal: %d\nafter removal: %d' % (
            end0, indata.index(datablock[end])))
    mpars["calculation_type"] = calculation_type
    rad = old_div(np.pi, 180.)  # degrees -> radians conversion factor
    if end > len(datablock) - 1 or end < start:
        end = len(datablock) - 1
    control, data, X, Nrec = [], [], [], float(end - start + 1)
    cm = [0., 0., 0.]  # center of mass, accumulated below
    #
    # get cartesian coordinates
    #
    fdata = []
    for k in range(start, end + 1):
        if calculation_type == 'DE-BFL' or calculation_type == 'DE-BFL-A' or calculation_type == 'DE-BFL-O':  # best-fit line
            data = [datablock[k][1], datablock[k][2], datablock[k][3]]  # dec, inc, intensity
        else:
            data = [datablock[k][1], datablock[k][2], 1.0]  # unit weight
        fdata.append(data)
        cart = dir2cart(data)
        X.append(cart)
    if calculation_type == 'DE-BFL-O':  # include origin as point
        X.append([0., 0., 0.])
        # pass
    if calculation_type == 'DE-FM':  # for fisher means
        fpars = fisher_mean(fdata)
        mpars["specimen_direction_type"] = 'l'
        mpars["specimen_dec"] = fpars["dec"]
        mpars["specimen_inc"] = fpars["inc"]
        mpars["specimen_alpha95"] = fpars["alpha95"]
        mpars["specimen_n"] = fpars["n"]
        mpars["specimen_r"] = fpars["r"]
        mpars["measurement_step_min"] = indata[start0][0]
        mpars["measurement_step_max"] = indata[end0][0]
        mpars["center_of_mass"] = cm
        mpars["specimen_dang"] = -1  # deviation angle not defined for Fisher means
        return mpars
    #
    # get center of mass for principal components (DE-BFL or DE-BFP)
    #
    for cart in X:
        for l in range(3):
            cm[l] += old_div(cart[l], Nrec)
    mpars["center_of_mass"] = cm
    #
    # transform to center of mass (if best-fit line)
    #
    if calculation_type != 'DE-BFP':
        mpars["specimen_direction_type"] = 'l'
        if calculation_type == 'DE-BFL' or calculation_type == 'DE-BFL-O':  # not for planes or anchored lines
            for k in range(len(X)):
                for l in range(3):
                    X[k][l] = X[k][l] - cm[l]
    else:
        mpars["specimen_direction_type"] = 'p'
    #
    # put in T matrix
    #
    T = np.array(Tmatrix(X))
    #
    # get sorted evals/evects
    #
    t, V = tauV(T)
    if t == []:
        mpars["specimen_direction_type"] = "Error"
        print("Error in calculation")
        return mpars
    v1, v3 = V[0], V[2]  # principal and minor eigenvectors
    if t[2] < 0:
        t[2] = 0  # make positive
    if calculation_type == 'DE-BFL-A':
        # Anchored line: direction is the plain vector mean of the data.
        Dir, R = vector_mean(fdata)
        mpars["specimen_direction_type"] = 'l'
        mpars["specimen_dec"] = Dir[0]
        mpars["specimen_inc"] = Dir[1]
        mpars["specimen_n"] = len(fdata)
        mpars["measurement_step_min"] = indata[start0][0]
        mpars["measurement_step_max"] = indata[end0][0]
        mpars["center_of_mass"] = cm
        s1 = np.sqrt(t[0])
        MAD = old_div(np.arctan(old_div(np.sqrt(t[1] + t[2]), s1)), rad)
        if np.iscomplexobj(MAD):
            MAD = MAD.real
        # I think this is how it is done - i never anchor the "PCA" - check
        mpars["specimen_mad"] = MAD
        return mpars
    if calculation_type != 'DE-BFP':
        #
        # get control vector for principal component direction
        #
        rec = [datablock[start][1], datablock[start][2], datablock[start][3]]
        P1 = dir2cart(rec)
        rec = [datablock[end][1], datablock[end][2], datablock[end][3]]
        P2 = dir2cart(rec)
        #
        # get right direction along principal component
        ##
        for k in range(3):
            control.append(P1[k] - P2[k])
        # changed by rshaar
        # control is taken as the center of mass
        # control=cm
        # Flip the principal eigenvector if it points away from the control
        # vector (dot product clamped into [-1, 1] before arccos).
        dot = 0
        for k in range(3):
            dot += v1[k] * control[k]
        if dot < -1:
            dot = -1
        if dot > 1:
            dot = 1
        if np.arccos(dot) > old_div(np.pi, 2.):
            for k in range(3):
                v1[k] = -v1[k]
        # get right direction along principal component
        #
        s1 = np.sqrt(t[0])
        Dir = cart2dir(v1)
        # Maximum Angular Deviation for a best-fit line (Kirschvink 1980)
        MAD = old_div(np.arctan(old_div(np.sqrt(t[1] + t[2]), s1)), rad)
        if np.iscomplexobj(MAD):
            MAD = MAD.real
    if calculation_type == "DE-BFP":
        # Best-fit plane: pole is the minor eigenvector; MAD for a plane
        Dir = cart2dir(v3)
        MAD = old_div(
            np.arctan(np.sqrt(old_div(t[2], t[1]) + old_div(t[2], t[0]))), rad)
        if np.iscomplexobj(MAD):
            MAD = MAD.real
    #
    # get angle with center of mass
    #
    CMdir = cart2dir(cm)
    Dirp = [Dir[0], Dir[1], 1.]
    dang = angle(CMdir, Dirp)
    mpars["specimen_dec"] = Dir[0]
    mpars["specimen_inc"] = Dir[1]
    mpars["specimen_mad"] = MAD
    # mpars["specimen_n"]=int(Nrec)
    mpars["specimen_n"] = len(X)
    mpars["specimen_dang"] = dang[0]
    mpars["measurement_step_min"] = indata[start0][0]
    mpars["measurement_step_max"] = indata[end0][0]
    return mpars
5,326,383
def download_data(dataset: str): """ Downloads a dataset as a .csv file. :param dataset: The name of the dataset to download. """ return send_from_directory(app.config['DATA_FOLDER'], dataset, as_attachment=True)
5,326,384
def _split_features_target(feature_matrix, problem_name): """Split the features and labels. Args: feature_matrix (pd.DataFrame): a dataframe consists of both feature values and target values. problem_name (str): the name of the problem. Returns: tuple: features (pd.DataFrame) and target (pd.Series). """ features = feature_matrix.copy().reset_index(drop=True) if problem_name.lower() in features.columns: features.pop(problem_name.lower()) target = features.pop(TARGET_NAME[problem_name]) features = features return features, target
5,326,385
def model_create_load_run_save(gpu, args, files, train_files):
    """The main function which does the overall training. Should be split into multiple parts in the future. Currently monolithc intentionally.

    :param gpu: local GPU index for this process.
    :param args: parsed command-line arguments controlling the entire run.
    :param files: monolingual corpora specification (sharded per process).
    :param train_files: bilingual corpora specification (sharded per process).

    Fixes applied in this revision:
    1. dropout overrides for official pretrained models are now set on
       ``model.config`` (setting them on the model object had no effect);
    2. the distillation parent is built from ``parent_config`` (it was built
       from the child ``config``, and NameError'd when the child was an
       official pretrained model);
    3. string comparisons use ``==`` instead of ``is``;
    4. the initial ``.pure_model`` save uses ``model.module.state_dict()``
       like every other pure-model save (it saved the DDP-wrapped dict);
    5. the FP16/FP32 loss computation is unified in one helper -- the FP32
       copy double-scaled the multilayer-softmax loss by the temperature;
    6. gradients are zeroed only at the start of an accumulation cycle, so
       ``multistep_optimizer_steps > 1`` actually accumulates gradients.
    """
    rank = args.nr * args.gpus + gpu ## The rank of the current process out of the total number of processes indicated by world_size.
    dist.init_process_group(backend='nccl', init_method='env://', world_size=args.world_size, rank=rank)

    if args.shard_files and rank == 0: ## First shard the data using process 0 aka the prime process or master process. Other processes will wait.
        shard_files_mono(files, args.world_size)
        shard_files_bi(train_files, args.world_size)
    dist.barrier() ## Stop other processes from proceeding till sharding is done.

    if args.use_official_pretrained:
        if "mbart" in args.model_path:
            if "50" in args.model_path:
                tok = MBart50Tokenizer.from_pretrained(args.tokenizer_name_or_path)
            else:
                tok = MBartTokenizer.from_pretrained(args.tokenizer_name_or_path)
        else:
            tok = BartTokenizer.from_pretrained(args.tokenizer_name_or_path)
    else:
        tok = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path, do_lower_case=False, use_fast=False, keep_accents=True) ## Fast tokenizers are not good because their behavior is weird. Accents should be kept or else the segmentation will be messed up on languages with accented characters. No lower case obviously because we want to train on the original case. Set to false if you are ok with the model not dealing with cases.
    print("Tokenizer is:", tok)

    print(f"Running DDP checkpoint example on rank {rank}.") ## Unlike the FT script this will always be distributed

    if args.fp16: ## Although the code supports FP16/AMP training, it tends to be unstable in distributed setups so use this carefully.
        print("We will do fp16 training")
        scaler = torch.cuda.amp.GradScaler()
    else:
        print("We will do fp32 training")

    if args.encoder_tying_config is not None:
        print("We will use recurrently stacked layers for the encoder with configuration:", args.encoder_tying_config)
    if args.decoder_tying_config is not None:
        print("We will use recurrently stacked layers for the decoder with configuration:", args.decoder_tying_config)
    if args.unidirectional_encoder:
        print("Using unidirectional encoder.")

    writer = SummaryWriter(args.model_path+".tflogs")

    if args.use_official_pretrained:
        if "mbart" in args.pretrained_model:
            model = MBartForConditionalGeneration.from_pretrained(args.pretrained_model) ## We may use FBs official model and fine-tune it for our purposes.
        elif "bart" in args.pretrained_model:
            model = BartForConditionalGeneration.from_pretrained(args.pretrained_model) ## We may use FBs official model and fine-tune it for our purposes.
        ## Fix: these overrides must go on the config; setting plain attributes
        ## on the model object (as before) never reached the actual layers.
        model.config.dropout = args.dropout ## We should set dropouts manually
        model.config.attention_dropout = args.attention_dropout ## We should set dropouts manually
        model.config.activation_dropout = args.activation_dropout ## We should set dropouts manually
    else:
        config = MBartConfig(vocab_size=len(tok), encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers,
                             dropout=args.dropout, attention_dropout=args.attention_dropout,
                             activation_dropout=args.activation_dropout, encoder_attention_heads=args.encoder_attention_heads,
                             decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_dim,
                             decoder_ffn_dim=args.decoder_ffn_dim, d_model=args.d_model, no_embed_norm=args.no_embed_norm,
                             scale_embedding=args.scale_embedding, pad_token_id=tok.pad_token_id,
                             eos_token_id=tok(["</s>"], add_special_tokens=False).input_ids[0][0],
                             bos_token_id=tok(["<s>"], add_special_tokens=False).input_ids[0][0],
                             encoder_tying_config=args.encoder_tying_config, decoder_tying_config=args.decoder_tying_config,
                             multilayer_softmaxing=args.multilayer_softmaxing, wait_k=args.wait_k,
                             unidirectional_encoder=args.unidirectional_encoder, softmax_temperature=args.softmax_temperature,
                             temperature_calibration=args.temperature_calibration, layerdrop=args.layerdrop,
                             no_scale_attention_embedding=args.no_scale_attention_embedding,
                             positional_encodings=args.positional_encodings) ## Configuration. TODO: Save this configuration somehow.
        model = MBartForConditionalGeneration(config)
    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    model.train()

    if args.distillation: ## When distilling we need a parent model. The creation of the model is in the same way as the child. This model is immediately loaded with some pretrained params and then loaded into the GPU.
        print("We will do distillation from a parent model.")
        if args.use_official_parent_pretrained:
            if "mbart" in args.parent_pretrained_model:
                parent_model = MBartForConditionalGeneration.from_pretrained(args.parent_pretrained_model) ## We may use FBs official model and fine-tune it for our purposes.
            elif "bart" in args.parent_pretrained_model:
                parent_model = BartForConditionalGeneration.from_pretrained(args.parent_pretrained_model) ## We may use FBs official model and fine-tune it for our purposes.
            ## Fix: set the overrides on the parent's config (see child above).
            parent_model.config.dropout = args.dropout ## We should set dropouts manually
            parent_model.config.attention_dropout = args.attention_dropout ## We should set dropouts manually
            parent_model.config.activation_dropout = args.activation_dropout ## We should set dropouts manually
        else:
            parent_config = MBartConfig(vocab_size=len(tok), encoder_layers=args.parent_encoder_layers,
                                        decoder_layers=args.parent_decoder_layers, dropout=args.parent_dropout,
                                        attention_dropout=args.parent_attention_dropout,
                                        activation_dropout=args.parent_activation_dropout,
                                        encoder_attention_heads=args.parent_encoder_attention_heads,
                                        decoder_attention_heads=args.parent_decoder_attention_heads,
                                        encoder_ffn_dim=args.parent_encoder_ffn_dim,
                                        decoder_ffn_dim=args.parent_decoder_ffn_dim, d_model=args.parent_d_model,
                                        no_embed_norm=args.no_embed_norm, scale_embedding=args.scale_embedding,
                                        pad_token_id=tok.pad_token_id,
                                        eos_token_id=tok(["</s>"], add_special_tokens=False).input_ids[0][0],
                                        bos_token_id=tok(["<s>"], add_special_tokens=False).input_ids[0][0],
                                        encoder_tying_config=args.encoder_tying_config,
                                        decoder_tying_config=args.decoder_tying_config,
                                        multilayer_softmaxing=args.multilayer_softmaxing, wait_k=args.wait_k,
                                        unidirectional_encoder=args.unidirectional_encoder,
                                        softmax_temperature=args.softmax_temperature,
                                        temperature_calibration=args.temperature_calibration, layerdrop=args.layerdrop,
                                        no_scale_attention_embedding=args.no_scale_attention_embedding,
                                        positional_encodings=args.positional_encodings)
            ## Fix: the parent must be built from parent_config. It was built
            ## from the child "config", which both ignored every parent_*
            ## hyperparameter and NameError'd when the child was an official
            ## pretrained model (no "config" local in that path).
            parent_model = MBartForConditionalGeneration(parent_config)
        parent_model.cuda(gpu)
        parent_model.train() ## We do this to enable dropout but we wont have an optimizer for this so we wont train this model. For now. Future implementations should ask if we want to do co-distill or not. By co-distillation I mean, the parent will learn together with the child.
        parent_model = DistributedDataParallel(parent_model, device_ids=[gpu], output_device=gpu)
        print("Loading a parent model from which distillation will be done.")
        dist.barrier()
        # configure map_location properly
        map_location = {'cuda:%d' % 0: 'cuda:%d' % gpu}
        if not args.use_official_parent_pretrained:
            parent_checkpoint_dict = torch.load(args.parent_pretrained_model, map_location=map_location)
            if type(parent_checkpoint_dict) == dict:
                parent_model.load_state_dict(parent_checkpoint_dict['model']) # We never do any remapping of the parent. We always reuse it as it is.
            else:
                parent_model.module.load_state_dict(parent_checkpoint_dict) # We never do any remapping of the parent. We always reuse it as it is.

    model = DistributedDataParallel(model, device_ids=[gpu], output_device=gpu) ## This wrapper around the model will enable distributed training.

    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ] ## We suppose that weight decay will be used except for biases and layer norm weights.
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=1e-09) ## Our glorious optimizer.
    model.train()
    scheduler = get_linear_schedule_with_warmup(optimizer, args.warmup_steps, args.num_batches) ## A warmup and decay scheduler. We use the linear scheduler for now. TODO: Enable other schedulers with a flag.
    while scheduler.get_lr()[0] < 1e-7: ## We want to keep a minimum learning rate else for the initial batch or initial few batches barely anything will be learned which is a waste of computation. This minimum value is kept to 1e-7 by default in accordance with previous literature, other implementations and the Paris peace accords.
        scheduler.step()
    print("Initial LR is:", scheduler.get_lr()[0])

    if args.pretrained_model != "" and not args.use_official_pretrained: ## Here we load a previous checkpoint in case training crashed.
        print("Loading from checkpoint. Strict loading by default but if there are missing or non matching keys, they will be ignored when layer remapping or component selection is done.")
        dist.barrier()
        map_location = {'cuda:%d' % 0: 'cuda:%d' % gpu}
        sys.stdout.flush()
        checkpoint_dict = torch.load(args.pretrained_model, map_location=map_location)
        if type(checkpoint_dict) == dict:
            model.load_state_dict(remap_embeddings_eliminate_components_and_eliminate_mismatches(model.state_dict(), remap_layers(checkpoint_dict['model'], 4, args), args), strict=True if (args.remap_encoder == "" and args.remap_decoder == "" and not args.eliminate_encoder_before_initialization and not args.eliminate_decoder_before_initialization and not args.eliminate_embeddings_before_initialization) else False)
            ## Fix: string comparison via == (the former "is ''" identity test
            ## is implementation-dependent and warned about by CPython).
            if not args.no_reload_optimizer_ctr_and_scheduler and args.remap_encoder == '' and args.remap_decoder == '' and not args.eliminate_encoder_before_initialization and not args.eliminate_decoder_before_initialization and not args.eliminate_embeddings_before_initialization: ## Do not load optimizers, ctr and schedulers when remapping or resuming training.
                if 'optimizer' in checkpoint_dict:
                    print("Reloading optimizer")
                    optimizer.load_state_dict(checkpoint_dict['optimizer']) ## Dubious
                if 'scheduler' in checkpoint_dict:
                    print("Reloading scheduler")
                    scheduler.load_state_dict(checkpoint_dict['scheduler']) ## Dubious
                if 'ctr' in checkpoint_dict:
                    print("Reloading ctr. This means we resume training.")
                    ctr = checkpoint_dict['ctr']
            else:
                ctr = 0
        else:
            model.module.load_state_dict(remap_embeddings_eliminate_components_and_eliminate_mismatches(model.state_dict(), remap_layers(checkpoint_dict, 3, args), args), strict=True if (args.remap_encoder == "" and args.remap_decoder == "" and not args.eliminate_encoder_before_initialization and not args.eliminate_decoder_before_initialization and not args.eliminate_embeddings_before_initialization) else False)
            ctr = 0
    else:
        print("Training from scratch")
        CHECKPOINT_PATH = args.model_path
        if rank == 0:
            checkpoint_dict = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'ctr': 0}
            torch.save(checkpoint_dict, CHECKPOINT_PATH) ## Save a model by default every eval_every steps. This model will be saved with the same file name each time.
            ## Fix: save the unwrapped weights like every other pure_model save
            ## (model.state_dict() would store DDP "module."-prefixed keys).
            torch.save(model.module.state_dict(), CHECKPOINT_PATH+".pure_model")
        dist.barrier()
        map_location = {'cuda:%d' % 0: 'cuda:%d' % gpu}
        checkpoint_dict = torch.load(CHECKPOINT_PATH, map_location=map_location)
        model.load_state_dict(checkpoint_dict['model'])
        optimizer.load_state_dict(checkpoint_dict['optimizer'])
        scheduler.load_state_dict(checkpoint_dict['scheduler'])
        ctr = checkpoint_dict['ctr']
    model.train()

    print("Using label smoothing of", args.label_smoothing)
    print("Using gradient clipping norm of", args.max_gradient_clip_value)
    print("Using softmax temperature of", args.softmax_temperature)
    if args.max_ent_weight != -1:
        print("Doing entropy maximization during loss computation.")
    if args.multistep_optimizer_steps > 1:
        print("Using a multistep optimizer where gradients will be accumulated over", args.multistep_optimizer_steps, "batches.")

    def compute_batch_loss(input_ids, input_masks, decoder_input_ids, labels):
        """Compute the training loss for one batch.

        Shared by the FP16 and FP32 paths, which previously duplicated this
        logic (the FP32 copy also double-scaled the multilayer-softmax loss
        by the softmax temperature; this unified version scales it once).
        Closes over model, tok, args, rank, writer, gpu and the current ctr;
        parent_model is only touched when args.distillation is set.
        """
        if args.bilingual_train_frequency != -1 and ctr % args.bilingual_train_frequency == 0 and args.unify_encoder:
            ## Encoder unification: pull the mean-pooled encoder representations of the two sides of a parallel pair together via cosine similarity.
            source_hidden_state_encoder = model.module.get_encoder()(input_ids=input_ids, attention_mask=input_masks).last_hidden_state ## Run the encoder for source sentence.
            decoder_input_masks = (decoder_input_ids != tok.pad_token_id).int().to(gpu)
            target_hidden_state_encoder = model.module.get_encoder()(input_ids=decoder_input_ids, attention_mask=decoder_input_masks).last_hidden_state ## Run the encoder for the target sentence.
            decoder_input_masks.to('cpu') ## Move to CPU. May not be needed but its a safety net.
            pad_mask = input_ids.eq(tok.pad_token_id).unsqueeze(2)
            source_hidden_state_encoder.masked_fill_(pad_mask, 0.0) ## Zero out pad positions before mean pooling.
            source_hidden_state_encoder = source_hidden_state_encoder.mean(dim=1)
            pad_mask = decoder_input_ids.eq(tok.pad_token_id).unsqueeze(2)
            target_hidden_state_encoder.masked_fill_(pad_mask, 0.0)
            target_hidden_state_encoder = target_hidden_state_encoder.mean(dim=1)
            loss = -cosine_similarity(source_hidden_state_encoder, target_hidden_state_encoder)
            if rank == 0:
                writer.add_scalar("encoder unification loss", loss.detach().cpu().numpy(), ctr)
            return loss
        mod_compute = model(input_ids=input_ids, attention_mask=input_masks, decoder_input_ids=decoder_input_ids, output_hidden_states=args.distillation, output_attentions=args.distillation) ## Run the model and get logits.
        logits = mod_compute.logits
        lprobs = torch.nn.functional.log_softmax(logits, dim=-1) ## Softmax tempering of logits if needed.
        loss = label_smoothed_nll_loss(lprobs, labels, args.label_smoothing, ignore_index=tok.pad_token_id) ## Label smoothed cross entropy loss.
        loss = loss*args.softmax_temperature ## Up scale loss in case of non unitary temperatures. Note that in case of self calibrating temperature, the softmax temperature must be set to 1.
        if rank == 0:
            writer.add_scalar("pure cross entropy loss", loss.detach().cpu().numpy(), ctr)
        if args.temperature_calibration:
            loss = loss*mod_compute.softmax_temperature
            if rank == 0:
                writer.add_scalar("calibrated temperature", mod_compute.softmax_temperature.detach().cpu().numpy(), ctr)
                writer.add_scalar("calibrated temperature loss", loss.detach().cpu().numpy(), ctr)
        ## We will do multilayer softmaxing without any consideration for entropy maximization or distillation.
        if mod_compute.additional_lm_logits is not None:
            for additional_logits in mod_compute.additional_lm_logits:
                lprobs = torch.nn.functional.log_softmax(additional_logits, dim=-1) ## Softmax tempering of logits if needed.
                loss_extra = label_smoothed_nll_loss(lprobs, labels, args.label_smoothing, ignore_index=tok.pad_token_id) ## Label smoothed cross entropy loss.
                loss_extra = loss_extra*args.softmax_temperature ## Scaled exactly once (the old FP32 copy scaled it a second time when adding).
                if args.temperature_calibration:
                    loss_extra = loss_extra*mod_compute.softmax_temperature
                loss += loss_extra
        if args.max_ent_weight != -1: ## This deals with softmax entropy maximization. The logic is that we compute the softmax entropy of the predictions via -(P(Y/X)*log(P(Y/X))). We then add it to the cross entropy loss with a negative sign as we wish to maximize entropy. This should penalize overconfident predictions.
            assert (args.max_ent_weight >= 0 and args.max_ent_weight <= 1)
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1) ## No tempering here
            entropy = -(torch.exp(lprobs)*lprobs).mean()
            if rank == 0:
                writer.add_scalar("softmax entropy", entropy.detach().cpu().numpy(), ctr)
            if mod_compute.additional_lm_logits is not None:
                for additional_logits in mod_compute.additional_lm_logits: ## Compute entropy for each layer as well
                    lprobs = torch.nn.functional.log_softmax(additional_logits, dim=-1) ## No tempering here
                    entropy += -(torch.exp(lprobs)*lprobs).mean()
            loss = loss*(1-args.max_ent_weight) - entropy*args.max_ent_weight ## Maximize the entropy so a minus is needed. Weigh and add losses as required.
            if rank == 0:
                writer.add_scalar("loss with entropy loss", loss.detach().cpu().numpy(), ctr)
        if args.distillation: ## Time to distill.
            with torch.no_grad(): ## No gradient through the parent to avoid memory allocation.
                parent_mod_compute = parent_model(input_ids=input_ids, attention_mask=input_masks, decoder_input_ids=decoder_input_ids, output_hidden_states=args.distillation, output_attentions=args.distillation) ## Get the parent model's computations.
            distillation_loss = compute_distillation_losses(mod_compute, parent_mod_compute, labels, tok.pad_token_id, args) ## Compute distillation losses.
            if rank == 0:
                writer.add_scalar("distillation loss", distillation_loss.detach().cpu().numpy(), ctr)
            loss = args.distillation_loss_weight*distillation_loss + (1.0 - args.distillation_loss_weight)*loss ## Update the main loss with weighing and adding.
            if rank == 0:
                writer.add_scalar("final loss", loss.detach().cpu().numpy(), ctr)
        return loss

    num_batches_this_optimizer_step = 0
    losses = 0
    for input_ids, input_masks, decoder_input_ids, labels in generate_batches_monolingual_masked_or_bilingual(tok, args, rank, files, train_files, ctr): # Batches are generated from here. The argument (0.30, 0.40) is a range which indicates the percentage of the source sentence to be masked in case we want masking during training just like we did during BART pretraining. The argument 3.5 is the lambda to the poisson length sampler which indicates the average length of a word sequence that will be masked. Since this is pretraining we do not do any evaluations even if we train on parallel corpora.
        start = time.time() # timing hook (currently unreported)
        ## Fix: only zero the gradients at the start of an accumulation cycle.
        ## Zeroing every batch (as before) wiped the gradients accumulated for
        ## multistep optimization, silently disabling gradient accumulation.
        if num_batches_this_optimizer_step == 0:
            optimizer.zero_grad() ## Empty the gradients before any computation.
        if ctr % args.eval_every == 0: ## We have to evaluate our model every eval_every steps. Since there is no evaluation data this means our model is saved every eval_every steps.
            CHECKPOINT_PATH = args.model_path
            if rank == 0:
                print("Saving the model")
                sys.stdout.flush()
                # All processes should see same parameters as they all start from same
                # random parameters and gradients are synchronized in backward passes.
                # Therefore, saving it in one process is sufficient.
                checkpoint_dict = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'ctr': ctr}
                torch.save(checkpoint_dict, CHECKPOINT_PATH) ## Save a model by default every eval_every steps. This model will be saved with the same file name each time.
                torch.save(model.module.state_dict(), CHECKPOINT_PATH+".pure_model")
                if ctr % args.no_eval_save_every == 0: ## If no evaluation will be done then I consider it prudent to save the model every 10000 checkpoints by default. Change this to whatever value you want.
                    torch.save(checkpoint_dict, CHECKPOINT_PATH + "."+str(ctr))
                    torch.save(model.module.state_dict(), CHECKPOINT_PATH+ "."+str(ctr)+".pure_model")
            # Use a barrier() to make sure that process 1 loads the model after process
            # 0 saves it.
            dist.barrier()
            # configure map_location properly
            print("Loading from checkpoint")
            map_location = {'cuda:%d' % 0: 'cuda:%d' % gpu}
            sys.stdout.flush()
            checkpoint_dict = torch.load(CHECKPOINT_PATH, map_location=map_location)
            model.load_state_dict(checkpoint_dict['model'])
            optimizer.load_state_dict(checkpoint_dict['optimizer'])
            scheduler.load_state_dict(checkpoint_dict['scheduler'])
        input_ids=input_ids.to(gpu) ## Move to gpu
        input_masks=input_masks.to(gpu) ## Move to gpu
        decoder_input_ids=decoder_input_ids.to(gpu) ## Move to gpu
        labels=labels.to(gpu) ## Move to gpu
        if args.mixed_wait_k:
            model.module.config.wait_k = random.randint(1, args.wait_k) ## Sample a fresh wait-k value per batch.
        if args.fp16: ## AMP path: identical loss computation, just run under autocast.
            with torch.cuda.amp.autocast():
                loss = compute_batch_loss(input_ids, input_masks, decoder_input_ids, labels)
        else:
            loss = compute_batch_loss(input_ids, input_masks, decoder_input_ids, labels)
        input_ids=input_ids.to('cpu') ## Move to CPU. May not be needed but its a safety net.
        input_masks=input_masks.to('cpu') ## Move to CPU. May not be needed but its a safety net.
        decoder_input_ids=decoder_input_ids.to('cpu') ## Move to CPU. May not be needed but its a safety net.
        labels=labels.to('cpu') ## Move to CPU. May not be needed but its a safety net.
        loss = loss/args.multistep_optimizer_steps ## Normalize so the accumulated gradient matches a single large batch.
        if args.fp16: ## The gradient scaler needs to be invoked with FP16/AMP computation.
            scaler.scale(loss).backward()
        else:
            loss.backward()
        num_batches_this_optimizer_step += 1
        losses += loss
        if num_batches_this_optimizer_step < args.multistep_optimizer_steps:
            continue ## Keep accumulating gradients until we have seen enough batches.
        if args.fp16: ## With FP16/AMP computation we need to unscale gradients before clipping them. We then optimize and update the scaler.
            if args.max_gradient_clip_value != 0.0:
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_gradient_clip_value)
            scaler.step(optimizer)
            scaler.update()
        else: ## With FP32, we just do regular gradient clipping and then step the optimizer.
            if args.max_gradient_clip_value != 0.0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_gradient_clip_value)
            optimizer.step()
        scheduler.step() ## Advance the scheduler to get to the next value of LR.
        lv = losses.detach().cpu().numpy() ## Detach the loss in order to report it.
        losses = 0
        num_batches_this_optimizer_step = 0
        if ctr % 10 == 0 and rank % 8 == 0: ## Print the current loss every 10 batches but only for the master/prime process.
            print(ctr, lv)
            sys.stdout.flush()
        if ctr % 1000 == 0 and rank == 0 and args.save_weights_and_gradeint_info: ## Save the model weight and gradient info every time this condition is triggered.
            for param_name, param_value in model.named_parameters():
                if not ("embed_positions" in param_name and args.positional_encodings):
                    writer.add_histogram("weights."+param_name, param_value.detach().cpu().numpy(), ctr)
                    writer.add_histogram("gradients."+param_name, param_value.grad.detach().cpu().numpy(), ctr)
        end = time.time() # timing hook (currently unreported)
        ctr += 1

    if rank == 0:
        checkpoint_dict = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'ctr': ctr}
        torch.save(checkpoint_dict, CHECKPOINT_PATH) ## Save one last time.
        torch.save(model.module.state_dict(), CHECKPOINT_PATH+".pure_model") ## We will distribute this model and/or use it for fine tuning.

    dist.destroy_process_group()
5,326,386
def descope_queue_name(scoped_name):
    """Descope Queue name with '.'.

    Returns the queue name from the scoped name
    which is of the form project-id.queue-name

    :param scoped_name: scoped name, e.g. ``'project-id.queue-name'``
    :return: the queue-name portion of the scoped name
    """
    # Split on the first '.' only, so queue names that themselves contain
    # dots (e.g. 'proj.my.queue') are returned intact rather than truncated.
    return scoped_name.split('.', 1)[1]
5,326,387
def start(host="0.0.0.0", port=None, handlers=None, static_dir='./static', startup_callback=None,
          max_message_size=1 * 1024 * 1024, log_file='eui.log', log_level='DEBUG'):
    """
    start eui: serve the UI over a single raw WebSocket connection.

    Binds a listening socket, accepts exactly one UI client, performs the
    WebSocket upgrade handshake by hand, then loops forever queueing each
    received JSON payload for the dispatcher.

    :param host: host to bind the listening socket to
    :param port: port, if port is None, port will be a random int value
    :param handlers: python function for js call
    :param static_dir: generate js file path, could not end with '/'
    :param startup_callback: the function after eui startup to run
    :param max_message_size: each message max size
    :param log_file: log file
    :param log_level: log level, 'CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'
    :return:
    """
    global _HANDLERS

    # init log
    _init_log(log_file, log_level)

    # init port: pick a random one so the generated js and server agree on it
    if port is None:
        port = random.randint(5000, 50000)

    # init handlers registry consumed by the dispatcher
    if handlers:
        _HANDLERS = handlers

    # init js file (bakes the chosen port into the generated client script)
    _init_js(port, static_dir)

    # init socket: single-client server, listen(0) keeps the backlog minimal
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, port))
    sock.listen(0)

    # startup callback function (runs before the blocking accept below)
    _startup_callback(startup_callback)
    log.info(f'\n******************** eui start up at port {port} ********************')

    # accept connection -- blocks until the single UI client connects
    connection, addr = sock.accept()
    log.info(f'ui {addr} connect success!')

    # receive the HTTP upgrade request and perform the WebSocket handshake
    data = connection.recv(max_message_size)
    headers = _get_headers(data)
    response_tpl = "HTTP/1.1 101 Switching Protocols\r\n" \
                   "Upgrade:websocket\r\n" \
                   "Connection: Upgrade\r\n" \
                   "Sec-WebSocket-Accept: %s\r\n" \
                   "WebSocket-Location: ws://%s\r\n\r\n"
    # Fixed GUID from RFC 6455, appended to the client key to derive
    # the Sec-WebSocket-Accept value.
    magic_string = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
    value = ''
    if headers.get('Sec-WebSocket-Key'):
        value = headers['Sec-WebSocket-Key'] + magic_string
    # NOTE(review): if the client omitted Sec-WebSocket-Key, the accept hash
    # is computed over the empty string and the handshake will be invalid --
    # confirm whether that case should abort the connection instead.
    ac = base64.b64encode(hashlib.sha1(value.encode('utf-8')).digest())
    response_str = response_tpl % (ac.decode('utf-8'), headers.get("Host"))
    connection.sendall(bytes(response_str, encoding="utf-8"))

    # startup dispatcher and send message worker threads bound to this socket
    _startup_dispatcher()
    _startup_send_message_worker(connection)

    # receive ui message forever: decode each frame and queue its JSON payload
    while True:
        data = connection.recv(max_message_size)
        _RECEIVE_QUEUE.put(json.loads(_parse_payload(data)))
5,326,388
def SearchBeamPosition(DriverType=None):
    """A factory for SearchBeamPosition classes.

    Builds a wrapper around the ``dials.search_beam_position`` command-line
    program on top of the Driver class selected by DriverType, and returns
    an instance of it.
    """

    DriverInstance = DriverFactory.Driver(DriverType)

    class SearchBeamPositionWrapper(DriverInstance.__class__):
        """Assembles and runs a single dials.search_beam_position job."""

        def __init__(self):
            DriverInstance.__class__.__init__(self)
            self.set_executable("dials.search_beam_position")

            # Required job inputs; must be set via the setters before run().
            self._sweep_filename = None
            self._spot_filename = None
            # Output experiments file path; populated by run().
            self._optimized_filename = None
            # Optional extras appended to the command line when set.
            self._phil_file = None
            self._image_range = None

        def set_sweep_filename(self, sweep_filename):
            # Path to the experiments/sweep input file.
            self._sweep_filename = sweep_filename

        def set_spot_filename(self, spot_filename):
            # Path to the spot-finding reflections input file.
            self._spot_filename = spot_filename

        def set_phil_file(self, phil_file):
            # Optional phil parameter file passed through verbatim.
            self._phil_file = phil_file

        def set_image_range(self, image_range):
            # Optional (first, last) image range tuple.
            self._image_range = image_range

        def get_optimized_experiments_filename(self):
            # Only meaningful after run() has completed successfully.
            return self._optimized_filename

        def run(self):
            """Assemble the command line, run the program and verify output."""
            logger.debug("Running %s", self.get_executable())

            self.clear_command_line()

            # Positional inputs first: sweep then spots.
            self.add_command_line(self._sweep_filename)
            self.add_command_line(self._spot_filename)
            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            self.set_cpu_threads(nproc)
            self.add_command_line("nproc=%i" % nproc)
            if self._image_range:
                self.add_command_line("image_range=%d,%d" % self._image_range)
            if self._phil_file is not None:
                self.add_command_line(self._phil_file)

            # Name the output after the job id so parallel jobs don't clash.
            self._optimized_filename = os.path.join(
                self.get_working_directory(), "%d_optimised.expt" % self.get_xpid()
            )
            self.add_command_line("output.experiments=%s" % self._optimized_filename)

            self.start()
            self.close_wait()
            self.check_for_errors()
            self.get_all_output()
            # The program is expected to have written the optimised
            # experiments file; fail loudly if it did not.
            assert os.path.exists(self._optimized_filename), self._optimized_filename

    return SearchBeamPositionWrapper()
5,326,389
def movie(function, movie_name="movie.gif", play_range=None, loop=0, optimize=True,
          duration=100, embed=False, mp4=True):
    """
    Make a movie from a function.

    function has signature: function(index) and should return a PIL.Image.

    :param function: callable mapping a frame index to a PIL.Image
    :param movie_name: output GIF file name
    :param play_range: (start, stop[, step]) tuple passed to range() to
        generate frame indices; required
    :param loop: number of times the GIF loops (0 = loop forever)
    :param optimize: whether PIL should optimize the GIF palette
    :param duration: per-frame duration in milliseconds
    :param embed: passed through to IPython's Image when mp4 is False
    :param mp4: when True (default), convert the saved GIF to mp4 via gif2mp4
    :raises ValueError: if play_range is None or produces no frames
    :return: an IPython display Image (mp4=False) or the result of gif2mp4
    """
    if play_range is None:
        # Previously this crashed with an opaque TypeError from range(*None);
        # fail early with a clear message instead.
        raise ValueError("play_range must be a (start, stop[, step]) tuple")

    frames = [function(index) for index in range(*play_range)]
    if not frames:
        # With no frames no file is ever written; returning a viewer for a
        # missing file (or converting it) would only fail later and worse.
        raise ValueError("play_range produced no frames")

    frames[0].save(movie_name, save_all=True, append_images=frames[1:],
                   optimize=optimize, loop=loop, duration=duration)
    if mp4 is False:
        # Imported lazily so IPython is only required for inline GIF display.
        from IPython.display import Image
        return Image(url=movie_name, embed=embed)
    return gif2mp4(movie_name)
5,326,390
def get_rounded_reward_2(duration: float) -> float:
    """Round the reward for *duration* to two decimal places.

    :param duration: unrounded duration
    :return: the reward computed by get_reward, rounded to two decimals
    """
    reward = get_reward(duration)
    return round(reward, 2)
5,326,391
def fitness_order(order):
    """Fitness of a city ordering: total tour length, memoized by str(order)."""
    memo_key = str(order)
    if memo_key in cache:
        return cache[memo_key]
    # Sum the consecutive legs, then add the closing edge back to the start.
    total = 0
    for here, there in zip(order, order[1:]):
        total += distance_map[(here, there)]
    total += distance_map[(order[0], order[-1])]
    cache[memo_key] = total
    return total
5,326,392
def get_human_readable_user_scripts_path() -> str:
    """Return a human readable location of user-scripts.

    This is NOT a valid filesystem path; may be something like "(SD Card)".
    """
    from ba import _lang
    app = _ba.app
    user_dir: Optional[str] = app.python_directory_user
    if user_dir is None:
        return '<Not Available>'

    # On newer versions of android, the user's external storage dir is
    # probably only visible to the user's processes and thus not really
    # useful printed in its entirety; replace that prefix with a localized
    # "<External Storage>" marker instead.
    if app.platform == 'android':
        storage_root: Optional[str] = (
            _ba.android_get_external_storage_path())
        if storage_root is not None and user_dir.startswith(storage_root):
            marker = _lang.Lstr(resource='externalStorageText').evaluate()
            return '<' + marker + '>' + user_dir[len(storage_root):]
    return user_dir
5,326,393
def download_model_from_url(path, model='lstm'):
    """Download the pre-trained model archive and extract it to *path*.

    Does nothing if *path* already exists.  The archive is streamed into a
    sibling temp directory, extracted there, and its single top-level
    directory is moved to *path*; the temp directory is then removed.

    :param path: destination directory for the extracted model
    :param model: key into DOWNLOAD_MODEL_PATH_DICT selecting the model URL
    :raises RuntimeError: if the archive does not contain exactly one
        top-level directory
    """
    if os.path.exists(path):
        return
    download_model_path = DOWNLOAD_MODEL_PATH_DICT[model]
    logging.info("The first run will download the pre-trained model, which will take some time, please be patient!")
    dir_path = os.path.dirname(path)
    temp_path = dir_path + "_temp"
    file_path = os.path.join(temp_path, "model.tar.gz")
    if os.path.exists(temp_path):
        shutil.rmtree(temp_path)
    os.mkdir(temp_path)
    # 'wb' instead of the original 'ab': temp_path is always freshly created,
    # so truncate-on-open is the clearer intent and a context manager
    # guarantees the handle is closed before extraction.
    with open(file_path, 'wb') as f:
        r = requests.get(download_model_path, stream=True)
        total_len = int(r.headers.get('content-length'))
        for chunk in tqdm(r.iter_content(chunk_size=1024), total=total_len // 1024,
                          desc='downloading %s' % download_model_path, unit='KB'):
            if chunk:
                f.write(chunk)
                f.flush()
    logging.debug('extracting... to %s' % file_path)
    with tarfile.open(file_path) as tf:
        # NOTE(review): extractall on a downloaded archive is vulnerable to
        # path traversal ("tar slip") if the source is untrusted -- consider
        # the extraction filter available in newer tarfile versions.
        tf.extractall(path=temp_path)
    # Inspect only the *top level* of temp_path.  The original iterated
    # `for _, dirs, _ in os.walk(temp_path)`, which descends into the whole
    # extracted tree and raised RuntimeError whenever ANY nested directory
    # did not have exactly one child -- failing on perfectly valid archives.
    top_dirs = [d for d in os.listdir(temp_path)
                if os.path.isdir(os.path.join(temp_path, d))]
    if len(top_dirs) != 1:
        raise RuntimeError("There is a problem with the model catalogue, "
                           "please contact the author")
    # Move the single extracted directory into place.
    shutil.move(os.path.join(temp_path, top_dirs[0]), path)
    # delete temp directory
    shutil.rmtree(temp_path)
5,326,394
def rotated_positive_orthogonal_basis(
    angle_x=np.pi / 3, angle_y=np.pi / 4, angle_z=np.pi / 5
):
    """Get a rotated orthogonal basis.

    If X,Y,Z are the rotation matrices of the passed angles, the resulting
    basis is Z * Y * X.

    Parameters
    ----------
    angle_x :
        Rotation angle around the x-axis (Default value = np.pi / 3)
    angle_y :
        Rotation angle around the y-axis (Default value = np.pi / 4)
    angle_z :
        Rotation angle around the z-axis (Default value = np.pi / 5)

    Returns
    -------
    np.ndarray
        Rotated orthogonal basis

    """
    # Rotate axes to produce a more general test case.  Compose as
    # Z * (Y * X) -- same grouping as the original nested matmuls.
    basis_x = tf.rotation_matrix_x(angle_x)
    basis_y = tf.rotation_matrix_y(angle_y)
    basis_z = tf.rotation_matrix_z(angle_z)
    return basis_z @ (basis_y @ basis_x)
5,326,395
def reset_db():
    """Reset all databases: drop and recreate every table, then seed a
    default admin account (username 'admin', password 'admin').
    """
    # Destructive operation -- require interactive confirmation first.
    click.confirm('This operation will delete all database, do you want to continue?', abort=True)
    db.drop_all()
    click.echo('Drop all tables.')
    db.create_all()
    click.echo('Reset all database.')
    # NOTE(review): hard-coded private key checked into source -- the admin
    # account's key pair is effectively public; rotate before any real use.
    admin_privkey = "0x8ef2fa4907d75fa653cede21fa0a75d62add49e4db46044dd3954dda14fcdfa6"
    addr_msg = create_address_by_privkey(admin_privkey)
    # Seed the initial admin user from the derived key/address material.
    # privkey drops the '0x' prefix; the *_hex/*_int fields keep the raw
    # values returned by create_address_by_privkey.
    admin = User(username="admin", email="admin@admin.com", is_admin=True, active=True,
                 privkey=addr_msg["privateKeyHex"][2:],
                 privatekey_hex=addr_msg["privateKeyHex"],
                 privatekey_int=addr_msg["privateKeyInt"],
                 publickey_hex=addr_msg["publicKeyHex"],
                 publickey_int=addr_msg["publicKeyInt"],
                 address=addr_msg["address"])
    admin.set_password('admin')
    db.session.add(admin)
    db.session.commit()
    click.echo('Success Add Admin Count.')
5,326,396
def create_sequences(tokenizer, max_length, descriptions, photos_features, vocab_size):
    """Build one batch of LSTM training inputs from captions and image features.

    For every caption of every image, each prefix of the integer-encoded
    caption becomes one training sample:
    (image feature, padded prefix) -> one-hot next word.

    :param tokenizer: keras.preprocessing.text.Tokenizer mapping words <-> ints
    :param max_length: length of the longest caption in the training set
    :param descriptions: dict, key is the image name (without .jpg suffix),
        value is a list of caption strings for that image
    :param photos_features: dict, key is the image name (without .jpg suffix),
        value is a numpy array of image features
    :param vocab_size: number of words in the training vocabulary
    :return: tuple of three numpy arrays:
        image features (one row per sample),
        left-padded caption prefixes (shape: samples x max_length),
        one-hot next-word targets (shape: samples x vocab_size)
    """
    features, prefixes, next_words = [], [], []
    for image_id, captions in descriptions.items():
        for caption in captions:
            encoded = tokenizer.texts_to_sequences([caption])[0]
            for split in range(1, len(encoded)):
                prefix, target = encoded[:split], encoded[split]
                # Left-pad so every prefix sample has length max_length.
                prefix = pad_sequences([prefix], maxlen=max_length)[0]
                # One-hot encode the next word over the vocabulary.
                target = to_categorical([target], num_classes=vocab_size)[0]
                # NOTE(review): the [0] index assumes features carry a leading
                # batch axis (shape (1, n)); the docstring example showed flat
                # lists, for which this would grab a single scalar -- confirm
                # the layout callers actually use.
                features.append(photos_features[image_id][0])
                prefixes.append(prefix)
                next_words.append(target)
    return array(features), array(prefixes), array(next_words)
5,326,397
def create_project_locale_groups(apps, schema_editor):
    """Create translators groups for every project locale object.

    Data-migration helper: for each ProjectLocale, creates a
    '<project>/<locale> translators' group, grants it the
    can_translate_project_locale permission (both globally and as an
    object-level guardian permission), and stores the group on the
    ProjectLocale.
    """
    # Historical models -- looked up via the migration state, not imported.
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')
    ProjectLocale = apps.get_model('base', 'ProjectLocale')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    GroupObjectPermission = apps.get_model('guardian', 'GroupObjectPermission')

    project_locale_ct = ContentType.objects.get(
        app_label='base', model='projectlocale')
    can_translate = Permission.objects.get(
        content_type=project_locale_ct,
        codename='can_translate_project_locale')

    for project_locale in ProjectLocale.objects.all():
        group_name = '{}/{} translators'.format(
            project_locale.project.slug,
            project_locale.locale.code,
        )
        group = Group.objects.create(name=group_name)
        group.permissions.add(can_translate)
        # Object-level permission so guardian scopes translation rights
        # to this specific ProjectLocale.
        GroupObjectPermission.objects.create(
            object_pk=project_locale.pk,
            content_type=project_locale_ct,
            group=group,
            permission=can_translate,
        )
        project_locale.translators_group = group
        project_locale.save()
5,326,398
def avatar_uri(instance, filename):
    """
    upload_to handler for Channel.avatar

    :param instance: the Channel instance the avatar belongs to
    :param filename: original name of the uploaded file
    :return: storage path built by generate_filepath from the filename and
        channel name, with the "_avatar" suffix under the "channel" prefix
        (presumably a relative media path -- confirm against generate_filepath)
    """
    # Delegates all path construction to the shared generate_filepath helper.
    return generate_filepath(filename, instance.name, "_avatar", "channel")
5,326,399