content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
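Each record below is a (content, sha1, id) triple: content is the raw source text of one code snippet, sha1 is its 40-character hex digest, and id is an integer row identifier. As a minimal sketch of how such a dump could be consumed (this assumes a JSON Lines export with exactly these three field names; the actual storage format is not specified here), the records can be streamed and their digests re-checked like this:

import hashlib
import json

def iter_records(path):
    # Yield (id, sha1, content) tuples from a JSON Lines export of the dump.
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            yield row["id"], row["sha1"], row["content"]

def verify_digests(path):
    # Recompute each record's SHA-1 (assuming UTF-8 encoded content) and report mismatches.
    for row_id, digest, content in iter_records(path):
        computed = hashlib.sha1(content.encode("utf-8")).hexdigest()
        if computed != digest:
            print(f"row {row_id}: stored {digest} != computed {computed}")

Whether the stored digest was taken over raw bytes or UTF-8 encoded text is an assumption here; adjust the encoding step if the dump documents it differently.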
from typing import List from typing import Tuple def get_validation_data_iter(data_loader: RawParallelDatasetLoader, validation_sources: List[str], validation_target: str, buckets: List[Tuple[int, int]], bucket_batch_sizes: List[BucketBatchSize], source_vocabs: List[vocab.Vocab], target_vocab: vocab.Vocab, max_seq_len_source: int, max_seq_len_target: int, batch_size: int, fill_up: str) -> 'ParallelSampleIter': """ Returns a ParallelSampleIter for the validation data. """ logger.info("=================================") logger.info("Creating validation data iterator") logger.info("=================================") validation_length_statistics = analyze_sequence_lengths(validation_sources, validation_target, source_vocabs, target_vocab, max_seq_len_source, max_seq_len_target) validation_sources_sentences = [SequenceReader(source, vocab, add_bos=False) for source, vocab in zip(validation_sources, source_vocabs)] validation_target_sentences = SequenceReader(validation_target, target_vocab, add_bos=True, limit=None) validation_data_statistics = get_data_statistics(validation_sources_sentences, validation_target_sentences, buckets, validation_length_statistics.length_ratio_mean, validation_length_statistics.length_ratio_std, source_vocabs, target_vocab) validation_data_statistics.log(bucket_batch_sizes) validation_data = data_loader.load(validation_sources_sentences, validation_target_sentences, validation_data_statistics.num_sents_per_bucket).fill_up(bucket_batch_sizes, fill_up) return ParallelSampleIter(data=validation_data, buckets=buckets, batch_size=batch_size, bucket_batch_sizes=bucket_batch_sizes, num_factors=len(validation_sources))
826b5847fb55e61ba3a0b416643fbbc356a23b07
3,646,100
def _serialize_property( target_expr: str, value_expr: str, a_property: mapry.Property, auto_id: _AutoID, cpp: mapry.Cpp) -> str: """ Generate the code to serialize the property. The value as the property is given as ``value_expr`` and serialized into the ``target_expr``. :param target_expr: C++ expression of the Json::Value to be set :param value_expr: C++ expression of the value to be serialized :param a_property: the property definition :param auto_id: generator of unique identifiers :param cpp: C++ settings :return: generated serialization code """ if not a_property.optional: return _serialize_value( target_expr=target_expr, value_expr=value_expr, a_type=a_property.type, auto_id=auto_id, cpp=cpp) ## # Handle optional property ## deref_value_expr = "(*{})".format(value_expr) serialization = _serialize_value( target_expr=target_expr, value_expr=deref_value_expr, a_type=a_property.type, auto_id=auto_id, cpp=cpp) return _SERIALIZE_OPTIONAL_PROPERTY_TPL.render( value_expr=value_expr, serialization=serialization)
3a5a7d7795dd771224d1fd5de8304844cd260fad
3,646,101
import glob

from numpy import loadtxt


def read_raw_data(pattern):
    """:return X"""
    # Accept either a glob pattern or an explicit list of file paths.
    if isinstance(pattern, str):
        fpaths = glob.glob(pattern)
    elif isinstance(pattern, list):
        fpaths = pattern
    X = []
    for fpath in fpaths:
        print('loading file {} ...'.format(fpath))
        X.extend(loadtxt(fpath))
    return X
ae3f503db4b7f31a043dc4b611d9bf2393d7a352
3,646,102
def warp_affine_rio(src: np.ndarray, dst: np.ndarray, A: Affine, resampling: Resampling, src_nodata: Nodata = None, dst_nodata: Nodata = None, **kwargs) -> np.ndarray: """ Perform Affine warp using rasterio as backend library. :param src: image as ndarray :param dst: image as ndarray :param A: Affine transformm, maps from dst_coords to src_coords :param resampling: str|rasterio.warp.Resampling resampling strategy :param src_nodata: Value representing "no data" in the source image :param dst_nodata: Value to represent "no data" in the destination image **kwargs -- any other args to pass to ``rasterio.warp.reproject`` :returns: dst """ crs = _WRP_CRS src_transform = Affine.identity() dst_transform = A if isinstance(resampling, str): resampling = resampling_s2rio(resampling) # GDAL support for int8 is patchy, warp doesn't support it, so we need to convert to int16 if src.dtype.name == 'int8': src = src.astype('int16') if dst.dtype.name == 'int8': _dst = dst.astype('int16') else: _dst = dst rasterio.warp.reproject(src, _dst, src_transform=src_transform, dst_transform=dst_transform, src_crs=crs, dst_crs=crs, resampling=resampling, src_nodata=src_nodata, dst_nodata=dst_nodata, **kwargs) if dst is not _dst: # int8 workaround copy pixels back to int8 np.copyto(dst, _dst, casting='unsafe') return dst
4843ce222535a93b1fa7d0fee10161dadaba290b
3,646,103
def encode_integer_leb128(value: int) -> bytes: """Encode an integer with signed LEB128 encoding. :param int value: The value to encode. :return: ``value`` encoded as a variable-length integer in LEB128 format. :rtype: bytes """ if value == 0: return b"\0" # Calculate the number of bits in the integer and round up to the nearest multiple # of 7. We need to add 1 bit because bit_length() only returns the number of bits # required to encode the magnitude, but not the sign. n_bits = value.bit_length() + 1 if n_bits % 7: n_bits += 7 - (n_bits % 7) # Bit operations force a negative integer to its unsigned two's-complement # representation, e.g. -127 & 0xff = 0x80, -10 & 0xfff = 0xff6, etc. We use this to # sign-extend the number *and* make it unsigned. Once it's unsigned, we can use # ULEB128. mask = (1 << n_bits) - 1 value &= mask output = bytearray(n_bits // 7) for i in range(n_bits // 7): output[i] = 0x80 | (value & 0x7F) value >>= 7 # Last byte shouldn't have the high bit set. output[-1] &= 0x7F return bytes(output)
b74832115a58248f4a45a880f657de6dd38b0d8d
3,646,104
def google_sen_new(text_content): """ Analyzing Entity Sentiment in a String Args: text_content The text content to analyze """ # text_content = 'Grapes are good. Bananas are bad.' Available types: PLAIN_TEXT, HTML client = language_v1.LanguageServiceClient() type_ = enums.Document.Type.PLAIN_TEXT language = "en" document = {"content": text_content, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = enums.EncodingType.UTF8 response = client.analyze_entity_sentiment(document, encoding_type=encoding_type) result_dict = {} # "entity":[] for entity in response.entities: result_list = [] result_list.append(entity.name) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al result_list.append(enums.Entity.Type( entity.type).name) # Get the salience score associated with the entity in the [0, 1.0] range result_list.append( entity.salience) # Get the aggregate sentiment expressed for this entity in the provided document. sentiment = entity.sentiment result_list.append(sentiment.score) result_list.append(sentiment.magnitude) result_dict[entity] = result_list return result_dict
57c4020f35a344d7f264453571e3f8825e00206f
3,646,105
import subprocess def _create_ip_config_data(): """ This loads into a map the result of IPCONFIG command. """ map_ipconfigs = dict() curr_itf = "" proc = subprocess.Popen(['ipconfig', '/all'], stdout=subprocess.PIPE) for curr_line in proc.stdout.readlines(): curr_line = curr_line.decode("utf-8").rstrip() if curr_line: if curr_line[0] != " ": curr_itf = curr_line.strip() if curr_itf[-1] == ":": curr_itf = curr_itf[:-1] map_ipconfigs[curr_itf] = [] else: idx_colon = curr_line.find(":") if idx_colon >= 0: curr_key = curr_line[:idx_colon].replace(". ","").strip() curr_val = curr_line[idx_colon+1:].strip() else: curr_val = curr_line.strip() map_ipconfigs[curr_itf].append((curr_key, curr_val)) return map_ipconfigs
64d613dfa8c602f47974f053351094e97e5d1246
3,646,106
def simplify_polygon_by(points, is_higher, should_stop, refresh_node): """ Simplify the given polygon by greedily removing vertices using a given priority. This is generalized from Visvalingam's algorithm, which is described well here: http://bost.ocks.org/mike/simplify/ is_higher = function(a,b) returns node higher in priority to be removed. should_stop = function(a) returns True if given highest priority node stops simplification. refresh_node = function(a) refreshes attributes dependent on adjacent vertices. """ length = len(points) # build nodes nodes = [VertexNode(p) for p in points] # connect nodes for i in xrange(length): prev_i = (i+length-1) % length next_i = (i+1) % length node = nodes[i] node.prev_node = nodes[prev_i] node.next_node = nodes[next_i] refresh_node(node) node.orig_index = i def on_index_change(node,i): """Callback that allows a node to know its location in the heap.""" node.heap_index = i heap = Heap(nodes, is_higher, on_index_change) while True: node = heap.peek() if should_stop(node): break heap.pop() # Close gap in doubly-linked list. prev_node, next_node = node.prev_node, node.next_node prev_node.next_node = next_node next_node.prev_node = prev_node # Refresh vertices that have new adjacents. refresh_node(prev_node) heap.reorder_node(prev_node.heap_index) refresh_node(next_node) heap.reorder_node(next_node.heap_index) # Return remaining points in their original order. return [node.point for node in sorted(heap.array, key=(lambda node: node.orig_index))]
b9ae05b2d146e78dbed36cc48df6cbd24c33fcbc
3,646,107
def get_builder(slug):
    """ Get the Builder object for a given slug name.

    Args:
        slug - The slug name of the installable software
    """
    for builder in Index().index:
        if builder.slug == slug:
            return builder
    return False
d6013fb55d11be7a153b7a9e9f2bdd991b2a6304
3,646,108
def preprocess_normscale(patient_data, result, index, augment=True, metadata=None, normscale_resize_and_augment_function=normscale_resize_and_augment, testaug=False): """Normalizes scale and augments the data. Args: patient_data: the data to be preprocessed. result: dict to store the result in. index: index indicating in which slot the result dict the data should go. augment: flag indicating wheter augmentation is needed. metadata: metadata belonging to the patient data. """ if augment: if testaug: augmentation_params = sample_test_augmentation_parameters() else: augmentation_params = sample_augmentation_parameters() else: augmentation_params = None zoom_factor = None # Iterate over different sorts of data for tag, data in patient_data.items(): if tag in metadata: metadata_tag = metadata[tag] desired_shape = result[tag][index].shape cleaning_processes = getattr(config(), 'cleaning_processes', []) cleaning_processes_post = getattr(config(), 'cleaning_processes_post', []) if tag.startswith("sliced:data:singleslice"): # Cleaning data before extracting a patch data = clean_images( [patient_data[tag]], metadata=metadata_tag, cleaning_processes=cleaning_processes) # Augment and extract patch # Decide which roi to use. shift_center = (None, None) if getattr(config(), 'use_hough_roi', False): shift_center = metadata_tag["hough_roi"] patient_3d_tensor = normscale_resize_and_augment_function( data, output_shape=desired_shape[-2:], augment=augmentation_params, pixel_spacing=metadata_tag["PixelSpacing"], shift_center=shift_center[::-1])[0] if augmentation_params is not None: zoom_factor = augmentation_params["zoom_x"] * augmentation_params["zoom_y"] else: zoom_factor = 1.0 # Clean data further patient_3d_tensor = clean_images( patient_3d_tensor, metadata=metadata_tag, cleaning_processes=cleaning_processes_post) if "area_per_pixel:sax" in result: raise NotImplementedError() if augmentation_params and not augmentation_params.get("change_brightness", 0) == 0: patient_3d_tensor = augment_brightness(patient_3d_tensor, augmentation_params["change_brightness"]) put_in_the_middle(result[tag][index], patient_3d_tensor, True) elif tag.startswith("sliced:data:randomslices"): # Clean each slice separately data = [ clean_images([slicedata], metadata=metadata, cleaning_processes=cleaning_processes)[0] for slicedata, metadata in zip(data, metadata_tag)] # Augment and extract patches shift_centers = [(None, None)] * len(data) if getattr(config(), 'use_hough_roi', False): shift_centers = [m["hough_roi"] for m in metadata_tag] patient_3d_tensors = [ normscale_resize_and_augment_function( [slicedata], output_shape=desired_shape[-2:], augment=augmentation_params, pixel_spacing=metadata["PixelSpacing"], shift_center=shift_center[::-1])[0] for slicedata, metadata, shift_center in zip(data, metadata_tag, shift_centers)] if augmentation_params is not None: zoom_factor = augmentation_params["zoom_x"] * augmentation_params["zoom_y"] else: zoom_factor = 1.0 # Clean data further patient_3d_tensors = [ clean_images([patient_3d_tensor], metadata=metadata, cleaning_processes=cleaning_processes_post)[0] for patient_3d_tensor, metadata in zip(patient_3d_tensors, metadata_tag)] patient_4d_tensor = _make_4d_tensor(patient_3d_tensors) if augmentation_params and not augmentation_params.get("change_brightness", 0) == 0: patient_4d_tensor = augment_brightness(patient_4d_tensor, augmentation_params["change_brightness"]) if "area_per_pixel:sax" in result: raise NotImplementedError() put_in_the_middle(result[tag][index], patient_4d_tensor, 
True) elif tag.startswith("sliced:data:sax:locations"): pass # will be filled in by the next one elif tag.startswith("sliced:data:sax:is_not_padded"): pass # will be filled in by the next one elif tag.startswith("sliced:data:sax"): # step 1: sort (data, metadata_tag) with slice_location_finder slice_locations, sorted_indices, sorted_distances = slice_location_finder({i: metadata for i,metadata in enumerate(metadata_tag)}) data = [data[idx] for idx in sorted_indices] metadata_tag = [metadata_tag[idx] for idx in sorted_indices] slice_locations = np.array([slice_locations[idx]["relative_position"] for idx in sorted_indices]) slice_locations = slice_locations - (slice_locations[-1] + slice_locations[0])/2.0 data = [ clean_images([slicedata], metadata=metadata, cleaning_processes=cleaning_processes)[0] for slicedata, metadata in zip(data, metadata_tag)] # Augment and extract patches shift_centers = [(None, None)] * len(data) if getattr(config(), 'use_hough_roi', False): shift_centers = [m["hough_roi"] for m in metadata_tag] patient_3d_tensors = [ normscale_resize_and_augment_function( [slicedata], output_shape=desired_shape[-2:], augment=augmentation_params, pixel_spacing=metadata["PixelSpacing"], shift_center=shift_center[::-1])[0] for slicedata, metadata, shift_center in zip(data, metadata_tag, shift_centers)] if augmentation_params is not None: zoom_factor = augmentation_params["zoom_x"] * augmentation_params["zoom_y"] else: zoom_factor = 1.0 # Clean data further patient_3d_tensors = [ clean_images([patient_3d_tensor], metadata=metadata, cleaning_processes=cleaning_processes_post)[0] for patient_3d_tensor, metadata in zip(patient_3d_tensors, metadata_tag)] patient_4d_tensor = _make_4d_tensor(patient_3d_tensors) if augmentation_params and not augmentation_params.get("change_brightness", 0) == 0: patient_4d_tensor = augment_brightness(patient_4d_tensor, augmentation_params["change_brightness"]) # Augment sax order if augmentation_params and augmentation_params.get("flip_sax", 0) > 0.5: patient_4d_tensor = patient_4d_tensor[::-1] slice_locations = slice_locations[::-1] # Put data (images and metadata) in right location put_in_the_middle(result[tag][index], patient_4d_tensor, True) if "sliced:data:sax:locations" in result: eps_location = 1e-7 is_padded = np.array([False]*len(result["sliced:data:sax:locations"][index])) put_in_the_middle(result["sliced:data:sax:locations"][index], slice_locations + eps_location, True, is_padded) if "sliced:data:sax:distances" in result: eps_location = 1e-7 sorted_distances.append(0.0) # is easier for correct padding is_padded = np.array([False]*len(result["sliced:data:sax:distances"][index])) put_in_the_middle(result["sliced:data:sax:distances"][index], np.array(sorted_distances) + eps_location, True, is_padded) if "sliced:data:sax:is_not_padded" in result: result["sliced:data:sax:is_not_padded"][index] = np.logical_not(is_padded) elif tag.startswith("sliced:data:chanzoom:2ch"): # step 1: sort (data, metadata_tag) with slice_location_finder slice_locations, sorted_indices, sorted_distances = slice_location_finder({i: metadata for i,metadata in enumerate(metadata_tag[2])}) top_slice_metadata = metadata_tag[2][sorted_indices[0]] bottom_slice_metadata = metadata_tag[2][sorted_indices[-1]] ch2_metadata = metadata_tag[1] ch4_metadata = metadata_tag[0] trf_2ch, trf_4ch = get_chan_transformations( ch2_metadata=ch2_metadata, ch4_metadata=ch4_metadata, top_point_metadata = top_slice_metadata, bottom_point_metadata = bottom_slice_metadata, output_width=desired_shape[-1] ) 
ch4_3d_patient_tensor, ch2_3d_patient_tensor = [], [] ch4_data = data[0] ch2_data = data[1] if ch4_data is None and ch2_data is not None: ch4_data = ch2_data ch4_metadata = ch2_metadata if ch2_data is None and ch4_data is not None: ch2_data = ch4_data ch2_metadata = ch4_metadata for ch, ch_result, transform, metadata in [(ch4_data, ch4_3d_patient_tensor, trf_4ch, ch4_metadata), (ch2_data, ch2_3d_patient_tensor, trf_2ch, ch2_metadata)]: tform_shift_center, tform_shift_uncenter = build_center_uncenter_transforms(desired_shape[-2:]) zoom_factor = np.sqrt(np.abs(np.linalg.det(transform.params[:2,:2])) * np.prod(metadata["PixelSpacing"])) normalise_zoom_transform = build_augmentation_transform(zoom_x=zoom_factor, zoom_y=zoom_factor) if augmentation_params: augment_tform = build_augmentation_transform(**augmentation_params) total_tform = tform_shift_uncenter + augment_tform + normalise_zoom_transform + tform_shift_center + transform else: total_tform = tform_shift_uncenter + normalise_zoom_transform + tform_shift_center + transform ch_result[:] = [fast_warp(c, total_tform, output_shape=desired_shape[-2:]) for c in ch] # print "zoom factor:", zoom_factor if augmentation_params is not None: zoom_factor = augmentation_params["zoom_x"] * augmentation_params["zoom_y"] else: zoom_factor = 1.0 # Clean data further ch4_3d_patient_tensor = clean_images(np.array([ch4_3d_patient_tensor]), metadata=ch4_metadata, cleaning_processes=cleaning_processes_post)[0] ch2_3d_patient_tensor = clean_images(np.array([ch2_3d_patient_tensor]), metadata=ch2_metadata, cleaning_processes=cleaning_processes_post)[0] # Put data (images and metadata) in right location put_in_the_middle(result["sliced:data:chanzoom:2ch"][index], ch2_3d_patient_tensor, True) put_in_the_middle(result["sliced:data:chanzoom:4ch"][index], ch4_3d_patient_tensor, True) elif tag.startswith("sliced:data:shape"): raise NotImplementedError() elif tag.startswith("sliced:data"): # put time dimension first, then axis dimension data = clean_images(patient_data[tag], metadata=metadata_tag) patient_4d_tensor, zoom_ratios = resize_and_augment(data, output_shape=desired_shape[-2:], augment=augmentation_parameters) if "area_per_pixel:sax" in result: result["area_per_pixel:sax"][index] = zoom_ratios[0] * np.prod(metadata_tag[0]["PixelSpacing"]) if "noswitch" not in tag: patient_4d_tensor = np.swapaxes(patient_4d_tensor,1,0) put_in_the_middle(result[tag][index], patient_4d_tensor) elif tag.startswith("sliced:meta:all"): # TODO: this probably doesn't work very well yet result[tag][index] = patient_data[tag] elif tag.startswith("sliced:meta:PatientSex"): result[tag][index][0] = -1. if patient_data[tag]=='M' else 1. elif tag.startswith("sliced:meta:PatientAge"): number, letter = patient_data[tag][:3], patient_data[tag][-1] letter_rescale_factors = {'D': 365.25, 'W': 52.1429, 'M': 12., 'Y': 1.} result[tag][index][0] = float(patient_data[tag][:3]) / letter_rescale_factors[letter] if augmentation_params and zoom_factor: label_correction_function = lambda x: x * zoom_factor classification_correction_function = lambda x: utils.zoom_array(x, 1./zoom_factor) return label_correction_function, classification_correction_function else: return lambda x: x, lambda x: x
ea427a554d204da9857f3dacda9d6a3a96e4c107
3,646,109
def get_contact_lookup_list():
    """get contact lookup list"""
    try:
        return jsonify(Contact.get_contact_choices())
    except Exception as e:
        return e.message
e8d3a8f813366a16e86cf2eadf5acb7e235218de
3,646,110
def argmax(X, axis=None): """ Return tuple (values, indices) of the maximum entries of matrix :param:`X` along axis :param:`axis`. Row major order. :param X: Target matrix. :type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia or :class:`numpy.matrix` :param axis: Specify axis along which to operate. If not specified, whole matrix :param:`X` is considered. :type axis: `int` """ if sp.isspmatrix(X): X = X.tocsr() assert axis == 0 or axis == 1 or axis is None, "Incorrect axis number." res = [[float('-inf'), 0] for _ in range(X.shape[1 - axis])] if axis is not None else [float('-inf'), 0] def _caxis(row, col): if X[row, col] > res[col][0]: res[col] = (X[row, col], row) def _raxis(row, col): if X[row, col] > res[row][0]: res[row] = (X[row, col], col) def _naxis(row, col): if X[row, col] > res[0]: res[0] = X[row, col] res[1] = row * X.shape[0] + col check = _caxis if axis == 0 else _raxis if axis == 1 else _naxis [check(row, col) for row in range(X.shape[0]) for col in range(X.shape[1])] if axis is None: return res elif axis == 0: t = list(zip(*res)) return list(t[0]), np.mat(t[1]) else: t = list(zip(*res)) return list(t[0]), np.mat(t[1]).T else: idxX = np.asmatrix(X).argmax(axis) if axis is None: eX = X[idxX // X.shape[1], idxX % X.shape[1]] elif axis == 0: eX = [X[idxX[0, idx], col] for idx, col in zip(range(X.shape[1]), range(X.shape[1]))] else: eX = [X[row, idxX[idx, 0]] for row, idx in zip(range(X.shape[0]), range(X.shape[0]))] return eX, idxX
b97cb16798e0d726fc21a76a2a3c6a02d284e313
3,646,111
import hashlib from re import T from re import A def posts(): """ Function accessed by AJAX to handle a Series of Posts """ try: series_id = request.args[0] except: raise HTTP(400) try: recent = request.args[1] except: recent = 5 table = s3db.cms_post # List of Posts in this Series query = (table.series_id == series_id) posts = db(query).select(table.name, table.body, table.avatar, table.created_by, table.created_on, limitby=(0, recent)) output = UL(_id="comments") for post in posts: author = B(T("Anonymous")) if post.created_by: utable = s3db.auth_user ptable = s3db.pr_person ltable = s3db.pr_person_user query = (utable.id == post.created_by) left = [ltable.on(ltable.user_id == utable.id), ptable.on(ptable.pe_id == ltable.pe_id)] row = db(query).select(utable.email, ptable.first_name, ptable.middle_name, ptable.last_name, left=left, limitby=(0, 1)).first() if row: person = row.pr_person user = row[utable._tablename] username = s3_fullname(person) email = user.email.strip().lower() hash = hashlib.md5(email).hexdigest() url = "http://www.gravatar.com/%s" % hash author = B(A(username, _href=url, _target="top")) header = H4(post.name) if post.avatar: avatar = s3base.s3_avatar_represent(post.created_by) else: avatar = "" row = LI(DIV(avatar, DIV(DIV(header, _class="comment-header"), DIV(XML(post.body), _class="comment-body"), _class="comment-text"), DIV(DIV(post.created_on, _class="comment-date"), _class="fright"), DIV(author, _class="comment-footer"), _class="comment-box")) output.append(row) return XML(output)
dc5b43016c3e50c52969c91505856941c951911b
3,646,112
def resolve_game_object_y_collision(moving, static):
    """Resolves a collision by moving an object along the y axis.

    Args:
        moving (:obj:`engine.game_object.PhysicalGameObject`):
            The object to move along the y axis.
        static (:obj:`engine.game_object.PhysicalGameObject`):
            The object to leave as-is.

    Returns:
        The change in the velocity of the object along the y axis.
    """
    has_overlap = geometry.detect_overlap_1d(
        moving.x, moving.width, static.x, static.width)

    if has_overlap:
        # Overlap detected along x-axis, resolve collision on y-axis
        return _resolve_game_object_axis_collision(moving, static, 'y')

    return 0
24d63b4fd9e4a37d22aceee01e6accc2a74e8ee4
3,646,113
def filter_all(fn, *l):
    """
    Runs the filter function on all items in a list of lists

    :param fn: Filter function
    :param l: list of lists to filter
    :return: list of filtered lists

    >>> filter_all(lambda x: x != "", ['a'], ['b'], [""], ["d"])
    [['a'], ['b'], [], ['d']]
    """
    # Filter each of the given lists separately and materialise the results
    # so the doctest above (a list of filtered lists) holds on Python 3.
    return [list(filter(fn, lst)) for lst in l]
114b7b9bf9b22d55bd891a654318d4a49e30be51
3,646,114
def get_test_runners(args): """ Get Test Runners """ res = list() qitest_jsons = args.qitest_jsons or list() # first case: qitest.json in current working directory test_runner = get_test_runner(args) if test_runner: res.append(test_runner) # second case: qitest.json specified with --qitest-json for qitest_json in qitest_jsons: test_runner = get_test_runner(args, qitest_json=qitest_json) res.append(test_runner) # third case: parsing build projects build_projects_runners = parse_build_projects(args) # avoid appending a test_runner guessed from a build project # when res already contains a test runner computed from a # --qitest-json argument known_cwds = [x.cwd for x in res] for test_runner in build_projects_runners: if test_runner.cwd not in known_cwds: res.append(test_runner) if args.coverage and not build_projects_runners: raise Exception("""--coverage can only be used from a qibuild CMake project\n""") elif args.coverage: return build_projects_runners if not res: raise EmptyTestListException("Nothing found to test") return res
73d9b4e73935cd2c41c24bd1376ade9ea274f23d
3,646,115
def get_preselected_facets(params, all_categories): """ Resolve all facets that have been determined by the GET parameters. Args: params: Contains the categories/facets all_categories: Returns: dict: Contains all sorted facets """ ret_arr = {} iso_cat = params.get("isoCategories", "") custom_cat = params.get("customCategories", "") inspire_cat = params.get("inspireThemes", "") org_cat = params.get("registratingDepartments", "") # resolve ids by iterating all_categories all_iso_cat = all_categories[0] all_inspire_cat = all_categories[1] all_custom_cat = all_categories[2] all_org_cat = all_categories[3] iso_preselect = __resolve_single_facet(iso_cat, all_iso_cat) inspire_preselect = __resolve_single_facet(inspire_cat, all_inspire_cat) custom_preselect = __resolve_single_facet(custom_cat, all_custom_cat) org_preselect = __resolve_single_facet(org_cat, all_org_cat) if len(iso_preselect) > 0: ret_arr["ISO 19115"] = iso_preselect if len(inspire_preselect) > 0: ret_arr["INSPIRE"] = inspire_preselect if len(custom_preselect) > 0: ret_arr["Custom"] = custom_preselect if len(org_preselect) > 0: ret_arr["Organizations"] = org_preselect return ret_arr
878f3ef05aaaa643782c6d50c6796bc503c0f8e6
3,646,116
def has_joined(*args: list, **kwargs) -> str: """ Validates the user's joining the channel after being required to join. :param args: *[0] -> first name :param kwargs: :return: Generated validation message """ first_name = args[0] text = f"{_star_struck}{_smiling_face_with_heart} بسیار خب " \ f"<b>{first_name}</b> " \ f", حالا تمام دسترسی ها رو داری{_party_popper}{_confetti_ball}\n\n" \ f"تبریک از طرف @chromusic_fa {_red_heart}\n" \ f"با خیال راحت هر فایل صوتی رو سرچ کن {_face_blowing_a_kiss}" return text
d446da88a362c3821e25d0bee0e110ec0a906423
3,646,117
def depth_residual_regresssion_subnet(x, flg, regular, subnet_num): """Build a U-Net architecture""" """ Args: x is the input, 4-D tensor (BxHxWxC) flg represent weather add the BN regular represent the regularizer number Return: output is 4-D Tensor (BxHxWxC) """ pref = 'depth_regression_subnet_' + str(subnet_num) + '_' # whether to train flag train_ae = flg # define initializer for the network keys = ['conv', 'upsample'] keys_avoid = ['OptimizeLoss'] inits = [] init_net = None if init_net != None: for name in init_net.get_variable_names(): # select certain variables flag_init = False for key in keys: if key in name: flag_init = True for key in keys_avoid: if key in name: flag_init = False if flag_init: name_f = name.replace('/', '_') num = str(init_net.get_variable_value(name).tolist()) # self define the initializer function exec( "class " + name_f + "(Initializer):\n def __init__(self,dtype=tf.float32): self.dtype=dtype \n def __call__(self,shape,dtype=None,partition_info=None): return tf.cast(np.array(" + num + "),dtype=self.dtype)\n def get_config(self):return {\"dtype\": self.dtype.name}") inits.append(name_f) # autoencoder n_filters = [ 128, 96, 64, 32, 16, 1, ] filter_sizes = [ 3, 3, 3, 3, 3, 3, ] pool_sizes = [ \ 1, 1, 1, 1, 1, 1, ] pool_strides = [ 1, 1, 1, 1, 1, 1, ] skips = [ \ False, False, False, False, False, False, ] # change space ae_inputs = tf.identity(x, name='ae_inputs') # prepare input current_input = tf.identity(ae_inputs, name="input") #################################################################################################################### # convolutional layers: depth regression feature = [] for i in range(0, len(n_filters)): name = pref + "conv_" + str(i) # define the initializer if name + '_bias' in inits: bias_init = eval(name + '_bias()') else: bias_init = tf.zeros_initializer() if name + '_kernel' in inits: kernel_init = eval(name + '_kernel()') else: kernel_init = None if i == (len(n_filters) - 1): activation = None else: activation = relu # convolution current_input = tf.layers.conv2d( inputs=current_input, filters=n_filters[i], kernel_size=[filter_sizes[i], filter_sizes[i]], padding="same", activation=activation, trainable=train_ae, kernel_initializer=kernel_init, bias_initializer=bias_init, name=name, ) if pool_sizes[i] == 1 and pool_strides[i] == 1: feature.append(current_input) else: feature.append( tf.layers.max_pooling2d( \ inputs=current_input, pool_size=[pool_sizes[i], pool_sizes[i]], strides=pool_strides[i], name=pref + "pool_" + str(i) ) ) current_input = feature[-1] depth_coarse = tf.identity(feature[-1], name='depth_coarse_output') return depth_coarse
2c5d2cb03f60acc92f981d108b791a0e1215f5f6
3,646,118
def dist2(x, c): """ Calculates squared distance between two sets of points. Parameters ---------- x: numpy.ndarray Data of shape `(ndata, dimx)` c: numpy.ndarray Centers of shape `(ncenters, dimc)` Returns ------- n2: numpy.ndarray Squared distances between each pair of data from x and c, of shape `(ndata, ncenters)` """ assert x.shape[1] == c.shape[1], \ 'Data dimension does not match dimension of centers' x = np.expand_dims(x, axis=0) # new shape will be `(1, ndata, dimx)` c = np.expand_dims(c, axis=1) # new shape will be `(ncenters, 1, dimc)` # We will now use broadcasting to easily calculate pairwise distances n2 = np.sum((x - c) ** 2, axis=-1) return n2
24a1b9a368d2086a923cd656923dc799726ed7f0
3,646,119
def process_name(i: int, of: int) -> str:
    """Return e.g. '| | 2 |': an n-track name with track `i` (here i=2) marked.

    This makes it easy to follow each process's log messages, because you just
    go down the line until you encounter the same number again.

    Example: The interleaved log of four processes that each simulate a car
    visiting a charging station. The processes have been named with
    `process_name()`, and their log messages start with their `self.name`.
    (Car #2 does not turn up in this snippet.)

    | | | 3 arriving at 6
    | 1 | | starting to charge at 7
    0 | | | starting to charge at 7
    | 1 | | leaving the bcs at 9
    """
    lines = ["|"] * of
    lines[i] = str(i)
    return " ".join(lines)
bc3e0d06544b61249a583b6fa0a010ec917c0428
3,646,120
def cards_db(db):
    """ CardsDB object that's empty. """
    db.delete_all()
    return db
9649b309990325eca38ed89c6e9d499b41786dab
3,646,121
def _geo_connected(geo, rxn):
    """ Assess if geometry is connected. Right now only works for minima
    """

    # Determine connectivity (only for minima)
    if rxn is not None:
        gra = automol.geom.graph(geo)
        conns = automol.graph.connected_components(gra)
        lconns = len(conns)
    else:
        lconns = 1

    # Check connectivity
    if lconns == 1:
        connected = True
    else:
        ioprinter.bad_conformer('disconnected')
        connected = False

    return connected
d5993b7083703746214e70d6d100857da99c6c02
3,646,122
def scale_to_range(image, dest_range=(0, 1)):
    """ Scale an image to the given range. """
    return np.interp(image, xp=(image.min(), image.max()), fp=dest_range)
a225b44dc05d71d8ccc380d26fb61d96116414da
3,646,123
def files():
    """Hypothesis strategy for generating objects pyswagger can use as file
    handles to populate `file` format parameters. Generated values take the
    format: `dict('data': <file object>)`"""
    return file_objects().map(lambda x: {"data": x})
04e787502a043ffba08912724c9e29f84a6a416c
3,646,124
from typing import Optional from typing import Generator def get_histograms( query: Optional[str] = None, delta: Optional[bool] = None ) -> Generator[dict, dict, list[Histogram]]: """Get Chrome histograms. Parameters ---------- query: Optional[str] Requested substring in name. Only histograms which have query as a substring in their name are extracted. An empty or absent query returns all histograms. delta: Optional[bool] If true, retrieve delta since last call. Returns ------- histograms: list[Histogram] Histograms. **Experimental** """ response = yield { "method": "Browser.getHistograms", "params": filter_none({"query": query, "delta": delta}), } return [Histogram.from_json(h) for h in response["histograms"]]
059792d045bdea84ff636a27de9a1d9812ae4c24
3,646,125
def energy_decay_curve_chu_lundeby( data, sampling_rate, freq='broadband', noise_level='auto', is_energy=False, time_shift=True, channel_independent=False, normalize=True, plot=False): """ This function combines Chu's and Lundeby's methods: The estimated noise level is subtracted before backward integration, the impulse response is truncated at the intersection time, and the correction for the truncation is applied [1, 2, 3]_ Parameters ---------- data : ndarray, double The room impulse response with dimension [..., n_samples] sampling_rate: integer The sampling rate of the room impulse response. freq: integer OR string The frequency band. If set to 'broadband', the time window of the Lundeby-algorithm will not be set in dependence of frequency. noise_level: ndarray, double OR string If not specified, the noise level is calculated based on the last 10 percent of the RIR. Otherwise specify manually for each channel as array. is_energy: boolean Defines, if the data is already squared. time_shift : boolean Defines, if the silence at beginning of the RIR should be removed. channel_independent : boolean Defines, if the time shift and normalizsation is done channel-independently or not. normalize : boolean Defines, if the energy decay curve should be normalized in the end or not. plot: Boolean Specifies, whether the results should be visualized or not. Returns ------- energy_decay_curve: ndarray, double Returns the noise handeled edc. References ---------- .. [1] Lundeby, Virgran, Bietz and Vorlaender - Uncertainties of Measurements in Room Acoustics - ACUSTICA Vol. 81 (1995) .. [2] W. T. Chu. “Comparison of reverberation measurements using Schroeder’s impulse method and decay-curve averaging method”. In: Journal of the Acoustical Society of America 63.5 (1978), pp. 1444–1450. .. [3] M. Guski, “Influences of external error sources on measurements of room acoustic parameters,” 2015. """ energy_data, n_channels, data_shape = preprocess_rir( data, is_energy=is_energy, time_shift=time_shift, channel_independent=channel_independent) n_samples = energy_data.shape[-1] subtraction = subtract_noise_from_squared_rir( energy_data, noise_level=noise_level) intersection_time, late_reverberation_time, noise_level = \ intersection_time_lundeby( energy_data, sampling_rate=sampling_rate, freq=freq, initial_noise_power=noise_level, is_energy=True, time_shift=False, channel_independent=False, plot=False) time_vector = smooth_rir(energy_data, sampling_rate)[2] energy_decay_curve = np.zeros([n_channels, n_samples]) for idx_channel in range(0, n_channels): intersection_time_idx = np.argmin(np.abs( time_vector - intersection_time[idx_channel])) if noise_level == 'auto': p_square_at_intersection = estimate_noise_energy( energy_data[idx_channel], is_energy=True) else: p_square_at_intersection = noise_level[idx_channel] # calculate correction term according to DIN EN ISO 3382 correction = (p_square_at_intersection * late_reverberation_time[idx_channel] * (1 / (6*np.log(10))) * sampling_rate) energy_decay_curve[idx_channel, :intersection_time_idx] = \ ra.schroeder_integration( subtraction[idx_channel, :intersection_time_idx], is_energy=True) energy_decay_curve[idx_channel] += correction if normalize: # Normalize the EDC... if not channel_independent: # ...by the first element of each channel. energy_decay_curve = (energy_decay_curve.T / energy_decay_curve[..., 0]).T else: # ...by the maximum first element of each channel. 
max_start_value = np.amax(energy_decay_curve[..., 0]) energy_decay_curve /= max_start_value energy_decay_curve[..., intersection_time_idx:] = np.nan if plot: plt.figure(figsize=(15, 3)) plt.subplot(131) plt.plot(time_vector, 10*np.log10(energy_data.T)) plt.xlabel('Time [s]') plt.ylabel('Squared IR [dB]') plt.subplot(132) plt.plot(time_vector, 10*np.log10(subtraction.T)) plt.xlabel('Time [s]') plt.ylabel('Noise subtracted IR [dB]') plt.subplot(133) plt.plot(time_vector[0:energy_decay_curve.shape[-1]], 10*np.log10( energy_decay_curve.T)) plt.xlabel('Time [s]') plt.ylabel('Tr. EDC with corr. & subt. [dB]') plt.tight_layout() # Recover original data shape: energy_decay_curve = np.reshape(energy_decay_curve, data_shape) energy_decay_curve = np.squeeze(energy_decay_curve) return energy_decay_curve
3e9711fbc47442a27fc339b3fb18ad6f21a44c91
3,646,126
def sinkhorn(
    p,
    q,
    metric="euclidean",
):
    """
    Returns the earth mover's distance between two point clouds

    Parameters
    ----------
    p : 2-D array
        First point cloud
    q : 2-D array
        Second point cloud

    Returns
    -------
    distance : float
        The distance between the two point clouds
    """
    p_weights = np.ones(len(p)) / len(p)
    q_weights = np.ones(len(q)) / len(q)

    pairwise_dist = np.ascontiguousarray(
        pairwise_distances(p, Y=q, metric=metric, n_jobs=-1)
    )

    result = pot.sinkhorn2(
        p_weights,
        q_weights,
        pairwise_dist,
        reg=0.05,
        numItermax=100,
        return_matrix=False,
    )

    return np.sqrt(result)
93a4eb2383cfc4a2f462daf1b984d773c339aee7
3,646,127
def generate_s3_events(cluster_name, cluster_dict, config):
    """Add the S3 Events module to the Terraform cluster dict.

    Args:
        cluster_name (str): The name of the currently generating cluster
        cluster_dict (defaultdict): The dict containing all Terraform config
            for a given cluster.
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        bool: Result of applying the s3_events module
    """
    s3_event_buckets = config['clusters'][cluster_name]['modules']['s3_events']

    generate_s3_events_by_bucket(cluster_name, cluster_dict, config, s3_event_buckets)

    return True
93af74bcd9b0c16fdfd1a3495bb709d84edb10a6
3,646,128
def cluster_seg(bt, seg_list, radius):
    """ Fetch segments which align themself for a given tolerance. """
    cluster, seen_ix = [], set()
    for i, seg in enumerate(seg_list):
        if i not in seen_ix:
            sim_seg_ix = list(bt.query_radius([seg], radius)[0])
            seen_ix |= set(sim_seg_ix)
            cluster.append(sim_seg_ix)
    return _find_connected_components(cluster)
37308331b41cd7d1e1b8717bf4c0d5a4ced55385
3,646,129
def stage_grid( Dstg, A, dx_c, tte, min_Rins=None, recamber=None, stag=None, resolution=1. ): """Generate an H-mesh for a turbine stage.""" # Change scaling factor on grid points # Distribute the spacings between stator and rotor dx_c = np.array([[dx_c[0], dx_c[1] / 2.0], [dx_c[1] / 2.0, dx_c[2]]]) # Streamwise grids for stator and rotor x_c, ilte = streamwise_grid(dx_c, resolution=resolution) x = [x_ci * Dstg.cx[0] for x_ci in x_c] # Generate radial grid Dr = np.array([Dstg.Dr[:2], Dstg.Dr[1:]]) r = merid_grid(x_c, Dstg.rm, Dr, resolution=resolution) # Evaluate radial blade angles r1 = r[0][ilte[0][0], :] spf = (r1 - r1.min()) / r1.ptp() chi = np.stack((Dstg.free_vortex_vane(spf), Dstg.free_vortex_blade(spf))) # If recambering, then tweak the metal angles if not recamber is None: dev = np.reshape(recamber, (2, 2, 1)) dev[1] *= -1 # Reverse direction of rotor angles chi += dev # Get sections (normalised by axial chord for now) sect = [ geometry.radially_interpolate_section( spf, chii, spf, tte, Ai, stag=stagi ) for chii, Ai, stagi in zip(chi, A, stag) ] # If we have asked for a minimum inscribed circle, confirm that the # constraint is not violated if min_Rins: for i, row_sect in enumerate(sect): for rad_sect in row_sect: current_radius = geometry.largest_inscribed_circle(rad_sect.T) if current_radius < min_Rins: raise geometry.GeometryConstraintError( ( "Row %d, Thickness is too small for the constraint " "inscribed circle: %.3f < %.3f" % (i, current_radius, min_Rins) ) ) # Now we can do b2b grids rt = [b2b_grid(*args, resolution=resolution) for args in zip(x, r, Dstg.s, Dstg.cx, sect)] # Offset the rotor so it is downstream of stator x[1] = x[1] + x[0][-1] - x[1][0] # fig, ax = plt.subplots() # ax.plot(x[0],rt[0][:,0,(0,-1)]) # ax.plot(x[1],rt[1][:,0,(0,-1)]) # ax.axis('equal') # plt.savefig('sect.pdf') # quit() return x, r, rt, ilte
e19e07e61be0079559ee597475ce017f8f0a6189
3,646,130
def get_best_trial(trial_list, metric):
    """Retrieve the best trial."""
    return max(trial_list, key=lambda trial: trial.last_result.get(metric, 0))
c5ddbb9ad00cddaba857d0d0233f6452e6702552
3,646,131
def make_registry_metaclass(registry_store): """Return a new Registry metaclass.""" if not isinstance(registry_store, dict): raise TypeError("'registry_store' argument must be a dict") class Registry(type): """A metaclass that stores a reference to all registered classes.""" def __new__(mcs, class_name, base_classes, class_dict): """Create and returns a new instance of Registry. The registry is a class named 'class_name' derived from 'base_classes' that defines 'class_dict' as additional attributes. The returned class is added to 'registry_store' using class_dict["REGISTERED_NAME"] as the name, or 'class_name' if the "REGISTERED_NAME" attribute isn't defined. If the sentinel value 'LEAVE_UNREGISTERED' is specified as the name, then the returned class isn't added to 'registry_store'. The returned class will have the "REGISTERED_NAME" attribute defined either as its associated key in 'registry_store' or the 'LEAVE_UNREGISTERED' sentinel value. """ registered_name = class_dict.setdefault("REGISTERED_NAME", class_name) cls = type.__new__(mcs, class_name, base_classes, class_dict) if registered_name is not LEAVE_UNREGISTERED: if registered_name in registry_store: raise ValueError("The name %s is already registered; a different value for the" " 'REGISTERED_NAME' attribute must be chosen" % (registered_name)) registry_store[registered_name] = cls return cls return Registry
c1c0426e4d47323ccd3ab80ff3917253858f1b0c
3,646,132
def bind11(reactant, max_helix = True): """ Returns a list of reaction pathways which can be produced by 1-1 binding reactions of the argument complex. The 1-1 binding reaction is the hybridization of two complementary unpaired domains within a single complex to produce a single unpseudoknotted product complex. """ reactions = set() structure = list(reactant.pair_table) for (strand_index, strand) in enumerate(structure): for (domain_index, domain) in enumerate(strand): # The displacing domain must be free if structure[strand_index][domain_index] is not None : continue start_loc = (strand_index, domain_index) # search (one direction) around the loop for an open domain that can be bound. results = find_on_loop(reactant, start_loc, filter_bind11) assert len(results) == len(find_on_loop(reactant, start_loc, filter_bind11, direction = -1)) for e, (invader, before, target, after) in enumerate(results): if max_helix: invader, before, target, after = zipper( reactant, invader[0], before, target[0], after, filter_bind11) results[e] = list(map(Loop, [invader, before, target, after])) # build products for (loc1s, before, loc2s, after) in results: # Should be reversed loc2s right? assert [x == ~y for x,y in zip(loc1s.domains, loc2s.domains)] product = do_bind11(reactant, loc1s.domain_locs, loc2s.domain_locs) reaction = PepperReaction([reactant], [product], 'bind11') if reaction.rate_constant[0] is None: reaction.rate_constant = (unimolecular_binding_rate(loc1s.dlength, before, after), '/s') reactions.add(reaction) return sorted(reactions)
45c74105e2f9733092aba3b55edd4dbaa8e9e26e
3,646,133
def get_all_movie_props(movies_set: pd.DataFrame, flag: int, file_path: str): """ Function that returns the data frame of all movie properties from dbpedia :param movies_set: data set of movies with columns movie id and movie dbpedia uri :param flag: 1 to generate the data frame from scratch and 0 to read from file :param file_path: file path to read if flag is not 0 :return: the data frame of all movie properties from dbpedia """ cols = ['movie_id', 'prop', 'obj'] if flag == 1: all_movie_props = obtain_all_movie_props(movies_set, cols) all_movie_props.to_csv(file_path, mode='w', header=False, index=False) else: all_movie_props = pd.read_csv(file_path, header=None) all_movie_props.columns = cols all_movie_props = all_movie_props.set_index(cols[0]) return all_movie_props
f3b85ce0d5b0e0fa8f28a2f3e8ee7d69c2002ee1
3,646,134
def convert_to_clocks(duration, f_sampling=200e6, rounding_period=None):
    """
    convert a duration in seconds to an integer number of clocks

        f_sampling: 200e6 is the CBox sampling frequency
    """
    if rounding_period is not None:
        duration = max(duration//rounding_period, 1)*rounding_period
    clock_duration = int(duration*f_sampling)
    return clock_duration
602b00af689cc25374b7debd39264b438de44baa
3,646,135
def account_approved(f):
    """Checks whether user account has been approved, raises a 401 error
    otherwise.
    """
    def decorator(*args, **kwargs):
        if not current_user:
            abort(401, {'message': 'Invalid user account.'})
        elif not current_user.is_approved:
            abort(401, {'message': 'Account has not yet been approved.'})
        return f(*args, **kwargs)
    return decorator
e9f9e7bd15bd1df22540a6a42db95501a26fcce2
3,646,136
def multiply(x):
    """Multiply operator.

    >>> multiply(2)(1)
    2
    """
    def multiply(y):
        return y * x
    return multiply
77d983090e03820d03777f1f69cfc7b0ef6d88a2
3,646,137
def tally_transactions(address, txs): """Calculate the net value of all deposits, withdrawals and fees :param address: Address of the account :param txs: Transactions JSON for the address :returns: The total net value of all deposits, withdrawals and fees """ send_total = 0 for item in txs['result']: if item['success']: # Check for deposits/withdrawals if "MsgSend" in item['messageTypes']: if item['messages'][0]['content']['toAddress'] != address: # Remove withdrawals send_total -= translate_basecro_to_cro(Decimal(item['messages'][0]['content']['amount'][0]['amount'])) else: # Add deposits send_total += translate_basecro_to_cro(Decimal(item['messages'][0]['content']['amount'][0]['amount'])) # Remove fees send_total -= translate_basecro_to_cro(Decimal(item['fee'][0]['amount'])) return send_total
6eaca1e7be11f9af254bcc491ff661413d8745f4
3,646,138
def expose(policy):
    """
    Annotate a method to permit access to contexts matching an authorization
    policy. The annotation may be specified multiple times. Methods lacking
    any authorization policy are not accessible.

    ::

        @mitogen.service.expose(policy=mitogen.service.AllowParents())
        def unsafe_operation(self):
            ...

    :param mitogen.service.Policy policy:
        The policy to require.
    """
    def wrapper(func):
        func.mitogen_service__policies = [policy] + getattr(
            func, "mitogen_service__policies", []
        )
        return func

    return wrapper
74caed36885e5ea947a2ecdac9a2cddf2f5f51b0
3,646,139
def _bytes_feature(value):
    """Creates a bytes feature from the passed value.

    Args:
        value: A numpy array.

    Returns:
        A TensorFlow feature.
    """
    return tf.train.Feature(
        bytes_list=tf.train.BytesList(
            value=[value.astype(np.float32).tostring()]))
e13ac22bef91af7847aecdb558e849de27e89623
3,646,140
from typing import Union from typing import OrderedDict def get_cell_phase( adata: anndata.AnnData, layer: str = None, gene_list: Union[OrderedDict, None] = None, refine: bool = True, threshold: Union[float, None] = 0.3, ) -> pd.DataFrame: """Compute cell cycle phase scores for cells in the population Arguments --------- adata: :class:`~anndata.AnnData` layer: `str` or None (default: `None`) The layer of data to use for calculating correlation. If None, use adata.X. gene_list: `OrderedDict` or None (default: `None`) OrderedDict of marker genes to use for cell cycle phases. If None, the default list will be used. refine: `bool` (default: `True`) whether to refine the gene lists based on how consistent the expression is among the groups threshold: `float` or None (default: `0.3`) threshold on correlation coefficient used to discard genes (expression of each gene is compared to the bulk expression of the group and any gene with a correlation coefficient less than this is discarded) Returns ------- Cell cycle scores indicating the likelihood a given cell is in a given cell cycle phase """ # get list of genes if one is not provided if gene_list is None: cell_phase_genes = get_cell_phase_genes(adata, layer, refine=refine, threshold=threshold) else: cell_phase_genes = gene_list adata.uns["cell_phase_genes"] = cell_phase_genes # score each cell cycle phase and Z-normalize phase_scores = pd.DataFrame(batch_group_score(adata, layer, cell_phase_genes)) normalized_phase_scores = phase_scores.sub(phase_scores.mean(axis=1), axis=0).div(phase_scores.std(axis=1), axis=0) normalized_phase_scores_corr = normalized_phase_scores.transpose() normalized_phase_scores_corr["G1-S"] = [1, 0, 0, 0, 0] normalized_phase_scores_corr["S"] = [0, 1, 0, 0, 0] normalized_phase_scores_corr["G2-M"] = [0, 0, 1, 0, 0] normalized_phase_scores_corr["M"] = [0, 0, 0, 1, 0] normalized_phase_scores_corr["M-G1"] = [0, 0, 0, 0, 1] phase_list = ["G1-S", "S", "G2-M", "M", "M-G1"] # final scores for each phaase are correlation of expression profile with vectors defined above cell_cycle_scores = normalized_phase_scores_corr.corr() tmp = -len(phase_list) cell_cycle_scores = cell_cycle_scores[tmp:].transpose()[: -len(phase_list)] # pick maximal score as the phase for that cell cell_cycle_scores["cell_cycle_phase"] = cell_cycle_scores.idxmax(axis=1) cell_cycle_scores["cell_cycle_phase"] = cell_cycle_scores["cell_cycle_phase"].astype("category") cell_cycle_scores["cell_cycle_phase"].cat.set_categories(phase_list, inplace=True) def progress_ratio(x, phase_list): ind = phase_list.index(x["cell_cycle_phase"]) return x[phase_list[(ind - 1) % len(phase_list)]] - x[phase_list[(ind + 1) % len(phase_list)]] # interpolate position within given cell cycle phase cell_cycle_scores["cell_cycle_progress"] = cell_cycle_scores.apply( lambda x: progress_ratio(x, list(phase_list)), axis=1 ) cell_cycle_scores.sort_values( ["cell_cycle_phase", "cell_cycle_progress"], ascending=[True, False], inplace=True, ) # order of cell within cell cycle phase cell_cycle_scores["cell_cycle_order"] = cell_cycle_scores.groupby("cell_cycle_phase").cumcount() cell_cycle_scores["cell_cycle_order"] = cell_cycle_scores.groupby("cell_cycle_phase")["cell_cycle_order"].apply( lambda x: x / (len(x) - 1) ) return cell_cycle_scores
9b379c42cd409893d51885f5580b26b9700547bf
3,646,141
def variational_lower_bound(prediction): """ This is the variational lower bound derived in Auto-Encoding Variational Bayes, Kingma & Welling, 2014 :param [posterior_means, posterior_logvar, data_means, data_logvar, originals] posterior_means: predicted means for the posterior posterior_logvar: predicted log variances for the posterior data_means: predicted mean parameter for the voxels modelled as Gaussians data_logvar: predicted log variance parameter for the voxels modelled as Gaussians originals: the original inputs :return: """ # log_2pi = np.log(2*np.pi) log_2pi = 1.837877 assert len(prediction) >= 5, \ "please see the returns of network/vae.py" \ "for the prediction list format" posterior_means, posterior_logvar = prediction[:2] data_means, data_logvar = prediction[2:4] originals = prediction[4] squared_diff = tf.square(data_means - originals) log_likelihood = \ data_logvar + log_2pi + tf.exp(-data_logvar) * squared_diff # batch_size = tf.shape(log_likelihood)[0] batch_size = log_likelihood.get_shape().as_list()[0] log_likelihood = tf.reshape(log_likelihood, shape=[batch_size, -1]) log_likelihood = -0.5 * tf.reduce_sum(log_likelihood, axis=[1]) KL_divergence = 1 + posterior_logvar \ - tf.square(posterior_means) \ - tf.exp(posterior_logvar) KL_divergence = -0.5 * tf.reduce_sum(KL_divergence, axis=[1]) return tf.reduce_mean(KL_divergence - log_likelihood)
bcbc9a660f07fe677f823ee3aeb284817e94601d
3,646,142
import base64


def didGen(vk, method="dad"):
    """
    didGen accepts an EdDSA (Ed25519) key in the form of a byte string
    and returns a DID.

    :param vk: 32 byte verifier/public key from EdDSA (Ed25519) key
    :param method: W3C did method string. Defaults to "dad".
    :return: W3C DID string
    """
    if vk is None:
        return None

    # convert verkey to jsonable unicode string of base64 url-file safe
    vk64u = base64.urlsafe_b64encode(vk).decode("utf-8")

    return "did:{0}:{1}".format(method, vk64u)
9991491ab486d8960633190e3d3baa9058f0da50
3,646,143
import pickle


def load_dataset(datapath):
    """Extract class label info"""
    with open(datapath + "/experiment_dataset.dat", "rb") as f:
        data_dict = pickle.load(f)
    return data_dict
3a0d8ef9c48036879b32ab0e74e52429418297c0
3,646,144
def deleteupload():
    """Deletes an upload.

    An uploads_id is given and that entry is then removed from the
    uploads table in the database.
    """
    uploads_id = request.args.get('uploads_id')

    if not uploads.exists(uploads_id=uploads_id):
        return bad_json_response(
            'BIG OOPS: Something went wrong deleting the file.'
        )

    uploads.delete(uploads_id=uploads_id)

    return good_json_response('success')
df18801b287569f1fa1114fc7059a415b82913d0
3,646,145
def read(fn, offset, length, hdfs=None):
    """ Read a block of bytes from a particular file """
    with hdfs.open(fn, 'r') as f:
        f.seek(offset)
        bytes = f.read(length)
        logger.debug("Read %d bytes from %s:%d", len(bytes), fn, offset)
    return bytes
219ad06d0b111a14ff3f1ba895d516d4340ed1dd
3,646,146
from typing import Dict
from pathlib import Path
import json


def load_json(filename: str) -> Dict:
    """Read JSON file from metadata folder

    Args:
        filename: Name of metadata file

    Returns:
        dict: Dictionary of data
    """
    filepath = (
        Path(__file__).resolve().parent.parent.joinpath("metadata").joinpath(filename)
    )
    metadata: Dict = json.loads(filepath.read_text())
    return metadata
37d9f08344cf2a544c12fef58992d781556a9efd
3,646,147
def get_riemann_sum(x, delta_x):
    """
    Returns the riemann `sum` given a `function` and the input `x` and `delta_x`

    Parameters
    ----------
    x : list
        List of numbers returned by `np.linspace` given a lower and upper
        bound, and the number of intervals
    delta_x :
        The interval

    Returns
    -------
    float
        The integral sum
    """
    return sum(f(x)*delta_x)
dd80d12581533fa4074411845050f29193a03432
3,646,148
def MPO_rand(n, bond_dim, phys_dim=2, normalize=True, cyclic=False, herm=False, dtype=float, **mpo_opts): """Generate a random matrix product state. Parameters ---------- n : int The number of sites. bond_dim : int The bond dimension. phys_dim : int, optional The physical (site) dimensions, defaults to 2. normalize : bool, optional Whether to normalize the operator such that ``trace(A.H @ A) == 1``. cyclic : bool, optional Generate a MPO with periodic boundary conditions or not, default is open boundary conditions. dtype : {float, complex} or numpy dtype, optional Data type of the tensor network. herm : bool, optional Whether to make the matrix hermitian (or symmetric if real) or not. mpo_opts Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductOperator`. """ cyc_shp = (bond_dim,) if cyclic else () shapes = [(*cyc_shp, bond_dim, phys_dim, phys_dim), *((bond_dim, bond_dim, phys_dim, phys_dim),) * (n - 2), (bond_dim, *cyc_shp, phys_dim, phys_dim)] def gen_data(shape): data = randn(shape, dtype=dtype) if not herm: return data trans = (0, 2, 1) if len(shape) == 3 else (0, 1, 3, 2) return data + data.transpose(*trans).conj() arrays = map(lambda x: x / norm_fro_dense(x)**(1 / (x.ndim - 1)), map(gen_data, shapes)) rmpo = MatrixProductOperator(arrays, **mpo_opts) if normalize: rmpo /= (rmpo.H @ rmpo)**0.5 return rmpo
22220095b5cfcb3625edf3cde59e03fa37cd5423
3,646,149
def get_short_size(size_bytes):
    """
    Get a file size string in short format.

    This function returns:
        "B" size (e.g. 2) when size_bytes < 1KiB
        "KiB" size (e.g. 345.6K) when size_bytes >= 1KiB and size_bytes < 1MiB
        "MiB" size (e.g. 7.8M) when size_bytes >= 1MiB

    size_bytes: File size in bytes
    """
    if size_bytes < 1024:
        return str(size_bytes)
    if size_bytes < 1048576:
        return f"{size_bytes / 1024:.1f}K"
    return f"{size_bytes / 1048576:.1f}M"
ebc9ba25c01dedf0d15b9e2a21b67989763bc8c8
3,646,150
def pair_setup(
    auth_type: AuthenticationType, connection: HttpConnection
) -> PairSetupProcedure:
    """Return procedure object used for Pair-Setup."""
    _LOGGER.debug("Setting up new AirPlay Pair-Setup procedure with type %s", auth_type)

    if auth_type == AuthenticationType.Legacy:
        srp = LegacySRPAuthHandler(new_credentials())
        srp.initialize()
        return AirPlayLegacyPairSetupProcedure(connection, srp)

    if auth_type == AuthenticationType.HAP:
        srp = SRPAuthHandler()
        srp.initialize()
        return AirPlayHapPairSetupProcedure(connection, srp)

    raise exceptions.NotSupportedError(
        f"authentication type {auth_type} does not support Pair-Setup"
    )
6fbd0eb57bd3da8c70233d07393513036548482b
3,646,151
def score_from_srl(srl_path, truth_path, freq, verbose=False):
    """
    Given source list output by PyBDSF and training truth catalogue,
    calculate the official score for the sources identified in the srl.

    Args:
        srl_path (`str`): Path to source list (.srl file)
        truth_path (`str`): Path to training truth catalogue
        freq (`int`): Image frequency band (560, 1400 or 9200 MHz)
        verbose (`bool`): True to print out size ratio info
    """
    truth_df = load_truth_df(truth_path)

    # Predict size ID and correct the Maj and Min values:
    cat_df = cat_df_from_srl(srl_path)

    scorer = Sdc1Scorer(cat_df, truth_df, freq)
    score = scorer.run(train=True, detail=True, mode=1)
    return score
87cfdd7ed7c1a42fc3a4080289e7e34be6a2a85a
3,646,152
def get_feature_read(key, max_num_bbs=None):
    """Choose the right feature function for the given key to parse TFRecords

    Args:
        key: the feature name
        max_num_bbs: Max number of bounding boxes (used for `bounding_boxes` and `classes`)
        max_num_groups: Number of pre-defined groups (used for `clustered_bounding_boxes`)
    """
    if key in ['im_id', 'num_boxes']:
        return tf.FixedLenFeature((), tf.int64)
    elif key in ['bounding_boxes']:
        assert max_num_bbs is not None
        return tf.FixedLenFeature((max_num_bbs, 4), tf.float32)
    elif key in ['classes']:
        assert max_num_bbs is not None
        return tf.FixedLenFeature((max_num_bbs,), tf.int64)
    else:
        raise SystemExit("Unknown feature", key)
6ec7f06e900baedec19950c0f4742da9c4df1514
3,646,153
import numpy import time def kernel(cc, eris, t1=None, t2=None, max_cycle=50, tol=1e-8, tolnormt=1e-6, verbose=logger.INFO): """Exactly the same as pyscf.cc.ccsd.kernel, which calls a *local* energy() function.""" if isinstance(verbose, logger.Logger): log = verbose else: log = logger.Logger(cc.stdout, verbose) if t1 is None and t2 is None: t1, t2 = cc.init_amps(eris)[1:] elif t1 is None: nocc = cc.nocc nvir = cc.nmo - nocc t1 = numpy.zeros((nocc,nvir), eris.dtype) elif t2 is None: t2 = cc.init_amps(eris)[2] cput1 = cput0 = (time.clock(), time.time()) nocc, nvir = t1.shape eold = 0 eccsd = 0 if cc.diis: adiis = lib.diis.DIIS(cc, cc.diis_file) adiis.space = cc.diis_space else: adiis = lambda t1,t2,*args: (t1,t2) conv = False for istep in range(max_cycle): t1new, t2new = cc.update_amps(t1, t2, eris) normt = numpy.linalg.norm(t1new-t1) + numpy.linalg.norm(t2new-t2) t1, t2 = t1new, t2new t1new = t2new = None if cc.diis: t1, t2 = cc.diis(t1, t2, istep, normt, eccsd-eold, adiis) eold, eccsd = eccsd, energy(cc, t1, t2, eris) log.info('istep = %d E(CCSD) = %.15g dE = %.9g norm(t1,t2) = %.6g', istep, eccsd, eccsd - eold, normt) cput1 = log.timer('CCSD iter', *cput1) if abs(eccsd-eold) < tol and normt < tolnormt: conv = True break log.timer('CCSD', *cput0) return conv, eccsd, t1, t2
24917c48cbdb2062914462ad3f354cdd0e4e6318
3,646,154
import argparse


def get_arguments():
    """parse provided command line arguments"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--server",
        help="Where to send the output - use https URL to POST "
        "to the dognews server API, or a file name to save locally as json",
        required=True)
    parser.add_argument(
        "--imagefolder",
        help="Where to save the thumbnails",
        required=True)
    parser.add_argument(
        "--token",
        help="Authentication token associated with the submit-bot user, generated in the dognews server app",
        required=True)
    return parser.parse_args()
bc7aa7fefda65ade99820e786a460e07a5037f46
3,646,155
async def getAllDestinyIDs():
    """Returns a list with all discord members destiny ids"""

    select_sql = """
        SELECT destinyID
        FROM "discordGuardiansToken";"""

    async with (await get_connection_pool()).acquire(timeout=timeout) as connection:
        result = await connection.fetch(select_sql)

    return [x[0] for x in result]
22e94165cc0f50c8458be152d77231f30f7383b8
3,646,156
def login():
    """Handles login for Gello."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or password.')
    return render_template('auth/login.html', form=form)
5000c52d652c114a5b69b403fca9809dfa9e6bca
3,646,157
def create_loss_functions(interconnector_coefficients, demand_coefficients, demand): """Creates a loss function for each interconnector. Transforms the dynamic demand dependendent interconnector loss functions into functions that only depend on interconnector flow. i.e takes the function f and creates g by pre-calculating the demand dependent terms. f(inter_flow, flow_coefficient, nsw_demand, nsw_coefficient, qld_demand, qld_coefficient) = inter_losses becomes g(inter_flow) = inter_losses The mathematics of the demand dependent loss functions is described in the :download:`Marginal Loss Factors documentation section 3 to 5 <../../docs/pdfs/Marginal Loss Factors for the 2020-21 Financial year.pdf>`. Examples -------- >>> import pandas as pd Some arbitrary regional demands. >>> demand = pd.DataFrame({ ... 'region': ['VIC1', 'NSW1', 'QLD1', 'SA1'], ... 'loss_function_demand': [6000.0 , 7000.0, 5000.0, 3000.0]}) Loss model details from 2020 Jan NEM web LOSSFACTORMODEL file >>> demand_coefficients = pd.DataFrame({ ... 'interconnector': ['NSW1-QLD1', 'NSW1-QLD1', 'VIC1-NSW1', 'VIC1-NSW1', 'VIC1-NSW1'], ... 'region': ['NSW1', 'QLD1', 'NSW1', 'VIC1', 'SA1'], ... 'demand_coefficient': [-0.00000035146, 0.000010044, 0.000021734, -0.000031523, -0.000065967]}) Loss model details from 2020 Jan NEM web INTERCONNECTORCONSTRAINT file >>> interconnector_coefficients = pd.DataFrame({ ... 'interconnector': ['NSW1-QLD1', 'VIC1-NSW1'], ... 'loss_constant': [0.9529, 1.0657], ... 'flow_coefficient': [0.00019617, 0.00017027], ... 'from_region_loss_share': [0.5, 0.5]}) Create the loss functions >>> loss_functions = create_loss_functions(interconnector_coefficients, demand_coefficients, demand) Lets use one of the loss functions, first get the loss function of VIC1-NSW1 and call it g >>> g = loss_functions[loss_functions['interconnector'] == 'VIC1-NSW1']['loss_function'].iloc[0] Calculate the losses at 600 MW flow >>> print(g(600.0)) -70.87199999999996 Now for NSW1-QLD1 >>> h = loss_functions[loss_functions['interconnector'] == 'NSW1-QLD1']['loss_function'].iloc[0] >>> print(h(600.0)) 35.70646799999993 Parameters ---------- interconnector_coefficients : pd.DataFrame ====================== ======================================================================================== Columns: Description: interconnector unique identifier of a interconnector (as `str`) loss_constant the constant term in the interconnector loss factor equation (as np.float64) flow_coefficient the coefficient of the interconnector flow variable in the loss factor equation (as np.float64) from_region_loss_share the proportion of loss attribute to the from region, remainer are attributed to the to region (as np.float64) ====================== ======================================================================================== demand_coefficients : pd.DataFrame ================== ========================================================================================= Columns: Description: interconnector unique identifier of a interconnector (as `str`) region the market region whose demand the coefficient applies too, required (as `str`) demand_coefficient the coefficient of regional demand variable in the loss factor equation (as `np.float64`) ================== ========================================================================================= demand : pd.DataFrame ==================== ===================================================================================== Columns: Description: region unique identifier of a region (as 
`str`) loss_function_demand the estimated regional demand, as calculated by initial supply + demand forecast, in MW (as `np.float64`) ==================== ===================================================================================== Returns ------- pd.DataFrame loss_functions ================ ============================================================================================ Columns: Description: interconnector unique identifier of a interconnector (as `str`) loss_function a `function` object that takes interconnector flow (as `float`) an input and returns interconnector losses (as `float`). ================ ============================================================================================ """ demand_loss_factor_offset = pd.merge(demand_coefficients, demand, 'inner', on=['region']) demand_loss_factor_offset['offset'] = demand_loss_factor_offset['loss_function_demand'] * \ demand_loss_factor_offset['demand_coefficient'] demand_loss_factor_offset = demand_loss_factor_offset.groupby('interconnector', as_index=False)['offset'].sum() loss_functions = pd.merge(interconnector_coefficients, demand_loss_factor_offset, 'left', on=['interconnector']) loss_functions['loss_constant'] = loss_functions['loss_constant'] + loss_functions['offset'].fillna(0) loss_functions['loss_function'] = \ loss_functions.apply(lambda x: create_function(x['loss_constant'], x['flow_coefficient']), axis=1) return loss_functions.loc[:, ['interconnector', 'loss_function', 'from_region_loss_share']]
1522b4506d4dad40e2b3d16bdd8ebd92d9b46401
3,646,158
def num2proto(pnum):
    """Protocol number to name"""
    # Look for the common ones first
    if pnum == 6:
        return "tcp"
    elif pnum == 17:
        return "udp"
    elif pnum == 1:
        return "icmp"
    elif pnum == 58:
        # Use the short form of icmp-ipv6 when appropriate
        return "icmpv6"

    # Get cached proto table, else create new one
    global proto_table
    if not bool(proto_table):
        proto_table = ProtocolTable()

    pname = proto_table[pnum]

    # If not found, return the number as a string
    if pname == "Unassigned":
        return str(pnum)
    return pname
ad68b0fe530d63de62087eb23e5cacca0d48b996
3,646,159
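A brief usage sketch for num2proto above. The common protocol numbers are handled by the early returns, so these calls never touch the module-level proto_table cache or the ProtocolTable class that the full lookup path assumes.

# Assumes num2proto from the record above is in scope.
print(num2proto(6))    # "tcp"
print(num2proto(17))   # "udp"
print(num2proto(58))   # "icmpv6"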
def get_problem_size(problem_size, params):
    """compute current problem size"""
    if callable(problem_size):
        problem_size = problem_size(params)
    if isinstance(problem_size, (str, int, np.integer)):
        problem_size = (problem_size, )
    current_problem_size = [1, 1, 1]
    for i, s in enumerate(problem_size):
        if isinstance(s, str):
            current_problem_size[i] = int(
                eval(replace_param_occurrences(s, params)))
        elif isinstance(s, (int, np.integer)):
            current_problem_size[i] = s
        else:
            raise TypeError(
                "Error: problem_size should only contain strings or integers")
    return current_problem_size
c71394f081a7f0d00fcae653dffc439bc7b1b3b1
3,646,160
import zipfile
import os
from io import BytesIO


def zip_dir(path):
    """
    Create a zip archive containing all files and dirs rooted in path.
    The archive is created in memory and a file handler is returned by the function.

    Args:
        path: directory containing the resources to archive.
    Return:
        file_out: file handler pointing to the compressed archive.
    """
    file_out = BytesIO()
    with zipfile.ZipFile(file_out, "w", zipfile.ZIP_DEFLATED) as ziph:
        for root, _, files in os.walk(path):
            for file in files:
                ziph.write(os.path.join(root, file),
                           os.path.relpath(os.path.join(root, file), start=path))
    file_out.seek(0)
    return file_out
fd60182ca3854a922d44c16e87efde0f7671ce1b
3,646,161
def interpolate(x, size=None, scale_factor=None, mode='nearest', align_corners=False, align_mode=0, data_format='NCHW', name=None): """ This op resizes a batch of images. The input must be a 3-D Tensor of the shape (num_batches, channels, in_w) or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels), Where in_w is width of the input tensor, in_h is the height of the input tensor, in_d is the depth of the intput tensor. and the resizing only applies on the three dimensions(depth, height and width). Supporting resample methods: 'linear' : Linear interpolation 'bilinear' : Bilinear interpolation 'trilinear' : Trilinear interpolation 'nearest' : Nearest neighbor interpolation 'bicubic' : Bicubic interpolation 'area': Area interpolation Linear interpolation is the method of using a line connecting two known quantities to determine the value of an unknown quantity between the two known quantities. Nearest neighbor interpolation is to perform nearest neighbor interpolation in both the 3rd dimension(in height direction) and the 4th dimension(in width direction) on input tensor. Bilinear interpolation is an extension of linear interpolation for interpolating functions of two variables (e.g. H-direction and W-direction in this op) on a rectilinear 2D grid. The key idea is to perform linear interpolation first in one direction, and then again in the other direction. Trilinear interpolation is an extension of linear interpolation for interpolating functions of three variables (e.g. D-direction, H-direction and W-direction in this op) on a rectilinear 3D grid. The linear interpolation is performed on three directions. align_corners and align_mode are optional parameters,the calculation method of interpolation can be selected by them. Bicubic interpolation is an extension of cubic interpolation for interpolating data points on a two-dimensional regular grid. The interpolated surface is smoother than corresponding surfaces obtained by bilinear interpolation or nearest-neighbor interpolation. Area interpolation is to perform area interpolation in both the 3rd dimension(in height direction) , the 4th dimension(in width direction) and the 5th dimension(in depth direction) on input tensor. Set to area will directly call `paddle.nn.functional.adaptive_avg_pool1d` or `paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`. Example: .. 
code-block:: text For scale_factor: if align_corners = True && out_size > 1 : scale_factor = (in_size-1.0)/(out_size-1.0) else: scale_factor = float(in_size/out_size) Linear interpolation: if: align_corners = False , align_mode = 0 input : (N,C,W_in) output: (N,C,W_out) where: W_out = (W_{in}+0.5) * scale_{factor} - 0.5 else: input : (N,C,W_in) output: (N,C,W_out) where: W_out = W_{in} * scale_{factor} Nearest neighbor interpolation: align_corners = False input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = floor (H_{in} * scale_{factor}) W_out = floor (W_{in} * scale_{factor}) Bilinear interpolation: if: align_corners = False , align_mode = 0 input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = (H_{in}+0.5) * scale_{factor} - 0.5 W_out = (W_{in}+0.5) * scale_{factor} - 0.5 else: input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = H_{in} * scale_{factor} W_out = W_{in} * scale_{factor} Bicubic interpolation: if: align_corners = False input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = (H_{in}+0.5) * scale_{factor} - 0.5 W_out = (W_{in}+0.5) * scale_{factor} - 0.5 else: input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = H_{in} * scale_{factor} W_out = W_{in} * scale_{factor} Trilinear interpolation: if: align_corners = False , align_mode = 0 input : (N,C,D_in,H_in,W_in) output: (N,C,D_out,H_out,W_out) where: D_out = (D_{in}+0.5) * scale_{factor} - 0.5 H_out = (H_{in}+0.5) * scale_{factor} - 0.5 W_out = (W_{in}+0.5) * scale_{factor} - 0.5 else: input : (N,C,D_in,H_in,W_in) output: (N,C,D_out,H_out,W_out) where: D_out = D_{in} * scale_{factor} H_out = H_{in} * scale_{factor} W_out = W_{in} * scale_{factor} For details of linear interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Linear_interpolation. For details of nearest neighbor interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation. For details of bilinear interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Bilinear_interpolation. For details of trilinear interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Trilinear_interpolation. For details of bicubic interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Bicubic_interpolation Parameters: x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8, its data format is specified by :attr:`data_format`. size (list|tuple|Tensor|None): Output shape of image resize layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w) when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor. Default: None. If a list/tuple, each element can be an integer or a Tensor of shape: [1]. If a Tensor, its dimensions size should be a 1. scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At least one of :attr:`size` or :attr:`scale_factor` must be set. And :attr:`size` has a higher priority than :attr:`scale_factor`.Has to match input size if it is either a list or a tuple or a Tensor. Default: None. mode (str): The resample method. It supports 'linear', 'area', 'nearest', 'bilinear', 'bicubic' and 'trilinear' currently. Default: 'nearest' align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels.This only has an effect when 'linear', 'bilinear', 'bicubic' or 'trilinear'. 
Default: False align_mode(int) : An optional for linear/bilinear/trilinear interpolation. Refer to the formula in the example above, it can be \'0\' for src_idx = scale_factor*(dst_indx+0.5)-0.5 , can be \'1\' for src_idx = scale_factor*dst_index. data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`, `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels), A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels), or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels). Raises: TypeError: size should be a list or tuple or Tensor. ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear', 'trilinear', 'bicubic', 'area' or 'nearest' currently. ValueError: 'linear' only support 3-D tensor. ValueError: 'bilinear' and 'bicubic' only support 4-D tensor. ValueError: 'nearest' only support 4-D or 5-D tensor. ValueError: 'trilinear' only support 5-D tensor. ValueError: One of size and scale_factor must not be None. ValueError: size length should be 1 for input 3-D tensor. ValueError: size length should be 2 for input 4-D tensor. ValueError: size length should be 3 for input 5-D tensor. ValueError: scale_factor should be greater than zero. TypeError: align_corners should be a bool value ValueError: align_mode can only be '0' or '1' ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'. Examples: .. 
code-block:: python import paddle import numpy as np import paddle.nn.functional as F # given out size input_data = np.random.rand(2,3,6,10).astype("float32") x = paddle.to_tensor(input_data) output_1 = F.interpolate(x=x, size=[12,12]) print(output_1.shape) # [2L, 3L, 12L, 12L] # given scale output_2 = F.interpolate(x=x, scale_factor=[2,1]) print(output_2.shape) # [2L, 3L, 12L, 10L] # bilinear interp output_3 = F.interpolate(x=x, scale_factor=[2,1], mode="bilinear") print(output_2.shape) # [2L, 3L, 12L, 10L] """ data_format = data_format.upper() resample = mode.upper() resample_type = mode.lower() resample_methods = [ 'LINEAR', 'BILINEAR', 'TRILINEAR', 'NEAREST', 'BICUBIC', 'AREA', ] if resample not in resample_methods: raise ValueError( "The 'resample' of image_resize can only be 'area', 'linear', 'bilinear', 'trilinear', " " 'bicubic' or 'nearest' currently.") if resample in ['LINEAR'] and len(x.shape) != 3: raise ValueError("'linear' only support 3-D tensor.") if resample in ['NEAREST'] and len(x.shape) != 4 and len(x.shape) != 5: raise ValueError("'NEAREST' only support 4-D or 5-D tensor.") if resample in ['BILINEAR', 'BICUBIC'] and len(x.shape) != 4: raise ValueError("'bilinear' and 'bicubic' only support 4-D tensor.") if resample == 'TRILINEAR' and len(x.shape) != 5: raise ValueError("'trilinear'only support 5-D tensor.") if size is None and scale_factor is None: raise ValueError("One of size and scale_factor must not be None.") if not isinstance(align_corners, bool): raise TypeError("Attr align_corners should be a bool value") if align_mode != 0 and align_mode != 1: raise ValueError("align_mode can only be 0 or 1") if align_corners != 0 and resample == 'NEAREST': raise ValueError( "align_corners option can only be set with the interpolating modes: linear | bilinear | bicubic | trilinear" ) if resample == 'AREA': if isinstance(size, list) or isinstance(size, tuple) or isinstance( size, Variable): if len(size) == 0: raise ValueError("output size can not be empty") if len(x.shape) == 3: return paddle.nn.functional.adaptive_avg_pool1d(x, size) elif len(x.shape) == 4: return paddle.nn.functional.adaptive_avg_pool2d(x, size) elif len(x.shape) == 5: return paddle.nn.functional.adaptive_avg_pool3d(x, size) helper = LayerHelper('{}_interp_v2'.format(resample_type), **locals()) dtype = helper.input_dtype(input_param_name='x') if len(x.shape) == 3 and data_format not in ['NCW', 'NWC']: raise ValueError( "Got wrong value for param `data_format`: " + data_format + " received but only `NCW` or `NWC` supported for 3-D input.") elif len(x.shape) == 4 and data_format not in ['NCHW', 'NHWC']: raise ValueError( "Got wrong value for param `data_format`: " + data_format + " received but only `NCHW` or `NHWC` supported for 4-D input.") elif len(x.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']: raise ValueError( "Got wrong value for param `data_format`: " + data_format + " received but only `NCDHW` or `NDHWC` supported for 5-D input.") def _is_list_or_turple_(data): return (isinstance(data, list) or isinstance(data, tuple)) if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW': data_layout = 'NCHW' if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC': data_layout = 'NHWC' if resample == 'NEAREST': align_corners = False inputs = {"X": x} attrs = { "out_d": -1, "out_h": -1, "out_w": -1, "interp_method": resample_type, "align_corners": align_corners, "align_mode": align_mode, "data_layout": data_layout } out_shape = size scale = scale_factor if out_shape is 
not None and scale is not None: raise ValueError("Only one of size or scale_factor should be defined.") if out_shape is not None: if isinstance(out_shape, Variable) and not in_dynamic_mode(): out_shape.stop_gradient = True inputs['OutSize'] = out_shape else: if in_dynamic_mode(): if isinstance(out_shape, Variable): out_shape = list(out_shape.numpy()) for i, dim in enumerate(out_shape): if isinstance(dim, Variable): out_shape[i] = dim.numpy()[0] if not (_is_list_or_turple_(out_shape)): raise TypeError("size should be a list or tuple or Variable.") # Validate the shape contain_var = False for dim_idx, dim_size in enumerate(out_shape): if isinstance(dim_size, Variable): contain_var = True continue assert dim_size > 0, ( "Each dimension size given in out_shape must be greater than 0." ) if contain_var: new_size_tensor = [] size_list = [] for dim in out_shape: if isinstance(dim, Variable): dim.stop_gradient = True new_size_tensor.append(dim) size_list.append(-1) else: assert (isinstance(dim, int)) temp_out = helper.create_variable_for_type_inference( 'int32') fill_constant( [1], 'int32', dim, force_cpu=True, out=temp_out) new_size_tensor.append(temp_out) size_list.append(dim) inputs['SizeTensor'] = new_size_tensor if len(x.shape) == 3: if len(out_shape) != 1: raise ValueError( "size length should be 2 for input 3-D tensor") if contain_var: attrs['out_w'] = size_list[0] else: out_shape = list(map(int, out_shape)) attrs['out_w'] = out_shape[0] if len(x.shape) == 4: if len(out_shape) != 2: raise ValueError("size length should be 2 for " "input 4-D tensor.") if contain_var: attrs['out_h'] = size_list[0] attrs['out_w'] = size_list[1] else: out_shape = list(map(int, out_shape)) attrs['out_h'] = out_shape[0] attrs['out_w'] = out_shape[1] if len(x.shape) == 5: if len(out_shape) != 3: raise ValueError("size length should be 3 for " "input 5-D tensor.") if contain_var: attrs['out_d'] = size_list[0] attrs['out_h'] = size_list[1] attrs['out_w'] = size_list[2] else: out_shape = list(map(int, out_shape)) attrs['out_d'] = out_shape[0] attrs['out_h'] = out_shape[1] attrs['out_w'] = out_shape[2] else: if in_dynamic_mode() and isinstance(scale, Variable): scale = list(scale.numpy()) if isinstance(scale, Variable): scale.stop_gradient = True inputs["Scale"] = scale elif isinstance(scale, float) or isinstance(scale, int): if scale <= 0: raise ValueError("Attr(scale) should be greater than zero.") scale_list = [] for i in range(len(x.shape) - 2): scale_list.append(scale) attrs['scale'] = list(map(float, scale_list)) elif isinstance(scale, list) or isinstance(scale, tuple): if len(scale) != len(x.shape) - 2: raise ValueError("scale_shape length should be {} for " "input {}-D tensor.".format( len(x.shape) - 2, len(x.shape))) for value in scale: if value <= 0: raise ValueError("Attr(scale) should be greater than zero.") attrs['scale'] = list(map(float, scale)) else: raise TypeError( "Attr(scale)'s type should be float, int, list, tuple, or Tensor." 
) if in_dynamic_mode(): attr_list = [] for k, v in attrs.items(): attr_list.append(k) attr_list.append(v) dy_attr = tuple(attr_list) if resample_type == "linear": out = _C_ops.linear_interp_v2(x, *dy_attr) elif resample_type == "bilinear": out = _C_ops.bilinear_interp_v2(x, *dy_attr) elif resample_type == "trilinear": out = _C_ops.trilinear_interp_v2(x, *dy_attr) elif resample_type == "nearest": out = _C_ops.nearest_interp_v2(x, *dy_attr) elif resample_type == "bicubic": out = _C_ops.bicubic_interp_v2(x, *dy_attr) return out out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='{}_interp_v2'.format(resample_type), inputs=inputs, outputs={"Out": out}, attrs=attrs) return out
a9e03ecc89cdb623922984d3cce9b6cb114419b9
3,646,162
import datetime

import jwt


def encode_auth_token(user_id):
    """
    Generates the Auth Token
    :return: string
    """
    try:
        payload = {
            'exp': datetime.datetime.utcnow() + datetime.timedelta(days=90),
            'iat': datetime.datetime.utcnow(),
            'sub': user_id
        }
        return jwt.encode(
            payload,
            app.config.get('SECRET_KEY'),  # `app` is the module's Flask application
            algorithm='HS256'
        )
    except Exception as e:
        return e
5239b2d85e3c4c1f4c6f74297118295c0bf7d532
3,646,163
from typing import List from typing import Tuple from typing import Dict def build_docs_for_packages( current_packages: List[str], docs_only: bool, spellcheck_only: bool, for_production: bool, jobs: int, verbose: bool, ) -> Tuple[Dict[str, List[DocBuildError]], Dict[str, List[SpellingError]]]: """Builds documentation for all packages and combines errors.""" all_build_errors: Dict[str, List[DocBuildError]] = defaultdict(list) all_spelling_errors: Dict[str, List[SpellingError]] = defaultdict(list) with with_group("Cleaning documentation files"): for package_name in current_packages: console.print(f"[info]{package_name:60}:[/] Cleaning files") builder = AirflowDocsBuilder(package_name=package_name, for_production=for_production) builder.clean_files() if jobs > 1: run_in_parallel( all_build_errors, all_spelling_errors, current_packages, docs_only, for_production, jobs, spellcheck_only, verbose, ) else: run_sequentially( all_build_errors, all_spelling_errors, current_packages, docs_only, for_production, spellcheck_only, verbose, ) return all_build_errors, all_spelling_errors
c4196c139fda703c90c60507156cce8cb29da98e
3,646,164
def _inspect_output_dirs_test(ctx):
    """Test verifying output directories used by a test."""
    env = analysistest.begin(ctx)

    # Assert that the output bin dir observed by the aspect added by analysistest
    # is the same as those observed by the rule directly, even when that's
    # under a config transition and therefore not the same as the bin dir
    # used by the test rule.
    bin_path = analysistest.target_bin_dir_path(env)
    target_under_test = analysistest.target_under_test(env)
    asserts.false(env, not bin_path, "bin dir path not found.")
    asserts.false(
        env,
        bin_path == ctx.bin_dir.path,
        "bin dir path expected to differ between test and target_under_test.",
    )
    asserts.equals(env, bin_path, target_under_test[_OutputDirInfo].bin_path)
    return analysistest.end(env)
de14b2d4514792d4b2427ff3ef4fae6e7af8e31d
3,646,165
import ray


def wait(object_refs, num_returns=1, timeout=None):
    """Return a list of IDs that are ready and a list of IDs that are not.

    This method is identical to `ray.wait` except it adds support for tuples
    and ndarrays.

    Args:
        object_refs (List[ObjectRef], Tuple(ObjectRef), np.array(ObjectRef)):
            List like of object refs for objects that may or may not be ready.
            Note that these IDs must be unique.
        num_returns (int): The number of object refs that should be returned.
        timeout (float): The maximum amount of time in seconds to wait before
            returning.

    Returns:
        A list of object refs that are ready and a list of the remaining
        object IDs.
    """
    if isinstance(object_refs, (tuple, np.ndarray)):
        return ray.wait(
            list(object_refs), num_returns=num_returns, timeout=timeout)

    return ray.wait(object_refs, num_returns=num_returns, timeout=timeout)
e56ffd1700715049cc899d27735bb98da47fa2b6
3,646,166
def train( data, feature_names, tagset, epochs, optimizer, score_func=perceptron_score, step_size=1, ): """ Trains the model on the data and returns the parameters :param data: Array of dictionaries representing the data. One dictionary for each data point (as created by the make_data_point function). :param feature_names: Array of Strings. The list of feature names. :param tagset: Array of Strings. The list of tags. :param epochs: Int. The number of epochs to train :return: FeatureVector. The learned parameters. """ parameters = FeatureVector({}) # creates a zero vector gradient = get_gradient( data, feature_names, tagset, parameters, score_func ) def training_observer(epoch, parameters): """ Evaluates the parameters on the development data, and writes out the parameters to a 'model.iter'+epoch and the predictions to 'ner.dev.out'+epoch. :param epoch: int. The epoch :param parameters: Feature Vector. The current parameters :return: Double. F1 on the development data """ (_, _, f1) = evaluate( dev_data, parameters, feature_names, tagset, score_func ) return f1 # return the final parameters return optimizer( sample_num, epochs, gradient, parameters, training_observer, step_size=step_size, )
51a0a506deecf56067ef185848d7f706c9da0d3e
3,646,167
def summed_timeseries(timeseries):
    """
    Give sum of value series against timestamps for given timeseries containing
    several values per one timestamp
    :param timeseries:
    :return:
    """
    sum_timeseries = []
    for i in range(len(timeseries)):
        if len(timeseries[i]) > 1:
            sum_timeseries.append([timeseries[i][0], '%.3f' % (sum(timeseries[i][1:]))])
    return sum_timeseries
618505f8f0960900a993bb6d9196d17bf31d02a6
3,646,168
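A small usage sketch for summed_timeseries above; the sample rows are invented for illustration.

# Assumes summed_timeseries from the record above is in scope.
rows = [
    [1625097600, 1.5, 2.25, 0.25],   # timestamp followed by several values
    [1625097660, 4.0, 1.0],
]
print(summed_timeseries(rows))
# [[1625097600, '4.000'], [1625097660, '5.000']]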
import pathlib


def path_check(path_to_check):
    """
    Check that the path given as a parameter is a valid absolute path.

    :param path_to_check: string which has to be checked
    :type path_to_check: str
    :return: True if it is a valid absolute path, False otherwise
    :rtype: boolean
    """
    path = pathlib.Path(path_to_check)
    if not path.is_absolute():
        return False
    return True
41b3537b0be2c729ba993a49863df4a15119db8b
3,646,169
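A quick usage sketch for path_check above (POSIX-style paths shown; on Windows the notion of an absolute path differs).

# Assumes path_check from the record above is in scope.
print(path_check("/etc/hosts"))       # True on POSIX systems
print(path_check("relative/file"))    # False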
from typing import Union from typing import Sequence from typing import Optional from typing import Dict from typing import Any from datetime import datetime from typing import List def _path2list( path: Union[str, Sequence[str]], boto3_session: boto3.Session, s3_additional_kwargs: Optional[Dict[str, Any]], last_modified_begin: Optional[datetime.datetime] = None, last_modified_end: Optional[datetime.datetime] = None, suffix: Union[str, List[str], None] = None, ignore_suffix: Union[str, List[str], None] = None, ignore_empty: bool = False, ) -> List[str]: """Convert Amazon S3 path to list of objects.""" _suffix: Optional[List[str]] = [suffix] if isinstance(suffix, str) else suffix _ignore_suffix: Optional[List[str]] = [ignore_suffix] if isinstance(ignore_suffix, str) else ignore_suffix if isinstance(path, str): # prefix paths: List[str] = list_objects( path=path, suffix=_suffix, ignore_suffix=_ignore_suffix, boto3_session=boto3_session, last_modified_begin=last_modified_begin, last_modified_end=last_modified_end, ignore_empty=ignore_empty, s3_additional_kwargs=s3_additional_kwargs, ) elif isinstance(path, list): if last_modified_begin or last_modified_end: raise exceptions.InvalidArgumentCombination( "Specify a list of files or (last_modified_begin and last_modified_end)" ) paths = path if _suffix is None else [x for x in path if x.endswith(tuple(_suffix))] paths = path if _ignore_suffix is None else [x for x in paths if x.endswith(tuple(_ignore_suffix)) is False] else: raise exceptions.InvalidArgumentType(f"{type(path)} is not a valid path type. Please, use str or List[str].") return paths
542d41ce29f71e3209d702eab157a75fa40650c0
3,646,170
import torch
from random import uniform


def e_greedy_normal_noise(mags, e):
    """Epsilon-greedy noise

    If e>0 then with probability(adding noise) = e, multiply mags by a
    normally-distributed noise.

    :param mags: input magnitude tensor
    :param e: epsilon (real scalar s.t. 0 <= e <=1)
    :return: noise-multiplier.
    """
    if e and uniform(0, 1) <= e:
        # msglogger.info("%sRankedStructureParameterPruner - param: %s - randomly choosing channels",
        #                threshold_type, param_name)
        return torch.randn_like(mags)
    return 1
e2e9f8f49e7d3e6b2319aaa6a869f24aa3047946
3,646,171
def beam_area(*args):
    """
    Calculate the Gaussian beam area.

    Parameters
    ----------
    args: float
        FWHM of the beam. If args is a single argument, a symmetrical beam
        is assumed. If args has two arguments, the two arguments are bmaj
        and bmin, the width of the major and minor axes of the beam in that
        order.

    Return
    ------
    out: float
        Beam area. No unit conversion is performed, i.e. the unit will depend
        on the input arguments. For example, beam width in degree will return
        the beam area in square degree. Likewise, beam width in pixel will
        return the beam area in pixel.
    """
    if len(args) > 2:
        raise ValueError('Input argument must be a single beam width for a '
                         'symmetrical beam, or widths of the major and minor '
                         'axes of the beam.')
    if len(args) == 2:
        bmaj, bmin = args
    else:
        bmaj = args[0]
        bmin = bmaj

    return np.pi * bmaj * bmin / (4 * np.log(2))
93c4616018a098199a47fb25038cb88707444864
3,646,172
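A worked numerical check for beam_area above: for a symmetric Gaussian beam the area is pi*FWHM**2 / (4 ln 2), roughly 1.133 times FWHM squared, in whatever unit the FWHM is given.

# Assumes beam_area from the record above is in scope (numpy is used inside it).
print(beam_area(2.0))        # symmetric 2-unit FWHM -> pi*4/(4 ln 2) ~ 4.532
print(beam_area(3.0, 1.5))   # elliptical beam       -> pi*4.5/(4 ln 2) ~ 5.099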
def get_settlement_amounts(
        participant1,
        participant2
):
    """ Settlement algorithm

    Calculates the token amounts to be transferred to the channel participants
    when a channel is settled.

    !!! Don't change this unless you really know what you are doing.
    """
    total_available_deposit = (
        participant1.deposit +
        participant2.deposit -
        participant1.withdrawn -
        participant2.withdrawn
    )
    participant1_amount = (
        participant1.deposit +
        participant2.transferred -
        participant1.withdrawn -
        participant1.transferred
    )
    participant1_amount = max(participant1_amount, 0)
    participant1_amount = min(participant1_amount, total_available_deposit)
    participant2_amount = total_available_deposit - participant1_amount

    participant1_locked = min(participant1_amount, participant1.locked)
    participant2_locked = min(participant2_amount, participant2.locked)

    participant1_amount = max(participant1_amount - participant1.locked, 0)
    participant2_amount = max(participant2_amount - participant2.locked, 0)

    assert total_available_deposit == (
        participant1_amount +
        participant2_amount +
        participant1_locked +
        participant2_locked
    )

    return SettlementValues(
        participant1_balance=participant1_amount,
        participant2_balance=participant2_amount,
        participant1_locked=participant1_locked,
        participant2_locked=participant2_locked,
    )
e4bddfccbced0235b1d5265519208cef5167013d
3,646,173
import time


def timefunc(f):
    """Simple timer function to identify slow spots in algorithm.

    Just import function and put decorator @timefunc on top of definition
    of any function that you want to time.
    """
    def f_timer(*args, **kwargs):
        start = time.time()
        result = f(*args, **kwargs)
        end = time.time()
        print(f.__name__, 'took {:.2f} seconds'.format(end - start))
        return result, (end - start)
    return f_timer
56d5d052fa559e1b7c797ed00ee1b82c8e2126d6
3,646,174
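A usage sketch for timefunc above; note the wrapper returns a (result, elapsed_seconds) tuple rather than the bare result, so callers have to unpack it.

import time

# Assumes timefunc from the record above is in scope.
@timefunc
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

value, elapsed = slow_add(2, 3)   # prints "slow_add took 0.10 seconds"
print(value, elapsed)             # 5 0.10...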
def rdr_geobox(rdr) -> GeoBox:
    """ Construct GeoBox from opened dataset reader.
    """
    h, w = rdr.shape
    return GeoBox(w, h, rdr.transform, rdr.crs)
0c22ff869faa2988e63a4b13d17c8f5ba7343ffc
3,646,175
def sequence(lst: Block[Result[_TSource, _TError]]) -> Result[Block[_TSource], _TError]:
    """Execute a sequence of result returning commands and collect the
    sequence of their response."""
    return traverse(identity, lst)
d228237edc95a4d2c4ef1c9591af41a639c42a6d
3,646,176
def keyword_dct_from_block(block, formatvals=True):
    """ Take a section with keywords defined and build
        a dictionary for the keywords

        assumes a block that is a list of key-val pairs
    """

    key_dct = None

    if block is not None:
        block = ioformat.remove_whitespace(block)
        key_val_blocks = keyword_value_blocks(block)
        if key_val_blocks is not None:
            key_dct = {}
            for key, val in key_val_blocks:
                if formatvals:
                    formtd_key, formtd_val = format_keyword_values(key, val)
                    key_dct[formtd_key] = formtd_val
                else:
                    key_dct[key] = val

    return key_dct
929defe8d07fff4bf50f1167c0749a6e19d9ecb2
3,646,177
def get_geocode(args):
    """
    Returns GPS coordinates from Google Maps for a given location.
    """
    result = Geocoder.geocode(args.address)
    lat, lon = result[0].coordinates
    lat = round(lat, 6)
    lon = round(lon, 6)
    return (lat, lon)
1ef5d89a1157bbbe381ac1e4500e198735b71898
3,646,178
def mice(data, **kwargs):
    """Multivariate Imputation by Chained Equations

    Reference:
        Buuren, S. V., & Groothuis-Oudshoorn, K. (2011). Mice: Multivariate
        Imputation by Chained Equations in R. Journal of Statistical Software,
        45(3). doi:10.18637/jss.v045.i03

    Implementation follows the main idea from the paper above. Differs in
    decision of which variable to regress on (here, I choose it at random).
    Also differs in stopping criterion (here the model stops after change in
    prediction from previous prediction is less than 10%).

    Parameters
    ----------
    data: numpy.ndarray
        Data to impute.

    Returns
    -------
    numpy.ndarray
        Imputed data.
    """
    null_xy = find_null(data)

    # Add a column of zeros to the index values
    null_xyv = np.append(null_xy, np.zeros((np.shape(null_xy)[0], 1)), axis=1)

    null_xyv = [[int(x), int(y), v] for x, y, v in null_xyv]
    temp = []
    cols_missing = set([y for _, y, _ in null_xyv])

    # Step 1: Simple Imputation, these are just placeholders
    for x_i, y_i, value in null_xyv:
        # Column containing nan value without the nan value
        col = data[:, [y_i]][~np.isnan(data[:, [y_i]])]
        new_value = np.mean(col)
        data[x_i][y_i] = new_value
        temp.append([x_i, y_i, new_value])
    null_xyv = temp

    # Step 5: Repeat step 2 - 4 until convergence (the 100 is arbitrary)
    converged = [False] * len(null_xyv)
    while not all(converged):
        # Step 2: Placeholders are set back to missing for one variable/column
        dependent_col = int(np.random.choice(list(cols_missing)))
        missing_xs = [int(x) for x, y, value in null_xyv if y == dependent_col]

        # Step 3: Perform linear regression using the other variables
        x_train, y_train = [], []
        for x_i in (x_i for x_i in range(len(data)) if x_i not in missing_xs):
            x_train.append(np.delete(data[x_i], dependent_col))
            y_train.append(data[x_i][dependent_col])
        model = LinearRegression()
        model.fit(x_train, y_train)

        # Step 4: Missing values for the missing variable/column are replaced
        # with predictions from our new linear regression model
        temp = []
        # For null indices with the dependent column that was randomly chosen
        for i, (x_i, y_i, value) in enumerate(null_xyv):
            if y_i == dependent_col:
                # Row 'x' without the nan value
                new_value = model.predict(np.delete(data[x_i], dependent_col))
                data[x_i][y_i] = new_value.reshape(1, -1)
                temp.append([x_i, y_i, new_value])
                delta = (new_value - value) / value
                if delta < 0.1:
                    converged[i] = True
        null_xyv = temp
    return data
e2046350d071a1cc17bc23077c0be2d0939371b5
3,646,179
from sys import flags


def SaveFlagValues():
    """Returns copy of flag values as a dict.

    Returns:
        Dictionary mapping keys to values. Keys are flag names, values are
        corresponding __dict__ members. E.g. {'key': value_dict, ...}.
    """
    if hasattr(flags, '_FlagValues'):  # pylint:disable=protected-access
        # In OSS code we use tensorflow/python/platform/flags.py:_FlagValues
        # which is not iterable.
        flag_dict = FLAGS.__dict__['__flags']
        # Make a shallow copy of the flags.
        return {name: flag_dict[name] for name in flag_dict}
    else:
        # FLAGS is iterable and provides __getitem__.
        return {name: _CopyFlagDict(FLAGS[name]) for name in FLAGS}
c49bfee49a9c2531c4315831e247b83f2086b592
3,646,180
def estimate_purity_err(dim: int, op_expect: np.ndarray, op_expect_var: np.ndarray, renorm=True): """ Propagate the observed variance in operator expectation to an error estimate on the purity. This assumes that each operator expectation is independent. :param dim: dimension of the Hilbert space :param op_expect: array of estimated expectations of each operator being measured :param op_expect_var: array of estimated variance for each operator expectation :param renorm: flag that provides error for the renormalized purity :return: purity given the operator expectations """ # TODO: incorporate covariance of observables estimated simultaneously; # see covariances_of_all_iz_obs # TODO: check validitiy of approximation |op_expect| >> 0, and functional form below (squared?) var_of_square_op_expect = (2 * np.abs(op_expect)) ** 2 * op_expect_var # TODO: check if this adequately handles |op_expect| >\> 0 need_second_order = np.isclose([0.] * len(var_of_square_op_expect), var_of_square_op_expect, atol=1e-6) var_of_square_op_expect[need_second_order] = op_expect_var[need_second_order]**2 purity_var = (1 / dim) ** 2 * (np.sum(var_of_square_op_expect)) if renorm: purity_var = (dim / (dim - 1.0)) ** 2 * purity_var return np.sqrt(purity_var)
da317a0f59ce2a13c6c3842a255acfd63a54046c
3,646,181
from functools import reduce def get_next_code(seen, server_ticket=0): """Find next unused assertion code. Called by: SConstruct and main() Since SConstruct calls us, codes[] must be global OR WE REPARSE EVERYTHING """ if not codes: (_, _, seen) = read_error_codes() if server_ticket: # Each SERVER ticket is allocated 100 error codes ranging from TICKET_00 -> TICKET_99. def generator(seen, ticket): avail_codes = list(range(ticket * 100, (ticket + 1) * 100)) avail_codes.reverse() while avail_codes: code = avail_codes.pop() if str(code) in seen: continue yield code return "No more available codes for ticket. Ticket: {}".format(ticket) return generator(seen, server_ticket) # No server ticket. Return a generator that counts starting at highest + 1. highest = reduce(lambda x, y: max(int(x), int(y)), (loc.code for loc in codes)) return iter(range(highest + 1, MAXIMUM_CODE))
b2aeef05725137208e8aaef213f1bf68eb673e06
3,646,182
import asyncio


def getEnergyUsage():
    """Query plug for energy usage data. Runs as async task.

    :return: json with device energy data
    """
    energy_data = asyncio.run(plug.get_emeter_realtime())
    return energy_data
43fe5814de6776052c8c48ac65e9df8893956ef6
3,646,183
def get_sequence_from_kp(midi): """ Get the reduced chord sequence from a kp KP-corpus file. Parameters ========== midi : pretty_midi A pretty_midi object representing the piece to parse. Returns ======= chords : list The reduced chord sequence from the given piece. times : list The time of each chord in chords. """ def convert_chord_kp(chord): """ Convert the given chord from a string (read from the KP-corpus), to a tonic and quality. Parameters ========== chord : string A string representation of a chord. Returns ======= tonic : int The tonic of the chord, where 0 represents C. A chord with no tonic returns None here. quality : int The quality of chord, where 0 is major, 1 is minor, and 2 is diminished. Others are None. """ global tonic_map, accidental_map chord = chord.split('_') tonic = tonic_map[chord[0][0]] if tonic is not None: for accidental in chord[0][1:]: tonic += accidental_map[accidental] tonic %= 12 quality = quality_map[chord[1]] return tonic, quality return get_reduced_chord_sequence([convert_chord_kp(lyric.text) for lyric in midi.lyrics], [lyric.time for lyric in midi.lyrics])
fd11658607e44151fbd6eaec08bff4ce510e8ba5
3,646,184
def get_bw_range(features):
    """
    Get the rule-of-thumb bandwidth and a range of bandwidths on a log scale
    for the Gaussian RBF kernel.

    :param features: Features to use to obtain the bandwidths.
    :return: Tuple consisting of:
        * rule_of_thumb_bw: Computed rule-of-thumb bandwidth.
        * bws: List of bandwidths on a log scale.
    """
    dists = sklearn.metrics.pairwise.pairwise_distances(features).reshape(-1)
    rule_of_thumb_bw = np.median(dists)
    gammas = np.logspace(np.log(0.5/np.percentile(dists, 99)**2),
                         np.log(0.5/np.percentile(dists, 1)**2),
                         10, base=np.e)
    bws = np.sqrt(1/(2*gammas))
    return rule_of_thumb_bw, bws
2e954badf9e529bd0c62449c34a631d5be87950b
3,646,185
def gen_endpoint(endpoint_name, endpoint_config_name):
    """
    Generate the endpoint resource
    """
    endpoint = {
        "SagemakerEndpoint": {
            "Type": "AWS::SageMaker::Endpoint",
            "DependsOn": "SagemakerEndpointConfig",
            "Properties": {
                "EndpointConfigName": {
                    "Fn::GetAtt": ["SagemakerEndpointConfig", "EndpointConfigName"]
                },
                "EndpointName": endpoint_name,
                "RetainAllVariantProperties": False,
            },
        },
    }
    return endpoint
bc658e6aebc41cfddefe0e77b2d65748a84789c5
3,646,186
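A short usage sketch for gen_endpoint above; note that the endpoint_config_name argument is accepted but not referenced in the returned template fragment, which instead resolves the config name via Fn::GetAtt.

import json

# Assumes gen_endpoint from the record above is in scope.
fragment = gen_endpoint("my-endpoint", "my-endpoint-config")
print(json.dumps(fragment, indent=2))
# "EndpointConfigName" is taken from the SagemakerEndpointConfig resource,
# not from the second argument.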
import yaml
import os


def load_config():
    """ Load configuration and set debug flag for this environment """

    # Load global configuration
    config = yaml.load(open(os.path.abspath('./conf/global.yaml'), 'r').read())

    # Detect development or production environment and configure accordingly
    if os.environ['SERVER_SOFTWARE'].startswith('Dev'):
        conf_f = open(os.path.abspath('./conf/development.yaml'), 'r')
        config = dict(config.items() + yaml.load(conf_f.read()).items())
    else:
        conf_f = open(os.path.abspath('./conf/production.yaml'), 'r')
        config = dict(config.items() + yaml.load(conf_f.read()).items())

    return config
2b629de582cc527a92ad3d2a314c989ec939fbc1
3,646,187
def read_dataframe(df, smiles_column, name_column, data_columns=None):
    """Read molecules from a dataframe.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to read molecules from.
    smiles_column : str
        Key of column containing SMILES strings or rdkit Mol objects.
    name_column : str
        Key of column containing molecule name strings.
    data_columns : list, optional
        A list of column keys containing data to retain in molecule graph
        nodes. The default is None.

    Returns
    -------
    DataFrameMolSupplier

    """
    return DataFrameMolSupplier(df, smiles_column, name_column, data_columns)
d27e01e38132818f0bb740fc0033f22949e9fa79
3,646,188
from typing import List def dummy_awsbatch_cluster_config(mocker): """Generate dummy cluster.""" image = Image(os="alinux2") head_node = dummy_head_node(mocker) compute_resources = [ AwsBatchComputeResource(name="dummy_compute_resource1", instance_types=["dummyc5.xlarge", "optimal"]) ] queue_networking = AwsBatchQueueNetworking(subnet_ids=["dummy-subnet-1"], security_groups=["sg-1", "sg-2"]) queues = [AwsBatchQueue(name="queue1", networking=queue_networking, compute_resources=compute_resources)] scheduling = AwsBatchScheduling(queues=queues) # shared storage shared_storage: List[Resource] = [] shared_storage.append(dummy_fsx()) shared_storage.append(dummy_ebs("/ebs1")) shared_storage.append(dummy_ebs("/ebs2", volume_id="vol-abc")) shared_storage.append(dummy_ebs("/ebs3", raid=Raid(raid_type=1, number_of_volumes=5))) shared_storage.append(dummy_efs("/efs1", file_system_id="fs-efs-1")) shared_storage.append(dummy_raid("/raid1")) cluster = _DummyAwsBatchClusterConfig( image=image, head_node=head_node, scheduling=scheduling, shared_storage=shared_storage ) cluster.custom_s3_bucket = "s3://dummy-s3-bucket" cluster.additional_resources = "https://additional.template.url" cluster.config_version = "1.0" cluster.iam = ClusterIam() cluster.tags = [Tag(key="test", value="testvalue")] return cluster
ff9c24e90faf92758a83c3529d261c901b614b01
3,646,189
def float_or_none(val, default=None):
    """Convert `val` to float, returning `default` when `val` is None or
    cannot be parsed as a float.
    """
    if val is None:
        return default
    else:
        try:
            ret = float(val)
        except ValueError:
            ret = default
        return ret
00beabbd2fe4633e6738fea2220d55d096bfa91e
3,646,190
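A usage sketch for float_or_none above showing the three cases it handles.

# Assumes float_or_none from the record above is in scope.
print(float_or_none("3.14"))             # 3.14
print(float_or_none(None, default=0))    # 0
print(float_or_none("oops", default=0))  # 0  (ValueError swallowed)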
def user_get_year_rating(user_id: int):
    """
    Get the stored year and rating for the given user
    :param user_id:
    :return: (year, rating) row, or None on error
    """
    try:
        con = psconnect(db_url, sslmode='require')
        cursor = con.cursor()
        cursor.execute("SELECT year,rating FROM users WHERE uid = %s", (user_id,))
        result = cursor.fetchone()
        con.close()
        return result
    except psError as e:
        print(e)
4911d5d0ea8f99b1d889d048aa31825452b3f3fe
3,646,191
from pymco import message


def msg_with_data(config, filter_):
    """Creates :py:class:`pymco.message.Message` instance with some data."""
    # Importing here since py-cov will ignore code imported on conftest files
    # imports
    with mock.patch('time.time') as time:
        with mock.patch('hashlib.sha1') as sha1:
            time.return_value = ctxt.MSG['msgtime']
            sha1.return_value.hexdigest.return_value = ctxt.MSG['requestid']
            body = {
                ':action': 'runonce',
                ':data': {':noop': True, ':process_results': True},
                ':ssl_msgtime': 1421878604,
                ':ssl_ttl': 60,
            }
            return message.Message(body=body,
                                   agent='puppet',
                                   filter_=filter_,
                                   config=config)
3bb50370512e58fce6c098ed2144b7406cc6eab4
3,646,192
import os


def get_image_names():
    """
    Returns (image_names, covid_image_names, normal_image_names, virus_image_names),
    where each is a list of image names
    """
    image_names = os.listdir(DEFAULT_IMG_PATH_UNEDITED)

    # Remove directories
    image_names.remove("COVID-19")
    image_names.remove("Normal")
    image_names.remove("ViralPneumonia")

    covid_image_names = os.listdir(COVID_IMG_PATH_UNEDITED)
    normal_image_names = os.listdir(NORMAL_IMG_PATH_UNEDITED)
    virus_image_names = os.listdir(VIRUS_IMG_PATH_UNEDITED)

    return image_names, covid_image_names, normal_image_names, virus_image_names
42fcd4d570f90a9b22217559c1d942f399852363
3,646,193
def api_connect_wifi():
    """ Connect to the specified wifi network """
    res = network.wifi_connect()
    return jsonify(res)
1a8832bb67bb1d5b73357b9b1359f6c1835f3c85
3,646,194
from typing import List


async def get_sinks_metadata(sinkId: str) -> List:  # pylint: disable=unused-argument
    """Get metadata attached to sinks

    This adapter does not implement metadata. Therefore this will always
    result in an empty list!
    """
    return []
458b674cc59a80572fd9676aec81d0a7c353a8f3
3,646,195
def fn_lin(x_np, *, multiplier=3.1416):
    """ Linear function """
    return x_np * multiplier
e64f112b486ea6a0bdf877d67c98417ae90f03b3
3,646,196
def get_MACD(df, column='Close'):
    """Compute the 12- and 26-period EMAs of `column` and derive the MACD,
    Signal and Histogram columns on `df`."""
    df['EMA-12'] = df[column].ewm(span=12, adjust=False).mean()
    df['EMA-26'] = df[column].ewm(span=26, adjust=False).mean()

    df['MACD'] = df['EMA-12'] - df['EMA-26']
    df['Signal'] = df['MACD'].ewm(span=9, adjust=False).mean()
    df['Histogram'] = df['MACD'] - df['Signal']

    return df
b5eb25c9a5097fb2a0d874d62b6ab1957bbe3f11
3,646,197
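A usage sketch for get_MACD above, run on a small synthetic price series (the numbers are illustrative only).

import pandas as pd

# Assumes get_MACD from the record above is in scope.
prices = pd.DataFrame({"Close": [10, 11, 12, 11, 13, 14, 13, 15, 16, 15]})
result = get_MACD(prices)
print(result[["Close", "MACD", "Signal", "Histogram"]].tail(3))
# Adds EMA-12, EMA-26, MACD, Signal and Histogram columns in place.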
def from_pyGraphviz_agraph(A, create_using=None): """Returns a EasyGraph Graph or DiGraph from a PyGraphviz graph. Parameters ---------- A : PyGraphviz AGraph A graph created with PyGraphviz create_using : EasyGraph graph constructor, optional (default=None) Graph type to create. If graph instance, then cleared before populated. If `None`, then the appropriate Graph type is inferred from `A`. Examples -------- >>> K5 = eg.complete_graph(5) >>> A = eg.to_pyGraphviz_agraph(K5) >>> G = eg.from_pyGraphviz_agraph(A) Notes ----- The Graph G will have a dictionary G.graph_attr containing the default graphviz attributes for graphs, nodes and edges. Default node attributes will be in the dictionary G.node_attr which is keyed by node. Edge attributes will be returned as edge data in G. With edge_attr=False the edge data will be the Graphviz edge weight attribute or the value 1 if no edge weight attribute is found. """ if create_using is None: if A.is_directed(): if A.is_strict(): create_using = eg.DiGraph else: create_using = eg.MultiDiGraph else: if A.is_strict(): create_using = eg.Graph else: create_using = eg.MultiGraph # assign defaults N = eg.empty_graph(0, create_using) if A.name is not None: N.name = A.name # add graph attributes N.graph.update(A.graph_attr) # add nodes, attributes to N.node_attr for n in A.nodes(): str_attr = {str(k): v for k, v in n.attr.items()} N.add_node(str(n), **str_attr) # add edges, assign edge data as dictionary of attributes for e in A.edges(): u, v = str(e[0]), str(e[1]) attr = dict(e.attr) str_attr = {str(k): v for k, v in attr.items()} if not N.is_multigraph(): if e.name is not None: str_attr["key"] = e.name N.add_edge(u, v, **str_attr) else: N.add_edge(u, v, key=e.name, **str_attr) # add default attributes for graph, nodes, and edges # hang them on N.graph_attr N.graph["graph"] = dict(A.graph_attr) N.graph["node"] = dict(A.node_attr) N.graph["edge"] = dict(A.edge_attr) return N
66f57a2864f87342c84452336da647fb7489ec66
3,646,198
from typing import Collection
import copy


def get_textbox_rectangle_from_pane(pane_rectangle: GeometricRectangle,
                                    texts: Collection[str],
                                    direction: str) -> GeometricRectangle:
    """Split `pane_rectangle` evenly among `texts` along `direction`.

    Args:
        pane_rectangle: rectangle occupied by the whole pane
        texts: the text entries that will share the pane
        direction: 'right' or 'down', the axis along which boxes are laid out

    Returns:
        A rectangle sized for a single textbox within the pane.
    """
    num_boxes: int = len(texts)
    dimensions = copy.deepcopy(pane_rectangle.dimensions)
    if direction == 'right':
        dimensions.width /= num_boxes
    elif direction == 'down':
        dimensions.height /= num_boxes
    else:
        raise InvalidDirectionError(f'direction must be "right" or "down": {direction}')
    return GeometricRectangle(top_left=pane_rectangle.top_left, dimensions=dimensions)
9e0a3bb2f0a93312d17096fe36d1b0529b5b47e6
3,646,199