Columns: content (string, length 35 to 762k) · sha1 (string, length 40) · id (int64, 0 to 3.66M)
def find_revert_op(input_state_index: int):
    """Looks up in the Cayley table the operation needed to reset the state to ground state from input state_tracker

    :param input_state_index: Index of the current state tracker
    :return: index of the next Clifford to apply to invert RB sequence
    """
    for i in range(len(c1_ops)):
        if c1_table[input_state_index][i] == 0:
            return i
cba4a4f764f02abbfb14527cff4f1470d0c6ff3a
3,634,200
def _column_number_to_letters(number):
    """
    Converts given column number into column letters.

    Right shifts the column index by 26 to find column letters in reverse
    order. These numbers are 1-based, and can be converted to ASCII
    ordinals by adding 64.

    Parameters
    ----------
    number : int
        Column number to convert to column letters.

    Returns
    -------
    unicode
        Column letters.

    References
    ----------
    :cite:`OpenpyxlDevelopers2019`

    Examples
    --------
    # Doctests skip for Python 2.x compatibility.
    >>> _column_number_to_letters(128)  # doctest: +SKIP
    'DX'
    """

    assert 1 <= number <= 18278, (
        'Column number {0} must be in range [1, 18278]!'.format(number))

    letters = []
    while number > 0:
        number, remainder = divmod(number, 26)
        if remainder == 0:
            remainder = 26
            number -= 1
        letters.append(chr(remainder + 64))

    return ''.join(reversed(letters))
c9a68bcd32c8f254af322bc61e447cfae61cb6d2
3,634,201
import gurobipy as gb


def toGRBFromStr():
    """
    Return a program-wide constant map from comparison strings to Gurobi
    senses (a dict that should never change).
    """
    return {"=": gb.GRB.EQUAL,
            "le": gb.GRB.LESS_EQUAL,
            "ge": gb.GRB.GREATER_EQUAL}
7fb8a522d787716d3006722511d7afb3c5378d3a
3,634,202
def _generate_mea_comment(obj: GamObject, object_module: GamObject):
    """
    Check whether the object has a module, then generate the measurement comment.

    Args:
        obj (GamObject): The object.
        object_module (GamObject): The object module, if there is one.

    Returns:
        str: The measurement comment.
    """
    type_name = obj.ob_objecttype.ot_name
    class_name = obj.ob_objecttype.ot_objectclass.oc_name

    # If object has a module, mention this in the mea. comment
    if object_module is not None:
        module_type = object_module.ob_objecttype.ot_id
        module_name = "SLD" if module_type == DBTypeIDs.SLD else "GCM" if module_type == DBTypeIDs.GCM else "Module"
        mea_comment = f'{module_name} for {obj.ob_id} "{obj.ob_name}" ({type_name} - {class_name}) via HLM PV IMPORT'
    else:
        mea_comment = f'"{obj.ob_name}" ({type_name} - {class_name}) via HLM PV IMPORT'

    return mea_comment
89a119a542c14e5edc745878127503b0f996007e
3,634,203
def master_do(func, *args, **kwargs):
    """Helper for calling a function only on the rank-0 process in DDP."""
    try:
        rank = dist.get_rank()
        if rank == 0:
            return func(*args, **kwargs)
    except AssertionError:
        # not in DDP setting, just do as usual (return added so both paths
        # propagate the result)
        return func(*args, **kwargs)
befe0d157591c10cb6e590a82e5c3f5253fe4663
3,634,204
def news_msg(result):
    """ Interface Function for news intent """
    campus_publication = ""
    news_result = []
    try:
        campus_publication = result.parameters['club']
        campus_publication = campus_publication.lower()
    except BaseException:
        return "I couldn't get any news from the publication requested."

    if "bwog" in campus_publication:
        news_result = make_bwog_feed()
    elif "spectator" in campus_publication:
        news_result = make_spec_feed()
    else:
        news_result = make_lion_feed()

    # if no results returned
    if news_result == []:
        news_result = ("I couldn't get any news "
                       "updates from the publication requested.")

    response = Template.List(elements=news_result, top_element_style='large')
    return response
a60df1affbfda2ec68c1d4c27daaa495002e5780
3,634,205
import boto3


def init_aws_client():
    """Initializes and returns AWS boto3 client object"""
    client = boto3.client("network-firewall")
    return client
28d06097d97e4a364beff767cc0177922a3aff88
3,634,206
def verify_file_integrity(fchunk):
    """
    @TODO: implement
    """
    logger.debug("Verify md5sum...{}".format(fchunk))
    return True
1b5925799eeb00b7710dc3f8f3d6371a6bb5f53b
3,634,207
def _chunk_member_lag(chunk, repl_member_list, primary_optimedates, test_run_indices):
    """Helper function to compute secondary lag from values in a chunk

    :param collection.OrderedDict chunk: FTDC JSON chunk
    :param list[str] repl_member_list: list of all members in the replSet
    :param list primary_optimedates: optimeDate values for the primary
    :param list[int] test_run_indices: indices of samples within the test run
    :rtype: dict (lag values for the secondaries)
    """
    collect_chunk_lag = {}
    for member in repl_member_list:
        member_optimedate_key = ('replSetGetStatus', 'members', member, 'optimeDate')
        if member_optimedate_key not in chunk:
            break
        member_optimedate_values = chunk[member_optimedate_key]
        member_lag = []
        for index in test_run_indices:
            secondary_optimedate = member_optimedate_values[index]
            lag = primary_optimedates[index] - secondary_optimedate
            member_lag.append(lag)
        collect_chunk_lag[member] = member_lag
    return collect_chunk_lag
115ba53505d5bcbb9e0c1cdf0eab675fae73e568
3,634,208
import numpy as np


def graph_build_split(X, edge_index, node_mask: np.ndarray):
    """ subgraph building through splitting the selected nodes from the original graph """
    row, col = edge_index
    # keep only edges whose endpoints are both selected
    edge_mask = (node_mask[row] == 1) & (node_mask[col] == 1)
    ret_edge_index = edge_index[:, edge_mask]
    return X, ret_edge_index
45b62a79b6c35ceda181e15d7a4a00dda8146fc6
3,634,209
def get_pagination_class():
    """
    Returns custom pagination class, set in settings
    """
    pagination_class = LIKES_REST_PAGINATION_CLASS
    if pagination_class:
        try:
            return import_string(pagination_class)
        except ImportError:
            pass
    return api_settings.DEFAULT_PAGINATION_CLASS
57c12519b1f4a6a2a4e5533176e9901e3f468bf9
3,634,210
def get_us_presidents_gender(president):
    """Given the name of a US President, return his gender, or None if not
    found.
    """
    data = load('us_president_gender')
    for row in data:
        if row['president'] == president:
            return row['gender']
    return None
46bca2773092fa31a53ebb774cbcca6d8863d1a8
3,634,211
import warnings


def ProtoFromTfRecordFiles(files, max_entries=10000, features=None,
                           is_sequence=False, iterator_options=None):
    """Creates a feature statistics proto from a set of TFRecord files.

    Args:
      files: A list of dicts describing files for each dataset for the proto.
        Each entry contains a 'path' field with the path to the TFRecord file
        on disk and a 'name' field to identify the dataset in the proto.
      max_entries: The maximum number of examples to load from each dataset
        in order to create the proto. Defaults to 10000.
      features: A list of strings that is a whitelist of feature names to
        create feature statistics for. If set to None then all features in
        the dataset are analyzed. Defaults to None.
      is_sequence: True if the input data from 'tables' are
        tf.SequenceExamples, False if tf.Examples. Defaults to false.
      iterator_options: Options to pass to the iterator that reads the
        examples. Defaults to None.

    Returns:
      The feature statistics proto for the provided files.
    """
    warnings.warn(
        'Use GenericFeatureStatisticsGenerator class method instead.',
        DeprecationWarning)
    return FeatureStatisticsGenerator().ProtoFromTfRecordFiles(
        files, max_entries, features, is_sequence, iterator_options)
d0891996a1f1889b575cd042a78eb0b17a95b4c5
3,634,212
def smallest_evenly_divisible(min_divisor, max_divisor, minimum_dividend=0):
    """Returns the smallest number that is evenly divisible (divisible with
    no remainder) by all of the numbers from `min_divisor` to `max_divisor`.

    If a `minimum_dividend` is provided, only dividends greater than this
    number will be evaluated.
    """
    # Check every divisor in the range. The original
    # `range(max_divisor, 0, -min_divisor)` used min_divisor as the *step*,
    # which skips divisors whenever min_divisor > 1.
    factors = range(max_divisor, min_divisor - 1, -1)
    while True:
        counter = 0
        for i in factors:
            if minimum_dividend % i != 0:
                break
            else:
                counter += 1
        if counter == len(factors):
            return minimum_dividend
        minimum_dividend += 1
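A quick sanity check of the function above (hypothetical usage, not from the source): the smallest positive number divisible by everything from 1 to 10 is 2520. Note that the default minimum_dividend of 0 is trivially divisible by everything, so a positive starting point must be supplied.

    assert smallest_evenly_divisible(1, 10, minimum_dividend=1) == 2520  # lcm(1..10)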
fa23d9a413a0909bfc05d7eb928aec8ade4cb06f
3,634,213
import tensorflow_probability as tfp


def pct75(input_tensor, weights_tensor):
    """Compute the 75th percentile of a given tensor."""
    del weights_tensor
    return tfp.stats.percentile(input_tensor, 75)
8c447827559841d50e02a68b99da6d7abf422fda
3,634,214
from copy import copy


def get_bad_sequences(GoodSequenceList, Bad1st_list, BadOther_list):
    """Take each good sequence and implant a codec error at any possible
    byte position.

    RETURNS: List of bad sequences.
    """
    result = []
    for sequence in GoodSequenceList:
        # Implant a couple of bad sequences based on the good sequence.
        for i, lexatom in enumerate(sequence):
            if i == 0:
                bad_lexatoms = Bad1st_list
            else:
                bad_lexatoms = BadOther_list
            for bad in bad_lexatoms:
                bad_copy = copy(sequence)
                bad_copy[i] = bad
                result.append(bad_copy)
    return result
52ce3a1c9ee468c7b2c39d5113d783667884deba
3,634,215
def factorial(n):
    """
    Returns the factorial of n

    Parameters
    ----------
    n : int
        denotes the non-negative integer for which factorial value is needed
    """
    if n < 0:
        raise NotImplementedError(
            "Enter a valid non-negative integer"
        )
    if n == 0 or n == 1:
        return 1
    elif n == 2:
        return 2
    return n * factorial(n - 1)
fe0b7100e1292d1e96daf18545d9fdfb931f9f74
3,634,216
import time

from flask import current_app
from redis import StrictRedis
from redis.exceptions import ConnectionError


def check_redis(*args, **kwargs):
    """Checks if configured Redis instance is pingable."""
    try:
        r = StrictRedis.from_url(current_app.config['CACHE_REDIS_URL'])
        t1 = time.time()
        res = r.ping()
        t2 = time.time()
        return 'redis', res, {'time': t2 - t1}
    except (ConnectionError, ValueError) as e:
        return 'redis', False, {'error': str(e)}
10ba4595b869747def8592b901125a197f794fea
3,634,217
def Divide(a, b):
    """Returns the quotient, or NaN if the divisor is zero."""
    if b == 0:
        return float('nan')
    return a / float(b)
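One usage note for the function above: NaN compares unequal to everything, including itself, so test the zero-divisor result with math.isnan rather than ==.

    import math

    assert Divide(1, 4) == 0.25
    assert math.isnan(Divide(1, 0))  # NaN != NaN, so == would never match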
3ed0b07949bb802177e52bf8d04e9dfde92ab2de
3,634,218
import os


def craft_item():
    """Get craft item from environment variable"""
    return os.environ.get('CRAFT_ITEM', None)
2f6f940ad83023dc21f68c2212c98c0e13d8d0e4
3,634,219
import numpy as np
from six import string_types  # Python 2/3 compat for str types


def guess_typecode(value):
    """Guess Gwyddion typecode for `value`."""
    if np.isscalar(value) and hasattr(value, 'item'):
        # Seems to be a numpy type -- convert
        value = value.item()

    if isinstance(value, GwyObject):
        return 'o'
    elif isinstance(value, string_types):
        if len(value) == 1:
            return 'c'
        else:
            return 's'
    elif type(value) is bool:
        return 'b'
    elif type(value) is int:
        if abs(value) < 2**31:
            return 'i'
        else:
            return 'q'
    elif type(value) is float:
        return 'd'
    elif type(value) is np.ndarray:
        t = value.dtype.type
        if t == np.dtype('f8'):
            return 'D'
        elif t == np.dtype('i8'):
            return 'Q'
        elif t == np.dtype('i4'):
            return 'I'
        elif t == np.dtype('S'):
            return 'C'
        else:
            raise NotImplementedError
    else:
        raise NotImplementedError('{}, type: {}'.format(value, type(value)))
3440c8008596d479d084e8fe68ca941ab6b23c17
3,634,220
from typing import Any, Dict


def create_region(entity: Entity, author: Identity) -> Identity:
    """Create a region"""
    custom_properties: Dict[str, Any] = {"x_opencti_location_type": "Region"}
    return Location(
        created_by_ref=author,
        name=entity.value,
        region=entity.value,
        custom_properties=custom_properties,
    )
c1c21da1568f030b9b1a84263ddc3547226233b2
3,634,221
def post(post_id):
    """
    Instantiate a comment form and pass it into post.html.

    :param post_id:
    :return:
    """
    post = Post.query.get_or_404(post_id)
    form = CommentForm()
    if form.validate_on_submit():
        comment = Comment(comment_body=form.comment_body.data,
                          post=post,
                          author=current_user._get_current_object())
        db.session.add(comment)
        db.session.commit()
        flash('Your comment has been published.')
        # page is set to -1, a special page number that requests the last
        # page of comments, so the freshly submitted comment shows up.
        return redirect(url_for('main.post', post_id=post.post_id, page=-1))
    page = request.args.get('page', 1, type=int)
    if page == -1:
        # integer division so `paginate` receives an int page number
        page = (post.comments.count() - 1) // current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
    pagination = post.comments.order_by(Comment.comment_timestamp.asc()).paginate(
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    comments = pagination.items
    # A list must be passed here: only then can the _posts.html template,
    # shared with index.html and user.html, be reused on this page.
    return render_template('post.html', posts=[post], form=form,
                           comments=comments, pagination=pagination)
6b209ef5fd464ffbc0fa71c259d4105dbeb4f79d
3,634,222
import numpy as np


def noisify(chse, dE, dt, dU):
    """
    Create a noisified chain

    Parameters
    ----------
    chse : Chain instance
    dE : float
        spread in on-site energies
    dt : float
        spread in hoppings
    dU : float
        spread in interaction
    """
    dEs = np.zeros((chse.N,))
    dts = np.zeros((chse.N,))
    dUs = np.zeros((chse.N,))
    if dE > 0:
        dEs = np.random.uniform(-dE, dE, size=(chse.N,))
        dEs = dEs - np.average(dEs)
    if dt > 0:
        dts = np.random.normal(scale=dt, size=(chse.N - 1,))
    if dU > 0:
        dUs = np.random.normal(scale=dU, size=(chse.N,))
    # print(dEs)
    c = Chain(chse.N, chse.js,
              [chse.Es[i] + dEs[i] for i in range(chse.N)],
              [chse.ts[i] + dts[i] for i in range(chse.N - 1)],
              [chse.Us[i] + dUs[i] for i in range(chse.N)],
              chse.gs, chse.parasites[0].strength)
    c.info['name'] = 'noisychain'
    return c
7e4e1d4c2872e081fbc50536bda7883bb50cce35
3,634,223
from astropy.io import fits


def kepio(keplcfile):
    """Read in a Kepler LC file and return the time, flux, and error"""
    with fits.open(keplcfile) as hdu:
        print(hdu.info())
        t = hdu[1].data["TIME"]
        f = hdu[1].data["SAP_FLUX"]
        e = hdu[1].data["SAP_FLUX_ERR"]
    return t, f, e
2bfc458200a2c4a04715eebd2306368e8f3581c2
3,634,224
import numpy as np


def camera_matrix(K: np.ndarray, R: np.ndarray, t: np.ndarray) -> np.ndarray:
    """Derive the camera matrix.

    Derive the camera matrix from the camera intrinsic matrix (K), the
    extrinsic rotation matrix (R), and the extrinsic translation vector (t).

    Note that this uses the matlab convention, such that
    M = [R; t] * K
    """
    return np.concatenate((R, t), axis=0) @ K
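A quick shape check under the row-vector convention in the docstring (the inputs here are made up): R is 3x3 and t is a 1x3 row, so [R; t] stacks to 4x3 before multiplying by K.

    import numpy as np

    K = np.eye(3)                    # hypothetical intrinsics
    R = np.eye(3)                    # hypothetical rotation
    t = np.array([[1.0, 2.0, 3.0]])  # 1x3 row vector
    assert camera_matrix(K, R, t).shape == (4, 3)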
274ca0736397850eb82a155000fd8265196315aa
3,634,225
import ntpath


def path_leaf(path):
    """
    Extract the file name from a path. If the path ends with a slash, the
    basename will be empty, so the function falls back to the last non-empty
    component.

    Parameters
    ----------
    path : str
        Path of the file

    Returns
    -------
    output : str
        The name of the file
    """
    head, tail = ntpath.split(path)
    output = tail or ntpath.basename(head)
    return output
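The `or` fallback is exactly what handles trailing separators; a quick check of both cases:

    assert path_leaf('/tmp/logs/app.log') == 'app.log'
    assert path_leaf('/tmp/logs/') == 'logs'  # tail is empty, so basename(head) is used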
58930f081c2366b9084bb279d1b8b267e5f93c96
3,634,226
import ast
import os
from pathlib import Path
from typing import Set, Union


def expand_import_star(
    node: Union[ast.ImportFrom, _nodes.ImportFrom], path: Path
) -> Union[ast.ImportFrom, _nodes.ImportFrom]:
    """Expand import star statement, replace the `*` with a list of ast.alias.

    :param node: `_nodes/ast.ImportFrom` node that has a '*' as `alias.name`.
    :param path: where the node has imported.
    :returns: expanded `_nodes/ast.ImportFrom` (same input node type).
    :raises UnexpandableImportStar: when `ReadPermissionError`,
        `UnparsableFile` or `ModuleNotFoundError` raised.
    """
    mpath = pathu.get_import_from_path(path, "*", node.module, node.level)

    importables: Set[str] = set()

    try:
        if mpath:
            content, _, _ = iou.safe_read(mpath, permissions=(os.R_OK,))
            tree = parse_ast(content, mpath)

            analyzer = ImportablesAnalyzer(mpath)
            analyzer.visit(tree)
            importables = analyzer.get_stats()
        else:
            name = ("." * node.level) + (node.module if node.module else "")
            raise ModuleNotFoundError(name=name)
    except (ReadPermissionError, UnparsableFile, ModuleNotFoundError) as err:
        msg = (
            err
            if not isinstance(err, ModuleNotFoundError)
            else f"{err.name!r} module not found or it's a C wrapped module!"
        )
        if hasattr(node, "location"):
            location = node.location  # type: ignore # pragma: nocover.
        else:
            location = _nodes.NodeLocation(
                (node.lineno, node.col_offset), 0  # type: ignore
            )
        raise UnexpandableImportStar(path, location, str(msg)) from err

    # Create `ast.alias` for each name.
    node.names.clear()
    for name in importables:
        node.names.append(ast.alias(name=name, asname=None))

    return node
a07640ff2d9fbf151423dd0338311cf732122d53
3,634,227
import numpy as np
import torch


def hoi2result(instance_labels, verb_scores, bboxes, sub_ids, obj_ids,
               max_per_img=100, valid_hois=None):
    """Convert detection hois to a list of numpy arrays. Used in QPIC.

    Args:
        valid_hois ():
        max_per_img ():
        obj_ids ():
        sub_ids ():
        verb_scores ():
        instance_labels ():
        bboxes (torch.Tensor | np.ndarray): shape (n, 5)

    Returns:
        list(ndarray): bbox results of each class
    """
    if isinstance(bboxes, torch.Tensor):
        instance_labels = instance_labels.detach().cpu().numpy()
        verb_scores = verb_scores.detach().cpu().numpy()
        bboxes = bboxes.detach().cpu().numpy()
        sub_ids = sub_ids.detach().cpu().numpy()
        obj_ids = obj_ids.detach().cpu().numpy()
    bboxes = [{'bbox': bbox, 'category_id': label}
              for bbox, label in zip(bboxes, instance_labels)]

    hoi_scores = verb_scores
    verb_labels = np.tile(np.arange(hoi_scores.shape[1]),
                          (hoi_scores.shape[0], 1))
    sub_ids = np.tile(sub_ids, (hoi_scores.shape[1], 1)).T
    obj_ids = np.tile(obj_ids, (hoi_scores.shape[1], 1)).T

    hoi_scores = hoi_scores.ravel()
    verb_labels = verb_labels.ravel()
    sub_ids = sub_ids.ravel()
    obj_ids = obj_ids.ravel()

    if len(sub_ids) > 0:
        obj_labels = np.array([bboxes[obj_id]['category_id'] for obj_id in obj_ids])
        masks = valid_hois[verb_labels, obj_labels]
        hoi_scores *= masks

        # note: the label of verb is 1-based.
        hois = [{'subject_id': sub_id, 'object_id': obj_id,
                 'category_id': category_id + 1, 'score': score}
                for sub_id, obj_id, category_id, score
                in zip(sub_ids, obj_ids, verb_labels, hoi_scores)]
        hois.sort(key=lambda k: (k.get('score', 0)), reverse=True)
        # todo: consider put this in head in the future
        if max_per_img:
            hois = hois[:max_per_img]
    else:
        hois = []

    return {'predictions': bboxes, 'hoi_prediction': hois}
1af569ef31a27bfcca135d75e42cd7df26cecea5
3,634,228
def focus_metric(data, merit_function='vollath_F4', **kwargs):
    """Compute the focus metric.

    Computes a focus metric on the given data using a supplied merit function.
    The merit function can be passed either as the name of the function (must
    be defined in this module) or as a callable object. Additional keyword
    arguments for the merit function can be passed as keyword arguments to
    this function.

    Args:
        data (numpy array) -- 2D array to calculate the focus metric for.
        merit_function (str/callable) -- Name of merit function (if in
            panoptes.utils.images) or a callable object.

    Returns:
        scalar: result of calling merit function on data
    """
    if isinstance(merit_function, str):
        try:
            merit_function = globals()[merit_function]
        except KeyError:
            raise KeyError(
                "Focus merit function '{}' not found in panoptes.utils.images!".format(merit_function))

    return merit_function(data, **kwargs)
c8f571e11202d39d8f331fca5fc93333aeb71e62
3,634,229
def get_canonical_import(import_set):
    """Obtain one single import from a set of possible sources of a symbol.

    One symbol might come from multiple places as it is being imported and
    reexported. To simplify API changes, we always use the same import for
    the same module, and give preference to imports coming from main
    tensorflow code.

    Args:
      import_set: (set) Imports providing the same symbol

    Returns:
      A module name to import
    """
    # We use the fact that list sorting is stable, so first we convert the set
    # to a sorted list of the names and then we resort this list to move
    # elements not in core tensorflow to the end.
    import_list = sorted(import_set)
    import_list.sort(key=lambda x: 'lite' in x)
    return import_list[0]
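A small illustration of the two-pass sort with made-up module names: the stable second sort only demotes paths containing 'lite', so the core import wins.

    assert get_canonical_import({'tf.lite.python.op', 'tf.python.op'}) == 'tf.python.op'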
ae53ca4d271ab543a7a13f1ce8240ce6eb328bbb
3,634,230
import numpy as np


def additive_white_gaussian_noise(signal, noise_level):
    """
    Add gaussian white noise to audio signal.

    :param signal: Audio signal to perturb.
    :param noise_level: standard deviation of the gaussian noise.
    """
    # SNR = 10 * log((RMS of signal)^2 / (RMS of noise)^2)
    # RMS_s = np.sqrt(np.mean(signal*signal))
    # RMS_n = np.sqrt(RMS_s*RMS_s / (np.power(10, SNR/10)))
    noise = np.random.normal(0, noise_level, signal.shape[0])
    return signal + noise
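Following the commented-out SNR relation above, one way to derive a noise_level for a target SNR in dB (a sketch, not part of the source):

    import numpy as np

    def noise_level_for_snr(signal, snr_db):
        rms_s = np.sqrt(np.mean(signal * signal))            # RMS of the clean signal
        return np.sqrt(rms_s ** 2 / (10 ** (snr_db / 10)))   # noise RMS per the SNR definition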
c4705b2fa67ce319609677a91bb7efa3e9c427b7
3,634,231
from typing import Iterable

import torch


def _tensor_in(tensor: torch.Tensor, iterable: Iterable[torch.Tensor]):
    """Returns whether `tensor is element` for any element in `iterable`

    This function is necessary because `tensor in iterable` does not work
    reliably for `Tensor`s.

    See https://discuss.pytorch.org/t/how-to-judge-a-tensor-is-in-a-list/15998/4
    for further discussion.
    """
    return any(tensor is elem for elem in iterable)
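The identity check matters because equal-valued tensors are still distinct objects; a minimal illustration:

    import torch

    a = torch.zeros(3)
    b = torch.zeros(3)                             # equal values, different object
    assert _tensor_in(a, [a, b])
    assert not _tensor_in(torch.zeros(3), [a, b])  # a fresh tensor is never `is` an element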
84ac8a129440c9c8d7785029b04bd403514a3bb9
3,634,232
def flip(a, dim=0):
    """
    Flip an array along a dimension.

    Parameters
    ----------
    a : af.Array.
         Multi dimensional array.
    dim : optional: int. default: 0.
         The dimension along which the flip is performed.

    Returns
    -------
    out : af.Array
         The output after flipping `a` along `dim`.

    Examples
    --------
    >>> import arrayfire as af
    >>> a = af.randu(3, 3)
    >>> af.display(a)
    [3 3 1 1]
        0.7269     0.3569     0.3341
        0.7104     0.1437     0.0899
        0.5201     0.4563     0.5363
    >>> b = af.flip(a)
    >>> af.display(b)
    [3 3 1 1]
        0.5201     0.4563     0.5363
        0.7104     0.1437     0.0899
        0.7269     0.3569     0.3341
    >>> c = af.flip(a, 1)
    >>> af.display(c)
    [3 3 1 1]
        0.3341     0.3569     0.7269
        0.0899     0.1437     0.7104
        0.5363     0.4563     0.5201
    """
    out = Array()
    safe_call(backend.get().af_flip(c_pointer(out.arr), a.arr, c_int_t(dim)))
    return out
1c006acae6d6bfccb92a519da197fa6e3792e2c8
3,634,233
def get_cnn_model(params):
    """
    Load base CNN model and add metadata fusion layers if 'use_metadata' is
    set in params.py.

    :param params: global parameters, used to find location of the dataset
        and json file
    :return model: CNN model, with or without metadata fusion depending on
        params
    """
    input_tensor = Input(shape=(params.target_img_size[0],
                                params.target_img_size[1],
                                params.num_channels))
    baseModel = VGG16(weights='imagenet', include_top=False,
                      input_tensor=input_tensor)

    modelStruct = baseModel.output
    modelStruct = Flatten(input_shape=baseModel.output_shape[1:])(modelStruct)

    if params.use_metadata:
        auxiliary_input = Input(shape=(params.metadata_length,), name='aux_input')
        modelStruct = merge([modelStruct, auxiliary_input], mode='concat')

    modelStruct = Dense(params.cnn_last_layer_length, name='fc1')(modelStruct)
    modelStruct = Activation('elu')(modelStruct)
    modelStruct = Dropout(0.6)(modelStruct)
    modelStruct = Dense(params.cnn_last_layer_length, name='fc2')(modelStruct)
    modelStruct = Activation('elu')(modelStruct)
    modelStruct = Dropout(0.8)(modelStruct)
    predictions = Dense(params.num_labels, activation='softmax')(modelStruct)

    if not params.use_metadata:
        model = Model(input=[baseModel.input], output=predictions)
    else:
        model = Model(input=[baseModel.input, auxiliary_input], output=predictions)

    for i, layer in enumerate(model.layers):
        layer.trainable = True

    return model
c87ee7d11439adf5fd7bc98e3d70398c32421089
3,634,234
def is_development_mode(registry):
    """
    Returns true, if mode is set to development in current ini file.

    :param registry: request.registry
    :return: Boolean
    """
    if 'mode' in registry.settings:
        return registry.settings['mode'].lower() == 'development'
    return False
af1b11fa69231a455406247b593f8ff49855bc3f
3,634,235
def run_summarizer(parser, sentences, language='english'):
    """
    :param parser: Parser for selected document type
    :param sentences: Maximum sentences for summarizer.
    :returns summary: Summarized page.
    """
    summarizer = Summarizer(Stemmer(language))
    summarizer.stop_words = get_stop_words(language)
    return [str(sentence)
            for sentence in summarizer(parser.document, sentences)]
9dd61447df7612b005b825c2e21fc3943596ab12
3,634,236
def mk_input(ctx, name, type):
    """
    mk_input(Int_ctx ctx, char const * name, Int_type type) -> Int_net

    Parameters
    ----------
    ctx: Int_ctx
    name: char const *
    type: Int_type
    """
    return _api.mk_input(ctx, name, type)
05cba1813f9fb81ea132dac653073466862e4db0
3,634,237
def coo_fromdense_mhlo(mat, *, nnz, data_dtype, index_dtype, index_type):
    """COO from dense matrix."""
    mat_type = ir.RankedTensorType(mat.type)
    rows, cols = mat_type.shape
    buffer_size, opaque = _hipsparse.build_coo_fromdense_descriptor(
        data_dtype, index_dtype, rows, cols, nnz)

    i32_type = ir.IntegerType.get_signless(32)
    out = mhlo.CustomCallOp(
        [ir.TupleType.get_tuple([
            ir.RankedTensorType.get([nnz], mat_type.element_type),
            ir.RankedTensorType.get([nnz], index_type),
            ir.RankedTensorType.get([nnz], index_type),
            ir.RankedTensorType.get([buffer_size],
                                    ir.IntegerType.get_signless(8)),
        ])],
        [mat],
        call_target_name=ir.StringAttr.get("hipsparse_coo_fromdense"),
        has_side_effect=ir.BoolAttr.get(False),
        backend_config=ir.StringAttr.get(opaque),
        api_version=ir.IntegerAttr.get(i32_type, 2),
        called_computations=ir.ArrayAttr.get([]),
        operand_layouts=ir.ArrayAttr.get([
            ir.DenseIntElementsAttr.get(np.array([1, 0]),
                                        type=ir.IndexType.get()),
        ]),
        result_layouts=ir.ArrayAttr.get([
            ir.DenseIntElementsAttr.get(np.array([0]),
                                        type=ir.IndexType.get()),
        ] * 4))
    return [
        mhlo.GetTupleElementOp(out, ir.IntegerAttr.get(i32_type, i)).result
        for i in range(3)
    ]
c8cf5db1d05e9fcaf6d383f1d6cd9cfa2fe55ae8
3,634,238
def __get_wight(last_link: Link, end_link: Link, end_fraction, weight_function):
    """
    Calculate the weight of the end_link.

    :param last_link: Needed to determine from which direction you come
    :param end_link: Link from which the weight is calculated
    :param end_fraction: fraction as a number (1 >= fraction >= 0). Is always
        the percentage between start node and finish
    :param weight_function: Function that calculates the weight
    :return:
    """
    if last_link in end_link.get_links_at_start_node():
        return weight_function.get_wight(end_link) * end_fraction
    else:
        return weight_function.get_wight(end_link) * (1 - end_fraction)
e80a82fa830d08890a51398f6f9e5ba69482405a
3,634,239
def float_fraction(trainpct):
    """
    Float bounded between 0.0 and 1.0
    """
    try:
        f = float(trainpct)
    except ValueError:
        raise Exception("Fraction must be a float")
    if f < 0.0 or f > 1.0:
        raise Exception("Argument should be a fraction! Must be <= 1.0 and >= 0.0")
    return f
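A validator shaped like this is typically wired up as an argparse type; a minimal sketch (the --trainpct flag is hypothetical):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--trainpct', type=float_fraction, default=0.8)
    args = parser.parse_args(['--trainpct', '0.25'])
    assert args.trainpct == 0.25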
8eb28dcaa0ed9250f4aa68d668ad424b5b5eded5
3,634,240
def handle_internal(msg):
    """Process an internal message."""
    internal = msg.gateway.const.Internal(msg.sub_type)
    handler = internal.get_handler(msg.gateway.handlers)
    if handler is None:
        return None
    return handler(msg)
0f5cae49cf5d36a5e161f88902c46af931fd622a
3,634,241
import tensorflow as tf


def _lower_bound_grad(op, grad):
    """Gradient for `lower_bound` if `gradient == 'identity_if_towards'`.

    Args:
      op: The op for which to calculate a gradient.
      grad: Gradient with respect to the output of the op.

    Returns:
      Gradient with respect to the inputs of the op.
    """
    inputs, bound = op.inputs
    pass_through_if = tf.logical_or(inputs >= bound, grad < 0)
    return [tf.cast(pass_through_if, grad.dtype) * grad, None]
907ef212759f6a45ff32b78bcfede6222d978996
3,634,242
def list_ms(msname, ack=False):
    """ List sources and data descriptors in a MeasurementSet

    :param msname: File name of MS
    :param ack: Ask casacore to acknowledge each table operation
    :return: sources, data descriptors

    For example::

        print(list_ms('3C277.1_avg.ms'))
        (['1302+5748', '0319+415', '1407+284', '1252+5634', '1331+305'], [0, 1, 2, 3])
    """
    try:
        from casacore.tables import table  # pylint: disable=import-error
    except ModuleNotFoundError:
        raise ModuleNotFoundError("casacore is not installed")
    try:
        from rascil.processing_components.visibility import msv2
    except ModuleNotFoundError:
        raise ModuleNotFoundError("cannot import msv2")

    tab = table(msname, ack=ack)
    log.debug("list_ms: %s" % str(tab.info()))

    fieldtab = table('%s/FIELD' % msname, ack=False)
    sources = fieldtab.getcol('NAME')

    ddtab = table('%s/DATA_DESCRIPTION' % msname, ack=False)
    dds = list(range(ddtab.nrows()))

    return sources, dds
0b12e59eead973c0b2e5370e1e0c98ac1ffc4ae8
3,634,243
import pandas as pd
import pandas_ta as ta  # assumed source of the `ta.obv` indicator


def obv(s_interval: str, df_stock: pd.DataFrame) -> pd.DataFrame:
    """On Balance Volume

    Parameters
    ----------
    s_interval: str
        Stock data interval
    df_stock: pd.DataFrame
        Dataframe of stock prices

    Returns
    -------
    pd.DataFrame
        Dataframe with technical indicator
    """
    # Daily
    if s_interval == "1440min":
        df_ta = ta.obv(close=df_stock["Adj Close"], volume=df_stock["Volume"]).dropna()
    # Intraday
    else:
        df_ta = ta.obv(close=df_stock["Close"], volume=df_stock["Volume"]).dropna()

    return df_ta
36d3cd371bb37fa5ee74f10b774ae4408a9164d2
3,634,244
from io import BytesIO
from typing import Dict
from urllib.request import urlopen
from zipfile import ZipFile

import geopandas as gpd
import pandas as pd
import requests


def get_cultural_hotspots(url: str, params: Dict) -> pd.DataFrame:
    """Get cultural hotspots within city boundaries."""
    package = requests.get(url, params=params).json()
    ch_locations = package["result"]["resources"][0]["url"]
    ch_locs_dir_path = "data/raw/cultural-hotspot-points-of-interest-wgs84"
    with urlopen(ch_locations) as zipresp:
        with ZipFile(BytesIO(zipresp.read())) as zfile:
            zfile.extractall(ch_locs_dir_path)
    df = gpd.read_file(f"{ch_locs_dir_path}/CULTURAL_HOTSPOT_WGS84.shp")
    df = (
        df.drop_duplicates(
            subset=["PNT_OF_INT", "LATITUDE", "LONGITUDE"],
            keep="first",
        )
        .reset_index(drop=True)
        .copy()
    )
    df = (
        df.drop_duplicates(
            subset=["PNT_OF_INT"],
            keep="first",
        )
        .reset_index(drop=True)
        .copy()
    )
    assert df[df.duplicated(subset=["PNT_OF_INT"], keep=False)].empty
    df_essentials = (
        df[["RID", "PNT_OF_INT", "LATITUDE", "LONGITUDE"]]
        .rename(
            columns={
                "RID": "ID",
                "PNT_OF_INT": "NAME",
                "LATITUDE": "POI_LATITUDE",
                "LONGITUDE": "POI_LONGITUDE",
            }
        )
        .astype({"NAME": pd.StringDtype()})
    )
    # print(df_essentials.dtypes)
    return df_essentials
1a073998c51eca3f6a714462b864fa7d6c142e76
3,634,245
import optparse


def setopts():
    """
    Setup all possible command line options....
    """
    usage = 'USAGE: %s [options]' % (NAME)
    version = NAME + " " + __version__
    parser = optparse.OptionParser(usage=usage, version=version)
    parser.add_option("-v", "--voicefile", dest="voicefile",
                      help="JSON voice file defining the voice to use.")
    parser.add_option("-t", "--text", dest="text", default=DEF_RULES,
                      help="Text to synthesize.")
    return parser
af8efe810da81af1a20747b4fcc9aa8f244ed1fc
3,634,246
def f_test(df, ann):
    """Pre-select features that show no difference between dataset types.

    Parameters
    ----------
    df : pandas.DataFrame
        A pandas DataFrame whose rows represent samples and columns represent
        features.
    ann : pandas.DataFrame
        DataFrame with annotation of samples, used to group samples by
        dataset and dataset type.

    Returns
    -------
    list
        List of features without difference between types of dataset,
        intersected with the list of features from the given DataFrame.
    """
    dataset_ids = ann[['Dataset', 'Dataset type']].drop_duplicates().to_numpy()
    samples = [
        df.loc[(ann['Dataset'] == dataset) & (ann['Dataset type'] == dataset_type)]
        for dataset, dataset_type in dataset_ids
        if dataset_type != "Validation"
    ]
    statistics, pvalues = f_oneway(*samples, axis=0)

    return df.columns[pvalues > 0.05].to_list()
77b60abe9c09ff674f0af687552bacd75e8b75c4
3,634,247
from typing import Union


def _is_group(cli_obj: Union[Group, Command, MultiCommand]) -> bool:
    """Detects if cli obj is a Group or not"""
    return isinstance(cli_obj, Group) and hasattr(cli_obj, "commands")
d262824ea8aabdd0e24740c2cbd0a9e0e4209ba1
3,634,248
import codecs
import json


def _get_input_json(input_path):
    """
    A really basic helper function to load the JSON data. This is probably
    a leftover from when I was iterating on different reduce() functions.
    """
    # Read in the input file.
    input_file = codecs.open(input_path, encoding="utf-8", mode="r")
    json_data = json.load(input_file)
    input_file.close()

    return json_data
5c91e77b2224435dbf17fcfc2351c574c173c6aa
3,634,249
import re
import urllib.parse


def urn_from_member_name(member, base_urn):
    """Returns a URN object from a zip file's member name."""
    member = utils.SmartUnicode(member)

    # Remove %xx escapes.
    member = re.sub(
        "%(..)", lambda x: chr(int("0x" + x.group(1), 0)), member)

    # This is an absolute URN.
    if urllib.parse.urlparse(member).scheme == "aff4":
        result = member
    else:
        # Relative member becomes relative to the volume's URN.
        result = base_urn.Append(member, quote=False)

    return rdfvalue.URN(result)
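The re.sub above is a hand-rolled percent-decoder; a quick check of what that expression does on its own:

    import re

    decoded = re.sub("%(..)", lambda x: chr(int("0x" + x.group(1), 0)), "dir%2Ffile")
    assert decoded == "dir/file"  # %2F -> '/'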
f87a5f13aa3ae1fe840caa9d28c375295a730c82
3,634,250
import numpy as np


def create_data_set():
    """
    Create the data set

    :return:
    """
    data_set_ = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no']
    ]
    labels_ = ['no surfacing', 'flippers']
    return np.array(data_set_), np.array(labels_)
e4c7cf3200d3acec618529a35196ec2f080d071b
3,634,251
from typing import Any, Dict


def get_text(xml: bytes, context: Dict[str, Any]) -> TablesList:
    """Xml as a string to a list of cell strings.

    :param xml: an xml bytes object which might contain text
    :param context: dictionary of document attributes generated in get_docx_text
    :returns: A 4-deep nested list of strings.

    Sorts the text into the DepthCollector instance, five-levels deep
    ``[table][row][cell][paragraph][run]`` is a string

    Joins the runs before returning, so return list will be
    ``[table][row][cell][paragraph]`` is a string

    If you'd like to extend or edit this package, this function is probably
    where you want to do it. Nothing tricky here except keeping track of the
    text formatting.
    """
    tables = DepthCollector(5)
    do_html = context["do_html"]

    # noinspection PyPep8Naming
    def branches(branch: Element) -> None:
        """
        Recursively iterate over descendents of branch. Add text when found.

        :param branch: An Element from an xml file (ElementTree)
        :return: None. Adds text cells to outer variable `tables`.
        """
        for child in branch:
            tag = child.tag

            # set caret depth
            if tag == TABLE:
                tables.set_caret(1)
            elif tag == TABLE_ROW:
                tables.set_caret(2)
            elif tag == TABLE_CELL:
                tables.set_caret(3)
            elif tag == PARAGRAPH:
                tables.set_caret(4)

            # open elements
            if tag == PARAGRAPH:
                tables.insert(_get_bullet_string(child, context))

            elif tag == RUN and do_html is True:
                # new text run
                run_style = get_run_style(child)
                open_style = getattr(tables, "open_style", ())
                if run_style != open_style:
                    tables.insert(style_close(open_style))
                    tables.insert(style_open(run_style))
                    tables.open_style = run_style

            elif tag == TEXT:
                # new text object. oddly enough, these don't all contain text
                text = child.text if child.text is not None else ""
                if do_html is True:
                    text = text.replace("<", "&lt;")
                    text = text.replace(">", "&gt;")
                tables.insert(text)

            elif tag == FOOTNOTE:
                if 'separator' not in child.attrib.get(qn('w:type'), '').lower():
                    tables.insert("footnote %s \t" % child.attrib[qn('w:id')])
                    # tables.insert(f"footnote{child.attrib[qn('w:id')]})\t")

            elif tag == ENDNOTE:
                if 'separator' not in child.attrib.get(qn('w:type'), '').lower():
                    tables.insert("endnote %s \t" % child.attrib[qn('w:id')])
                    # tables.insert(f"endnote{child.attrib[qn('w:id')]})\t")

            # add placeholders
            elif tag == FOOTNOTE_REFERENCE:
                tables.insert("----footnote%s----" % child.attrib[qn('w:id')])
                # tables.insert(f"----footnote{child.attrib[qn('w:id')]}----")

            elif tag == ENDNOTE_REFERENCE:
                tables.insert("----endnote%s----" % child.attrib[qn('w:id')])
                # tables.insert(f"----endnote{child.attrib[qn('w:id')]}----")

            elif tag == IMAGE:
                rId = child.attrib[qn("r:embed")]
                image = context["rId2Target"].get(rId)
                if image:
                    tables.insert("----%s----" % image)
                    # tables.insert(f"----{image}----")

            elif tag == TAB:
                tables.insert("\t")

            # enter child element
            branches(child)

            # close elements
            if tag == PARAGRAPH and do_html is True:
                tables.insert(style_close(getattr(tables, "open_style", ())))
                tables.open_style = ()

            if tag in {TABLE_ROW, TABLE_CELL, PARAGRAPH}:
                tables.raise_caret()
            elif tag == TABLE:
                tables.set_caret(1)

    branches(ElementTree.fromstring(xml))
    tree = tables.tree
    for (i, j, k, l), paragraph in enum_at_depth(tree, 4):
        tree[i][j][k][l] = "".join(paragraph)
    return tree
c6d1197b07cc1e07e0cb299b455d243c9ee023d0
3,634,252
from typing import Any


def day(query: int, field_name: str, object: Any) -> bool:
    """ Checks if value of object is equal to query """
    return query == getattr(object, field_name).day
b24fc0de6c01b355633dfbd240a760c1f51bfa6f
3,634,253
import glob

from tqdm import tqdm


def glob_read(path, read_fun, stop_i=None, show_bar=False):
    """read all files in path by glob

    Args:
        path (str): absolute path
        read_fun ([type]): different read function based on file type
        stop_i (int, optional): stop read at file i. Defaults to None.
        show_bar (bool, optional): display read process or not. Defaults to False.

    Returns:
        text_list (list): a list of lines in all files
        pathlist (list): a list of absolute file path

    Examples:
        >>> text_list, pathlist = glob_read("/home/directory/test", read_fun=read_txt)
        >>> print(text_list)
        ['line1', 'line2', 'line3']
    """
    text_list = []
    pathlist = glob.glob(path)
    if show_bar:
        w = tqdm(pathlist, desc=u'Loaded 0 texts')
    else:
        w = pathlist
    for i, txt in enumerate(w):
        if stop_i is not None and i > stop_i:
            break
        text_list.extend(read_fun(txt, show_bar=False))
        if show_bar:
            w.set_description(u'Loaded %s texts' % str(i + 1))
    return text_list, pathlist
6932cda5ae27c72cf9db1a5360c1c130b5cea6ed
3,634,254
def argument(*name_or_flags, **kwargs):
    """Convenience function to properly format arguments to pass to the
    subcommand decorator.
    """
    return list(name_or_flags), kwargs
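This helper usually pairs with a decorator over argparse subparsers; a minimal sketch of that pattern (the subcommand decorator and cli parser here are assumptions, not from the source):

    import argparse

    cli = argparse.ArgumentParser()
    subparsers = cli.add_subparsers(dest='subcommand')

    def subcommand(args=(), parent=subparsers):
        def decorator(func):
            # One subparser per function; wire up each (name_or_flags, kwargs) pair.
            sub = parent.add_parser(func.__name__, description=func.__doc__)
            for arg in args:
                sub.add_argument(*arg[0], **arg[1])
            sub.set_defaults(func=func)
            return func
        return decorator

    @subcommand([argument('--name', default='world')])
    def hello(args):
        """Print a greeting."""
        print('hello', args.name)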
7f1ba4d4005168f3c634840ddd20d6fbb73182f5
3,634,255
import math


def conv_float2negexp(val):
    """Returns the least restrictive negative exponent of the power 10 that
    would achieve the floating point convergence criterion *val*.
    """
    return -1 * int(math.floor(math.log(val, 10)))
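Two quick values to make the rounding direction concrete:

    assert conv_float2negexp(1e-6) == 6  # exactly a power of ten
    assert conv_float2negexp(5e-7) == 7  # between powers: floor picks the tighter exponent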
562ccf7d34f8034a25cabfb471e7fc2ab9c0feb6
3,634,256
def pick_wm_class_2(tissue_class_files):
    """Returns the white matter tissue class file from the list of
    segmented tissue class files

    Parameters
    ----------
    tissue_class_files : list (string)
        List of tissue class files

    Returns
    -------
    file : string
        Path to segment_seg_2.nii.gz is returned
    """  # noqa
    if isinstance(tissue_class_files, list):
        if len(tissue_class_files) == 1:
            tissue_class_files = tissue_class_files[0]
        for filename in tissue_class_files:
            if filename.endswith("seg_2.nii.gz"):
                return filename
    return None
a30809d94a57d12084fb1747cf1854163361c50d
3,634,257
import numpy as np
import xarray as xr


def collect_datasets(data_type, varnames, list_of_ds, labels, **kwargs):
    """
    Concatenate several different xarray datasets across a new "collection"
    dimension, which can be accessed with the specified labels. Stores them
    in an xarray dataset which can be passed to the ldcpy plot functions.
    (Call this OR open_datasets() before plotting.)

    Parameters
    ==========
    data_type : string
        The type of dataset (e.g. 'cam-fv' or 'pop'), which determines the
        weights variable used for the cell area.
    varnames : list
        The variable(s) of interest to combine across input files (usually just one)
    list_of_ds : list
        The datasets to be concatenated into a collection
    labels : list
        The respective label to access data from each dataset (also used in plotting fcns)
    **kwargs :
        (optional) Additional arguments passed on to xarray.concat(). A list
        of available arguments can be found here:
        https://xarray-test.readthedocs.io/en/latest/generated/xarray.concat.html

    Returns
    =======
    out : xarray.Dataset
        a collection containing all the data from the list datasets
    """
    # Error checking:
    # list_of_ds and labels must be the same length
    assert len(list_of_ds) == len(
        labels
    ), 'ERROR:collect_dataset dataset list and labels arguments must be the same length'

    # the number of timeslices must be the same
    sz = np.zeros(len(list_of_ds))
    for i, myds in enumerate(list_of_ds):
        sz[i] = myds.sizes['time']
    indx = np.unique(sz)
    assert indx.size == 1, 'ERROR: all datasets must have the same length time dimension'

    if data_type == 'cam-fv':
        weights_name = 'gw'
        varnames.append(weights_name)
    elif data_type == 'pop':
        weights_name = 'TAREA'
        varnames.append(weights_name)

    # preprocess
    for i, myds in enumerate(list_of_ds):
        list_of_ds[i] = preprocess(myds, varnames)

    full_ds = xr.concat(list_of_ds, 'collection', **kwargs)

    if data_type == 'pop':
        full_ds.coords['cell_area'] = xr.DataArray(full_ds.variables.mapping.get(weights_name))[0]
    else:
        full_ds.coords['cell_area'] = (
            xr.DataArray(full_ds.variables.mapping.get(weights_name))
            .expand_dims(lon=full_ds.dims['lon'])
            .transpose()
        )
    full_ds.attrs['cell_measures'] = 'area: cell_area'
    full_ds = full_ds.drop(weights_name)

    full_ds['collection'] = xr.DataArray(labels, dims='collection')

    print('dataset size in GB {:0.2f}\n'.format(full_ds.nbytes / 1e9))

    full_ds.attrs['data_type'] = data_type
    return full_ds
bd5c67d61571f9a3ab41eafb88bfe6367462e052
3,634,258
import numpy as np


def split_tiles(image, tile_size):
    """Splits the image into tiles of size `tile_size`."""
    # The copy is necessary due to the use of the memory layout.
    if image.ndim == 2:
        image = image[..., None]
    image = np.array(image)
    image = make_divisible(image, tile_size).copy()
    height = width = tile_size
    nrows, ncols, depth = image.shape
    stride = image.strides
    nrows, m = divmod(nrows, height)
    ncols, n = divmod(ncols, width)
    if m != 0 or n != 0:
        raise ValueError('Image must be divisible by tile size.')
    return np.lib.stride_tricks.as_strided(
        np.ravel(image),
        shape=(nrows, ncols, height, width, depth),
        strides=(height * stride[0], width * stride[1], *stride),
        writeable=False)
be3d48e4fd926d0a8d2226990dac01315dc0437a
3,634,259
import pandas as pd
import requests
from bs4 import BeautifulSoup


def get_insider_activity(ticker: str) -> pd.DataFrame:
    """Get insider activity. [Source: Business Insider]

    Parameters
    ----------
    ticker : str
        Ticker to get insider activity data from

    Returns
    -------
    df_insider : pd.DataFrame
        Insider activity data
    """
    url_market_business_insider = (
        f"https://markets.businessinsider.com/stocks/{ticker.lower()}-stock"
    )
    text_soup_market_business_insider = BeautifulSoup(
        requests.get(
            url_market_business_insider, headers={"User-Agent": get_user_agent()}
        ).text,
        "lxml",
    )

    d_insider = dict()
    l_insider_vals = list()
    for idx, insider_val in enumerate(
        text_soup_market_business_insider.findAll(
            "td", {"class": "table__td text-center"}
        )
    ):
        l_insider_vals.append(insider_val.text.strip())

        # Add value to dictionary
        if (idx + 1) % 6 == 0:
            # Check if we are still parsing insider trading activity
            if "/" not in l_insider_vals[0]:
                break
            d_insider[(idx + 1) // 6] = l_insider_vals
            l_insider_vals = list()

    df_insider = pd.DataFrame.from_dict(
        d_insider,
        orient="index",
        columns=["Date", "Shares Traded", "Shares Held", "Price", "Type", "Option"],
    )

    df_insider["Date"] = pd.to_datetime(df_insider["Date"])
    df_insider = df_insider.set_index("Date")
    df_insider = df_insider.sort_index(ascending=True)

    l_names = list()
    for s_name in text_soup_market_business_insider.findAll(
        "a", {"onclick": "silentTrackPI()"}
    ):
        l_names.append(s_name.text.strip())
    df_insider["Insider"] = l_names

    return df_insider
97beab7bfc2ef90f74204777f0ef90c455fa0293
3,634,260
def resolve_dependencies(dependencies):
    """Resolve a set of dependencies to specific versions or Unspecified.

    You can find more on the syntax of Debian dependency relationships here:
    https://www.debian.org/doc/debian-policy/ch-relationships.html

    Args:
        dependencies (str): string with dependencies
            (e.g. "gcc-6-base (= 6.3.0-18+deb9u1), libc6 (>= 2.11),")

    Returns:
        results (list): of tuples with package names, and version
    """
    results = list()
    if dependencies is not None:
        for dependency in dependencies.split(','):
            dependency = dependency.strip()
            if '|' in dependency:
                # alternatives: resolve both sides of the pipe
                results.append(resolve_dependency(dependency.split('|')[0]))
                results.append(resolve_dependency(dependency.split('|')[1]))
            else:
                results.append(resolve_dependency(dependency))
    return results
bbc5335eaf93de0c72c7dde6272f39f75c941b71
3,634,261
import jax
import jax.numpy as jnp


def sampler(img_target, pose, intrinsics, rng, options):
    """
    Given a single image, samples rays
    """
    pose_target = pose[:3, :4]
    ray_origins, ray_directions = get_ray_bundle(
        intrinsics.height, intrinsics.width, intrinsics.focal_length, pose_target
    )

    coords = jnp.stack(
        jnp.meshgrid(
            jnp.arange(intrinsics.height), jnp.arange(intrinsics.width), indexing="xy"
        ),
        axis=-1,
    ).reshape((-1, 2))

    select_inds = jax.random.choice(
        rng, coords.shape[0], shape=(options.num_random_rays,), replace=False
    )
    select_inds = coords[select_inds]

    ray_origins = ray_origins[select_inds[:, 0], select_inds[:, 1], :]
    ray_directions = ray_directions[select_inds[:, 0], select_inds[:, 1], :]

    target_s = img_target[select_inds[:, 0], select_inds[:, 1], :]

    return ray_origins, ray_directions, target_s
e02907dbdef7f532ee6f843ff8fc367dc7a3ff56
3,634,262
from typing import List


def sanitize_gpu_ids(gpus: List[int]) -> List[int]:
    """
    Checks that each of the GPUs in the list is actually available.
    Raises a MisconfigurationException if any of the GPUs is not available.

    Args:
        gpus: list of ints corresponding to GPU indices

    Returns:
        unmodified gpus variable
    """
    all_available_gpus = get_all_available_gpus()
    misconfig = False
    for gpu in gpus:
        if gpu not in all_available_gpus:
            misconfig = True

    if misconfig:
        # sometimes auto ddp might have different flags
        # but this is not what the user intended
        # correct for the user
        if len(gpus) == len(all_available_gpus):
            gpus = all_available_gpus
        else:
            raise MisconfigurationException(f"""
                You requested GPUs: {gpus}
                But your machine only has: {all_available_gpus}
            """)
    return gpus
b6f3f7da19fc7f26c6229ffb80fbed3ed6dfe363
3,634,263
def gauge(name: str, documentation: str, labels: tuple = ()) -> Gauge:
    """Builds a gauge with configured namespace / subsystem."""
    return Gauge(
        name,
        documentation,
        labelnames=labels,
        namespace=s.PROMETHEUS_NAMESPACE,
        subsystem=s.PROMETHEUS_SUBSYSTEM,
    )
8267cfcbbd7bdef9e3d7b6dbe8db759c2f148f40
3,634,264
def uniform_scaling(weights, prune_ratio, prec_layers, succ_layers):
    """Prune filters using a uniform scaling threshold.

    Arguments:
        weights (OrderedDict): unpruned model weights
        prune_ratio (float): target ratio used to set the per-layer
            scaling threshold
        prec_layers (dict): mapping from BN names to preceding convs/linears
        succ_layers (dict): mapping from BN names to succeeding convs/linears

    Returns:
        pruned_weights (OrderedDict): pruned model weights
    """
    # prune weights with calculated threshold of a uniform scaling
    norm_layer_names = list(set(succ_layers) & set(prec_layers))
    grouped_weight_names = group_weight_names(weights.keys())
    for norm_layer_name in norm_layer_names:
        norm_weight_name = norm_layer_name + WEIGHT_POSTFIX
        scale_weight = weights[norm_weight_name].abs()
        scale_weight_list = [_.abs().item() for _ in list(scale_weight)]
        keep_index = int(len(scale_weight_list) * prune_ratio + 0.5)
        scale_weight_sorted = sorted(scale_weight_list)
        # threshold midway between the two channels straddling the cut point
        prune_th = (scale_weight_sorted[keep_index] + scale_weight_sorted[keep_index + 1]) / 2
        keep_mask = scale_weight > prune_th
        # prune_ratio = 1 - keep_mask.sum().item() / len(scale_weight_list)
        if keep_mask.sum().item() == scale_weight.size(0):
            continue
        prune_filters(norm_layer_name, keep_mask, weights, grouped_weight_names,
                      prec_layers, succ_layers)
    return weights, None
5a50dfd64bf9733ffbef957b184c3f88279338b1
3,634,265
def find_port(master_class_name, masters, output, opts):
    """Finds a triplet of free ports appropriate for the given master."""
    try:
        master_class = getattr(Master, master_class_name)
    except AttributeError:
        raise ValueError('Master class %s does not exist' % master_class_name)

    used_ports = set()
    for m in masters:
        for port in ('port', 'slave_port', 'alt_port'):
            used_ports.add(m.get(port, 0))
    used_ports = used_ports | PORT_BLACKLIST

    def _inner_loop():
        for digits in xrange(0, 100):
            port = build_port_str(master_class, 'port', digits)
            slave_port = build_port_str(master_class, 'slave_port', digits)
            alt_port = build_port_str(master_class, 'alt_port', digits)
            if all([
                    int(port) not in used_ports,
                    int(slave_port) not in used_ports,
                    int(alt_port) not in used_ports]):
                return port, slave_port, alt_port
        return None, None, None
    port, slave_port, alt_port = _inner_loop()

    if not all([port, slave_port, alt_port]):
        raise RuntimeError('Unable to find available ports on host')

    output([
        ('Master', field('master_base_class')),
        ('Port', field('master_port')),
        ('Alt port', field('master_port_alt')),
        ('Slave port', field('slave_port'))
    ], [
        {
            'master_base_class': master_class_name,
            'master_port': port,
            'master_port_alt': alt_port,
            'slave_port': slave_port
        }
    ])
a5616fdefac44e450a9135d0d11563b88a84a9dc
3,634,266
def find_match(good_message, bad_message):
    """
    Makes the hash of the bad message match that of the good one.

    Args:
        good_message: The good message we want to match.
        bad_message: The bad message we want to make match.

    Returns:
        Variations of the good and bad messages that have the same hash.
    """
    # Generate variations of the good message hash.
    print "Generating good hashes..."
    good_hashes = {}
    front_padding = 0
    back_padding = 0
    good_var = good_message
    for i in range(0, 65536):
        good_hash = binascii.crc32(good_var)
        # We only save the number of padding characters to reduce memory.
        good_hashes[good_hash] = (front_padding, back_padding)

        if random.randint(0, 1):
            good_var += "\0"
            back_padding += 1
        else:
            good_var = "\0" + good_var
            front_padding += 1

    print "Generating bad hashes..."
    bad_hash = binascii.crc32(bad_message)
    padding = 0
    bad_var = bad_message
    while bad_hash not in good_hashes:
        char = random.choice(["\0", "\r"])
        if random.randint(0, 1):
            bad_var += char
        else:
            bad_var = char + bad_var
        padding += 1
        bad_hash = binascii.crc32(bad_var)
        if not padding % 1000:
            print "Tried %d variations." % padding

    good_match = pad_with_null(good_message, good_hashes[bad_hash])
    return good_match, bad_var
723683de6b24d651bc2ebab43b7ccd758de1c4de
3,634,267
import logging


def query_by_date_after(**kwargs):
    """
    Query records by publication date, returning everything on or after the
    given date: e.g. for 2020-06-03, records from 2020-06-03, 2020-06-04, ......

    :param kwargs: {'date': date}
    :return:
    """
    session = None
    try:
        date = kwargs['date'].strip() + config.BEGIN_DAY_TIME
        session = get_session()
        ret = session.query(SecondHand).filter(SecondHand.DATE_TIME >= date).order_by(
            SecondHand.DATE_TIME.desc()).limit(config.LIMIT_MAX).all()
        # Committing persists the session to the database
        session.commit()
        results = parse_object(*ret)
        logging.info('OK : second_hand.py--->query_by_date_after(), succeeded')
        return results
    except Exception as e:
        logging.critical('Error : second_hand.py--->query_by_date_after() failed : {}'.format(e))
        return []
    finally:
        # guard against get_session() having failed before assignment
        if session is not None:
            session.close()
867faba9e835a8bbb539349ad316f5a594a6ead0
3,634,268
def split_data(im_in, dim, squeeze_data=True):
    """
    Backwards-compatible wrapper around split_img_data.
    """
    # backwards compat
    return split_img_data(src_img=im_in, dim=dim, squeeze_data=squeeze_data)
e45e32e3654198e63597b0e813f3e929af11fc60
3,634,269
import csv
import os

import numpy


def execute(args):
    """This function invokes the SDR model given URI inputs of files.

    It may write log, warning, or error messages to stdout.

    args - a python dictionary with the following possible entries:

    args['workspace_dir'] - a uri to the directory that will write output
        and other temporary files during calculation. (required)
    args['results_suffix'] - a string to append to any output file name
        (optional)
    args['dem_uri'] - a uri to a digital elevation raster file (required)
    args['erosivity_uri'] - a uri to an input raster describing the rainfall
        erosivity index (required)
    args['erodibility_uri'] - a uri to an input raster describing soil
        erodibility (required)
    args['lulc_uri'] - a uri to a land use/land cover raster whose LULC
        indexes correspond to indexes in the biophysical table input. Used
        for determining soil retention and other biophysical properties of
        the landscape. (required)
    args['watersheds_uri'] - a uri to an input shapefile of the watersheds
        of interest as polygons. (required)
    args['biophysical_table_uri'] - a uri to an input CSV file with
        biophysical information about each of the land use classes.
    args['threshold_flow_accumulation'] - an integer describing the number
        of upstream cells that must flow into a cell before it's considered
        part of a stream. required if 'stream_uri' is not provided.
    args['k_param'] - k calibration parameter (see user's guide for values)
    args['sdr_max'] - the max value the SDR can be
    args['ic_0_param'] - ic_0 calibration parameter (see user's guide for
        values)
    args['drainage_uri'] - An optional GIS raster dataset mask, that
        indicates areas that drain to the watershed. Format is that 1's
        indicate drainage areas and 0's or nodata indicate areas with no
        additional drainage. This model is most accurate when the drainage
        raster aligns with the DEM.
    args['_prepare'] - (optional) The preprocessed set of data created by
        the sdr._prepare call. This argument could be used in cases where
        the call to this function is scripted and can save a significant
        amount of runtime.

    returns nothing."""

    #append a _ to the suffix if it's not empty and doesn't already have one
    try:
        file_suffix = args['results_suffix']
        if file_suffix != "" and not file_suffix.startswith('_'):
            file_suffix = '_' + file_suffix
    except KeyError:
        file_suffix = ''

    csv_dict_reader = csv.DictReader(open(args['biophysical_table_uri'], 'rU'))
    biophysical_table = {}
    for row in csv_dict_reader:
        biophysical_table[int(row['lucode'])] = row

    #Test to see if c or p values are outside of 0..1
    for table_key in ['usle_c', 'usle_p']:
        for (lulc_code, table) in biophysical_table.iteritems():
            try:
                float_value = float(table[table_key])
                if float_value < 0 or float_value > 1:
                    raise Exception(
                        'Value should be within range 0..1 offending value '
                        'table %s, lulc_code %s, value %s' % (
                            table_key, str(lulc_code), str(float_value)))
            except ValueError as e:
                raise Exception(
                    'Value is not a floating point value within range 0..1 '
                    'offending value table %s, lulc_code %s, value %s' % (
                        table_key, str(lulc_code), table[table_key]))

    intermediate_dir = os.path.join(args['workspace_dir'], 'intermediate')
    output_dir = os.path.join(args['workspace_dir'], 'output')

    #Sets up the intermediate and output directory structure for the workspace
    pygeoprocessing.geoprocessing.create_directories(
        [output_dir, intermediate_dir])

    #check if we've already prepared the DEM
    if '_prepare' in args:
        preprocessed_data = args['_prepare']
    else:
        preprocessed_data = _prepare(**args)

    aligned_dem_uri = preprocessed_data['aligned_dem_uri']
    aligned_erosivity_uri = preprocessed_data['aligned_erosivity_uri']
    aligned_erodibility_uri = preprocessed_data['aligned_erodibility_uri']
    thresholded_slope_uri = preprocessed_data['thresholded_slope_uri']
    flow_accumulation_uri = preprocessed_data['flow_accumulation_uri']
    flow_direction_uri = preprocessed_data['flow_direction_uri']
    ls_uri = preprocessed_data['ls_uri']

    #this section is to align the lulc with the prepared data, we need to make
    #a garbage temporary dem to conform to the align_dataset_list API that
    #requires as many outputs as inputs
    aligned_lulc_uri = os.path.join(intermediate_dir, 'aligned_lulc.tif')
    out_pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
        preprocessed_data['aligned_dem_uri'])
    tmp_dem_uri = pygeoprocessing.geoprocessing.temporary_filename()
    pygeoprocessing.geoprocessing.align_dataset_list(
        [aligned_dem_uri, args['lulc_uri']],
        [tmp_dem_uri, aligned_lulc_uri], ['nearest'] * 2,
        out_pixel_size, 'dataset', 0, dataset_to_bound_index=0,
        aoi_uri=args['watersheds_uri'])
    os.remove(tmp_dem_uri)

    #classify streams from the flow accumulation raster
    LOGGER.info("Classifying streams from flow accumulation raster")
    stream_uri = os.path.join(intermediate_dir, 'stream%s.tif' % file_suffix)
    pygeoprocessing.routing.stream_threshold(
        flow_accumulation_uri,
        float(args['threshold_flow_accumulation']), stream_uri)
    stream_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(stream_uri)
    dem_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(args['dem_uri'])

    if 'drainage_uri' in args and args['drainage_uri'] != '':
        def add_drainage(stream, drainage):
            return numpy.where(drainage == 1, 1, stream)

        stream_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
            stream_uri)
        #add additional drainage to the stream
        drainage_uri = os.path.join(output_dir, 'drainage%s.tif' % file_suffix)
        pygeoprocessing.geoprocessing.vectorize_datasets(
            [stream_uri, args['drainage_uri']], add_drainage, drainage_uri,
            gdal.GDT_Byte, stream_nodata, out_pixel_size, "intersection",
            dataset_to_align_index=0, vectorize_op=False)
        stream_uri = drainage_uri

    #Calculate the W factor
    LOGGER.info('calculate per pixel W')
    original_w_factor_uri = os.path.join(
        intermediate_dir, 'w_factor%s.tif' % file_suffix)
    thresholded_w_factor_uri = os.path.join(
        intermediate_dir, 'thresholded_w_factor%s.tif' % file_suffix)
    #map lulc to biophysical table
    lulc_to_c = dict(
        [(lulc_code, float(table['usle_c'])) for
         (lulc_code, table) in biophysical_table.items()])
    lulc_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
        aligned_lulc_uri)
    w_nodata = -1.0

    pygeoprocessing.geoprocessing.reclassify_dataset_uri(
        aligned_lulc_uri, lulc_to_c, original_w_factor_uri,
        gdal.GDT_Float64, w_nodata, exception_flag='values_required')

    def threshold_w(w_val):
        '''Threshold w to 0.001'''
        w_val_copy = w_val.copy()
        nodata_mask = w_val == w_nodata
        w_val_copy[w_val < 0.001] = 0.001
        w_val_copy[nodata_mask] = w_nodata
        return w_val_copy

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [original_w_factor_uri], threshold_w, thresholded_w_factor_uri,
        gdal.GDT_Float64, w_nodata, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)

    cp_factor_uri = os.path.join(
        intermediate_dir, 'cp_factor%s.tif' % file_suffix)

    lulc_to_cp = dict(
        [(lulc_code, float(table['usle_c']) * float(table['usle_p'])) for
         (lulc_code, table) in biophysical_table.items()])
    cp_nodata = -1.0

    pygeoprocessing.geoprocessing.reclassify_dataset_uri(
        aligned_lulc_uri, lulc_to_cp, cp_factor_uri,
        gdal.GDT_Float64, cp_nodata, exception_flag='values_required')

    LOGGER.info('calculating rkls')
    rkls_uri = os.path.join(output_dir, 'rkls%s.tif' % file_suffix)
    calculate_rkls(
        ls_uri, aligned_erosivity_uri, aligned_erodibility_uri, stream_uri,
        rkls_uri)

    LOGGER.info('calculating USLE')
    usle_uri = os.path.join(output_dir, 'usle%s.tif' % file_suffix)
    nodata_rkls = pygeoprocessing.geoprocessing.get_nodata_from_uri(rkls_uri)
    nodata_cp = pygeoprocessing.geoprocessing.get_nodata_from_uri(
        cp_factor_uri)
    nodata_usle = -1.0

    def mult_rkls_cp(rkls, cp_factor, stream):
        return numpy.where(
            (rkls == nodata_rkls) | (cp_factor == nodata_cp),
            nodata_usle, rkls * cp_factor * (1 - stream))

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [rkls_uri, cp_factor_uri, stream_uri], mult_rkls_cp, usle_uri,
        gdal.GDT_Float64, nodata_usle, out_pixel_size, "intersection",
        dataset_to_align_index=0, aoi_uri=args['watersheds_uri'],
        vectorize_op=False)

    #calculate W_bar
    zero_absorption_source_uri = (
        pygeoprocessing.geoprocessing.temporary_filename())
    loss_uri = pygeoprocessing.geoprocessing.temporary_filename()
    #need this for low level route_flux function
    pygeoprocessing.geoprocessing.make_constant_raster_from_base_uri(
        aligned_dem_uri, 0.0, zero_absorption_source_uri)

    flow_accumulation_nodata = (
        pygeoprocessing.geoprocessing.get_nodata_from_uri(
            flow_accumulation_uri))

    w_accumulation_uri = os.path.join(
        intermediate_dir, 'w_accumulation%s.tif' % file_suffix)
    s_accumulation_uri = os.path.join(
        intermediate_dir, 's_accumulation%s.tif' % file_suffix)
    for factor_uri, accumulation_uri in [
            (thresholded_w_factor_uri, w_accumulation_uri),
            (thresholded_slope_uri, s_accumulation_uri)]:
        LOGGER.info("calculating %s", accumulation_uri)
        pygeoprocessing.routing.route_flux(
            flow_direction_uri, aligned_dem_uri, factor_uri,
            zero_absorption_source_uri, loss_uri, accumulation_uri,
            'flux_only', aoi_uri=args['watersheds_uri'])

    LOGGER.info("calculating w_bar")

    w_bar_uri = os.path.join(intermediate_dir, 'w_bar%s.tif' % file_suffix)
    w_bar_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
        w_accumulation_uri)
    s_bar_uri = os.path.join(intermediate_dir, 's_bar%s.tif' % file_suffix)
    s_bar_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
        s_accumulation_uri)
    for bar_nodata, accumulation_uri, bar_uri in [
            (w_bar_nodata, w_accumulation_uri, w_bar_uri),
            (s_bar_nodata, s_accumulation_uri, s_bar_uri)]:
        LOGGER.info("calculating %s", accumulation_uri)

        def bar_op(base_accumulation, flow_accumulation):
            return numpy.where(
                (base_accumulation != bar_nodata) &
                (flow_accumulation != flow_accumulation_nodata),
                base_accumulation / flow_accumulation, bar_nodata)
        pygeoprocessing.geoprocessing.vectorize_datasets(
            [accumulation_uri, flow_accumulation_uri], bar_op, bar_uri,
            gdal.GDT_Float32, bar_nodata, out_pixel_size, "intersection",
            dataset_to_align_index=0, vectorize_op=False)

    LOGGER.info('calculating d_up')
    d_up_uri = os.path.join(intermediate_dir, 'd_up%s.tif' % file_suffix)
    cell_area = out_pixel_size ** 2
    d_up_nodata = -1.0

    def d_up(w_bar, s_bar, flow_accumulation):
        """Calculate the d_up index
            w_bar * s_bar * sqrt(upstream area) """
        d_up_array = w_bar * s_bar * numpy.sqrt(flow_accumulation * cell_area)
        return numpy.where(
            (w_bar != w_bar_nodata) & (s_bar != s_bar_nodata) &
            (flow_accumulation != flow_accumulation_nodata),
            d_up_array, d_up_nodata)

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [w_bar_uri, s_bar_uri, flow_accumulation_uri], d_up, d_up_uri,
        gdal.GDT_Float32, d_up_nodata, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)

    LOGGER.info('calculate WS factor')
    ws_factor_inverse_uri = os.path.join(
        intermediate_dir, 'ws_factor_inverse%s.tif' % file_suffix)
    ws_nodata = -1.0
    slope_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
        preprocessed_data['thresholded_slope_uri'])

    def ws_op(w_factor, s_factor):
        #calculating the inverse so we can use the distance to stream factor
        #function
        return numpy.where(
            (w_factor != w_nodata) & (s_factor != slope_nodata),
            1.0 / (w_factor * s_factor), ws_nodata)

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [thresholded_w_factor_uri, thresholded_slope_uri], ws_op,
        ws_factor_inverse_uri, gdal.GDT_Float32, ws_nodata, out_pixel_size,
        "intersection", dataset_to_align_index=0, vectorize_op=False)

    LOGGER.info('calculating d_dn')
    d_dn_uri = os.path.join(intermediate_dir, 'd_dn%s.tif' % file_suffix)
    pygeoprocessing.routing.routing_core.distance_to_stream(
        flow_direction_uri, stream_uri, d_dn_uri,
        factor_uri=ws_factor_inverse_uri)

    LOGGER.info('calculate ic')
    ic_factor_uri = os.path.join(
        intermediate_dir, 'ic_factor%s.tif' % file_suffix)
    ic_nodata = -9999.0
    d_up_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(d_up_uri)
    d_dn_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(d_dn_uri)

    def ic_op(d_up, d_dn):
        nodata_mask = (d_up == d_up_nodata) | (d_dn == d_dn_nodata)
        return numpy.where(
            nodata_mask, ic_nodata, numpy.log10(d_up / d_dn))

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [d_up_uri, d_dn_uri], ic_op, ic_factor_uri,
        gdal.GDT_Float32, ic_nodata, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)

    LOGGER.info('calculate sdr')
    sdr_factor_uri = os.path.join(
        intermediate_dir, 'sdr_factor%s.tif' % file_suffix)
    sdr_nodata = -9999.0
    k = float(args['k_param'])
    ic_0 = float(args['ic_0_param'])
    sdr_max = float(args['sdr_max'])

    def sdr_op(ic_factor, stream):
        nodata_mask = (ic_factor == ic_nodata)
        sdr = numpy.where(
            nodata_mask, sdr_nodata,
            sdr_max / (1 + numpy.exp((ic_0 - ic_factor) / k)))
        #mask out the stream layer
        return numpy.where(stream == 1, 0.0, sdr)

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [ic_factor_uri, stream_uri], sdr_op, sdr_factor_uri,
        gdal.GDT_Float32, sdr_nodata, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)

    LOGGER.info('calculate sed export')
    sed_export_uri = os.path.join(output_dir, 'sed_export%s.tif' % file_suffix)
    sed_export_nodata = -1.0

    def sed_export_op(usle, sdr):
        nodata_mask = (usle == nodata_usle) | (sdr == sdr_nodata)
        return numpy.where(
            nodata_mask, sed_export_nodata, usle * sdr)

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [usle_uri, sdr_factor_uri], sed_export_op, sed_export_uri,
        gdal.GDT_Float32, sed_export_nodata, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)

    LOGGER.info('calculate sediment retention index')

    def sediment_index_op(rkls, usle, sdr_factor):
        nodata_mask = (
            (rkls == nodata_rkls) | (usle == nodata_usle) |
            (sdr_factor == sdr_nodata))
        return numpy.where(
            nodata_mask, nodata_sed_retention_index,
            (rkls - usle) * sdr_factor / sdr_max)

    nodata_sed_retention_index = -1
    sed_retention_index_uri = os.path.join(
        output_dir, 'sed_retention_index%s.tif' % file_suffix)

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [rkls_uri, usle_uri, sdr_factor_uri], sediment_index_op,
        sed_retention_index_uri, gdal.GDT_Float32,
        nodata_sed_retention_index, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)

    LOGGER.info('calculate sediment retention')
    d_up_bare_soil_uri = os.path.join(
        intermediate_dir, 'd_up_bare_soil%s.tif' % file_suffix)
    d_up_nodata = -1.0

    def d_up_bare_soil_op(s_bar, flow_accumulation):
        """Calculate the d_up index for bare soil
            1.0 * s_bar * sqrt(upstream area) """
        d_up_array = s_bar * numpy.sqrt(flow_accumulation * cell_area)
        return numpy.where(
            (s_bar != s_bar_nodata) &
            (flow_accumulation != flow_accumulation_nodata),
            d_up_array, d_up_nodata)

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [s_bar_uri, flow_accumulation_uri], d_up_bare_soil_op,
        d_up_bare_soil_uri, gdal.GDT_Float32, d_up_nodata, out_pixel_size,
        "intersection", dataset_to_align_index=0, vectorize_op=False)

    #when calculating d_dn_bare the c factors are all 1,
    #so we invert just s, then accumulate it downstream
    s_factor_inverse_uri = os.path.join(
        intermediate_dir, 's_factor_inverse%s.tif' % file_suffix)
    s_nodata = -1.0

    def s_op(s_factor):
        #calculating the inverse so we can use the distance to stream factor
        #function
        return numpy.where(
            s_factor != slope_nodata, 1.0 / s_factor, s_nodata)

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [thresholded_slope_uri], s_op, s_factor_inverse_uri,
        gdal.GDT_Float32, s_nodata, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)

    d_dn_bare_soil_uri = os.path.join(
        intermediate_dir, 'd_dn_bare_soil%s.tif' % file_suffix)
    d_up_nodata = -1.0
    pygeoprocessing.routing.routing_core.distance_to_stream(
        flow_direction_uri, stream_uri, d_dn_bare_soil_uri,
        factor_uri=s_factor_inverse_uri)

    ic_factor_bare_soil_uri = os.path.join(
        intermediate_dir, 'ic_factor_bare_soil%s.tif' % file_suffix)
    ic_bare_soil_nodata = -9999.0

    def ic_bare_soil_op(d_up_bare_soil, d_dn_bare_soil):
        nodata_mask = (
            (d_up_bare_soil == d_up_nodata) |
            (d_dn_bare_soil == d_dn_nodata))
        return numpy.where(
            nodata_mask, ic_nodata,
            numpy.log10(d_up_bare_soil / d_dn_bare_soil))

    pygeoprocessing.geoprocessing.vectorize_datasets(
        [d_up_bare_soil_uri, d_dn_bare_soil_uri], ic_bare_soil_op,
        ic_factor_bare_soil_uri, gdal.GDT_Float32, ic_nodata, out_pixel_size,
        "intersection", dataset_to_align_index=0, vectorize_op=False)

    sdr_factor_bare_soil_uri = os.path.join(intermediate_dir,
'sdr_factor_bare_soil%s.tif' % file_suffix)

def sdr_bare_soil_op(ic_bare_soil_factor, stream):
    nodata_mask = (ic_bare_soil_factor == ic_nodata)
    sdr_bare_soil = numpy.where(
        nodata_mask, sdr_nodata,
        sdr_max/(1+numpy.exp((ic_0-ic_bare_soil_factor)/k)))
    #mask out the stream layer
    return numpy.where(stream == 1, 0.0, sdr_bare_soil)

pygeoprocessing.geoprocessing.vectorize_datasets(
    [ic_factor_bare_soil_uri, stream_uri], sdr_bare_soil_op,
    sdr_factor_bare_soil_uri, gdal.GDT_Float32, sdr_nodata,
    out_pixel_size, "intersection", dataset_to_align_index=0,
    vectorize_op=False)

def sediment_retention_bare_soil_op(
        rkls, usle, stream_factor, sdr_factor, sdr_factor_bare_soil):
    nodata_mask = (
        (rkls == nodata_rkls) | (usle == nodata_usle) |
        (stream_factor == stream_nodata) | (sdr_factor == sdr_nodata) |
        (sdr_factor_bare_soil == sdr_nodata))
    return numpy.where(
        nodata_mask, nodata_sediment_retention,
        (rkls * sdr_factor_bare_soil - usle * sdr_factor) *
        (1 - stream_factor))

nodata_sediment_retention = -1
sed_retention_bare_soil_uri = os.path.join(
    intermediate_dir, 'sed_retention%s.tif' % file_suffix)

pygeoprocessing.geoprocessing.vectorize_datasets(
    [rkls_uri, usle_uri, stream_uri, sdr_factor_uri,
     sdr_factor_bare_soil_uri],
    sediment_retention_bare_soil_op, sed_retention_bare_soil_uri,
    gdal.GDT_Float32, nodata_sediment_retention, out_pixel_size,
    "intersection", dataset_to_align_index=0, vectorize_op=False)

LOGGER.info('generating report')
esri_driver = ogr.GetDriverByName('ESRI Shapefile')

field_summaries = {
    'usle_tot': pygeoprocessing.geoprocessing.aggregate_raster_values_uri(
        usle_uri, args['watersheds_uri'], 'ws_id').total,
    'sed_export': pygeoprocessing.geoprocessing.aggregate_raster_values_uri(
        sed_export_uri, args['watersheds_uri'], 'ws_id').total,
    'sed_retent': pygeoprocessing.geoprocessing.aggregate_raster_values_uri(
        sed_retention_bare_soil_uri, args['watersheds_uri'], 'ws_id').total,
}

original_datasource = ogr.Open(args['watersheds_uri'])
watershed_output_datasource_uri = os.path.join(
    output_dir, 'watershed_results_sdr%s.shp' % file_suffix)
#If there is already an existing shapefile with the same name and path,
#delete it, then copy the input shapefile into the designated output folder
if os.path.isfile(watershed_output_datasource_uri):
    os.remove(watershed_output_datasource_uri)
datasource_copy = esri_driver.CopyDataSource(
    original_datasource, watershed_output_datasource_uri)
layer = datasource_copy.GetLayer()

for field_name in field_summaries:
    field_def = ogr.FieldDefn(field_name, ogr.OFTReal)
    layer.CreateField(field_def)

#Write the aggregate values into each feature, defaulting to 0.0 when a
#watershed id has no aggregated value
for feature_id in range(layer.GetFeatureCount()):
    feature = layer.GetFeature(feature_id)
    ws_id = feature.GetFieldAsInteger('ws_id')
    for field_name in field_summaries:
        try:
            feature.SetField(
                field_name, float(field_summaries[field_name][ws_id]))
        except KeyError:
            LOGGER.warning(
                'no aggregated value for field %s in watershed %s',
                field_name, ws_id)
            feature.SetField(field_name, 0.0)
    #Save back to datasource
    layer.SetFeature(feature)
original_datasource.Destroy()
datasource_copy.Destroy()

for ds_uri in [zero_absorption_source_uri, loss_uri]:
    try:
        os.remove(ds_uri)
    except OSError as e:
        LOGGER.warning("couldn't remove %s because it's still open", ds_uri)
        LOGGER.warning(e)
86a328f5f00fd55398b2128233a65fe646a93540
3,634,270
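# A minimal numeric sanity check of the SDR logistic used in the model code
# above. The parameter values (k, ic_0, sdr_max) and the d_up/d_dn components
# are illustrative assumptions, not calibrated defaults:
#     IC  = log10(d_up / d_dn)
#     SDR = sdr_max / (1 + exp((ic_0 - IC) / k))
import numpy

k, ic_0, sdr_max = 2.0, 0.5, 0.8
d_up, d_dn = 150.0, 30.0            # hypothetical connectivity components
ic = numpy.log10(d_up / d_dn)       # connectivity index, ~0.699
sdr = sdr_max / (1 + numpy.exp((ic_0 - ic) / k))
print(round(ic, 3), round(sdr, 3))  # 0.699 0.42 (always below sdr_max)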
def user_groups(username, htgroup_fn, strict=True): """ Returns a list of group names for the given user """ groups = [] for group_name, users in read_groups(htgroup_fn, strict=strict).items(): if username in users: groups.append(group_name) return groups
adc452c60c25e672829d3efa99a0c4bf41213e64
3,634,271
import numpy as np


def uffangle(i,j,k,boij,bojk,theta):
    """ Return the UFF parameters for an angle interaction in
    Gromacs units (degrees, kJ mol^-1 rad^-2).
    Not used for the nebterpolator but I decided to keep this code.
    i = Element symbol (string)
    j = Element symbol for the middle atom (string)
    k = Element symbol (string)
    boij = Bond order for i-j bond
    bojk = Bond order for j-k bond
    theta = Angle (in radians)
    """
    # uffbond, UFFElements and b are module-level UFF tables/constants
    rij = uffbond(i,j,boij)[0]
    rjk = uffbond(j,k,bojk)[0]
    if abs(theta) < 1.0e-3:
        theta = UFFElements[j].th0
    rik = np.sqrt(rij**2 + rjk**2 - 2*rij*rjk*np.cos(theta))
    zi = UFFElements[i].z
    zk = UFFElements[k].z
    kijk = b * zi * zk * (3*rij*rjk*(1-np.cos(theta)**2) -
                          rik**2*np.cos(theta)) / rik**5
    #Division here needs to be reviewed
    # print "theta = %.4f nm kij = %.4e kJ/mol/rad**2" % (theta*180./pi,kijk*4.184)
    return theta*180./np.pi, kijk*4.184
f57527c0adbff255d64304c11623cb706b3dc784
3,634,272
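# Hedged usage sketch for uffangle above. 'C_3' is assumed to be a valid key
# of the module-level UFFElements table (sp3 carbon in UFF naming); the bond
# orders and input angle are illustrative, not reference values.
#
#   theta0, k_theta = uffangle('C_3', 'C_3', 'C_3', 1.0, 1.0, 1.911)
#   # theta0 is echoed back in degrees (~109.5), k_theta in kJ mol^-1 rad^-2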
import random


def img_get_random_patch(img,w,h):
    """Get a random patch of a specific width and height from an image"""

    # Note that for this function it is the user's responsibility to ensure
    # the image size is big enough. We'll do an assertion to help but...

    # Figure out the maximum starting point within the image that still
    # fits the patch
    max_x = img.shape[1] - w
    max_y = img.shape[0] - h

    # Make sure the size is big enough
    assert max_x >= 0, 'Trying to get a patch wider than the image width'
    assert max_y >= 0, 'Trying to get a patch taller than the image height'

    # Get a random starting point
    x = random.randint(0,max_x)
    y = random.randint(0,max_y)

    # Get the patch within the image
    image_patch = img[y:y+h,x:x+w, ...]

    # All done
    return image_patch
41ce199eb5ab8eb136f740eb2e1b495226510690
3,634,273
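# Quick usage check for img_get_random_patch above, assuming the function is
# in scope: crop a 32x32 patch from a 100x120 RGB array.
import numpy as np

img = np.zeros((100, 120, 3), dtype=np.uint8)
patch = img_get_random_patch(img, w=32, h=32)
assert patch.shape == (32, 32, 3)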
def list_to_dict(items):
    """Create dictionary from a parenthesized list of attribute/value pairs
    :param items: list
    :return: dict
    """
    if not items:
        items = []  # minimal
    # dict(zip(items[0::2], items[1::2]))

    def recursive(item):
        """Check value of parenthesized list and if it is a sublist
        call list_to_dict recursively
        :param item:
        :return:
        """
        if is_iterable(item):
            return list_to_dict(item)
        return item

    return dict(zip(items[0::2], [recursive(item) for item in items[1::2]]))
a58ed6efe88592f0fa9af1c2daf9a132272e633f
3,634,274
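# Worked example for list_to_dict above. is_iterable is module-internal; a
# minimal stand-in (an assumption, not the original helper) is defined so the
# call runs in isolation.
def is_iterable(item):
    return isinstance(item, (list, tuple))

print(list_to_dict(['a', 1, 'b', ['c', 2]]))
# -> {'a': 1, 'b': {'c': 2}}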
import numpy as np


def AVERAGEIF(avg_list, condition_list, condition):
    """Find the average of a list based on a specific condition in another list.

    Parameters
    ----------
    avg_list : list or array
        list or array that you will take the average of. Length must match
        condition_list.
    condition_list : list or array
        list or array that will be checked against condition for taking the
        average. Length must match avg_list.
    condition : string
        string to be checked against condition list.

    Returns
    -------
    float
        The average of the values in avg_list whose matching entries in
        condition_list equal condition.
    """
    if(len(avg_list) == len(condition_list)):
        if(type(condition) == str):
            avg_list = np.array(avg_list)
            condition_list = np.array(condition_list)
            return(np.mean(avg_list[condition_list == condition]))
        else:
            print('Invalid type: condition must be a string.')
    else:
        print('Length of avg_list and condition_list must match.')
1f34d34612626d500e534512eb493086ec19bce0
3,634,275
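# Usage example for AVERAGEIF above (spreadsheet-style conditional average),
# assuming the function is in scope:
scores = [10, 20, 30, 40]
labels = ['a', 'b', 'a', 'b']
print(AVERAGEIF(scores, labels, 'a'))  # 20.0 (mean of 10 and 30)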
import numpy as e  # assumption: `e` is this module's numpy-compatible math engine


def fresnel_ts(n0, n1, theta0, theta1):
    """Compute the "t sub s" fresnel coefficient.

    This is associated with transmission of the s-polarized electric field.

    Parameters
    ----------
    n0 : `float`
        refractive index of the "left" material
    n1 : `float`
        refractive index of the "right" material
    theta0 : `float`
        angle of incidence, radians
    theta1 : `float`
        angle of refraction, radians

    Returns
    -------
    `float`
        the fresnel coefficient "t sub s"

    """
    num = 2 * n0 * e.cos(theta0)
    den = n0 * e.cos(theta0) + n1 * e.cos(theta1)
    return num / den
faf68134372ad5df8c31107e7ff8b006ee7a1f66
3,634,276
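# Usage sketch for fresnel_ts above: theta1 follows from Snell's law,
# n0*sin(theta0) = n1*sin(theta1). Values are illustrative (air to glass).
import numpy as np

n0, n1 = 1.0, 1.5
theta0 = np.radians(30)
theta1 = np.arcsin(n0 * np.sin(theta0) / n1)
print(fresnel_ts(n0, n1, theta0, theta1))  # ~0.76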
def get_client_id_from_access_token(aws_region, aws_user_pool, token): """ Pulls the client ID out of an Access Token """ claims = get_claims(aws_region, aws_user_pool, token) if claims.get('token_use') != 'access': raise ValueError('Not an access token') return claims.get('client_id')
6ce2d5771b863e4bf8da367284486f5618d09473
3,634,277
def bgloop(tag, *iterables, runner=None): """Run a loop in a background thread.""" if runner is None: runner = run_thread def decorator(func): if tag in bg_instances and bg_instances[tag].running: raise RuntimeError("Already running loop") bg_instances[tag] = Object() bg_instances[tag].running = True bg_instances[tag].handle = runner(_run_loop, tag, func, iterables) return func def _run_loop(tag, func, iterables): try: bg_instances[tag].running = True for loop, item in Loop.over( fast_product(*iterables), length=product_len(*iterables) ): if not bg_instances[tag].running: break func(loop, *item) finally: bg_instances[tag].running = False return decorator
553e650ecc0b640e0cea3ad4c714cb3d66d327b4
3,634,278
from typing import AnyStr
from typing import List
from typing import Dict

import sqlalchemy as sa
from sqlalchemy import bindparam


def get_metrics_rating(start: AnyStr, end: AnyStr, tenant_id: AnyStr, namespaces: List[AnyStr]) -> List[Dict]:
    """
    Get the rating for metrics.

    :start (AnyStr) A timestamp, as a string, to represent the starting time.
    :end (AnyStr) A timestamp, as a string, to represent the ending time.
    :tenant_id (AnyStr) A string representing the tenant, only used by decorators.
    :namespaces (List[AnyStr]) A list of namespaces accessible by the tenant.

    Return the results of the query as a list of dictionary.
    """
    qry = sa.text("""
        SELECT frame_begin,
               sum(frame_price) as frame_price,
               metric
        FROM frames
        WHERE frame_begin >= :start
        AND frame_end < :end
        AND namespace != 'unspecified'
        AND pod != 'unspecified'
        AND namespace IN :namespaces
        GROUP BY frame_begin, metric
        ORDER BY frame_begin, metric
    """).bindparams(bindparam('namespaces', expanding=True))

    params = {
        'start': start,
        'end': end,
        'tenant_id': tenant_id,
        'namespaces': namespaces
    }
    return process_query(qry, params)
852ce6a21f03b02691748d6d9a654edf10b7ed54
3,634,279
from typing import List


def calculate_previous_risk_score_weightings() -> List[float]:
    """
    Creates a risk score weighting distribution of size MAX_PREVIOUS_INCIDENTS
    such that the distribution is a decreasing linear series that sums to 1.

    For example, with MAX_PREVIOUS_INCIDENTS == 3, this function returns
    [0.5, 0.333..., 0.166...], or [3/6, 2/6, 1/6].

    This is designed to put more weighting on the first most recent incident
    types, as well as lessen the difference between the number of incident
    types a client has.
    """
    mpi = MAX_PREVIOUS_INCIDENTS
    # arithmetic sum formula: 1 + 2 + ... + mpi
    denominator = mpi * (mpi+1) / 2
    return [(mpi-i)/denominator for i in range(mpi)]
6064c345973335026c49522aada829eec67a5ba6
3,634,280
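# Worked check for the weighting function above, assuming the function is in
# scope and the module-level constant is set as in the docstring example:
MAX_PREVIOUS_INCIDENTS = 3
weights = calculate_previous_risk_score_weightings()
print(weights)       # [0.5, 0.3333..., 0.1666...], i.e. [3/6, 2/6, 1/6]
print(sum(weights))  # 1.0 (up to float rounding)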
import torch
import torch.nn as nn


def warp(x, flo, device):
    """
    warp an image/tensor (im2) back to im1, according to the optical flow

    x: [B, C, H, W] (im2)
    flo: [B, 2, H, W] flow
    """
    B, C, H, W = x.size()
    # mesh grid
    xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
    yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
    xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
    yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
    grid = torch.cat((xx, yy), 1).float()

    if x.is_cuda:
        grid = grid.to(device)
    vgrid = grid + flo
    vgrid = vgrid.to(device)

    # scale grid to [-1,1]
    vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :] / max(W - 1, 1) - 1.0
    vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :] / max(H - 1, 1) - 1.0

    vgrid = vgrid.permute(0, 2, 3, 1)
    output = nn.functional.grid_sample(x, vgrid)
    mask = torch.autograd.Variable(torch.ones(x.size())).to(device)
    mask = nn.functional.grid_sample(mask, vgrid)

    # if W==128:
    # np.save('mask.npy', mask.cpu().data.numpy())
    # np.save('warp.npy', output.cpu().data.numpy())

    mask[mask < 0.9999] = 0
    mask[mask > 0] = 1

    return output * mask
d009beeab36a84ba2659d87d22dc98ad0b20bf52
3,634,281
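# Usage sketch for warp above (shapes only; exact border values depend on the
# torch grid_sample align_corners default, which changed in torch 1.3):
import torch

x = torch.rand(1, 3, 8, 8)     # image to warp (im2)
flo = torch.zeros(1, 2, 8, 8)  # zero flow ~ near-identity warp
out = warp(x, flo, device=torch.device('cpu'))
print(out.shape)               # torch.Size([1, 3, 8, 8])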
import os def expand_path(filename: str) -> str: """ Expands variables (user and environment) in a file name. :param filename: File name, possibly containing variables. :return: File name with variables expanded. """ return os.path.expandvars(os.path.expanduser(filename))
b6dae3491edbaa00a5f73959b2227ad2fe6f506e
3,634,282
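# Usage example for expand_path above:
import os

os.environ['PROJECT'] = 'demo'
print(expand_path('~/$PROJECT/config.yml'))
# e.g. /home/user/demo/config.yml (home directory is platform-dependent)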
def FindVolumeClose(hSearch): """Close a search handle opened by FindFirstVolume, typically after the last volume has been returned. """ if kernel32.FindVolumeClose(hSearch) == 0: return error(x_kernel32, "FindVolumeClose")
48a8c4629d9bc10b4a8befa33f399ed1a3727347
3,634,283
import os import logging def load_loggers(debug, persistent_storage): """ Loads all the loggers :param debug: is Debug enabled :param persistent_storage: is persistent storage enabled :return logger: main logger of the application :return storage_loggers: loggers for the persistent data engine """ # Load log level configuration file worker_path = os.path.dirname(os.path.realpath(__file__)) if debug: # Debug init_logging_worker(worker_path + '/../../log/logging_debug.json') else: # Default init_logging_worker(worker_path + '/../../log/logging_off.json') # Define logger facilities logger = logging.getLogger('pycompss.worker.piper_worker') storage_loggers = [] if persistent_storage: storage_loggers.append(logging.getLogger('dataclay')) storage_loggers.append(logging.getLogger('hecuba')) storage_loggers.append(logging.getLogger('redis')) storage_loggers.append(logging.getLogger('storage')) return logger, storage_loggers
6bbfc68cdd31c47d7d4f8f5388a71e82b116f342
3,634,284
from functools import wraps

from tornado.web import HTTPError


def authenticated(method):
    """
    Decorate methods with this to require that the Authorization header is filled.

    On failure, raises a 401 or 403 error.

    Raises:
        :py:class:`tornado.web.HTTPError`
    """
    @wraps(method)
    async def wrapper(self, *args, **kwargs):
        if not self.current_user:
            if self.auth_data:
                raise HTTPError(403, reason="authentication failed")
            else:
                self.set_status(401)
                self.set_header('WWW-Authenticate', 'Basic realm=Restricted')
                self.finish()
        else:
            return await method(self, *args, **kwargs)
    return wrapper
bf64966f14d76b0f755f1d17672540a361c99c35
3,634,285
import numpy as np


def _calculate_f1(conf_matrix):
    """
    Calculate classification macro F1 score.

    Parameters
    ----------
    conf_matrix : pandas.DataFrame
        DataFrame of confusion matrix.

    Returns
    -------
    f1_total : float
        Total classification macro F1 score including detection FP and FN.
    f1_target : float
        Classification macro F1 score excluding detection FP. This metric
        is target-aware, meaning that it only takes GT classification
        into account.
    f1_pred : float
        Classification macro F1 score excluding both detection FP and FN.
        This metric is prediction-aware, meaning that it calculates
        classification F1 solely on TP prediction thus completely decouples
        detection and classification.

    """
    conf_matrix_wo_ignore = conf_matrix.values[:, :-1]
    # minus 1 since only four GT classes need to be calculated
    tp = np.diag(conf_matrix_wo_ignore)[:-1]
    fp = np.array([conf_matrix_wo_ignore[i, :].sum() - tp[i]
                   for i in range(conf_matrix_wo_ignore.shape[0] - 1)])
    fn = np.array([conf_matrix_wo_ignore[:, i].sum() - tp[i]
                   for i in range(conf_matrix_wo_ignore.shape[0] - 1)])

    # calculate total F1
    precision = tp / (tp + fp + 1e-8)
    recall = tp / (tp + fn + 1e-8)
    f1_total = np.mean((2 * precision * recall) / (precision + recall + 1e-8))

    # calculate target-aware F1 which excludes detection FP
    precision = tp / (tp + (fp - conf_matrix_wo_ignore[:-1, -1]) + 1e-8)
    recall = tp / (tp + fn + 1e-8)
    f1_target = np.mean((2 * precision * recall)\
        / (precision + recall + 1e-8))

    # calculate prediction-aware F1 which excludes both detection FP and FN
    precision = tp / (tp + (fp - conf_matrix_wo_ignore[:-1, -1]) + 1e-8)
    recall = tp / (tp + (fn - conf_matrix_wo_ignore[-1, :-1]) + 1e-8)
    f1_pred = np.mean((2 * precision * recall) / (precision + recall + 1e-8))

    return f1_total, f1_target, f1_pred
9dded3affbc164487020898cb46765e5c1dc8553
3,634,286
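# Usage sketch for _calculate_f1 above. The expected layout is inferred from
# the indexing: rows are predictions (last row = missed detections), columns
# are ground truth (second-to-last = detection FP, last = ignored). A clean
# diagonal matrix should score ~1.0 on all three metrics.
import numpy as np
import pandas as pd

conf = pd.DataFrame(np.eye(5, 6) * 10)  # 4 classes, all counts on the diagonal
f1_total, f1_target, f1_pred = _calculate_f1(conf)
print(round(f1_total, 3), round(f1_target, 3), round(f1_pred, 3))  # 1.0 1.0 1.0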
def prepare_wiki_content(content, indented=True): """ Set wiki page content """ if indented: lines = content.split("\n") content = " ".join(i + "\n" for i in lines) return content
14daea5cdb509b333c2aead6dcb453a82e73ce8d
3,634,287
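# Behavior check for prepare_wiki_content above; note that " ".join only
# inserts the space *between* elements, so the first line is not indented:
print(repr(prepare_wiki_content('line one\nline two')))
# 'line one\n line two\n'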
from flask import request  # assumed: these handlers run inside a Flask app


def get_user_args():
    """ **get_user_args** fetches user arguments from the request query string"""
    display_name = request.args.get('display_name')
    email = request.args.get('email')
    email_verified = request.args.get('email_verified')
    uid = request.args.get('uid')
    cell = request.args.get('cell')
    provider_data = request.args.get('provider_data')
    access_token = request.args.get('access_token')
    return access_token, cell, display_name, email, email_verified, provider_data, uid
679836f40c441a6b61ef05bf9f96ef328c4751a4
3,634,288
import numpy as np


def calculate_camera_center(P: np.ndarray, K: np.ndarray, R_T: np.ndarray) -> np.ndarray:
    """
    Returns the camera center for a given projection matrix.

    Args:
    -   P: A numpy array of shape (3, 4) representing the projection matrix
    -   K: A numpy array of shape (3, 3) of camera intrinsics (accepted for
        interface consistency but unused here)
    -   R_T: A numpy array of shape (3, 3), the transposed rotation matrix

    Returns:
    -   cc: A numpy array of shape (3,) representing the camera center
        location in world coordinates
    """
    # P[:, -1] is the translation column t; the center is -R^T t
    cc = np.dot(-R_T, P[:, -1])
    return cc
6bec2422af375cd1b0c6570a38f6afe60c7f927e
3,634,289
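# Worked example for calculate_camera_center above: with P = [R | t] and
# R = I, the center -R^T t is just -t. K is accepted but unused.
import numpy as np

R = np.eye(3)
t = np.array([1.0, 2.0, 3.0])
P = np.hstack([R, t.reshape(3, 1)])
K = np.eye(3)
print(calculate_camera_center(P, K, R.T))  # [-1. -2. -3.]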
def __need_local_verify(ins: VerifyTokenLocal = None):
    """
    Check whether ins exists and is a VerifyTokenLocal instance.
    :param ins:
    :return:
    """
    if ins is None:
        return False  # None: no local verification is needed
    if not isinstance(ins, VerifyTokenLocal):
        # has a value but is not a VerifyTokenLocal instance, so raise
        raise WrongLocalVerifyTokenInsError(ins)
    return True  # all checks passed: local verification is needed
b418cedc53e49dfdc93d25e822b6b8803e080d17
3,634,290
import sys
import os
import time
import subprocess


def blaster(counts, database, outname="output"):
    """
    Takes a list of counts in the format [[sequence, total reads, unique],...] then blasts
    each sequence against the RNASeqList (fasta formatted). Top match is appended to the end of each
    member of the count.
    :param counts: counts object nested list in the format of [[sequence, total reads, unique],...]
    :param database: database of reads to blast against
    :param outname: prefix for the temporary query and output files
    :return: nested list of aligned counts. [[sequence, total reads, unique reads, blast alignment outfmt 6],...]
    """
    queryLoc = outname+"queryTemp.txt"
    blastOut = outname+"blastTemp.txt"

    #Set blast location
    if sys.platform == "darwin":
        blastLoc = os.path.join(os.path.dirname(__file__), "blast/blastn")
    elif sys.platform.startswith("win"):
        blastLoc = os.path.join(os.path.dirname(__file__), "blast/blastn.exe")
    else:
        raise Exception("System is not supported")
    checkBLASTdb(database)
    dbLoc = os.path.join(os.path.dirname(__file__), "databases/"+database)

    print("Creating BLAST query")
    queryMaker(counts, outLoc=queryLoc)
    print(str(len(counts))+" queries.")
    open(blastOut, 'w').close()  #blast won't be happy if the file doesn't already exist

    #makes a call to blast everything in the query file against the database. Outputs to a temporary blast file.
    print("BLASTing...")
    start_time = time.time()
    subprocess.call([blastLoc, '-db', dbLoc, '-query', queryLoc, '-out', blastOut,
                     '-outfmt', '6', '-max_target_seqs', '1'])
    os.remove(queryLoc)
    print("BLAST Successful")
    print("BLAST took "+str(time.time()-start_time)+" seconds")

    #reads temporary blast file into hits
    f = open(blastOut, 'r')
    hits = f.readlines()
    f.close()
    os.remove(blastOut)

    for line in hits:
        #Each query was given a number by querymaker which corresponds to the index of each count
        #Each hit also has this index so a hit can be assigned to a count by that index
        templine = line.split('\t')
        i = int(templine[0])
        counts[i].append(line.rstrip())  #each count now has its BLAST hit appended to it

    return counts
4b13528c21ac6eb4c6a994457f9a7511890805aa
3,634,291
import numpy as np
import xarray as xr


def average_spectra(spec_data, t_avg, h_avg, **kwargs):
    """
    Function to time-height average Doppler spectra
    :param spec_data: list of xarray data sets containing spectra (linear units)
    :param t_avg: integer
    :param h_avg: integer
    :param kwargs: 'verbosity'
    :return: list of xarray data sets containing averaged spectra
    """
    print('averaging...') if 'verbosity' in kwargs and kwargs['verbosity'] > 0 else None
    avg_specs_list = []  # initialize empty list
    for f in range(len(spec_data)):
        # average spectra over neighbors in time-height
        avg_specs = xr.Dataset({'doppler_spectrum': xr.DataArray(
            np.zeros(spec_data[f].doppler_spectrum.shape),
            dims=['time', 'range', 'spectrum'],
            coords={'time': spec_data[f].time,
                    'range': spec_data[f].range_layers,
                    'spectrum': spec_data[f].spectrum}),
            'chirp': spec_data[f].chirp})

        if t_avg == 0 and h_avg == 0:
            avg_specs['doppler_spectrum'][:, :, :] = spec_data[f]['doppler_spectrum'].values[:, :, :]
        else:
            B = np.ones((1+t_avg*2, 1+h_avg*2))/((1+t_avg*2) * (1+h_avg*2))
            print(f'matrix B for convolution is {B}') if 'verbosity' in kwargs and kwargs['verbosity'] > 0 else None
            range_offsets = spec_data[f].chirp_start_indices.values
            for d in range(avg_specs['doppler_spectrum'].values.shape[2]):
                one_bin_avg = average_single_bin(spec_data[f]['doppler_spectrum'].values, B, d, range_offsets)
                avg_specs['doppler_spectrum'][:, :, d] = one_bin_avg
        avg_specs_list.append(avg_specs)

    return avg_specs_list
071fca747555fdfaebc717a3e1caf98e326c89d6
3,634,292
import argparse import logging def parse_args(args): """Parse command line parameters Args: args ([str]): command line parameters as list of strings Returns: :obj:`argparse.Namespace`: command line parameters namespace """ parser = argparse.ArgumentParser(description="Sync Gitlab Issue into MS Project ") parser.add_argument( "--version", action="version", version="SyncGitlab2MSProject {ver}".format(ver=__version__), ) parser.add_argument( "-v", "--verbose", dest="loglevel", help="set loglevel to INFO", action="store_const", const=logging.INFO, ) parser.add_argument( "-vv", "--very-verbose", dest="loglevel", help="set loglevel to DEBUG", action="store_const", const=logging.DEBUG, ) parser.add_argument( "--ignore-label", "-i", dest="ignore_label", help="Ignore Gitlab Issue with a match to the label", default="", type=str, ) parser.add_argument( "--force-fixed-work", dest="fixed_work", help="Set all synced issued to fixed_work, overwriting " "also already existing tasks", action="store_true", ) # TODO read from ENV parser.add_argument( "--gitlab-url", "-u", dest="gitlab_url", help="URL to the gitlab instance i.e. https://gitlab.your-company.com", default="https://gitlab.com", type=str, ) # TODO read from ENV parser.add_argument( "--gitlab-token", "-t", dest="gitlab_token", help="Gitlab personal access token", default=None, ) parser.add_argument( "gitlab_resource_type", help="Gitlab resource type to sync with", type=str, choices=["project", "group"], ) parser.add_argument( "gitlab_resource_id", help="Gitlab resource id to sync with", type=int, ) parser.add_argument( dest="project_file", help="Microsoft Project File to sync with", type=str, ) return parser.parse_args(args)
9f0534e0c38fe55ee1d9006da06245916a0a1753
3,634,293
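# Usage example for parse_args above (positional order: resource type,
# resource id, project file). This assumes the module-level __version__ is
# defined, since the version string is built at parser construction time:
args = parse_args(['-v', '--gitlab-token', 'TOKEN', 'project', '123', 'plan.mpp'])
print(args.gitlab_resource_type, args.gitlab_resource_id, args.project_file)
# project 123 plan.mpp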
def get_db_dot_fmt_strings(db_list, config, query_extension="fasta"): """ Return a list of strings that are "{db}.{format}". Where db is the name of the database and format is the extension generated by the search (eg lastx, or tbl). There is a special case for fragmented HMM dbs where we need to add ".dbatch" to the format. """ strings = [] for d in db_list: db_data = config["dbs"][d] format = db_data.get("format", "tbl") if format.startswith("last"): format = get_last_alg(format, query_extension) if "frags" in db_data and int(db_data["frags"]) > 1: format = format + ".dbatch" strings.append("{}.{}".format(d, format)) return strings
d5ef3640f131189917966149bbcc50e530b1727c
3,634,294
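# Usage example for get_db_dot_fmt_strings above, using a minimal config;
# the database names and formats are illustrative:
config = {'dbs': {
    'pfam': {'format': 'hmmer', 'frags': 4},
    'cogs': {'format': 'tbl'},
}}
print(get_db_dot_fmt_strings(['pfam', 'cogs'], config))
# ['pfam.hmmer.dbatch', 'cogs.tbl']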
import numpy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colormat
import matplotlib.cm as cmx


def colormap(exps, colorby, definedinEM, annotation=None):
    """Generate the colors in a format compatible with matplotlib"""
    if definedinEM:
        if colorby == "reads":
            color_res = []
            for i in exps.get_readsnames():
                c = exps.get_type(i, "color")
                if c[0] == "(":
                    rgb = [eval(j) for j in c.strip('()').split(',')]
                    color_res.append([v / 255 for v in rgb])
                else:
                    color_res.append(c)
        elif colorby == "regions":
            color_res = []
            for i in exps.get_regionsnames():
                c = exps.get_type(i, "color")
                if c[0] == "(":
                    rgb = [eval(j) for j in c.strip('()').split(',')]
                    color_res.append([v / 255 for v in rgb])
                else:
                    color_res.append(c)
        else:
            color_res = []
            for i in exps.fieldsDict[colorby].values():
                c = exps.get_type(i[0], "color")
                if c[0] == "(":
                    rgb = [float(j) for j in c.strip('()').split(',')]
                    color_res.append([v / 255 for v in rgb])
                else:
                    color_res.append(c)
    else:
        if annotation:
            color_res = plt.cm.Set1(numpy.linspace(0, 1, len(annotation))).tolist()
        else:
            # colors = ['lightgreen', 'pink', 'cyan', 'lightblue', 'tan', 'orange']
            # colors = plt.cm.jet(numpy.linspace(0.1, 0.9, len(gen_tags(exps, colorby)))).tolist()
            if colorby == "reads":
                ks = []
                for gr in exps.get_readsnames():
                    ks.append(exps.get_type(name=gr, field="factor"))
                n = len(set(ks))
            elif colorby == "regions":
                ks = []
                for gr in exps.get_regionsnames():
                    ks.append(exps.get_type(name=gr, field="factor"))
                n = len(set(ks))
            else:
                n = len(exps.fieldsDict[colorby].keys())
            # print(n)
            if n < 8:
                indn = np.linspace(0, 32, 256)
                color_res = [plt.cm.Set1(indn[i]) for i in range(n)]
            else:
                set1 = plt.get_cmap('Set1')
                cNorm = colormat.Normalize(vmin=0, vmax=n)
                scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=set1)
                color_res = [scalarMap.to_rgba(d) for d in range(n)]
                # color_res = plt.cm.Set1(numpy.linspace(0.1, 0.9, n)).tolist()
                # print(len(plt.cm.Set1().tolist()))
                # # np.linspace(0, 1, 9)

    color_res = unique(color_res)
    return color_res
d777fb19b52b097b569fff0f315d63115c8e2231
3,634,295
import numpy as np
from numpy.core.umath_tests import inner1d  # deprecated private numpy helper


def wfs_25d_point(omega, x0, n0, xs, xref=[0, 0, 0], c=None, omalias=None):
    """Point source by 2.5-dimensional WFS.

    ::

                    ____________   (x0-xs) n0
        D(x0,k) = \|j k |xref-x0| ------------- e^(-j k |x0-xs|)
                                  |x0-xs|^(3/2)

    """
    # util and wfs_25d_preeq are helpers defined elsewhere in this toolbox
    x0 = util.asarray_of_rows(x0)
    n0 = util.asarray_of_rows(n0)
    xs = util.asarray_1d(xs)
    xref = util.asarray_1d(xref)
    k = util.wavenumber(omega, c)
    ds = x0 - xs
    r = np.linalg.norm(ds, axis=1)
    return wfs_25d_preeq(omega, omalias, c) * \
        np.sqrt(np.linalg.norm(xref - x0)) * inner1d(ds, n0) / \
        r ** (3 / 2) * np.exp(-1j * k * r)
87fbf4dc467e0c0b7a9259d80f69125326dbf86d
3,634,296
import numpy as np


def load_labels(fn, delimiter=',', id_col=0, label_col=1):
    """
    Load ID list with label IDs
    e.g. use to load segment label, or synapse label
    """
    d = np.genfromtxt(fn, delimiter=delimiter, dtype=int)
    label_to_id = {}
    id_to_label = {}
    for i in range(d.shape[0]):
        push_dict(label_to_id, d[i,label_col], d[i,id_col])
        id_to_label[d[i,id_col]] = d[i,label_col]
    return label_to_id, id_to_label
001dc4c3da6b9614e8c87471b86697fabad3299e
3,634,297
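# Sketch of the mappings load_labels above produces, assuming push_dict(d, k, v)
# (defined elsewhere in this module) appends v to a list stored at d[k].
# For a CSV file containing:
#   10,1
#   11,1
#   12,2
# the function returns:
#   label_to_id == {1: [10, 11], 2: [12]}
#   id_to_label == {10: 1, 11: 1, 12: 2}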
import os
import json

from flask import jsonify, request  # assumed: this endpoint runs inside a Flask app


def write_data():
    """
    Write any json data received, to a temporary folder.
    This data will be, later on, read by other services.
    :return:
    """
    with open('/tmp/{}'.format(os.environ.get('PAYLOAD_FILENAME', 'data.json')), 'a') as out:
        out.write(json.dumps(json.loads(request.data)))

    return jsonify({
        'status_code': 200
    })
1a50fd2e87eadebc0ba440753d7373a186fdb642
3,634,298
import tree  # assumed: the `dm-tree` package


def flatten_with_joined_string_paths(structure, separator='/'):
    """Replacement for deprecated tf.nest.flatten_with_joined_string_paths."""
    return [(separator.join(map(str, path)), item)
            for path, item in tree.flatten_with_path(structure)]
36b814752f5879996fb135904bb619a909ab302b
3,634,299
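# Usage example for flatten_with_joined_string_paths above ('tree' being the
# dm-tree package, which traverses dict keys in sorted order):
print(flatten_with_joined_string_paths({'a': {'b': 1, 'c': 2}}))
# [('a/b', 1), ('a/c', 2)]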