content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def imei_parse_nibble(nibble):
    """Parse one nibble of an IMEI and return its ASCII representation."""
    # Decimal digits map straight onto '0'..'9'.
    if nibble < 10:
        return chr(ord('0') + nibble)
    # Special dialing characters; anything else is unrepresentable.
    return {0xa: '*', 0xb: '#', 0xc: 'C', 0xd: '.', 0xe: '!'}.get(nibble, '')
837445a7679bc5355978d7d4e69c5c9fa166cb3f
3,628,100
def check_command_succeeded(reply):
    """
    Return True if the command succeeded; otherwise print the rejection
    reason and return False.

    param reply: BinaryReply
    return: boolean
    """
    # 255 is the binary error response code; anything else means accepted.
    if reply.command_number != 255:
        return True
    print("Danger! Command rejected. Error code: " + str(reply.data))
    return False
a320b5000f59790e314108398339b9a66dbf6520
3,628,101
def format_interval(seconds):
    """Format an integer number of seconds to a human readable string."""
    pieces = []
    for (singular, plural), span in (
        (('week', 'weeks'), 604800),
        (('day', 'days'), 86400),
        (('hour', 'hours'), 3600),
        (('minute', 'minutes'), 60),
    ):
        count, seconds = divmod(seconds, span)
        if count > 0:
            pieces.append('%d %s' % (count, plural if count > 1 else singular))
    # Whatever is left over (possibly fractional) is reported as seconds.
    if seconds:
        pieces.append('%.2f %s' % (seconds,
                                   'second' if seconds == 1.0 else 'seconds'))
    return ', '.join(pieces)
8deae4627807f4c5e0cc1844499ebb39f658f2d0
3,628,102
def make_arguments_parser():
    """Build and return a command line argument parser.

    Note: this calls ``parser.parse_args()``, so it returns the parsed
    Namespace (reading ``sys.argv``), not the parser itself.
    """
    parser = ArgumentParser(description=__doc__, epilog="""CSS-HTML-JS-Minify:
    Takes a file or folder full path string and process all CSS/HTML/JS found.
    If argument is not file/folder will fail. Check Updates works on Python3.
    Std-In to Std-Out is deprecated since it may fail with unicode characters.
    SHA1 HEX-Digest 11 Chars Hash on Filenames is used for Server Cache.
    CSS Properties are Alpha-Sorted, to help spot cloned ones, Selectors not.
    Watch works for whole folders, with minimum of ~60 Secs between runs.""")
    # parser.add_argument('--version', action='version', version=css_html_js_minify.__version__)
    # The only required positional argument: what to minify.
    parser.add_argument('fullpath', metavar='fullpath', type=str,
                        help='Full path to local file or folder.')
    parser.add_argument('--wrap', action='store_true',
                        help="Wrap output to ~80 chars per line, CSS only.")
    parser.add_argument('--prefix', type=str,
                        help="Prefix string to prepend on output filenames.")
    parser.add_argument('--timestamp', action='store_true',
                        help="Add a Time Stamp on all CSS/JS output files.")
    parser.add_argument('--quiet', action='store_true', help="Quiet, Silent.")
    parser.add_argument('--hash', action='store_true',
                        help="Add SHA1 HEX-Digest 11chars Hash to Filenames.")
    parser.add_argument('--zipy', action='store_true',
                        help="GZIP Minified files as '*.gz', CSS/JS only.")
    parser.add_argument('--sort', action='store_true',
                        help="Alphabetically Sort CSS Properties, CSS only.")
    parser.add_argument('--comments', action='store_true',
                        help="Keep comments, CSS/HTML only (Not Recommended)")
    parser.add_argument('--overwrite', action='store_true',
                        help="Force overwrite all in-place (Not Recommended)")
    parser.add_argument('--after', type=str,
                        help="Command to execute after run (Experimental).")
    parser.add_argument('--before', type=str,
                        help="Command to execute before run (Experimental).")
    parser.add_argument('--watch', action='store_true', help="Watch changes.")
    parser.add_argument('--multiple', action='store_true',
                        help="Allow Multiple instances (Not Recommended).")
    return parser.parse_args()
5acdc6c18edaad874f9a0f3328545c57028ceaa4
3,628,103
def split_on_comma(tokens):
    """Split a list of tokens on commas, ie ``,`` DELIM tokens.

    Only "top-level" comma tokens are splitting points, not commas inside
    a function or other :class:`ContainerToken`.

    :param tokens:
        An iterable of :class:`~.token_data.Token` or
        :class:`~.token_data.ContainerToken`.
    :returns:
        A list of lists of tokens
    """
    # Start with one (possibly empty) part; each comma opens a new one.
    parts = [[]]
    for token in tokens:
        if token.type == 'DELIM' and token.value == ',':
            parts.append([])
        else:
            parts[-1].append(token)
    return parts
8b89dc6857a7b3e9bcc02f3a291e0ff0cd8d5f20
3,628,104
def filter_grps(grps, rcs, irr_col, low, high, **kwargs):
    """
    Apply irradiance filter around passed reporting irradiances to groupby.

    For each group in the grps argument the irradiance is filtered by a
    percentage around the reporting irradiance provided in rcs.

    Parameters
    ----------
    grps : pandas groupby
        Groupby object with time groups (months, seasons, etc.).
    rcs : pandas DataFrame
        Dataframe of reporting conditions.  Use the rep_cond method to
        generate a dataframe for this argument.
    irr_col : str
        Name of the irradiance column; passed through to ``flt_irr``.
    low : numeric
        Lower filter bound passed through to ``flt_irr`` (presumably a
        percentage below the reporting irradiance — confirm against flt_irr).
    high : numeric
        Upper filter bound passed through to ``flt_irr`` (presumably a
        percentage above the reporting irradiance — confirm against flt_irr).
    **kwargs
        Passed to pandas Grouper to control label and closed side of
        intervals.  See pandas Grouper documentation for details.  Default
        is left labeled and left closed.

    Returns
    -------
    pandas groupby
    """
    flt_dfs = []
    # All group keys share the same frequency; take it from the first key.
    freq = list(grps.groups.keys())[0].freq
    for grp_name, grp_df in grps:
        # Reporting irradiance for this time group.
        ref_val = rcs.loc[grp_name, 'poa']
        grp_df_flt = flt_irr(grp_df, irr_col, low, high, ref_val=ref_val)
        flt_dfs.append(grp_df_flt)
    # Re-group the concatenated filtered data with the original frequency.
    df_flt = pd.concat(flt_dfs)
    df_flt_grpby = df_flt.groupby(pd.Grouper(freq=freq, **kwargs))
    return df_flt_grpby
772600b5d6f9d35c4751db6a566b4d886b2790de
3,628,105
from readDB import query_plug_set


async def get_weapon_plug_hashes(perk_socket_set: SocketSet) -> list[PerkColumn]:
    """
    get perks for all random and origin perk sockets

    :param perk_socket_set: weapon's perk sockets
    :return: weapon perks as List of PerkColumn
    """
    plug_sets: list[PlugSet] = []
    # Socket 0 is skipped — presumably the intrinsic slot.
    index = 1
    while index < perk_socket_set.get_size():
        wanted = (perk_socket_set.is_random_socket(index=index)
                  or perk_socket_set.is_origin_socket(index=index))
        if wanted:
            plug_hash: int = perk_socket_set.get_plug_set_hash(index=index)
            plug_sets.append(PlugSet(query_plug_set(plug_hash)))
        index += 1
    return await get_plug_set_perk_hashes(plug_sets)
9a66c742ce3ff1cf027585bcd6c360536da7d1c2
3,628,106
def export_inventory_information():
    """Export the inventory search result to csv, html or excel format.

    Builds the export criteria from the submitted
    ExportInventoryInformationForm, runs the available/in-use inventory
    queries, writes the report with the format-specific writer and returns
    it as a file download; optionally e-mails the report as an attachment.
    """
    db_session = DBSession()
    export_results_form = ExportInventoryInformationForm(request.form)
    export_data = dict()
    export_data['export_format'] = export_results_form.export_format.data
    # Hidden form fields arrive as plain strings: empty string means
    # "no filter"; multi-valued filters are comma-separated.
    export_data['serial_number'] = export_results_form.hidden_serial_number.data \
        if export_results_form.hidden_serial_number.data != "" else None
    export_data['region_ids'] = export_results_form.hidden_region_ids.data.split(',') \
        if export_results_form.hidden_region_ids.data else []
    export_data['chassis_types'] = export_results_form.hidden_chassis_types.data.split(',') \
        if export_results_form.hidden_chassis_types.data else []
    export_data['software_versions'] = export_results_form.hidden_software_versions.data.split(',') \
        if export_results_form.hidden_software_versions.data else []
    export_data['model_names'] = export_results_form.hidden_model_names.data.split(',') \
        if export_results_form.hidden_model_names.data else []
    export_data['partial_model_names'] = export_results_form.hidden_partial_model_names.data.split(',') \
        if export_results_form.hidden_partial_model_names.data else []
    export_data['vid'] = export_results_form.hidden_vid.data \
        if export_results_form.hidden_vid.data != "" else None
    # Resolve the numeric region ids to display names for the report.
    if export_data['region_ids']:
        region_names = db_session.query(Region.name).filter(
            Region.id.in_(map(int, export_data['region_ids']))).order_by(Region.name.asc()).all()
        export_data['region_names'] = []
        [export_data['region_names'].append(query_tuple[0]) for query_tuple in region_names]
    else:
        export_data['region_names'] = []
    export_data['available_inventory_iter'] = query_available_inventory(db_session,
                                                                        export_data.get('serial_number'),
                                                                        export_data.get('model_names'),
                                                                        export_data.get('partial_model_names'),
                                                                        export_data.get('vid'))
    export_data['in_use_inventory_iter'] = query_in_use_inventory(db_session, export_data)
    export_data['user'] = current_user
    # Pick the writer matching the requested export format.
    writer = None
    if export_data.get('export_format') == ExportInformationFormat.HTML:
        writer = ExportInventoryInfoHTMLWriter(**export_data)
    elif export_data.get('export_format') == ExportInformationFormat.MICROSOFT_EXCEL:
        writer = ExportInventoryInfoExcelWriter(**export_data)
    elif export_data.get('export_format') == ExportInformationFormat.CSV:
        writer = ExportInventoryInfoCSVWriter(**export_data)
    if writer:
        file_path = writer.write_report()
        if export_results_form.send_email.data:
            email_message = "<html><head></head><body>Please find in the attachment the inventory search results " \
                            "matching the following search criteria: "
            search_criteria_in_html = get_search_filter_in_html(export_data)
            if search_criteria_in_html:
                email_message += search_criteria_in_html + '</body></html>'
            else:
                email_message += '&nbsp;None</body></html>'
            create_email_job_with_attachment_files(db_session, email_message, file_path,
                                                   export_results_form.user_email.data)
        return send_file(file_path, as_attachment=True)
    # Unknown format: log and return None (no download).
    logger.error('inventory: invalid export format "%s" chosen.' % export_data.get('export_format'))
    return
5b517f9a195e595ef04410bb4aa11384b2c73729
3,628,107
from typing import Set


def decide_unsat(K: Set[CNFClause]) -> bool:
    """Decide whether a CNF clause set is unsatisfiable.

    Repeatedly applies ``Res`` (presumably the resolution closure operator —
    confirm its semantics) until a fixed point is reached; the input is
    reported unsatisfiable iff the empty clause has been derived.

    :param K: set of clauses in CNF
    :return: True if the empty clause is derivable (K unsatisfiable),
        False otherwise
    """
    R: set[CNFClause] = set()
    S: set[CNFClause] = K
    # Iterate Res until the clause set stops changing (fixed point).
    while R != S:
        R = S
        S = Res(R)
    # The empty clause signals a contradiction.
    if CNFClause(set()) in S:
        return True
    return False
6a479d4f2017f7e60f03e9ef85064940bff6581a
3,628,108
def find(db, user):
    """Look up the notelist document for *user*.

    :param db: database handle exposing a ``notelist`` collection
    :param user: value used as the document ``_id``
    :return: the matching document (or whatever ``find_one`` yields)
    """
    return db.notelist.find_one({"_id": user})
04c6ad64e9b8ff2f5cd5462cebb9d37127b6d176
3,628,109
def clip(x, xmin, xmax, dtype=None):
    """
    Clips (limits) the values in an array.

    Given an interval, values outside the interval are clipped to the
    interval edges.  For example, if an interval of :math:`[0, 1]` is
    specified, values smaller than 0 become 0, and values larger than 1
    become 1.

    Note:
        Currently, clip with `nan` is not supported.

    Args:
        x (Tensor): Tensor containing elements to clip.
        xmin (Tensor, scalar, None): Minimum value. If None, clipping is not
            performed on lower interval edge. Not more than one of `xmin` and
            `xmax` may be None.
        xmax (Tensor, scalar, None): Maximum value. If None, clipping is not
            performed on upper interval edge. Not more than one of `xmin` and
            `xmax` may be None. If `xmin` or `xmax` are tensors, then the
            three tensors will be broadcasted to match their shapes.
        dtype (:class:`mindspore.dtype`, optional): defaults to None.
            Overrides the dtype of the output Tensor.

    Returns:
        Tensor, a tensor with the elements of `x`, but where values < `xmin`
        are replaced with `xmin`, and those > `xmax` with `xmax`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import Tensor
        >>> x = Tensor([1, 2, 3, -4, 0, 3, 2, 0]).astype("float32")
        >>> output = x.clip(0, 2)
        >>> print(output)
        [1 2 2 0 0 2 2 0]
    """
    if xmin is None and xmax is None:
        const_utils.raise_value_error("One of max or min must be given.")
    if xmin is not None:
        xmin = const_utils.make_tensor(xmin).astype(x.dtype)
        # Lift 0-d operands to shape (1,) and squeeze back afterwards —
        # presumably a workaround for F.maximum on two scalar tensors
        # (TODO confirm against the mindspore functional API).
        if x.ndim == 0 and xmin.ndim == 0:
            x = F.maximum(x.reshape((1,)), xmin).squeeze()
        else:
            x = F.maximum(x, xmin)
    if xmax is not None:
        xmax = const_utils.make_tensor(xmax).astype(x.dtype)
        if x.ndim == 0 and xmax.ndim == 0:
            x = F.minimum(x.reshape((1,)), xmax).squeeze()
        else:
            x = F.minimum(x, xmax)
    # NOTE: the original code carried an `is_scalar` flag that was never set
    # to True, making its `return x.squeeze()` branch unreachable; the dead
    # flag and branch have been removed with no behavior change.
    if dtype is not None and dtype != x.dtype:
        return x.astype(dtype)
    return x
c99ba428593c589e6844cfd5e355d27cb36c5904
3,628,110
import math


def jump(distance, delta_piece_y):
    """Jump a given distance by issuing a long-press swipe through adb.

    :param distance: distance measured from the screenshot
    :param delta_piece_y: vertical offset of the piece; added to the swipe
        duration (NOTE(review): adding a pixel offset to a millisecond
        duration looks like an empirical correction term — confirm)
    :return: the computed press time in milliseconds (without the offset)
    """
    # Ratio between the in-game length units and the distance measured on
    # the screenshot; head_diameter is a module-level calibration value.
    scale = 0.945 * 2 / head_diameter
    actual_distance = distance * scale * (math.sqrt(6) / 2)
    # Press time from solving an empirical quadratic motion model.
    press_time = (-945 + math.sqrt(945 ** 2 + 4 * 105 * 36 * actual_distance)) / (2 * 105) * 1000
    press_time = max(press_time, 200)  # 200 ms is the minimum press duration
    press_time = int(press_time)
    # Swipe endpoints come from module-level globals.
    cmd = 'shell input swipe {x1} {y1} {x2} {y2} {duration}'.format(
        x1=swipe_x1, y1=swipe_y1, x2=swipe_x2, y2=swipe_y2,
        duration=press_time + delta_piece_y
    )
    print('{} {}'.format(adb.adb_path, cmd))
    adb.run(cmd)
    return press_time
45c7ade5213d7997a6742373fc92026ab56280b3
3,628,111
def get_mean_hourly_sleep_mr_df(sleep_profile, colnames):
    """Create a summary df of mean hourly sleeping metabolic rate per fly.

    Exported as a sheet to excel.

    Args:
        sleep_profile: DataFrame whose columns at positions 1, 4, 7, ...
            hold the hourly MR series (one per fly); the top half of the
            rows is day and the bottom half is night.
        colnames: iterable of fly identifiers, one per hourly MR column.

    Returns:
        pd.DataFrame with columns 'Fly', 'Mean Hourly MR Total',
        'Mean Hourly MR Day' and 'Mean Hourly MR Night'.
    """
    mr_hourly = sleep_profile.iloc[:, 1::3]
    n_rows = len(mr_hourly)
    # Top half of the profile is day, bottom half is night.  The original
    # implementation attached this mask as a DataFrame attribute
    # (mr_hourly.isNight = pd.Series(...)), which triggers a pandas
    # UserWarning and misaligns for non-default indexes; an index-aligned
    # Series avoids both problems.
    is_night = pd.Series([row > n_rows / 2 - 1 for row in range(n_rows)],
                         index=mr_hourly.index)
    column_names = ['Fly', 'Mean Hourly MR Total',
                    'Mean Hourly MR Day', 'Mean Hourly MR Night']
    rows = []
    for i, fly in enumerate(colnames):
        mr_ser = mr_hourly.iloc[:, i]
        rows.append([fly,
                     mr_ser.mean(),
                     mr_ser[~is_night].mean(),
                     mr_ser[is_night].mean()])
    return pd.DataFrame(rows, columns=column_names)
167c58bba7bc8e3a5e456a9c20d4a7a55ba722d0
3,628,112
import os
import fnmatch


def get_files_in_traverse_dir(a_dir, patterns):
    # type: (str, str) -> List[str]
    """Return a list of all files in *a_dir* which match *patterns*.

    Supports Unix filename pattern matching ('*', '?', [seq], [!seq]) and
    multiple space-delimited options in *patterns*.
    """
    matched = set()  # a set removes duplicates from overlapping patterns
    for root, _, files in os.walk(a_dir):           # all subdirectories
        for pattern in patterns.split():            # each given pattern
            for name in fnmatch.filter(files, pattern):
                matched.add(os.path.join(root, name))
    return list(matched)
6f2f1e16975019f90ee4a127fd5ea6614bc1942b
3,628,113
def characters(probabilities):
    """Turn a 1-hot encoding or a probability distribution over the possible
    characters back into its (most likely) character representation."""
    most_likely_ids = np.argmax(probabilities, 1)
    return [id2char(char_id) for char_id in most_likely_ids]
7b516b63df60953519fb6ba4b529039b10944947
3,628,114
def system(t, state, u):
    """
    differential system equation

    :param t: time (unused; kept for ODE-solver signature compatibility)
    :param state: current state ``[x, y]``
    :param u: input
    :return: derivative of the current state
    """
    x, y = state[0], state[1]
    x_dot = 3 * x + 5 * y - 2 * x * y + 3 * x ** 2 - 0.5 * y ** 2
    y_dot = 10 - 0.6 * x + 0.9 * y ** 2 - 3 * x ** 2 * y + u
    return np.array([x_dot, y_dot])
03a61dc8e1447c4ca636aaabd43dc95b63e5c031
3,628,115
import torch


def LR_classifier(train_set, train_labels, test_set, test_labels, n_way):
    """
    Wrapper function which trains a logistic regression on
    train_set/train_labels and return the accuracy on test_set.

    Parameters:
        train_set -- torch.tensor (n_sample, n_feature), contains training data
        train_labels -- torch.tensor (n_sample, 1), contains training labels
        test_set -- torch.tensor(n_test, n_feature), contains test data
        test_labels -- torch.tensor(n_test, 1), contains test labels
        n_way -- number of classes (not referenced in this body)
    """
    # Device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Train a logistic regression
    loss_per_epoch, classifier, criterion = train_logistic_regression(train_set, train_labels)
    # Set model to evaluate mode
    classifier.eval()
    # Check the performances on the train set (accuracy, loss and confidence)
    correct = 0
    total = 0
    train_loss = 0
    train_confidence = 0
    for inputs, labels in zip(train_set, train_labels):
        # One sample at a time; unsqueeze adds the batch dimension.
        inputs = inputs.unsqueeze(dim=0).to(device)
        labels = labels.unsqueeze(dim=0).to(device)
        # Model
        with torch.no_grad():
            outputs = classifier(inputs)
            _, predicted_labels = torch.max(outputs.data, 1)
            correct += (predicted_labels == labels).sum()
            total += labels.shape[0]
            train_loss += criterion(outputs, labels)
            # Confidence = max softmax probability of the prediction
            # (F is presumably torch.nn.functional, imported at file level).
            conf, _ = torch.max(F.softmax(outputs.data, dim=1), 1)
            train_confidence += conf
    # be careful: rounding problem by doing 100. * correct / total
    train_accuracy = 100. * correct.item() / total
    train_loss = train_loss.item() / total
    train_confidence = train_confidence.item() / total
    # Evaluate the performances on the test set (accuracy, loss and confidence)
    correct = 0
    total = 0
    test_loss = 0
    test_confidence = 0
    for inputs, labels in zip(test_set, test_labels):
        inputs = inputs.unsqueeze(dim=0).to(device)
        labels = labels.unsqueeze(dim=0).to(device)
        # Model
        with torch.no_grad():
            outputs = classifier(inputs)
            _, predicted_labels = torch.max(outputs.data, 1)
            correct += (predicted_labels == labels).sum()
            total += labels.shape[0]
            test_loss += criterion(outputs, labels)
            conf, _ = torch.max(F.softmax(outputs.data, dim=1), 1)
            test_confidence += conf
    test_accuracy = 100. * correct.item() / total
    test_loss = test_loss.item() / total
    test_confidence = test_confidence.item() / total
    return loss_per_epoch, train_accuracy, train_loss, train_confidence, test_accuracy, test_loss, test_confidence
30ee4a2e5b3585f086a69304564a3e288166e17f
3,628,116
import logging


def json_file_content_check(package_info):
    """Check the content of a package's json file.

    Logs a warning and returns False when a required field is missing or
    empty, or when a referenced URL fails ``determine_url_valid``; returns
    True otherwise.  (Uses lazy %-style logging arguments instead of eager
    string concatenation.)
    """
    if package_info['category'] == '':
        logging.warning('The category of %s package is lost.', package_info['name'])
        return False
    if 'enable' not in package_info or package_info['enable'] == '':
        logging.warning('The enable of %s package is lost.', package_info['name'])
        return False
    if package_info['author']['name'] == '':
        logging.warning('The author name of %s package is lost.', package_info['name'])
        return False
    if package_info['author']['email'] == '':
        logging.warning('The author email of %s package is lost.', package_info['name'])
        return False
    if package_info['license'] == '':
        logging.warning('The license of %s package is lost.', package_info['name'])
        return False
    if package_info['repository'] == '':
        logging.warning('The repository of %s package is lost.', package_info['name'])
        return False
    if not determine_url_valid(package_info['repository']):
        return False
    for site in package_info['site']:
        package_version = site['version']
        package_url = site['URL']
        logging.debug("%s : %s", package_version, package_url)
        # Only non-git URLs are checked for reachability.
        if package_url[-4:] != '.git':
            logging.debug(site['filename'])
            if not determine_url_valid(package_url):
                return False
    return True
b700af7ecf7ee8f4c98f62915b969a4e10358a28
3,628,117
def debug(msg):
    """Return *msg* when the global CLI args enable DEBUG, else ''.

    This is a helper for debugging statements in jinja2 templates.
    """
    return msg if args.DEBUG else ""
a94949e4d7e8cb3fe4acb7469945885fb266c4bf
3,628,118
import numpy


def preprocess_image(x, mode='caffe'):
    """Preprocess an image by subtracting the ImageNet mean.

    Args
        x: numpy.array of shape (None, None, 3) or (3, None, None).
        mode: One of "caffe" or "tf".
            - caffe: will zero-center each color channel with respect
              to the ImageNet dataset, without scaling.
            - tf: will scale pixels between -1 and 1, sample-wise.

    Returns
        The input with the ImageNet mean subtracted.
    """
    # Always work on a float32 copy to keep compatibility with opencv;
    # the in-place ops below then preserve the float32 dtype.
    x = x.astype(numpy.float32)
    if mode == 'caffe':
        # Per-channel ImageNet means (presumably BGR order — confirm caller).
        x -= [103.939, 116.779, 123.68]
    elif mode == 'tf':
        x /= 127.5
        x -= 1.
    return x
a5fe2fe24e9dcef65a7f704a17ab14d4ddb72f97
3,628,119
def calculate_metric(threshold, args): """ 给定阈值得到预测类别后,计算precision和recall,其中precision是avg_sample(hit_cnt/pred_cnt), recall是avg_sample(hit_cnt/label_cnt),在当前场景下label_cnt恒等于1 """ # 格式:label:pred_cls,pred_score;pred_cls,pred_score; # 78089261739417600:78089261739417600,0.995399; # 获取label_list,按threshold划分得到pred_list label_list = [] pred_list = [] with open(args.search_result_file, mode='r') as f: results = f.readlines() for result in results: # print("result:", result) # label label = result.strip().split(':')[0] # 对于每个query图片,label是一个值 label_list.append(label) # pred pred_result = result.strip().split(':')[1].strip(';').split(';') # print("pred_result:", pred_result) pred_cls_list = [] # 对于每个query图片,pred是一个list for item in pred_result: try: pred_cls, pred_score = item.split(',') if float(pred_score) >= threshold: pred_cls_list.append(pred_cls) else: break except: print("item:", item) pred_list.append(pred_cls_list) # 求presision和recall assert len(label_list) == len(pred_list), "len(label_list) != len(pred_list)" sum_precision = 0.0 sum_recall = 0.0 """ tp: base里有该query,且检索结果是有 fp: base里没有该query,但检索结果是有 tn: base里没有该query,且检索结果是没有 fn: base里有该query,但检索结果是没有 """ tp = 0 fp = 0 tn = 0 fn = 0 for i in range(len(label_list)): if label_list[i] in pred_list[i]: # base里有该query,且检索结果是有 upper = 1 tp += 1 elif (label_list[i] == '0') and (len(pred_list[i]) == 0): # base里没有该query,且检索结果是没有 upper = 1 tn += 1 elif (label_list[i] == '0') and (len(pred_list[i]) != 0): # base里没有该query,但检索结果是有 upper = 0 fp += 1 else: # base里有该query,但检索结果是没有 upper = 0 fn += 1 lower_precision = max(len(pred_list[i]), 1) lower_recall = 1 sum_precision += upper / lower_precision sum_recall += upper / lower_recall precision = sum_precision / len(label_list) recall = sum_recall / len(label_list) pos_precision = tp / max(tp + fp, 1) pos_recall = tp / max(tp + fn, 1) pos_samples = tp + fn neg_precision = tn / max(tn + fn, 1) neg_recall = tn / max(tn + fp, 1) neg_samples = tn + fp return precision, recall, 
pos_precision, pos_recall, pos_samples, neg_precision, neg_recall, neg_samples
97bb7a81e9731a3f0c48000f98669d03f1d48ca4
3,628,120
import os
import csv
import random


def load_data():
    """Load data from the Quora dataset.

    Partitions off part of the train data (the last 20%) for evaluation and
    returns ((train_texts, train_cats), (eval_texts, eval_cats)).
    """
    path = os.path.join('data', 'quora', 'train.csv')
    with open(path, 'r') as train_file:
        reader = csv.reader(train_file, delimiter=',', quotechar='"')
        train_data = list(reader)[1:]  # drop the header row
    random.shuffle(train_data)
    train_texts = [row[1] for row in train_data]
    train_cats = [{'INSINCERE': row[2] == '1'} for row in train_data]
    split = int(0.8 * len(train_data))
    return ((train_texts[:split], train_cats[:split]),
            (train_texts[split:], train_cats[split:]))
98d1b5241785cdb2f229fa076f64128aa4aaee29
3,628,121
from datetime import datetime


def current_time() -> datetime:
    """Return timezone-aware current time as datetime."""
    now = datetime.now()
    # astimezone() with no argument attaches the local timezone.
    return now.astimezone()
2b7237f4c5a0d88ab7643dfdd3b1f8c524683884
3,628,122
def test_metamodel_callable_must_return_a_metamodel():
    """
    Test that meta-model callable must return an instance of TextXMetaModel.
    """

    def invalid_metamodel_callable():
        # Deliberately returns a value that is not a TextXMetaModel.
        return 42

    clear_language_registrations()
    register_language('test-lang',
                      pattern='*.test',
                      description='test-lang description',
                      metamodel=invalid_metamodel_callable)
    # Resolving the language should fail because the callable's return
    # value is not a meta-model.
    with pytest.raises(TextXRegistrationError,
                       match='.*Meta-model type for language.*'):
        metamodel_for_language('test-lang')
8d639c0a0abedb1ad80dc8aa4bbc4f46bc3499a7
3,628,123
def Reynold(V, D):
    """
    Calculate Reynold number

    Parameters
    ----------
    V : float
        velocity
    D : float
        diameter

    Returns
    -------
    Re : float
        Reynold number
    """
    kinematic_viscosity = 1.004e-6  # [m^2/s]
    return np.abs(V * D / kinematic_viscosity)
c36280eb501ea792ecce7057973d3837d862f05a
3,628,124
def compile(self):
    """Compile AssignNode.

    Emits byte code that stores the compiled right-hand side (children[1])
    into a fresh register, then assigns that register to the target name
    (children[0].tok).  Registers are named $S<i> for strings and $N<i> for
    numbers, allocated from the module-level ``register_index`` counter.
    """
    global register_index
    register_type = ""
    # Crude type inference: a double quote anywhere in the compiled RHS
    # means the value is a string.
    if '"' not in self.children[1].compile():
        register_type = "$N"
    else:
        register_type = "$S"
    byte_code = register_type + "%s = " % register_index
    # NOTE(review): children[1].compile() is invoked twice (once above for
    # type sniffing) — harmless only if compile() is side-effect free.
    byte_code += self.children[1].compile() + "\n\t"
    byte_code += self.children[0].tok + " = "
    byte_code += register_type + "%s" % register_index
    register_index += 1
    return byte_code + "\n\n"
f0458e66d8245bba69c8f456e9b21785c7f7bee6
3,628,125
def create_client(*args):
    """
    Create an EC2 client for downstream use. Must have a valid AWS
    configuration for boto3 to use.

    :param args: optional profile name for the AWS shared config file.
    :return: returns a boto3 EC2 client.
    """
    try:
        if args:
            session = boto3.Session(profile_name=args[0])
        else:
            session = boto3.Session()
        client = session.client('ec2')
        # try to describe instances- this will fail if credentials or permissions are inappropriate
        client.describe_instances()
        return client
    except NoRegionError:
        raise Exception('Error creating client; please run `aws configure` to set up your environment first.')
    except NoCredentialsError:
        raise Exception('Error creating client; your AWS credentials were not found. Run `aws configure` to check.')
    except ClientError as e:
        raise Exception('Error creating client; please check your credentials and permissions. Error was: {}'.format(e))
41ce3acc7e3e443058d3ab01ef05c6b7399da63f
3,628,126
def Uint32ToUint8(z, lsb_first=True):
    """Split each 32-bit value in *z* into four 8-bit values.

    Delegates to the ``Uint32ToBin`` / ``BinToUint8`` helpers; *lsb_first*
    is forwarded to control the byte order.

    Example: np.all(Uint32ToUint8([0x01020304, 0x05060708]) == np.array([4,3,2,1,8,7,6,5], 'uint8'))
    """
    return BinToUint8(Uint32ToBin(z, lsb_first))
6cb775731c6ec967f02da6b69a48b533e2f3ce3e
3,628,127
from typing import List


def _decode(encoding: Encoding, decoder: Decoder) -> List[int]:
    """Generic decoder using subclasses to decode different formats.

    It works by initializing a list of timestamps and the decoder with the
    initial timestamp from the encoding data structure.  The stateful
    decoder is then called on all the values in the output.
    """
    tss = [encoding.initial_timestamp]
    dec = decoder(encoding.initial_timestamp)
    # The decoder keeps state between calls, so values are fed in order.
    for v in encoding.values:
        tss.append(dec.decode(v))
    return tss
594833d1d518a6dc055ed4769027bd36a44f8038
3,628,128
import shutil
import os


def _load_python(test_dir, ctxs):
    """Loads the Python library and samples for the given APIs.

    Args:
        test_dir: The parent directory.
        ctxs: The list of Contexts to load.

    Returns:
        dict: Mapping of ctx.id_ to a list of SampleCommands to run samples.
    """
    sample_cmds = {}
    for ctx in ctxs:
        sample_filenames = _generate_samples(ctx, _PYTHON)
        sample_cmds[ctx.id_] = []
        src_dir = _make_src_dir(test_dir, ctx.name, ctx.version, _PYTHON)
        # Create a virtualenv.
        _call('virtualenv venv', cwd=src_dir)
        _call('venv/bin/pip install google-api-python-client google-auth', cwd=src_dir)
        for filename in sample_filenames:
            method_id = _parse_method_id_from_sample_filename(filename)
            # ./foo.bar.get.py
            new_filename = '{}.py'.format(method_id)
            shutil.copy(filename, os.path.join(src_dir, new_filename))
            # /venv/bin/python .../foo.bar.get.py
            cmd = 'venv/bin/python {}'.format(new_filename)
            sample_cmds[ctx.id_].append(SampleCommand(method_id, cmd, src_dir))
        print('Generated {} samples in {}'.format(len(sample_filenames), src_dir))
    return sample_cmds
97b896b1025d16ea8f74e00fb16215b42786d78e
3,628,129
def merge_diff(
    initial_config=None,
    initial_path=None,
    merge_config=None,
    merge_path=None,
    saltenv="base",
):
    """
    Return the merge diff, as text, after merging the merge config into the
    initial config.

    initial_config
        The initial configuration sent as text. This argument is ignored when
        ``initial_path`` is set.

    initial_path
        Absolute or remote path from where to load the initial configuration
        text. This argument allows any URI supported by
        :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
        ``https://``, ``s3://``, ``ftp:/``, etc.

    merge_config
        The config to be merged into the initial config, sent as text. This
        argument is ignored when ``merge_path`` is set.

    merge_path
        Absolute or remote path from where to load the merge configuration
        text. This argument allows any URI supported by
        :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
        ``https://``, ``s3://``, ``ftp:/``, etc.

    saltenv: ``base``
        Salt fileserver environment from which to retrieve the file. Ignored
        if ``initial_path`` or ``merge_path`` is not a ``salt://`` URL.

    CLI Example:

    .. code-block:: bash

        salt '*' iosconfig.merge_diff initial_path=salt://path/to/running.cfg merge_path=salt://path/to/merge.cfg
    """
    # A path argument always wins over the corresponding inline text.
    if initial_path:
        initial_config = __salt__["cp.get_file_str"](initial_path, saltenv=saltenv)
    candidate_config = merge_text(
        initial_config=initial_config,
        merge_config=merge_config,
        merge_path=merge_path,
        saltenv=saltenv,
    )
    # Canonicalise the running config (parse to a tree and re-print) so the
    # diff is not polluted by formatting-only differences.
    clean_running_dict = tree(config=initial_config)
    clean_running = _print_config_text(clean_running_dict)
    return _get_diff_text(clean_running, candidate_config)
bd61a70cfb9c4ebb2ff4399fe7825ea2abcac2e8
3,628,130
def make_label(label_text):
    """Return a label object conforming to api specs given a name."""
    label = {'name': label_text}
    # Fixed visibility settings required by the API.
    label['messageListVisibility'] = 'show'
    label['labelListVisibility'] = 'labelShow'
    return label
8c388d138136af4f01ec02db1565d66049b38cf1
3,628,131
import socket


def ip_to_str(address):
    """Convert a packed IPv4 address into its printable dotted-quad form.

    Args:
        address (inet struct): packed inet network address (4 bytes)

    Returns:
        str: Printable/readable IP address
    """
    return socket.inet_ntop(socket.AF_INET, address)
e62099babab38dc16a16f431512741ee532db4e1
3,628,132
import torch def _empty_memory(memory_dim): """Get a empty memory, assuming the memory is a row vector """ return torch.zeros(1, memory_dim)
b7454e52bbc3c20061d53716c2e805603eb041ff
3,628,133
def label_to_roi(label, expansion_mm=[0, 0, 0], return_as_list=False):
    """Generates a region of interest (ROI), defined by a starting index
    (z,y,x) and size (s_z, s_y, s_x). This can be used to crop images/labels.

    Args:
        label (sitk.Image | list): Binary label image/mask/structure to define to ROI
        expansion_mm (list, optional): An optional expansion of the box (in each direciton).
            Defaults to [0, 0, 0].
        return_as_list (bool, optional): When True, return the concatenated
            list ``crop_box_index + crop_box_size`` instead of a tuple.

    Returns:
        tuple: crop_box_size, crop_box_starting_index
    """
    # A list of labels is combined into a single binary mask.
    # NOTE(review): expansion_mm uses a mutable default argument; safe here
    # only because it is rebound (np.array) rather than mutated.
    if hasattr(label, "__iter__") and not isinstance(label, sitk.Image):
        reference_label = sum(label) > 0
    else:
        reference_label = label > 0
    image_spacing = np.array(reference_label.GetSpacing())
    label_stats_image_filter = sitk.LabelStatisticsImageFilter()
    label_stats_image_filter.Execute(reference_label, reference_label)
    bounding_box = np.array(label_stats_image_filter.GetBoundingBox(1))
    # Bounding box is laid out as (min, max) pairs per axis.
    index = [bounding_box[x * 2] for x in range(3)]
    size = [bounding_box[(x * 2) + 1] - bounding_box[x * 2] for x in range(3)]
    # Convert the physical (mm) expansion into voxels.
    expansion_mm = np.array(expansion_mm)
    expansion = (expansion_mm / image_spacing).astype(int)
    # Avoid starting outside the image
    crop_box_index = np.max([index - expansion, np.array([0, 0, 0])], axis=0)
    # Avoid ending outside the image
    crop_box_size = np.min(
        [
            np.array(reference_label.GetSize()) - crop_box_index,
            np.array(size) + 2 * expansion,
        ],
        axis=0,
    )
    crop_box_size = [int(i) for i in crop_box_size]
    crop_box_index = [int(i) for i in crop_box_index]
    if return_as_list:
        return crop_box_index + crop_box_size
    return crop_box_size, crop_box_index
efc2278019f8a27c4e792ad71ee8d7dbda2dbc68
3,628,134
def get_topk_correct(outputs, targets, ks=(1,)):
    """ Computes the precision@K for the specified values of K

    Returns a list with, for each k in *ks*, the number of samples whose
    target appears among the top-k predicted classes.
    """
    # Rank classes once for the largest requested k; shape (maxk, batch).
    top_preds = outputs.topk(max(ks), 1, True, True)[1].t()
    hits = top_preds.eq(targets.view(1, -1).expand_as(top_preds))
    return [hits[:k].reshape(-1).float().sum(0).item() for k in ks]
f657e850cff85ede7040f1b46123fc0cc1cadf7e
3,628,135
def _create_spectrogram(signal: np.ndarray, should_graph: bool = False) -> np.ndarray:
    """Creates spectrogram for signal, and returns it.

    Windows the signal with a Gaussian filter centred at each sample time
    and stores the magnitude of the FFT of each windowed slice as one
    column of the (n, n) spectrogram.

    Shows graph for the signal with a Gaussian filter overlayed, the
    filtered signal, and the FFT of the signal — only at two sample
    columns (i == 20 and i == 100) when ``should_graph`` is set.
    """
    n = len(signal)  # 128
    sigma = 3  # Gaussian window width; units as defined by _get_gaussian_filter
    time_list = np.arange(n)
    spectrogram = np.zeros((n, n))
    for (i, time) in enumerate(time_list):
        g = _get_gaussian_filter(time, time_list, sigma)
        ug = signal * g  # windowed signal
        ugt = np.abs(np.fft.fftshift(np.fft.fft(ug)))  # magnitude spectrum
        spectrogram[:, i] = ugt
        if should_graph and i in (20, 100):
            graph_gaussian_signals(signal, g, ug, ugt)
    return spectrogram
7e82cf45c1f5be2dbde929293b48110da61a43b6
3,628,136
import scipy.stats


def test_reblocking():
    """ Tests reblocking against known distribution. """

    def corr_data(N, L):
        """
        Creates correlated data. Taken from
        https://pyblock.readthedocs.io/en/latest/tutorial.html.
        """
        # 2**N samples smoothed by a length-2**L window -> serial correlation.
        return np.convolve(np.random.randn(2 ** N), np.ones(2 ** L) / 10, "same")

    n = 11
    cols = ["test_data1", "test_data2"]
    dat1 = corr_data(n, 4)
    dat2 = corr_data(n, 7)
    test_data = pd.DataFrame(data={cols[0]: dat1, cols[1]: dat2})
    reblocked_data = optimally_reblocked(test_data[cols])
    for c in cols:
        row = reblocked_data.loc[c]
        # NOTE(review): `reblocks` is taken from .values[0] for every column,
        # i.e. the same reblock count is assumed for both columns -- confirm.
        reblocks = reblocked_data["reblocks"].values[0]
        # Reference values computed independently with scipy for comparison.
        std_err = scipy.stats.sem(reblock_by2(test_data, reblocks, c))
        std_err_err = std_err / np.sqrt(2 * (2 ** (n - reblocks) - 1))

        assert np.isclose(
            row["mean"], np.mean(test_data[c]), 1e-10, 1e-12
        ), "Means are not equal"
        assert np.isclose(
            row["standard error"], std_err, 1e-10, 1e-12
        ), "Standard errors are not equal"
        assert np.isclose(
            row["standard error error"], std_err_err, 1e-10, 1e-12
        ), "Standard error errors are not equal"

    # reblock() with block size len/4 should agree with two by-2 reblocks.
    statlist = ["mean", "sem", lambda x: x.sem() / np.sqrt(2 * (len(x) - 1))]
    rb1 = reblock(test_data, len(test_data) // 4).agg(statlist).T
    rb2 = reblock_by2(test_data, 2).agg(statlist).T
    for c in rb1.columns:
        assert np.isclose(rb1[c], rb2[c], 1e-10, 1e-12).all(), (c, rb1[c], rb2[c])
c9caa94e57a38d80d5a599546a00415abb4fdedd
3,628,137
def get_rproj_czdisp(galaxyra, galaxydec, galaxycz, galaxygrpid, HUBBLE_CONST=70.):
    """
    Compute the observational projected radius, in Mpc/h, and the
    observational velocity dispersion, in km/s, for a galaxy group
    catalog. Input should match the # of galaxies, and the output will
    as well. Based on FoF4 code of Berlind+ 2006.

    Parameters
    ----------
    galaxyra : iterable
       Right-ascension of grouped galaxies in decimal degrees.
    galaxydec : iterable
       Declination of grouped galaxies in decimal degrees.
    galaxycz : iterable
       Redshift velocity (cz) of grouped galaxies in km/s.
    galaxygrpid : iterable
       Group ID numbers of grouped galaxies, shape should match `galaxyra`.

    Returns
    -------
    rproj : np.array, shape matches `galaxyra`
       For element index i, projected radius of galaxy group to which galaxy i belongs, in Mpc/h.
    vdisp : np.array, shape matches `galaxyra`
       For element index i, velocity dispersion of galaxy group to which galaxy i belongs, in km/s.
    """
    galaxyra=np.asarray(galaxyra)
    galaxydec=np.asarray(galaxydec)
    galaxycz=np.asarray(galaxycz)
    galaxygrpid=np.asarray(galaxygrpid)
    rproj=np.zeros(len(galaxyra))
    vdisp=np.zeros(len(galaxyra))
    # Group centers (RA/Dec/cz) for each galaxy, computed by sibling helper.
    grpra, grpdec, grpcz = group_skycoords(galaxyra, galaxydec, galaxycz, galaxygrpid)
    grpra = grpra*np.pi/180. #convert everything to radians
    galaxyra=galaxyra*np.pi/180.
    galaxydec=galaxydec*np.pi/180.
    grpdec = grpdec*np.pi/180.
    uniqid = np.unique(galaxygrpid)
    # NOTE(review): true speed of light is 299792.458 km/s; 299800 is
    # presumably kept for parity with the original FoF4 code -- confirm.
    cspeed=299800 # km/s
    for uid in uniqid:
        sel = np.where(galaxygrpid==uid)
        nmembers=len(sel[0])
        if nmembers==1:
            # Singleton groups have no measurable extent or dispersion.
            rproj[sel]=0.
            vdisp[sel]=0.
        else:
            phicen=grpra[sel][0]
            thetacen=grpdec[sel][0]
            # Spherical law of cosines: angular separation between each
            # member and the group center.
            cosDpsi=np.cos(thetacen)*np.cos(galaxydec[sel])+np.sin(thetacen)*np.sin(galaxydec[sel])*np.cos((phicen - galaxyra[sel]))
            sinDpsi=np.sqrt(1-cosDpsi**2)
            # Transverse offset in Mpc/h via Hubble's law (d = cz / H0).
            rp=sinDpsi*galaxycz[sel]/HUBBLE_CONST
            # RMS of member offsets = projected group radius.
            rproj[sel]=np.sqrt(np.sum(rp**2)/len(sel[0]))
            czcen = grpcz[sel][0]
            Dz2 = np.sum((galaxycz[sel]-czcen)**2.0)
            # Sample std of cz offsets, corrected by (1+z) for cosmological
            # redshift broadening.
            vdisp[sel]=np.sqrt(Dz2/(nmembers-1))/(1.+czcen/cspeed)
    return rproj, vdisp
558e6b5acb6363ba23dc1b95b0628b951eea6138
3,628,138
def colormap_to_ev_blocks_equidistant(colormap, stops):
    """
    Turn a colormap into constant-color blocks over exposure-value ranges.

    The colormap is divided into equal-length segments, one per block
    (one block per pair of adjacent EV stops, i.e. len(stops) - 1 blocks).
    Each block is emitted as two ColorPoints -- one at its starting EV and
    one at its ending EV -- both carrying the same sampled color, so the
    color is constant across the range.

    :param colormap: Colormap with 256 entries
    :param stops: Exposure value stops, needs to have at least three entries
    :return: Colormap based on exposure values
    """
    if len(stops) < 3:
        raise ValueError("The stops need to have at least three entries.")

    blocks = []

    # One fewer block than stop intervals would suggest: the final stop only
    # closes the last block, hence the divisor of len(stops) - 2.
    segment = 1.0 / (len(stops) - 2)
    coordinate = 0.0
    last_block = len(stops) - 2

    for idx in range(last_block + 1):
        color = colormap[int(round(255.0 * coordinate))]
        blocks.append(ColorPoint(stops[idx], color))
        blocks.append(ColorPoint(stops[idx + 1], color))
        coordinate += segment

    return blocks
21b69ffae9e867c2326eded9efe797369087a429
3,628,139
import copy


def turn_acs_into_acsK(actions_taken_so_far, all_samples, K, N, horizon):
    """
    Convert per-step actions into sliding windows of the last K actions.

    Start with an array where each entry is (a_t); end with an array where
    each entry is (a_{t-(K-1)}, ..., a_{t-1}, a_t).

    actions_taken_so_far: [T, acDim] actions already executed
    all_samples: [N, horizon, acDim] candidate action sequences
    Returns an array of shape [N, horizon, K, acDim].
    """
    # Last K-1 executed actions, tiled per candidate: [N, 1, K-1, acDim]
    history = actions_taken_so_far[-(K - 1):]
    window = np.expand_dims(np.tile(np.expand_dims(history, 0), (N, 1, 1)), 1)

    windows_per_step = []
    for step in range(horizon):
        # Action each candidate takes at this step: [N, 1, 1, acDim]
        current = np.expand_dims(np.expand_dims(all_samples[:, step, :], 1), 1)

        if K == 1:
            step_window = current
        else:
            # [N, 1, K-1, acDim] ++ [N, 1, 1, acDim] -> [N, 1, K, acDim]
            step_window = np.append(window, current, 2)
            # Drop the oldest action to form the next step's history.
            window = np.delete(step_window, 0, 2)

        windows_per_step.append(step_window)

    # Stack along the horizon axis: [N, horizon, K, acDim]
    return np.concatenate(windows_per_step, axis=1)
28d5bda4155f6610361f328c16486e43c0beb472
3,628,140
from typing import Dict
from typing import Any
import os
from typing import List
def parse_scripting_log(script_type: str) -> Dict[str, Any]:
    """Parse a log to run to create a sequence of XEM commands.

    Reads ``xem_<script_type>.txt`` from the bundled ``src/xem_scripts``
    directory and extracts every ``mGET`` command found between the
    ``begin_hardware_script`` and ``end_hardware_script`` markers.

    Args:
        script_type: name fragment of the script file to load; must also
            match the script type declared inside the log itself.

    Returns:
        dict with keys ``script_type``, ``version`` and ``command_list``
        (a list of parsed command dicts).

    Raises:
        MismatchedScriptTypeError: the type declared in the log differs
            from the requested ``script_type``.
        ScriptDoesNotContainEndCommandError: no ``end_hardware_script``
            marker was found.
    """
    file_name = f"xem_{script_type}.txt"
    relative_path = os.path.join("src", "xem_scripts", file_name)
    # Resolve the script relative to the package root (two levels up).
    absolute_path = os.path.normcase(os.path.join(get_current_file_abs_directory(), os.pardir, os.pardir))
    file_path = resource_path(relative_path, base_path=absolute_path)
    command_list: List[Dict[str, Any]] = list()
    script_dict = {"script_type": script_type, "command_list": command_list}
    is_parsing = False
    with open(file_path, "r") as log_file:
        is_script_done = False
        for line in log_file:
            if is_parsing:
                # Everything between begin/end markers is command territory.
                if "end_hardware_script" in line:
                    is_script_done = True
                    break
                if "mGET" in line:
                    command_list.append(parse_scripting_log_line(line))
            elif "begin_hardware_script" in line:
                script_details = parse_scripting_log_line(line)
                if script_details["script_type"] != script_type:
                    log_script_type = script_details["script_type"]
                    raise MismatchedScriptTypeError(
                        f"Script type in log: '{log_script_type}' does not match file name: '{script_type}'"
                    )
                script_dict["version"] = script_details["version"]
                is_parsing = True
    if not is_script_done:
        raise ScriptDoesNotContainEndCommandError()
    return script_dict
122e215aed3b8543b3981a00043daaff7fd24e2a
3,628,141
def fetch_top_chapter_checkins():
    """Show checkin count of chapters in this week.

    Returns a list of dicts with keys ``parent_chapter``, ``sub_chapter``
    and ``checkin_count``, aggregated over checkins created between the
    first and last day of the current week. The date bounds are passed
    as bind parameters, so the SQL is safely parameterized.
    """
    re = frappe.db.sql(
        """
        SELECT
            sc.parent_chapter,
            c.sub_chapter,
            COUNT(*) AS checkin_count
        FROM `tabCheckin` c
        JOIN `tabSub Chapter` sc ON c.sub_chapter=sc.name
        WHERE c.creation BETWEEN %s AND %s
        GROUP BY sc.parent_chapter, c.sub_chapter
        """,
        [
            str(get_first_day_of_week(date.today())),
            str(get_last_day_of_week(date.today())),
        ],
        as_dict=True,  # rows come back as dicts instead of tuples
    )
    return re
1b725c0199d28acedf1e941938e924b79c835bc1
3,628,142
from bitcodin import api_base


def get_api_base():
    """Return the configured bitcodin API base URL.

    :return: the API base URL as a string
    """
    base_url = api_base
    return base_url
f909de3dae62ce73ad03d813c325801e5a69a9b1
3,628,143
def simple_reduce_error(
    filename,
    conservative,
    precision,
    target_error,
    convergence_rate_scale_factor,
):
    """Determine points to add (first) or extend (second) to meet target error.

    FILENAME is the data file containing lambda, dhdl_average, and error_est.

    Args:
        filename: path to a whitespace-delimited text file loadable by
            ``np.loadtxt`` (columns: lambda, dhdl_average, error_est).
        conservative: passed through to ``get_points_to_reduce_error``.
        precision: number of printed digits active while the reduction runs.
        target_error: error threshold the point set should meet.
        convergence_rate_scale_factor: passed through to
            ``get_points_to_reduce_error``.

    Returns:
        Whatever ``get_points_to_reduce_error`` returns for the loaded points.
    """
    points = np.loadtxt(filename)
    # Bug fix: np.set_printoptions() returns None and crashes inside `with`;
    # np.printoptions() is the context-manager form that restores the
    # previous print options on exit.
    with np.printoptions(precision=precision):
        return get_points_to_reduce_error(
            points,
            target_error,
            conservative,
            convergence_rate_scale_factor,
        )
697f6a100b246ebc783dd0fb7f8e712b989e42da
3,628,144
def read_artist_albums(id, name):
    """Get lists of albums for one artist from both backends.

    Returns a pair: (name, year, id) tuples from the 'a' database and
    (album, year) tuples from the 'c' database.
    """
    albums_a = [
        (album.name, str(album.release_year), str(album.id))
        for album in dmla.list_albums_by_artist('', id, 'Jaar')
    ]
    albums_c = [
        (album['album'], album['year'])
        for album in dmlc.list_albums(DB_C, name)
    ]
    return albums_a, albums_c
f4bdcb3cb7a65bd765fce2c960560367e3be2e9f
3,628,145
def client_add(request):
    """
    Add new client and see the list of clients

    GET renders the page with an empty ClientForm plus existing clients
    and enquiry clients; POST validates the submitted ClientForm, stamps
    it with the logged-in Employee as creator, and saves it. Success or
    failure is reported via the template context messages.
    """
    success_message, error_message = None, None
    form = ClientForm
    clients = Client.objects.all()
    enquiry_form = EnquiryClientForm
    enquiry_clients = EnquiryClient.objects.all()
    if request.method=="POST":
        # Only the client form is handled here; the enquiry form is
        # rendered for display but submitted elsewhere, presumably --
        # confirm against the template.
        form = ClientForm(request.POST, request.FILES)
        if form.is_valid():
            obj = form.save(commit=False)
            # The logged-in user's username doubles as the Employee id.
            obj.created_by = Employee.objects.get(employee_id=request.user.username)
            obj.save()
            success_message = "added a new client"
        else:
            error_message = "to add a new client"

    context = {
        'form'           : form,
        'enquiry_form'   : enquiry_form,
        'clients'        : clients,
        'enquiry_clients': enquiry_clients,
        'success_message': success_message,
        'error_message'  : error_message,
        'user_info': Employee.objects.get(employee_id=request.user.username),
        'cart': Cart.objects.filter(created_by__employee_id=request.user.username).count,
    }
    return render(request, 'employee/client_add.html', context)
4c1023656e307b135e8e9c5b9f1d5875194c0304
3,628,146
def mult(A, B):
    """Return the product of ``A`` and ``B``.

    Works for any operands supporting ``*`` (numbers, sequence
    repetition, arrays, ...). Use as ``mult(A, B)``.
    """
    product = A * B
    return product
586c9077303dd8a36ae6007ff74756f77ec8fb3b
3,628,147
def remove_stopwords(sentence):
    """
    Remove stop words from a sentence. Assumes clean text separated by
    spaces; the text is tokenized with NLTK's ``word_tokenize`` and every
    token present in ``stop_words`` is dropped.

    (The previous docstring incorrectly described this as lemmatization.)

    :param sentence: the text to filter
    :type: string
    :return: the text with stop words removed, tokens joined by spaces
    :type: string
    """
    # `w not in` is the idiomatic form of the original `not w in`.
    return ' '.join(w for w in word_tokenize(sentence) if w not in stop_words)
52b8fd43a710b02b825779ff4d93cb6d988e512d
3,628,148
import re


def fileparse(*args):
    """Split a path into (basename, dirpath, tail).

    Unix port of perl's File::Basename::fileparse. The first positional
    argument is the path; any further arguments are suffix patterns
    (strings or compiled regexes) to strip from the basename. Stripped
    suffixes are concatenated (in reverse stripping order) into ``tail``.

    Raises Die when the path is None.
    """
    fullname = args[0]
    suffix_patterns = args[1:]
    if fullname is None:
        raise Die("fileparse(): need a valid pathname")

    fullname = str(fullname)
    # This pattern always matches: an optional directory part ending in
    # '/' followed by the remainder (DOTALL so newlines are included).
    match = re.search(re.compile(r'^(.*/)?(.*)', re.S), fullname)
    dirpath, basename = match.groups()
    if not dirpath:
        dirpath = './'

    tail = ''
    for pattern in suffix_patterns:
        if isinstance(pattern, re.Pattern):  # in case a qr// object is passed
            pattern = pattern.pattern
        anchored = re.compile(f"({pattern})$", re.S)
        found = anchored.search(basename)
        if found:
            tail = found.group(1) + tail
            # Remove exactly the matched span (same as re.sub count=1).
            basename = basename[:found.start()] + basename[found.end():]

    return (basename, dirpath, tail)
cbc2a6b09b9c04647de2d6f65b71bc93f0f92051
3,628,149
def lfnToEOS( path ):
    """Convert an LFN to its EOS physical file name.

    Paths that are not LFNs are returned unchanged.
    NOTE(review): original author asked whether a non-LFN input should
    raise instead -- behaviour kept as-is.
    """
    if not isLFN(path):
        return path
    pfn = 'root://eoscms.cern.ch//eos/cms/' + path
    return pfn.replace('//store', '/store')
b41b0f42a707f1868776412d1c2f7a6a2fde3a47
3,628,150
def get_link_href(result_object, link_relation):
    """Return the href for *link_relation* from a previous API result.

    'result_object' a JSON object returned by a previous API call.
    May not be None.
    'link_relation' the link relation for which href is required.

    Returns None if the link does not exist.
    """
    # Argument error checking.
    assert result_object is not None

    link = result_object['_links'].get(link_relation)
    return link.get('href') if link else None
400cd38d1b29ea71bf974d8aa16c1b3adf104428
3,628,151
from typing import Callable
from typing import Any
from typing import Sequence
def scanr(
        fun: Callable[[Any, Any], Any], acc: Any, trav: Sequence[Any]
) -> Sequence[Any]:
    """Right-handed scan (scanr) in Python3.

    Folds from the right: the function is first applied to the last item
    and the accumulator, then to the previous item and that result, and
    so on. Returns the list of intermediate and final results, newest
    first.

    >>> print(scanr(lambda x, y: x + y, 0, [1, 2, 3]))
    [6, 5, 3, 0]
    >>> print(scanr(lambda x, y: x ** y, 0, [1, 2, 3]))
    [1, 2, 1, 0]
    """
    if not trav:
        return [acc]
    rest = scanr(fun, acc, trav[1:])
    # Prepend the newest partial result to the results from the right.
    return [fun(trav[0], rest[0])] + list(rest)
4ef7c43aa4b4b9490efe955ef810a1eab38a6064
3,628,152
import sys def _are_we_frozen(): """Returns whether we are frozen via py2exe. This will affect how we find out where we are located.""" return hasattr(sys, "frozen")
a9e55631ce9f8d60351e41d257c225f859e39a05
3,628,153
def argtopk_aggregate(a_plus_idx, k, axis, keepdims):
    """ Final aggregation function of argtopk

    Invoke argtopk one final time, sort the results internally, drop the
    data and return the index only.
    """
    assert keepdims is True
    a, idx = argtopk(a_plus_idx, k, axis, keepdims)
    # `axis` arrives as a one-element sequence; unwrap to the int axis.
    axis = axis[0]

    # Order the surviving indices by their values, then realign.
    idx2 = np.argsort(a, axis=axis)
    idx = take_along_axis(idx, idx2, axis)
    if k < 0:
        # Negative k means "smallest k": ascending argsort order is final.
        return idx
    # Positive k selects the largest elements; argsort is ascending, so
    # reverse along the reduction axis to return descending-value order.
    return idx[
        tuple(
            slice(None, None, -1) if i == axis else slice(None)
            for i in range(idx.ndim)
        )
    ]
bb9c113112dcbd64bd833a390721a042a0cbbc56
3,628,154
def eht():
    """Make an empty HashTable with 10 buckets (test fixture).

    NOTE(review): the original docstring said "bst", but this constructs
    a HashTable, not a binary search tree.
    """
    return HashTable(10)
7197f5c5846a8b369e5c1284b40b990035fb447b
3,628,155
import re
def rx_filter(objs: list, attr: str, prompt: str) -> list:
    """
    Interactively filter a list of dicts by regex match on one of their
    values, re-prompting until at least one object matches.

    Exact matches on ``obj[attr]`` are preferred; when none exist, the
    input is treated as a regular expression. A search term with no
    uppercase letters is matched case-insensitively (smartcase).

    :param objs: list of dicts to filter
    :param attr: dict key whose value is matched against the input
    :param prompt: text shown to the user before reading input
    :return: the matching subset of ``objs`` (never empty)
    """
    while True:
        search_term = input(prompt + " ")

        # Prefer exact match first -- otherwise an item that is a
        # substring of another item could never be selected on its own!
        matches = [obj for obj in objs if obj[attr] == search_term]
        if matches:
            return matches

        # If search doesn't have uppercase letters, make it case-insensitive.
        rx_flags = 0
        if search_term == search_term.lower():
            rx_flags |= re.IGNORECASE

        rx = re.compile(search_term, rx_flags)
        matches = [obj for obj in objs if rx.search(obj[attr])]
        if matches:
            return matches

        print("No matches, try again.")
f0c6dd5609020054da7895e577483c911d9aaea3
3,628,156
import logging
import glob
def _get_morphotactics_rules(morphotactics_dir: str) -> _RewriteRuleSet:
  """Parses morphotactics model into valid rewrite rules.

  Args:
    morphotactics_dir: path to the directory that contains the text files that
        define rules of morphotactics FST. All files that have the ".txt" file
        extension under this directory will be picked up by this function and
        will attempted to be parsed into a set of rewrite rule objects.

  Raises:
    MorphotacticsCompilerError: one of the morphotactics rule definitions is
        illformed, or no valid rewrite rules can be generated from the rule
        definitions.

  Returns:
    Array of validated and parsed morphotactics rewrite rule objects.
  """

  def _read_rule_set(path: str) -> _RewriteRule:
    # Reads one definition file, validates every line, then parses the
    # surviving lines into a rule set.
    logging.info(f"reading rewrite rules from '{path}'")
    # Below read call might throw IOError.
    lines = morphotactics_reader.read_rule_definitions(path)

    for index, line in lines.items():
      try:
        morphotactics_validator.validate(line)
      except morphotactics_validator.InvalidMorphotacticsRuleError as error:
        # Re-raise with file/line context so the offending definition can
        # be located.
        raise MorphotacticsCompilerError(
            f"Rewrite rule at line {index} of '{path}' is illformed. {error}")

    return morphotactics_parser.parse(list(lines.values()))

  # Sorting makes the rule order deterministic across filesystems.
  paths = sorted(glob.glob(f"{morphotactics_dir}/*.txt"))
  rule_sets = [_read_rule_set(p) for p in paths]

  # Flatten all per-file rule sets into one combined rule set.
  morphotactics = _RewriteRuleSet()
  morphotactics.rule.extend(r for rs in rule_sets for r in rs.rule)

  if not morphotactics.rule:
    raise MorphotacticsCompilerError(
        "no valid morphotactics rewrite rules found.")

  return morphotactics
56da73c4a6623dcf9e2f3d8cc136f9aab3409c84
3,628,157
def _get_usb_hub_map(device_info_list): """Creates a map of usb hub addresses to device_infos by port. Args: device_info_list (list): list of known usb_connections dicts. Returns: dict: map of usb hub addresses to device_infos by port """ map_usb_hub_ports = {} for device_info in device_info_list: hub_address = device_info['usb_hub_address'] port = device_info['usb_hub_port'] if hub_address: if hub_address not in map_usb_hub_ports: map_usb_hub_ports[hub_address] = {} if not map_usb_hub_ports[hub_address].get( port) or device_info['ftdi_interface'] == 2: map_usb_hub_ports[hub_address][port] = device_info return map_usb_hub_ports
eaadc4713a41fdf38cea4fce35806d1d8772df27
3,628,158
def gaussian_smoothing(eem_df, sigma, truncate):
    """Apply a Gaussian blur to an excitation-emission matrix, in place.

    Args:
        eem_df (pandas.DataFrame): An excitation-emission matrix.
        sigma (int): Standard deviation of the Gaussian smoothing kernel.
        truncate (int): Truncate the kernel at this many standard
            deviations.

    Returns:
        pandas.DataFrame: The same DataFrame, now holding smoothed values.
    """
    smoothed = gaussian_filter(eem_df, sigma=sigma, truncate=truncate)
    # Assign through [:] so the original DataFrame object (and its
    # index/columns) is preserved and returned.
    eem_df[:] = smoothed
    return eem_df
e3e81ba47b2dd2c7a6015f092b151c65f1236bb9
3,628,159
import os
import subprocess
def get_current_commit(srcdir):
    """Return information about git commit checked out in the given directory.

    Note: changes the process working directory to *srcdir* as a side
    effect (preserved because callers may rely on it).

    :param srcdir: source code directory
    :type srcdir: str
    :return: commit information composed of brief SHA1 and subject
    :rtype: str
    """
    os.chdir(srcdir)
    # Pass an argument list with shell=False (the default): no shell
    # quoting pitfalls, no command injection surface.
    output = subprocess.check_output(
        ["git", "log", "--pretty=format:%h %s", "-n", "1"]
    )
    return output.decode().rstrip("\r\n")
3b3601303135bfdfe66cb069ef7d4ed1f413af8b
3,628,160
import re
def parse_pgsql_logs(data):
    """
    Parse the pgsql benchmark data from ripsaw and return the data in
    list format

    Args:
        data (str): log data from pgsql bench run

    Returns:
        list_data (list): data digestable by scripts with below format
            e.g.:

            [
            {1: {'num_clients': '2','num_threads': '7','latency_avg': '7',
             'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},
            {2: {'num_clients': '2','num_threads': '7','latency_avg': '7',
             'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},
            {3: {'num_clients': '2','num_threads': '7','latency_avg': '7',
             'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},
            ]
            where keys{1,2,3} are run-IDs
    """
    match = data.split("PGBench Results")
    list_data = []
    # NOTE(review): the loop starts at index 2, so the chunk between the
    # first and second "PGBench Results" markers is skipped -- presumably
    # intentional for this log format; confirm against a sample log.
    for i in range(2, len(match)):
        log = "".join(match[i].split("\n"))
        pgsql_data = dict()
        pgsql_data[i - 1] = {}
        # Each field below is optional: it is added only when its regex
        # matches, so missing fields simply stay absent from the dict.
        clients = re.search(r"scaling_factor\':\s+(\d+),", log)
        if clients and clients.group(1):
            pgsql_data[i - 1]["scaling_factor"] = clients.group(1)
        clients = re.search(r"number_of_clients\':\s+(\d+),", log)
        if clients and clients.group(1):
            pgsql_data[i - 1]["num_clients"] = clients.group(1)
        threads = re.search(r"number_of_threads\':\s+(\d+)", log)
        if threads and threads.group(1):
            pgsql_data[i - 1]["num_threads"] = threads.group(1)
        clients = re.search(r"number_of_transactions_per_client\':\s+(\d+),", log)
        if clients and clients.group(1):
            pgsql_data[i - 1]["number_of_transactions_per_client"] = clients.group(1)
        clients = re.search(
            r"number_of_transactions_actually_processed\':\s+(\d+),", log
        )
        if clients and clients.group(1):
            pgsql_data[i - 1][
                "number_of_transactions_actually_processed"
            ] = clients.group(1)
        # NOTE(review): (\d+) captures only the integer part, so a value
        # like 7.5 ms would be recorded as '7' -- confirm whether the log
        # emits decimals here.
        lat_avg = re.search(r"latency_average_ms\':\s+(\d+)", log)
        if lat_avg and lat_avg.group(1):
            pgsql_data[i - 1]["latency_avg"] = lat_avg.group(1)
        lat_stddev = re.search(r"latency_stddev_ms\':\s+(\d+)", log)
        if lat_stddev and lat_stddev.group(1):
            pgsql_data[i - 1]["lat_stddev"] = lat_stddev.group(1)
        tps_incl = re.search(r"tps_incl_con_est\':\s+(\w+)", log)
        if tps_incl and tps_incl.group(1):
            pgsql_data[i - 1]["tps_incl"] = tps_incl.group(1)
        tps_excl = re.search(r"tps_excl_con_est\':\s+(\w+)", log)
        if tps_excl and tps_excl.group(1):
            pgsql_data[i - 1]["tps_excl"] = tps_excl.group(1)
        list_data.append(pgsql_data)
    return list_data
5bd5cd43432b17be6bd52004b151b32a0f574980
3,628,161
from django.utils.encoding import force_text
from django.core.mail import EmailMultiAlternatives

from mailer.models import Message, PRIORITY_MAPPING


def send_html_mail(subject, message, message_html, from_email, recipient_list,
                   priority="medium", fail_silently=False, auth_user=None,
                   auth_password=None, headers=None, attachments=None):
    """
    Queue an HTML e-mail (plain-text body plus HTML alternative).

    :param subject: message subject (lazy translations are forced to text)
    :param message: plain-text body
    :param message_html: HTML body, attached as a text/html alternative
    :param priority: one of the keys of PRIORITY_MAPPING
    :param headers: optional extra e-mail headers (dict)
    :param attachments: optional attachments passed to the e-mail object
    :return: 1 (number of messages queued)
    """
    # Bug fix: `headers={}` was a mutable default argument; use None and
    # create a fresh dict per call instead.
    if headers is None:
        headers = {}

    priority = PRIORITY_MAPPING[priority]

    # need to do this in case subject used lazy version of ugettext
    subject = force_text(subject)
    message = force_text(message)
    message_html = force_text(message_html)

    email_obj = EmailMultiAlternatives(
        subject=subject,
        body=message,
        from_email=from_email,
        to=recipient_list,
        attachments=attachments,
        headers=headers
    )
    email_obj.attach_alternative(message_html, "text/html")

    # Persist the message so the mailer engine can send it later.
    db_msg = Message(
        priority=priority,
        subject=subject
    )
    db_msg.email = email_obj
    db_msg.set_recipients(recipient_list)
    db_msg.save()
    return 1
c77b7cd475fb42891310c2de5cdf0ceb599f637b
3,628,162
import sys
def compute_tDCF(bonafide_score_cm, spoof_score_cm, Pfa_asv, Pmiss_asv, Pmiss_spoof_asv, cost_model, print_cost):
    """
    Compute Tandem Detection Cost Function (t-DCF) [1] for a fixed ASV system.
    In brief, t-DCF returns a detection cost of a cascaded system of this form,

      Speech waveform -> [CM] -> [ASV] -> decision

    where CM stands for countermeasure and ASV for automatic speaker
    verification. The CM is therefore used as a 'gate' to decided whether or
    not the input speech sample should be passed onwards to the ASV system.
    Generally, both CM and ASV can do detection errors. Not all those errors
    are necessarily equally cost, and not all types of users are necessarily
    equally likely. The tandem t-DCF gives a principled with to compare
    different spoofing countermeasures under a detection cost function
    framework that takes that information into account.

    INPUTS:

      bonafide_score_cm   A vector of POSITIVE CLASS (bona fide or human)
                          detection scores obtained by executing a spoofing
                          countermeasure (CM) on some positive evaluation trials.
                          trial represents a bona fide case.
      spoof_score_cm      A vector of NEGATIVE CLASS (spoofing attack)
                          detection scores obtained by executing a spoofing
                          CM on some negative evaluation trials.
      Pfa_asv             False alarm (false acceptance) rate of the ASV
                          system that is evaluated in tandem with the CM.
                          Assumed to be in fractions, not percentages.
      Pmiss_asv           Miss (false rejection) rate of the ASV system that
                          is evaluated in tandem with the spoofing CM.
                          Assumed to be in fractions, not percentages.
      Pmiss_spoof_asv     Miss rate of spoof samples of the ASV system that
                          is evaluated in tandem with the spoofing CM. That
                          is, the fraction of spoof samples that were
                          rejected by the ASV system.
      cost_model          A struct that contains the parameters of t-DCF,
                          with the following fields.

                          Ptar        Prior probability of target speaker.
                          Pnon        Prior probability of nontarget speaker (zero-effort impostor)
                          Psoof       Prior probability of spoofing attack.
                          Cmiss_asv   Cost of ASV falsely rejecting target.
                          Cfa_asv     Cost of ASV falsely accepting nontarget.
                          Cmiss_cm    Cost of CM falsely rejecting target.
                          Cfa_cm      Cost of CM falsely accepting spoof.

      print_cost          Print a summary of the cost parameters and the
                          implied t-DCF cost function?

    OUTPUTS:

      tDCF_norm           Normalized t-DCF curve across the different CM
                          system operating points; see [2] for more details.
                          Normalized t-DCF > 1 indicates a useless
                          countermeasure (as the tandem system would do
                          better without it). min(tDCF_norm) will be the
                          minimum t-DCF used in ASVspoof 2019 [2].
      CM_thresholds       Vector of same size as tDCF_norm corresponding to
                          the CM threshold (operating point).

    NOTE:
    o     In relative terms, higher detection scores values are assumed to
          indicate stronger support for the bona fide hypothesis.
    o     You should provide real-valued soft scores, NOT hard decisions. The
          recommendation is that the scores are log-likelihood ratios (LLRs)
          from a bonafide-vs-spoof hypothesis based on some statistical model.
          This, however, is NOT required. The scores can have arbitrary range
          and scaling.
    o     Pfa_asv, Pmiss_asv, Pmiss_spoof_asv are in fractions, not percentages.

    References:

      [1] T. Kinnunen, K.-A. Lee, H. Delgado, N. Evans, M. Todisco,
          M. Sahidullah, J. Yamagishi, D.A. Reynolds: "t-DCF: a Detection
          Cost Function for the Tandem Assessment of Spoofing Countermeasures
          and Automatic Speaker Verification", Proc. Odyssey 2018: the
          Speaker and Language Recognition Workshop, pp. 312--319,
          Les Sables d'Olonne,
          France, June 2018
          (https://www.isca-speech.org/archive/Odyssey_2018/pdfs/68.pdf)

      [2] ASVspoof 2019 challenge evaluation plan
          TODO: <add link>
    """

    # Sanity check of cost parameters
    if cost_model['Cfa_asv'] < 0 or cost_model['Cmiss_asv'] < 0 or \
            cost_model['Cfa_cm'] < 0 or cost_model['Cmiss_cm'] < 0:
        print('WARNING: Usually the cost values should be positive!')

    if cost_model['Ptar'] < 0 or cost_model['Pnon'] < 0 or cost_model['Pspoof'] < 0 or \
            np.abs(cost_model['Ptar'] + cost_model['Pnon'] + cost_model['Pspoof'] - 1) > 1e-10:
        sys.exit('ERROR: Your prior probabilities should be positive and sum up to one.')

    # Unless we evaluate worst-case model, we need to have some spoof tests against asv
    if Pmiss_spoof_asv is None:
        sys.exit('ERROR: you should provide miss rate of spoof tests against your ASV system.')

    # Sanity check of scores
    combined_scores = np.concatenate((bonafide_score_cm, spoof_score_cm))
    if np.isnan(combined_scores).any() or np.isinf(combined_scores).any():
        sys.exit('ERROR: Your scores contain nan or inf.')

    # Sanity check that inputs are scores and not decisions
    n_uniq = np.unique(combined_scores).size
    if n_uniq < 3:
        sys.exit('ERROR: You should provide soft CM scores - not binary decisions')

    # Obtain miss and false alarm rates of CM
    Pmiss_cm, Pfa_cm, CM_thresholds = compute_det_curve(bonafide_score_cm, spoof_score_cm)

    # Constants - see ASVspoof 2019 evaluation plan
    # C1 weights CM misses (rejecting a bona fide target), discounted by the
    # ASV's own error rates; C2 weights CM false alarms on spoof trials that
    # the ASV would not itself reject.
    C1 = cost_model['Ptar'] * (cost_model['Cmiss_cm'] - cost_model['Cmiss_asv'] * Pmiss_asv) - \
         cost_model['Pnon'] * cost_model['Cfa_asv'] * Pfa_asv
    C2 = cost_model['Cfa_cm'] * cost_model['Pspoof'] * (1 - Pmiss_spoof_asv)

    # Sanity check of the weights
    if C1 < 0 or C2 < 0:
        sys.exit(
            'You should never see this error but I cannot evalute tDCF with negative weights - please check whether '
            'your ASV error rates are correctly computed?')

    # Obtain t-DCF curve for all thresholds
    tDCF = C1 * Pmiss_cm + C2 * Pfa_cm

    # Normalized t-DCF
    tDCF_norm = tDCF / np.minimum(C1, C2)

    # Everything should be fine if reaching here.
    if print_cost:
        print(
            't-DCF evaluation from [Nbona={}, Nspoof={}] trials\n'.format(bonafide_score_cm.size,
                                                                          spoof_score_cm.size))
        print('t-DCF MODEL')
        print('   Ptar         = {:8.5f} (Prior probability of target user)'.format(cost_model['Ptar']))
        print('   Pnon         = {:8.5f} (Prior probability of nontarget user)'.format(cost_model['Pnon']))
        print('   Pspoof       = {:8.5f} (Prior probability of spoofing attack)'.format(cost_model['Pspoof']))
        print('   Cfa_asv      = {:8.5f} (Cost of ASV falsely accepting a nontarget)'.format(cost_model['Cfa_asv']))
        print(
            '   Cmiss_asv    = {:8.5f} (Cost of ASV falsely rejecting target speaker)'.format(cost_model['Cmiss_asv']))
        print(
            '   Cfa_cm       = {:8.5f} (Cost of CM falsely passing a spoof to ASV system)'.format(cost_model['Cfa_cm']))
        print('   Cmiss_cm     = {:8.5f} (Cost of CM falsely blocking target utterance which never reaches ASV)'.format(
            cost_model['Cmiss_cm']))
        print('\n   Implied normalized t-DCF function (depends on t-DCF parameters and ASV errors), s=CM threshold)')

        if C2 == np.minimum(C1, C2):
            print('   tDCF_norm(s) = {:8.5f} x Pmiss_cm(s) + Pfa_cm(s)\n'.format(C1 / C2))
        else:
            print('   tDCF_norm(s) = Pmiss_cm(s) + {:8.5f} x Pfa_cm(s)\n'.format(C2 / C1))

    return tDCF_norm, CM_thresholds
9245e7d3975ae4c6f910a32afdad20ab4fdbc7d9
3,628,163
def jd2gdate(myjd):
    """Julian date to Gregorian calendar date and time of day.

    The input and output are for the proleptic Gregorian calendar.

    Parameters
    ----------
    myjd: julian date (float).

    Returns
    -------
    y, m, d, f : int, int, int, float
        Four element tuple containing year, month, day and the
        fractional part of the day in the Gregorian calendar. The first
        three are integers, and the last part is a float.
    """
    day_number = int(myjd)
    day_fraction = myjd - day_number

    # Julian days start at noon; re-anchor so the integer part refers to
    # noon of the current civil date and the fraction is measured from
    # midnight of that date.
    if -0.5 < day_fraction < 0.5:
        day_fraction += 0.5
    elif day_fraction >= 0.5:
        day_number += 1
        day_fraction -= 0.5
    elif day_fraction <= -0.5:
        day_number -= 1
        day_fraction += 1.5

    # Integer-arithmetic calendar conversion (Fliegel & Van Flandern style).
    work = day_number + 68569
    quad_century = int((4 * work) / 146097.0)
    work -= int(((146097 * quad_century) + 3) / 4.0)
    year_in_cycle = int((4000 * (work + 1)) / 1461001)
    work -= int((1461 * year_in_cycle) / 4.0) - 31
    month_index = int((80 * work) / 2447.0)
    day = work - int((2447 * month_index) / 80.0)
    carry = int(month_index / 11.0)
    month = month_index + 2 - (12 * carry)
    year = 100 * (quad_century - 49) + year_in_cycle + carry

    return int(year), int(month), int(day), day_fraction
f43a299fd8627804893eb5b6266d6a016c191d72
3,628,164
def donation_journal_history_for_a_voter(voter_we_vote_id):
    """
    Build a JSON-serializable donation history for one voter.

    :param voter_we_vote_id: we_vote id of the voter whose journal is wanted
    :return: list of dicts, one per donation journal row; empty when the
        journal lookup fails or the voter has no rows
    """
    donation_manager = DonationManager()
    donation_journal_results = donation_manager.retrieve_donation_journal_list(voter_we_vote_id)
    # Number of days during which we will allow refunds (should be 30).
    refund_days = get_environment_variable("STRIPE_REFUND_DAYS")

    simple_donation_list = []
    if donation_journal_results['success']:
        for donation_row in donation_journal_results['donation_journal_list']:
            json_data = {
                'donation_journal_id': donation_row.id,
                'created': str(donation_row.created),
                # Stripe stores amounts in cents; render as dollars with
                # thousands separators and two decimals.
                'amount': '{:20,.2f}'.format(donation_row.amount/100).strip(),
                'currency': donation_row.currency.upper(),
                'record_enum': donation_row.record_enum,
                'funding': donation_row.funding.title(),
                'brand': donation_row.brand,
                'exp_month': donation_row.exp_month,
                'exp_year': donation_row.exp_year,
                # Zero-pad the card's last four digits (stored as an int).
                'last4': '{:04d}'.format(donation_row.last4),
                'stripe_status': donation_row.stripe_status,
                'charge_id': donation_row.charge_id,
                'plan_type_enum': donation_row.plan_type_enum,
                'subscription_id': donation_row.subscription_id,
                'subscription_canceled_at': str(donation_row.subscription_canceled_at),
                'subscription_ended_at': str(donation_row.subscription_ended_at),
                'refund_days_limit': refund_days,
                'last_charged': str(donation_row.last_charged),
                'is_organization_plan': positive_value_exists(donation_row.is_organization_plan),
                'organization_we_vote_id': str(donation_row.organization_we_vote_id),
            }
            simple_donation_list.append(json_data)

    return simple_donation_list
41b6a63d2893a7104fd15686e31090ee058438cd
3,628,165
from matplotlib import pyplot
import numpy
def imshow(
    subplot,
    data,
    title=None,
    sharex=None,
    sharey=None,
    vmin=-2.5,
    vmax=0.0,
    cmap=None,
    interpolation='lanczos',
    **kwargs,
):
    """Log-plot image using matplotlib.pyplot. Return plot axis and plot.

    Mirror symmetry is applied along the x and y axes (via the sibling
    ``mirror_symmetry`` helper), the data is shown on a log10 scale
    clipped to [vmin, vmax], and axes are hidden. Extra keyword
    arguments are forwarded to ``pyplot.imshow``.
    """
    ax = pyplot.subplot(subplot, sharex=sharex, sharey=sharey, facecolor='k')
    if title:
        pyplot.title(title)
    if cmap is None:
        cmap = pyplot.cm.cubehelix  # coolwarm
    else:
        cmap = pyplot.cm.get_cmap(cmap)
    try:
        # workaround: set alpha for i_bad
        # NOTE(review): pokes the private _lut table so "bad" (NaN/masked)
        # pixels are opaque; may break on matplotlib versions without
        # these internals -- hence the AttributeError guard.
        cmap._init()
        cmap._lut[-1, -1] = 1.0
    except AttributeError:
        pass
    im = pyplot.imshow(
        mirror_symmetry(numpy.log10(data)),
        vmin=vmin,
        vmax=vmax,
        cmap=cmap,
        interpolation=interpolation,
        **kwargs,
    )
    pyplot.axis('off')
    return ax, im
33a090c95de533d90690eb3cb635b805634ac12a
3,628,166
import math


def sigmoid_1(x):
    """Return log(x / (1 - x)) for ``x`` in the open interval (0, 1).

    NOTE(review): despite the name, this is the logit (inverse sigmoid),
    not the sigmoid itself.
    """
    odds = x / (1 - x)
    return math.log(odds)
a7ec76035e4f3b33613e7176e00dff7e243a695a
3,628,167
from typing import Tuple
from typing import List


def initialize_device_settings(use_cuda: bool, local_rank: int=-1, multi_gpu: bool=True) -> Tuple[List[str], int]:
    """
    Returns a list of available devices.

    :param use_cuda: Whether to make use of CUDA GPUs (if available).
    :param local_rank: Ordinal of device to be used. If -1 and multi_gpu is True, all devices will be used.
    :param multi_gpu: Whether to make use of all GPUs (if available).
    :return: (devices, n_gpu) -- paddle device handles and the number of
        GPUs selected (0 when running on CPU).
    """
    if not use_cuda:
        devices = [paddle.set_device("cpu")]
        n_gpu = 0
    elif local_rank == -1:
        if 'gpu' in paddle.get_device():
            if multi_gpu:
                # Use every visible CUDA device.
                devices = [
                    paddle.set_device('gpu:{}'.format(device))
                    for device in range(paddle.device.cuda.device_count())
                ]
                n_gpu = paddle.device.cuda.device_count()
            else:
                devices = [paddle.set_device("gpu")]
                n_gpu = 1
        else:
            # CUDA requested but no GPU visible: silently fall back to CPU.
            devices = [paddle.set_device("cpu")]
            n_gpu = 0
    else:
        # Explicit rank (distributed setting): pin to that single GPU.
        devices = [paddle.set_device('gpu:{}'.format(local_rank))]
        n_gpu = 1
    logger.info(
        f"Using devices: {', '.join([str(device) for device in devices]).upper()}"
    )
    logger.info(f"Number of GPUs: {n_gpu}")
    return devices, n_gpu
c84f1dcc13ea90b2539b5027324382466372a77d
3,628,168
def get_action_td_zero(exploration_mode, current_state, Q, epsilon, temperature, exploration_degree, actions, nextStates):
    """
    Choose an action for temporal-difference learning using the configured
    exploration strategy.

    @param exploration_mode: ExplorationMode member selecting the strategy
    @param current_state: state for which an action is selected
    @param Q: action-value table
    @param epsilon: exploration rate for (constant/falling) epsilon-greedy
    @param temperature: softmax temperature
    @param exploration_degree: exploration weight for UCB
    @param actions: available actions (used by softmax)
    @param nextStates: successor states (used by softmax)
    @return: the selected action
    @raise ValueError: if exploration_mode is not a supported strategy
    """
    # Compare via .value so equality also holds across re-imported enum
    # classes (the original did the same).
    if exploration_mode.value == ExplorationMode.CONSTANT_EPS.value \
            or exploration_mode.value == ExplorationMode.FALLING_EPS.value:
        action = get_action_eps_greedy(current_state, Q, epsilon)
    elif exploration_mode.value == ExplorationMode.SOFTMAX.value:
        action = get_action_softmax(Q, current_state, temperature, actions, nextStates)
    elif exploration_mode.value == ExplorationMode.UCB.value:
        action = get_action_ucb(Q, current_state, exploration_degree)
    else:
        # Previously an unknown mode fell through and surfaced as an
        # opaque UnboundLocalError; fail explicitly instead.
        raise ValueError("Unsupported exploration mode: {!r}".format(exploration_mode))
    return action
ad5045f21f5048853371f4b12e9a08a6af50036d
3,628,169
def get_policy_targets(context, presentation):
    """
    Returns our target node templates and groups if we have them.
    """
    matched_templates = []
    matched_groups = []

    target_names = presentation.targets
    if target_names:
        topology = context.presentation
        templates_by_name = topology.get(
            'service_template', 'topology_template', 'node_templates') or {}
        groups_by_name = topology.get(
            'service_template', 'topology_template', 'groups') or {}

        # Node templates take precedence over groups with the same name.
        for name in target_names:
            if name in templates_by_name:
                matched_templates.append(templates_by_name[name])
            elif name in groups_by_name:
                matched_groups.append(groups_by_name[name])

    return matched_templates, matched_groups
f483b9749c25b7d56c0e0a02a6787d936782e470
3,628,170
def check_for_messages(folder, message_data, pst_name, folder_name):
    """
    Read the messages of a folder (if any) and collect their metadata.

    :param folder: pypff.Folder object
    :param message_data: list to pass and extend with message info
    :param pst_name: A string representing the name of the PST file
    :param folder_name: A string representing the name of the folder
    :return: the (mutated) message_data list, one dict per message
    """
    for msg in folder.sub_messages:
        record = process_message(msg)
        # Tag each record with its provenance before collecting it.
        record['pst_name'] = pst_name
        record['folder_name'] = folder_name
        message_data.append(record)
    return message_data
0022536bbd6b0d0f723c6e6daf9a96e83afcd18d
3,628,171
import socket


def __get_host_ip():
    """Return the local machine's outbound IPv4 address.

    "Connects" a UDP socket to a public address (no packets are actually
    sent for a UDP connect) and reads back the local address the OS chose
    for that route.

    :return: local IP address as a string
    :raises OSError: if no route to the probe address exists.
    """
    # 'with' guarantees the socket is closed even when connect() fails.
    # The old try/finally referenced `s` before assignment whenever
    # socket() itself raised, masking the real error with a NameError.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(('8.8.8.8', 80))
        return s.getsockname()[0]
a73d4c02618a0729501dafe12702287195123f2c
3,628,172
def uv_renew(u, s, v, X, gamma1, gamma2):
    """
    This function will return the updated u, v and the corresponding lambdas.
    Only for internal use for the SSVD function.

    Parameters (numpy arrays unless noted):
    u, v           -- current left/right singular vectors; reshaped to
                      (n,1) and (d,1) below.
    s              -- current singular value (scalar).
    X              -- (n, d) data matrix.
    gamma1, gamma2 -- adaptive-lasso weight exponents for u and v.

    Returns (v_new, u_new, lamd_v, lamd_u) -- NOTE: v comes first.
    Relies on module-level np, la (numpy.linalg), opt_lambda_v, opt_lambda_u.
    """
    n,d = X.shape
    u = u.reshape((n,1))
    v = v.reshape((d,1))
    SSTO = np.sum(X**2)

    ## first, update v
    # compute the weights, which are OLS for v (Xu is also the ols)
    Xu = X.T @ u  # this is also the v_tilde in the paper, Xu is (d,1)
    w2 = np.abs(Xu)**(-gamma2)
    # compute the estimated sigma2 hat for v
    #sigma2_hat_v = np.sum((Y - Yhat)**2) / (n*d - d)
    uvt = u @ v.T
    sigma2_hat_v = np.trace((X - s*uvt)@(X - s*uvt).T) / (n*d - d)
    #sigma2_hat_v = np.abs(SSTO - sum(Xu**2)) / (n*d - d)
    # then, find the possible lambdas for v
    # notice that, equivantly, we can write 2 * (X.T @ u) / w2 > lambda_v, and 2 * (X.T @ v) / w1 > lambda_u
    # thus, it makes more sense to search different lambdas according to the values of (X.T @ u)/w2 or (X.T @ v)/w1
    index_v = np.where(w2 < 1e8)  # the index where Xu is non-zero. Out of these values, the v will almost be zero.
    index_v = index_v[0]
    Xu_nonzero = Xu[index_v]
    w2_nonzero = w2[index_v]
    lamd_grid_v = 2 * Xu_nonzero / w2_nonzero
    #lamd_grid_v = Xu[index_v] / w2[index_v]
    # Candidate grid: unique breakpoints plus a 50-point linear fill-in.
    lamd_grid_v = np.unique(np.append(0, np.abs(lamd_grid_v)))
    lamd_grid_v.sort()
    lamd_grid_v = lamd_grid_v[0:-1]
    lamd_grid_v = np.r_[lamd_grid_v, np.linspace(0, lamd_grid_v[-1], num = 50)]
    # find the optimized lambda for v
    lamd_v = opt_lambda_v(X, lamd_grid_v, Xu_nonzero, w2_nonzero, u, sigma2_hat_v, n, d, index_v)
    # update v (soft-thresholding followed by renormalization)
    sig_v = np.sign(Xu)
    v_new = sig_v * (np.abs(Xu) - lamd_v*w2/2) * (np.abs(Xu) >= lamd_v*w2/2)
    v_new = v_new / la.norm(v_new)

    ## then, update the u
    # compute the weights for u
    Xvnew = X @ v_new  # this is also the u_tilde in the paper, Xvnew is (n,1)
    w1 = np.abs(Xvnew)**(-gamma1)
    # compute the estimated sigma2 hat for u
    uvt = u @ v_new.T
    sigma2_hat_u = np.trace((X - s*uvt)@(X - s*uvt).T) / (n*d - d)
    #sigma2_hat_u = np.abs(SSTO - sum(Xvnew**2)) / (n*d - n)
    # then, find the possible lambdas for u
    index_u = np.where(w1 < 1e8)
    index_u = index_u[0]
    Xv_nonzero = Xvnew[index_u]
    w1_nonzero = w1[index_u]
    lamd_grid_u = 2 * Xv_nonzero / w1_nonzero
    lamd_grid_u = np.unique(np.append(0, np.abs(lamd_grid_u)))
    lamd_grid_u.sort()
    lamd_grid_u = lamd_grid_u[0:-1]
    lamd_grid_u = np.r_[lamd_grid_u, np.linspace(0, lamd_grid_u[-1], num = 50)]
    # find the optimized lambda for u
    lamd_u = opt_lambda_u(X, lamd_grid_u, Xv_nonzero, w1_nonzero, v_new, sigma2_hat_u, n, d, index_u)
    # update u (same soft-threshold + renormalize scheme as for v)
    sig_u = np.sign(Xvnew)
    #u_new = sig_u * (np.abs(Xvnew) - lamd_u*w1/2) * (np.abs(Xvnew) >= lamd_u*w1/2) / la.norm(Xvnew)
    u_new = sig_u * (np.abs(Xvnew) - lamd_u*w1/2) * (np.abs(Xvnew) >= lamd_u*w1/2)
    u_new = u_new / la.norm(u_new)

    return v_new, u_new, lamd_v, lamd_u
cb3eef48dc3cac9fd8bae8f2d010d7883eed2a56
3,628,173
def merge_df(df1, df2):
    """
    Build a dataframe from the columns of interest of both inputs, drop NA
    rows, and simplify the '538 Grade' letter grades.

    Keyword arguments:
    df1 -- dataframe with the poll/interview data.
    df2 -- dataframe with the pollster agency data.
    """
    poll_cols = ['pollster', 'sample_size', 'party', 'end_date', 'subject',
                 'very', 'somewhat', 'not_very', 'not_at_all']
    agency_cols = ['Pollster', '538 Grade', 'Predictive Plus-Minus']

    merged = df1[poll_cols].merge(df2[agency_cols],
                                  left_on='pollster',
                                  right_on='Pollster',
                                  how='left').dropna()

    # Collapse grade variants into single letters.
    grade_map = {'A': 'A', 'B': 'B', 'B-': 'B',
                 'B/C': 'C', 'C-': 'C', 'D-': 'D'}
    merged['538 Grade'] = merged['538 Grade'].map(grade_map)
    return merged
955569ebbbbcf141eedd64ca3ee9b9b89f7922be
3,628,174
def OpInfo_CONV(
    conv_ps: conv.Conv2DParams, s_id: str, vin_id: str, vout_id: str
) -> OpInfo:
    """ OpInfo for a CONV operation

    Builds the polyhedral read/write access relations of a 2-D convolution
    (as isl-style set strings) and wraps them in an "MxV" OpInfo.

    :param conv_ps: convolution parameters (input/output/filter shapes,
        padding) -- project type conv.Conv2DParams.
    :param s_id: name of the iteration-space statement.
    :param vin_id: name of the input buffer read by the op.
    :param vout_id: name of the output buffer written by the op.
    """
    # Read access: each output pixel (oh, ow) reads an FH x FW window of
    # every input channel of the input buffer.
    rd_a = (
        "{{ {SID}[oh,ow] -> {VID}[id,ih,iw] "
        ": 0 <= oh < {OH} "
        "and 0 <= ow < {OW} "
        "and 0 <= id < {ID} "
        "and oh <= ih < oh + {FH} "
        "and ow <= iw < ow + {FW} "
        "}}".format(
            ID=conv_ps.i.d,
            OH=conv_ps.o.h,
            OW=conv_ps.o.w,
            FH=conv_ps.f.h,
            FW=conv_ps.f.w,
            SID=s_id,
            VID=vin_id,
        )
    )
    # Write access: each output pixel writes one value per filter (ik),
    # offset by the output padding P on both spatial axes.
    wr_a = (
        "{{ {SID}[oh,ow] -> {VID}[ik,ih,iw] "
        ": 0 <= oh < {OH} "
        "and 0 <= ow < {OW} "
        "and 0 <= ik < {FL} "
        "and ih = oh + {P} "
        "and iw = ow + {P} "
        "}}".format(
            OH=conv_ps.o.h,
            OW=conv_ps.o.w,
            FL=conv_ps.f.l,
            P=conv_ps.p_out,
            SID=s_id,
            VID=vout_id,
        )
    )
    return pl.OpInfo("MxV", [RD_a(rd_a), WR_a(wr_a)])
6f4292a79b9905a9a9c1ce79607ab854b3e97d81
3,628,175
def mean_period(data):
    """Return the mean period of the signal.

    Counts non-negative peaks and divides the signal length by that count;
    with no peaks the full signal length is returned.
    """
    peak_indices, _ = find_peaks(data, height=0)
    n_samples = len(data)
    n_peaks = len(peak_indices)
    return n_samples / n_peaks if n_peaks else n_samples
627be25f610f81e45c2387f2c0e778e0b5ce8c20
3,628,176
def rvsi_vi(imgData, wave, mask=0, bands=(-1, -1, -1)):
    """
    Function that calculates the Red-edge Vegetation Stress Index.
    This functions uses bands 714, 733, and 752 nm. The closest bands to these values will be used.
    Citation: Merton, R. and Huntington, J. 1999. Early simulation results of the ARIES-1 satellite sensor for multi-temporal vegetation research derived from AVIRIS.
    Available at ftp://popo.jpl.nasa.gov/pub/docs/workshops/99_docs/41.pdf, NASA Jet Propulsion Lab., Pasadena, CA.

    INPUTS:
    1) imgData: an array of hyperspectral data either as 3D [n_row x n_col x n_band] or 2D [n_row x n_band]
    2) wave: an array of wavelengths in nanometers that correspond to the n_bands in imgData
    3) mask: OPTIONAL - a binary array (same size as imgData) that designates which pixels should be included in analysis.
       Pixels with 1 are used, while pixels with 0 are not.
    4) bands: OPTIONAL - if the user wants to define the bands used in the function provide the band index (not in nm)
       for each wavelength in this order [714 nm, 733 nm, 752 nm].

    OUTPUTS:
    1) vi: the calculated spectral index value for each pixel either returned as [n_row x n_col x 1] or [n_row x 1]

    03/2020 - Susan Meerdink

    Fixes: the mutable list default for ``bands`` was replaced by an
    equivalent tuple (callers may still pass lists), and the bare
    ``Exception`` became a ``ValueError`` (still caught by any existing
    ``except Exception`` handlers).
    """
    # Determine the bands used in function; -1 means "use the band nearest
    # the nominal wavelength", otherwise the supplied index is used as-is.
    if len(bands) == 3:
        if bands[0] == -1:
            idx_714 = (np.abs(wave - 714)).argmin()
        else:
            idx_714 = bands[0]
        if bands[1] == -1:
            idx_733 = (np.abs(wave - 733)).argmin()
        else:
            idx_733 = bands[1]
        if bands[2] == -1:
            idx_752 = (np.abs(wave - 752)).argmin()
        else:
            idx_752 = bands[2]
        print('RVSI calls for bands 714, 733, and 752 nm. Using bands ' + str(wave[idx_714]) + ', ' + str(wave[idx_733]) + ', ' + str(wave[idx_752]))
    else:
        raise ValueError('Not enough band indexes are provided by user.')

    # 3D data, hyperspectral image, [n_row x n_col x n_band]
    if imgData.ndim > 2:
        data_714 = np.reshape(imgData[:, :, idx_714], [-1, 1])
        data_733 = np.reshape(imgData[:, :, idx_733], [-1, 1])
        data_752 = np.reshape(imgData[:, :, idx_752], [-1, 1])
    # 2D data, flattened hyperspectral data, [n_row x n_band]
    else:
        data_714 = imgData[:, idx_714]
        data_733 = imgData[:, idx_733]
        data_752 = imgData[:, idx_752]

    # Calculate RVSI
    index = (data_714 + data_752) / 2 - data_733

    # If data was 3D, reshape the index value back into 3D shape
    if imgData.ndim > 2:
        index = np.reshape(index, [imgData.shape[0], imgData.shape[1]])

    # Zero out masked-off pixels when a mask array was supplied
    # (an int mask, the default 0, means "no mask").
    if isinstance(mask, int) is False:
        idx_x, idx_y = np.where(mask == 0)
        index[idx_x, idx_y] = 0

    return index
ddf08ac67979b5bff879faaac139bb25fdc660cb
3,628,177
def usb_pitop_peripherals():
    """Returns a list with the status of USB pi-top peripherals.

    Returns:
        list: list of dictionaries with the status of USB peripherals
    """
    probes = (
        ("pi-top Touchscreen", touchscreen_is_connected),
        ("pi-top Keyboard", pitop_keyboard_is_connected),
    )
    return [{"name": name, "connected": probe()} for name, probe in probes]
5fa9100fdfa03bbfaebe3a053a036bc50b52dd59
3,628,178
import torch


def isPD(B):
    """Check whether a matrix is positive definite.

    Attempts a Cholesky factorization, which succeeds exactly for
    (Hermitian) positive-definite matrices.

    Args:
        B ([torch.Tensor]): [Input matrix.]

    Returns:
        [bool]: [Returns True if matrix is positive definite, otherwise False.]
    """
    try:
        # torch.cholesky is deprecated; torch.linalg.cholesky is the
        # documented replacement and raises torch.linalg.LinAlgError
        # (a RuntimeError subclass) on failure, so the handler still works.
        _ = torch.linalg.cholesky(B)
        return True
    except RuntimeError:
        return False
c51dc4f6f48ac7417f49ef41b81f3b04816b9279
3,628,179
def convert_TriMap_to_SelectedLEDs(best_led_config):
    """
    Returns a lookup dict of the selected LEDs.

    Every LED number appearing in any triangle's list maps to True.
    """
    return {
        led: True
        for tri in best_led_config
        for led in best_led_config[tri]
    }
521a1be0d11cb8198944e437d20d4ac0349c8856
3,628,180
import os


def get_stimulus_response(src_dir, src_dataset, stim_id, boundary=0, if_get_stim=True):
  """Get stimulus-response data for all datasets.

  Args :
    src_dir : Location of all joint embedding datasets.
    src_dataset : Dataset corresponding of a specific stimulus.
    stim_id : string ID of the stimulus.
    boundary : Remove cells within a boundary to the edges.
    if_get_stim : If False, do not load stimulus

  Returns :
    stimulus : Stimulus matrix (Time x dimx x dimy), or None when
      if_get_stim is False.
    responses : Discretized cell responses (Time x n_cells).
    dimx : X dimension of stimulus.
    dimy : Y dimension of stimulus.
    num_cell_types : number of cell types.
  """
  # Copy data locally.
  # Since gfile does not support reading of large files directly from CNS,
  # we need to copy the data locally first.
  src = os.path.join(src_dir, src_dataset)
  if not gfile.IsDirectory(FLAGS.tmp_dir):
    gfile.MkDir(FLAGS.tmp_dir)
  dst = os.path.join(FLAGS.tmp_dir, src_dataset)

  print('Source %s' % src)
  print('Destination %s' % dst)
  copy_locally(src, dst)

  # Load stimulus-response data.
  if if_get_stim:
    data = h5py.File(os.path.join(dst, 'stimulus.mat'))
    stimulus = np.array(data.get('stimulus'))
    # Make dynamic range of stimuli from -0.5 to 0.5
    stim_min = np.min(stimulus)
    stim_max = np.max(stimulus)
    stimulus -= stim_min
    stimulus /= (stim_max - stim_min)
    stimulus -= 0.5
    # Make the stimuli mean 0
    stimulus -= np.mean(stimulus)
  else:
    stimulus = None

  # Load responses from multiple retinas.
  datasets_list = os.path.join(dst, 'datasets.txt')
  datasets = open(datasets_list, 'r').read()
  training_datasets = [line for line in datasets.splitlines()]

  num_cell_types = 2
  dimx_desired = 80
  dimy_desired = 40
  if stimulus is not None:
    dimx_actual = stimulus.shape[1]
    dimy_actual = stimulus.shape[2]
  else:
    # Derive the stimulus grid from the stixel size encoded in the dataset
    # name.  np.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement and behaves identically here.
    stix_sz = int(src_dataset.split('-')[1])
    dimx_actual = int(640 / stix_sz)
    dimy_actual = int(320 / stix_sz)

  responses = []
  for idata in training_datasets:
    print(idata)
    data_file = os.path.join(dst, idata)
    data = sio.loadmat(data_file)
    data.update({'stimulus_key': stim_id})
    process_dataset(data, dimx_desired, dimy_desired, dimx_actual, dimy_actual,
                    num_cell_types, boundary=boundary)
    data.update({'piece': idata})
    responses += [data]

  if FLAGS.minimize_disk_usage:
    gfile.DeleteRecursively(dst)

  return stimulus, responses, dimx_desired, dimy_desired, num_cell_types
695ac3dbbea4753d0722e9168f59b85b22776de3
3,628,181
import argparse
import sys
import logging


def parse_cli():
    """This function parses all the arguments, validates them and then stores
    them in an argparse.Namespace that is used throughout the script."""
    usage = \
        '''%(prog)s [--debug] -u username -k pkey -c cert \\
           (-H hostname | -F hostfile) [-a altnames] [-d write_directory]
       %(prog)s [--debug] -u username -k pkey -c cert -t
       %(prog)s -h
       %(prog)s --version'''
    parser = argparse.ArgumentParser(add_help=False, usage=usage,
                                     description='Request and retrieve certificates from the InCommon IGTF server CA.')
    required = parser.add_argument_group('Required', 'Specify only one of -H/--hostname and -F/--hostfile')
    hosts = required.add_mutually_exclusive_group()
    hosts.add_argument('-H', '--hostname', action='store', dest='hostname',
                       help='The hostname (FQDN) to request. If specified, -F/--hostfile will be ignored')
    hosts.add_argument('-F', '--hostfile', action=FilePathAction, dest='hostfile',
                       help='File containing list of hostnames (FQDN), one per line, to request. Space separated '
                            'subject alternative names (SANs) may be specified on the same line as each hostname.')
    required.add_argument('-u', '--username', action='store', required=True, dest='login',
                          help="Specify requestor's InCommon username/login")
    required.add_argument('-c', '--cert', action=FilePathAction, required=True, dest='usercert',
                          help="Specify requestor's user certificate (PEM Format)")
    required.add_argument('-k', '--pkey', action=FilePathAction, required=True, dest='userprivkey',
                          help="Specify requestor's private key (PEM Format)")

    optional = parser.add_argument_group("Optional")
    optional.add_argument('-h', '--help', action='help', help='show this help message and exit')
    optional.add_argument('-a', '--altname', action='append', dest='altnames', default=[],
                          help='Specify the SAN for the requested certificate (only works with -H/--hostname). '
                               'May be specified more than once for additional SANs.')
    optional.add_argument('-d', '--directory', action='store', dest='write_directory', default='.',
                          help="The directory to write the host certificate(s) and key(s)")
    optional.add_argument('--debug', action='store_true', dest='debug', default=False,
                          help="Write debug output to stdout")
    optional.add_argument('-t', '--test', action='store_true', dest='test', default=False,
                          help='Testing mode: test connection to InCommon API but does not request certificates. '
                               'Useful to test authentication credentials, optional arguments are ignored.')
    optional.add_argument('-v', '--version', action='version', version=utils.VERSION_NUMBER)

    parsed_args = parser.parse_args()

    # We can't add altnames to the mutually exclusive 'hosts' group since it's not a required opt
    if parsed_args.hostfile and parsed_args.altnames:
        parsed_args.altnames = []
        print("-a/--altname option ignored with -F/--hostfile", file=sys.stderr)

    if parsed_args.debug:
        # this sets the root debug level
        logging.getLogger().setLevel(logging.DEBUG)
        logger.debug('Debug mode enabled')

    # (-H/--hostname | -F/--hostfile) are mutually exclusive but not required so testing mode can be enabled with optional param -t/--test
    if not parsed_args.test and not parsed_args.hostname and not parsed_args.hostfile:
        parser.error('argument -H/--hostname or -F/--hostfile is required.')

    return parsed_args
3bcd60045225d5049e9c7c150667c1664ebc3e50
3,628,182
def get_by_id(id_estudiante, id_asignatura, edicion):
    """
    Fetch the Estudiantes relation tuple with the given identifiers.

    :param id_estudiante: the student's identifier
    :type id_estudiante: str
    :param id_asignatura: the course identifier
    :type id_asignatura: str
    :param edicion: the course edition
    :type edicion: str
    :returns: the relation tuple (first matching row)
    :rtype: dict
    :raises Exception: re-raises any database error after closing resources
    """
    conn = helpers.get_connection()
    try:
        cur = conn.cursor()
        try:
            cur.execute(CALIFICACION_QUERY_ID,
                        (id_estudiante, id_asignatura, edicion))
            result = cur.fetchone()
            # Commit and release resources.
            conn.commit()
        finally:
            # The original leaked the cursor (and connection) whenever the
            # query raised; always release them.
            cur.close()
    finally:
        conn.close()
    return result
04fa8447359917896210c1252b92ce82dc40f511
3,628,183
def keyword(tokens, expected):
    """Case-insensitive keyword match.

    Inspects only the first token; returns a TokenMatch on success and
    None otherwise (including for an empty token stream).
    """
    iterator = iter(tokens)
    try:
        first = next(iterator)
    except StopIteration:
        return None
    if not first or first.name != "symbol":
        return None
    if first.value.lower() != expected:
        return None
    return TokenMatch(None, first.value, (first,))
2ae106158e4a17258283abdd3ddcf77c670957c4
3,628,184
def milestone_container(request):
    """milestone container"""
    context = {}
    template = loader.get_template('project_management/milestone_container.html')
    rendered = template.render(context, request)
    return HttpResponse(rendered)
5461448dccdc5a5a3a7d56cc324dbaff5b3dbcc2
3,628,185
def _run_symbolic_method(op_name, symbolic_fn, args):
    """
    This trampoline function gets invoked for every symbolic method call
    from C++.
    """
    try:
        return symbolic_fn(*args)
    except TypeError as exc:
        # Annotate dispatch failures with the op name; for other errors
        # the backtrace will already have the clues you need.
        original_message = exc.args[0]
        exc.args = (f"{original_message} (occurred when translating {op_name})",)
        raise
c95f8d18e4b3a0ed7a06ccc6bdf178a820537d08
3,628,186
import csv


def load_input_source(input_source):
    """Load data from an arbitrary input source.

    Currently supported: JSON, JSON-String, CSV, CSV-String.
    Returns an empty list if no data is available.

    Loaders are tried in order: JSON file, JSON string, CSV file,
    CSV string.  NOTE the asymmetry: a JSON-file load that parses cleanly
    returns immediately, even when the result is empty; the later loaders
    only run when earlier attempts raised or produced falsy data.
    """
    input_data = []
    try:
        input_data = load_json_from_file(input_source)
    except ValueError as e:
        pass
    except IOError as e:
        pass
    except Exception as e:
        pass
    else:
        # JSON file parsed cleanly: return whatever it contained.
        return input_data
    if not input_data:
        try:
            input_data = load_json_string(input_source)
        except AttributeError as e:
            pass
        except ValueError as e:
            pass
        except Exception as e:
            pass
    if not input_data:
        try:
            input_data = load_csv_from_file(input_source)
        except csv.Error as e:
            pass
        except IOError as e:
            pass
        except Exception as e:
            pass
    if not input_data:
        try:
            input_data = load_csv_string(input_source)
        except Exception as e:
            pass
    return input_data
82ea97978a39a93a8d42e024cef9890cf2535dbe
3,628,187
def total_intensity(field):
    """Calculates total intensity of the field.

    Computes intensity and sums over the last two (pixel) axes."""
    intensity = field2intensity(field)
    last_two_axes = tuple(range(intensity.ndim))[-2:]
    return intensity.sum(last_two_axes)
89a26b8d53c7520aff99ef54800cab5fcb0937bb
3,628,188
from numpy import array, log10


def g_function_bernier(Fo, rba):
    """
    Bernier (2001) approximation of Ingersol et al. (1954) G-function of
    the cylindrical heat source problem.

    Fo = Fourier number
    rba = ratio of the radius where the temperature is calculated over the
          external radius of the borehole; must be 1, 2, 5 or 10.

    Returns the dimensionless G-function value.

    Raises ValueError (still caught by existing ``except Exception``
    handlers) if rba is not one of the tabulated ratios.

    Reference:
    ----------
    Bernier, M.A., 2001. Ground-Coupled Heat Pump System Simulation.
    ASHRAE Transactions, 107(1):1-12.

    Fix: the record's ``import array`` could not supply the numpy
    ``array([[...]])`` constructor nor ``log10``; both are now imported
    from numpy explicitly.
    """
    # Polynomial coefficients in x = log10(Fo); one row per tabulated rba.
    A = array([[-0.89129, 0.36081, -0.05508, 3.59617e-3, 0, 0],
               [-1.454099, 0.8993336, -0.311928, 0.061119, -0.00478046, 0],
               [-3.007691, 2.256059, -0.7928093, 0.134293, -0.00858244, 0],
               [-9.141771, 11.7025, -7.09574, 2.269837, -0.3669166, 0.023587]])
    row_for_rba = {1: 0, 2: 1, 5: 2, 10: 3}
    try:
        a = A[row_for_rba[rba], :]
    except KeyError:
        raise ValueError('Second input argument must be either 1, 2, 5 or 10') from None
    x = log10(Fo)
    arg = a[0] + a[1]*x + a[2]*x**2 + a[3]*x**3 + a[4]*x**4 + a[5]*x**5
    return 10**arg
58226463d498b5af839d7ffd3c1042e4a0a4b5ee
3,628,189
def clear_item_updates(api_key: str, item_id: str, *args, **kwargs):
    """Clear an item's updates.

    Parameters

        api_key : `str`
            The monday.com v2 API user key.
        item_id : `str`
            The item's unique identifier.
        args : `tuple`
            The list of item return fields.
        kwargs : `dict`
            Optional arguments for clearing item updates.

    Returns

        data : `dict`
            A monday.com column in item form.

    Return Fields

        assets : `list[moncli.entities.Asset]`
            The item's assets/files.
        board : `moncli.entities.Board`
            The board that contains this item.
        column_values : `list[moncli.entities.ColumnValue]`
            The item's column values.
        created_at : `str`
            The item's create date.
        creator : `moncli.entities.User`
            The item's creator.
        creator_id : `str`
            The item's unique identifier.
        group : `moncli.entities.Group`
            The group that contains this item.
        id : `str`
            The item's unique identifier.
        name : `str`
            The item's name.
        state : `str`
            The board's state (all / active / archived / deleted)
        subscriber : `moncli.entities.User`
            The pulse's subscribers.
        updated_at : `str`
            The item's last update date.
        updates : `moncli.entities.Update`
            The item's updates.
    """
    # The GraphQL mutation expects the item id as an Int scalar argument.
    kwargs['item_id'] = gql.IntValue(item_id)
    return execute_query(api_key, query_name=CLEAR_ITEM_UPDATES, operation_type=gql.OperationType.MUTATION, fields=args, arguments=kwargs)
fc487f8fb27d179df5823601179fbcbb858dd69c
3,628,190
import torch


def compute_aucs(pred, targ):
    """
    Computes Area Under the Curve (AUC) from prediction scores.

    Args:
        pred: Pytorch tensor on GPU, shape = [n_samples, n_classes]
              can either be probability estimates of the positive class,
              confidence values, or binary decisions.
        targ: Pytorch tensor on GPU, shape = [n_samples, n_classes]
              true binary labels.

    Returns:
        Numpy array with the AUROC of each class.
    """
    assert targ.shape[1] == pred.shape[1], f"pred:{pred.shape} targ:{targ.shape}"
    # sklearn's roc_auc_score needs CPU numpy arrays.
    if isinstance(pred, torch.Tensor):
        pred = pred.float().cpu().detach().numpy()
    if isinstance(targ, torch.Tensor):
        targ = targ.float().cpu().detach().numpy()
    assert isinstance(pred, np.ndarray), 'pred must be tensor or np array'
    assert isinstance(targ, np.ndarray), 'targ must be tensor or np array'

    AUROCs = np.zeros(targ.shape[-1])
    for i in range(targ.shape[-1]):
        try:
            AUROCs[i] = roc_auc_score(targ[:, i], pred[:, i])
        except ValueError:
            # roc_auc_score raises when a class has only one label value;
            # report 0 for that class instead of failing the whole batch.
            AUROCs[i] = 0
    return AUROCs
2b2b00eb55421ee1005534998f0519c96ed70696
3,628,191
def minhash(features):
    # type: (List[int]) -> List[int]
    """
    Calculate a 64 dimensional minhash integer vector.

    :param List[int] features: List of integer features
    :return: Minhash vector
    :rtype: List[int]
    """
    signature = []
    # One universal-hash (a*f + b) per output dimension; keep the minimum
    # hashed feature value for each.
    for a, b in zip(MPA, MPB):
        smallest = min((((a * f + b) & MAXI64) % MPRIME) & MAXH for f in features)
        signature.append(smallest)
    return signature
c0bb534f4433fd517a7910647260f947c136111a
3,628,192
def get_gauss_beams(fwhm=7, nside=512, lmax=None):
    """Return the Gaussian beam window function for the given FWHM.

    the fwhm of 143GHz is 7.27, test fwhm: 7, 6, 5, 4

    :param fwhm: beam FWHM, presumably in arcminutes (converted below with
        pi/10800, the arcmin-to-radian factor) -- TODO confirm with callers.
    :param nside: HEALPix nside used only to pick the default lmax.
    :param lmax: maximum multipole; defaults to 3*nside + 50.
    """
    if lmax is None:
        lmax = 3*nside + 50
    # pi/10800 converts arcminutes to radians for hp.gauss_beam.
    bl = hp.gauss_beam(fwhm*np.pi/10800., lmax=lmax)
    return bl
85a4edd2389afa49c166488089ad70433a4683bd
3,628,193
def create_long_model(model_specified, attention_window, max_pos):
    """Starting from the `roberta-base` (or similar) checkpoint, the following function converts it into an instance of `RobertaLong`.
    It makes the following changes:
    1) extend the position embeddings from `512` positions to `max_pos`. In Longformer, we set `max_pos=4096`
    2) initialize the additional position embeddings by copying the embeddings of the first `512` positions.
       This initialization is crucial for the model performance (check table 6 in [the paper](https://arxiv.org/pdf/2004.05150.pdf)
       for performance without this initialization)
    3) replaces `modeling_bert.BertSelfAttention` objects with `modeling_longformer.LongformerSelfAttention` with an
       attention window size `attention_window`

    The output of this function works for long documents even without pretraining.
    Check tables 6 and 11 in [the paper](https://arxiv.org/pdf/2004.05150.pdf) to get a sense of
    the expected performance of this model before pretraining.

    :return: (model, tokenizer, config) with the extended position
        embeddings and Longformer self-attention layers installed.
    """
    model = BertModel.from_pretrained(model_specified)
    tokenizer = BertTokenizer.from_pretrained(
        model_specified, model_max_length=max_pos)
    config = model.config

    # extend position embeddings
    tokenizer.model_max_length = max_pos
    tokenizer.init_kwargs['model_max_length'] = max_pos
    current_max_pos, embed_size = model.embeddings.position_embeddings.weight.shape
    # max_pos += 2  # NOTE: RoBERTa has positions 0,1 reserved, so embedding size is max position + 2
    config.max_position_embeddings = max_pos
    assert max_pos > current_max_pos
    # allocate a larger position embedding matrix
    new_pos_embed = model.embeddings.position_embeddings.weight.new_empty(
        max_pos, embed_size)
    # copy position embeddings over and over to initialize the new position embeddings
    # NOTE(review): when max_pos is not a multiple of current_max_pos the
    # final slice assignment copies a full-size source into a shorter
    # slice -- confirm the intended max_pos values make this safe.
    k = 0
    step = current_max_pos
    while k < max_pos - 1:
        new_pos_embed[k:(k + step)] = model.embeddings.position_embeddings.weight[:]
        k += step
    model.embeddings.position_embeddings.weight.data = new_pos_embed

    # replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`
    config.attention_window = [attention_window] * config.num_hidden_layers
    for i, layer in enumerate(model.encoder.layer):
        longformer_self_attn = LongformerSelfAttention(config, layer_id=i)
        longformer_self_attn.query = layer.attention.self.query
        longformer_self_attn.key = layer.attention.self.key
        longformer_self_attn.value = layer.attention.self.value

        # NOTE(review): the global-attention projections share the same
        # modules as the local ones here -- confirm this matches the
        # intended Longformer initialization recipe.
        longformer_self_attn.query_global = layer.attention.self.query
        longformer_self_attn.key_global = layer.attention.self.key
        longformer_self_attn.value_global = layer.attention.self.value

        layer.attention.self = longformer_self_attn

    return model, tokenizer, config
ff7dc0f6f008ff03f834389f9dc02ccbdabceece
3,628,194
def api_redirect():
    """Redirects the user to the API documentation page."""
    docs_url = flask.url_for("api_ns.api")
    return flask.redirect(docs_url)
471dcc20b64da5299572783af7345bf2b3effd29
3,628,195
from rasa.nlu import utils as nlu_utils
import aiohttp


async def download_file_from_url(url: Text) -> Text:
    """Download a story file from a url and persists it into a temp file.

    Returns the file path of the temp file that contains the
    downloaded content.

    :param url: HTTP(S) URL to fetch.
    :raises InvalidURL: if ``url`` does not look like a URL.
    :raises aiohttp.ClientResponseError: on non-2xx responses
        (``raise_for_status=True``).
    """
    if not nlu_utils.is_url(url):
        raise InvalidURL(url)

    async with aiohttp.ClientSession() as session:
        async with session.get(url, raise_for_status=True) as resp:
            # Persist the raw response bytes; the caller is responsible
            # for cleaning up the temporary file.
            filename = io_utils.create_temporary_file(await resp.read(), mode="w+b")
            return filename
373ca5e8a0726baeeb868a98b827110ca46e541b
3,628,196
import binascii


def _decode_pem_private(lines, passphrase):
    """Decode a PEM format private key

    Handles OpenSSH, PKCS#1 (optionally RFC 1421 header-encrypted) and
    PKCS#8 (optionally encrypted) PEM containers.

    :param lines: PEM text lines to decode.
    :param passphrase: passphrase for encrypted keys, or None.
    :returns: tuple of (decoded key, end marker as produced by _decode_pem).
    :raises KeyImportError: on malformed or undecryptable input.
    """
    pem_name, headers, data, end = _decode_pem(lines, b'PRIVATE KEY')

    if pem_name == b'OPENSSH':
        return _decode_openssh_private(data, passphrase), end

    # Legacy PKCS#1 encryption is advertised via RFC 1421 headers.
    if headers.get(b'Proc-Type') == b'4,ENCRYPTED':
        if passphrase is None:
            raise KeyImportError('Passphrase must be specified to import '
                                 'encrypted private keys')

        dek_info = headers.get(b'DEK-Info', b'').split(b',')
        if len(dek_info) != 2:
            raise KeyImportError('Invalid PEM encryption params')

        alg, iv = dek_info
        try:
            iv = binascii.a2b_hex(iv)
        except binascii.Error:
            raise KeyImportError('Invalid PEM encryption params') from None

        try:
            data = pkcs1_decrypt(data, alg, iv, passphrase)
        except KeyEncryptionError:
            raise KeyImportError('Unable to decrypt PKCS#1 '
                                 'private key') from None

    try:
        key_data = der_decode(data)
    except ASN1DecodeError:
        raise KeyImportError('Invalid PEM private key') from None

    if pem_name == b'ENCRYPTED':
        # Encrypted PKCS#8 container; after decryption it is plain PKCS#8,
        # so clear pem_name to take the PKCS#8 branch below.
        if passphrase is None:
            raise KeyImportError('Passphrase must be specified to import '
                                 'encrypted private keys')

        pem_name = b''

        try:
            key_data = pkcs8_decrypt(key_data, passphrase)
        except KeyEncryptionError:
            raise KeyImportError('Unable to decrypt PKCS#8 '
                                 'private key') from None

    if pem_name:
        return _decode_pkcs1_private(pem_name, key_data), end
    else:
        return _decode_pkcs8_private(key_data), end
64db7a54a1c35b35d435f64b45353c94029ab6c6
3,628,197
def kBtu_h2kW(x):
    """kBtu/h -> kW"""
    btu_per_hour = kBtu_h2Btu_h(x)
    return Btu_h2kW(btu_per_hour)
e528827fc9527dfbfa421741aea7c719979c7eb9
3,628,198
def plot_hist_centers(output, *args, **kwargs):
    """Plot 1-dimensinal output using pyplot.plot

    executes pyplot.plot(x, y, *args, **kwargs)
    with first two arguments overridden
    all other arguments are passed as is.

    Options:
        scale=float or 'width' - multiply bin by a scale or divide by bin width
        axis=plotter - object with a .plot() method; defaults to the
            module-level P.

    returns pyplot.plot() result
    """
    # get_1d_data presumably returns (bin heights, bin edges, <unused>)
    # given the edge arithmetic below -- confirm against its definition.
    height, lims, _ = get_1d_data(output, scale=kwargs.pop('scale', None))
    # Convert bin edges to bin centers for point plotting.
    centers = (lims[1:] + lims[:-1])*0.5

    Plotter = kwargs.pop('axis', P)
    return Plotter.plot(centers, height, *args, **kwargs )
be0d7e2cf5f3b5229d3c781c3f675291c9a9d4ba
3,628,199