content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def alpha_to_weights(alpha):
    """Convert raw alpha scores into long/short portfolio weights.

    Each cross-section (row) is de-meaned, the positive side is scaled to
    sum to +1 and the negative side to sum to -1, so every row's absolute
    weights sum to 2. (Original docstring, translated from Chinese:
    "normalize; final cross-sectional absolute sum is 2".)
    """
    alpha = alpha - np.nanmean(alpha, axis=1, keepdims=True)
    pos = (alpha > 0)
    neg = (alpha < 0)
    # Long leg: isolate positive scores and scale each row to +1 total.
    longs = imposter(alpha)
    longs[pos] = alpha[pos]
    longs = longs / np.nansum(longs, 1, keepdims=True)
    # Short leg: isolate negative scores, flip sign so the row sums to -1.
    shorts = imposter(alpha)
    shorts[neg] = alpha[neg]
    shorts = -shorts / np.nansum(shorts, 1, keepdims=True)
    alpha[pos] = longs[pos]
    alpha[neg] = shorts[neg]
    return alpha
a2a4436b3457fe644a130d463cf501c6cd623f2c
25,500
def complete_with_fake_data_for_warmup(minimum_n_rows_to_fit, X=None, fv_size=None):
    """Build a matrix with at least ``minimum_n_rows_to_fit`` rows to warm up
    a partial-fit process.

    Without ``X``, returns a random ``minimum_n_rows_to_fit x fv_size`` matrix
    (values in [0, 1)). With ``X``, cycles its rows until the minimum row
    count is reached; if ``X`` is already big enough it is returned as-is.

    >>> complete_with_fake_data_for_warmup(3, fv_size=2).shape
    (3, 2)
    >>> import numpy as np
    >>> complete_with_fake_data_for_warmup(5, X=np.array([[1, 2, 3], [4, 5, 6]]))
    array([[1, 2, 3],
           [4, 5, 6],
           [1, 2, 3],
           [4, 5, 6],
           [1, 2, 3]])
    """
    if X is None:
        assert fv_size is not None, 'You need to have some data, or specify an fv_size'
        return np.random.rand(minimum_n_rows_to_fit, fv_size)
    nrows, fv_size = X.shape
    missing_n_rows = max(0, minimum_n_rows_to_fit - nrows)
    if missing_n_rows == 0:
        return X
    # Repeat the rows cyclically, then trim to the exact row count needed.
    n_copies = int(1 + np.ceil(missing_n_rows / nrows))
    return np.array(X.tolist() * n_copies)[:minimum_n_rows_to_fit]
e201fc50f06945b57a166e4c006252cc892865ed
25,501
from typing import Dict
from pathlib import Path


def check_integrity(signify: Dict[str, str], snapshot: Path, url: str) -> bool:
    """Check the integrity of the snapshot and retry once if failed files.

    signify  -- the signify key and a signify signed file with SHA256 checksums
    snapshot -- the directory where the snapshot is stored
    url      -- the snapshots' mirror URL
    """
    whole, failed = verify(signify, snapshot)
    if not failed:
        return whole
    # Downloads can fail, or we may have caught the mirror mid-sync:
    # wait five minutes, re-fetch the failed files, and verify again.
    sleep(300)
    for name in failed:
        get_binary(parse.urljoin(url, name), name)
    whole, _ = verify(signify, snapshot)
    return whole
4e33ba5a2652eaba229815eec93dede4aaf6ef5f
25,502
def exp_slow(b, c):
    """Return b raised to the power c.

    Uses the linear recurrence b^c = b * b^(c-1), one recursive frame per
    decrement of c (hence "slow").

    Parameter b: the number to raise to a power
    Precondition: b is a number

    Parameter c: the exponent
    Precondition: c is an int >= 0
    """
    # Validate preconditions up front.
    assert type(b) in [float, int], repr(b)+' is not a number'
    assert type(c) == int, repr(c)+' is not an int'
    assert c >= 0, repr(c)+' is negative'

    # Writes to a module-level frame counter used for instrumentation.
    global count_frames

    # Base case: anything to the 0th power is 1.
    if c == 0:
        return 1

    # Recursive case: peel one factor of b off the exponent.
    partial = exp_slow(b, c-1)
    count_frames = count_frames+1
    return b * partial
0d58a98f2b7785c9ac69c8a3f4539cdf71d3f27b
25,503
def pick_theme(manual):
    """Resolve the theme name.

    Priority: the explicit *manual* argument, then the prefs file's
    [theme] default entry, then the "plain" fallback.
    """
    if manual:
        return manual
    pref_init()
    parser = cp.ConfigParser()
    parser.read(PREFS_FILE)
    try:
        return parser.get("theme", "default")
    except (cp.NoSectionError, cp.NoOptionError):
        # Prefs file missing the section or key: fall back to the default.
        return "plain"
6e815a0f46b5de1f1a0ef16ffa0ba21b79ee048f
25,504
import socket


def ip2host(ls_input):
    """Resolve IP addresses to hostnames via reverse DNS.

    Parameters
    ----------
    ls_input : list of IP address strings.

    Returns
    -------
    list of (ip, hostname) tuples; hostname is None when the reverse
    lookup fails.
    """
    ls_output = []
    for ip in ls_input:
        try:
            resolved = socket.gethostbyaddr(ip)
            ls_output.append((ip, resolved[0]))
        # socket.herror/gaierror are OSError subclasses, so OSError covers
        # every lookup failure without swallowing unrelated bugs the way
        # the previous bare `except Exception` did.
        except OSError as e:
            print('Error: ', e)
            ls_output.append((ip, None))
    return ls_output
234b42bf0406ae5fb67d2c1caba9f7f3a1e92a0c
25,505
from typing import Tuple
from typing import List
from pathlib import Path


def process_all_content(file_list: list, text_path: str) -> Tuple[list, list]:
    """
    Analyze the whole content of the project, build and return lists
    if toc_items and landmarks.

    INPUTS:
    file_list: a list of all content files
    text_path: the path to the contents folder (src/epub/text)

    OUTPUTS:
    a tuple containing the list of Toc items and the list of landmark items
    """
    toc_list: List[TocItem] = []
    landmarks: List[TocItem] = []
    # We make two passes through the work, because we need to know
    # how many bodymatter items there are. So we do landmarks first.
    for textf in file_list:
        file_path = Path(text_path) / textf
        try:
            with open(file_path, encoding="utf8") as file:
                dom = se.easy_xml.EasyXhtmlTree(file.read())
        except Exception as ex:
            raise se.InvalidFileException(f"Couldn’t open file: [path][link=file://{file_path}]{file_path}[/][/]. Exception: {ex}") from ex
        add_landmark(dom, textf, landmarks)
    # Now we test to see if there is only one body item
    body_items = [item for item in landmarks if item.place == Position.BODY]
    single_file = (len(body_items) == 1)
    # Second pass: build the ToC proper. nest_under_halftitle flips on
    # once the halftitle page has been seen and off again at backmatter.
    nest_under_halftitle = False
    place = Position.NONE
    for textf in file_list:
        with open(Path(text_path) / textf, "r", encoding="utf-8") as file:
            dom = se.easy_xml.EasyXhtmlTree(file.read())
        body = dom.xpath("//body")
        if body:
            place = get_place(body[0])
        else:
            raise se.InvalidInputException("Couldn't locate body node")
        if place == Position.BACK:
            nest_under_halftitle = False
        process_headings(dom, textf, toc_list, nest_under_halftitle, single_file)
        if textf == "halftitlepage.xhtml":
            nest_under_halftitle = True
    # We add this dummy item because outputtoc always needs to look ahead to the next item.
    last_toc = TocItem()
    last_toc.level = 1
    last_toc.title = "dummy"
    toc_list.append(last_toc)
    return landmarks, toc_list
51514892d173adf8a4fe9c3196781c558bc24c6a
25,506
import aiohttp
import json


def fuel(bot, mask, target, args):
    """Show the current fuel for Erfurt

        %%fuel [<city> <value> <type>]...

    IRC command handler (old-style asyncio coroutine using ``yield from``).
    Queries the tankerkoenig.de API for fuel stations near a location and
    prints one price line per station to the channel.
    """
    """Load configuration"""
    # Default location (Erfurt) and search radius; overridden by bot config.
    config = {
        'lat': 50.9827792,
        'lng': 11.0394426,
        'rad': 10
    }
    config.update(bot.config.get(__name__, {}))
    sort_type = 'all'
    sort_value = 'dist'
    lat = config['lat']
    lng = config['lng']
    fuel_types = ['e5', 'e10', 'diesel', 'all']
    if config['api_key'] == "your_apikey":
        return "I don't have your api key!"
    # Argument parsing: no args -> default city; 'sort' -> default city with
    # sorting; 'help' -> usage text; otherwise geocode the given city name.
    if '<city>' not in args or len(args['<city>']) < 1:
        bot.log.info('Fetching fuel info for Erfurt')
        lat = config['lat']
        lng = config['lng']
    else:
        if " ".join(args['<city>']) == 'sort':
            bot.log.info('Fetching fuel info for Erfurt')
            lat = config['lat']
            lng = config['lng']
            if '<value>' not in args or len(args['<value>']) < 1:
                sort_type = 'all'
                sort_value = 'dist'
            else:
                sort_type = " ".join(args['<value>'])
                sort_value = 'price'
        else:
            if " ".join(args['<city>']) == 'help':
                bot.log.info('Printing some Help')
                cmd = '!'
                bot.privmsg(target, '( ͡° ͜ʖ ͡°)')
                bot.privmsg(target, 'Example commands:')
                bot.privmsg(target, cmd + 'fuel')
                bot.privmsg(target, cmd + 'fuel help')
                bot.privmsg(target, cmd + 'fuel sort <fuel>')
                bot.privmsg(target, cmd + 'fuel sort e5')
                bot.privmsg(target, cmd + 'fuel sort e10')
                bot.privmsg(target, cmd + 'fuel sort diesel')
                bot.privmsg(target, cmd + 'fuel <place>')
                bot.privmsg(target, cmd + 'fuel erfurt')
                bot.privmsg(target, cmd + 'fuel <place> sort <fuel>')
                bot.privmsg(target, cmd + 'fuel erfurt sort e5')
                bot.privmsg(target, cmd + 'fuel bytespeicher sort e10')
                bot.privmsg(target, cmd + 'fuel krautspace sort diesel')
                return ""
            else:
                bot.log.info('Fetching fuel info for ' + str(" ".join(args['<city>'])))
                # Geocode the city name to coordinates.
                geolocator = Nominatim()
                location = geolocator.geocode(" ".join(args['<city>']))
                lat = location.latitude
                lng = location.longitude
                if " ".join(args['<value>']) == 'sort':
                    if '<type>' not in args or len(args['<type>']) < 1:
                        sort_type = 'all'
                        sort_value = 'dist'
                    else:
                        sort_type = " ".join(args['<type>'])
                        sort_value = 'price'
    if sort_type not in fuel_types:
        return "Not supported fuel."
    try:
        # First request: list of stations around (lat, lng).
        url = "https://creativecommons.tankerkoenig.de/json/list.php?" + \
              "lat=" + str(lat) + \
              "&lng=" + str(lng) + \
              "&rad=" + str(config['rad']) + \
              "&sort=" + str(sort_value) + \
              "&type=" + str(sort_type) + \
              "&apikey=" + str(config['api_key'])
        with aiohttp.Timeout(10):
            with aiohttp.ClientSession(loop=bot.loop) as session:
                resp = yield from session.get(url)
                if resp.status != 200:
                    bot.privmsg(target, "Error while retrieving station list")
                    raise Exception()
                r = yield from resp.read()
                data = json.loads(r.decode('utf-8'))
        messages = []
        # One detail request per station for the individual prices.
        for x in range(len(data['stations'])):
            brand = data[u'stations'][x][u"brand"]
            station_id = data['stations'][x][u"id"]
            postCode = data['stations'][x][u"postCode"]
            bot.log.info('Fetching fuel info for Erfurt station ' + str(station_id))
            url = \
                "https://creativecommons.tankerkoenig.de/json/detail.php?" + \
                "id=" + station_id + \
                "&apikey=" + str(config['api_key'])
            with aiohttp.Timeout(10):
                with aiohttp.ClientSession(loop=bot.loop) as session:
                    resp = yield from session.get(url)
                    if resp.status != 200:
                        bot.privmsg(target, "Error while retrieving fuel data")
                        raise Exception()
                    r = yield from resp.read()
                    details = json.loads(r.decode('utf-8'))
            e5 = str(details['station']['e5'])
            e10 = str(details['station']['e10'])
            diesel = str(details['station']['diesel'])
            # Distance from the query point, in km.
            dist = u"{:0.2} km".format(vincenty((details['station']['lat'],
                                                 details['station']['lng']),
                                                (lat, lng)).meters / 1000)
            if brand == '':
                brand = 'GLOBUS'
            print_str = \
                u" {:20}".format(brand + ', ' + str(postCode) + ': ') + \
                u"{:5} ".format(e5) + \
                u"{:5} ".format(e10) + \
                u"{:5} ".format(diesel) + \
                u"{:5} ".format(dist)
            messages.append(print_str)
        headline = u"{:23}".format('fuel prices:') + \
            u"{:6} ".format('e5') + \
            u"{:6} ".format('e10') + \
            u"{:6} ".format('diesel') + \
            u"{:6} ".format('dist')
        if len(messages) > 0:
            bot.privmsg(target, headline)
            for m in messages:
                bot.privmsg(target, m)
        else:
            return "No fuel data found!"
    except KeyError:
        bot.privmsg(target, "Error while retrieving fuel data")
        raise Exception()
371ecd5e8a7c99032f2544d8256e89475a8d0cd5
25,507
import os


def is_executable_binary(file_path):
    """Return True when *file_path* is a regular file, is executable, and
    is a binary (i.e. not a script).
    """
    # Must exist and be a regular file.
    if not os.path.isfile(file_path):
        return False
    # Must carry the execute bit for the current user.
    if not os.access(file_path, os.X_OK):
        return False
    # The script-vs-binary decision is delegated to is_binary().
    return is_binary(file_path)
3b1ca2ab87f1568e275b2fe535fe2af7b47804d9
25,508
def findElemArray2D(x, arr2d):
    """Locate every occurrence of a scalar in a 2-D array.

    :param x: a scalar
    :param arr2d: a 2-dimensional numpy ndarray or matrix

    Returns a tuple of arrays (rVec, cVec) where corresponding elements
    give the rows and columns with arr2d[r, c] == x, or ([], []) if x
    does not occur in arr2d.

    Example: arr2d = np.array([[1, 2], [3, 1]]), x = 1
    findElemArray2D(x, arr2d) --> ([0, 1], [0, 1]),
    i.e. arr2d[0][0] and arr2d[1][1] both equal x.

    .. note:: The type of each tuple member is the same as type(arr2d)
    """
    rows, cols = np.where(arr2d == x)
    if len(rows) == 0:
        return [], []
    return rows.flatten(), cols.flatten()
37428b16b6f634483d584ef878eea90646d77028
25,509
import itertools


def merge(cluster_sentences):
    """Flatten a list of lists into a single list."""
    return list(itertools.chain.from_iterable(cluster_sentences))
ec5c9bf7a89bf0d047050d3684876ed481617706
25,510
def reverse_str(s: str) -> str:
    """Reverse a given string.

    Python strings are immutable, so the characters are copied into a
    list and swapped pairwise in place. This replaces the original's
    trick of appending an extra None slot to the list as a temporary,
    which forced a final slice to drop the scratch cell; tuple swapping
    needs no temp space at all.
    """
    chars = list(s)
    i, j = 0, len(chars) - 1
    while i < j:
        chars[i], chars[j] = chars[j], chars[i]
        i += 1
        j -= 1
    return "".join(chars)
8568ed59d004afde11bd97e0dba58189a447f954
25,511
def readme():
    """Read README.rst and return its full text."""
    with open('README.rst') as readme_file:
        contents = readme_file.read()
    return contents
3cf992e2f983d71445e743599dc8b78411bab288
25,512
def exact_account(source_account_id):
    """
    Get the BU id, OU id by the account id in dynamodb table.

    Returns a (mgt_account_id, ou_id, source_account_id) tuple built from
    the 'MgtId' and 'OuId' attributes of the matching table item.
    """
    try:
        response = dynamodb_table.get_item(Key={'AccountId': source_account_id})
    except Exception as e:
        # NOTE(review): presumably failure_notify() aborts execution
        # (raises or exits); if it ever returns, `response` below would be
        # unbound — confirm.
        failure_notify("Unable to query account id {0}, detailed exception {1}".format(source_account_id, e))
    print(response)
    mgt_account_id = response['Item']['MgtId']
    ou_id = response['Item']['OuId']
    return mgt_account_id, ou_id, source_account_id
07ff5ef933d00208a5b1aba573c24c5f5987a558
25,513
import re


def filter_output(output, regex):
    """Filter output by defined regex.

    Output can be either string, list or tuple. A string is split line by
    line and the regex matches from every line are collected. Lists and
    tuples are filtered element-wise, recursively.

    :returns: list of matching records
    """
    if isinstance(output, str):
        matches = []
        for line in output.splitlines():
            matches.extend(re.findall(regex, line))
        return matches
    if isinstance(output, (list, tuple)):
        # Recurse: one result list per element, preserving structure.
        return [filter_output(element, regex) for element in output]
    raise RuntimeError('Only strings and lists are supported by filter_output(), '
                       'but output has type {}'.format(type(output)))
d9760a644bb83aee513391966522946a6514ab72
25,514
def carteiralistar(request):
    """Render the wallet-listing template for the logged-in user.

    Loads the user's CarteiraCriptomoeda if one exists; otherwise the
    template receives blank fields and existe_carteira=False.
    """
    usuario = request.user
    try:
        # Fetch the user's existing wallet, if any.
        carteira = CarteiraCriptomoeda.objects.get(usuario=usuario)
        chave_api = carteira.chave_api
        saldo = carteira.saldo
        valor_operacao = carteira.valor_operacao
        num_operacoes = carteira.num_operacoes
        simulacao = carteira.simulacao
        existe_carteira = True
    except ObjectDoesNotExist:
        # No wallet registered yet: leave every field blank.
        chave_api = saldo = valor_operacao = num_operacoes = simulacao = ""
        existe_carteira = False
    context = {'usuario': usuario,
               'chave_api': chave_api,
               'saldo': saldo,
               'valor_operacao': valor_operacao,
               'num_operacoes': num_operacoes,
               'simulacao': simulacao,
               'existe_carteira': existe_carteira}
    return render(request, 'site-pytradebot/carteiralistar.html', context)
32fa51e5c8e6d5a3765b72755cefe24b0ce906a2
25,515
def scrub(text, stop_chars=DEFAULT_STOP_CHARS, reorder_chars=DEFAULT_REORDER_CHARS):
    """Scrub text.

    Applies the cleanup passes in their required order: reorder stop
    characters, strip columns, split into one sentence per line, and
    collapse excessive whitespace.
    """
    reordered = reorder_stop_chars(text, stop_chars=stop_chars, reorder_chars=reorder_chars)
    without_columns = remove_columns(reordered)
    one_per_line = split_as_one_sentence_per_line(without_columns, stop_chars=stop_chars)
    return remove_excessive_whitespace(one_per_line)
c24a072e83b6936c04a2e591d2072b0e49849758
25,516
def simulate_evoked_osc(info, fwd, n_trials, freq, label, loc_in_label=None,
                        picks=None, loc_seed=None, snr=None, mu=None,
                        noise_type="white", return_matrix=True,
                        filtering=None, phase_lock=False):
    """Simulate evoked oscillatory data based on a given fwd model and dipole.

    Parameters:
    -----------
    info : MNE info object
        data info, e.g., from raw
    fwd : MNE forward object
        forward model object
    freq : float
        freq of simulated oscillation
    n_trials : int
        number of trials
    label : MNE label
        source space label to simulate data in
    loc_in_label : None | int
        Specify the random generator state for dipole simulation within the
        label. Defaults to np.random.RandomState if None.
    picks : None | string
        Channel types to pick from evoked, can be 'mag' or 'grad'. None
        defaults to all.
    loc_seed : None | int
        Seed for the time series simulation, only relevant for location in
        label.
    snr : None | float
        If not None, signal-to-noise ratio in dB for resulting signal (adding
        noise).
    mu : None | float
        To directly manipulate noise level (e.g. to keep constant across
        conditions).
    noise_type : str
        Type of noise. Supported is at the moment: "white" and "brownian".
    return_matrix : bool
        If True, a matrix of epochs will be returned and the evoked object
        will be averaged across trials.
    filtering : None | dict
        If None (default), no filtering is done. If filtering should be done,
        the dictionary needs to contain the following keys:
            "hp" : high pass cutoff, float.
            "lp" : low pass cutoff, float.
            "fir_design" : FIR design, string, see evoked.filter()
            "lp_tw" : transition width for low pass, float, optional.
            "hp_tw" : transition width for high pass, float, optional.
    phase_lock : bool
        If True, the oscillation will be phase-locked across trials.

    Returns:
    --------
    evoked : MNE evoked object
        Simulated sensor data.
    stc : MNE source time course object
        Simulated source space data.
    epochs : np.array
        Matrix with epochs, if return_matrix is True.
    """
    if loc_seed is not None:
        np.random.seed(loc_seed)
    if loc_in_label is None:
        loc_in_label = np.random.RandomState()
    np.random.seed()  # reset to random seed to not get funky results for noise

    # One continuous time axis covering all trials back-to-back.
    times = np.arange(0., n_trials, 1./info['sfreq'])
    stc = simulate_sparse_stc(fwd['src'], n_dipoles=1, times=times,
                              random_state=loc_in_label, labels=label,
                              data_fun=lambda times: generate_signal(
                                  times, freq, n_trials,
                                  phase_lock=phase_lock))

    # go to sensor space
    evoked = apply_forward(fwd, stc, info, verbose=False, use_cps=False)

    # pick channel types if applicable
    if picks is not None:
        evoked.pick_types(meg=picks)

    if filtering is not None:
        # Default both transition widths to "auto" when not supplied.
        if "lp_tw" not in filtering:
            filtering["lp_tw"] = "auto"
        if "hp_tw" not in filtering:
            filtering["hp_tw"] = "auto"

    if snr is not None:
        snr = 10 ** (snr/20)  # convert dB to ratio
        if noise_type == "white":
            noise_data = np.random.randn(*evoked.data.shape)
        elif noise_type == "brownian":
            # make white noise first; integrated to brownian further below
            noise_data = np.random.randn(*evoked.data.shape)
        elif noise_type == "pink":
            noise_data = make_pink_noise(evoked.data.shape[1], 10,
                                         evoked.data.shape[0])
        else:
            raise ValueError('So far, only white, brownian, and pink noise is '
                             'implemented, got %s' % noise_type)

        if filtering is not None:
            # filter the noise with the same band as the signal
            noise_evoked = evoked.copy()
            noise_evoked.data[:] = noise_data
            noise_evoked.filter(filtering["hp"], filtering["lp"],
                                fir_design=filtering["fir_design"],
                                l_trans_bandwidth=filtering["hp_tw"],
                                h_trans_bandwidth=filtering["lp_tw"],
                                verbose=False)
            noise_data = noise_evoked.data

        # scale the noise
        # shape: trials x sensor x time
        noise_matrix = noise_data.reshape([len(evoked.ch_names), n_trials,
                                           -1]).transpose(1, 0, 2)
        signal_matrix = evoked._data.reshape([len(evoked.ch_names), n_trials,
                                              -1]).transpose(1, 0, 2)
        if mu is None:
            # Per-trial Frobenius norm of the signal sets the noise scale
            # for the requested SNR.
            mu = np.linalg.norm(signal_matrix, 'fro', axis=(1, 2))
            mu /= (snr * np.sqrt(len(evoked.ch_names) *
                                 (len(times) / n_trials)))
        if noise_type == 'brownian':
            # Integrate scaled white noise over time to get brownian noise.
            noise_matrix = np.cumsum(mu[:, np.newaxis, np.newaxis] *
                                     noise_matrix, axis=1)
            signal_matrix += noise_matrix
        else:
            signal_matrix += (mu[:, np.newaxis, np.newaxis] * noise_matrix)
        evoked.data = signal_matrix.transpose(1, 0, 2).reshape(
            [len(evoked.ch_names), int(n_trials * (len(times) / n_trials))])

    # evoked.data *= 1e-11
    if filtering is not None:
        # filter all the data again
        evoked.filter(filtering["hp"], filtering["lp"],
                      fir_design=filtering["fir_design"],
                      l_trans_bandwidth=filtering["hp_tw"],
                      h_trans_bandwidth=filtering["lp_tw"],
                      verbose=False)

    # take care of trials:
    if return_matrix is True:
        # Reshape the continuous data into (trials, channels, samples),
        # crop the evoked to one trial length and store the trial average.
        epochs = evoked._data
        epochs = epochs.reshape([len(evoked.ch_names), n_trials,
                                 -1]).transpose(1, 0, 2)
        evoked.crop(0., evoked.times[int((times.shape[0] / n_trials) - 1)])
        evoked._data[:, :] = epochs.mean(axis=0)
        return evoked, stc, epochs, mu
    else:
        return evoked, stc, mu
45a7fe74c4f84c96cdbf0aa09059778180064460
25,517
import requests


def token_request():
    """
    Request a Access Token from Vipps.

    :return: A Access Token
    """
    request_headers = config['token_request']
    endpoint = base_url + '/accesstoken/get'
    response = requests.post(endpoint, headers=request_headers)
    return response.json()
3363179cf526422c53a0eafc8c353ba3f7f29e9f
25,518
from apex import amp


def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
    """Train the model.

    Runs the full fine-tuning loop: builds the dataloader, optimizer and
    linear warmup/decay schedule, optionally wraps the model with apex AMP
    for fp16, supports multi-GPU and distributed training, and handles
    gradient accumulation, periodic evaluation/logging to TensorBoard and
    checkpointing.

    Returns (global_step, average training loss).
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         "weight_decay": args.weight_decay},
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
                                                num_training_steps=t_total)
    if args.fp16:
        try:
            # BUGFIX: the try block was empty (the import had been hoisted
            # out), which is a syntax error. The guarded import belongs
            # here so a missing apex raises the helpful message below.
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            # Only these batch positions are tensors that belong on the
            # device; the rest stay on CPU (lists/metadata).
            cuda_indices = [0, 1, 2, 3, 6, 7]
            batch = tuple(t.to(args.device) if i in cuda_indices else t for i, t in enumerate(batch))
            inputs = {"input_ids": batch[0],
                      "attention_mask": batch[1],
                      "token_type_ids": batch[2],
                      "span_labels": batch[3],
                      "span_size": batch[4],
                      "span_list": batch[5],
                      "slot_labels": batch[6],
                      "slot_mask": batch[7],
                      "rel_size": batch[8],
                      "rel_list": batch[9],
                      "question_length": batch[10],
                      "span_null_label_id": labels[0].index('O'),
                      "global_step": global_step,
                      "args": args}

            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)

            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                # NOTE(review): scheduler.step() before optimizer.step() is
                # the pre-PyTorch-1.1 ordering; modern PyTorch expects the
                # optimizer to step first. Kept as-is to preserve the
                # original learning-rate trajectory — confirm before swapping.
                scheduler.step()  # Update learning rate schedule
                optimizer.step()
                model.zero_grad()
                global_step += 1

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:
                        # Only evaluate when single GPU otherwise metrics may not average well
                        results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test",
                                              filename=os.path.join(args.data_dir, "{}.jsonl".format("test")))
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, "module") else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step
9d475baa8865f932dd09265d7269eb58f3f31dc2
25,519
def extract_tunneled_layer(tunnel_packet: scapy.layers.l2.Ether, offset: int, protocol: str):
    """ Extract tunneled layer from packet capture.

    Args:
        tunnel_packet (scapy.layers.l2.Ether): the PDU to extract from
        offset (int): the byte offset of the tunneled protocol in data field
            of 'packet')
        protocol (str): the tunneled protocol to search for
            (currently unused by the implementation)

    Returns:
        extracted_packet (scapy.layers.l2.Ether): a new Ethernet frame
        carrying the inner IP payload, reusing the outer frame's
        src/dst/type fields.
    """
    inner_bytes = tunnel_packet[Raw].load[offset:]
    outer = tunnel_packet[Ether]
    # Rebuild an Ethernet header from the outer frame and parse the
    # remaining bytes as an IP packet.
    return Ether(dst=outer.dst, src=outer.src, type=outer.type) / IP(inner_bytes)
69596ba7cc5c9db41a2622aa68be1cad89855eb0
25,520
def draw_bbox(img, detections, cmap, random_color=True, figsize=(10, 10), show_text=True):
    """
    Draw bounding boxes on the img and display it with matplotlib.

    :param img: BGR img.
    :param detections: pandas DataFrame containing detections
        (columns in order: x1, y1, x2, y2, class, score, w, h)
    :param random_color: assign random color for each objects
    :param cmap: object colormap
    :param figsize: matplotlib figure size
    :param show_text: if True, draw a "<class> <score>" label above each box
    :return: the annotated image array
    """
    img = np.array(img)
    # Scale line/text sizes relative to a 416-px reference resolution.
    scale = max(img.shape[0:2]) / 416
    line_width = int(2 * scale)
    for _, row in detections.iterrows():
        # NOTE(review): cv2 drawing expects integer pixel coordinates;
        # assumes x1/y1/x2/y2 arrive as ints — confirm upstream.
        x1, y1, x2, y2, cls, score, w, h = row.values
        color = list(np.random.random(size=3) * 255) if random_color else cmap[cls]
        cv2.rectangle(img, (x1, y1), (x2, y2), color, line_width)
        if show_text:
            text = f'{cls} {score:.2f}'
            font = cv2.FONT_HERSHEY_DUPLEX
            font_scale = max(0.3 * scale, 0.3)
            thickness = max(int(1 * scale), 1)
            (text_width, text_height) = cv2.getTextSize(text, font, fontScale=font_scale, thickness=thickness)[0]
            # Filled background behind the label so the text stays readable.
            cv2.rectangle(img, (x1 - line_width//2, y1 - text_height), (x1 + text_width, y1), color, cv2.FILLED)
            cv2.putText(img, text, (x1, y1), font, font_scale, (255, 255, 255), thickness, cv2.LINE_AA)
    plt.figure(figsize=figsize)
    plt.imshow(img)
    plt.show()
    return img
f88bb4267d9d389dce589ee26058f4ad1e0fb096
25,521
def print_total_eval_info(data_span_type2model_str2epoch_res_list,
                          metric_type='micro',
                          span_type='pred_span',
                          model_strs=('DCFEE-O', 'DCFEE-M', 'GreedyDec', 'Doc2EDAG'),
                          target_set='test'):
    """Print the final performance by selecting the best epoch on dev set and emitting performance on test set"""
    dev_type = 'dev'
    test_type = 'test'
    # Maps the averaging mode to the precision/recall/F1 keys in eval results.
    avg_type2prf1_keys = {
        'macro': ('MacroPrecision', 'MacroRecall', 'MacroF1'),
        'micro': ('MicroPrecision', 'MicroRecall', 'MicroF1'),
    }
    name_key = 'EventType'
    p_key, r_key, f_key = avg_type2prf1_keys[metric_type]

    def get_avg_event_score(epoch_res):
        # epoch_res is (epoch, eval_res); the last eval entry holds the
        # overall average, keyed by the chosen F1 metric.
        eval_res = epoch_res[1]
        avg_event_score = eval_res[-1][f_key]
        return avg_event_score

    dev_model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[(dev_type, span_type)]
    test_model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[(test_type, span_type)]

    has_header = False
    mstr_bepoch_list = []
    print('=' * 15, 'Final Performance (%) (avg_type={})'.format(metric_type), '=' * 15)
    for model_str in model_strs:
        if model_str not in dev_model_str2epoch_res_list or model_str not in test_model_str2epoch_res_list:
            continue

        # get the best epoch on dev set
        dev_epoch_res_list = dev_model_str2epoch_res_list[model_str]
        best_dev_epoch, best_dev_res = max(dev_epoch_res_list, key=get_avg_event_score)

        # find the test-set result recorded for that same epoch
        test_epoch_res_list = test_model_str2epoch_res_list[model_str]
        best_test_epoch = None
        best_test_res = None
        for test_epoch, test_res in test_epoch_res_list:
            if test_epoch == best_dev_epoch:
                best_test_epoch = test_epoch
                best_test_res = test_res
        assert best_test_epoch is not None
        mstr_bepoch_list.append((model_str, best_test_epoch))

        if target_set == 'test':
            target_eval_res = best_test_res
        else:
            target_eval_res = best_dev_res

        # Build one aligned header row and one LaTeX-style value row.
        align_temp = '{:20}'
        head_str = align_temp.format('ModelType')
        eval_str = align_temp.format(model_str)
        head_temp = ' \t {}'
        eval_temp = ' \t & {:.1f} & {:.1f} & {:.1f}'
        ps = []
        rs = []
        fs = []
        # Per-event-type columns (all entries except the trailing average).
        for tgt_event_res in target_eval_res[:-1]:
            head_str += align_temp.format(head_temp.format(tgt_event_res[0][name_key]))
            p, r, f1 = (100 * tgt_event_res[0][key] for key in [p_key, r_key, f_key])
            eval_str += align_temp.format(eval_temp.format(p, r, f1))
            ps.append(p)
            rs.append(r)
            fs.append(f1)

        # Unweighted average over the per-event columns.
        head_str += align_temp.format(head_temp.format('Average'))
        ap, ar, af1 = (x for x in [np.mean(ps), np.mean(rs), np.mean(fs)])
        eval_str += align_temp.format(eval_temp.format(ap, ar, af1))

        # The global average stored in the last eval entry.
        head_str += align_temp.format(head_temp.format('Total ({})'.format(metric_type)))
        g_avg_res = target_eval_res[-1]
        ap, ar, af1 = (100 * g_avg_res[key] for key in [p_key, r_key, f_key])
        eval_str += align_temp.format(eval_temp.format(ap, ar, af1))

        if not has_header:
            print(head_str)
            has_header = True
        print(eval_str)

    return mstr_bepoch_list
e5b754facbf0d203cb143514e143844170400280
25,522
def build_sentence_representation(s):
    """ Build representation of a sentence by analyzing predpatt output.

        Returns a weighted list of lists of terms
        (as a (rep_lists, rep_lists_alt) pair — the alternative list keeps
        the PPv3 variant alongside PPv3.1 for double annotation).
    """
    s = merge_citation_token_lists(s)
    s = remove_qutation_marks(s)
    lemmatizer = WordNetLemmatizer()
    raw_lists = []
    rep_lists = []
    rep_lists_alt = []  # to be consistent with double annotating for 3 and 3.1
    try:
        pp = PredPatt.from_sentence(s, cacheable=False)  # for speed tests
    except Exception as e:
        # PredPatt can fail on malformed input; return empty representations.
        print('= = = PredPatt exception = = =')
        print('input:\n{}'.format(s))
        print('exception:\n{}'.format(e))
        return rep_lists, rep_lists_alt
    if len(pp.events) == 0:
        return rep_lists, rep_lists_alt
    if CIT_BASED:
        # Citation-based mode: tree representations weighted by depth,
        # halving the weight for each deeper event.
        for e in pp.events:
            depth, rep = build_tree_representation(e)
            if INCLUDE_PREDICATE:
                pred = get_predicate(e.root)
                rep = ['{}:{}'.format(pred, r) for r in rep]
            if len(rep) > 0:
                raw_lists.append([depth, rep])
        weight = 1
        for rl in sorted(raw_lists, key=itemgetter(0)):
            rep_lists.append([weight, rl[1]])
            weight *= .5
        if len(rep_lists) == 0:
            # Nothing extracted: fall back to nouns of the first event,
            # with a reduced weight.
            fallback = build_noun_representation(
                pp.events[0], global_root=True
            )
            if INCLUDE_PREDICATE:
                pred = get_predicate(pp.events[0].root)
                fallback = ['{}:{}'.format(pred, f) for f in fallback]
            if len(fallback) > 0:
                rep_lists = [[.25, fallback]]
    else:
        # make a PPv3 and a PPv3.1 representation
        # - - - 3.1 - - -
        reps = []
        for e in pp.events:
            rep = build_noun_representation(e)  # 3.1
            if INCLUDE_PREDICATE:
                pred = get_predicate(e.root)
                rep = ['{}:{}'.format(pred, f) for f in rep]
            reps.extend(rep)
        if len(reps) > 0:
            rep_lists = [[1, reps]]
        # - - - 3 - - -
        reps_alt = []
        for e in pp.events:
            rep = build_noun_representation(e, global_root=True)  # 3
            if INCLUDE_PREDICATE:
                pred = get_predicate(e.root)
                rep = ['{}:{}'.format(pred, f) for f in rep]
            reps_alt.extend(rep)
        # NOTE(review): this guard checks len(reps), not len(reps_alt) —
        # looks like a copy-paste slip from the 3.1 branch; confirm
        # intent before changing.
        if len(reps) > 0:
            rep_lists_alt = [[1, reps_alt]]
    rep_lists = normalize_rep_lists(rep_lists, lemmatizer)
    rep_lists_alt = normalize_rep_lists(rep_lists_alt, lemmatizer)
    return rep_lists, rep_lists_alt
dd070aef016cc034a79412528aabc951605aa83c
25,523
import importlib


def load_qconfig():
    """Attempt to load the Qconfig.py module from the current environment.

    Returns:
        module: the loaded Qconfig module, or None if it cannot be found
        or fails to execute.
    """
    try:
        spec = importlib.util.find_spec(_QCONFIG_NAME)
        if spec is None:
            return None
        module = importlib.util.module_from_spec(spec)
        if module is None:
            return None
        spec.loader.exec_module(module)
        logger.debug('Loaded {}'.format(_QCONFIG_NAME))
        return module
    except Exception as e:
        logger.debug('Failed to load {} error {}'.format(_QCONFIG_NAME, str(e)))
        return None
00ec51be6d16011aa366904a5a0ab8705734c464
25,524
def create_glucose_previous_day_groups(day_groups: dict) -> dict:
    """
    Shift a dictionary of daily glucose series forward by one day.

    Each key in the result is (original date) + 1 day, and its value is the
    glucose subseries recorded on the original date — i.e. every entry's data
    lags its key by exactly one day.

    Args:
        day_groups: A dictionary mapping dates to the glucose subseries
            recorded on that date.

    Returns:
        A dictionary mapping (date + 1 day) to the subseries recorded on
        the original date.
    """
    one_day = pd.Timedelta('1D')
    return {day + one_day: glucose for day, glucose in day_groups.items()}
6b5373b25ab286291cc351bc115c016c83ea660b
25,525
def mean_abs_scaling(series: pd.Series, minimum_scale=1e-6):
    """Scale a series by the mean of its absolute values.

    The scale is floored at ``minimum_scale`` to avoid division by zero
    on all-zero input. Returns the scaled series and the scale used.
    """
    mean_abs = series.abs().mean()
    scale = mean_abs if mean_abs > minimum_scale else minimum_scale
    return series / scale, scale
00f397993a3c51761ef634371d6e26885602e340
25,526
import os


def init_dmriprep_wf(
    anat_only,
    debug,
    force_syn,
    freesurfer,
    hires,
    ignore,
    layout,
    longitudinal,
    low_mem,
    omp_nthreads,
    output_dir,
    output_spaces,
    run_uuid,
    skull_strip_fixed_seed,
    skull_strip_template,
    subject_list,
    use_syn,
    work_dir,
):
    """
    Create the base workflow.

    This workflow organizes the execution of *dMRIPrep*, with a sub-workflow
    for each subject. If FreeSurfer's recon-all is to be run, a FreeSurfer
    derivatives folder is created and populated with any needed template
    subjects.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            import os
            from collections import namedtuple, OrderedDict
            BIDSLayout = namedtuple('BIDSLayout', ['root'])
            from dmriprep.workflows.base import init_dmriprep_wf
            os.environ['FREESURFER_HOME'] = os.getcwd()
            wf = init_dmriprep_wf(
                anat_only=False,
                debug=False,
                force_syn=True,
                freesurfer=True,
                hires=True,
                ignore=[],
                layout=BIDSLayout('.'),
                longitudinal=False,
                low_mem=False,
                omp_nthreads=1,
                output_dir='.',
                output_spaces=OrderedDict([
                    ('MNI152Lin', {}), ('fsaverage', {'density': '10k'}),
                    ('T1w', {}), ('fsnative', {})]),
                run_uuid='X',
                skull_strip_fixed_seed=False,
                skull_strip_template=('OASIS30ANTs', {}),
                subject_list=['dmripreptest'],
                use_syn=True,
                work_dir='.',
            )

    Parameters
    ----------
    anat_only : bool
        Disable diffusion MRI workflows
    debug : bool
        Enable debugging outputs
    force_syn : bool
        **Temporary**: Always run SyN-based SDC
    freesurfer : bool
        Enable FreeSurfer surface reconstruction (may increase runtime)
    hires : bool
        Enable sub-millimeter preprocessing in FreeSurfer
    ignore : list
        Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
    layout : BIDSLayout object
        BIDS dataset layout
    longitudinal : bool
        Treat multiple sessions as longitudinal (may increase runtime)
        See sub-workflows for specific differences
    low_mem : bool
        Write uncompressed .nii files in some cases to reduce memory usage
    omp_nthreads : int
        Maximum number of threads an individual process may use
    output_dir : str
        Directory in which to save derivatives
    output_spaces : OrderedDict
        Ordered dictionary where keys are TemplateFlow ID strings (e.g.,
        ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or
        ``fsLR``) strings designating nonstandard references (e.g., ``T1w``
        or ``anat``, ``sbref``, ``run``, etc.), or paths pointing to custom
        templates organized in a TemplateFlow-like structure. Values of the
        dictionary aggregate modifiers (e.g., the value for the key
        ``MNI152Lin`` could be ``{'resolution': 2}`` if one wants the
        resampling to be done on the 2mm resolution version of the selected
        template).
    run_uuid : str
        Unique identifier for execution instance
    skull_strip_template : tuple
        Name of target template for brain extraction with ANTs'
        ``antsBrainExtraction``, and corresponding dictionary of
        output-space modifiers.
    skull_strip_fixed_seed : bool
        Do not use a random seed for skull-stripping - will ensure
        run-to-run replicability when used with --omp-nthreads 1
    subject_list : list
        List of subject labels
    use_syn : bool
        **Experimental**: Enable ANTs SyN-based susceptibility distortion
        correction (SDC). If fieldmaps are present and enabled, this is not
        run, by default.
    work_dir : str
        Directory in which to store workflow execution state and temporary
        files
    """
    dmriprep_wf = Workflow(name='dmriprep_wf')
    dmriprep_wf.base_dir = work_dir

    if freesurfer:
        # Single shared node that prepares the FreeSurfer derivatives folder
        # (fsaverage densities and, when requested, fsnative) before any
        # subject workflow runs.
        fsdir = pe.Node(
            BIDSFreeSurferDir(
                derivatives=output_dir,
                freesurfer_home=os.getenv('FREESURFER_HOME'),
                spaces=[s for s in output_spaces.keys() if s.startswith('fsaverage')] + [
                    'fsnative'] * ('fsnative' in output_spaces)),
            name='fsdir_run_' + run_uuid.replace('-', '_'),
            run_without_submitting=True)

    reportlets_dir = os.path.join(work_dir, 'reportlets')
    for subject_id in subject_list:
        # One independent sub-workflow per subject.
        single_subject_wf = init_single_subject_wf(
            anat_only=anat_only,
            debug=debug,
            force_syn=force_syn,
            freesurfer=freesurfer,
            hires=hires,
            ignore=ignore,
            layout=layout,
            longitudinal=longitudinal,
            low_mem=low_mem,
            name="single_subject_" + subject_id + "_wf",
            omp_nthreads=omp_nthreads,
            output_dir=output_dir,
            output_spaces=output_spaces,
            reportlets_dir=reportlets_dir,
            skull_strip_fixed_seed=skull_strip_fixed_seed,
            skull_strip_template=skull_strip_template,
            subject_id=subject_id,
            use_syn=use_syn,
        )

        # Crash files are written into the subject's own log folder for
        # this particular run.
        single_subject_wf.config['execution']['crashdump_dir'] = (
            os.path.join(output_dir, "dmriprep", "sub-" + subject_id, 'log', run_uuid)
        )
        # Propagate the per-subject config down to every node.
        for node in single_subject_wf._get_all_nodes():
            node.config = deepcopy(single_subject_wf.config)
        if freesurfer:
            # Connecting through fsdir also sequences the sub-workflow
            # after the FreeSurfer derivatives folder is prepared.
            dmriprep_wf.connect(fsdir, 'subjects_dir',
                                single_subject_wf, 'inputnode.subjects_dir')
        else:
            dmriprep_wf.add_nodes([single_subject_wf])

    return dmriprep_wf
e5136a8632af08748fdd30b5288850291c6102e7
25,527
def count_total_parameters():
    """
    Returns total number of trainable parameters in the current tf graph.
    https://stackoverflow.com/a/38161314/1645784
    """
    total = 0
    for variable in tf.trainable_variables():
        # Multiply out the dimensions of this variable's shape.
        n_params = 1
        for dim in variable.get_shape():
            n_params *= dim.value
        total += n_params
    return total
8ee1b116ac3338158c7a43acc570776940bb7e0f
25,528
def create_gradient_rms_plot(sticher_dict: dict[str, GDEFSticher], cutoff_percent=8, moving_average_n=1, x_offset=0,
                             plotter_style: PlotterStyle = None) -> Figure:
    """
    Create a matplotlib figure showing the RMS of the gradient of each
    GDEFSticher in sticher_dict; the dict keys are used as legend labels.

    :param sticher_dict: mapping of label -> GDEFSticher
    :param cutoff_percent: gradient cutoff, in percent
    :param moving_average_n: window size for the moving average
    :param x_offset: horizontal offset applied to the plot
    :param plotter_style: optional style; a default is created if None
    :return: the created Figure
    """
    if plotter_style is None:
        plotter_style = PlotterStyle(300, (8, 4))
    plotter_style.set(y_label=f"roughness(gradient) (moving average n = {moving_average_n})")

    labels = list(sticher_dict.keys())
    stichers = list(sticher_dict.values())
    gradients = [create_absolute_gradient_array(s.values, cutoff_percent / 100.0) for s in stichers]
    pixel_widths = [s.pixel_width for s in stichers]

    return create_rms_plot(gradients, pixel_width=pixel_widths, label_list=labels,
                           moving_average_n=moving_average_n, x_offset=x_offset,
                           plotter_style=plotter_style)
e628250d2c1d4548e6b52d48a8313ffa1b5131fe
25,529
from typing import List, TypeVar

# BUG FIX: the original did `from re import T`, which binds T to the
# re.TEMPLATE flag (an int), not a type variable. Declare a real TypeVar.
T = TypeVar('T')


def reverse(ls: List[T]) -> List[T]:
    """
    Reverse a list in place.

    :param ls: The list to be reversed
    :return: The same list object, reversed
    """
    # list.reverse() is the idiomatic, C-speed equivalent of the
    # original element-swap loop; it mutates ls in place just the same.
    ls.reverse()
    return ls
eacee56b5325178ec27a13283d64d0155c7a97ed
25,530
def test_get_annotations_not_5(
    test_gb_file, test_accession, coordination_args, monkeypatch
):
    """Test get_annotations when length of protein data is not 5."""

    # Stub out GenBank file retrieval so the test never touches the network
    # or disk beyond the fixture-provided file.
    def mock_get_gb_file(*args, **kwargs):
        gb_file = test_gb_file
        return gb_file

    # Stub the record-feature lookup to return None (i.e. no feature data).
    def mock_get_record(*args, **kwargs):
        return

    monkeypatch.setattr(get_genbank_annotations, "get_genbank_file", mock_get_gb_file)
    monkeypatch.setattr(get_genbank_annotations, "get_record_feature", mock_get_record)

    # No assertion: the test passes if get_annotations handles the
    # malformed (non-length-5) protein data without raising.
    get_genbank_annotations.get_annotations(
        test_accession,
        coordination_args["args"],
    )
a9021af24ecb339ebea89d6ad7beb6e4097c5519
25,531
def increment_with_offset(c: str, increment: int, offset: int) -> str:
    """Caesar shift cipher.

    Shift character *c* by *increment* positions within a 26-letter
    alphabet whose first code point is *offset*.
    """
    index = ord(c) - offset
    shifted = (index + increment) % 26
    return chr(shifted + offset)
50b10b6d3aff3dff157dfc46c368ae251ed060bb
25,532
import logging


def uploadfiles():
    """
    Upload a csv file into the db.

    :return: renders success.html on a successful upload, otherwise
        logs the missing file and renders startindex.html
    """
    uploaded_file = request.files['filename']
    if uploaded_file.filename == '':
        logging.info("No file uploaded")
        return render_template('startindex.html')
    csv_to_db(uploaded_file)
    return render_template('success.html')
5baa9dfb8930e70ebd37b502a211ae847194e08f
25,533
def static_html(route):
    """
    Route in charge of routing users to Pages.

    :param route: the page route to look up
    :return: rendered page, or a redirect to login for protected pages
    """
    page = get_page(route)
    if page is None:
        abort(404)
    # abort() raises, so anything past this point has a valid page.
    if page.auth_required and authed() is False:
        return redirect(url_for("auth.login", next=request.full_path))
    return render_template("page.html", content=page.content)
52c74b63c5856a04b294f8e539b4be26deec0209
25,534
import math


def getCenterFrequency(filterBand):
    """
    Intermediate computation used by the mfcc function.

    Compute the center frequency (fc) of the specified filter band (l).
    This is where the mel-frequency scaling occurs: filters are specified
    so that their center frequencies are equally spaced on the mel scale.

    :param filterBand: filter band index (0-based)
    :return: center frequency in Hz
    """
    # Tidied: removed dead `centerFrequency = 0` pre-assignment and a
    # stray semicolon; early returns replace the if/elif/else chain.
    if filterBand == 0:
        return 0
    if 1 <= filterBand <= 14:
        # Linearly spaced bands below 1 kHz.
        return (200.0 * filterBand) / 3.0
    # Logarithmically spaced bands above 1 kHz.
    exponent = filterBand - 14
    return math.pow(1.0711703, exponent) * 1073.4
e043774093c4417658cdfd052d486ea5e30efb81
25,535
import numpy


def phi_analytic(dist, t, t_0, k, phi_1, phi_2):
    """ the analytic solution to the Gaussian diffusion problem """
    spread = t + t_0
    amplitude = (phi_2 - phi_1) * (t_0 / spread)
    return amplitude * numpy.exp(-0.25 * dist**2 / (k * spread)) + phi_1
49fac597afa876f81ba5774bf82fedcfb88f6c7f
25,536
import subprocess


def get_changed_files(base_commit: str, head_commit: str, subdir: str = '.'):
    """
    Get the files changed by the given range of commits.
    """
    output = subprocess.check_output(
        ['git', 'diff', '--name-only', base_commit, head_commit, '--', subdir]
    )
    return output.decode('UTF-8').split('\n')
ebc0a117f2f11d585475f4781e67331e3ca9a06a
25,537
def geometric_median(X, eps=1e-5):
    """
    Geometric median of a 2D dataset, computed by Weiszfeld's iteration
    as implemented in https://stackoverflow.com/a/30305181

    :param X: 2D dataset (one point per row)
    :param eps: convergence tolerance on the step size
    :return: median point from X
    """
    estimate = np.mean(X, 0)
    while True:
        dists = cdist(X, [estimate])
        nonzero_mask = (dists != 0)[:, 0]
        inv = 1 / dists[nonzero_mask]
        inv_sum = np.sum(inv)
        weights = inv / inv_sum
        weighted_mean = np.sum(weights * X[nonzero_mask], 0)

        n_coincident = len(X) - np.sum(nonzero_mask)
        if n_coincident == 0:
            candidate = weighted_mean
        elif n_coincident == len(X):
            return estimate
        else:
            # Modified Weiszfeld step for when the estimate coincides
            # with one or more data points.
            R = (weighted_mean - estimate) * inv_sum
            r = np.linalg.norm(R)
            rinv = 0 if r == 0 else n_coincident / r
            candidate = max(0, 1 - rinv) * weighted_mean + min(1, rinv) * estimate

        if euclidean(estimate, candidate) < eps:
            return candidate
        estimate = candidate
9c8b0d69b4f66dc471bcb838b19ecac934493c54
25,538
import numpy as np


def distance(bbox, detection):
    """Return the L1 (Manhattan) distance from *bbox* to each detection.

    :param bbox: 1-D array-like bounding box
    :param detection: 2-D array with one detection per row
    :return: 1-D float array of L1 distances, one per detection row
    """
    # BUG FIX: the original looped with py2-only `xrange`, which raises
    # NameError on Python 3; the vectorized form is also O(n) in numpy.
    diff = np.asarray(detection) - np.asarray(bbox)
    return np.abs(diff).sum(axis=1).astype(float)
21c4beea66df1dde96cd91cff459bf10f1b7a41e
25,539
from typing import TextIO
from typing import Tuple


def _read_float(line: str, pos: int, line_buffer: TextIO
                ) -> Tuple[float, str, int]:
    """Read a float token from *line* starting at *pos*.

    Args:
        line: line.
        pos: current position.
        line_buffer: line buffer for nnet3 file (unused; kept for a
            uniform reader signature).

    Returns:
        Parsed float value, the (unchanged) line, and the new position.
    """
    del line_buffer  # Unused.
    token, new_pos = read_next_token(line, pos)
    return float(token), line, new_pos
f0c76b2224a17854902aadbe7a715ca00da64932
25,540
def psd_explore(
        data_folder,
        channel_index,
        plot=True,
        relative=False,
        reverse=False,
        export_to_csv=False):
    """PSD Explore.

    This assumes use with VR300 for the AD Feedback experiment.

    data_folder: path to a BciPy data folder with raw data and triggers
    channel_index: channel to use for PSD calculation
    plot: whether or not to plot the filtered data and psd spectrum
    relative: whether or not to export relative PSD output
    reverse: whether the level estimations should be descending
        (default; ie band increases with attention) or ascending
    export_to_csv: whether or not to write output to csv

    returns: the per-sequence exports produced by create_sequence_exports
        (the original docstring claimed average/standard deviation, which
        did not match the code)
    """
    # construct the relevant data paths
    trigger_path = f'{data_folder}/{TRIGGERS_FN}'
    raw_data_path = f'{data_folder}/{RAW_DATA_FN}'

    # print helpful information to console
    print('CONFIGURATION:\n'
          f'Trial length: {TRIAL_LENGTH} \n'
          f'Downsample rate: {DOWNSAMPLE_RATE} \n'
          f'Notch Frequency: {NOTCH_FREQ} \n'
          f'Bandpass Range: [{FILTER_HP}-{FILTER_LP}] \n'
          f'Trigger Path: [{trigger_path}] \n'
          f'Raw Data Path: [{raw_data_path}] \n')

    # process and get the data from csv
    raw_data, _, channels, type_amp, fs = read_data_csv(raw_data_path)

    # print helpful information to console
    print(
        'DEVICE INFO:'
        f'\nChannels loaded: {channels}. \n'
        f'Using channel: {channels[channel_index]} \n'
        f'Using Device: {type_amp} - {fs} samples/sec \n')

    # filter the data
    filtered_data, sampling_rate_post_filter = filter_data(
        raw_data, fs, DOWNSAMPLE_RATE, NOTCH_FREQ)

    # decode triggers and get a channel map
    _, trigger_targetness, trigger_timing, offset = trigger_decoder(
        mode=MODE,
        trigger_path=trigger_path)

    # add a static offset of 100 ms [TODO load from parameters]
    offset = offset + .1

    # reshape the data
    x, y, num_seq, _ = trial_reshaper(
        trigger_targetness,
        trigger_timing,
        filtered_data,
        mode=MODE,
        fs=fs,
        k=DOWNSAMPLE_RATE,
        offset=offset,
        channel_map=analysis_channels(channels, type_amp),
        trial_length=TRIAL_LENGTH)

    # BUG FIX: the result was previously bound to `data` but the function
    # exported and returned the undefined name `exports` (NameError).
    # One name is now used throughout.
    exports = create_sequence_exports(
        x, num_seq * 10, channel_index, TRIAL_LENGTH,
        sampling_rate_post_filter, plot, relative, reverse)

    # plot raw data for the trial index given
    if plot:
        time = np.arange(exports.size) / sampling_rate_post_filter
        fig, ax = plt.subplots(1, 1, figsize=(12, 4))
        plt.plot(time, exports, lw=1.5, color='k')
        plt.xlabel('Time (seconds)')
        plt.ylabel('Voltage')
        plt.xlim([time.min(), time.max()])
        plt.title('Raw Data Plot')
        sns.set(font_scale=1.2)
        sns.despine()
        plt.show()

    if export_to_csv:
        export_data_to_csv(exports)

    return exports
25,541
def pk_to_p2wpkh_in_p2sh_addr(pk, testnet=False):
    """
    Compressed public key (hex string) -> p2wpkh nested in p2sh address.
    'SegWit address.'
    """
    pk_bytes = bytes.fromhex(pk)
    assert is_compressed_pk(pk_bytes), \
        "Only compressed public keys are compatible with p2sh-p2wpkh addresses. See BIP49."

    # Script sig is just 0 + PUSH(20){hash160(cpk)}
    witness_script = OP_0 + push_bytes(hash160_bytes(pk_bytes))

    # Address is prefix + hash160 of the script sig, base58check-encoded.
    prefix = _prefix_bytes('p2sh', testnet=testnet)
    return Base58.check_encode(prefix + hash160_bytes(witness_script))
10e9b2659df98b02b5030c1eec1820c9bbdd1a8b
25,542
def remove_imaginary(pauli_sums):
    """
    Remove the imaginary component of each term in a Pauli sum.

    :param PauliSum pauli_sums: The Pauli sum to process.
    :return: a purely hermitian Pauli sum.
    :rtype: PauliSum
    """
    if not isinstance(pauli_sums, PauliSum):
        raise TypeError("not a pauli sum. please give me one")
    # Start from a zero term and accumulate each term with its
    # coefficient truncated to the real part.
    hermitian_sum = sI(0) * 0.0
    for term in pauli_sums:
        hermitian_sum += term_with_coeff(term, term.coefficient.real)
    return hermitian_sum
2edd93f338d4e2dc1878953ced5edf954f509ccc
25,543
def log_sigmoid_deprecated(z):
    """
    Calculate the log of sigmoid, avoiding overflow and underflow.
    """
    if abs(z) < 30:
        # Safe region: evaluate directly.
        return np.log(sigmoid(z))
    # Asymptotic approximations for |z| >= 30:
    # log(sigmoid(z)) ~ -exp(-z) for large positive z, ~ z for large negative z.
    return -np.exp(-z) if z > 0 else z
576d7de9bf61aa32c3e39fc5ca7f4428b43519bb
25,544
import numpy as np


def roty(t):
    """Rotation matrix about the y-axis by angle *t* (radians)."""
    cos_t = np.cos(t)
    sin_t = np.sin(t)
    return np.array([
        [cos_t, 0, sin_t],
        [0, 1, 0],
        [-sin_t, 0, cos_t],
    ])
9c05a96c8c36fd3cd7eee1860574b9242d7543d6
25,545
import numpy as np


def ranks_to_metrics_dict(ranks):
    """Calculates metrics, returns metrics as a dict."""
    hits_at = {k: np.mean(ranks <= k) * 100 for k in (1, 3, 10)}
    return {
        'MR': np.mean(ranks),
        'MRR': np.mean(1. / ranks),
        'hits@[1,3,10]': hits_at,
    }
60ee20fdf43240e3f0aa0e414fd49bcc52f83446
25,546
def bias_correction(input_data, output_filename='', mask_filename='', method="ants", command="/home/abeers/Software/ANTS/ANTs.2.1.0.Debian-Ubuntu_X64/N4BiasFieldCorrection", temp_dir='./'):
    """ A catch-all function for bias field correction.

        Will perform bias correction on an input volume depending on the
        'method' and 'command' inputted. (NOTE(review): the original
        docstring said "motion correction" — the ANTs N4 command and the
        function name indicate bias correction.)

        Parameters
        ----------
        input_data: str or array
            Can be a 4D volume or a filename.
        output_filename: str
            Location to save output data to. If left as '', will return numpy array.
        mask_filename: str
            Optional mask passed to N4 via '-x'.
        method: str
            Will perform bias correction according to the provided method.
            Currently available: ['ants']
        command: str
            The literal command-line string to be inputted via Python's subprocess module.
        temp_dir: str
            If temporary files are created, they will be saved here.

        Returns
        -------
        output: array
            Output data, only if output_filename is left as ''.
    """
    # BUG FIX: this block used Python-2-only syntax (`print` statements and
    # `basestring`) in a file that elsewhere uses py3 f-strings; converted
    # to Python 3 (print() calls, isinstance(..., str)). Dead `pass` removed.
    bias_correction_methods = ['ants', 'slicer']
    if method not in bias_correction_methods:
        print('Input \"method\" parameter is not available. Available methods: ', bias_correction_methods)
        return

    if method == 'ants':
        temp_input, temp_output = False, False

        # Array input is written to a temp NIfTI so ANTs can read it.
        if not isinstance(input_data, str):
            input_filename = os.path.join(temp_dir, 'temp.nii.gz')
            save_numpy_2_nifti(input_data, input_filename)
            temp_input = True
        else:
            input_filename = input_data

        if output_filename == '':
            temp_output = True
            output_filename = os.path.join(temp_dir, 'temp_out.nii.gz')

        print(' '.join([command, '-i', input_filename, '-o', output_filename, '-x', mask_filename]))
        if mask_filename != '':
            subprocess.call([command, '-i', input_filename, '-o', output_filename, '-x', mask_filename])
        else:
            subprocess.call([command, '-i', input_filename, '-o', output_filename])

        if temp_input:
            os.remove(input_filename)
        if temp_output:
            output = convert_input_2_numpy(output_filename)
            os.remove(output_filename)
            return output

    if method == 'slicer':
        print('Slicer method not yet implemented! Sorry...')
25,547
def randperm2d(H, W, number, population=None, mask=None):
    """Generate distinct random 2-D indices on an H x W grid.

    Parameters
    ----------
    H : {integer}
        height
    W : {integer}
        width
    number : {integer}
        how many random indices to return
    population : {list or numpy array(1d or 2d)}
        part of population in range(0, H*W)
    mask : optional 2-D mask; only cells where mask > 0 are sampled
    """
    if population is None:
        population = np.arange(H * W).reshape(H, W)
    population = np.array(population)
    if mask is not None and np.sum(mask) != 0:
        population = population[mask > 0]

    shuffled = np.random.permutation(population.flatten())
    rows = np.floor(shuffled / W).astype('int')
    cols = np.floor(shuffled - rows * W).astype('int')
    return rows[0:number], cols[0:number]
a3507c488740e0190673cb0bd920c0c0f15b77a1
25,548
def get_engine(db_credentials):
    """
    Get SQLalchemy engine using credentials.

    Input:
        db_credentials: dict with keys 'user', 'pwd', 'host', 'port', 'db'
    """
    creds = db_credentials
    url = (
        f"postgresql://{creds['user']}:{creds['pwd']}"
        f"@{creds['host']}:{creds['port']}/{creds['db']}"
    )
    return create_engine(url, pool_size=50)
ff66c10c7a79b0f5751979f0f5fc74c16d97eac0
25,549
def numpy_to_vtkIdTypeArray(num_array, deep=0):
    """
    Convert a numpy integer array to a vtkIdTypeArray, validating that
    the numpy dtype matches VTK's compiled id-type width.

    Notes
    -----
    This was pulled from VTK and modified to eliminate numpy 1.14 warnings.
    VTK uses a BSD license, so it's OK to do that.
    """
    isize = vtk.vtkIdTypeArray().GetDataTypeSize()
    dtype = num_array.dtype
    if isize == 4:
        if dtype != np.int32:
            raise ValueError(
                'Expecting a numpy.int32 array, got %s instead.' % (str(dtype)))
    elif dtype != np.int64:
        raise ValueError(
            'Expecting a numpy.int64 array, got %s instead.' % (str(dtype)))
    return numpy_to_vtk(num_array, deep, vtkConstants.VTK_ID_TYPE)
149da1f117968839801f2720c132451045b21fb6
25,550
def denormalize_ged(g1, g2, nged):
    """
    Convert a normalized GED value back into an absolute edit distance,
    scaling by the mean node count of the two graphs.
    """
    mean_size = (g1.num_nodes + g2.num_nodes) / 2
    return round(nged * mean_size)
214813120d552ef5ece10349978238117fe26cf3
25,551
from datetime import datetime
import time


def get_current_time():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    # BUG FIX: the original called `datetime.datetime.fromtimestamp(...)`
    # (AttributeError — `datetime` here is the class, not the module) and
    # `time()` (TypeError — `time` is the module). Use the class directly
    # and time.time() for the timestamp.
    return datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
236bd2b141c3686bb4c05a18a6d0f0ef3b15ea6b
25,552
import os


def get_bad_fname(p, subj, check_exists=True):
    """Get filename for post-SSS bad channels.

    Creates the bad-channel directory if missing. When ``check_exists``
    is True, returns None if the file does not exist yet.
    """
    bad_dir = op.join(p.work_dir, subj, p.bad_dir)
    if not op.isdir(bad_dir):
        os.mkdir(bad_dir)
    bad_file = op.join(bad_dir, 'bad_ch_' + subj + p.bad_tag)
    if check_exists and not op.isfile(bad_file):
        return None
    return bad_file
fe8ad5e09a5da68d2113e1053cdb294c43562444
25,553
import asyncio


async def test_script_mode_2(hass, hass_ws_client, script_mode, script_execution):
    """Test overlapping runs with max_runs > 1."""
    # NOTE: `id` shadows the builtin; kept as-is (documentation-only change).
    id = 1

    # Monotonic id generator for websocket messages.
    def next_id():
        nonlocal id
        id += 1
        return id

    # Set whenever the test event fires, so we can await each script run
    # reaching its first event step.
    flag = asyncio.Event()

    @callback
    def _handle_event(_):
        flag.set()

    event = "test_event"
    # Script fires an event, suspends on a wait_template until the switch
    # turns off, then fires a second event.
    script_config = {
        "script1": {
            "sequence": [
                {"event": event, "event_data": {"value": 1}},
                {"wait_template": "{{ states.switch.test.state == 'off' }}"},
                {"event": event, "event_data": {"value": 2}},
            ],
            "mode": script_mode,
        }
    }
    client = await hass_ws_client()
    hass.bus.async_listen(event, _handle_event)
    assert await async_setup_component(hass, "script", {"script": script_config})

    # First run: starts, fires event 1, then blocks in wait_template.
    hass.states.async_set("switch.test", "on")
    await hass.services.async_call("script", "script1")
    await asyncio.wait_for(flag.wait(), 1)

    # List traces
    await client.send_json({"id": next_id(), "type": "trace/list", "domain": "script"})
    response = await client.receive_json()
    assert response["success"]
    trace = _find_traces(response["result"], "script", "script1")[0]
    assert trace["state"] == "running"

    # Start second run of script while first run is suspended in wait_template.
    flag.clear()
    await hass.services.async_call("script", "script1")
    await asyncio.wait_for(flag.wait(), 1)

    # List traces
    await client.send_json({"id": next_id(), "type": "trace/list", "domain": "script"})
    response = await client.receive_json()
    assert response["success"]
    trace = _find_traces(response["result"], "script", "script1")[1]
    assert trace["state"] == "running"

    # Let both scripts finish
    hass.states.async_set("switch.test", "off")
    await hass.async_block_till_done()

    # List traces: the first run's final state depends on the script mode
    # (the `script_execution` fixture parameter); the second always finishes.
    await client.send_json({"id": next_id(), "type": "trace/list", "domain": "script"})
    response = await client.receive_json()
    assert response["success"]
    trace = _find_traces(response["result"], "script", "script1")[0]
    assert trace["state"] == "stopped"
    assert trace["script_execution"] == script_execution
    trace = _find_traces(response["result"], "script", "script1")[1]
    assert trace["state"] == "stopped"
    assert trace["script_execution"] == "finished"
25,554
def ease_of_movement(high, low, close, volume, n=20, fillna=False):
    """Ease of movement (EoM, EMV)

    Relates an asset's price change to its volume; useful for assessing
    the strength of a trend.

    https://en.wikipedia.org/wiki/Ease_of_movement

    Args:
        high(pandas.Series): dataset 'High' column.
        low(pandas.Series): dataset 'Low' column.
        close(pandas.Series): dataset 'Close' column (unused by the formula).
        volume(pandas.Series): dataset 'Volume' column.
        n(int): n period.
        fillna(bool): if True, fill nan values.

    Returns:
        pandas.Series: New feature generated.
    """
    raw_emv = (high.diff(1) + low.diff(1)) * (high - low) / (2 * volume)
    smoothed = raw_emv.rolling(n).mean()
    if fillna:
        smoothed = smoothed.replace([np.inf, -np.inf], np.nan).fillna(0)
    return pd.Series(smoothed, name='eom_' + str(n))
c25720e866b1d4635d7e8256b9ace94f78b463ed
25,555
def Document(docx=None, word_open_xml=None):
    """
    Return a |Document| object.

    *docx* may be a path to a ``.docx`` file (a string) or a file-like
    object; alternatively *word_open_xml* may be a string of xml. Specify
    at most one of the two. When both are missing/None, the built-in
    default document "template" is loaded.
    """
    if docx and word_open_xml:
        raise ValueError("Must either specify docx or word_open_xml, but not both")
    if word_open_xml is None:
        if docx is None:
            docx = _default_docx_path()
        document_part = Package.open(docx).main_document_part
        if document_part.content_type != CT.WML_DOCUMENT_MAIN:
            tmpl = "file '%s' is not a Word file, content type is '%s'"
            raise ValueError(tmpl % (docx, document_part.content_type))
    else:
        document_part = Package.open(
            word_open_xml, is_from_file=False
        ).main_document_part
        if document_part.content_type != CT.WML_DOCUMENT_MAIN:
            tmpl = "string '%s' is not a Word document, content type is '%s'"
            raise ValueError(tmpl % (word_open_xml, document_part.content_type))
    return document_part.document
565dd4f7f1d815f2e5ef97226d1175283ba942de
25,556
def css_tag(parser, token):
    """
    Renders a tag to include the stylesheet. It takes an optional second
    parameter for the media attribute; the default media is
    "screen, projector".

    Usage::

        {% css "<somefile>.css" ["<projection type(s)>"] %}

    Examples::

        {% css "myfile.css" %}
        {% css "myfile.css" "screen, projection"%}
    """
    path = get_path_from_tokens(token)
    pieces = token.split_contents()
    # Use the user-supplied media types (third token, quotes stripped)
    # when present; otherwise fall back to the default.
    media_type = pieces[2][1:-1] if len(pieces) > 2 else "screen, projection"
    return CssTagNode(path, media_type=media_type)
b05deebf31c864408df33a41ba95016a06f48e2e
25,557
def camelcase(path):
    """Applies mixedcase and capitalizes the first character"""
    prefixed = f'_{path}'
    return mixedcase(prefixed)
484bfcf8797637f56d5d0bdcad6c370f158773c0
25,558
import copy


def ImproveData_v2(Lidar_DataOld, Lidar_Data, Data_Safe, Speed, orientation, orientationm1):
    """
    Intended to calculate new positions for obstacles taking the car's
    relative speed toward each point into account (requires accelerometer
    data).

    Return:
        Advanced_Data : [step_i, distance_i, x_i, y_i, Xsafe_i or Xvel_i,
        Ysafe_i or Yvel_i]

    NOTE(review): the original built an always-empty `temp` list and then
    looped over it, so the velocity-based update never executed — the
    function effectively returned a deep copy of Data_Safe. The dead loop
    has been removed; the actual velocity correction (using
    Lidar_DataOld/Lidar_Data/Speed/orientation) still needs implementing.
    """
    return copy.deepcopy(Data_Safe)
25,559
def label_by_track(mask, label_table):
    """Label objects in mask with track ID

    Args:
        mask (numpy.ndarray): uint8 np array, output from main model.
        label_table (pandas.DataFrame): track table.

    Returns:
        numpy.ndarray: uint8/16 dtype based on track count.
    """
    # The mask must contain one slice per frame referenced in the table.
    assert mask.shape[0] == np.max(label_table['frame'] + 1)

    # Track IDs may exceed the uint8 range; widen to uint16 when needed.
    # NOTE(review): the factor 2 appears to reserve headroom above the
    # largest track id — confirm intent.
    if np.max(label_table['trackId']) * 2 > 254:
        mask = mask.astype('uint16')

    for i in np.unique(label_table['frame']):
        sub_table = label_table[label_table['frame'] == i]
        # Work on a copy of the frame so reads always see original labels.
        sl = mask[i, :, :].copy()
        lbs = np.unique(sl).tolist()
        '''
        if lbs[-1] + 1 != len(lbs):
            raise ValueError('Mask is not continuously or wrongly labeled.')
        '''
        ori_labels = set(lbs) - {0}
        untracked = list(ori_labels - set(list(sub_table['continuous_label'])))
        # remove untracked: labels present in the mask but absent from the
        # track table are zeroed out (background).
        for j in untracked:
            sl[mask[i, :, :] == j] = 0
        # update tracked: rewrite each remaining label to its track ID.
        # Comparisons use the ORIGINAL frame (mask[i]) so earlier rewrites
        # in `sl` cannot collide with later ones.
        for j in sub_table.index:
            sl[mask[i, :, :] == sub_table.loc[j, 'continuous_label']] = sub_table.loc[j, 'trackId']
        mask[i, :, :] = sl.copy()

    return mask
9190714e8cfc3955d1aeffd22d20574d14889538
25,560
import zipfile
import xml


def load_guidata(filename, report):
    """Check if we have a GUI document.

    Opens the FreeCAD file (a zip archive) at *filename*, parses its
    GuiDocument.xml if present, and resolves per-object DiffuseColor
    entries into lists of color tuples. *report* is a callable used for
    progress messages (Blender operator-style: report({'INFO'}, msg)).
    Returns the parsed guidata dict, or None if no GUI document exists.
    """
    report({'INFO'}, "load guidata..")
    guidata = None
    zdoc = zipfile.ZipFile(filename)
    if zdoc:
        if "GuiDocument.xml" in zdoc.namelist():
            gf = zdoc.open("GuiDocument.xml")
            guidata = gf.read()
            gf.close()
            Handler = FreeCAD_xml_handler()
            xml.sax.parseString(guidata, Handler)
            guidata = Handler.guidata
            for key, properties in guidata.items():
                # open each diffusecolor files and retrieve values
                # first 4 bytes are the array length,
                # then each group of 4 bytes is abgr
                if "DiffuseColor" in properties:
                    # print ("opening:",guidata[key]["DiffuseColor"])
                    df = zdoc.open(guidata[key]["DiffuseColor"])
                    buf = df.read()
                    # print (buf," length ",len(buf))
                    df.close()
                    cols = []
                    # Start at i=1 to skip the 4-byte length header; each
                    # subsequent 4-byte group is read back-to-front (abgr
                    # stored -> rgba tuple out).
                    for i in range(1, int(len(buf)/4)):
                        cols.append(
                            (buf[i*4+3], buf[i*4+2], buf[i*4+1], buf[i*4]))
                    guidata[key]["DiffuseColor"] = cols
        zdoc.close()
    report({'INFO'}, "load guidata done.")
    # print("guidata:", guidata)
    return guidata
3828d895a5abb9c6f783eee52d8c747f2f32c20c
25,561
def question_answers(id2line, convos):
    """ Divide the dataset into two sets: questions and answers.

    Each consecutive pair of lines in a conversation yields one
    (question, answer) pair.
    """
    questions, answers = [], []
    for convo in convos:
        for q_id, a_id in zip(convo[:-1], convo[1:]):
            questions.append(id2line[q_id])
            answers.append(id2line[a_id])
    assert len(questions) == len(answers)
    return questions, answers
f2654fcff2b9d90e78750cc8632eea9771361c4d
25,562
import copy


def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100):
    """
    creates a higher resolution kernel with subgrid resolution as an
    interpolation of the original kernel in an iterative approach

    :param kernel: initial kernel
    :param subgrid_res: subgrid resolution required
    :param odd: if True, force the output grid to an odd pixel count
    :param num_iter: number of refinement iterations (at least 1 is run)
    :return: kernel with higher resolution (larger)
    """
    subgrid_res = int(subgrid_res)
    if subgrid_res == 1:
        # Nothing to refine at native resolution.
        return kernel
    nx, ny = np.shape(kernel)
    d_x = 1. / nx
    x_in = np.linspace(d_x/2, 1-d_x/2, nx)
    # NOTE(review): d_y is derived from nx, not ny -- looks like a typo;
    # harmless for the square kernels presumably used here, but confirm.
    d_y = 1. / nx
    y_in = np.linspace(d_y/2, 1-d_y/2, ny)
    nx_new = nx * subgrid_res
    ny_new = ny * subgrid_res
    if odd is True:
        if nx_new % 2 == 0:
            nx_new -= 1
        if ny_new % 2 == 0:
            ny_new -= 1

    d_x_new = 1. / nx_new
    d_y_new = 1. / ny_new
    x_out = np.linspace(d_x_new/2., 1-d_x_new/2., nx_new)
    y_out = np.linspace(d_y_new/2., 1-d_y_new/2., ny_new)
    kernel_input = copy.deepcopy(kernel)
    kernel_subgrid = image_util.re_size_array(x_in, y_in, kernel_input, x_out, y_out)
    kernel_subgrid = kernel_norm(kernel_subgrid)
    # Iteratively correct the proposal: down-sample it back to the original
    # pixel grid, compare with the input kernel, and feed the residual back.
    for i in range(max(num_iter, 1)):
        # given a proposition, re-size it to original pixel size
        if subgrid_res % 2 == 0:
            kernel_pixel = averaging_even_kernel(kernel_subgrid, subgrid_res)
        else:
            kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
        delta = kernel - kernel_pixel
        temp_kernel = kernel_input + delta
        kernel_subgrid = image_util.re_size_array(x_in, y_in, temp_kernel, x_out, y_out)#/norm_subgrid
        kernel_subgrid = kernel_norm(kernel_subgrid)
        kernel_input = temp_kernel

    #from scipy.ndimage import zoom

    #ratio = subgrid_res
    #kernel_subgrid = zoom(kernel, ratio, order=4) / ratio ** 2
    #print(np.shape(kernel_subgrid))
    # whatever has not been matched is added to zeroth order (in squares of the undersampled PSF)
    if subgrid_res % 2 == 0:
        return kernel_subgrid
    kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
    kernel_pixel = kernel_norm(kernel_pixel)
    delta_kernel = kernel_pixel - kernel_norm(kernel)
    id = np.ones((subgrid_res, subgrid_res))
    delta_kernel_sub = np.kron(delta_kernel, id)/subgrid_res**2
    return kernel_norm(kernel_subgrid - delta_kernel_sub)
8c62e9a09052faf2f52dc2141b0432b115c79417
25,563
import spacy.en
import logging


def get_spacy():
    """Load and return the spaCy English pipeline.

    The POS tagger and rule matcher are switched off; tokenization,
    dependency parsing and entity recognition remain available.

    Returns:
        A spaCy Language object for English
    """
    logging.info('Loading spaCy...')
    return spacy.en.English(tagger=False, parser=True, matcher=False)
6abe2c9cb8cb0027c53c5e013d4127829b339699
25,564
import astroobs as obs
import re
from datetime import datetime


def get_JDs(period='102', night=True, arrays=True, verbose=True):
    """
    Get the Julian days for all ESPRESSO GTO runs in a given period.
    If `night`=True, return the JD of sunset and sunrise. This function
    returns the runs' start and end in arrays (lists if `arrays`=False).

    :param period: ESO period (int or str); several periods may be given
        as a comma-separated string, e.g. ``'102,103'``
    :param night: convert run boundary dates to sunset/sunrise MJDs at VLT
    :param arrays: return numpy arrays instead of plain lists
    :param verbose: print progress messages while querying ESO
    :return: (starts, ends) -- MJDs of run starts and ends, deduplicated
    """
    if night:
        # from astroplan import Observer
        # paranal = Observer.at_site("paranal")
        VLT = obs.Observation('vlt', moonAvoidRadius=15, horizon_obs=0)

    if isinstance(period, int):
        period = str(period)

    if ',' in period:
        periods = period.split(',')
    else:
        periods = [period]

    starts, ends = [], []

    for period in periods:
        if verbose:
            print(f'Period: {period},', end=' ')
            print('starting ESO query...', end=' ', flush=True)

        r = query(period)
        if verbose:
            print('done')
        # Drop the header (first two lines) and trailing blank line.
        lines = r.text.split('\n')[2:-1]

        # Run dates appear as "between 1 October 2018 and 14 October 2018".
        pattern = re.compile(r"between \d* \w* \d* and \d* \w* \d*")

        if verbose and night:
            print('calculating sunset/sunrise times...')

        for line in lines:
            try:
                found = re.findall(pattern, line)[0]
            except IndexError:
                # Line does not describe a run; skip it.
                continue

            date1 = found[8:-16]
            if night:
                t = Time(parse_date(date1) + ' 12:00:00')
                VLT.change_date(t.datetime)
                jd1 = Time(datetime.datetime.strptime(str(VLT.sunset),
                                                      r'%Y/%m/%d %H:%M:%S')).mjd
                # jd1 = paranal.sun_set_time(t, 'next').mjd
            else:
                jd1 = Time(parse_date(date1)).mjd  # - 24e5

            date2 = found[24:]
            if night:
                t = Time(parse_date(date2) + ' 12:00:00')
                VLT.change_date(t.datetime)
                # NOTE(review): this also reads VLT.sunset, but the
                # commented-out line below suggests the run END should use
                # sunRISE -- confirm whether this is intentional.
                jd2 = Time(datetime.datetime.strptime(str(VLT.sunset),
                                                      r'%Y/%m/%d %H:%M:%S')).mjd
                # jd2 = paranal.sun_rise_time(t, 'previous').mjd
            else:
                jd2 = Time(parse_date(date2)).mjd  # - 24e5

            starts.append(jd1)
            ends.append(jd2)

    # Deduplicate run starts; keep the matching ends via the returned index.
    starts, ind = np.unique(starts, return_index=True)
    ends = np.array(ends)[ind]

    if arrays:
        return starts, ends
    else:
        return list(starts), list(ends)
f21aea967e0d1a481d599bf7ffea2316d401a7ea
25,565
def normalize_breton(breton_string: str) -> str:
    """Apply the Breton mutation pipeline to *breton_string*.

    The lower-cased, stripped input is composed with each mutation
    transducer in order (preprocessing, soft, hard and spirant mutation,
    postprocessing) and the resulting string is returned.
    """
    result = breton_string.strip().lower()
    for stage in (DO_PREPROCESSING,
                  DO_SOFT_MUTATION,
                  DO_HARD_MUTATION,
                  DO_SPIRANT_MUTATION,
                  DO_POSTPROCESSING):
        # '@' is FST composition; the first pass promotes the raw string.
        result = result @ stage
    return result.string()
f5536f98c881d854fc279b81b5a6e99e4811165f
25,566
from keras.utils.data_utils import get_file
from art import DATA_PATH


def load_mnist(raw=False):
    """Loads MNIST dataset from `DATA_PATH` or downloads it if necessary.

    :param raw: `True` if no preprocessing should be applied to the data. Otherwise, data is normalized to 1.
    :type raw: `bool`
    :return: `(x_train, y_train), (x_test, y_test), min, max`
    :rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
    """
    # get_file caches the archive under DATA_PATH, downloading it only once.
    path = get_file('mnist.npz', cache_subdir=DATA_PATH, origin='https://s3.amazonaws.com/img-datasets/mnist.npz')

    f = np.load(path)
    x_train = f['x_train']
    y_train = f['y_train']
    x_test = f['x_test']
    y_test = f['y_test']
    f.close()

    # Add channel axis
    # NOTE(review): the channel axis and normalization are only applied when
    # raw is False; raw=True returns the untouched 0-255 uint8 images.
    min_, max_ = 0, 255
    if not raw:
        min_, max_ = 0., 1.
        x_train = np.expand_dims(x_train, axis=3)
        x_test = np.expand_dims(x_test, axis=3)
        x_train, y_train = preprocess(x_train, y_train)
        x_test, y_test = preprocess(x_test, y_test)

    return (x_train, y_train), (x_test, y_test), min_, max_
fc661afef4062e14a90a3cbc1a837cd6f68b6039
25,567
def word_flag(*args):
    """
    word_flag() -> flags_t

    Get a flags_t representing a word.
    """
    # SWIG-style wrapper: all work happens in the native IDA module.
    return _ida_bytes.word_flag(*args)
765051d3c51974f24cf71a846ab3ffed4767a3d0
25,568
from typing import Mapping
from sys import path


def get_spark_config(predictrip_config: Mapping[str, Mapping[str, str]]) -> SparkConf:
    """
    Create an object representing the Spark configuration we want

    :param predictrip_config: mapping returned by load_config containing configuration options
    :return: pyspark.SparkConf instance
    """
    # NOTE: contrary to https://www.geomesa.org/documentation/user/spark/pyspark.html#using-geomesa-pyspark, use of
    # geomesa_pyspark.configure() no longer necessary since Spark 2.1, as long as you tell spark to include the
    # geomesa_pyspark python module some other way (e.g. spark.files)
    sc = SparkConf()
    # App name shown in the Spark UI, tagged with this script's filename.
    # NOTE(review): 'path.basename' requires os.path -- the 'from sys import
    # path' above looks like an extraction artifact; confirm the real import.
    sc = sc.setAppName('PredicTrip ' + path.basename(__file__))
    # FIXME: the following doesn't seem to be effective
    sc = sc.setAll([('fs.s3a.awsAccessKeyId', predictrip_config['AWS']['access_key_id']),
                    ('fs.s3a.awsSecretAccessKey', predictrip_config['AWS']['secret_access_key'])])
    # add to sc any spark options that might be set in predictrip_config
    if 'executor_cores' in predictrip_config['Spark']:
        sc = sc.set('spark.executor.cores', predictrip_config['Spark']['executor_cores'])
    if 'executor_memory' in predictrip_config['Spark']:
        sc = sc.set('spark.executor.memory', predictrip_config['Spark']['executor_memory'])
    return sc
cae2f7f4f384b2a05c8f66b65976fd588f15cd4a
25,569
from typing import Optional
from typing import Dict
from typing import Iterable
from typing import Union
from typing import List


def get_sequence_annotations(
    sequence: str,
    allow: Optional[set] = {"H", "K", "L"},
    scheme: Optional[str] = "chothia",
    cdr1_scheme: Optional[Dict[str, Iterable]] = {
        "H": range(26, 33),
        "L": range(24, 35),
    },
    cdr2_scheme: Optional[Dict[str, Iterable]] = {
        "H": range(52, 57),
        "L": range(50, 57),
    },
    cdr3_scheme: Optional[Dict[str, Iterable]] = {
        "H": range(95, 103),
        "L": range(89, 98),
    },
    assign_germline: Optional[bool] = True,
) -> Dict[str, Union[str, int, List[str]]]:
    """
    For VH or VL amino acid sequence returns the three CDR sequences as
    determined from the input numbering (scheme) and the given ranges.
    default ranges are Chothia CDRs.

    ============================================================================
    Note:
    * Gracefully stolen and refactored get_cdr_simple() from Parapred source code.
    * Returns a dictionary with CDR sequences, numbering scheme positions for
      each CDR residue.

    :param sequence: VH or VL amino acid sequence
    :param allow: chain types accepted by ANARCI (H = heavy, K = kappa,
        L = lambda)
    :param scheme: numbering scheme passed to ANARCI
    :param cdr1_scheme: per-chain-type residue number ranges for CDR1
    :param cdr2_scheme: per-chain-type residue number ranges for CDR2
    :param cdr3_scheme: per-chain-type residue number ranges for CDR3
    :param assign_germline: also ask ANARCI for germline assignment
    :raises ValueError: when the detected chain type has no entry in the
        supplied CDR scheme
    """
    # NOTE(review): the dict/set defaults are shared across calls; they are
    # only read here, so this is safe as long as callers never mutate them.
    anarci_output = run_and_parse_anarci(
        sequence, scheme=scheme, allow=allow, assign_germline=assign_germline
    )
    numbering = anarci_output["NUMBERING"]  # numbering starts with 1 and not 0
    chain_type = anarci_output["CHAIN_TYPE"]

    # Kappa chains reuse the lambda ("L") CDR ranges unless given explicitly.
    if chain_type == "K" and chain_type not in cdr1_scheme:
        chain_type = "L"
    if chain_type not in cdr1_scheme:
        raise ValueError(f"chain_type {chain_type} is not in input CDR scheme")

    cdr1_scheme = cdr1_scheme[chain_type]
    cdr2_scheme = cdr2_scheme[chain_type]
    cdr3_scheme = cdr3_scheme[chain_type]

    # extract CDR sequences; gap positions ("-") are skipped entirely
    cdr1, cdr2, cdr3 = "", "", ""
    cdr1_numbering, cdr2_numbering, cdr3_numbering = [], [], []
    for num_tuple, res in numbering:
        # Position label is the residue number plus any insertion code.
        residue_position = str(num_tuple[0]) + num_tuple[1].rstrip()
        if num_tuple[0] in cdr1_scheme:
            if res != "-":
                cdr1_numbering.append(residue_position)
                cdr1 += res
        elif num_tuple[0] in cdr2_scheme:
            if res != "-":
                cdr2_numbering.append(residue_position)
                cdr2 += res
        elif num_tuple[0] in cdr3_scheme:
            if res != "-":
                cdr3_numbering.append(residue_position)
                cdr3 += res

    annotation_dict = {
        "CDR1": cdr1,
        "CDR1_NUMBERING": cdr1_numbering,
        "CDR2": cdr2,
        "CDR2_NUMBERING": cdr2_numbering,
        "CDR3": cdr3,
        "CDR3_NUMBERING": cdr3_numbering,
    }

    annotation_dict = {**annotation_dict, **anarci_output}
    # NUMBERING is bulky and fully consumed above; drop it from the result.
    del annotation_dict["NUMBERING"]

    return annotation_dict
3f7d74693086e7603215d912083653005cdddb5a
25,570
import stat


def skew(variable=None, weights=None, data=None):
    """Return the asymmetry coefficient of a sample.

    Parameters
    ----------
    variable : array-like, str
    weights : array-like, str
    data : pandas.DataFrame
        Object which stores ``variable`` and ``weights``.

    Returns
    -------
    skew : float

    References
    ----------
    Moment (mathematics). (2017, May 6). In Wikipedia, The Free Encyclopedia.
    Retrieved 14:40, May 15, 2017, from
    https://en.wikipedia.org/w/index.php?title=Moment_(mathematics)

    Notes
    -----
    It is an alias of the standardized third-order moment.
    """
    # Resolve column names / arrays against `data` when it is supplied.
    variable, weights = utils.extract_values(data, variable, weights)
    # NOTE(review): `stat` is presumably a project statistics module, not the
    # stdlib `stat` (which has no `skew`) -- confirm the real import.
    return stat.skew(variable, weights)
08be7f2e9741855b699e847307c61b14ab6b3009
25,571
def deep_initial_state(batch_size, h_size, stack_size):
    """Build the initial hidden state for a multi-layer GRU.

    One per-layer state is produced with ``static_initial_state`` and the
    results are packed into a tuple with one entry per layer.
    """
    per_layer = [static_initial_state(batch_size, h_size)
                 for _ in range(stack_size)]
    return tuple(per_layer)
4d6bc65d2fcb158a99a08d88c755c81ca08433f3
25,572
def create_element(pan_elem, elem_type=None)->Element:
    """
    Resolve the element type of a pandoc JSON node and build the
    corresponding Element via the registered constructor.

    Precedence: an explicit *elem_type* wins, then the node's ``'t'`` tag,
    then the whole-document marker ``'pandoc-api-version'``.

    :raises KeyError: when no type can be determined or the resolved type
        is not registered in ``_ELEMENT_TYPES``.
    """
    if elem_type is not None:
        etype = elem_type
    elif 't' in pan_elem:
        etype = pan_elem['t']
    elif 'pandoc-api-version' in pan_elem:
        etype = 'Pandoc'
    else:
        etype = 'ELEMENT TYPE MISSING'

    if etype not in _ELEMENT_TYPES:
        # Either the type could not be determined or `elem_type` is unknown.
        raise KeyError(etype)

    spec = _ELEMENT_TYPES[etype]
    # The factory itself is passed along so containers can build children.
    return spec['class'](pan_elem, etype, spec, create_element)
c5507a35e7a75676e450d0f960fd3b70c873440d
25,573
def load_weights(variables, file_name):
    """Reshapes and loads official pretrained Yolo weights.

    Args:
        variables: A list of tf.Variable to be assigned.
        file_name: A name of a file containing weights.

    Returns:
        A list of assign operations.
    """
    with open(file_name, "rb") as f:
        # Skip first 5 values containing irrelevant info
        np.fromfile(f, dtype=np.int32, count=5)
        weights = np.fromfile(f, dtype=np.float32)

        assign_ops = []
        # Read cursor into the flat weights array; every consumer below
        # advances it by the number of parameters it takes.
        ptr = 0

        # Load weights for Darknet part.
        # Each convolution layer has batch normalization.
        # Variables are laid out as [conv, beta, gamma, mean, variance] * 52.
        for i in range(52):
            conv_var = variables[5 * i]
            gamma, beta, mean, variance = variables[5 * i + 1:5 * i + 5]
            # Darknet stores batch-norm params in the order beta, gamma,
            # mean, variance -- hence the reordering here.
            batch_norm_vars = [beta, gamma, mean, variance]

            for var in batch_norm_vars:
                shape = var.shape.as_list()
                num_params = np.prod(shape)
                var_weights = weights[ptr:ptr + num_params].reshape(shape)
                ptr += num_params
                assign_ops.append(tf.assign(var, var_weights))

            shape = conv_var.shape.as_list()
            num_params = np.prod(shape)
            # Darknet conv layout is (out, in, h, w); transpose to TF's
            # (h, w, in, out).
            var_weights = weights[ptr:ptr + num_params].reshape(
                (shape[3], shape[2], shape[0], shape[1]))
            var_weights = np.transpose(var_weights, (2, 3, 1, 0))
            ptr += num_params
            assign_ops.append(tf.assign(conv_var, var_weights))

        # Loading weights for Yolo part.
        # 7th, 15th and 23rd convolution layer has biases and no batch norm.
        ranges = [range(0, 6), range(6, 13), range(13, 20)]
        unnormalized = [6, 13, 20]
        for j in range(3):
            for i in ranges[j]:
                current = 52 * 5 + 5 * i + j * 2
                conv_var = variables[current]
                gamma, beta, mean, variance = \
                    variables[current + 1:current + 5]
                batch_norm_vars = [beta, gamma, mean, variance]

                for var in batch_norm_vars:
                    shape = var.shape.as_list()
                    num_params = np.prod(shape)
                    var_weights = weights[ptr:ptr + num_params].reshape(shape)
                    ptr += num_params
                    assign_ops.append(tf.assign(var, var_weights))

                shape = conv_var.shape.as_list()
                num_params = np.prod(shape)
                var_weights = weights[ptr:ptr + num_params].reshape(
                    (shape[3], shape[2], shape[0], shape[1]))
                var_weights = np.transpose(var_weights, (2, 3, 1, 0))
                ptr += num_params
                assign_ops.append(tf.assign(conv_var, var_weights))

            # Detection-head conv: bias first, then the kernel (no batch norm).
            bias = variables[52 * 5 + unnormalized[j] * 5 + j * 2 + 1]
            shape = bias.shape.as_list()
            num_params = np.prod(shape)
            var_weights = weights[ptr:ptr + num_params].reshape(shape)
            ptr += num_params
            assign_ops.append(tf.assign(bias, var_weights))

            conv_var = variables[52 * 5 + unnormalized[j] * 5 + j * 2]
            shape = conv_var.shape.as_list()
            num_params = np.prod(shape)
            var_weights = weights[ptr:ptr + num_params].reshape(
                (shape[3], shape[2], shape[0], shape[1]))
            var_weights = np.transpose(var_weights, (2, 3, 1, 0))
            ptr += num_params
            assign_ops.append(tf.assign(conv_var, var_weights))

    return assign_ops
3d953792ae1e13285044f40dd840fe2400f20243
25,574
def parsing_sa_class_id_response(pdu: list) -> int:
    """Parsing TaiSEIA class ID response protocol data.

    :param pdu: raw protocol data unit (list of byte values)
    :return: the class ID decoded big-endian from the packet payload
    :raises ValueError: if the packet's service id is not READ_CLASS_ID
    """
    packet = SAInfoResponsePacket.from_pdu(pdu=pdu)

    if packet.service_id != SARegisterServiceIDEnum.READ_CLASS_ID:
        raise ValueError(f'pdu service id invalid, {pdu}')

    return int.from_bytes(packet.data_bytes, 'big')
e55c6e7041349f036babfd7e9699bfcfe1ff5dea
25,575
def wrr(self) -> int:
    """
    Name:           Write ROM port.

    Function:       The content of the accumulator is transferred to the ROM
                    output port of the previously selected ROM chip.
                    The data is available on the output pins until a new WRR
                    is executed on the same chip.
                    The content of the ACC and the carry/link are unaffected.
    Syntax:         WRR
    Assembled:      1110 0010
    Symbolic:       (ACC) --> ROM output lines
    Execution:      1 word, 8-bit code and an execution time of 10.8 usec.
    Side-effects:   The LSB bit of the accumulator appears on I/O 0, Pin 16,
                    of the 4001 ROM chip until it is changed.
    Notes:          No operation is performed on I/O lines coded as inputs.
                    4 chips per bank, 8 banks = 32 addressable ROM ports.
                    An address set by the previous SRC instruction is
                    interpreted as follows (bits in this order: 12345678):
                    Bits 1 - 4 = The ROM chip targetted
                    Bits 5 - 8 = Not relevant
    """
    # The preceding SRC instruction stored the target address in the command
    # register; only the ROM-chip field is relevant for WRR.
    rom, _unused1, _unused2 = \
        decode_command_register(self.COMMAND_REGISTER, 'ROM_PORT')
    self.ROM_PORT[rom] = self.ACCUMULATOR
    # WRR is a single-word instruction.
    self.increment_pc(1)
    return self.ACCUMULATOR
a019f176bba0e50d73906abd8a20862c4993b75f
25,576
def convert_to_signed_int_32_bit(hex_str):
    """Interpret *hex_str* as a 32-bit two's-complement integer.

    :param hex_str: hexadecimal string (e.g. ``"FFFFFFFF"``)
    :return: signed 32-bit integer value
    """
    unsigned = int(hex_str, 16)
    if unsigned <= 0x7FFFFFFF:
        # Fits in the positive range already.
        return unsigned
    # Shift into the negative half of the two's-complement range.
    return ((unsigned + 0x80000000) & 0xFFFFFFFF) - 0x80000000
f8d39b20475c30f162948167f8534e367d9c58e8
25,577
def parent_node(max_child_node, max_parent_node):
    """Attach a child node under a parent in the 3ds Max node hierarchy.

    :param max_child_node: MaxPlus.INode to be re-parented
    :param max_parent_node: MaxPlus.INode that becomes the parent
    :return: the (now re-parented) child node, for chaining
    """
    child = max_child_node
    child.SetParent(max_parent_node)
    return child
1a54d4c485e61361633165da0f05c8f871296ae6
25,578
import getpass
import re
import logging
import sys


def get_user():
    """Prompt the terminal user for QQ account credentials.

    The QQ number must be 5-10 digits and not start with 0; otherwise an
    error is logged in red and the process exits with status 1. The
    password is read without echoing it to the terminal.

    Returns:
        dict: ``{'username': <qq number>, 'password': <password>}``
    """
    qq_number = input('please input QQ number: ').strip()
    if re.match(r'^[1-9][0-9]{4,9}$', qq_number) is None:
        logging.error('\033[31mQQ number is wrong!\033[0m')
        sys.exit(1)
    return {
        'username': qq_number,
        'password': getpass.getpass('password: '),
    }
766e8332ea0bed1b793ba80cbf42a43bd54fb800
25,579
import tensorflow as tf
import torch


def to_numpy_or_python_type(tensors):
    """Converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.

    Every tensor in the (possibly nested) structure is detached, moved to
    CPU and converted via ``.numpy()``. Zero-dimensional results are
    further unwrapped to plain Python scalars with ``.item()``, which is
    usually more convenient than NumPy scalars (bfloat16 in particular
    supports fewer operations).

    Args:
        tensors: A structure of tensors.

    Returns:
        `tensors`, with scalar tensors replaced by Python scalars and
        non-scalar tensors replaced by NumPy arrays.
    """
    def _convert(value):
        if not isinstance(value, torch.Tensor):
            # Ragged/sparse/non-tensor leaves pass through untouched.
            return value
        array = value.detach().cpu().numpy()
        return array.item() if np.ndim(array) == 0 else array

    return tf.nest.map_structure(_convert, tensors)
34ea32fb2cf4fe8e45c429139876e7f1afc9f794
25,580
def _get_flow(args):
    """Ensure the same flow is used in hello world example and system test.

    :param args: parsed CLI namespace; only ``replicas`` and ``workdir``
        are read here.
    :return: a Flow with a transformer stage followed by an indexer stage.
    """
    return (
        Flow(cors=True)
        .add(uses=MyTransformer, replicas=args.replicas)
        .add(uses=MyIndexer, workspace=args.workdir)
    )
625164c400f420cbb255cfdaa32f79c4862e23ea
25,581
def get_group_id(
        client: AlgodClient,
        txids: list
) -> list:
    """
    Gets Group IDs from Transaction IDs

    :param client: an AlgodClient (GET)
    :param txids: Transaction IDs
    :return: gids - Group IDs
    """
    gids = []
    print("Getting gids...")
    try:
        # The previous ``while True: ...; break`` loop always executed
        # exactly once, so it is replaced by a single lookup.
        txn_infos = get_transaction_info(txids=txids, client=client)
        for txn_info in txn_infos:
            gid = txn_info['transaction']['group']
            # Only transactions that belong to a group carry a non-empty id.
            if len(gid) > 0:
                gids.append(gid)
    except Exception as e:
        # Best effort: report the error and return whatever was collected.
        print(e.args)
    return gids
937b29f6b482ed1e62612a07cc80c17c6737c143
25,582
import logging
import tqdm
import multiprocessing


def _simple_proc(st, sampling_rate=10, njobs=1):
    """
    A parallel version of `_proc`, i.e., Basic processing including
    downsampling, detrend, and demean.

    :param st: an obspy stream
    :param sampling_rate: expected sampling rate
    :param njobs: number of jobs or CPU to use
    :return st: stream after processing
    """
    # downsampling, detrend, demean
    do_work = partial(_proc, sampling_rate=sampling_rate)
    # Bug fix: st2 previously only existed in commented-out code, so every
    # st2.append() below raised NameError. It must be created up front.
    st2 = Stream()
    logging.info("simple processing for full event correlogram.")
    print("simple processing for full event correlogram.")
    if njobs == 1:
        logging.info('do work sequential (%d cores)', njobs)
        for tr in tqdm(st, total=len(st)):
            tr2 = do_work(tr)
            st2.append(tr2)
    else:
        logging.info('do work parallel (%d cores)', njobs)
        pool = multiprocessing.Pool(njobs)
        # imap_unordered keeps memory flat; trace order is not preserved.
        for tr2 in tqdm(pool.imap_unordered(do_work, st), total=len(st)):
            st2.append(tr2)
        pool.close()
        pool.join()
    return st2
aa24340d0d43ad8f6c042ed5e04bc94f2ec28cc3
25,583
from datetime import datetime


def closing_time(date=None):
    """ Get closing time of the given trading date (default: today).

    Returns 13:00 on scheduled NYSE early-close days and 16:00 otherwise.

    :param date: the trading date to look up; defaults to the current day.
        (The old signature used ``date=datetime.date.today()``, which is
        evaluated once at import time and therefore froze "today" for the
        lifetime of the process.)
    """
    if date is None:
        date = datetime.date.today()
    return datetime.time(13, 0) if date in nyse_close_early_dates(date.year) else datetime.time(16, 0)
40670512dbebfe65c3eb2b2790881fc91415aa40
25,584
def cos_fp16(x: tf.Tensor) -> tf.Tensor:
    """Run cos(x) in FP16 after wrapping the argument into [0, 2*pi).

    Inputs already in float16 go straight to ``tf.cos``. For wider dtypes
    the argument is reduced modulo 2*pi *before* the cast, so large values
    do not lose their fractional period in float16's narrow range.
    """
    if x.dtype == tf.float16:
        return tf.cos(x)
    wrapped = tf.mod(x, 2 * np.pi)
    return tf.cos(tf.cast(wrapped, tf.float16))
3212eb19e43fa733490d2cfcfffcc0094715022b
25,585
from typing import Callable


def is_documented_by(original: Callable) -> Callable[[_F], _F]:
    """
    Decorator factory that copies the documentation of ``original`` onto
    the decorated function. Handy for subclasses or wrappers that accept
    the same arguments as the function they shadow.

    :param original: the function whose documentation should be reused
    """
    def decorator(target: _F) -> _F:
        document_object_from_another(target, original)
        return target

    return decorator
acd582112371ccfffd53762546415353abbd3129
25,586
def check_if_bst(root, min, max):
    """Given a binary tree, check if it follows binary search tree property

    Every key in the subtree rooted at *root* must lie in the half-open
    interval ``[min, max)``. To start off, run
    ``check_if_bst(BT.root, -math.inf, math.inf)``.

    :param root: current node, or None for an empty subtree
    :param min: inclusive lower bound for keys in this subtree
    :param max: exclusive upper bound for keys in this subtree
    :return: True if the subtree satisfies the BST property
    """
    if root is None:
        # An empty subtree is trivially a valid BST.
        return True
    if root.key < min or root.key >= max:
        return False
    # Left keys must stay strictly below this key; right keys at or above it.
    return check_if_bst(root.left, min, root.key) and check_if_bst(
        root.right, root.key, max
    )
1bb4b601ef548aec9a4ab2cf5242bc5875c587a2
25,587
import os
import glob


def assert_widget_image(tmpdir, widget, filename, fail_now=True):
    """
    Render an image from the given WWT widget and assert that it matches an
    expected version. The expected version might vary depending on the
    platform and/or OpenGL renderer, so we allow for multiple references and
    just check if *any* of them matches well enough.

    We used to have specific test images identified for each platform, but
    the bookkeeping for that gets very finicky.

    :param tmpdir: pytest tmpdir fixture used when IMAGE_OUTPUT_DIR is unset
    :param widget: WWT widget instance with a ``render(path)`` method
    :param filename: name of the rendered PNG; its stem selects the
        reference directory ``refimg_<stem>``
    :param fail_now: fail the test immediately on mismatch; otherwise
        return a description of the failure
    :return: None on success; a failure message string when fail_now=False
    """
    # NOTE(review): the code below calls glob(...) directly, so the real
    # import is presumably "from glob import glob"; the bare "import glob"
    # above looks like an extraction artifact -- confirm.

    # If requested, save the "actual" images in another directory that will be
    # preserved beyond the test run.
    if IMAGE_OUTPUT_DIR:
        actual = os.path.join(IMAGE_OUTPUT_DIR, filename)
    else:
        actual = tmpdir.join(filename).strpath

    widget.render(actual)

    # Compare to the references
    refdir = os.path.join(DATA, 'refimg_' + os.path.splitext(filename)[0])
    results = []

    for refbase in sorted(os.listdir(refdir)):
        refname = os.path.splitext(refbase)[0]
        expected = os.path.join(refdir, refbase)

        rv = compare_images(
            expected,
            actual,
            tol=IMAGE_COMPARISON_TOLERANCE,
            in_decorator=True
        )

        if rv is None:
            # Success! Clean up any fail images (mostly for the
            # IMAGE_OUTPUT_DIR mode)
            for p in glob(actual.replace('.png', '_vs_*.png')):
                os.unlink(p)
            return None

        # compare_images leaves a "-failed-diff" image behind; rename it so
        # diffs against different references do not overwrite each other.
        failpath = actual.replace('.png', '-failed-diff.png')
        newfailpath = actual.replace('.png', '_vs_%s.png' % refname)
        os.rename(failpath, newfailpath)
        results.append((refname, rv['rms']))

    # Nothing was good enough :-(
    #
    # We used to have machinery here to emit a "reproduction script" that
    # printed out Python code to recreate the image files using big
    # BASE64-encoded strings, but now we can just use Azure Pipelines
    # artifacts. Consult the Git history if the reproduction script stuff is
    # needed again.

    msg = (
        'observed image %s did not match any references to required RMS tolerance of '
        '%.2f; results were: %s'
    ) % (actual, IMAGE_COMPARISON_TOLERANCE, ', '.join('%s=%.2f' % t for t in results))

    if fail_now:
        pytest.fail(msg, pytrace=False)

    return '{}: {}'.format(filename, msg)
6ffe5b6f573744e702af5a2ccf3ef69d1d9b7102
25,588
from typing import Union
import pathlib
from typing import Sequence
from typing import Any
import torchvision


def create_video_file(
    root: Union[pathlib.Path, str],
    name: Union[pathlib.Path, str],
    size: Union[Sequence[int], int] = (1, 3, 10, 10),
    fps: float = 25,
    **kwargs: Any,
) -> pathlib.Path:
    """Create an video file from random data.

    Args:
        root (Union[str, pathlib.Path]): Root directory the video file will be placed in.
        name (Union[str, pathlib.Path]): Name of the video file.
        size (Union[Sequence[int], int]): Size of the video that represents the
            ``(num_frames, num_channels, height, width)``. If scalar, the value
            is used for the height and width. If not provided, ``num_frames=1``
            and ``num_channels=3`` are assumed.
        fps (float): Frame rate in frames per second.
        kwargs (Any): Additional parameters passed to
            :func:`torchvision.io.write_video`.

    Returns:
        pathlib.Path: Path to the created image file.

    Raises:
        UsageError: If PyAV is not available.
    """
    # Normalize `size` step by step until it is a full (T, C, H, W) tuple.
    if isinstance(size, int):
        size = (size, size)
    if len(size) == 2:
        size = (3, *size)
    if len(size) == 3:
        size = (1, *size)
    if len(size) != 4:
        raise UsageError(
            f"The 'size' argument should either be an int or a sequence of length 2, 3, or 4. Got {len(size)} instead"
        )

    video = create_image_or_video_tensor(size)
    file = pathlib.Path(root) / name
    # write_video expects frames in THWC order, hence the permute from TCHW.
    torchvision.io.write_video(str(file), video.permute(0, 2, 3, 1), fps, **kwargs)
    return file
f11748ae86a80a5f4d9c859c313837fac7effa32
25,589
def aggregate(collection, pipeline):
    """Run an aggregation *pipeline* against *collection*.

    Args:
        collection: a `pymongo.collection.Collection` or
            `motor.motor_tornado.MotorCollection`
        pipeline: a MongoDB aggregation pipeline

    Returns:
        a `pymongo.command_cursor.CommandCursor` or
        `motor.motor_tornado.MotorCommandCursor`
    """
    # allowDiskUse lets the server spill large pipeline stages to disk
    # instead of aborting at the in-memory stage limit.
    return collection.aggregate(pipeline, allowDiskUse=True)
03ea889ea23fb81c6a329ee270df2ac253e90d69
25,590
def decryptAES(key, data, mode=2):
    """decrypt data with aes key

    :param key: AES key material
    :param data: ciphertext to decrypt
    :param mode: cipher-mode constant understood by ``aes.decryptData``
        (default 2; presumably CBC -- confirm against the ``aes`` module)
    :return: decrypted plaintext
    """
    # Thin wrapper around the project's `aes` helper module.
    return aes.decryptData(key, data, mode)
30f5b4173a8ed388a13481a2fd41293cd2304b21
25,591
import requests


def __ipv6_safe_get(endpoint: str, addr: str) -> str:
    """HTTP GET from endpoint with IPv6-safe Host: header

    Args:
        endpoint: The endpoint path starting with /
        addr: full address (IPV6 or IPv4) of server

    Notes:
        * This is needed because the Python requests module creates HTTP
          requests with the Host: header containing the scope (%xxxx) for
          IPv6, and some servers see this as invalid and return a
          400 Bad Request.
    """
    if addr.startswith('[') and not addr.startswith('[::1]'):
        # Drop any "%scope" zone suffix from the bracketed address.
        # Bug fix: the old code unconditionally appended ']', which doubled
        # the closing bracket whenever the address carried no zone id.
        host = addr.split("%")[0]
        if not host.endswith(']'):
            host += ']'
        headers = {'Host': host}
    else:
        headers = {}
    return requests.get(f"http://{addr}{endpoint}", headers=headers)
adb1c7c2300e9e41049a9eda957f264322095d9c
25,592
def format_advertisement(data):
    """ format advertisement data and scan response data.

    Walks the BLE advertising payload ``data`` as a sequence of
    length-prefixed fields ([len][type][value...]) and returns a dict
    mapping each recognized AD-type name to its raw value bytes.

    :param data: raw advertisement / scan-response bytes
    :return: dict of {AD type name: field value bytes}; parsing stops at
        the first zero-length or truncated field
    :raises KeyError: if a field's AD type is not in the lookup table
    """
    # Lookup table from the numeric AD type to a readable name.
    resolve_dict = {
        # FLAGS AD type
        st_constant.AD_TYPE_FLAGS: 'FLAGS',
        # Service UUID AD types
        st_constant.AD_TYPE_16_BIT_SERV_UUID: '16_BIT_SERV_UUID',
        st_constant.AD_TYPE_16_BIT_SERV_UUID_CMPLT_LIST: '16_BIT_SERV_UUID_CMPLT_LIST',
        st_constant.AD_TYPE_32_BIT_SERV_UUID: '32_BIT_SERV_UUID',
        st_constant.AD_TYPE_32_BIT_SERV_UUID_CMPLT_LIST: '32_BIT_SERV_UUID_CMPLT_LIST',
        st_constant.AD_TYPE_128_BIT_SERV_UUID: '128_BIT_SERV_UUID',
        st_constant.AD_TYPE_128_BIT_SERV_UUID_CMPLT_LIST: '128_BIT_SERV_UUID_CMPLT_LIST',
        # Local name AD types
        st_constant.AD_TYPE_SHORTENED_LOCAL_NAME: 'SHORTENED_LOCAL_NAME',
        st_constant.AD_TYPE_COMPLETE_LOCAL_NAME: 'COMPLETE_LOCAL_NAME',
        # TX power level AD type
        st_constant.AD_TYPE_TX_POWER_LEVEL: 'TX_POWER_LEVEL',
        # Class of device
        st_constant.AD_TYPE_CLASS_OF_DEVICE: 'CLASS_OF_DEVICE',
        # Security manager TK value AD type
        st_constant.AD_TYPE_SEC_MGR_TK_VALUE: 'SEC_MGR_TK_VALUE',
        # Security manager OOB flags
        st_constant.AD_TYPE_SEC_MGR_OOB_FLAGS: 'SEC_MGR_OOB_FLAGS',
        # Slave connection interval AD type
        st_constant.AD_TYPE_SLAVE_CONN_INTERVAL: 'SLAVE_CONN_INTERVAL',
        # Service solicitation UUID list AD types
        st_constant.AD_TYPE_SERV_SOLICIT_16_BIT_UUID_LIST: 'SERV_SOLICIT_16_BIT_UUID_LIST',
        st_constant.AD_TYPE_SERV_SOLICIT_32_BIT_UUID_LIST: 'SERV_SOLICIT_32_BIT_UUID_LIST',
        st_constant.AD_TYPE_SERV_SOLICIT_128_BIT_UUID_LIST: 'SERV_SOLICIT_128_BIT_UUID_LIST',
        # Service data AD type
        st_constant.AD_TYPE_SERVICE_DATA: 'SERVICE_DATA',
        # Manufaturer specific data AD type
        st_constant.AD_TYPE_MANUFACTURER_SPECIFIC_DATA: 'MANUFACTURER_SPECIFIC_DATA'
    }
    offset = 0
    size = len(data)
    advertisement_dict = {}
    while offset < size:
        # First byte of each field: length of (type byte + value bytes).
        field_len = int.from_bytes(data[offset:offset + 1], 'little')
        if field_len == 0 or offset + field_len > size:
            # Zero length or a field running past the buffer ends parsing.
            return advertisement_dict
        field_type = int.from_bytes(data[offset + 1:offset + 2], 'little')
        # Value occupies the remaining field_len - 1 bytes after the type.
        field_value = data[offset + 2:offset + 2 + field_len - 1]
        advertisement_dict.update({resolve_dict[field_type]: field_value})
        offset += field_len + 1
    return advertisement_dict
a2b2740c45debe6c801ac80d99c8ed2b4537c205
25,593
def is_dicom_file(path):
    """Check if the given path appears to be a dicom file.
    Only looks at the extension, not the contents.

    Args:
        path (str): The path to the dicom file

    Returns:
        bool: True if the file appears to be a dicom file
    """
    # str.endswith accepts a tuple of suffixes, replacing the manual loop.
    return path.lower().endswith(tuple(DICOM_EXTENSIONS))
2bd20b0f9bf40db24e9c6df4591127f59d07f882
25,594
import math


def build_graph(df_list, sens='ST', top=410, min_sens=0.01,
                edge_cutoff=0.0, edge_width=150, log=False):
    """
    Initializes and constructs a graph where vertices are the parameters
    selected from the first dataframe in 'df_list', subject to the constraints
    set by 'sens', 'top', and 'min_sens'.  Edges are the second order
    sensitivities of the interactions between those vertices,
    with sensitivities greater than 'edge_cutoff'.

    Parameters
    -----------
    df_list     : list
                  A list of two dataframes.  The first dataframe should be
                  the first/total order sensitivities collected by the
                  function data_processing.get_sa_data().
    sens        : str, optional
                  A string with the name of the sensitivity that you would
                  like to use for the vertices ('ST' or 'S1').
    top         : int, optional
                  An integer specifying the number of vertices to display (
                  the top sensitivity values).
    min_sens    : float, optional
                  A float with the minimum sensitivity to allow in the graph.
    edge_cutoff : float, optional
                  A float specifying the minimum second order sensitivity to
                  show as an edge in the graph.
    edge_width  : float, optional
                  A float specifing the edge width to be displayed
    log         : bool, optional
                  take the log of all the values

    Returns
    --------
    g : graph-tool object
        a graph-tool graph object of the network described above.  Each
        vertex has properties 'param', 'sensitivity', and 'confidence'
        corresponding to the name of the parameter, value of the sensitivity
        index, and it's confidence interval.  The only edge property is
        'second_sens', the second order sensitivity index for the
        interaction between the two vertices it connects.
    """
    # get the first/total index dataframe and second order dataframe
    df = df_list[0]
    df2 = df_list[1]

    # Make sure sens is ST or S1
    if sens not in set(['ST', 'S1']):
        raise ValueError('sens must be ST or S1')
    # Make sure that there is a second order index dataframe
    try:
        if not df2:
            raise Exception('Missing second order dataframe!')
    except:
        pass

    # slice the dataframes so the resulting graph will only include the top
    # 'top' values of 'sens' greater than 'min_sens'.
    df = df.sort_values(sens, ascending=False)
    df = df.loc[df[sens] > min_sens, :].head(top)
    df = df.reset_index()

    # initialize a graph
    g = Graph()

    vprop_sens = g.new_vertex_property('double')
    vprop_conf = g.new_vertex_property('double')
    vprop_name = g.new_vertex_property('string')
    eprop_sens = g.new_edge_property('double')

    g.vertex_properties['param'] = vprop_name
    g.vertex_properties['sensitivity'] = vprop_sens
    g.vertex_properties['confidence'] = vprop_conf
    g.edge_properties['second_sens'] = eprop_sens

    # keep a list of all the vertices
    v_list = []

    # Add the vertices to the graph
    for i, param in enumerate(df['Parameter']):
        v = g.add_vertex()
        vprop_sens[v] = df.loc[i, sens]
        # Confidence is stored as 1 + relative half-width of the interval.
        vprop_conf[v] = 1 + df.loc[i, '%s_conf' % sens] / df.loc[i, sens]
        if log:
            vprop_sens[v] = math.log10(vprop_sens[v])
            vprop_conf[v] = math.log10(vprop_conf[v])
        vprop_name[v] = param
        v_list.append(v)

    # Make two new columns in second order dataframe that point to the
    # vertices connected on each row.
    # -999 marks rows whose parameter was not selected as a vertex.
    df2['vertex1'] = -999
    df2['vertex2'] = -999
    for vertex in v_list:
        param = g.vp.param[vertex]
        df2.loc[df2['Parameter_1'] == param, 'vertex1'] = vertex
        df2.loc[df2['Parameter_2'] == param, 'vertex2'] = vertex

    # Only allow edges for vertices that we've defined
    df_edges = df2[(df2['vertex1'] != -999) & (df2['vertex2'] != -999)]

    # eliminate edges below a certain cutoff value
    pruned = df_edges[df_edges['S2'] > edge_cutoff]
    pruned.reset_index(inplace=True)

    # Add the edges for the graph
    for i, sensitivity in enumerate(pruned['S2']):
        v1 = pruned.loc[i, 'vertex1']
        v2 = pruned.loc[i, 'vertex2']
        e = g.add_edge(v1, v2)
        # multiply by a number to make the lines visible on the plot
        eprop_sens[e] = sensitivity if sensitivity > 0 else sensitivity * -1
        # if log:
        #     eprop_sens[e] = math.log10(eprop_sens[e])
        eprop_sens[e] *= edge_width

    # These are ways you can reference properties of vertices or edges
    # g.vp.param[g.vertex(77)]
    # g.vp.param[v_list[0]]

    print('Created a graph with %s vertices and %s edges.\nVertices are the '
          'top %s %s values greater than %s.\nOnly S2 values (edges) '
          'greater than %s are included.' %
          (g.num_vertices(), g.num_edges(), top, sens, min_sens, edge_cutoff))

    return g
b17b3f57ab21df0117e61a12005f401f81620368
25,595
def ranking_scores(prng=None, mix=False, permute=False, gamma=0.01, beta=5., N=100, l=1, means=None, stds=None):
    """
    Generate the ranking scores.

    Parameters
    ----------
    prng : random generator container
           Seed for the random number generator.
    mix : bool
          Flag for generating the ranking scores with a Gaussian mixture.
    permute : bool
              Flag for permuting the node before associating a ranking score to each of them, i.e. the
              hierarchical block structure induced on the adjacency matrix is randomized.
    gamma : float
            The spring constant for (s, origin).
    beta : float
           Inveres temperature parameter.
    N : int
        Number of nodes.
    l : int
        Number of leagues
    means : list
            List of means to be used for the scores generation.
    stds : list
           List of standard deviations to be used for the scores generation.
    Returns
    ----------
    s : Numpy array
        N-dimensional array of real ranking scores for each node.
    nodes_s : Numpy array
              Result of the random permutation applied to the node IDs (if required). Can be used for
              inverting the permutation and induce the block structure generated by the leagues
              on the adjacency matrix.
    """
    if prng is None:
        # Set seed random number generator
        prng = np.random.RandomState(seed = 42)
    if mix:
        if means is None:
            means = prng.randint(-5, 5, l)
        if stds is None:
            # NOTE(review): randint(0, 1, l) always yields zeros (high bound is
            # exclusive), i.e. degenerate normals equal to their mean — kept as-is
            # to preserve behavior, but this is probably not intended; confirm.
            stds = prng.randint(0, 1, l)
        # Draw N // l scores per league; the last league absorbs the remainder.
        # (Previously the last league was generated only when N % l != 0, which
        # silently dropped it for even splits and crashed for l == 1.)
        sizes = [N // l] * l
        sizes[-1] += N % l
        s = np.concatenate([prng.normal(means[i], stds[i], sizes[i]) for i in range(l)])
        if permute:
            # shuffle s in order to not have a ranking structure overlapped to the communities one
            nodes_s = prng.permutation(N)
            s = s[nodes_s]
        else:
            nodes_s = np.arange(N)
    else:
        # Generate s through factorized Gaussian, l0 = 0
        s = prng.normal(0, 1. / np.sqrt(gamma * beta), N)
        nodes_s = np.arange(N)

    return s, nodes_s
40801599ab67d852740d5219d22debdbed91de39
25,596
def calculate_direction(G, cutoff, normalize=True):
    """ Calculate direction for entire network

    For every node, the farthest reachable nodes within ``cutoff`` hops are
    located; the vector between two such extremal nodes (or, when only one
    exists, between it and the farthest node at ``cutoff - 1``) defines the
    node's direction, stored as 'dx'/'dy' node attributes.

    Parameters
    ----------
    G : nx.Graph
        Fault network. Every node must carry a 'pos' attribute (x, y).
    cutoff : int, float
        Cutoff distance for direction
    normalize : boolean
        Normalize direction to unit length (default: True)

    Returns
    -------
    G : nx.Graph
        The same graph with 'dx' and 'dy' set on every node.
    """
    # Assertions
    assert isinstance(G, nx.Graph), 'G is not a NetworkX graph'

    # Calculation
    for node in G.nodes:
        # Nodes at the maximum shortest-path distance from `node` within cutoff
        length = nx.single_source_shortest_path_length(G, node, cutoff=cutoff)
        keys = [k for k, v in length.items() if v == max(length.values())]

        if len(keys) >= 2:
            # Two (or more) extremal nodes: take the first pair.
            node_0, node_1 = keys[0], keys[1]
        else:
            # Single extremal node: pair it with the farthest node found at
            # the next-smaller cutoff.
            node_0 = keys[0]
            length = nx.single_source_shortest_path_length(G, node,
                                                           cutoff=cutoff - 1)
            keys = [k for k, v in length.items() if v == max(length.values())]
            node_1 = keys[0]

        # extract positions
        pt_0 = G.nodes[node_0]['pos']
        pt_1 = G.nodes[node_1]['pos']

        # calculate vector
        dx = pt_0[0] - pt_1[0]
        dy = pt_0[1] - pt_1[1]

        # Honor the `normalize` flag (it was previously ignored) and guard
        # against a zero-length vector, which would otherwise produce NaN.
        if normalize:
            norm = np.linalg.norm([dx, dy])
            if norm > 0:
                dx = dx / norm
                dy = dy / norm

        # write to graph
        G.nodes[node]['dx'] = dx
        G.nodes[node]['dy'] = dy

    return G
9b64e0e8226579728f76ab510e672372cb708338
25,597
from datetime import datetime


def generateVtBar(row):
    """Convert one row of quote data into a VtBarData K-line bar."""
    bar = VtBarData()

    # Contract identifiers; exchange is left blank and vtSymbol mirrors symbol.
    bar.symbol = row['code']
    bar.exchange = ''
    bar.vtSymbol = bar.symbol

    # OHLCV fields are copied straight from the row under the same names.
    for field in ('open', 'high', 'low', 'close', 'volume'):
        setattr(bar, field, row[field])

    # Parse the timestamp once and derive the date/time string fields from it.
    dt = datetime.strptime(row['time_key'], '%Y-%m-%d %H:%M:%S')
    bar.datetime = dt
    bar.date = dt.strftime("%Y%m%d")
    bar.time = dt.strftime("%H:%M:%S")

    return bar
8431b313927692743d727ef9225e33899cc6c916
25,598
async def is_valid_channel(ctx, channel_name):
    """Return True if the guild has a channel named `channel_name`,
    otherwise notify via ctx.send and return False.

    TODO: Use discord.py converters instead of is_valid_channel check
    """
    if any(channel.name == channel_name for channel in ctx.guild.channels):
        return True

    await ctx.send(
        f"Cannot find the channel {channel_name} ensure that the bot has"
        " permissions for the channel."
    )
    return False
46fd29fff440151e9478c8a5bd9cad9d8c04edd3
25,599