content : string (lengths 35 – 762k)
sha1 : string (length 40)
id : int64 (range 0 – 3.66M)
import numpy as np


def get_IoU_from_matches(match_pred2gt, matched_classes, overlaps):
    """Given an image, calculate the IoU of the segments in the image.

    :param match_pred2gt: maps index of predicted segment to index of ground truth segment
    :param matched_classes: maps index of predicted segment to class number
    :param overlaps: maps [predicted segment index, gt segment index] to the IoU value of the segments
    :return: 1. IoUs - IoU for all segments
             2. IoUs_classes - mean IoU per class
    """
    IoUs = [[] for _ in range(5)]
    match_pred2gt = match_pred2gt.astype(np.int32)
    for pred, gt in enumerate(match_pred2gt):
        if gt < 0:
            continue
        IoUs[matched_classes[pred]].append(overlaps[pred, gt])
    # mean segment IoU per class
    IoUs_classes = np.zeros((5, 1))
    for class_idx, lst in enumerate(IoUs):
        if not lst:
            continue
        IoUs_classes[class_idx] = np.mean(np.array(lst))
    return IoUs, IoUs_classes
2488c590d86a639898fc1e84c6a6d24afb7c2df4
3,639,600
import numpy as np


def id_queue(obs_list, prediction_url='http://plants.deep.ifca.es/api', shuffle=False):
    """Return a generator of identifications via a buffer, so the
    identification query for the next observation runs while the user is
    still viewing the current information.
    """
    print("Generating the identification buffer ...")
    if shuffle:
        indices = np.arange(len(obs_list))
        np.random.shuffle(indices)
        # apply the shuffled order (the original computed but never used the indices)
        obs_list = [obs_list[i] for i in indices]

    def gen(obs_list):
        for obs in obs_list:
            yield make_prediction(obs, prediction_url)

    return buffered_gen_threaded(gen(obs_list))
37773fc9d688b000a1d02b083f89e0b4996a52ea
3,639,601
import numpy as np
from numpy.fft import fft  # the original module provides fft; numpy's works here


def periodogram(x, nfft=None, fs=1):
    """Compute the periodogram of the given signal, with the given fft size.

    Parameters
    ----------
    x : array-like
        input signal
    nfft : int
        size of the fft to compute the periodogram. If None (default), the
        length of the signal is used. If nfft > n, the signal is 0 padded.
    fs : float
        Sampling rate. By default, is 1 (normalized frequency, e.g. 0.5 is
        the Nyquist limit).

    Returns
    -------
    pxx : array-like
        The psd estimate.
    fgrid : array-like
        Frequency grid over which the periodogram was estimated.

    Examples
    --------
    Generate a signal with two sinusoids, and compute its periodogram:

    >>> fs = 1000
    >>> x = np.sin(2 * np.pi * 0.1 * fs * np.linspace(0, 0.5, 0.5*fs))
    >>> x += np.sin(2 * np.pi * 0.2 * fs * np.linspace(0, 0.5, 0.5*fs))
    >>> px, fx = periodogram(x, 512, fs)

    Notes
    -----
    Only real signals are supported for now. Returns the one-sided version of
    the periodogram.

    Discrepancy with MATLAB: MATLAB computes the psd in units of
    power / radian / sample, and we compute the psd in units of
    power / sample: to get the same result as MATLAB, just multiply the
    result from talkbox by 2 pi.
    """
    x = np.atleast_1d(x)
    n = x.size

    if x.ndim > 1:
        raise ValueError("Only rank 1 input supported for now.")
    if not np.isrealobj(x):
        raise ValueError("Only real input supported for now.")
    if not nfft:
        nfft = n
    if nfft < n:
        raise ValueError("nfft < signal size not supported yet")

    pxx = np.abs(fft(x, nfft)) ** 2
    if nfft % 2 == 0:
        pn = nfft // 2 + 1
    else:
        pn = (nfft + 1) // 2

    fgrid = np.linspace(0, fs * 0.5, pn)
    return pxx[:pn] / (n * fs), fgrid
899cacc316cf80e79871d01b0c0b3a84deda8042
3,639,602
def handle_429(e):
    """Render the full error page for too many site queries."""
    html = render.html("429")
    client_addr = get_ipaddr()
    count_ratelimit.labels(e, client_addr).inc()
    logger.error(f"Error: {e}, Source: {client_addr}")
    return html, 429
b7a27e55f753dc254e19d1b51ddb169c8e683a2c
3,639,603
def url_path_join(*items):
    """Make it easier to build a url path by joining all arguments with a '/'
    character.

    Args:
        items (list): Path elements
    """
    return "/".join([item.strip("/") for item in items])
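A quick usage sketch with made-up values (not part of the dataset entry); note that any leading slash is dropped:

    assert url_path_join("/api/", "/v1/", "users/") == "api/v1/users"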
d864c870f9d52bad1268c843098a9f7e1fa69158
3,639,604
def f_match(pattern, string, flags=None):
    """Match function.

    Args:
        pattern (string): regexp (pattern|/pattern/flags)
        string (string): tested string
        flags (int): regexp flags

    Return:
        boolean
    """
    return bool(build_regexp(pattern, flags).search(to_string(string)))
31871f35568ca71c86535cfda5d434a57008f981
3,639,605
import time

import torch


def validate_epoch(val_loader, model, criterion, epoch, args):
    """Perform validation on the validation set."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    with torch.no_grad():  # replaces the removed volatile=True Variables
        for batch_idx, (input, target) in enumerate(val_loader):
            data_time.update(time.time() - end)

            # `async=True` is a syntax error on Python 3.7+; non_blocking is the replacement
            target = target.cuda(non_blocking=True)
            input = input.cuda()

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1 = accuracy(output.data, target, topk=(1,))[0]
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = ('({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | '
                          'Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | '
                          'top1: {top1: .4f}').format(
                batch=batch_idx + 1,
                size=len(val_loader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
            )
            bar.next()
    bar.finish()
    return losses.avg, top1.avg
084d3c5b200470cd9b3a3d905c83c1046df0b96e
3,639,606
def utf8_german_fix(uglystring):
    """If your string contains mojibake for German characters (like ü, ö, ä
    or ß) in your source file, run it through here. This restores the German
    "Umlaute", making (ÄÖÜäöü߀) compatible for processing.

        print(utf8_german_fix("Ã¼ÃŸâ‚¬")) == ü߀
    """
    # Each replacement maps the UTF-8-bytes-decoded-as-Latin-1 form back to
    # the intended character (the literal pairs were collapsed to identical
    # characters by encoding damage in this copy; reconstructed here).
    uglystring = uglystring.replace('Ã¼', 'ü')
    uglystring = uglystring.replace('Ã¶', 'ö')
    uglystring = uglystring.replace('Ã¤', 'ä')
    uglystring = uglystring.replace('Ã„', 'Ä')
    uglystring = uglystring.replace('Ã–', 'Ö')
    uglystring = uglystring.replace('Ãœ', 'Ü')
    uglystring = uglystring.replace('ÃŸ', 'ß')
    # This was born out of necessity, as there were some issues with a certain
    # API not processing German properly.
    # I am always looking for a smarter way to do this.
    nicestring = uglystring.replace('â‚¬', '€')
    return nicestring
7ed12d819b384e3bb5cb019ce7b7afe3d6bb8b86
3,639,607
from math import acos, cos, degrees, radians, sin


def subtends(a1, b1, a2, b2, units='radians'):
    """Calculate the angle subtended by 2 positions on a sphere."""
    if units.lower() == 'degrees':
        a1 = radians(a1)
        b1 = radians(b1)
        a2 = radians(a2)
        b2 = radians(b2)
    x1 = cos(a1) * cos(b1)
    y1 = sin(a1) * cos(b1)
    z1 = sin(b1)
    x2 = cos(a2) * cos(b2)
    y2 = sin(a2) * cos(b2)
    z2 = sin(b2)
    theta = Angle.fromDegrees(degrees(acos(x1 * x2 + y1 * y2 + z1 * z2)))
    return theta
f9e99119666fba375240111668229d400f1e37e5
3,639,608
import os
import sys


def get_pcgr_bin():
    """Return abs path to e.g. conda/env/pcgr/bin."""
    return os.path.dirname(os.path.realpath(sys.executable))
abd85ffc2ad348e2c5dee260561e1da2b18efca4
3,639,609
import inspect


def get_args(obj):
    """Get a list of argument names for a callable."""
    # Note: inspect.getargspec is deprecated; inspect.getfullargspec is the
    # modern equivalent.
    if inspect.isfunction(obj):
        return inspect.getargspec(obj).args
    elif inspect.ismethod(obj):
        return inspect.getargspec(obj).args[1:]
    elif inspect.isclass(obj):
        return inspect.getargspec(obj.__init__).args[1:]
    elif hasattr(obj, '__call__'):
        return inspect.getargspec(obj.__call__).args[1:]
    else:
        raise TypeError("Can't inspect signature of '%s' object." % obj)
e9fb13c155a8d8589a619491d44be1c9194c29bc
3,639,610
import json
import logging
import os

import boto3

logger = logging.getLogger(__name__)


def get_assumed_role_creds(service_name, assume_role_policy):
    """Return a new assume role object with AccessID, SecretKey and SessionToken.

    :param service_name:
    :param assume_role_policy:
    """
    sts_client = boto3.client("sts", region_name=os.environ["AWS_REGION"])
    assumed_role = sts_client.assume_role(
        RoleArn=os.environ["IAMROLE_LMDEXEC_ARN"],
        RoleSessionName="aws-saasfactory-s3",
        Policy=json.dumps(assume_role_policy),
    )
    credentials = assumed_role["Credentials"]
    logger.info("helper.get_assumed_role_creds: assumed_role --> %s", assumed_role)
    return credentials
f73496bd9191eae4264131b3314c4a0f51924d63
3,639,611
def name_value(obj):
    """Convert (key, value) pairs to HAR format."""
    return [{"name": k, "value": v} for k, v in obj.items()]
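A quick usage sketch with a hypothetical header dict:

    assert name_value({"Host": "example.com"}) == [{"name": "Host", "value": "example.com"}]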
d9a5bef186b259401302f3b489033325e32af1f5
3,639,612
import pandas as pd


def get_ids(id_type):
    """Get unique article identifiers from the dataset.

    Parameters
    ----------
    id_type : str
        Dataframe column name, e.g. 'pubmed_id', 'pmcid', 'doi'.

    Returns
    -------
    list of str
        List of unique identifiers in the dataset, e.g. all unique PMCIDs.
    """
    global doc_df
    if doc_df is None:
        doc_df = get_metadata_df()
    unique_ids = list(doc_df[~pd.isna(doc_df[id_type])][id_type].unique())
    return unique_ids
6b70d74d79ce7dcdd3654c09f1413ab468514eaa
3,639,613
def get_items_info(request):
    """Get a collection of item objects."""
    result = request.dbsession.query(Item).all()
    results = []
    for c in result:
        results.append({'id': c.id, 'markup': c.markup})
    return results
29265a41ffba7cda211fc86b8c60cae872167b12
3,639,614
def _test(value, *args, **keywargs):
    """
    A function that exists for test purposes.

    >>> checks = [
    ...     '3, 6, min=1, max=3, test=list(a, b, c)',
    ...     '3',
    ...     '3, 6',
    ...     '3,',
    ...     'min=1, test="a b c"',
    ...     'min=5, test="a, b, c"',
    ...     'min=1, max=3, test="a, b, c"',
    ...     'min=-100, test=-99',
    ...     'min=1, max=3',
    ...     '3, 6, test="36"',
    ...     '3, 6, test="a, b, c"',
    ...     '3, max=3, test=list("a", "b", "c")',
    ...     '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
    ...     "test='x=fish(3)'",
    ...     ]
    >>> v = Validator({'test': _test})
    >>> for entry in checks:
    ...     print(v.check(('test(%s)' % entry), 3))
    (3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'})
    (3, ('3',), {})
    (3, ('3', '6'), {})
    (3, ('3',), {})
    (3, (), {'test': 'a b c', 'min': '1'})
    (3, (), {'test': 'a, b, c', 'min': '5'})
    (3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'})
    (3, (), {'test': '-99', 'min': '-100'})
    (3, (), {'max': '3', 'min': '1'})
    (3, ('3', '6'), {'test': '36'})
    (3, ('3', '6'), {'test': 'a, b, c'})
    (3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'})
    (3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'})
    (3, (), {'test': 'x=fish(3)'})
    """
    return (value, args, keywargs)
c011c9386392c4b8dc8034fee33bfcfdec9845ed
3,639,615
from typing import Any, Sequence, Tuple, Union


def plot_chromaticity_diagram_CIE1976UCS(
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    show_diagram_colours: Boolean = True,
    show_spectral_locus: Boolean = True,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *CIE 1976 UCS Chromaticity Diagram*.

    Parameters
    ----------
    cmfs
        Standard observer colour matching functions used for computing the
        spectral locus boundaries. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    show_diagram_colours
        Whether to display the *Chromaticity Diagram* background colours.
    show_spectral_locus
        Whether to display the *Spectral Locus*.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_chromaticity_diagram_CIE1976UCS()  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1976UCS.png
        :align: center
        :alt: plot_chromaticity_diagram_CIE1976UCS
    """
    settings = dict(kwargs)
    settings.update({"method": "CIE 1976 UCS"})

    return plot_chromaticity_diagram(
        cmfs, show_diagram_colours, show_spectral_locus, **settings
    )
e9621c2e94dc7a43401905e9a633692a28a1a4d1
3,639,616
def generateFilter(targetType, left=False):
    """Generate a filter function for loaded plugins."""
    def filter(plugins):
        # Iterate over a copy: removing from the list while iterating over it
        # directly would skip elements.
        for pi in list(plugins):
            if left:
                if not pi.isThisType(targetType):
                    plugins.remove(pi)
                    logger.info("Plugin: {} is filtered out by predefined filter"
                                .format(pi.namePlugin()))
            else:
                if pi.isThisType(targetType):
                    plugins.remove(pi)
                    logger.info("Plugin: {} is filtered out by predefined filter"
                                .format(pi.namePlugin()))
    return filter
db97ecd3700bd3c7b56a26cc3d49d4825fb9dc61
3,639,617
import os

import numpy as np
import pandas as pd


def load_vel_map(component="u"):
    """Load all mean streamwise velocity profiles.

    Returns a `DataFrame` with `z_H` as the index and `y_R` as columns.
    """
    # Define columns in set raw data file
    columns = dict(u=1, v=2, w=3)
    sets_dir = os.path.join("postProcessing", "sets")
    latest_time = max(os.listdir(sets_dir))
    data_dir = os.path.join(sets_dir, latest_time)
    flist = os.listdir(data_dir)
    z_H = []
    for fname in flist:
        if "UMean" in fname:
            z_H.append(float(fname.split("_")[1]))
    z_H.sort()
    z_H.reverse()
    vel = []
    for zi in z_H:
        fname = "profile_{}_UMean.xy".format(zi)
        rawdata = np.loadtxt(os.path.join(data_dir, fname), unpack=True)
        vel.append(rawdata[columns[component]])
    y_R = rawdata[0]/R
    z_H = np.array(z_H)/H
    vel = np.array(vel).reshape((len(z_H), len(y_R)))
    df = pd.DataFrame(vel, index=z_H, columns=y_R)
    return df
53debae761a1c22124517d97f7ad9d7aa8c5ff38
3,639,618
import json
import sys
from pprint import pprint

import requests


def check_int(es_url, es_index, hash_id):
    """Query for interferograms with specified input hash ID."""
    query = {
        "query": {
            "bool": {
                "must": [
                    {"term": {"metadata.input_hash_id": hash_id}},
                ]
            }
        }
    }
    if es_url.endswith('/'):
        search_url = '%s%s/_search' % (es_url, es_index)
    else:
        search_url = '%s/%s/_search' % (es_url, es_index)
    r = requests.post(search_url, data=json.dumps(query))
    if r.status_code != 200:
        print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
        print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
        print("returned: %s" % r.text, file=sys.stderr)
        r.raise_for_status()
    result = r.json()
    pprint(result)
    total = result['hits']['total']
    if total == 0:
        id = 'NONE'
    else:
        id = result['hits']['hits'][0]['_id']
    return total, id
9b3ec23b233acb8471e13dfa6ae2d511c81aab76
3,639,619
def format_sample_case(s: str) -> str:
    """format_sample_case converts a string s to a good form as a sample case.

    A good form means that it uses LF instead of CRLF, it has a trailing
    newline, and it has no superfluous whitespace.
    """
    if not s.strip():
        return ''
    lines = s.strip().splitlines()
    lines = [line.strip() + '\n' for line in lines]
    return ''.join(lines)
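A quick usage sketch with a made-up input:

    assert format_sample_case("  1 2 \r\n 3  ") == "1 2\n3\n"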
cd691f2bfc8cc56db85f2a55ff3bf4b5afd5f30e
3,639,620
import json
from decimal import Decimal
from typing import Any


def replace_floats_with_decimals(obj: Any, round_digits: int = 9) -> Any:
    """Convert all instances in `obj` of `float` to `Decimal`.

    Args:
        obj: Input object.
        round_digits: Rounding precision of `Decimal` values.

    Returns:
        Input `obj` with all `float` types replaced by `Decimal`s rounded to
        `round_digits` decimal places.
    """
    def _float_to_rounded_decimal(s: str) -> Decimal:
        return Decimal(s).quantize(Decimal(10) ** -round_digits)

    return json.loads(json.dumps(obj), parse_float=_float_to_rounded_decimal)
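A quick usage sketch with made-up values, showing the round-trip through JSON:

    from decimal import Decimal
    out = replace_floats_with_decimals({"pi": 3.14159265358979}, round_digits=4)
    assert out == {"pi": Decimal("3.1416")}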
60529b4542a3b969b6b6fbe67fd6f26b3b7f3c25
3,639,621
import numpy as np


def _calc_range_mixed_data_columns(data, observation, dtypes):
    """Return the range for each numeric column, 0 for categorical variables."""
    _, cols = data.shape
    result = np.zeros(cols)
    for col in range(cols):
        if np.issubdtype(dtypes[col], np.number):
            result[col] = (max(max(data[:, col]), observation[col])
                           - min(min(data[:, col]), observation[col]))
    return result
c135227d50b5dd7c6fb1a047ed959ef6c22733f4
3,639,622
import os


def get_from_hdfs(file_hdfs):
    """Compatible with an HDFS path or a local path."""
    if file_hdfs.startswith('hdfs'):
        file_local = os.path.split(file_hdfs)[-1]
        if os.path.exists(file_local):
            print(f"rm existing {file_local}")
            os.system(f"rm {file_local}")
        hcopy(file_hdfs, file_local)
    else:
        file_local = file_hdfs
        assert os.path.exists(file_local)
    return file_local
cdf2df71294ab73f589bd1ea821502459c03c02f
3,639,623
def search(request):
    """
    Display search form/results for events (using distance-based search).

    Template: events/search.html
    Context:
        form - ``anthill.events.forms.SearchForm``
        event_list - events in the near future
        searched - True/False based on if a search was done
        search_results - list of results (may be empty)
    """
    upcoming_events = Event.objects.future().select_related()[0:5]
    if request.GET:
        form = SearchForm(request.GET)
        # Guard on validity before reading cleaned_data (the original
        # discarded the result of is_valid()).
        if form.is_valid():
            name = form.cleaned_data['name']
            location = form.cleaned_data['location']
            location_range = form.cleaned_data['location_range']
            # only events that haven't happened
            events = Event.objects.future().select_related()
            if name:
                events = events.filter(title__icontains=name)
            if location:
                events = events.search_by_distance(location, location_range)
            context = {'form': form, 'searched': True, 'search_results': events,
                       'event_list': upcoming_events}
        else:
            context = {'form': form, 'event_list': upcoming_events}
    else:
        context = {'form': SearchForm(), 'event_list': upcoming_events}
    return render_to_response('events/search.html', context,
                              context_instance=RequestContext(request))
adeb3f509854ab9dcd2a50aa6833d96714d8603b
3,639,624
def logout() -> Response:
    """Logout route.

    Logs the current user out.

    :return: A redirect to the landing page.
    """
    name: str = current_user.name
    logout_user()
    flash(f'User "{name}" logged out.', 'info')
    url: str = url_for('root')
    output: Response = redirect(url)
    return output
26577da8f5a4bf5feb884c493043877e7c9bd5e7
3,639,625
def load_room(name):
    """
    There is a potential security problem here. Who gets to set name?
    Can that expose a variable?
    """
    return globals().get(name)
14034adf76b8fd086b798cd312977930d42b6e07
3,639,626
def call_ipt_func(ipt_id: str, function_name: str, source, **kwargs):
    """Process an image/wrapper with an IPT using a function-like syntax.

    :param ipt_id:
    :param function_name:
    :param source:
    :param kwargs:
    :return:
    """
    cls_ = get_ipt_class(ipt_id)
    if cls_ is not None:
        item = cls_(**kwargs)
        func = getattr(item, function_name, None)
        if callable(func):
            return func(wrapper=source)
    return None
08645a857981088f6fbde79c8a2aa7057c67445f
3,639,627
def is_running(service):
    """Check if a service is running using the sysdmanager library.

    :param service: Service to be checked.
    :return: 1 if the service is running, 0 otherwise.
    """
    manager = get_manager()
    if manager.is_active(service + ".service"):
        return 1
    return 0
55cef1df395c2082fa5e0243704a0804807a0b22
3,639,628
import numpy as np


def smoothen(data, kernel):
    """Convolve data with an odd-size kernel, with boundary handling."""
    n, = kernel.shape
    assert n % 2 == 1
    m = (n - 1) // 2
    # pad input data
    k = m // 2 + 1
    data_padded = np.concatenate([
        np.full(m, data[:k].mean()),
        data,
        np.full(m, data[-k:].mean())
    ])
    smooth = np.convolve(data_padded, kernel, mode='same')[m:-m]
    assert smooth.shape == data.shape
    return smooth
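A quick usage sketch with made-up values, using a simple 3-point moving average as the kernel:

    import numpy as np
    data = np.array([1.0, 2.0, 4.0, 8.0, 16.0])
    kernel = np.ones(3) / 3.0     # 3-point moving average
    out = smoothen(data, kernel)  # boundary padding keeps the output length equal to the input
    assert out.shape == data.shape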
06381249118dc54524ad1617f7e0c01a273cf4a8
3,639,629
def find_node_name(node_id, g):
    """Return the 'label' attribute of the node with the given id."""
    return g.node[node_id]["label"]
a4656659aeef0427a74822991c2594064b1a9411
3,639,630
from math import ceil


def xor(data1, data2):
    """Byte-wise XOR of two equal-length int lists (operator.xor, which the
    original imported, does not work element-wise on lists)."""
    return [d1 ^ d2 for d1, d2 in zip(data1, data2)]


def aes_cbc_decrypt(data, key, iv):
    """
    Decrypt with aes in CBC mode

    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte IV
    @returns {int[]}           decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    decrypted_data = []
    previous_cipher_block = iv
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        block += [0] * (BLOCK_SIZE_BYTES - len(block))

        decrypted_block = aes_decrypt(block, expanded_key)
        decrypted_data += xor(decrypted_block, previous_cipher_block)
        previous_cipher_block = block
    decrypted_data = decrypted_data[:len(data)]

    return decrypted_data
37b685f9e497456e75e3a3e83de9b3b4572da328
3,639,631
from collections import Counter
from typing import Dict

import numpy as np


def score_concepts(merged_graph: AMR, counts: tuple,
                   concept_alignments: Dict[str, str]) -> Counter:
    """
    Calculate TF-IDF counts for each node (concept) in `merged_graph`
    according to their aligned words.

    Parameters:
        merged_graph (AMR): Graph which contains the concepts to be scored.
        counts (tuple): A tuple returned by the DohareEtAl2018.get_tf_idf() function.
        concept_alignments (dict): A dictionary that maps concepts into a list of words.

    Returns:
        Counter: All TF-IDF scores for each concept. If the concept does not
        exist, the score is 0.
    """
    tf_idf, tf_counts, df_counts, num_docs = counts
    # Get score for each node
    concept_scores = dict()
    for c in merged_graph.get_concept_nodes():
        concept = merged_graph.nodes[c]['label']
        if concept in concept_alignments:
            tf = 0
            df = 0
            for w in concept_alignments[concept]:
                try:
                    tf += tf_counts[0, tf_idf.vocabulary_[w]]
                    df += df_counts[0, tf_idf.vocabulary_[w]]
                except KeyError:
                    pass
            concept_scores[concept] = tf * np.log(num_docs / (df + 1))  # TF-IDF
    concept_scores = Counter(concept_scores)
    return concept_scores
73739ede67ddbc74a3f7c17740b6f31929215e11
3,639,632
from pyspark.sql.functions import col
from pyspark.sql.types import DoubleType


def timestamp_to_double(sparkdf):
    """Utility function to cast columns of type 'timestamp' to type 'double'."""
    for dtype in sparkdf.dtypes:
        if dtype[1] == 'timestamp':
            sparkdf = sparkdf.withColumn(dtype[0], col(dtype[0]).cast(DoubleType()))
    return sparkdf
5ee647dd5452c3c1f51140db944170698e81d7be
3,639,633
import time
from typing import List


def get_entrez_id_from_organism_full_name_batch(organism_full_names: List[str]) -> List[str]:
    """Retrieve the Entrez numeric IDs of the given organisms.

    This numeric identifier is necessary for BLAST and NCBI TAXONOMY searches.
    This function uses Biopython functions. Returns BLAST-compatible IDs in
    the form txid + NCBI ID + [ORGN].

    Arguments:
    >organism_full_names: List[str] ~ A list of full names of organisms,
     e.g. "Xanthomonas campestris pv. campestris B100"
    """
    batch_start = 0
    organism_ncbi_ids_result: List[str] = []
    # Go through each organism :D
    while batch_start < len(organism_full_names):
        organism_full_names_slice = organism_full_names[batch_start:batch_start + NCBI_BATCH_SIZE]
        query_names = " OR ".join(organism_full_names_slice)
        # An e-mail has to be set; you may change it to yours if you want to
        # be notified if any problems occur.
        Entrez.email = "x@x.x"
        # Set the Entrez search to the NCBI TAXONOMY database.
        handle = Entrez.esearch(db="Taxonomy", term=query_names)
        # Wait in order to not overload the NCBI's server
        time.sleep(WAIT_TIME)
        # Reformat the Entrez search result in order to extract the Entrez ID
        record = Entrez.read(handle)
        organism_ncbi_ids = record["IdList"][::-1]
        # txid+NUMBER+[ORGN] is the form that is used for NCBI BLASTP searches
        # to restrict a search to an organism using the Entrez query
        # constraint input.
        organism_ncbi_ids_result += ["txid" + x + "[ORGN]" for x in organism_ncbi_ids]
        batch_start += NCBI_BATCH_SIZE
        time.sleep(WAIT_TIME)
    # Return the retrieved IDs :D
    return organism_ncbi_ids_result
e0a84006a6646633c4462a1e68dcefe78d3b3bb1
3,639,634
def gunzip(content):
    """Decompress `content` if its first two bytes match the gzip magic
    numbers. There is one chance in 65536 that a file that is not gzipped
    will be ungzipped.
    """
    if len(content) == 0:
        raise DecompressionError('File contains zero bytes.')
    gzip_magic_numbers = [0x1f, 0x8b]
    first_two_bytes = [byte for byte in bytearray(content)[:2]]
    if first_two_bytes != gzip_magic_numbers:
        raise DecompressionError(
            'File is not in gzip format. Magic numbers {}, {} did not match {}, {}.'.format(
                hex(first_two_bytes[0]), hex(first_two_bytes[1]),
                hex(gzip_magic_numbers[0]), hex(gzip_magic_numbers[1])
            ))
    return deflate.gzip_decompress(content)
8a74d6ce4d34589bb04a9ba48d32d6e8d6b6e530
3,639,635
def get_rigid_elements_with_node_ids(model: BDF, node_ids):
    """
    Gets the series of rigid elements that use specific nodes

    Parameters
    ----------
    node_ids : List[int]
        the node ids to check

    Returns
    -------
    rbes : List[int]
        the set of self.rigid_elements
    """
    try:
        nids = set(node_ids)
    except TypeError:
        print(node_ids)
        raise
    rbes = []
    for eid, rigid_element in model.rigid_elements.items():
        if rigid_element.type in ['RBE3', 'RBE2', 'RBE1', 'RBAR',
                                  'RSPLINE', 'RROD', 'RBAR1']:
            independent_nodes = set(rigid_element.independent_nodes)
            dependent_nodes = set(rigid_element.dependent_nodes)
            rbe_nids = independent_nodes | dependent_nodes
            if nids.intersection(rbe_nids):
                rbes.append(eid)
        elif rigid_element.type == 'RSSCON':
            msg = 'skipping card in get_rigid_elements_with_node_ids\n%s' % str(rigid_element)
            model.log.warning(msg)
        else:
            raise RuntimeError(rigid_element.type)
    return rbes
58f264bff7a4fe71a5cd57b719762eaf06aa6120
3,639,636
def genFileBase(f):
    """Given a filename, generate a safe 'base' name for HTML and PNG filenames."""
    baseName = w2res.getBaseMulti(f)
    baseName = "R" + w2res.removeGDBCharacters(baseName)
    return baseName
ce0e5b8e9261eb0410d8a912e1b77cbe5e25bde3
3,639,637
import time


def retrieve_results_average(query, index, k=10, verbose=False, tfidf=False):
    """(NOT USED) Given a query, return the most similar papers from the
    specified FAISS index.

    Also prunes the resulting papers by filtering out papers whose authors do
    not have tags. This uses the average paper representations per author.

    Parameters:
        query (string): The search query
        index (obj): The loaded FAISS index populated by paper embeddings
        k (int): The amount of papers to retrieve
        verbose (bool): Whether to output the debugging information or not
        tfidf (bool): Whether the tf-idf embeddings are used for retrieval
            instead of SBERT.

    Returns:
        pruned (list): pruned list of most similar papers to the query
    """
    initial_retrieval = k * 5
    s = time.time()
    if tfidf:
        most_similar_raw = get_most_similar_ids(query, index, initial_retrieval, tfidf_clf)
    else:
        most_similar_raw = get_most_similar_ids(query, index, initial_retrieval)
    s1 = time.time()
    pruned = prune_results_for_authors_wo_tags_average(most_similar_raw, query, k)
    s2 = time.time()
    if verbose:
        print(f"Full search execution time: {time.time() - s} seconds")
        print(f"from which {s1 - s} s. in the search and {s2 - s1} s. in the pruning.")
        print("===")
        print("Pruned author IDs, sorted by similarity:")
        print(pruned[0])
        print('Similarity scores:')
        print(pruned[1])
    return pruned
2059d5ef62831c5968bdc0b20c3dbfea0b694bf9
3,639,638
import base64

import numpy as np


def _json_custom_hook(d):
    """Deserialize NumPy arrays (and QByteArrays) encoded by the matching
    JSON serializer."""
    if isinstance(d, dict) and '__ndarray__' in d:
        data = base64.b64decode(d['__ndarray__'])
        return np.frombuffer(data, d['dtype']).reshape(d['shape'])
    elif isinstance(d, dict) and '__qbytearray__' in d:
        return _decode_qbytearray(d['__qbytearray__'])
    return d
f5fb62ad38b8822ae304ea00e537b66b7e3b75ee
3,639,639
def basic_pyxll_function_3(x):
    """docstrings appear as help text in Excel"""
    return x
3709d1bce92456b1456ed90d81002f71b7d9e754
3,639,640
import torch


def log_mean_exp(x, dim=1):
    """log(1/k * sum(exp(x))): this normalizes x.

    @param x: PyTorch.Tensor
              samples from gaussian
    @param dim: integer (default: 1)
              which dimension to take the mean over
    @return: PyTorch.Tensor
              mean of x
    """
    m = torch.max(x, dim=dim, keepdim=True)[0]
    return m + torch.log(torch.mean(torch.exp(x - m), dim=dim, keepdim=True))
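A quick sketch of why the max-subtraction matters, with made-up values:

    import torch
    x = torch.tensor([[1000.0, 1000.0, 1000.0]])
    # a naive torch.log(torch.mean(torch.exp(x))) overflows to inf here;
    # subtracting the per-row max first keeps the computation finite:
    print(log_mean_exp(x))  # tensor([[1000.]])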
7f6476ba3a7ec7873ddb9f66754728bb77452721
3,639,641
import maya.cmds


def get_shot_end_frame(shot_node):
    """Return the end frame of the given shot.

    :param shot_node: str
    :return: int
    """
    return maya.cmds.getAttr('{}.endFrame'.format(shot_node))
efb67eb44afc807202ed46b0096627e8794d2bac
3,639,642
def is_integer():
    """Generate a validator that checks whether the value of a property is an
    integer."""
    def wrapper(obj, prop):
        value = getattr(obj, prop)
        if value is None:
            return (True, None)
        try:
            int(value)
        except (ValueError, TypeError):
            return (False, (msg.INVALID_VALUE,))
        return (True, None)
    return wrapper
0f8a5a48c7b9c45666f20f6feede58fa4fc2ff5a
3,639,643
from colorama import Fore


def int_inputs(n):
    """An error-handling function to get integer inputs from the user."""
    while True:
        try:
            option = int(input(Fore.LIGHTCYAN_EX + "\n >>> "))
            if option not in range(1, n + 1):
                i_print_r("Invalid Entry :( Please Try Again.")
                continue
            else:
                return option
        except ValueError:
            i_print_r("Invalid Entry :( Please Try Again.")
            continue
b3554bc13a2c8a43d0279b6e800ed2f6409e755a
3,639,644
def gen_binder_rst(fname, binder_conf):
    """Generate the RST + link for the Binder badge.

    Parameters
    ----------
    fname: str
        The path to the `.py` file for which a Binder badge will be generated.
    binder_conf: dict | None
        If a dictionary it must have the following keys:

        'url': The URL of the BinderHub instance that's running a Binder
            service.
        'org': The GitHub organization to which the documentation will be
            pushed.
        'repo': The GitHub repository to which the documentation will be
            pushed.
        'branch': The Git branch on which the documentation exists (e.g.,
            gh-pages).
        'dependencies': A list of paths to dependency files that match the
            Binderspec.

    Returns
    -------
    rst : str
        The reStructuredText for the Binder badge that links to this file.
    """
    binder_url = gen_binder_url(fname, binder_conf)

    rst = (
        "\n"
        "  .. container:: binder-badge\n\n"
        "    .. image:: https://static.mybinder.org/badge.svg\n"
        "      :target: {}\n"
        "      :width: 150 px\n").format(binder_url)
    return rst
65f8cfc04a11d6660c37cce669a85a133083517e
3,639,645
# NOTE: the names op, sa, Session, Base, Column, Integer, String, Text,
# Boolean, DateTime, LargeBinary, ForeignKey, UniqueConstraint, relationship,
# hybrid_property, UserMixin, basestring and string_types are provided by the
# full migration module's imports (alembic, sqlalchemy, flask_login, six),
# which are omitted in this excerpt.
from binascii import hexlify
from codecs import getencoder
from datetime import datetime, timedelta
from os import urandom


def downgrade():
    """Make refresh token field not nullable."""
    bind = op.get_bind()
    session = Session(bind=bind)

    class CRUDMixin(object):
        """Mixin that adds convenience methods for CRUD (create, read, update, delete) ops."""

        @classmethod
        def create_as(cls, current_user, **kwargs):
            """Create a new record and save it to the database as 'current_user'."""
            assert hasattr(cls, 'modified_by') and hasattr(cls, 'created_by')
            instance = cls(**kwargs)
            return instance.save_as(current_user)

        @classmethod
        def create(cls, **kwargs):
            """Create a new record and save it to the database."""
            instance = cls(**kwargs)
            return instance.save()

        def update_as(self, current_user, commit=True, preserve_modified=False, **kwargs):
            """Update specific fields of the record and save as 'current_user'."""
            for attr, value in kwargs.items():
                setattr(self, attr, value)
            return self.save_as(current_user, commit=commit,
                                preserve_modified=preserve_modified)

        def update(self, commit=True, preserve_modified=False, **kwargs):
            """Update specific fields of a record."""
            for attr, value in kwargs.items():
                setattr(self, attr, value)
            return self.save(commit=commit, preserve_modified=preserve_modified)

        def save_as(self, current_user, commit=True, preserve_modified=False):
            """Save instance as 'current_user'."""
            assert hasattr(self, 'modified_by') and hasattr(self, 'created_by')
            # noinspection PyUnresolvedReferences
            if current_user and not self.created_at:
                # noinspection PyAttributeOutsideInit
                self.created_by = current_user
            if current_user and not preserve_modified:
                # noinspection PyAttributeOutsideInit
                self.modified_by = current_user
            return self.save(commit=commit, preserve_modified=preserve_modified)

        def save(self, commit=True, preserve_modified=False):
            """Save the record."""
            session.add(self)
            if commit:
                if preserve_modified and hasattr(self, 'modified_at'):
                    modified_dt = self.modified_at
                    session.commit()
                    self.modified_at = modified_dt
                session.commit()
            return self

        def delete(self, commit=True):
            """Remove the record from the database."""
            session.delete(self)
            return commit and session.commit()

    class Model(CRUDMixin, Base):
        """Base model class that includes CRUD convenience methods."""

        __abstract__ = True

        @staticmethod
        def _get_rand_hex_str(length=32):
            """Create random hex string."""
            return getencoder('hex')(urandom(length // 2))[0].decode('utf-8')

    class SurrogatePK(object):
        """A mixin that adds a surrogate integer primary key column to any declarative-mapped class."""

        __table_args__ = {'extend_existing': True}

        id = Column(Integer, primary_key=True)

        @classmethod
        def get_by_id(cls, record_id):
            """Get record by ID."""
            if any((isinstance(record_id, basestring) and record_id.isdigit(),
                    isinstance(record_id, (int, float)))):
                # noinspection PyUnresolvedReferences
                return cls.query.get(int(record_id))
            else:
                return None

    def reference_col(tablename, nullable=False, pk_name='id', ondelete=None, **kwargs):
        """Column that adds primary key foreign key reference.

        Usage ::

            category_id = reference_col('category')
            category = relationship('Category', backref='categories')
        """
        return Column(
            ForeignKey('{0}.{1}'.format(tablename, pk_name), ondelete=ondelete),
            nullable=nullable, **kwargs)

    class Client(Model):
        """An OAuth2 Client."""

        __tablename__ = 'clients'
        client_id = Column(String(32), primary_key=True)
        client_secret = Column(String(256), unique=True, nullable=False)

        is_confidential = Column(Boolean(), default=True, nullable=False)

        _redirect_uris = Column(Text(), nullable=False)
        _default_scopes = Column(Text(), nullable=False)

        # Human readable info fields
        name = Column(String(64), nullable=False)
        description = Column(String(400), nullable=False)

        modified_at = Column(DateTime, default=datetime.utcnow,
                             onupdate=datetime.utcnow, nullable=False)
        modified_by_id = reference_col('users', nullable=False)
        modified_by = relationship('User', foreign_keys=modified_by_id)

        created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
        created_by_id = reference_col('users', nullable=False)
        created_by = relationship('User', foreign_keys=created_by_id)

        def __init__(self, redirect_uris=None, default_scopes=None, **kwargs):
            """Create instance."""
            client_id = Client._get_rand_hex_str(32)
            client_secret = Client._get_rand_hex_str(256)
            Model.__init__(self, client_id=client_id, client_secret=client_secret, **kwargs)
            self.redirect_uris = redirect_uris
            self.default_scopes = default_scopes

        def __repr__(self):
            """Represent instance as a unique string."""
            return '<Client({name!r})>'.format(name=self.name)

    class Collection(SurrogatePK, Model):
        """A collection of library stuff, a.k.a. 'a sigel'."""

        __tablename__ = 'collections'
        code = Column(String(255), unique=True, nullable=False)
        friendly_name = Column(String(255), unique=False, nullable=False)
        category = Column(String(255), nullable=False)
        is_active = Column(Boolean(), default=True)
        permissions = relationship('Permission', back_populates='collection', lazy='joined')

        replaces = Column(String(255))
        replaced_by = Column(String(255))

        modified_at = Column(DateTime, default=datetime.utcnow,
                             onupdate=datetime.utcnow, nullable=False)
        modified_by_id = reference_col('users', nullable=False)
        modified_by = relationship('User', foreign_keys=modified_by_id)

        created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
        created_by_id = reference_col('users', nullable=False)
        created_by = relationship('User', foreign_keys=created_by_id)

        def __init__(self, code, friendly_name, category, **kwargs):
            """Create instance."""
            Model.__init__(self, code=code, friendly_name=friendly_name,
                           category=category, **kwargs)

        def __repr__(self):
            """Represent instance as a unique string."""
            return '<Collection({code!r})>'.format(code=self.code)

    class Role(SurrogatePK, Model):
        """A role for a user."""

        __tablename__ = 'roles'
        name = Column(String(80), unique=True, nullable=False)
        user_id = reference_col('users', nullable=True)
        user = relationship('User', back_populates='roles')

        def __init__(self, name, **kwargs):
            """Create instance."""
            Model.__init__(self, name=name, **kwargs)

        def __repr__(self):
            """Represent instance as a unique string."""
            return '<Role({name})>'.format(name=self.name)

    class PasswordReset(SurrogatePK, Model):
        """Password reset token for a user."""

        __tablename__ = 'password_resets'
        user_id = reference_col('users', nullable=True)
        user = relationship('User', back_populates='password_resets')
        code = Column(String(32), unique=True, nullable=False)
        is_active = Column(Boolean(), default=True, nullable=False)
        expires_at = Column(DateTime, nullable=False,
                            default=lambda: datetime.utcnow() + timedelta(hours=7 * 24))

        modified_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
        created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

        def __init__(self, user, **kwargs):
            """Create instance."""
            Model.__init__(self, user=user, code=self._get_rand_hex_str(32), **kwargs)

        def __repr__(self):
            """Represent instance as a unique string."""
            return '<PasswordReset({email!r})>'.format(email=self.user.email)

    class User(UserMixin, SurrogatePK, Model):
        """A user of the app."""

        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        email = Column(String(255), unique=True, nullable=False)
        full_name = Column(String(255), unique=False, nullable=False)
        password = Column(LargeBinary(128), nullable=False)
        last_login_at = Column(DateTime, default=None)
        tos_approved_at = Column(DateTime, default=None)
        is_active = Column(Boolean(), default=False, nullable=False)
        is_admin = Column(Boolean(), default=False, nullable=False)
        permissions = relationship('Permission', back_populates='user',
                                   foreign_keys='Permission.user_id', lazy='joined')
        roles = relationship('Role', back_populates='user')
        password_resets = relationship('PasswordReset', back_populates='user')

        modified_at = Column(DateTime, default=datetime.utcnow,
                             onupdate=datetime.utcnow, nullable=False)
        modified_by_id = reference_col('users', nullable=False)
        modified_by = relationship('User', remote_side=id, foreign_keys=modified_by_id)

        created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
        created_by_id = reference_col('users', nullable=False)
        created_by = relationship('User', remote_side=id, foreign_keys=created_by_id)

        def __init__(self, email, full_name, password=None, **kwargs):
            """Create instance."""
            Model.__init__(self, email=email, full_name=full_name, **kwargs)
            if password:
                self.set_password(password)
            else:
                self.set_password(hexlify(urandom(16)))

        def __repr__(self):
            """Represent instance as a unique string."""
            return '<User({email!r})>'.format(email=self.email)

    class Permission(SurrogatePK, Model):
        """A permission on a Collection, granted to a User."""

        __table_args__ = (UniqueConstraint('user_id', 'collection_id'),
                          SurrogatePK.__table_args__)

        __tablename__ = 'permissions'
        user_id = reference_col('users', nullable=False)
        user = relationship('User', back_populates='permissions',
                            foreign_keys=user_id, lazy='joined')
        collection_id = reference_col('collections', nullable=False)
        collection = relationship('Collection', back_populates='permissions', lazy='joined')

        registrant = Column(Boolean(), default=False, nullable=False)
        cataloger = Column(Boolean(), default=False, nullable=False)
        cataloging_admin = Column(Boolean(), default=False, nullable=False)

        modified_at = Column(DateTime, default=datetime.utcnow,
                             onupdate=datetime.utcnow, nullable=False)
        modified_by_id = reference_col('users', nullable=False)
        modified_by = relationship('User', foreign_keys=modified_by_id)

        created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
        created_by_id = reference_col('users', nullable=False)
        created_by = relationship('User', foreign_keys=created_by_id)

        def __init__(self, **kwargs):
            """Create instance."""
            Model.__init__(self, **kwargs)

        def __repr__(self):
            """Represent instance as a unique string."""
            return '<Permission({user!r}@{collection!r})>'.format(
                user=self.user, collection=self.collection)

    class Token(SurrogatePK, Model):
        """An OAuth2 Bearer token."""

        __tablename__ = 'tokens'
        user_id = reference_col('users', nullable=False, ondelete='CASCADE')
        user = relationship('User')
        client_id = reference_col('clients', pk_name='client_id',
                                  nullable=False, ondelete='CASCADE')
        client = relationship('Client')
        token_type = Column(String(40), nullable=False, default='Bearer')
        access_token = Column(String(256), nullable=False, unique=True)
        refresh_token = Column(String(256), unique=True)
        expires_at = Column(DateTime, nullable=False,
                            default=lambda: datetime.utcnow() + timedelta(seconds=3600))
        _scopes = Column(Text, nullable=False)

        def __init__(self, scopes=None, **kwargs):
            """Create instance."""
            Model.__init__(self, **kwargs)
            self.scopes = scopes

        @staticmethod
        def get_all_by_user(user):
            """Get all tokens for specified user."""
            return Token.query.filter_by(user=user).all()

        @staticmethod
        def delete_all_by_user(user):
            """Delete all tokens for specified user."""
            Token.query.filter_by(user=user).delete()

        @hybrid_property
        def expires(self):
            """Return 'expires_at'."""
            return self.expires_at

        @hybrid_property
        def is_active(self):
            """Return still active (now < expires_at)."""
            return self.expires_at > datetime.utcnow()

        @hybrid_property
        def scopes(self):
            """Return scopes list."""
            return self._scopes.split(' ')

        @scopes.setter
        def scopes(self, value):
            """Store scopes list as string."""
            if isinstance(value, string_types):
                self._scopes = value
            elif isinstance(value, list):
                self._scopes = ' '.join(value)
            else:
                self._scopes = value

        def __repr__(self):
            """Represent instance as a unique string."""
            return '<Token({user!r},{client!r})>'.format(user=self.user.email,
                                                         client=self.client.name)

    # ensure all tokens have a refresh_token
    for token in session.query(Token).filter(Token.refresh_token == None).all():  # noqa: E711
        token.refresh_token = Model._get_rand_hex_str()
        token.save(commit=True, preserve_modified=True)

    with op.batch_alter_table('tokens', schema=None) as batch_op:
        batch_op.alter_column('refresh_token',
                              existing_type=sa.VARCHAR(length=256),
                              nullable=False)
ce9f1e8665d126b08fde6f0b4652b431b111f34c
3,639,646
def height(grid):
    """Get the height of the grid (stored in row-major order)."""
    return len(grid)
b90bdb029518cfdaaa4bf93dd77b8996e646b322
3,639,647
import json
import uuid
from unittest import mock

# NOTE: `config` here is fence's global config object, provided by the test
# module's imports (omitted in this snippet).


def test_blank_index_upload_missing_indexd_credentials_unable_to_load_json(
        app, client, auth_client, encoded_creds_jwt, user_client):
    """
    test BlankIndex upload call but unable to load json with a ValueError
    """

    class MockArboristResponse:
        """
        Mock response for requests lib for Arborist
        """

        def __init__(self, data, status_code=200):
            """
            Set up mock response
            """
            self.data = data
            self.status_code = status_code

        def json(self):
            """
            Mock json() call
            """
            return self.data

    class MockResponse:
        """
        Mock response for requests lib
        """

        def __init__(self, data, status_code=200):
            """
            Set up mock response
            """
            self.data = data
            self.status_code = status_code

        def json(self):
            """
            Mock json() call
            """
            raise ValueError("unable to get json")

        def text(self):
            """
            Mock text() call
            """
            return self.data

    data_requests_mocker = mock.patch(
        "fence.blueprints.data.indexd.requests", new_callable=mock.Mock
    )
    arborist_requests_mocker = mock.patch(
        "gen3authz.client.arborist.client.httpx.Client.request", new_callable=mock.Mock
    )
    with data_requests_mocker as data_requests, arborist_requests_mocker as arborist_requests:
        data_requests.post.return_value = MockResponse(
            {
                "did": str(uuid.uuid4()),
                "rev": str(uuid.uuid4())[:8],
                "baseid": str(uuid.uuid4()),
            }
        )
        data_requests.post.return_value.status_code = 401
        arborist_requests.return_value = MockArboristResponse({"auth": True})
        arborist_requests.return_value.status_code = 200
        headers = {
            "Authorization": "Bearer " + encoded_creds_jwt.jwt,
            "Content-Type": "application/json",
        }
        file_name = "asdf"
        data = json.dumps({"file_name": file_name})
        response = client.post("/data/upload", headers=headers, data=data)
        indexd_url = app.config.get("INDEXD") or app.config.get("BASE_URL") + "/index"
        endpoint = indexd_url + "/index/blank/"
        indexd_auth = (config["INDEXD_USERNAME"], config["INDEXD_PASSWORD"])
        data_requests.post.assert_called_once_with(
            endpoint,
            auth=indexd_auth,
            json={"file_name": file_name, "uploader": user_client.username},
            headers={},
        )
        assert response.status_code == 500, response
        assert not response.json
b91b921893d2d6c672a313d20fe3820b2027fbcd
3,639,648
from urllib.parse import urlencode  # Python 3 home of urllib.urlencode


def parameterize(url):
    """Encode the input URL as a POST parameter.

    url: a string which is the URL to be passed to the ur1.ca service.

    Returns the POST parameter constructed from the URL.
    """
    return urlencode({"longurl": url})
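A quick usage sketch with a made-up URL:

    # parameterize("https://example.com/a?b=1")
    # -> "longurl=https%3A%2F%2Fexample.com%2Fa%3Fb%3D1"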
f665b67d3637074dcf419a1ebfb153dd7f69acb7
3,639,649
def sum_ints(*args, **kwargs):
    """This function is contrived to illustrate args in a function."""
    print(args)
    return sum(args)
4eb1f78d2e26c63b7e9d6086e55e9588d0257534
3,639,650
def set_have_mods(have_mods: bool) -> None:
    """set_have_mods(have_mods: bool) -> None

    (internal)
    """
    return None
a8a504e19450887e473fa607fb7a33253d3de4f3
3,639,651
import logging


def user_required(handler):
    """
    Decorator for checking if there's a user associated with the current
    session. Will also fail if there's no session present.
    """
    def check_login(self, *args, **kwargs):
        """If the handler has no login_url specified, invoke a 403 error."""
        if self.request.query_string != '':
            query_string = '?' + self.request.query_string
        else:
            query_string = ''
        continue_url = self.request.path_url + query_string
        login_url = self.uri_for('login', **{'continue': continue_url})
        try:
            auth = self.auth.get_user_by_session()
            if not auth:
                try:
                    self.redirect(login_url, abort=True)
                except (AttributeError, KeyError) as e:
                    self.abort(403)
        except AttributeError as e:
            # avoid AttributeError when the session was deleted from the server
            logging.error(e)
            self.auth.unset_session()
            self.redirect(login_url)
        return handler(self, *args, **kwargs)

    return check_login
4bc794d08989729aa0e8cd8100fa66166083917a
3,639,652
def student_editapplication(request):
    """View allowing a student to edit and/or submit their saved application."""
    FSJ_user = get_FSJ_user(request.user.username)
    award_id = request.GET.get('award_id', '')
    try:
        award = Award.objects.get(awardid=award_id)
        application = Application.objects.get(award=award, student=FSJ_user)
        if (not application.award.is_active) or (not application.award.is_open()):
            return redirect('/awards/')
        if application.is_submitted:
            return redirect('/awards/')
        if request.method == "POST":
            form = ApplicationRestrictedForm(request.POST, request.FILES,
                                             instance=application)
            if form.is_valid():
                application = form.save(commit=False)
                if '_save' in request.POST:
                    application.is_submitted = False
                    application.save()
                    return redirect('/awards/')
                elif '_submit' in request.POST:
                    if not award.is_open():
                        return redirect('/awards/')
                    application.is_submitted = True
                    if award.documents_needed and not application.application_file:
                        messages.warning(request, 'Please upload a document.')
                    else:
                        application.save()
                        return redirect('/awards/')
                elif '_delete' in request.POST:
                    try:
                        application = Application.objects.get(award=award, student=FSJ_user)
                        if (not award.is_active) or (not award.is_open()):
                            return redirect('/awards/')
                        else:
                            application.delete()
                    except Application.DoesNotExist:  # narrowed from a bare except
                        pass
                    return redirect('/awards/')
        else:
            form = ApplicationRestrictedForm(instance=application)
        context = get_standard_context(FSJ_user)
        template = loader.get_template("FSJ/student_apply.html")
        context["form"] = form
        context['award'] = award
        url = "/awards/edit/?award_id=" + str(award.awardid)
        context["url"] = url
        return HttpResponse(template.render(context, request))
    except Application.DoesNotExist:
        return redirect('/awards/')
ebfda9d2ac12c3d75e4ffe0dd8a7d2a170e6f80c
3,639,653
import glob
import os
import warnings


def create_and_calibrate(servers=None, nserver=8, npipeline_per_server=4,
                         cal_directory='/home/ubuntu/mmanders'):
    """Wrapper to create a new BeamPointingControl instance and load bandpass
    calibration data from a directory.
    """
    # Create the instance
    control_instance = BeamPointingControl(servers=servers, nserver=nserver,
                                           npipeline_per_server=npipeline_per_server,
                                           station=ovro)
    # Find the calibration files
    calfiles = glob.glob(os.path.join(cal_directory, '*.bcal'))
    calfiles.sort()
    if len(calfiles) == 0:
        warnings.warn(f"No calibration data found in '{cal_directory}'")
    # Load the calibration data, if found
    for calfile in calfiles:
        control_instance.set_beam1_calibration(calfile)
    # Start up the data flow
    control_instance.set_beam1_dest()
    control_instance.set_beam1_vlbi_dest()
    # Done
    return control_instance
5d55176e2bb35f06ccaef01d623da1f6c8e9c7c8
3,639,654
def get_haps_from_variants(translation_table_path: str, vcf_data: str, sample_id: str,
                           solver: str = "CBC", config_path: str = None,
                           phased=False) -> tuple:
    """
    Same as get_haps_from_vcf, but bypasses the VCF file so that you can
    provide formatted variants from another input.

    Get called haplotypes and additional information.

    Args:
        translation_table_path (str): [description]
        vcf_data (str): [description]
        sample_id (str): [description]
        config_path ([type], optional): [description]. Defaults to None.

    Returns:
        tuple: translation_table_version, called_haplotypes,
        variants_associated_with_haplotype, matched_translation_table
    """
    config = get_config(config_path)
    gene = AbstractGene(translation_table_path, variants=vcf_data, solver=solver,
                        config=config, phased=phased)
    haplotype = Haplotype(gene, sample_id, config=config)
    haplotype.table_matcher()
    return haplotype.optimize_hap()
018e623532de1d414157610a9e63a3657dfdc061
3,639,655
import torch


def _populate_number_fields(data_dict):
    """Returns a dict with the number fields N_NODE, N_EDGE filled in.

    The N_NODE field is filled if the graph contains a non-`None` NODES field;
    otherwise, it is set to 0. The N_EDGE field is filled if the graph
    contains a non-`None` RECEIVERS field; otherwise, it is set to 0.

    Args:
        data_dict: An input `dict`.

    Returns:
        The data `dict` with number fields.
    """
    dct = data_dict.copy()
    for number_field, data_field in [[N_NODE, NODES], [N_EDGE, RECEIVERS]]:
        if dct.get(number_field) is None:
            if dct[data_field] is not None:
                dct[number_field] = torch.tensor(dct[data_field].size()[0],
                                                 dtype=torch.int64)
            else:
                dct[number_field] = torch.tensor(0, dtype=torch.int64)
    return dct
999eee8573d3a11d889a361905f65ce5b996a3c0
3,639,656
import logging


def to_graph(e, recursive=True, verbose=False, arg_values=None, arg_types=None,
             partial_types=None):
    """Compile a Python entity into equivalent TensorFlow code.

    Currently supported entities:
        * functions
        * classes

    Classes are handled by converting all their methods into a new class.

    Args:
        e: A Python entity.
        recursive: Whether to recursively convert any functions that the
            decorated function may call.
        verbose: Whether to output the compiled code in the logs.
        arg_values: A dict containing value hints for symbols like function
            parameters.
        arg_types: A dict containing type hints for symbols like function
            parameters.
        partial_types: A set of types (e.g. classes) that will not be
            converted entirely. Calls to member functions for these types will
            be renamed independently.

    Returns:
        A function with a signature identical to `o`, but which when executed
        creates a TF graph that has the same functionality as the original
        entity.

    Raises:
        ValueError: If the converted function defines or refers to symbol
            names that are reserved for AutoGraph.
    """
    program_ctx = converter.ProgramContext(
        recursive=recursive,
        autograph_decorators=(convert, do_not_convert, converted_call),
        partial_types=partial_types,
        autograph_module=tf_inspect.getmodule(to_graph),
        uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
    _, name, namespace = conversion.entity_to_graph(e, program_ctx, arg_values,
                                                    arg_types)

    nodes = []
    for dep in reversed(program_ctx.dependency_cache.values()):
        nodes.extend(dep)
    compiled_module, compiled_src = compiler.ast_to_object(
        nodes,
        source_prefix=program_ctx.required_imports,
        include_source_map=True)

    # The compiled code should see everything the entry entity saw.
    # TODO(mdan): This might not work well if the call tree spans modules?
    for key, val in namespace.items():
        # Avoid overwriting entities that have been transformed.
        if key not in compiled_module.__dict__:
            compiled_module.__dict__[key] = val
    compiled = getattr(compiled_module, name)

    # Need this so the source_mapping attribute is available for the context
    # manager to access for runtime errors.
    #
    # Note that compiler.ast_to_object attaches the source map
    # 'ag_source_map__' symbol to the compiled module.
    # TODO(mdan): Record this statically in the generated code.
    # TODO(mdan): Rename this attribute to 'autograph_info__'
    source_map_attribute_name = 'ag_source_map'
    if getattr(compiled, source_map_attribute_name, None) is not None:
        raise ValueError('cannot convert %s because it has an attribute '
                         '"%s", which is reserved for AutoGraph.' %
                         (compiled, source_map_attribute_name))
    setattr(compiled, source_map_attribute_name,
            compiled_module.__dict__['ag_source_map__'])

    if verbose:
        logging.info('Compiled output of %s:\n\n%s\n', e, compiled_src)

    return compiled
dbd2e74e74fb384b0f82c77db811df9619513b50
3,639,657
import datetime as dt

import matplotlib.dates as mdates
import numpy as np
import pandas as pd
from netCDF4 import num2date  # assumed source of num2date in this module


def to_frame(nc):
    """Convert a netCDF4 dataset to a pandas frame."""
    s_params = ["time", "bmnum", "noise.sky", "tfreq", "scan", "nrang",
                "intt.sc", "intt.us", "mppul", "scnum"]
    v_params = ["v", "w_l", "gflg", "p_l", "slist", "gflg_conv", "gflg_kde",
                "v_mad", "cluster_tag", "ribiero_gflg"]
    _dict_ = {k: [] for k in s_params + v_params}
    tparam = {"units": nc.variables["time"].units,
              "calendar": nc.variables["time"].calendar,
              "only_use_cftime_datetimes": False}
    for i in range(nc.variables["slist"].shape[0]):
        sl = nc.variables["slist"][:][i, :]
        idx = np.isnan(sl)
        L = len(sl[~idx])
        for k in s_params:
            _dict_[k].extend(L * [nc.variables[k][i]])
        for k in v_params:
            _dict_[k].extend(nc.variables[k][i, ~idx])
    o = pd.DataFrame.from_dict(_dict_)
    time = o.time.apply(
        lambda x: num2date(x, tparam["units"], tparam["calendar"],
                           only_use_cftime_datetimes=tparam["only_use_cftime_datetimes"])
    ).tolist()
    time = np.array([x._to_real_datetime() for x in time]).astype("datetime64[ns]")
    time = [dt.datetime.utcfromtimestamp(x.astype(int) * 1e-9) for x in time]
    o["dates"] = time
    o["mdates"] = o.dates.apply(lambda x: mdates.date2num(x)).tolist()
    o = o.sort_values(by=["dates"])
    return o
5db0e24b113c0b19dba45df66ec8e42dee3e4b1a
3,639,658
def gather_grade_info(fctx, flow_session, answer_visits):
    """
    :returns: a :class:`GradeInfo`
    """
    all_page_data = (FlowPageData.objects
                     .filter(
                         flow_session=flow_session,
                         ordinal__isnull=False)
                     .order_by("ordinal"))

    points = 0
    provisional_points = 0
    max_points = 0
    max_reachable_points = 0
    fully_correct_count = 0
    partially_correct_count = 0
    incorrect_count = 0
    unknown_count = 0

    for i, page_data in enumerate(all_page_data):
        page = instantiate_flow_page_with_ctx(fctx, page_data)

        assert i == page_data.ordinal

        if answer_visits[i] is None:
            # This is true in principle, but early code to deal with survey
            # questions didn't generate synthetic answer visits for survey
            # questions, so this can't actually be enforced.
            # assert not page.expects_answer()
            continue

        if not page.is_answer_gradable():
            continue

        grade = answer_visits[i].get_most_recent_grade()
        assert grade is not None

        feedback = get_feedback_for_grade(grade)

        max_points += grade.max_points

        if feedback is None or feedback.correctness is None:
            unknown_count += 1
            points = None
            continue

        max_reachable_points += grade.max_points

        page_points = grade.max_points*feedback.correctness

        if points is not None:
            points += page_points

        provisional_points += page_points

        if grade.max_points > 0:
            if feedback.correctness == 1:
                fully_correct_count += 1
            elif feedback.correctness == 0:
                incorrect_count += 1
            else:
                partially_correct_count += 1

    return GradeInfo(
        points=points,
        provisional_points=provisional_points,
        max_points=max_points,
        max_reachable_points=max_reachable_points,
        fully_correct_count=fully_correct_count,
        partially_correct_count=partially_correct_count,
        incorrect_count=incorrect_count,
        unknown_count=unknown_count)
516beddad0b9d58239e1d3c9ef675d2b078dd141
3,639,659
import re


def numericalSort(value):
    """Sort key that orders file names numerically (in ascending order) when
    reading multiple input files.

    Input
    ------
    value : path of the file to be read

    Output
    ------
    parts : the file name split into text and numeric parts
    """
    numbers = re.compile(r'(\d+)')
    parts = numbers.split(value)
    parts[1::2] = map(int, parts[1::2])
    return parts
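A quick usage sketch with made-up file names, showing that numeric parts sort as numbers rather than strings:

    files = ["frame10.png", "frame2.png", "frame1.png"]
    assert sorted(files, key=numericalSort) == ["frame1.png", "frame2.png", "frame10.png"]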
1fc8c748b37a89fe9ea3fb0283b5ec8012781028
3,639,660
def add_markings(obj, marking, selectors):
    """
    Append a granular marking to the granular_markings collection.

    The method makes a best-effort attempt to distinguish between a
    marking-definition or language granular marking.

    Args:
        obj: An SDO or SRO object.
        marking: identifier or list of marking identifiers that apply to the
            properties selected by `selectors`.
        selectors: list of type string, selectors must be relative to the TLO
            in which the properties appear.

    Raises:
        InvalidSelectorError: If `selectors` fail validation.

    Returns:
        A new version of the given SDO or SRO with specified markings added.
    """
    selectors = utils.convert_to_list(selectors)
    marking = utils.convert_to_marking_list(marking)
    utils.validate(obj, selectors)

    granular_marking = []
    for m in marking:
        if is_marking(m):
            granular_marking.append({'marking_ref': m, 'selectors': sorted(selectors)})
        else:
            granular_marking.append({'lang': m, 'selectors': sorted(selectors)})

    if obj.get('granular_markings'):
        granular_marking.extend(obj.get('granular_markings'))

    granular_marking = utils.expand_markings(granular_marking)
    granular_marking = utils.compress_markings(granular_marking)
    return new_version(obj, granular_markings=granular_marking, allow_custom=True)
b7ede77fac6524cba906fd736edb9d43fe41676b
3,639,661
def add_to_list(str_to_add, dns_names):
    """Add a string to the dns_names list if it does not exist, then return
    the index of the string within the list."""
    if str_to_add not in dns_names:
        dns_names.append(str_to_add)
    return dns_names.index(str_to_add)
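A quick usage sketch with made-up DNS names:

    dns_names = ["a.example.com"]
    assert add_to_list("b.example.com", dns_names) == 1  # appended
    assert add_to_list("a.example.com", dns_names) == 0  # already present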
4720708778fccc7a16dc66ad52ec911a5acb1f94
3,639,662
def check_icmp_path(sniffer, path, nodes, icmp_type=ipv6.ICMP_ECHO_REQUEST):
    """Verify that the icmp message is forwarded along the path."""
    len_path = len(path)

    # Verify icmp message is forwarded to the next node of the path.
    for i in range(0, len_path):
        node_msg = sniffer.get_messages_sent_by(path[i])
        node_icmp_msg = node_msg.get_icmp_message(icmp_type)

        if i < len_path - 1:
            next_node = nodes[path[i + 1]]
            next_node_rloc16 = next_node.get_addr16()
            assert next_node_rloc16 == node_icmp_msg.mac_header.dest_address.rloc, \
                "Error: The path is unexpected."
        else:
            return True

    return False
0080837e5f79435396d9cf6566c60bdf40d736c9
3,639,663
def ping(): """Determine if the container is working and healthy. In this sample container, we declare it healthy if we can load the model successfully.""" health = scoring_service.get_model() is not None # You can insert a health check here status = 200 if health else 404 return flask.Response(response="\n", status=status, mimetype="application/json")
8e3cde6098db42be1f93ee04ad4092bef1aec36f
3,639,664
def cyber_pose_to_carla_transform(cyber_pose): """ Convert a Cyber pose a carla transform. """ return carla.Transform( cyber_point_to_carla_location(cyber_pose.position), cyber_quaternion_to_carla_rotation(cyber_pose.orientation))
3bd700c8a3f31cadedcaea798f611d97b379115d
3,639,665
def _is_predator_testcase(testcase): """Return bool and error message for whether this testcase is applicable to predator or not.""" if build_manager.is_custom_binary(): return False, 'Not applicable to custom binaries.' if testcase.regression != 'NA': if not testcase.regression: return False, 'No regression range, wait for regression task to finish.' if ':' not in testcase.regression: return False, 'Invalid regression range %s.' % testcase.regression return True, None
4f9975801bf878522b729035a31685bef170f2dd
3,639,666
def _a_ij_Aij_Dij2(A): """A term that appears in the ASE of Kendall's tau and Somers' D.""" # See `somersd` References [2] section 4: Modified ASEs to test the null hypothesis... m, n = A.shape count = 0 for i in range(m): for j in range(n): count += A[i, j]*(_Aij(A, i, j) - _Dij(A, i, j))**2 return count
5deb884310984d23b70d3364d75d0795e847dcb3
3,639,667
import requests
from bs4 import BeautifulSoup

def getWeekHouseMsg():
    """
    Get the housing-market figures for the past week.
    :return:
    """
    response = requests.get(url=week_host, headers=headers).text
    soup = BeautifulSoup(response, 'lxml')
    house_raw = soup.select('div[class=xfjj]')
    # Average price of second-hand homes
    second_hand_price = house_raw[0].select('.f36')[0].string
    # Number of second-hand home sales
    second_hand_num = house_raw[1].select('.f36')[0].string
    # Average price of new homes
    new_house_price = house_raw[2].select('.f36')[0].string
    # Number of new home sales
    new_house_num = house_raw[3].select('.f36')[0].string
    return new_house_price, new_house_num, second_hand_price, second_hand_num
775fc1b2fa26c1f48890206d5a278f842c5aeaac
3,639,668
def _match(x, y): """Returns an array of the positions of (first) matches of y in x This is similar to R's `match` or Matlab's `[Lia, Locb] = ismember` See https://stackoverflow.com/a/8251757 This assumes that all values in y are in x, but no check is made Parameters ---------- x : 1-d array y : 1-d array Returns ------- yindex : 1-d array np.all(x[yindex] == y) should be True """ index = np.argsort(x) sorted_index = np.searchsorted(x, y, sorter=index) yindex = index[sorted_index] return yindex
e36b5ad1dce2b7ed18039da16aa6de7a741ecb14
3,639,669
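A small self-check of _match under its stated assumption that every value of y occurs in x:

import numpy as np

x = np.array([3, 5, 7, 1, 9, 8, 6, 6])
y = np.array([6, 1, 5, 8])
yindex = _match(x, y)
assert np.all(x[yindex] == y)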
import os import logging import requests import tarfile def download_mnist_tfrecords() -> str: """ Return the path of a directory with the MNIST dataset in TFRecord format. The dataset will be downloaded into WORK_DIRECTORY, if it is not already present. """ if not tf.gfile.Exists(WORK_DIRECTORY): tf.gfile.MakeDirs(WORK_DIRECTORY) filepath = os.path.join(WORK_DIRECTORY, MNIST_TF_RECORDS_FILE) if not tf.gfile.Exists(filepath): logging.info("Downloading {}".format(MNIST_TF_RECORDS_URL)) r = requests.get(MNIST_TF_RECORDS_URL) with tf.gfile.Open(filepath, "wb") as f: f.write(r.content) logging.info("Downloaded {} ({} bytes)".format(MNIST_TF_RECORDS_FILE, f.size())) logging.info("Extracting {} to {}".format(MNIST_TF_RECORDS_FILE, WORK_DIRECTORY)) with tarfile.open(filepath, mode="r:gz") as f: f.extractall(path=WORK_DIRECTORY) data_dir = os.path.join(WORK_DIRECTORY, "mnist-tfrecord") assert tf.gfile.Exists(data_dir) return data_dir
5e4ecf374fc15f9c7098dcfde3f1021d5df07bef
3,639,670
def buildMeanAndCovMatFromRow(row):
    """
    Build a mean vector and covariance matrix from a row

    Parameters
    ----------
    row : astropy Table row
        Entries: {X, Y, Z, U, V, W, dX, dY, ..., cXY, cXZ, ...}

    Return
    ------
    mean : [6] numpy array
        The central estimate {X, Y, Z, U, V, W}
    cov_mat : [6,6] numpy array
        Diagonal elements are dX^2, dY^2, ...
        Off-diagonal elements are cXY*dX*dY, cXZ*dX*dZ, ...
    """
    dim = 6
    # CART_COL_NAMES = ['X', 'Y', 'Z', 'U', 'V', 'W',
    #                   'dX', 'dY', 'dZ', 'dU', 'dV', 'dW',
    #                   'c_XY', 'c_XZ', 'c_XU', 'c_XV', 'c_XW',
    #                   'c_YZ', 'c_YU', 'c_YV', 'c_YW',
    #                   'c_ZU', 'c_ZV', 'c_ZW',
    #                   'c_UV', 'c_UW',
    #                   'c_VW']
    mean = np.zeros(dim)
    for i, col_name in enumerate(CART_COL_NAMES[:6]):
        mean[i] = row[col_name]

    std_vec = np.zeros(dim)
    for i, col_name in enumerate(CART_COL_NAMES[6:12]):
        std_vec[i] = row[col_name]
    corr_tri = np.zeros((dim,dim))

    # Insert upper triangle (top right) correlations
    for i, col_name in enumerate(CART_COL_NAMES[12:]):
        corr_tri[np.triu_indices(dim,1)[0][i],np.triu_indices(dim,1)[1][i]]\
            =row[col_name]

    # Build correlation matrix
    corr_mat = np.eye(6) + corr_tri + corr_tri.T

    # Multiply through by standard deviations
    cov_mat = corr_mat * std_vec * std_vec.reshape(6,1)

    return mean, cov_mat
f680035a39e72c9685cd563fb092109d3beb3add
3,639,671
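The correlation-to-covariance step at the end generalizes beyond astropy rows; a minimal numpy sketch of the same construction in two dimensions:

import numpy as np

std_vec = np.array([2.0, 3.0])
corr_mat = np.array([[1.0, 0.5],
                     [0.5, 1.0]])
cov_mat = corr_mat * std_vec * std_vec.reshape(-1, 1)
# Diagonal is dX^2, dY^2; off-diagonal is c_XY*dX*dY
assert np.allclose(cov_mat, [[4.0, 3.0], [3.0, 9.0]])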
import inspect def getNumArgs(obj): """Return the number of "normal" arguments a callable object takes.""" sig = inspect.signature(obj) return sum(1 for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_ONLY or p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD)
c2e9edef0b2d8c18a0f9e2af90a6a1573705d590
3,639,672
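A quick sketch showing that keyword-only, *args, and **kwargs parameters are not counted:

def f(a, b, *args, c=1, **kwargs):
    pass

assert getNumArgs(f) == 2  # only a and b are "normal" arguments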
def min_distance_from_point(vec, p):
    """ Minimal distance between a single point p and the points along a
    vector (in N dimensions) """
    return np.linalg.norm(vec - p, axis=1).min()
2b21dec14dcb4026d97d6321d4549f49a9520218
3,639,673
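A sketch with a small 2-D point set:

import numpy as np

vec = np.array([[0.0, 0.0],
                [3.0, 4.0],
                [6.0, 8.0]])
p = np.array([3.0, 0.0])
assert min_distance_from_point(vec, p) == 3.0  # closest row is [0, 0]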
def create_environment(env_config): """Creates an simple sequential testing environment.""" if env_config['num_candidates'] < 4: raise ValueError('num_candidates must be at least 4.') SimpleSequentialResponse.MAX_DOC_ID = env_config['num_candidates'] - 1 user_model = SimpleSequentialUserModel( env_config['slate_size'], seed=env_config['seed'], starting_probs=env_config['starting_probs']) document_sampler = SimpleSequentialDocumentSampler(seed=env_config['seed']) simple_seq_env = environment.Environment( user_model, document_sampler, env_config['num_candidates'], env_config['slate_size'], resample_documents=env_config['resample_documents']) return recsim_gym.RecSimGymEnv(simple_seq_env, total_reward, lambda _, __, ___: None, lambda _, __: None)
eef78ba1f134b492126b51dd13357ea8687df319
3,639,674
from nbconvert.exporters import python

def nb_to_python(nb_path):
    """Convert a notebook to a Python script."""
    exporter = python.PythonExporter()
    output, resources = exporter.from_filename(nb_path)
    return output
4a918102fc9e6c35e3c7db89f33dc5c081a17df1
3,639,675
def add(data_source: DataSource) -> DataSource: """ Add a new data source to AuroraX Args: data_source: the data source to add (note: it must be a fully-defined DataSource object) Returns: the newly created data source Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXDuplicateException: duplicate data source, already exists """ # set up request request_data = { "program": data_source.program, "platform": data_source.platform, "instrument_type": data_source.instrument_type, "source_type": data_source.source_type, "display_name": data_source.display_name, "ephemeris_metadata_schema": data_source.ephemeris_metadata_schema, "data_product_metadata_schema": data_source.data_product_metadata_schema, "metadata": data_source.metadata } if (data_source.identifier is not None): request_data["identifier"] = data_source.identifier # make request req = AuroraXRequest(method="post", url=urls.data_sources_url, body=request_data) res = req.execute() # evaluate response if (res.status_code == 409): raise AuroraXDuplicateException("%s - %s" % (res.data["error_code"], res.data["error_message"])) # return try: return DataSource(**res.data) except Exception: raise AuroraXException("Could not create data source")
4a1d39c9280308b6dda8835663a57ba62aca7f21
3,639,676
import sys

def initialize():
    """Do all necessary actions before the input loop starts"""
    isbench = False
    # Recognized CLI arguments: udp, register, server, room, mode, msgfile,
    # benchname, start. get_args is assumed to be a project-local helper
    # that parses them into a dict.
    arg_dict = get_args()
    if "udp" in arg_dict and arg_dict["udp"].isdigit():
        StateHolder.udp_listen_port = int(arg_dict["udp"])
    else:
        StateHolder.udp_listen_port = 5001
    if "server" in arg_dict:
        StateHolder.server_ip = arg_dict["server"]
    else:
        StateHolder.server_ip = '0.0.0.0:5000'
    if "mode" in arg_dict:
        if arg_dict["mode"] == "total":
            StateHolder.room_type = roomTotal
            Benchmark.room_type = "total"
        else:  # arg_dict["mode"] == "fifo"
            StateHolder.room_type = roomFIFO
            Benchmark.room_type = "fifo"
    else:
        StateHolder.room_type = roomFIFO
        Benchmark.room_type = "fifo"

    if "msgfile" in arg_dict:
        Benchmark.msg_file = arg_dict["msgfile"]
    if "benchname" in arg_dict:
        Benchmark.bench_name = arg_dict["benchname"]
    if "start" in arg_dict:
        isbench = True
        isfloat = False
        try:
            float(arg_dict["start"])
            isfloat = True
        except ValueError:
            pass
        if isfloat:
            Benchmark.bench_start = float(arg_dict["start"])
            Benchmark.schedule_benchmark()

    OutputHandler.initialize()
    CommandHandler.initialize()
    UDPbroker.initialize()

    print(">> Welcome to the chat client! Press `!h` for help.")
    print(InputHandler.prompt_msg, end="")
    sys.stdout.flush()

    if "register" in arg_dict:
        CommandHandler.pushCommand("!register {}".format(arg_dict["register"]))
    if "room" in arg_dict:
        CommandHandler.pushCommand("!j " + arg_dict["room"])
        CommandHandler.pushCommand("!w " + arg_dict["room"])

    return isbench
5a97fb46df6ad9c98c01beda04724615a98f1583
3,639,677
def read_text(file, num=False): """ Read from txt [file]. If [num], then data is numerical data and will need to convert each string to an int. """ with open(file,'r') as f: data = f.read().splitlines() if num: data = [int(i) for i in data] return data
f9b61d254b1c2188ae6be3b9260f94f0657bcd3a
3,639,678
from scipy.interpolate import Rbf

def interpolate_rbf(x, y, z, x_val, y_val, z_val):
    """Radial basis function interpolation.

    Parameters
    ----------
    x : np.ndarray
        x-faces or x-edges of a mesh
    y : np.ndarray
        y-faces or y-edges of a mesh
    z : np.ndarray
        z-faces or z-edges of a mesh
    x_val : np.ndarray
        curl values or electric field values in the x-direction
    y_val : np.ndarray
        curl values or electric field values in the y-direction
    z_val : np.ndarray
        curl values or electric field values in the z-direction

    Returns
    -------
    tuple of scipy.interpolate.Rbf
        One radial basis function interpolation object for each of the
        x-, y-, and z-direction values
    """
    x_interpolated = Rbf(x[:, 0], x[:, 1], x[:, 2], x_val)
    y_interpolated = Rbf(y[:, 0], y[:, 1], y[:, 2], y_val)
    z_interpolated = Rbf(z[:, 0], z[:, 1], z[:, 2], z_val)

    return x_interpolated, y_interpolated, z_interpolated
35f833a620fabbfa786b1d8e829e378b24d202ad
3,639,679
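A minimal sketch assuming numpy and scipy are installed; the random coordinates below are stand-ins for actual mesh faces. Each returned Rbf object is callable at arbitrary points:

import numpy as np

rng = np.random.default_rng(0)
x = rng.random((20, 3))  # hypothetical x-face coordinates
y = rng.random((20, 3))
z = rng.random((20, 3))
x_val, y_val, z_val = rng.random(20), rng.random(20), rng.random(20)

fx, fy, fz = interpolate_rbf(x, y, z, x_val, y_val, z_val)
print(fx(0.5, 0.5, 0.5))  # interpolated x-direction value at (0.5, 0.5, 0.5)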
def svm_loss_naive(W, X, y, reg):
    """
    Structured SVM loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    # compute the loss and the gradient
    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    for i in range(num_train):
        scores = X[i].dot(W)
        correct_class_score = scores[y[i]]
        for j in range(num_classes):
            if j == y[i]:
                continue
            margin = scores[j] - correct_class_score + 1  # note delta = 1
            if margin > 0:
                loss += margin
                # The gradient is accumulated alongside the loss: each margin
                # violation pushes the wrong class up and the correct class
                # down by X[i].
                dW[:, j] += X[i]
                dW[:, y[i]] -= X[i]

    # Right now the loss is a sum over all training examples, but we want it
    # to be an average instead so we divide by num_train.
    loss /= num_train
    dW /= num_train

    # Add regularization to the loss; d/dW of reg * sum(W*W) is 2 * reg * W.
    loss += reg * np.sum(W * W)
    dW += 2 * reg * W

    return loss, dW
6b5a56700e0be2397cd449a7d603f7498292d031
3,639,680
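A toy invocation to sanity-check shapes; a full numeric-gradient check would compare dW against finite differences, but this is only a smoke test, not a grading harness:

import numpy as np

rng = np.random.default_rng(0)
N, D, C = 5, 4, 3
W = rng.normal(size=(D, C)) * 0.01
X = rng.normal(size=(N, D))
y = rng.integers(0, C, size=N)

loss, dW = svm_loss_naive(W, X, y, reg=1e-3)
assert dW.shape == W.shape
print("loss:", loss)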
def get_batch(image_files, width, height, mode='RGB'): """ Get a single batch of data as an NumPy array """ data_batch = np.array( [get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32) # Make sure the images are in 4 dimensions if len(data_batch.shape) < 4: data_batch = data_batch.reshape(data_batch.shape + (1,)) return data_batch
b94d095712c14bee2d856b1dba7a6e7286f5f16e
3,639,681
def ScriptProvenanceConst_get_decorator_type_name(): """ScriptProvenanceConst_get_decorator_type_name() -> std::string""" return _RMF.ScriptProvenanceConst_get_decorator_type_name()
a15d001dea73333e16c21697c95a8c11d6567264
3,639,682
import os

def load(dataset, trainset_name = ''):
    """Load training sets
    ======

    Load a training set (label permutations) for a dataset, downloading it
    first if it is not already present locally.

    Parameters
    ----------
    dataset : string
        Name of dataset.
    trainset_name : string (optional), default=''
        A modifier to uniquely identify different training sets for each dataset.
    """
    dataFile = dataset.lower() + trainset_name.lower() +"_permutations.npz" #Change this eventually
    dataFile_path = os.path.join(trainset_dir, dataFile)

    #Check if Data directory exists
    if not os.path.exists(trainset_dir):
        os.makedirs(trainset_dir)

    #Download trainset if needed
    if not os.path.exists(dataFile_path):
        urlpath = 'https://github.com/jwcalder/GraphLearning/raw/master/LabelPermutations/'+dataFile
        utils.download_file(urlpath, dataFile_path)

    trainset = utils.numpy_load(dataFile_path, 'perm')

    return trainset
d087ab856c53c2cd1f2cef531cf2377e15042e71
3,639,683
def parse_annotation(parameter):
    """
    Tries to parse an internal annotation referencing ``Client`` or ``InteractionEvent``.

    Parameters
    ----------
    parameter : ``Parameter``
        The respective parameter's representation.

    Returns
    -------
    choices : `None` or `dict` of (`str` or `int`, `str`) items
        Parameter's choices.
    description : `None` or `str`
        Parameter's description.

        > Returned as `None` for internal parameters or if `description` could not be detected.
    name : `str`
        The parameter's name.
    type_ : `int`
        The parameter's internal type identifier.
    channel_types : `None` or `tuple` of `int`
        The accepted channel types.
    max_value : `None`, `int`, `float`
        The maximal accepted value.
    min_value : `None`, `int`, `float`
        The minimal accepted value.

    Raises
    ------
    ValueError
        - If `parameter` annotation tuple's length is out of range [2:3].
        - If `parameter` annotation tuple refers to an internal type.
    TypeError
        Parameter's type refers to an unknown type or string value.
    """
    if parameter.has_annotation:
        annotation_value = parameter.annotation
        if isinstance(annotation_value, tuple):
            if len(annotation_value) == 0:
                annotation_value = parameter.name
            else:
                return parse_annotation_tuple(parameter)

        elif isinstance(annotation_value, SlashParameter):
            return parse_annotation_slash_parameter(annotation_value, parameter.name)
    else:
        annotation_value = parameter.name

    if not isinstance(annotation_value, (str, type)):
        raise TypeError(f'Parameter `{parameter.name}` is neither `tuple`, `str`, nor `type` instance, got '
            f'{annotation_value.__class__.__name__}; {annotation_value!r}.')
    else:
        annotation_type = parse_annotation_internal(annotation_value)
        if annotation_type is None:
            annotation_type, choices, channel_types = parse_annotation_type_and_choice(annotation_value, parameter.name)
        else:
            choices = None
            channel_types = None

    return choices, None, parameter.name, annotation_type, channel_types, None, None
076e0cf5dd60eec8624310bac96dccf53d11d441
3,639,684
def search(request): """ Search results """ query = request.GET.get('query') res = MsVerse.objects.filter(raw_text__icontains=query).order_by( 'verse__chapter__book__num', 'verse__chapter__num', 'verse__num', 'hand__manuscript__liste_id') return default_response(request, 'search.html', {'results': res, 'query': query})
4d5fafad400018981de68006540f4d990a1ebcea
3,639,685
from typing import Tuple from typing import List def _compare(pair: Tuple[List[int], List[int]]) -> float: """Just a wrapper for fingerprints.compare, that unpack its first argument""" return fingerprints.compare(*pair)
9b7947898e2cbf5579a7e31dc385b54a0a1bdd62
3,639,686
import operator import re def output_onto(conll_tokens, markstart_dict, markend_dict, file_name): """ Outputs analysis results in OntoNotes .coref XML format :param conll_tokens: List of all processed ParsedToken objects in the document :param markstart_dict: Dictionary from markable starting token ids to Markable objects :param markend_dict: Dictionary from markable ending token ids to Markable objects :return: serialized XML """ output_string = '<DOC DOCNO="' + file_name + '">\n<TEXT PARTNO="000">\n' for out_tok in conll_tokens: if int(out_tok.id) in markstart_dict: for out_mark in sorted(markstart_dict[int(out_tok.id)], key=operator.attrgetter('end'), reverse=True): output_string += '<COREF ID="' + str(out_mark.group) + '" ENTITY="' + out_mark.entity + '" INFSTAT="' + out_mark.infstat if not out_mark.antecedent == "none": output_string += '" TYPE="' + out_mark.coref_type output_string += '">' if int(out_tok.id) > 0: output_string += re.sub("&","&amp;",out_tok.text) if ";" not in out_tok.text else out_tok.text if int(out_tok.id) in markend_dict: for out_mark in markend_dict[int(out_tok.id)]: output_string += "</COREF>" if int(out_tok.id) > 0: output_string += ' ' return output_string + "\n</TEXT>\n</DOC>\n"
f1a917e85735e9581326e60e3add94176e4f84cc
3,639,687
def vertical() -> np.ndarray:
    """Returns the Jones matrix for a vertical linear polarizer."""
    return np.asarray([[0, 0], [0, 1]])
692653446e0e7f96bf2970353f7de702b9e502ca
3,639,688
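In the Jones calculus a polarizer acts on a Jones vector by matrix multiplication; a quick check that this matrix blocks horizontally polarized light and passes vertically polarized light:

import numpy as np

H = np.array([1, 0])  # horizontally polarized light
V = np.array([0, 1])  # vertically polarized light

assert np.array_equal(vertical() @ H, [0, 0])  # blocked
assert np.array_equal(vertical() @ V, [0, 1])  # passed unchanged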
def resource_id(d, i, r):
    """Get resource id from meter reading.
    :param d: Report definition
    :type d: Dict
    :param i: Item definition
    :type i: Dict
    :param r: Meter reading
    :type r: usage.reading.Reading
    """
    return _get_reading_attr(r, 'resource_id')
73700abbbf34f634435e1f95d52d2730cc3d532b
3,639,689
import logging def create_provider_router(neutron_client, project_id): """Create the provider router. :param neutron_client: Authenticated neutronclient :type neutron_client: neutronclient.Client object :param project_id: Project ID :type project_id: string :returns: Router object :rtype: dict """ routers = neutron_client.list_routers(name='provider-router') if len(routers['routers']) == 0: logging.info('Creating provider router for external network access') router_info = { 'router': { 'name': 'provider-router', 'tenant_id': project_id } } router = neutron_client.create_router(router_info)['router'] logging.info('New router created: %s', (router['id'])) else: logging.warning('Router provider-router already exists.') router = routers['routers'][0] return router
c9eb1de728d141d73c9f7b169df87c01829892f6
3,639,690
from typing import List import shlex def split(string: str) -> List[str]: """ Split string (which represents a command) into a list. This allows us to just copy/paste command prefixes without having to define a full list. """ return shlex.split(string)
360fceeba7d6280e27068f61d2420cfd9fbfbcc2
3,639,691
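shlex respects shell quoting, so quoted arguments stay intact:

assert split("git commit -m 'initial commit'") == ["git", "commit", "-m", "initial commit"]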
def compute_prevalence_percentage(df, groupby_fields):
    """
    Compute each topic's share of the total topic weight within each group.

    groupby_fields: e.g. ['topic_id', 'year']
    """
    pdf = df.groupby(groupby_fields).agg({'norm_topic_weight': 'sum'})
    # Normalize the summed weights within each top-level group so that the
    # proportions in each group add up to 1.
    pdf2 = pdf.groupby(level=0).apply(lambda x: x / x.sum()).reset_index()
    groupby_fields.append('proportional_weight')
    pdf2.columns = groupby_fields
    # `labels` is assumed to be a module-level DataFrame that can be merged
    # on the groupby_fields[1] column.
    pdf2 = pdf2.merge(labels, on=groupby_fields[1])

    return pdf2
72bc8f04c6cf05d64ddd36b93a73a81136dfedf9
3,639,692
from datetime import datetime def get_token_history(address) -> pd.DataFrame: """Get info about token historical transactions. [Source: Ethplorer] Parameters ---------- address: str Token e.g. 0xf3db5fa2c66b7af3eb0c0b782510816cbe4813b8 Returns ------- pd.DataFrame: DataFrame with token historical transactions. """ response = make_request("getTokenHistory", address, limit=1000) all_operations = [] operations = response["operations"] try: first_row = operations[0]["tokenInfo"] name, symbol, _ = ( first_row.get("name"), first_row.get("symbol"), first_row.get("balance"), ) decimals = first_row.get("decimals") except Exception: name, symbol = "", "" decimals = None for operation in operations: operation.pop("type") operation.pop("tokenInfo") operation["timestamp"] = datetime.fromtimestamp(operation["timestamp"]) all_operations.append(operation) df = pd.DataFrame(all_operations) if df.empty: return df df[["name", "symbol"]] = name, symbol df["value"] = df["value"].astype(float) / (10 ** int(decimals)) return df[["timestamp", "name", "symbol", "value", "from", "to", "transactionHash"]]
941d02b3ef4a4525e376c1b90519391c97e128eb
3,639,693
def top1_accuracy(pred, y): """Main evaluation metric.""" return sum(pred.argmax(axis=1) == y) / float(len(y))
d011b432c7c04331ff09d16ba8151c8c4f056ead
3,639,694
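A sketch with a small batch; pred holds one row of per-class scores per example:

import numpy as np

pred = np.array([[0.1, 0.9],   # predicts class 1
                 [0.8, 0.2],   # predicts class 0
                 [0.3, 0.7]])  # predicts class 1
y = np.array([1, 0, 0])
assert top1_accuracy(pred, y) == 2 / 3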
from typing import Optional
import os

def lookup_default_client_credentials_json() -> Optional[str]:
    """
    Try to look up the default JSON file containing the Mix client credentials.
    :return: str or None, the path to the default JSON file, or None if not found
    """
    path_client_cred_json = os.path.realpath(os.path.join(os.getcwd(), DEFAULT_MIX_SERVICE_CRED_JSON))
    if os.path.isfile(path_client_cred_json):
        return path_client_cred_json
    else:
        return lookup_file_from_mixcli_userhome(DEFAULT_MIX_SERVICE_CRED_JSON)
bbb0d49244028aaf365d02717a5e0528ce6c7555
3,639,695
import requests
# json_normalize has lived in the top-level pandas namespace since pandas 1.0
from pandas import json_normalize

def dividend_history(symbol):
    """
    This function returns the dividend payment history of the given stock symbol.
    Args:
        symbol (:obj:`str`, required): 3-character ticker of the desired stock.
    """
    data = requests.get('https://apipubaws.tcbs.com.vn/tcanalysis/v1/company/{}/dividend-payment-histories?page=0&size=20'.format(symbol)).json()
    df = json_normalize(data['listDividendPaymentHis']).drop(columns=['no', 'ticker'])
    return df
0775deaeaa4a6a574af62821273cbd052625c889
3,639,696
def reftype_to_pipelines(reftype, cal_ver=None, context=None):
    """Given `reftype`, `cal_ver`, and `context`, locate the appropriate SYSTEM CRDSCFG
    reference file and determine the sequence of pipeline .cfgs required to
    process that reference type.
    """
    context = _get_missing_context(context)
    cal_ver = _get_missing_calver(cal_ver)
    with log.augment_exception("Failed determining required pipeline .cfgs for",
                               "REFTYPE", srepr(reftype), "CAL_VER", srepr(cal_ver)):
        config_manager = _get_config_manager(context, cal_ver)
        return config_manager.reftype_to_pipelines(reftype)
a8443ae6e762322681272bb4b348f535aa4b954b
3,639,697
def levy(x: np.ndarray):
    """
    The Levy function. It is usually evaluated on the hypercube
    xi ∈ [-10, 10], for all i = 1, …, d, and has its global minimum
    f(x*) = 0 at x* = (1, …, 1).

    :param x: c(x1, x2, ..., xd)
    :return: the y-value (float)
    """
    w = 1 + (x - 1) / 4  # same shape as x

    term1 = (np.sin(np.pi * w.T[0])) ** 2
    term3 = (w.T[-1] - 1) ** 2 * (1 + (np.sin(2 * np.pi * w.T[-1])) ** 2)

    wi = w.T[:-1]
    sum = np.sum((wi - 1) ** 2 * (1 + 10 * (np.sin(np.pi * wi + 1)) ** 2))
    return term1 + sum + term3
e24744982def1509548dd269be596bf310ff6eb6
3,639,698
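At the global minimum x* = (1, …, 1), every w_i = 1 and all three terms vanish; a quick check:

import numpy as np

assert np.isclose(levy(np.ones(4)), 0.0)
print(levy(np.zeros(4)))  # away from the minimum the value is positive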
import warnings def _select_programme(state, audio_programme=None): """Select an audioProgramme to render. If audio_programme_id is provided, use that to make the selection, otherwise select the only audioProgramme, or the one with the lowest id. Parameters: state (_ItemSelectionState): 'adm' must be set. audio_programme (AudioProgramme): audioProgramme to select if there are multiple programmes. Returns: _ItemSelectionState: state with audioProgramme set if one is found, None otherwise. """ if audio_programme is None: if len(state.adm.audioProgrammes) > 1: warnings.warn("more than one audioProgramme; selecting the one with the lowest id") return evolve(state, audioProgramme=min(state.adm.audioProgrammes, key=lambda programme: programme.id)) elif len(state.adm.audioProgrammes) == 1: return evolve(state, audioProgramme=state.adm.audioProgrammes[0]) else: return evolve(state, audioProgramme=None) else: assert in_by_id(audio_programme, state.adm.audioProgrammes), "selected audioProgramme not in ADM." return evolve(state, audioProgramme=audio_programme)
a7e5cbc9ad2be80b7bfd5f3651b610c83b3f15fe
3,639,699