content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
from typing import Tuple


def should_commit(kwargs: Kwargs) -> Tuple[bool, Kwargs]:
    """Decide whether a schema class should create a document on instantiation.

    Pops the 'create' flag out of ``kwargs`` (defaulting to True when it is
    absent) and returns it together with the remaining keyword arguments.
    """
    commit = kwargs.pop('create', True)
    return commit, kwargs
3e554d661b069e71da86dc8f3d43e754236a9037
24,200
from typing import Dict
import os
import collections


def create_view_files_widget(ws_names2id: Dict[str, str],
                             ws_paths: Dict[str, WorkspacePaths],
                             output):
    """Create an ipywidget UI to view HTML snapshots and their associated comment files."""
    # Cascading dropdowns: workspace -> user -> date -> time -> file.
    # Each observer below repopulates the next dropdown in the chain.
    workspace_chooser = widgets.Dropdown(
        options=ws_names2id,
        value=None,
        description='<b>Choose the workspace</b>:',
        style={'description_width': 'initial'},
        layout=widgets.Layout(width='900px')
    )
    user_chooser = widgets.Dropdown(
        options=[],
        value=None,
        description='<b>Choose the user</b>:',
        style={'description_width': 'initial'},
        layout=widgets.Layout(width='900px')
    )
    date_chooser = widgets.Dropdown(
        options=[],
        value=None,
        description='<b>Choose the date</b>:',
        style={'description_width': 'initial'},
        layout=widgets.Layout(width='900px')
    )
    time_chooser = widgets.Dropdown(
        options=[],
        value=None,
        description='<b>Choose the time</b>:',
        style={'description_width': 'initial'},
        layout=widgets.Layout(width='900px')
    )
    file_chooser = widgets.Dropdown(
        options=[],
        value=None,
        description='<b>Choose the file</b>:',
        style={'description_width': 'initial'},
        layout=widgets.Layout(width='900px')
    )
    view_comment_button = widgets.Button(
        description='View the comment for the HTML snapshot',
        disabled=False,
        button_style='success',
        layout=widgets.Layout(width='300px'),
        tooltip='Click the button to view the comment associated with the HTML snapshot of the notebook.'
    )
    view_html_button = widgets.Button(
        description='View the HTML snapshot',
        disabled=False,
        button_style='success',
        layout=widgets.Layout(width='250px'),
        tooltip='Click the button to view the HTML snapshot of the notebook.'
    )

    def on_view_comment_button_clicked(_):
        # Show the sidecar comment file for the currently selected snapshot.
        # The comment lives next to the snapshot with the suffix swapped.
        with output:
            output.clear_output()
            if not file_chooser.value:
                display(HTML('''<div class="alert alert-block alert-warning">
                No comment files found for HTML snapshots in this workspace.</div>'''))
                return
            comment_file = file_chooser.value.replace('.html', WorkspacePaths.COMMENT_FILE_SUFFIX)
            # Shell out to gsutil via IPython; returns the file content lines.
            comment = get_ipython().getoutput(f"gsutil cat '{comment_file}'")
            display(HTML(f'''<div class="alert alert-block alert-info">{comment}</div>'''))
    view_comment_button.on_click(on_view_comment_button_clicked)

    def on_view_html_button_clicked(_):
        # Copy the snapshot out of GCS into a local temp file and render it
        # inline through an IFrame.
        with output:
            output.clear_output()
            if not file_chooser.value:
                display(HTML('''<div class="alert alert-block alert-warning">
                No HTML snapshots found in this workspace.</div>'''))
                return
            source = file_chooser.value
            dest = TEMP_HTML.name
            get_ipython().system(f"set -o xtrace ; gsutil cp '{source}' '{dest}'")
            display(IFrame(os.path.join('.', os.path.basename(TEMP_HTML.name)), width='100%', height=800))
    view_html_button.on_click(on_view_html_button_clicked)

    def on_choose_workspace(changed):
        # Repopulate the user dropdown whenever the workspace changes.
        output.clear_output()
        user_chooser.options = []
        if changed['new']:
            workspace_paths = ws_paths[changed['new']]
            items = tf.io.gfile.glob(pattern=workspace_paths.get_user_glob())
            if items:
                user_chooser.options = {os.path.basename(item): item for item in items}
    workspace_chooser.observe(on_choose_workspace, names='value')

    def on_choose_user(changed):
        # Repopulate the date dropdown (newest first) for the chosen user.
        date_chooser.options = []
        if changed['new']:
            workspace_paths = ws_paths[workspace_chooser.value]
            items = tf.io.gfile.glob(pattern=workspace_paths.add_date_glob_to_path(path=changed['new']))
            if items:
                date_chooser.options = collections.OrderedDict(sorted(
                    {os.path.basename(item): item for item in items}.items(), reverse=True))
    user_chooser.observe(on_choose_user, names='value')

    def on_choose_date(changed):
        # Repopulate the time dropdown (newest first) for the chosen date.
        time_chooser.options = []
        if changed['new']:
            workspace_paths = ws_paths[workspace_chooser.value]
            items = tf.io.gfile.glob(pattern=workspace_paths.add_time_glob_to_path(path=changed['new']))
            if items:
                time_chooser.options = collections.OrderedDict(sorted(
                    {os.path.basename(item): item for item in items}.items(), reverse=True))
    date_chooser.observe(on_choose_date, names='value')

    def on_choose_time(changed):
        # Repopulate the snapshot file dropdown for the chosen time.
        file_chooser.options = []
        if changed['new']:
            workspace_paths = ws_paths[workspace_chooser.value]
            items = tf.io.gfile.glob(pattern=workspace_paths.add_html_glob_to_path(path=changed['new']))
            if items:
                file_chooser.options = {os.path.basename(item): item for item in items}
    time_chooser.observe(on_choose_time, names='value')

    return widgets.VBox(
        [widgets.HTML('''
        <h3>View an HTML snapshot of a notebook</h3>
        <p>Use the dropdowns to select the workspace, user, date, time, and particular HTML snapshot.
        <br>Then click on the 'view' buttons to see either the comment for the snapshot or the actual snapshot.
        </p><hr>'''),
         workspace_chooser, user_chooser, date_chooser, time_chooser, file_chooser,
         widgets.HBox([view_comment_button, view_html_button])],
        layout=widgets.Layout(width='auto', border='solid 1px grey'))
115eba9c894010b461e0a654ba239bc7c8157bb1
24,201
def _get_frame_time(time_steps): """ Compute average frame time. :param time_steps: 1D array with cumulative frame times. :type time_steps: numpy.ndarray :return: The average length of each frame in seconds. :rtype: float """ if len(time_steps.shape) != 1: raise ValueError("ERROR: Time series must be a 1D array.") frame_time = time_steps[-1]/(len(time_steps) - 1) # Need to ignore the first frame (0). return frame_time
e849e5d6bcbc14af357365b3e7f98f1c50d93ee4
24,202
import random


def next_symbol_to_learn(ls):
    """Returns the next symbol to learn.

    This always returns characters from the training set; within those, it
    gives higher probability to symbols the user doesn't know very well yet
    (the weight of a symbol is the inverse of its score in ``ls``).

    `ls` is the learn state.

    Returns a tuple like ("V", "...-")
    """
    total = 0.0
    candidates = []
    for symbol in ls["learning_set"]:
        # Lower score => less well known => proportionally higher weight.
        weight = 1.0 / ls[symbol]
        total += weight
        candidates.append((symbol, weight))
    r = random.uniform(0.0, total)
    cumulative = 0.0  # renamed from `sum`, which shadowed the builtin
    for symbol, weight in candidates:
        cumulative += weight
        if r <= cumulative:
            return (symbol, morse.to_morse[symbol])
    # Floating-point rounding can leave r marginally above the accumulated
    # weights; the old code fell through here and returned None.  Fall back
    # to the last candidate instead.
    symbol = candidates[-1][0]
    return (symbol, morse.to_morse[symbol])
d4b574a6f841ee3f2e1ce4be9f67a508ed6fb2de
24,203
def query_table3(song):
    """
    Return the SQL necessary to get all users who listened to the song
    name passed as an argument to this function.

    Single quotes in the song name are doubled so that titles containing an
    apostrophe no longer break the statement (and cannot be used for trivial
    SQL injection).  For fully untrusted input, prefer a parameterized query
    executed through the DB driver.
    """
    escaped = song.replace("'", "''")
    return "select user_name from WHERE_SONG where song_name = '{}';".format(escaped)
ed9a3fb7eb369c17027871e28b02600b78d483a9
24,204
import sys
import os


def create_background(
    bg_type,
    fafile,
    outfile,
    genome="hg18",
    size=200,
    nr_times=10,
    custom_background=None,
):
    """Create background of a specific type.

    Parameters
    ----------
    bg_type : str
        Name of background type.
    fafile : str
        Name of input FASTA file.
    outfile : str
        Name of output FASTA file.
    genome : str, optional
        Genome name.
    size : int, optional
        Size of regions.
    nr_times : int, optional
        Generate this times as many background sequences as compared to
        input file.
    custom_background : str, optional
        FASTA file with user-supplied background sequences (only used when
        bg_type is "custom").

    Returns
    -------
    nr_seqs : int
        Number of sequences created.
    """
    size = int(size)
    config = MotifConfig()
    fg = Fasta(fafile)
    if bg_type in ["genomic", "gc"]:
        if not genome:
            logger.error("Need a genome to create background")
            sys.exit(1)
    if bg_type == "random":
        f = MarkovFasta(fg, k=1, n=nr_times * len(fg))
        logger.debug("Random background: %s", outfile)
    elif bg_type == "genomic":
        logger.debug("Creating genomic background")
        f = RandomGenomicFasta(genome, size, nr_times * len(fg))
    elif bg_type == "gc":
        logger.debug("Creating GC matched background")
        f = MatchedGcFasta(fafile, genome, nr_times * len(fg))
        logger.debug("GC matched background: %s", outfile)
    elif bg_type == "promoter":
        fname = Genome(genome).filename
        gene_file = fname.replace(".fa", ".annotation.bed.gz")
        if not os.path.exists(gene_file):
            # BUGFIX: this used to test `if not gene_file`, which is never
            # true for a non-empty path string, so the fallback to the gene
            # directory was dead code.
            gene_file = os.path.join(config.get_gene_dir(), "%s.bed" % genome)
        if not os.path.exists(gene_file):
            # BUGFIX: the genome name was missing from this message
            # (no .format() call on the first print).
            print("Could not find a gene file for genome {}".format(genome))
            print("Did you use the --annotation flag for genomepy?")
            print(
                "Alternatively make sure there is a file called {}.bed in {}".format(
                    genome, config.get_gene_dir()
                )
            )
            raise ValueError("no gene file found for genome %s" % genome)
        logger.info(
            "Creating random promoter background (%s, using genes in %s)",
            genome,
            gene_file,
        )
        f = PromoterFasta(gene_file, genome, size, nr_times * len(fg))
        logger.debug("Random promoter background: %s", outfile)
    elif bg_type == "custom":
        bg_file = custom_background
        if not bg_file:
            raise IOError("Background file not specified!")
        if not os.path.exists(bg_file):
            # BUGFIX: the filename was passed as a second argument to
            # IOError instead of being interpolated into the message.
            raise IOError("Custom background file %s does not exist!" % bg_file)
        else:
            logger.info("Copying custom background file %s to %s.", bg_file, outfile)
            f = Fasta(bg_file)
            median_length = np.median([len(seq) for seq in f.seqs])
            if median_length < (size * 0.95) or median_length > (size * 1.05):
                # logger.warn is deprecated; use logger.warning.
                logger.warning(
                    "The custom background file %s contains sequences with a "
                    "median size of %s, while GimmeMotifs predicts motifs in sequences "
                    "of size %s. This will influence the statistics! It is recommended "
                    "to use background sequences of the same size.",
                    bg_file,
                    median_length,
                    size,
                )
    f.writefasta(outfile)
    return len(f)
4babaf0eb6ed2f7e600fc7db7a8f8a4ee3b59d3b
24,205
def train_test_data(x_, y_, z_, i):
    """Split the x, y and z arrays into learning and test subsets.

    ``i`` holds the (randomly chosen) indices that form the test set; every
    other position goes to the learning set.  Returns the three learning
    arrays (length N - len(i)) followed by the three test arrays
    (length len(i)).
    """
    learn = tuple(np.delete(arr, i) for arr in (x_, y_, z_))
    test = tuple(np.take(arr, i) for arr in (x_, y_, z_))
    return learn + test
7430e9ea2c96356e9144d1689af03f50b36895c6
24,206
def construct_Tba(leads, tleads, Tba_=None):
    """
    Constructs many-body tunneling amplitude matrix Tba from single particle
    tunneling amplitudes.

    Parameters
    ----------
    leads : LeadsTunneling
        LeadsTunneling object.
    tleads : dict
        Dictionary containing single particle tunneling amplitudes.
        tleads[(lead, state)] = tunneling amplitude.
    Tba_ : None or ndarray
        nbaths by nmany by nmany numpy array containing old values of Tba.
        The values in tleads are added to Tba_.

    Returns
    -------
    Tba : ndarray
        nleads by nmany by nmany numpy array containing many-body tunneling
        amplitudes. The returned Tba corresponds to Fock basis.
    """
    si, mtype = leads.si, leads.mtype
    if Tba_ is None:
        Tba = np.zeros((si.nleads, si.nmany, si.nmany), dtype=mtype)
    else:
        # NOTE: when Tba_ is given it is updated in place (no copy is made).
        Tba = Tba_
    # Iterate over many-body states
    for j1 in range(si.nmany):
        state = si.get_state(j1)
        # Iterate over single particle states
        for j0 in tleads:
            (j3, j2), tamp = j0, tleads[j0]
            # Calculate fermion sign for added/removed electron in a given state
            # (parity of the number of occupied orbitals below j2).
            fsign = np.power(-1, sum(state[0:j2]))
            if state[j2] == 0:
                # Orbital j2 is empty: electron addition, amplitude enters directly.
                statep = list(state)
                statep[j2] = 1
                ind = si.get_ind(statep)
                if ind is None:
                    # Target state is outside the considered state space.
                    continue
                Tba[j3, ind, j1] += fsign*tamp
            else:
                # Orbital j2 is occupied: electron removal, conjugated amplitude.
                statep = list(state)
                statep[j2] = 0
                ind = si.get_ind(statep)
                if ind is None:
                    continue
                Tba[j3, ind, j1] += fsign*np.conj(tamp)
    return Tba
83c582535435564b8132d3bd9216690c127ccb79
24,207
def inplace_update_i(tensor_BxL, updates_B, i):
    """Write ``updates_B`` into column ``i`` of ``tensor_BxL``.

    B: batch_size, L: tensor length.  Returns a new tensor equal to
    ``tensor_BxL`` with position [b, i] replaced by updates_B[b] for
    every row b.
    """
    num_rows = tensor_BxL.shape[0]
    row_ids = tf.range(num_rows, dtype=tf.int64)
    col_ids = tf.fill([num_rows], tf.cast(i, tf.int64))
    scatter_indices = tf.stack([row_ids, col_ids], axis=-1)
    return tf.tensor_scatter_nd_update(tensor_BxL, scatter_indices, updates_B)
61cb7e8a030debf6ff26154d153de674645c23fe
24,208
from zipfile import ZipFile
from bert import tokenization
import tarfile
import sentencepiece as spm
from .texts._text_functions import SentencePieceTokenizer
import os


def bert(model = 'base', validate = True):
    """
    Load bert model.

    Parameters
    ----------
    model : str, optional (default='base')
        Model architecture supported. Allowed values:

        * ``'multilanguage'`` - bert multilanguage released by Google.
        * ``'base'`` - base bert-bahasa released by Malaya.
        * ``'small'`` - small bert-bahasa released by Malaya.
    validate: bool, optional (default=True)
        if True, malaya will check model availability and download if not available.

    Returns
    -------
    BERT_MODEL: malaya.bert._Model class
    """
    if not isinstance(model, str):
        raise ValueError('model must be a string')
    if not isinstance(validate, bool):
        raise ValueError('validate must be a boolean')
    model = model.lower()
    if model not in available_bert_model():
        raise Exception(
            'model not supported, please check supported models from malaya.bert.available_bert_model()'
        )
    if validate:
        # Download (or refresh) the model files when allowed to.
        check_file(PATH_BERT[model]['model'], S3_PATH_BERT[model])
    else:
        if not check_available(PATH_BERT[model]['model']):
            raise Exception(
                'bert-model/%s is not available, please `validate = True`'
                % (model)
            )
    if model == 'multilanguage':
        # Google's multilanguage release ships as a zip archive with a
        # WordPiece vocabulary and [CLS]/[SEP] special tokens.
        if not os.path.exists(PATH_BERT[model]['directory']):
            with ZipFile(PATH_BERT[model]['model']['model'], 'r') as zip:
                zip.extractall(PATH_BERT[model]['path'])
        bert_vocab = PATH_BERT[model]['directory'] + 'vocab.txt'
        bert_checkpoint = PATH_BERT[model]['directory'] + 'bert_model.ckpt'
        tokenizer = tokenization.FullTokenizer(
            vocab_file = bert_vocab, do_lower_case = False
        )
        cls = '[CLS]'
        sep = '[SEP]'
    else:
        # Malaya's bert-bahasa releases ship as tarballs with a
        # SentencePiece model and <cls>/<sep> special tokens.
        if not os.path.exists(PATH_BERT[model]['directory']):
            with tarfile.open(PATH_BERT[model]['model']['model']) as tar:
                tar.extractall(path = PATH_BERT[model]['path'])
        bert_checkpoint = PATH_BERT[model]['directory'] + 'model.ckpt'
        sp_model = spm.SentencePieceProcessor()
        sp_model.Load(PATH_BERT[model]['directory'] + 'sp10m.cased.v4.model')
        # Build the {piece: id} vocabulary from the tab-separated vocab file.
        with open(
            PATH_BERT[model]['directory'] + 'sp10m.cased.v4.vocab'
        ) as fopen:
            v = fopen.read().split('\n')[:-1]
        v = [i.split('\t') for i in v]
        v = {i[0]: i[1] for i in v}
        tokenizer = SentencePieceTokenizer(v, sp_model)
        cls = '<cls>'
        sep = '<sep>'
    bert_config = PATH_BERT[model]['directory'] + 'bert_config.json'
    bert_config = modeling.BertConfig.from_json_file(bert_config)
    model = _Model(bert_config, tokenizer, cls = cls, sep = sep)
    # Restore the TF session from the downloaded checkpoint.
    model._saver.restore(model._sess, bert_checkpoint)
    return model
c35de9c8d65c356dc00029ba527aacf0c844218e
24,209
import matplotlib.pyplot as plt


def surface(
        x_grid, y_grid, z_grid, cmap="Blues", angle=(25, 300), alpha=1.,
        fontsize=14, labelpad=10, title="", x_label="", y_label="",
        z_label="log-likelihood"):
    """
    Creates 3d surface plot given a grid for each axis.

    Arguments:

    ``x_grid``
        An NxN grid of values.
    ``y_grid``
        An NxN grid of values.
    ``z_grid``
        An NxN grid of values. z_grid determines colour.
    ``cmap``
        (Optional) Colour map used in the plot
    ``angle``
        (Optional) tuple specifying the viewing angle of the graph
    ``alpha``
        (Optional) alpha parameter of the surface
    ``fontsize``
        (Optional) the fontsize used for labels
    ``labelpad``
        (Optional) distance of axis labels from the labels
    ``title``
        (Optional) The title of the plot
    ``x_label``
        (Optional) The label of the x-axis
    ``y_label``
        (Optional) The label of the y-axis
    ``z_label``
        (Optional) The label of the z-axis

    Returns a ``matplotlib`` axes handle.
    """
    # Fixed docstring: the old one documented a nonexistent ``fill``
    # parameter and claimed a figure object was returned.  Also removed the
    # no-op self-assignments ``fontsize = fontsize`` / ``labelpad = labelpad``.
    ax = plt.axes(projection='3d')
    # Data for a three-dimensional surface; z_grid drives the colour map.
    ax.plot_surface(x_grid, y_grid, z_grid, cmap=cmap, alpha=alpha)
    ax.view_init(*angle)
    if title:
        plt.title(title, fontsize=fontsize)
    if x_label:
        ax.set_xlabel(x_label, fontsize=fontsize, labelpad=labelpad)
    if y_label:
        ax.set_ylabel(y_label, fontsize=fontsize, labelpad=labelpad)
    if z_label:
        ax.set_zlabel(z_label, fontsize=fontsize, labelpad=labelpad)
    return ax
5c7b1933e451978e9dab5006126663e7f44ef6dc
24,210
async def record_trade_volume() -> RecordTradeVolumeResponse:
    """
    This api exists for demonstration purposes so you don't have to wait
    until the job runs again to pick up new data
    """
    # Trigger the trade-volume refresh immediately instead of waiting for
    # the scheduled job's next run.
    await deps.currency_trade_service.update_trade_volumes()
    return RecordTradeVolumeResponse(success=True)
2921353360c71e85d7d5d64f6aed505e5f9a66b9
24,211
import os


def dataload_preprocessing(data_path, dataset, long_sent=800):
    """
    Load a pickled dataset, index it against the vocabulary, and one-hot
    encode the labels.

    :param data_path: base directory
    :param dataset: select dataset {'20news', 'mr', 'trec', 'mpqa'}
    :param long_sent: if dataset has long sentences, set to be constant length value
    :return: seq_length, num_classes, vocab_size, x_train, y_train, x_test,
        y_test, pre-train_word (GloVe 840b), word_idx
    """
    assert os.path.exists(data_path) is True
    x = load_pickle_data(data_path, dataset)
    data_frame, pretrain_word, len_train, n_exist_word, vocab, word_idx = x
    # Longest sentence (in words) over the whole dataset.
    max_l = int(np.max(pd.DataFrame(data_frame)["num_words"]))
    if dataset in ["reuters", "20news", "imdb", 'mr']:
        # Long-sentence corpora get truncated/padded to `long_sent` words
        # instead of the dataset maximum.
        train, test = make_idx_data(data_frame, word_idx, len_train, long_sent)
    else:
        train, test = make_idx_data(data_frame, word_idx, len_train, max_l)
    # train[:, :-1] = word idx
    # train[:, -1] = true label
    x_train = train[:, :-1]
    y_train = train[:, -1]
    x_test = test[:, :-1]
    y_test = test[:, -1]
    sequence_length = len(x_train[0])
    # make one-hot
    # NOTE(review): labels are assumed to be integers 0..num_class-1 so that
    # they can index np.eye directly — confirm when adding new datasets.
    labels = sorted(list(set(y_train)))
    one_hot = np.zeros((len(labels), len(labels)), int)
    np.fill_diagonal(one_hot, 1)
    label_dict = dict(zip(labels, one_hot))
    y_train = np.eye(len(label_dict))[y_train]
    num_class = y_train.shape[1]
    y_test = np.eye(len(label_dict))[y_test]
    vocab_size = pretrain_word.shape[0]
    print("sequence length :", sequence_length)
    print("vocab size :", vocab_size)
    print("num classes :", num_class)
    return sequence_length, num_class, vocab_size, x_train, y_train, x_test, y_test, pretrain_word, word_idx
cb3462f5ec731a631de2a6f1a70aa6d7e32f79c9
24,212
def logged_in():
    """
    Method called by Strava (redirect) that includes parameters.

    - state
    - code
    - error
    """
    error = request.args.get('error')
    state = request.args.get('state')  # currently unused beyond extraction
    if error:
        # Strava reported a problem (e.g. the user denied access).
        return render_template('login_error.html', error=error,
                               competition_title=config.COMPETITION_TITLE)
    else:
        code = request.args.get('code')
        client = Client()
        # Exchange the one-time authorization code for an access token.
        token_dict = client.exchange_code_for_token(client_id=config.STRAVA_CLIENT_ID,
                                                    client_secret=config.STRAVA_CLIENT_SECRET,
                                                    code=code)
        # Use the now-authenticated client to get the current athlete
        strava_athlete = client.get_athlete()
        athlete_model = data.update_athlete_auth(strava_athlete, token_dict)
        if not athlete_model:
            return render_template('login_error.html', error="ATHLETE_NOT_FOUND",
                                   competition_title=config.COMPETITION_TITLE)
        multiple_teams = None
        no_teams = False
        team = None
        message = None
        try:
            team = data.register_athlete_team(
                strava_athlete=strava_athlete,
                athlete_model=athlete_model,
            )
        except MultipleTeamsError as multx:
            # Athlete belongs to several eligible clubs; surface the choice.
            multiple_teams = multx.teams
            message = multx
        except NoTeamsError as noteamsx:
            no_teams = True
            message = noteamsx
        if not no_teams:
            # Registration succeeded (or a team choice is pending); log in.
            auth.login_athlete(strava_athlete)
            return redirect(url_for('user.rides'))
        else:
            return render_template(
                'login_results.html',
                athlete=strava_athlete,
                team=team,
                multiple_teams=multiple_teams,
                no_teams=no_teams,
                message=message,
                competition_title=config.COMPETITION_TITLE,
            )
71a2590f2f2fbcc67e73a2afb9180a5974d98252
24,213
def unit_string_to_cgs(string: str) -> float:
    """
    Convert a unit string to cgs.

    Parameters
    ----------
    string
        The string to convert.

    Returns
    -------
    float
        The value in cgs.
    """
    unit = string.lower()
    # distance
    if unit == 'au':
        return constants.au
    # mass
    if unit in ('solarm', 'msun'):
        return constants.solarm
    # time
    if unit in ('year', 'years', 'yr', 'yrs'):
        return constants.year
    raise ValueError('Cannot convert unit')
32b16bf6a9c08ee09a57670c82da05655cb3fd16
24,214
import os
import stat


def _make_passphrase(length=None, save=False, file=None):
    """Create a passphrase and write it to a file that only the user can read.

    This is not very secure, and should not be relied upon for actual key
    passphrases.

    :param int length: The length in bytes of the string to generate.
    :param bool save: If True, also write the passphrase to ``file``.
    :param file file: The file to save the generated passphrase in. If not
        given, defaults to 'passphrase-<the real user id>-<seconds since
        epoch>' in the top-level directory.
    """
    if not length:
        length = 40
    passphrase = _make_random_string(length)
    if save:
        ruid, euid, suid = os.getresuid()
        gid = os.getgid()
        now = mktime(localtime())
        if not file:
            # BUGFIX: this was ``str('passphrase-%s-%s' % uid, now)``, which
            # referenced an undefined name and called str() with two
            # arguments; interpolate the real uid and timestamp instead.
            filename = 'passphrase-%s-%s' % (ruid, now)
            file = os.path.join(_repo, filename)
        with open(file, 'a') as fh:
            fh.write(passphrase)
            fh.flush()
            # (the redundant fh.close() inside the with-block was removed)
        # Restrict the file to user read/write and hand it to the real user.
        os.chmod(file, stat.S_IRUSR | stat.S_IWUSR)
        os.chown(file, ruid, gid)
        log.warn("Generated passphrase saved to %s" % file)
    return passphrase
c802e74d367a9aa09bac6776637fe846e6d8b3b6
24,215
from operator import mul


def Mul(x, x_shape, y, y_shape, data_format=None):
    """Elementwise multiply with optional format-aware broadcasting.

    When ``data_format`` is given, each input is first broadcast against the
    other's shape via ``broadcast_by_format``; otherwise the inputs are used
    as-is.
    """
    if data_format:
        x_new = broadcast_by_format(x, x_shape, data_format[0], y_shape)
        y_new = broadcast_by_format(y, y_shape, data_format[1], x_shape)
    else:
        x_new = x
        y_new = y
    # BUGFIX: ``mul`` is operator.mul (a function), so the previous
    # ``mul.mul(x_new, y_new)`` raised AttributeError; call it directly.
    return mul(x_new, y_new)
b6bf343e8a3ceb5fe5a0dc8c7bd96b34ecb7ab2f
24,216
import logging


def new_authentication_challenge(usr: User) -> str:
    """
    Initiates an authentication challenge. The challenge proceeds as follows:

    1. A user (:class:`sni.user`) asks to start a challenge by calling this
       method.
    2. This methods returns a UUID, and the user has 60 seconds to change its
       teamspeak nickname to that UUID.
    3. The user notifies SNI that (s)he has done so.
    4. The server checks (see
       :meth:`sni.teamspeak.complete_authentication_challenge`), and if
       sucessful, the corresponding teamspeak client is registered in the
       database and bound to that user. The nickname is also automatically
       assigned.
    """
    logging.info(
        "Starting authentication challenge for %s", usr.character_name
    )
    challenge_nickname = utils.random_code(20)
    # Upsert: a user restarting the challenge overwrites their previous
    # pending challenge instead of creating a duplicate record.
    TeamspeakAuthenticationChallenge.objects(user=usr).update(
        set__challenge_nickname=challenge_nickname,
        set__created_on=utils.now(),
        set__user=usr,
        upsert=True,
    )
    return challenge_nickname
d0c27b211aadc94556dc285a1588ff908338b950
24,217
def create_channel(application_key):
    """Create a channel.

    Args:
      application_key: A key to identify this channel on the server side.

    Returns:
      A string id that the client can use to connect to the channel.

    Raises:
      InvalidChannelTimeoutError: if the specified timeout is invalid.
      Other errors returned by _ToChannelError
    """
    request = channel_service_pb.CreateChannelRequest()
    response = channel_service_pb.CreateChannelResponse()
    request.set_application_key(application_key)
    try:
        apiproxy_stub_map.MakeSyncCall(_GetService(), 'CreateChannel',
                                       request, response)
    except apiproxy_errors.ApplicationError as e:
        # BUGFIX: the Python-2-only ``except X, e`` syntax was replaced with
        # ``except X as e``, which works on Python 2.6+ and Python 3.
        raise _ToChannelError(e)
    return response.client_id()
8b54ac3204af4dbeaf603e788aa0b41829f4807b
24,218
def generate_new_admin_class():
    """
    Build a fresh Admin subclass, with its own dashboard view, for each
    `setup_admin` call so no state leaks between setups.
    """
    class MockDashboard(DashboardView):
        pass

    class MockAdmin(Admin):
        pass

    # Wire the per-call dashboard onto the per-call admin class.
    MockAdmin.dashboard_class = MockDashboard
    return MockAdmin
7f691e8f294bf6d678cb8f1ce59b4f12ca77c866
24,219
def for_default_graph(*args, **kwargs):
    """Creates a bookkeeper for the default graph.

    Args:
      *args: Arguments to pass into Bookkeeper's constructor.
      **kwargs: Arguments to pass into Bookkeeper's constructor.

    Returns:
      A new Bookkeeper.

    Raises:
      ValueError: If args or kwargs are provided and the Bookkeeper already
        exists.
    """
    graph = tf.get_default_graph()
    # The bookkeeper is cached in a graph collection so repeated calls on the
    # same graph return the same instance.
    collection = graph.get_collection(_BOOKKEEPER)
    if collection:
        if args or kwargs:
            # Constructor arguments cannot be honored for an existing instance.
            raise ValueError('Requesting construction of a BookKeeper that already '
                             'exists: %s %s' % (args, kwargs))
        return collection[0]
    else:
        books = BOOKKEEPER_FACTORY(*args, g=graph, **kwargs)
        graph.add_to_collection(_BOOKKEEPER, books)
        return books
649f2c33c5cdedf4d08c2ac991c0d1a044c50fe4
24,220
def check_paragraph(index: int, line: str, lines: list) -> bool:
    """Return True if line specified is a paragraph

    A paragraph starts at a non-empty first line, or at any non-empty line
    that is immediately preceded by a blank one.
    """
    if line == "":
        return False
    return index == 0 or lines[index - 1] == ""
b5737a905b32b07c0a53263255d3c581a8593dfa
24,221
from tqdm import tqdm
import os


def most_similar(train_path, test_path, images_path, results_path, cuda=False):
    """
    Nearest Neighbor Baseline: Img2Vec library
    (https://github.com/christiansafka/img2vec/) is used to obtain image
    embeddings, extracted from ResNet-18. For each test image the cosine
    similarity with all the training images is computed in order to retrieve
    similar training images. The caption of the most similar retrieved image
    is returned as the generated caption of the test image.

    :param train_path: The path to the train data tsv file with the form: "image \t caption"
    :param test_path: The path to the test data tsv file with the form: "image \t caption"
    :param images_path: The path to the images folder
    :param results_path: The folder in which to save the results file
    :param cuda: Boolean value of whether to use cuda for image embeddings
        extraction. Default: False If a GPU is available pass True
    :return: Dictionary with the results
    """
    img2vec = Img2Vec(cuda=cuda)

    # Load train data
    train_data = pd.read_csv(train_path, sep="\t", header=None)
    train_data.columns = ["id", "caption"]
    train_images = dict(zip(train_data.id, train_data.caption))

    # Get embeddings of train images
    print("Calculating visual embeddings from train images")
    train_images_vec = {}
    print("Extracting embeddings for all train images...")
    # BUGFIX: the module was imported as ``import tqdm``, so calling
    # ``tqdm(...)`` raised TypeError; import the tqdm callable instead.
    for train_image in tqdm(train_data.id):
        image = Image.open(os.path.join(images_path, train_image))
        image = image.convert('RGB')
        vec = img2vec.get_vec(image)
        train_images_vec[train_image] = vec
    print("Got embeddings for train images.")

    # Load test data
    test_data = pd.read_csv(test_path, sep="\t", header=None)
    test_data.columns = ["id", "caption"]

    # Save IDs and raw image vectors separately but aligned
    ids = [i for i in train_images_vec]
    raw = np.array([train_images_vec[i] for i in train_images_vec])

    # Normalize image vectors to avoid normalized cosine and use dot
    # NOTE(review): this divides by the component sum, not the L2 norm, so
    # the similarity below is not a true cosine — confirm before changing,
    # since fixing it would alter retrieval results.
    raw = raw / np.array([np.sum(raw, 1)] * raw.shape[1]).transpose()

    sim_test_results = {}
    for test_image in tqdm(test_data.id):
        # Get test image embedding
        image = Image.open(os.path.join(images_path, test_image))
        image = image.convert('RGB')
        vec = img2vec.get_vec(image)
        # Compute cosine similarity with every train image
        vec = vec / np.sum(vec)
        # Clone to do efficient mat mul dot
        test_mat = np.array([vec] * raw.shape[0])
        sims = np.sum(test_mat * raw, 1)
        top1 = np.argmax(sims)
        # Assign the caption of the most similar train image
        sim_test_results[test_image] = train_images[ids[top1]]

    # Save test results to tsv file
    df = pd.DataFrame.from_dict(sim_test_results, orient="index")
    df.to_csv(os.path.join(results_path, "onenn_results.tsv"), sep="\t",
              header=False)
    return sim_test_results
348064308bb942d9c762b2ebd4a29f8b22e5fe8a
24,222
from fnmatch import fnmatchcase
import os


def find_packages(where='.', exclude=()):
    """Return a list all Python packages found within directory 'where'

    'where' should be supplied as a "cross-platform" (i.e. URL-style) path;
    it will be converted to the appropriate local path syntax.  'exclude' is
    a sequence of package names to exclude; '*' can be used as a wildcard in
    the names, such that 'foo.*' will exclude all subpackages of 'foo' (but
    not 'foo' itself).
    """
    found = []
    # Breadth-first walk: each queue entry is (directory, dotted prefix).
    pending = [(convert_path(where), '')]
    while pending:
        directory, prefix = pending.pop(0)
        for entry in os.listdir(directory):
            path = os.path.join(directory, entry)
            # A package is a dot-free directory containing __init__.py.
            is_package = (
                '.' not in entry
                and os.path.isdir(path)
                and os.path.isfile(os.path.join(path, '__init__.py'))
            )
            if is_package:
                found.append(prefix + entry)
                pending.append((path, prefix + entry + '.'))
    # Filter out excluded patterns; 'ez_setup' is always excluded.
    for pattern in list(exclude) + ['ez_setup']:
        found = [name for name in found if not fnmatchcase(name, pattern)]
    return found
f8c7ea3641506fb013bcb90fe8ffd186f737dc89
24,223
import logging


def mask(node2sequence, edge2overlap, masking: str = "none"):
    """If any of the soft mask or hard mask are activated, mask

    :param node2sequence: DataFrame with columns ``name`` and ``sequence``.
    :param edge2overlap: DataFrame with columns ``u``, ``v`` and ``overlap``
        (overlap in bases between nodes u and v).
    :param str masking: Type of masking to apply. Options: hard, soft, none
        (Default value = "None") .
    :return: DataFrame with columns ``name`` and ``sequence`` where the
        overlapping ends of each sequence have been masked.
    """
    logging.info('Masking sequences')
    if masking == 'none':
        return node2sequence
    # Compose a dataframe of name, sequence, bases to trim to the left
    # and bases to trim to the right
    logging.info('Computing bases to trim to the right and to the left')
    # A node's right end overlaps its successor (u side); its left end
    # overlaps its predecessor (v side).  Outer merges keep nodes with no
    # edges; their missing overlaps become 0 via fillna.
    complete = node2sequence.merge(
        edge2overlap[['u', 'overlap']]
        .rename(columns={'u': 'name', 'overlap': 'mask_right'}),
        on=['name'],
        how='outer'
    ).merge(
        edge2overlap[['v', 'overlap']]
        .rename(columns={'v': 'name', 'overlap': 'mask_left'}),
        on=['name'],
        how='outer'
    ).fillna(0).astype({'mask_right': np.int64, 'mask_left': np.int64})
    logging.info('Removing negative masking')
    # Negative overlaps denote gaps rather than shared bases: nothing to mask.
    complete['mask_right'] = complete.mask_right\
        .map(lambda x: x if x > 0 else 0)
    complete['mask_left'] = complete.mask_left\
        .map(lambda x: x if x > 0 else 0)
    if masking == "hard":
        logging.info("Hard masking sequences")
        complete['sequence'] = complete.apply(
            lambda x: hard_mask(x.sequence, x.mask_left, x.mask_right),
            axis=1
        )
    elif masking == "soft":
        logging.info("Soft masking sequences")
        complete['sequence'] = complete.apply(
            lambda x: soft_mask(x.sequence, x.mask_left, x.mask_right),
            axis=1
        )
    logging.info('Tidying up')
    node2sequence_masked = complete[['name', 'sequence']]\
        .reset_index(drop=True)
    logging.info('Done')
    return node2sequence_masked
5f10491773b4b60a844813c06a6ac9e810162daa
24,224
import os


def load_patch_for_test_one_subj(file_path, sub_i, patch_shape, over_lap=10,
                                 modalities=['MR_DWI', 'MR_Flair', 'MR_T1', 'MR_T2'],
                                 mask_sym='MR_MASK', suffix='.nii.gz', use_norm=True):
    """
    for test, split the full image, similar to load_patch_for_epoch,
    may merge to one function.

    :param file_path: base directory containing one subdirectory per subject
    :param sub_i: subject identifier (used as the subdirectory name)
    :param patch_shape: shape of the patches to extract
    :param over_lap: overlap (in voxels) between neighbouring patches
    :param modalities: image modalities to load, one channel per modality
    :param mask_sym: filename stem of the brain mask volume
    :param suffix: image file suffix
    :param use_norm: if True, load (and create if missing) the normalized
        version of each modality
    :return: (patches, indices) — patches restricted to the modality
        channels, and the grid indices of the patches that were kept
    """
    # NOTE(review): modalities uses a mutable default list; callers must not
    # mutate it in place.
    # first load image, mask.
    mask_name = os.path.join(file_path, str(sub_i), mask_sym + suffix)
    if os.path.exists(mask_name):
        mask_img = read_img(mask_name)
    channels_one_sub = []
    for i in range(len(modalities)):
        img_name = os.path.join(file_path, str(sub_i), modalities[i] + suffix)
        img_name_norm = os.path.join(file_path, str(sub_i), modalities[i] + '_norm' + suffix)
        if not os.path.exists(img_name):
            raise Exception('cannot find the path %s!' % img_name)
        if not os.path.exists(img_name_norm):
            # may raise error but my data has mask.
            write_norm_img(img_name, img_name_norm, mask_img)
        elif use_norm:
            # if exist norm data and use, replace to load it.
            img_name = img_name_norm
        channels_one_sub.append(read_img(img_name))
    if not os.path.exists(mask_name):
        # No mask available: treat the whole volume as foreground.
        mask_img = np.ones(shape=channels_one_sub[0].shape, dtype=np.float)
    channels_one_sub.append(mask_img)
    channels_one_sub = np.asarray(channels_one_sub)
    # second sample patch.
    indices = compute_patch_indices(channels_one_sub[0].shape, patch_shape, over_lap)
    patches, chosen = get_patches_according_to_indices(channels_one_sub, patch_shape,
                                                       np.transpose(indices), True, True)
    # Keep only the patch positions that were actually selected.
    indices = indices[chosen]
    return np.asarray(patches[:, :len(modalities)]), np.asarray(indices)
901e449da0d3defa13c5b5d401775d7d217d78e7
24,225
def extract_el_from_group(group, el):
    """Extract an element group from a group.

    :param group: list
    :param el: element to be extracted
    :return: group without the extracted element, the extracted element
    """
    remaining = []
    for item in group:
        if item != el:
            remaining.append(item)
    # Every occurrence of `el` is removed; it is returned as its own group.
    return [remaining, [el]]
ed6598fd0d7dcb01b35a5c2d58c78d8c2a2397f5
24,226
def example_function_with_shape(a, b):
    """Example function for unit checks: return the product of ``a`` and ``b``."""
    return a * b
33403e6f67d4d6b18c92b56996e5e6ed21f6b3ad
24,227
from typing import Mapping
from typing import Any


def fields(
    builder: DataclassBuilder, *, required: bool = True, optional: bool = True
) -> "Mapping[str, Field[Any]]":
    """Get a dictionary of the given :class:`DataclassBuilder`'s fields.

    .. note::

        This is not a method of :class:`DataclassBuilder` in order to not
        interfere with possible field names.  This function will use special
        private methods of :class:`DataclassBuilder` which are excepted from
        field assignment.

    :param builder:
        The dataclass builder to get the fields for.
    :param required:
        Set to False to not report required fields.
    :param optional:
        Set to False to not report optional fields.

    :return:
        A mapping from field names to actual :class:`dataclasses.Field`'s
        in the same order as the `builder`'s underlying
        :func:`dataclasses.dataclass`.
    """
    # pylint: disable=protected-access
    return builder._fields(required=required, optional=optional)
47b3bd86076ac14f9cca2f24fedf665370c5668f
24,228
from typing import Dict
from typing import List


def gemm(node: NodeWrapper, params: Dict[str, np.ndarray], xmap: Dict[str, XLayer]) -> List[XLayer]:
    """
    ONNX Gemm to XLayer Dense (+ Scale) (+ BiasAdd) conversion function

    Compute Y = alpha * A' * B' + beta * C

    See https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm

    :param node: wrapped ONNX Gemm node
    :param params: mapping of initializer names to their numpy values
    :param xmap: mapping of tensor names to already-converted XLayers
    :return: list of the XLayers created (transposes, Dense, optional BiasAdd)
    :raises NotImplementedError: when alpha or beta differs from 1.0
    """
    logger.info("ONNX Gemm-> XLayer Dense (+ Scale) (+ BiasAdd)")
    assert len(node.get_outputs()) == 1
    assert len(node.get_inputs()) in [2, 3]
    name = node.get_outputs()[0]
    bottoms = node.get_inputs()
    node_attrs = node.get_attributes()
    # input activation A; layout NC, or CN when transA is set
    iX = xmap[bottoms[0]]  # NC or CN
    _, in_c = iX.shapes
    # weight input B (2-D initializer)
    W_name = bottoms[1]
    wX = xmap[W_name]
    assert len(wX.shapes) == 2
    # optional bias input C
    B_name = bottoms[2] if len(bottoms) == 3 else None
    bX = xmap[B_name] if len(bottoms) == 3 else None
    # ONNX defaults: alpha = beta = 1.0, no transposition
    alpha = node_attrs['alpha'] if 'alpha' in node_attrs else 1.0
    beta = node_attrs['beta'] if 'beta' in node_attrs else 1.0
    trans_A = node_attrs['transA'] > 0 if 'transA' in node_attrs else False
    trans_B = node_attrs['transB'] > 0 if 'transB' in node_attrs else False
    if alpha != 1.0:
        raise NotImplementedError("Alpha != 1.0 not supported in ONNX Gemm to"
                                  " XLayer Dense conversion")
    if beta != 1.0:
        raise NotImplementedError("Beta != 1.0 not supported in ONNX Gemm to"
                                  " XLayer Dense conversion")
    # Quant_info (optional) — Vitis-AI quantization attributes are forwarded
    vai_quant_in = node_attrs['vai_quant_in'] \
        if 'vai_quant_in' in node_attrs else []
    vai_quant_out = node_attrs['vai_quant_out'] \
        if 'vai_quant_out' in node_attrs else []
    vai_quant_weights = node_attrs['vai_quant_weights'] \
        if 'vai_quant_weights' in node_attrs else []
    vai_quant_biases = node_attrs['vai_quant_biases'] \
        if 'vai_quant_biases' in node_attrs else []
    vai_quant = node_attrs['vai_quant'] \
        if 'vai_quant' in node_attrs else []
    # split quant markers: bias quantization belongs to the BiasAdd layer,
    # everything else to the Dense layer
    vai_quant_dense = [a for a in vai_quant if str(a) != 'vai_quant_biases']
    vai_quant_bias_add = [a for a in vai_quant if str(a) == 'vai_quant_biases']
    Xs = []
    if trans_A:
        # iX is in CN -> Transform to NC
        iX = xlf.get_xop_factory_func('Transpose')(
            op_name=iX.name + '_transpose',
            axes=[1, 0],
            input_layer=iX,
            onnx_id=name
        )
        Xs.append(iX)
    if not trans_B:
        # iX is in IO -> Transform to OI
        # NOTE(review): comment refers to the weight layer wX, not iX;
        # XLayer Dense expects OI layout, so untransposed B must be flipped
        wX = xlf.get_xop_factory_func('Transpose')(
            op_name=W_name + '_transpose',
            axes=[1, 0],
            input_layer=wX,
            onnx_id=name
        )
        Xs.append(wX)
    # output units = first dim of the (possibly transposed) OI weight layer
    units = wX.shapes[0]
    # when a bias exists, the Dense layer gets a derived name so the final
    # output name is carried by the BiasAdd layer
    dense_name = name if B_name is None else name + '_Dense'
    X = xlf.get_xop_factory_func('Dense')(
        op_name=px.stringify(dense_name),
        units=units,
        input_layer=iX,
        weights_layer=wX,
        vai_quant=vai_quant_dense,
        vai_quant_in=vai_quant_in,
        vai_quant_out=vai_quant_out,
        vai_quant_weights=vai_quant_weights,
        onnx_id=name
    )
    Xs.append(X)
    if B_name is not None:
        bias_add_X = xlf.get_xop_factory_func('BiasAdd')(
            op_name=px.stringify(name),
            axis=1,
            input_layer=X,
            bias_layer=bX,
            vai_quant=vai_quant_bias_add,
            vai_quant_biases=vai_quant_biases,
            onnx_id=name
        )
        Xs.append(bias_add_X)
    return Xs
dbc257c98fa4e4a9fdb14f27e97132d77978f0c2
24,229
from datetime import datetime


def check_response(game_id, response):
    """Check a Supremacy 1914 RPC response for server-switch errors.

    :param game_id: id of the game the response belongs to
    :param response: decoded JSON-RPC response dict
    :return: True when the response is usable; False when the game moved to a
        new host (the caller should retry against the updated host)
    :raises GameDoesNotExistError: when the server reports the game is gone
    """
    if response["result"]["@c"] == "ultshared.rpc.UltSwitchServerException":
        game = Game.query.filter(Game.game_id == game_id).first()
        # NOTE(review): `game` may be None when the id is unknown locally —
        # both branches below would then raise AttributeError; confirm that
        # callers only pass ids that exist in the database.
        if "newHostName" in response["result"]:
            # the game moved to another backend host: persist the new URL
            print("new host: " + response["result"]["newHostName"])
            game.game_host = "http://" + response["result"]["newHostName"]
            db.session.commit()
        else:
            # no replacement host: mark the game as ended and cancel its job
            print("Game does not exist")
            game.end_of_game = True
            game.end_at = datetime.now()
            db.session.commit()
            job = scheduler.get_job(str(game.game_id))
            if job is not None:
                job.remove()
            raise GameDoesNotExistError("Game %s is not found" % game_id + \
                "on the Supremacy 1914 server")
        return False
    return True
a5de41170d13022393c15d30816cc3c51f813f36
24,230
from sys import path
# NOTE(review): `sys.path` is a list and has no basename/join/splitext —
# every `path.*` call below expects `os.path`. This import looks wrong;
# confirm the intended import is `from os import path`.
import zipfile


def extract_to_dst(src, dst):
    """extract addon src zip file to destination.

    :param src: path to the addon .zip archive
    :param dst: directory the addon contents are copied into
    :return: list of items that were copied into ``dst``
    :raises Exception: when the archive's top level mixes files with folders
        (not a standard addon package)
    """
    copied_items = []
    zip_file = path.basename(src)
    zip_name, _ = path.splitext(zip_file)
    # extract into a per-archive cache folder first, then copy from there
    cache_path = path.join(root_path, 'cache', zip_name)
    with zipfile.ZipFile(src, 'r') as z:
        # create folder and extract to cache
        mkdir(cache_path)
        z.extractall(cache_path)
    # strip OS metadata (e.g. .DS_Store) before inspecting the layout
    trim_os_hidden_files(cache_path)
    top_levels = [path.join(cache_path, c) for c in listdir(cache_path)]
    if len(top_levels) > 1:
        # zip's top-level has multiple files or folder
        # if it contains only folders, we should copy everything to dst
        # otherwise, this is not a standard addon package, so raise an exception
        if not only_dirs_or_not(cache_path):
            remove_src(cache_path)
            raise Exception('addon-zip contents contain file, this is not a standard addon.')
        results = copy_contents(cache_path, dst)
        copied_items.extend(results)
    elif len(top_levels) == 1:
        if not only_dirs_or_not(top_levels[0]):
            # extracted-folder which contains files and folders.
            # it means that we only should copy this folder to dst
            result = copy_src_to_dst(top_levels[0], dst)
            copied_items.append(result)
        else:
            # extracted-folder which contains only folders.
            # it means that we should copy every sub-folders to dst
            results = copy_contents(top_levels[0], dst)
            copied_items.extend(results)
    # delete cache folder before return
    remove_src(cache_path)
    return copied_items
1ddaa1fd2c1697f166dfdceb58490c9ceb963b73
24,231
def get_signal_handler():
    """Return the singleton signal handler, constructing it on first use."""
    # lazily build the handler the first time anyone asks for it
    if not _signal_handler_:
        construct_signal_handler()
    return _signal_handler_[-1]
bd74ddb1df0c316d4e62e21259e80c0213177aeb
24,232
def post_rule(team_id):
    """Add a new rule.

    .. :quickref: POST; Add a new rule.

    **Example request**:

    .. sourcecode:: http

      POST /v1/teams/66859c4a-3e0a-4968-a5a4-4c3b8662acb7/rules HTTP/1.1
      Host: example.com
      Accept: application/json

      {
        "name": "Servers",
        "description": "Compute the QOS of our servers"
      }

    **Example response**:

    .. sourcecode:: http

      HTTP/1.1 201 CREATED

      {
        "checks": [],
        "createdAt": "2018-05-17T12:01:09Z",
        "description": "Compute the QOS of our servers",
        "id": "ff130e9b-d226-4465-9612-a93e12799091",
        "name": "Servers",
        "updatedAt": "2018-11-09T15:33:06Z"
      }

    :resheader Content-Type: application/json
    :status 201: the created rule
    """
    # only team managers/editors may create rules
    if not TeamPermission.is_manager_or_editor(team_id):
        abort(403)

    payload = get_payload()
    # bind the rule to the team from the URL, overriding any payload value
    payload["team_id"] = team_id

    rule = RuleController.create(payload)
    return jsonify(format_rule(rule)), 201
687873cb4398877afb6ed444263f4990039a9f6d
24,233
def intents(interface):
    """
    Return an intent-producing provider for *interface*.

    :param interface: The interface for which to create a provider.

    :returns: An object whose method names mirror the interface's methods;
        each call produces an Intent for use with the Effect library.
    """
    return getattr(interface, '_ziffect_intents')
4e514424721ba2fc2cf4261cc856f6984d3781de
24,234
def model(X, Y, learning_rate=0.3, num_iterations=30000, print_cost=True, is_plot=True, lambd=0, keep_prob=1):
    """
    Train a three-layer network: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.

    Parameters:
        X - input data, shape (2, number of examples)
        Y - labels (0 = blue, 1 = red), shape (1, number of examples)
        learning_rate - gradient-descent learning rate
        num_iterations - number of training iterations
        print_cost - print the cost every 10000 iterations (recorded every 1000)
        is_plot - plot the gradient-descent cost curve
        lambd - L2 regularization hyperparameter (0 disables it)
        keep_prob - probability of keeping a node during dropout (1 disables it)

    Returns:
        parameters - the learned parameters
    """
    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 20, 3, 1]

    # initialize parameters
    parameters = reg_utils.initialize_parameters(layers_dims)

    # training loop
    for i in range(0, num_iterations):
        # forward propagation, with or without dropout
        if keep_prob == 1:
            # no dropout
            a3, cache = reg_utils.forward_propagation(X, parameters)
        elif keep_prob < 1:
            # dropout: randomly drop nodes with probability 1 - keep_prob
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)
        else:
            print("keep_prob参数错误!程序退出。")
            # bug fix: the original bare `exit` was a name reference, not a
            # call, so execution fell through to an undefined `a3`;
            # raise explicitly instead.
            raise ValueError("keep_prob must be in (0, 1].")

        # compute the cost, with or without L2 regularization
        if lambd == 0:
            # no L2 regularization
            cost = reg_utils.compute_cost(a3, Y)
        else:
            # L2 regularization
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)

        # backward propagation
        # L2 regularization and dropout could be combined, but this
        # experiment never uses both at once.
        assert (lambd == 0 or keep_prob == 1)

        # the two regularizers
        if (lambd == 0 and keep_prob == 1):
            # neither L2 regularization nor dropout
            grads = reg_utils.backward_propagation(X, Y, cache)
        elif lambd != 0:
            # L2 regularization, no dropout
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif keep_prob < 1:
            # dropout, no L2 regularization
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)

        # update parameters
        parameters = reg_utils.update_parameters(parameters, grads, learning_rate)

        # record and optionally print the cost
        if i % 1000 == 0:
            # record the cost
            costs.append(cost)
            if (print_cost and i % 10000 == 0):
                # print the cost
                print("第" + str(i) + "次迭代,成本值为:" + str(cost))

    # optionally plot the cost curve
    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (x1,000)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

    # return the learned parameters
    return parameters
39130fffd282a8f23f29a8967fe0e15386817ed1
24,235
from collections import OrderedDict
try:
    from urllib.request import urlretrieve
except ImportError:
    # Python 2 fallback
    from urllib import urlretrieve
import scipy.ndimage as nd


def load_phoenix_stars(logg_list=PHOENIX_LOGG, teff_list=PHOENIX_TEFF, zmet_list=PHOENIX_ZMET, add_carbon_star=True, file='bt-settl_t400-7000_g4.5.fits'):
    """
    Load Phoenix stellar templates.

    :param logg_list: surface gravities to keep
    :param teff_list: effective temperatures to keep
    :param zmet_list: metallicities to keep
    :param add_carbon_star: also append a carbon-star template; values > 1
        additionally Gaussian-smooth its flux with that sigma
    :param file: FITS file with the template grid
    :return: OrderedDict mapping template label to SpectrumTemplate
    """
    # Alternative grids (kept for reference):
    # file='bt-settl_t400-5000_g4.5.fits'
    # file='bt-settl_t400-3500_z0.0.fits'
    # (bug fix: an empty `try:`/`except:` pair preceding this block was a
    # syntax error and has been removed)
    try:
        hdu = pyfits.open(os.path.join(GRIZLI_PATH, 'templates/stars/', file))
    except Exception:
        # local copy missing: fetch it from the grizli-config repository
        #url = 'https://s3.amazonaws.com/grizli/CONF'
        #url = 'https://erda.ku.dk/vgrid/Gabriel%20Brammer/CONF'
        url = ('https://raw.githubusercontent.com/gbrammer/' +
               'grizli-config/master')
        print('Fetch {0}/{1}'.format(url, file))
        #os.system('wget -O /tmp/{1} {0}/{1}'.format(url, file))
        res = urlretrieve('{0}/{1}'.format(url, file),
                          filename=os.path.join('/tmp', file))
        hdu = pyfits.open(os.path.join('/tmp/', file))

    tab = GTable.gread(hdu[1])

    tstars = OrderedDict()
    N = tab['flux'].shape[1]
    for i in range(N):
        teff = tab.meta['TEFF{0:03d}'.format(i)]
        logg = tab.meta['LOGG{0:03d}'.format(i)]
        try:
            met = tab.meta['ZMET{0:03d}'.format(i)]
        except KeyError:
            # older grids carry no metallicity keyword; assume solar
            met = 0.

        # keep only the requested grid points
        if (logg not in logg_list) | (teff not in teff_list) | (met not in zmet_list):
            #print('Skip {0} {1}'.format(logg, teff))
            continue

        label = 'bt-settl_t{0:05.0f}_g{1:3.1f}_m{2:.1f}'.format(teff, logg, met)

        tstars[label] = SpectrumTemplate(wave=tab['wave'],
                                         flux=tab['flux'][:, i], name=label)

    if add_carbon_star:
        cfile = os.path.join(GRIZLI_PATH, 'templates/stars/carbon_star.txt')
        sp = read_catalog(cfile)
        if add_carbon_star > 1:
            # optional smoothing of the carbon-star spectrum
            cflux = nd.gaussian_filter(sp['flux'], add_carbon_star)
        else:
            cflux = sp['flux']

        tstars['bt-settl_t05000_g0.0_m0.0'] = SpectrumTemplate(wave=sp['wave'],
                                                               flux=cflux, name='carbon-lancon2002')

    return tstars
39807e591acf1a7338a7e36f5cd50ffffa1ff66b
24,236
def write_ini(locStr_ini_file_path, locStr_ini):
    """
    .. _write_ini :

    Write the given string into the given INI file path.

    Parameters
    ----------
    locStr_ini_file_path : str
        The file full path of the INI file. If the extension ".ini" is not
        included, it would be added to the path.

    locStr_ini : str
        The string to be written into the INI file.

    Returns
    -------
    bool
        Returns True if deemed successful (no exception).
        Returns False if deemed unsuccessful (on exception).

    Examples
    --------
    >>> write_ini('C:\\Temp\\testini', '[User configurations]\\nsome string')
    2017-11-21, 16:24:40:INI file save start
    2017-11-21, 16:24:40:INI file save complete
    Out[51]: True

    Content of the INI file would be:

    | '[User configurations]
    | some string'
    """
    print(date_time_now() + 'INI file save start')

    try:
        # append '.ini' unless the path already ends with it (case-insensitive)
        if locStr_ini_file_path[-4:].lower() != '.ini':
            locStr_ini_file_path = locStr_ini_file_path + '.ini'

        # bug fix: the file handle previously leaked when write() raised;
        # the context manager guarantees closure on any exit path
        with open(locStr_ini_file_path, 'w') as locIni_file:
            locIni_file.write(locStr_ini)

        print(date_time_now() + 'INI file save complete')
        return True
    except Exception:
        # keep the broad catch (the caller only wants success/failure),
        # but no longer swallow KeyboardInterrupt/SystemExit
        print(date_time_now() + 'INI file save failed')
        return False
1376f50fa9d91c797cbaccc4066c379e0c085aea
24,237
def create_form(data, form_idx=0):
    """
    Create PDB structure forms.

    form_idx = 0 is apo; 1 - holo1; and 2 - holo2

    Note: Only works for homodimers.
    """
    # deep-copy so the caller's BioPandas object is never mutated
    data_out = deepcopy(data)

    hetatm_records = data_out.df['HETATM']
    if form_idx == 1:
        # holo1: keep only the first of the two identical ligands
        data_out.df['HETATM'] = hetatm_records[:int(hetatm_records.shape[0] / 2)]
    elif form_idx == 0:
        # apo: drop every HETATM record but keep the column layout
        data_out.df['HETATM'] = pd.DataFrame(columns=hetatm_records.columns)
    # form_idx == 2 (holo2) keeps both ligands unchanged
    return data_out
45058e1770519a51677c47a7b78d1b1c2ca2c554
24,238
from typing import Dict
import logging


def get_verbosity(parsed_arguments: Dict) -> int:
    """
    Gets the verbosity level from parsed arguments.

    Assumes parameter is being parsed similarly to:
    ```
    parser.add_argument(f"-{verbosity_parser_configuration[VERBOSE_PARAMETER_KEY]}", action="count", default=0,
                        help="increase the level of log verbosity (add multiple increase further)")
    ```
    Parsed arguments can be gathered into an appropriate dict as show below:
    ```
    assert type(argument_parser) is ArgumentParser
    parsed_arguments = {x.replace("_", "-"): y for x, y in vars(argument_parser.parse_args(arguments)).items()}
    ```
    :param parsed_arguments: parsed arguments in dictionary form
    :return: the verbosity level implied
    :raises ValueError: if the logging level is too high
    """
    verbosity_parameter = verbosity_parser_configuration[VERBOSE_PARAMETER_KEY]
    # robustness fix: default to 0 occurrences when the flag is absent —
    # `.get(key)` previously returned None and crashed in int(None)
    verbose_count = int(parsed_arguments.get(verbosity_parameter, 0))
    # each -v lowers the effective level by one logging step (10)
    verbosity = verbosity_parser_configuration[DEFAULT_LOG_VERBOSITY_KEY] - verbose_count * 10
    if verbosity < logging.DEBUG:
        raise ValueError("Cannot provide any further logging - reduce log verbosity")
    assert verbosity <= logging.CRITICAL
    return verbosity
b0bf38c8883335f76000a29dcdefe46eccc5040a
24,239
def _try_update_version_variable(library_versions_lines, variable_name, new_version):
    """Rewrite the `val <variable_name> = Version(...)` line in-place.

    :param library_versions_lines: mutable list of LibraryVersions.kt lines
    :param variable_name: upper-snake-case version variable to look for
    :param new_version: version string to write
    :return: True when a line was rewritten; False when the variable was not
        found or should_update_version_in_library_versions_kt declined
    """
    for i, cur_line in enumerate(library_versions_lines):
        # Skip any line that doesn't declare a version
        if 'Version(' not in cur_line:
            continue
        declared_name = cur_line.split('val ')[1].split(' =')[0]
        if declared_name != variable_name:
            continue
        if not should_update_version_in_library_versions_kt(cur_line, new_version):
            # matched, but this version must not be bumped — stop searching
            return False
        # Found the correct variable to modify
        if declared_name == "COMPOSE":
            # Compose reads an env override at build time
            library_versions_lines[i] = (" val COMPOSE = Version("
                                         "System.getenv(\"COMPOSE_CUSTOM_VERSION\") "
                                         "?: \"" + new_version + "\")\n")
        else:
            library_versions_lines[i] = " val " + declared_name + \
                                        " = Version(\"" + new_version + "\")\n"
        return True
    return False


def update_versions_in_library_versions_kt(group_id, artifact_id, old_version):
    """Updates the versions in the LibraryVersions.kt file.

    This will take the old_version and increment it to find the appropriate
    new version.

    Args:
        group_id: group_id of the existing library
        artifact_id: artifact_id of the existing library
        old_version: old version of the existing library

    Returns:
        True if the version was updated, false otherwise.
    """
    group_id_variable_name = group_id.replace("androidx.", "").replace(".", "_").upper()
    artifact_id_variable_name = artifact_id.replace("androidx.", "").replace("-", "_").upper()
    new_version = increment_version(old_version)
    # Special case Compose because it uses the same version variable.
    if group_id_variable_name.startswith("COMPOSE"):
        group_id_variable_name = "COMPOSE"

    # Read all lines (with-statement fixes the previously unclosed handle)
    with open(LIBRARY_VERSIONS_FP, 'r') as f:
        library_versions_lines = f.readlines()

    # First check any artifact ids with unique versions, then fall back to
    # group-id-level version variables. (The two passes previously duplicated
    # ~25 lines of identical matching logic.)
    updated_version = _try_update_version_variable(
        library_versions_lines, artifact_id_variable_name, new_version)
    if not updated_version:
        updated_version = _try_update_version_variable(
            library_versions_lines, group_id_variable_name, new_version)

    # Write the (possibly modified) lines back
    with open(LIBRARY_VERSIONS_FP, 'w') as f:
        f.writelines(library_versions_lines)

    return updated_version
0f579f10c6e675330f332b1fe0d790e25448d23f
24,240
def GetIdpCertificateAuthorityDataFlag():
    """Anthos auth token idp-certificate-authority-data flag, specifies the
    PEM-encoded certificate authority certificate for OIDC provider."""
    help_text = 'PEM-encoded certificate authority certificate for OIDC provider.'
    return base.Argument(
        '--idp-certificate-authority-data',
        required=False,
        help=help_text)
99fa02a0998a1c5e58baa8b334561d715ca4421a
24,241
def MapBasinKeysToJunctions(DataDirectory, FilenamePrefix):
    """
    Function to write a dict of basin keys vs junctions

    Args:
        DataDirectory (str): the data directory
        fname_prefix (str): the name of the DEM

    Returns:
        A dictionary with the basin key as the key and the junction as the value

    Author: FJC
    """
    # load the channel data
    ChannelData = ReadChannelData(DataDirectory, FilenamePrefix)
    #print BasinChannelData
    # load the hillslopes data
    HillslopeData = ReadHillslopeData(DataDirectory, FilenamePrefix)

    keys = ChannelData.basin_key.unique()
    junctions = HillslopeData.BasinID.unique()

    # NOTE: pairs keys with junctions by position — assumes both tables list
    # basins in the same order
    basin_dict = {}
    for idx, basin_key in enumerate(keys):
        print(junctions[idx], basin_key)
        basin_dict[basin_key] = junctions[idx]

    print(basin_dict)
    return basin_dict
adb206e711373c07ac28e477cf8dbf842af33d91
24,242
def password_renew(_name: str, old_password: str, new_password: str):
    """Change a user's password after verifying the old one.

    :param _name: account name
    :param old_password: current plain-text password
    :param new_password: replacement plain-text password
    :return: success message dict
    :raises HTTPException: 401 when the user does not exist or the old
        password does not match
    """
    old_dat = old_password
    new_dat = new_password
    # only sha256 digests are stored, never the plain text
    new_hs = sha256(new_dat.encode()).hexdigest()
    old_hs = sha256(old_dat.encode()).hexdigest()

    # bug fix: the original conditions (`User.name != _name`,
    # `User.password != old_hs`) matched *any other* row, so they were truthy
    # whenever a second user existed; look up this specific user instead.
    user = User.select().where(User.name == _name).first()
    if user is None:
        raise HTTPException(status_code=401, detail='すでにユーザーは存在していません')
    elif user.password != old_hs:
        raise HTTPException(
            status_code=401, detail='パスワードが間違っていますもう一度確認してください')
    else:
        User.update(password=new_hs).where(User.name == _name).execute()
        return {'message': '新しいパスワードになりました'}
c8ecc0d905b190535e3770838eeec37159dea95b
24,243
from typing import Callable
from typing import List
from typing import Tuple
from typing import Dict
import requests


def fetch_abs(compare_res_fn: Callable[[res_arg_dict], List[BadResult]],
              paper_id: str) -> Tuple[Dict, List[BadResult]]:
    """Fetch an abs page."""
    ng_url = ng_abs_base_url + paper_id
    legacy_url = legacy_abs_base_url + paper_id

    # metadata shared by both the comparison input and the returned config
    common = {'ng_url': ng_url,
              'legacy_url': legacy_url,
              'paper_id': paper_id,
              'id': paper_id}

    res_dict: res_arg_dict = dict(common,
                                  ng_res=requests.get(ng_url),
                                  legacy_res=requests.get(legacy_url))
    compare_config = dict(common)
    return compare_config, list(compare_res_fn(res_dict))
a7e239b06213684cda34935956bf1ad1ec29ea6e
24,244
def is_happy(number: int) -> bool:
    """Return True when *number* is a happy number.

    Repeatedly applies ``thing`` until a value repeats: the sequence either
    reaches 1 (happy, since 1 maps to itself) or falls into a cycle (unhappy).

    :param number: the number to test
    :return: True when the iteration terminates at 1
    """
    # perf fix: a set gives O(1) repeat detection; the original scanned a
    # list with .count() on every step (O(n) per step, O(n^2) overall)
    seen = set()
    result = thing(number)
    while result not in seen:
        seen.add(result)
        result = thing(result)
    return result == 1
80a96325c28c346b2b23b5c6fb67c9cc62d0477c
24,245
def self_play(n_iterations=10, ben_steps=1000, training_steps=int(1e4), n_eval_episodes=100, **kwargs):
    """
    Returns an agent that learns from playing against himself from random to optimal play.

    :param n_iterations: number of self-play generations
    :param ben_steps: benchmark steps forwarded to ``benchmark``
    :param training_steps: training steps per generation
    :param n_eval_episodes: evaluation episodes per benchmark
    :param kwargs: forwarded to the ``RLAgent`` constructor
    :return: the trained ``RLAgent``
    """
    # generation 0: the learner trains against uniformly random play
    agents = [RLAgent(**kwargs), RandomAgent()]
    for _ in range(n_iterations):
        benchmark(agents[0], agents[1], ben_steps, training_steps, n_eval_episodes)
        # adding the trained agent as the new opponent to exploit
        agents[1] = opposite_agent(agents[0])
        # NOTE(review): presumably restores the opponent's exploration rate —
        # confirm `original_eps` semantics in RLAgent
        agents[1].eps = agents[0].original_eps
    return agents[0]
b38d593c53ecc528a3932fe8eba2091fdcd68067
24,246
import json
import base64
import time


def auth(event, context):
    """
    Return the plain text session key used to encrypt the CAN Data File

    event dictionary input elements:
    - CAN Conditioner Serial Number
    - Encrypted data

    Prerequisites:
    The CAN Conditioner must be provisioned with a securely stored key
    tied to the serial number.
    """
    #Determine the identity of the requester.
    requester_data = event["requestContext"]
    if requester_data["authorizer"]["claims"]["email_verified"]:
        identity_data = event["requestContext"]["identity"]
        ip_address = identity_data["sourceIp"]
        email = requester_data["authorizer"]["claims"]["email"].lower()
    else:
        return response(400, "Email not verified.")

    #Check if email is the uploader or has share access
    # NOTE(review): `item` is not assigned until the DynamoDB lookup below —
    # this check raises NameError as written; it probably belongs after
    # get_item. Confirm intended ordering.
    if not email in item['uploader'] and not email in item['access_list']:
        return response(400, "You do not have permission to decrypt.")

    #load the event body into a dictionary
    body = json.loads(event['body'])

    # Test to be sure the necessary elements are present
    try:
        assert 'serial_number' in body
        assert 'encrypted_session_key' in body
    except AssertionError:
        return response(400, "Missing required parameters.")

    # Lookup the data needed from the unique CAN Logger by its serial number
    dbClient = boto3.resource('dynamodb', region_name=region)
    table = dbClient.Table("CANConditioners")
    try:
        item = table.get_item(
            Key = {'id': body['serial_number'],}
        ).get('Item')
    except:
        return response(400, "Unable to retrieve table item.")

    # load the device's public key which was stored as a base64 encoded binary
    device_public_key_bytes = base64.b64decode(item['device_public_key']).decode('ascii')
    # NOTE(review): b'\x04' + str raises TypeError — the .decode('ascii')
    # above likely should not be there; confirm against the provisioning code.
    device_bytes = b'\x04' + device_public_key_bytes
    device_public_key = ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP256R1(),device_bytes)

    # Decrypt the data key before using it
    cipher_key = base64.b64decode(item['encrypted_data_key'])
    data_key_plaintext = decrypt_data_key(cipher_key)
    if data_key_plaintext is None:
        return response(400, "Data Key is Not Available")

    # Decrypt the private key for the device
    f = Fernet(data_key_plaintext)
    decrypted_pem = f.decrypt(base64.b64decode(item['encrypted_server_pem_key']))

    #load the serialized key into an object
    server_key = serialization.load_pem_private_key(decrypted_pem, password=None, backend=default_backend())

    #Derive shared secret
    shared_secret = server_key.exchange(ec.ECDH(),device_public_key)

    #use the first 16 bytes (128 bits) of the shared secret to decrypt the session key
    cipher = Cipher(algorithms.AES(shared_secret[:16]), modes.ECB(), backend=default_backend())
    decryptor = cipher.decryptor()
    # NOTE(review): `session_key` is never assigned in this function —
    # presumably base64.b64decode(body['encrypted_session_key']); confirm.
    clear_key = decryptor.update(session_key) + decryptor.finalize()

    # set attribution data
    timestamp = get_timestamp(time.time())
    access_tuple = str((timestamp, email, ip_address))
    print("Access Tuple: {}".format(access_tuple))
    download_list = item["download_log"]
    download_list.append(access_tuple)

    #update the download log with the user details. Keep the last 100 access tuples
    # NOTE(review): the item was fetched by key 'id' above but is updated by
    # key 'digest' here, and body['digest'] was never validated — verify the
    # table schema and request contract.
    table.update_item(
        Key = {'digest':body['digest']},
        UpdateExpression = 'SET download_log= :var',
        ExpressionAttributeValues = {':var':download_list[-100:]},
    )

    #return the string base64 encoded AES key for that session.
    return response(200, base64.b64encode(clear_key).decode('ascii'))
a040fa68b0c1a65c5f0ca25ac4a58326796598ce
24,247
def xpro_aws_settings(aws_settings):
    """Default xPRO test settings"""
    # deliberately impossible bucket name so tests never touch a real bucket
    bucket_name = "test-xpro-bucket"
    aws_settings.XPRO_LEARNING_COURSE_BUCKET_NAME = bucket_name
    return aws_settings
72a7bd4a6ba40b19a6fda530db2bf67b0e4e5fc2
24,248
def function_check(arg, result):
    """arg ↝ result : return"""
    # a Type-valued codomain is allowed for any argument universe
    if result == TypeBuiltin():
        return TypeBuiltin()
    # Kind ↝ Kind : Kind
    if arg == KindBuiltin() and result == KindBuiltin():
        return KindBuiltin()
    # Sort ↝ Kind|Sort : Sort
    if arg == SortBuiltin() and result in (KindBuiltin(), SortBuiltin()):
        return SortBuiltin()
    # any other combination is ill-typed
    message = 'Function check failed for `{} ↝ {}`'.format(
        arg.to_dhall(),
        result.to_dhall(),
    )
    raise TypeError(message)
23840d8c2fba48803d7acc9b32b68ab0903d1d57
24,249
def test_parameter_1_1():
    """
    Feature: Check the names of parameters and the names of inputs of construct.
    Description: If the name of the input of construct is same as the parameters, add suffix to the name of the input.
    Expectation: No exception.
    """
    class ParamNet(Cell):
        def __init__(self):
            super(ParamNet, self).__init__()
            # parameter names deliberately echo the construct input "name_a"
            # below to exercise the suffixing behaviour
            self.param_a = Parameter(Tensor([1], ms.float32), name="name_a")
            self.param_b = Parameter(Tensor([2], ms.float32), name="name_b")

        def construct(self, name_a):
            return self.param_a + self.param_b - name_a

    net = ParamNet()
    # 1 + 2 - 3 == 0; any name clash would raise before reaching the assert
    res = net(Tensor([3], ms.float32))
    assert res == 0
f5d5be6f1403884192c303f2a8060b95fd3e9fca
24,250
def frule_edit(request, frule_id):
    """FM module: edit an application-package download rule.

    :param request: Django request; POST carries the rule fields
    :param frule_id: primary key of the FRule to edit
    :return: HttpResponse with a JS alert/redirect back to the referrer
    """
    try:
        frule = FRule.objects.filter(id=frule_id).first()
        if not frule:
            response = '<script>alert("Rule id not exist!");'
            response += 'location.href=document.referrer;</script>'
            return HttpResponse(response)
        name = request.POST['name'].strip()
        desc = request.POST['description'].strip()
        source_url = request.POST['source_url'].strip()
        regex = request.POST['regex'].strip()
        regex_content = request.POST['regex_content'].strip()
        vendor_id = request.POST['vendor_id']
        if name != frule.name:
            # renaming: reject filtered characters, then migrate dependent data
            if check_filter(name):
                response = '<script>alert("New rule name contain filter chars!");'
                response += 'location.href=document.referrer;</script>'
                return HttpResponse(response)
            try:
                process_rule_rename(frule.id, name)
            except Exception, ex:
                response = '<script>alert("Cant rename rule!");alert("%s");' % str(ex)
                response += 'location.href=document.referrer;</script>'
                return HttpResponse(response)
            frule.name = name
        frule.desc = desc
        frule.source_url = source_url
        frule.regex = regex
        frule.regex_content = regex_content
        frule.vendor_id = vendor_id
        frule.save()
        response = '<script>alert("Success!");location.href=document.referrer;</script>'
        return HttpResponse(response)
    except Exception, ex:
        # surface the error to the user and bounce back to the referrer
        response = '<script>alert("Error!");alert("%s");' % str(ex)
        response += 'location.href=document.referrer;</script>'
        return HttpResponse(response)
1d5d83aaeff5483905e28f428719a6ce0b7833bc
24,251
from typing import Tuple


def load_preprocess_data(days_for_validation: int,
                         lag_variables: list,
                         random_validation: bool = False,
                         seed: int = None,
                         lag: int = 8,
                         reload: bool = True,
                         save_csv: bool = True) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame,
                                                         pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Loading and data preprocessing for the Stream water temperature case study

    Parameters
    ----------
    days_for_validation : int
        Number of days used for validation
    lag_variables : list[str]
        List with variable names that should be lagged
    random_validation : bool
        Assign validation rows at random positions instead of a leading block
    seed : int
        Random seed. Only relevant if random_validation=True
    lag : int
        number of lagged time steps that are computed for all lag_variables.
    reload : bool
        Should a previously computed processed data set be loaded? True/False
    save_csv : bool
        Should the preprocessed data be saved as a csv? Necessary if
        reload=True will be used.

    Returns
    -------
    Tuple of pd.DataFrames:
        data : Full preprocessed data set
        x_train : Training features
        y_train : Training labels
        x_test : Test features
        y_test : Test labels
        x : All features
        y : All labels
    """
    if isfile('data/processed/data.csv') and reload:
        # fast path: reuse a previously saved preprocessing run
        print('Load previously computed data set from "data/preprocessed/data.csv"')
        data = pd.read_csv('data/processed/data.csv')
        x_train = pd.read_csv("data/processed/x_train.csv")
        y_train = pd.read_csv("data/processed/y_train.csv")
        x_test = pd.read_csv("data/processed/x_test.csv")
        y_test = pd.read_csv("data/processed/y_test.csv")
        x = pd.read_csv("data/processed/x.csv")
        y = pd.read_csv("data/processed/y.csv")
    else:
        append_data = []
        # one raw Input/Output file pair per measurement campaign
        for index in ['C', 'V', 'V3']:
            # Meteorological Data
            met_data = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="met_data")
            precip = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="precip")
            dis_data = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="dis_data",
                                     skiprows=1, header=None)
            discharge_805 = pd.DataFrame({'Discharge (m3/s)': dis_data.iloc[4, 1:].transpose()})
            # observed wt
            wt_observed = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="temp",
                                        header=None).transpose()
            measurement_points = pd.read_excel('data/raw/Input' + index + '.xlsx',
                                               sheet_name="temp_t0_data")
            wt_observed.columns = ["wt_observed_point_" + str(i)
                                   for i in measurement_points["Distance (m)"]]
            # observed wt at boundary
            x0_data = pd.read_excel('data/raw/Input' + index + '.xlsx', sheet_name="temp_x0_data")
            x0_data = x0_data.drop(labels='Time (min)', axis=1)
            x0_data.columns = ['x0 Temperature (deg C)']
            # predicted wt
            wt_predicted = pd.read_csv('data/raw/Output' + index + '.csv', header=None)
            # rows: m of stream, columns: timesteps in min
            # get only relevant points and every 15th time steps
            wt_predicted = wt_predicted.iloc[measurement_points["Distance (m)"]]
            wt_predicted = wt_predicted.iloc[:, ::15].transpose()
            wt_predicted.columns = ["wt_predicted_point_" + str(i)
                                    for i in measurement_points["Distance (m)"]]
            # get shading predictions
            measurement_points = pd.read_excel('data/raw/Input' + index + '.xlsx',
                                               sheet_name="temp_t0_data")
            # fix index columns
            x0_data.index = wt_observed.index
            wt_predicted.index = wt_observed.index
            discharge_805.index = wt_observed.index
            # concat data
            data_sub = pd.concat([met_data, precip.iloc[:, 1], discharge_805, wt_observed,
                                  wt_predicted, x0_data], axis=1)
            append_data.append(data_sub)

        # Concatenate full data set
        data = pd.concat(append_data)
        # rebuild a datetime index from the separate date/time columns
        data_time_index = pd.DataFrame({'year': data.Year.tolist(),
                                        'month': data.Month.tolist(),
                                        'hour': data.Hour.tolist(),
                                        'minute': data.Minute.tolist(),
                                        'day': data.Day.tolist()})
        data.index = pd.to_datetime(data_time_index)
        data = data.sort_index()

        # Define training/validation column (15-minute data -> 4*24 rows/day)
        validation_timesteps = 4 * 24 * days_for_validation
        cal_ts = len(data.index) - validation_timesteps
        if random_validation:
            cal_val = ["calibration" for i in range(cal_ts)] + \
                      ["validation" for i in range(validation_timesteps)]
            shuffled_index = np.random.RandomState(seed=seed).permutation(len(cal_val)).tolist()
            cal_val = [cal_val[i] for i in shuffled_index]
        else:
            # cal_val = ["calibration" for x in range(cal_ts)] + ["validation" for x in range(validation_timesteps)]
            cal_val = ["validation" for x in range(validation_timesteps)] + \
                      ["calibration" for x in range(cal_ts)]
        data['calibration_validation'] = pd.Series(cal_val, index=data.index)

        # Compute residual columns (model error = predicted - observed)
        for point in measurement_points["Distance (m)"]:
            data['residuals_point_' + str(point)] = data['wt_predicted_point_' + str(point)] - \
                                                    data['wt_observed_point_' + str(point)]

        # cyclical encoding of the hour of day
        data['sin_hour'] = np.sin(2 * np.pi * data.Hour / 24)
        data['cos_hour'] = np.cos(2 * np.pi * data.Hour / 24)
        # remove duplicated rows if any exist
        data = data[~data.index.duplicated(keep='first')]
        # create lagged features
        data = create_lags(data, lag_variables, lag)

        # Data for ML models
        lagged_variable_names = [[x + "_lag" + str(y + 1) for y in range(lag)]
                                 for x in lag_variables]
        model_variables = ['sin_hour', 'cos_hour'] + lag_variables + sum(lagged_variable_names, [])
        # training data
        training_data = data[data["calibration_validation"] != "validation"]
        x_train = training_data[model_variables]
        y_train = training_data['residuals_point_640']
        # Validation data
        validation_data = data[data["calibration_validation"] == "validation"]
        x_test = validation_data[model_variables]
        y_test = validation_data['residuals_point_640']
        # full dataset x, y
        x = data[model_variables]
        y = data['residuals_point_640']

        # Save as csv
        if save_csv:
            data.to_csv("data/processed/data.csv", index_label=False)
            x_train.to_csv("data/processed/x_train.csv", index_label=False)
            y_train.to_csv("data/processed/y_train.csv", index_label=False)
            x_test.to_csv("data/processed/x_test.csv", index_label=False)
            y_test.to_csv("data/processed/y_test.csv", index_label=False)
            x.to_csv("data/processed/x.csv", index_label=False)
            y.to_csv("data/processed/y.csv", index_label=False)

        print('Finished preprocessing. Final data sets are stored in "data/preprocessed/"')
        if not random_validation:
            print("Time periods")
            training_data = data[data["calibration_validation"] != "validation"]
            validation_data = data[data["calibration_validation"] == "validation"]
            print(f"Training: {training_data.index[0]} - {training_data.index[-1]}")
            print(f"Validation: {validation_data.index[0]} - {validation_data.index[-1]}")
    return data, x_train, y_train, x_test, y_test, x, y
7238841e8f5e32be5ecb15ab5811720b41e8ad63
24,252
def extract_sha256_hash(hash):
    """Extract the hex digest from a 'sha256:<digest>' string, or return None.

    Args:
        hash: A string such as 'sha256:abc123...', or None/empty.

    Returns:
        The digest portion with the leading 'sha256:' prefix removed, or
        None when the input is falsy or does not start with the prefix.
    """
    prefix = 'sha256:'
    if hash and hash.startswith(prefix):
        # Slice instead of str.replace(): replace() would also delete any
        # additional occurrences of 'sha256:' inside the digest itself.
        return hash[len(prefix):]
    return None
11e9f352f3783657d52772c4b69387151d13f3d2
24,253
def logout():
    """Log the current user out.

    Resets the module-level bandwidth/QoS caches and sends the client
    back to the login page.
    """
    global bandwidth_object, qos_object
    bandwidth_object, qos_object = {}, {}
    success_login_form = None
    login_url = url_for('base_blueprint.login')
    return redirect(login_url)
d3ec08fe6e8e0ca70f2f81b11878750efa101781
24,254
from typing import OrderedDict


def draft_intro():
    """
    Controller for presenting draft versions of document introductions.

    Loads the CodeMirror/Summernote editor assets, looks the requested
    draft document up by filename, and returns the dict used by the view:

    - {'doc_exists': False, ...} when no draft row exists;
    - {'doc_exists': True, 'editing_permission': False, ...} when the
      current user may not edit it;
    - otherwise the full editing context (body fields, editor/assistant
      editor/proofreader name maps, citation format, version).

    NOTE(review): relies on web2py globals (``response``, ``session``,
    ``request``, ``db``, ``auth``) and module-level helpers
    (``get_truename``, ``DISPLAY_FIELDS``, ``URL``) defined elsewhere.
    """
    # Editor assets needed by the draft-editing view.
    response.files.append(URL('static/js/codemirror/lib', 'codemirror.js'))
    response.files.append(URL('static/js/codemirror/lib', 'codemirror.css'))
    response.files.append(URL('static/js/codemirror/theme', 'solarized.css'))
    response.files.append(URL('static/js/codemirror/mode/xml', 'xml.js'))
    response.files.append(URL('static/js/summernote', 'summernote.min.js'))
    response.files.append(URL('static/js/summernote', 'summernote.css'))
    # Canonical filename from the first URL argument; cached on the session.
    session.filename = get_truename(request.args[0])
    filename = session.filename
    docrow = db(db.draftdocs.filename == filename).select().first()
    if not docrow:
        # draft document does not exist in the database, so can't be edited
        return {'doc_exists': False,
                'editing_permission': False,
                'filename': filename}
    else:
        # draft document does exist in database and can be edited
        # Every role id that grants editing rights on this document.
        editor_ids = [docrow['editor'], docrow['editor2'], docrow['editor3'],
                      docrow['editor4'], docrow['assistant_editor'],
                      docrow['assistant_editor2'], docrow['assistant_editor3'],
                      docrow['proofreader'], docrow['proofreader2'],
                      docrow['proofreader3']
                      ]
        if auth.has_membership('administrators') \
                or (auth.has_membership('editors') and auth.user_id in editor_ids):
            # current user has permission to edit this page
            # Ordered map of display label -> field content, skipping empties.
            # NOTE(review): .iteritems() is Python 2 only; under Python 3
            # this raises AttributeError — confirm the runtime version.
            body_fields = OrderedDict([(v, docrow[k]) for k, v
                                       in DISPLAY_FIELDS.iteritems()
                                       if docrow[k]])
            editor_names = OrderedDict([])
            for ed in ['editor', 'editor2', 'editor3', 'editor4']:
                if docrow[ed]:
                    editor_names[docrow[ed]['id']] = '{} {}'.format(docrow[ed]['first_name'],
                                                                    docrow[ed]['last_name'])
            asst_editor_names = OrderedDict([])
            for ed in ['assistant_editor', 'assistant_editor2', 'assistant_editor3']:
                if docrow[ed]:
                    asst_editor_names[docrow[ed]['id']] = '{} {}'.format(docrow[ed]['first_name'],
                                                                         docrow[ed]['last_name'])
            proofreader_names = OrderedDict([])
            for ed in ['proofreader', 'proofreader2', 'proofreader3']:
                if docrow[ed]:
                    proofreader_names[docrow[ed]['id']] = '{} {}'.format(docrow[ed]['first_name'],
                                                                         docrow[ed]['last_name'])
            return {'doc_exists': True,
                    'editing_permission': True,
                    'title': docrow['name'],
                    'body_fields': body_fields,
                    'citation_format': docrow['citation_format'],
                    'editors': editor_names,
                    'assistant_editors': asst_editor_names,
                    'proofreaders': proofreader_names,
                    'filename': filename,
                    'version': docrow['version']}
        else:
            # current user does not have permission
            return {'doc_exists': True,
                    'editing_permission': False,
                    'filename': filename,
                    'title': docrow['name']}
1ae932af2a9b89a35efbe0b1da91e26fe66f6403
24,255
import pathlib


def collect_shape_data(gtfs_dir):
    """Count how often each shape (line on a map) is travelled.

    Joins per-trip service-day counts onto shapes and attaches metadata
    (short name, type, colour) of the route each shape belongs to.

    Args:
        gtfs_dir: the directory where the GTFS file is extracted

    Returns:
        pandas.DataFrame: one row per shape_id with times_taken plus
        route information.

    Raises:
        ValueError: if any shape maps to more than one route.
    """
    feed_dir = pathlib.Path(gtfs_dir)
    service_days = calculate_service_days(feed_dir)
    trips = pd.read_csv(feed_dir / 'trips.txt', index_col=2)
    routes = pd.read_csv(feed_dir / 'routes.txt', index_col=0)

    # Each shape must map to exactly one route; min == max per group proves it.
    extremes = trips.groupby('shape_id').aggregate({'route_id': [min, max]})
    if any(extremes[('route_id', 'min')] != extremes[('route_id', 'max')]):
        raise ValueError("Shape ids must uniquely identify route_ids")

    with_days = trips.join(service_days, on="service_id", how="left")
    per_shape = with_days.groupby(["shape_id"]).aggregate(
        {'days': sum, 'route_id': 'first'})
    per_shape = per_shape.rename(columns={'days': 'times_taken'})
    per_shape = per_shape.join(
        routes[['route_short_name', 'route_type', 'route_color']],
        on="route_id",
        how="left")
    return per_shape.reset_index()
0fa16cc889696f01b25b4eb60ded423968b6aa20
24,256
def lick():
    """Joke command: reply when a user says 'lick'.

    :return: A fixed flavour-text string.
    """
    reply = "*licks ice cream cone*"
    return reply
a4e92d7371abe078c48196b0f7d7e899b1b0e19e
24,257
def from_dict(obj, node_name='root'): """Converts a simple dictionary into an XML document. Example: .. code-block:: python data = { 'test': { 'nodes': { 'node': [ 'Testing', 'Another node' ] }, } } xml = from_dict(data) # <test><nodes><node>Testing</node><node>Another node</node></nodes></test> Args: node_name (string): the initial node name in case there are multiple top level elements. """ return __dict_to_xml(obj, node_name)
3308fb85baea5c145f4acd22fb49a70458f4cc51
24,258
def parse_ascii(state: str, size: int) -> str:
    """Convert an ascii drawing of a cube into a facelet string.

    Args:
        state: an ascii picture of a cube
        size: the dimension of the cube (3 for a 3x3x3)

    Returns:
        the cube state as one string in ULFRBD face order
    """
    # Keep only non-empty rows, with all internal spacing removed.
    rows = []
    for raw in state.splitlines():
        cleaned = raw.strip().replace(" ", "")
        if cleaned:
            rows.append(cleaned)

    up = "".join(rows[:size])
    down = "".join(rows[size * 2: size * 4])

    # The middle band holds L, F, R and B side by side, one row per line.
    left, front, right, back = [], [], [], []
    for row in rows[size: size * 2]:
        left.append(row[:size])
        front.append(row[size: size * 2])
        right.append(row[size * 2: size * 3])
        back.append(row[size * 3: size * 4])

    faces = [up, "".join(left), "".join(front),
             "".join(right), "".join(back), down]
    return "".join(faces)
7ec24a22c3052a76c820dcca54c913c2d5229e5d
24,259
def _get_build_failure_reasons(build):
    # type: (Build) -> List[str]
    """Return the names of all the FailureReasons associated with a build.

    Args:
        build (Build): The build to return reasons for.

    Returns:
        list: A sorted list of the distinct FailureReason.reason values
        associated with the build.
    """
    # DISTINCT because several job steps can fail for the same reason; the
    # trailing comma in `for r,` unpacks each single-column result row.
    failure_reasons = [r for r, in db.session.query(
        distinct(FailureReason.reason)
    ).join(
        JobStep, JobStep.id == FailureReason.step_id,
    ).filter(
        FailureReason.build_id == build.id,
        # Ignore steps that were superseded by a replacement step.
        JobStep.replacement_id.is_(None),
    ).all()]
    # The order isn't particularly meaningful; the sorting is primarily
    # to make the same set of reasons reliably result in the same JSON.
    return sorted(failure_reasons)
7f446ff96f93443a59293e36f4d071d79218f24d
24,260
import re


def parse_line(line: str):
    """
    Parse a single access-log record.

    The record is matched against two patterns: the trailing request time
    and the quoted HTTP request. A line without a parseable request time is
    considered broken and yields ``None``. A line whose request time is
    present but whose URL cannot be parsed is kept, with the request marked
    as ``'bad_request'`` to allow further statistical checking.

    :param line: UTF-8 encoded string of a log record.
    :return: dict with 'request_time' and 'request' keys, or None for a
        broken line.
    """
    request_time_pat = r' \d*[.]?\d*$'
    request_pat = (r'"(GET|HEAD|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE|PATCH)'
                   r'\s(?P<url>.+?)\sHTTP/.+"\s')

    time_match = re.search(request_time_pat, line)
    if time_match is None:
        # BUG FIX: the previous code indexed the match object without
        # checking for None, so broken lines raised TypeError instead of
        # returning None as documented.
        return None

    log_contents = {}
    log_contents['request_time'] = time_match[0].strip()
    request = re.findall(request_pat, line)
    log_contents['request'] = request[0][1] if request else 'bad_request'

    # An empty request_time (only the separating space matched) still
    # counts as a broken record.
    if log_contents['request_time']:
        return log_contents
    return None
1d747d22b28019f030c982455bfc89ea03e8631f
24,261
def for_all_arglocs(*args):
    """
    for_all_arglocs(vv, vloc, size, off=0) -> int

    Compress larger argloc types and initiate the aloc visitor.

    Thin SWIG wrapper: all arguments are forwarded unchanged to the
    native IDA implementation in ``_ida_typeinf``.

    @param vv (C++: aloc_visitor_t &)
    @param vloc (C++: argloc_t &)
    @param size (C++: int)
    @param off (C++: int)
    """
    return _ida_typeinf.for_all_arglocs(*args)
9cc568f16d64f8a1bb206a08a73cdb4c3b6adcc4
24,262
def fetch_project_check_perm(id, user, perm):
    """Fetch a project by id and enforce a permission on it.

    Args:
        id: The id of the project.
        user: A User instance.
        perm: Permission to check. Example: "nlpviewer_backend.read_project"

    Returns:
        The Project instance; the helpers short-circuit with a 404/403
        response when the project is missing or access is denied.
    """
    found_project = get_object_or_404(Project, pk=id)
    check_perm_project(found_project, user, perm)
    return found_project
dcf7271ebe171f77748eebdc61b2c74039da0690
24,263
def toRoman(n):
    """Convert an integer in 1..4999 to its Roman numeral string.

    Raises:
        OutOfRangeError: if n is outside 1..4999.
        NotIntegerError: if n has a fractional part.
    """
    if not (0 < n < 5000):
        raise OutOfRangeError("number out of range (must be 1..4999)")
    if int(n) != n:
        raise NotIntegerError("decimals can not be converted")
    # Greedily consume the largest numeral values first.
    pieces = []
    remaining = n
    for numeral, integer in romanNumeralMap:
        while remaining >= integer:
            pieces.append(numeral)
            remaining -= integer
    return str("".join(pieces))
275cd966e6dda8adfbde16ffc9ba0f6a4928ad3e
24,264
import matplotlib.pyplot as plt


def imsave(addr, im):
    """Write an image array to disk.

    :param addr: destination file path as a string
    :param im: image array to save
    :return: whatever ``plt.imsave`` returns (None on success)
    """
    return plt.imsave(addr, im)
0931ec70c1258827a9d65f4c5b7d2ba9aa2e6a99
24,265
import multiprocessing


def simulate_one(ticket: Ticket, strategy: Strategy, trials: int) -> float:
    """Estimate the expected payout of playing ``ticket`` with ``strategy``.

    Runs ``trials`` independent games across a process pool and averages
    the results.

    :param ticket: the ticket to simulate
    :param strategy: the strategy to play with
    :param trials: number of independent games to run (must be >= 1)
    :return: mean result over all trials
    :raises ValueError: if trials < 1
    """
    if trials < 1:
        raise ValueError("trials must be >= 1")
    workers = multiprocessing.cpu_count()
    jobs = [(strategy, ticket) for _ in range(trials)]
    # BUG FIX: chunksize must be at least 1 — the old int(len/workers)
    # computed 0 whenever trials < cpu_count, which breaks Pool.map.
    chunksize = max(1, len(jobs) // workers)
    with multiprocessing.Pool(processes=workers) as pool:
        results = pool.map(playone, jobs, chunksize)
    return sum(results) / trials
cba86eaabc1b25681cf8b4e9d4c3134c186d5d43
24,266
def download_prostate():
    """Download prostate dataset.

    Returns:
        The result of ``_download_and_read('prostate.img')`` — presumably
        the loaded dataset object; confirm against that helper's contract.
    """
    return _download_and_read('prostate.img')
a65174dd85491d259c94b9df31c739b62a9e50be
24,267
import json
import random
import hashlib


def decBIPKey(encrypted_privK, passphrase, currency):
    """
    Decrypt a BIP38-encrypted private key and derive its public address.

    :param encrypted_privK: the BIP38-encrypted private key string
    :param passphrase: passphrase used to decrypt the key
    :param currency: currency code looked up in currencies.json
    :return: (WIF private key, public address) on success, or
        (False, False) when the BIP38 address-hash check fails.

    NOTE(review): depends on module-level ``bip38``, ``enc`` and
    ``address`` helpers defined elsewhere; their exact semantics are not
    visible here.
    """
    # using the currencies.json file, get the currency data
    with open('currencies.json', 'r') as dataFile:
        currencies = json.load(dataFile)
    # Linear scan; `cur` holds the matched entry after the break.
    # NOTE(review): if `currency` is unknown, the loop falls through and
    # the *last* entry is silently used — confirm this is intended.
    for cur in currencies:
        if cur['currency'] == currency:
            break
    # randomly choose a prefix if multiples exist
    prefixes = cur['prefix'].split('|')
    prefix = prefixes[random.randint(0, (len(prefixes)-1))]
    # decrypt the BIP key
    PrivK, Addresshash = bip38.decrypt(str(encrypted_privK), str(passphrase), 8)
    # calculate the address from the key
    PrivK = enc.decode(PrivK, 256)
    publicAddress = address.publicKey2Address(address.privateKey2PublicKey(PrivK),
                                              int(cur['version']), prefix,
                                              int(cur['length']))
    # check our generated address against the address hash from BIP
    # (double SHA-256 of the address, first four bytes).
    # NOTE(review): hashing `publicAddress` directly only works when it is
    # a bytes value (Python 2 style str) — confirm the runtime version.
    if hashlib.sha256(hashlib.sha256(publicAddress).digest()).digest()[0:4] != Addresshash:
        return False, False
    else:
        return address.privateKey2Wif(PrivK, cur['version'], prefix, cur['length']), publicAddress
743a87753463ca269ff6a120024813a5e61445ac
24,268
def plot_data(coordinate, box=[], plt_inst=None, **kwargs):
    """Plot a 2-column coordinate curve, optionally with a shaded "std box".

    Args:
        coordinate (float[]): 2D array of points; first column x, second y
        box (float[]): per-point [lower, upper] band around the curve
        plt_inst (pyplot): pyplot instance (or Axes) to draw on
        **kwargs: forwarded to ``plot``

    Returns:
        (plt_inst) the instance used for drawing
    """
    target = plt if plt_inst is None else plt_inst
    # Only draw the band when it covers every plotted point.
    if len(box) == len(coordinate):
        lower = box[:, 0:1].squeeze()
        upper = box[:, 1:].squeeze()
        target.fill_between(np.arange(len(box)), lower, upper,
                            zorder=1, alpha=0.2)
    xs = coordinate[:, 0:1].squeeze()
    ys = coordinate[:, 1:].squeeze()
    target.plot(xs, ys, **kwargs)
    return target
4c549425f076217cb8b0302a49137bc8e85b661a
24,269
import os
import yaml
import json


def read_config_file(config_file):
    """Read a YAML or JSON config file into a dict.

    :param config_file: path to a .yml/.yaml/.json configuration file
    :return: the parsed configuration; exits the process with status 1 on
        a missing file, read failure, or unsupported extension
    """
    if not os.path.isfile(config_file):
        print("Cannot find the file", config_file)
        exit(1)

    extension = os.path.splitext(config_file)[1]
    config_json = None
    try:
        with open(config_file) as data_file:
            if extension in ['.yml', '.yaml']:
                config_json = yaml.safe_load(data_file.read())
            elif extension in ['.json']:
                # BUG FIX: the json module has no safe_load(); the old code
                # raised AttributeError on every .json file.
                config_json = json.load(data_file)
            else:
                # BUG FIX: previously an unknown extension fell through and
                # the final return raised NameError on config_json.
                print("Unsupported config file extension", extension)
                exit(1)
    except IOError:
        print("Unable to read the file", config_file)
        exit(1)
    return config_json
e8c7d09d0d303be100d67adfe36f39766eb325af
24,270
def param_curve(t, R, r, d):
    """Coordinates of a hypotrochoid for parameters t, R, r and d.

    R is the fixed-circle radius, r the rolling-circle radius, d the pen
    distance; z adds a 3*sin(t) vertical oscillation.
    """
    diff = R - r
    ratio = diff / r
    x = diff * cos(t) + d * cos(ratio * t)
    y = diff * sin(t) - d * sin(ratio * t)
    return x, y, 3 * sin(t)
dd60c3aada02e589d50566910bbc63b6b67c40d8
24,271
from core.models import Snapshot, ArchiveResult
from typing import Optional
from typing import Iterable
from pathlib import Path
import os
from datetime import datetime


def archive_link(link: Link, overwrite: bool=False, methods: Optional[Iterable[str]]=None, out_dir: Optional[Path]=None) -> Link:
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp

    Runs every (optionally filtered) archive method against the link,
    records each result as an ArchiveResult row on the link's Snapshot,
    and keeps the on-disk index files in sync.

    :param link: the Link to archive
    :param overwrite: re-run methods even when prior output exists
    :param methods: optional whitelist of method names to run
    :param out_dir: output directory (defaults to link.link_dir)
    :return: the (possibly updated) Link

    NOTE(review): ``timezone``, ``Link`` and the helper functions used
    below are imported elsewhere in this module.
    """
    # TODO: Remove when the input is changed to be a snapshot. Suboptimal approach.
    try:
        snapshot = Snapshot.objects.get(url=link.url)  # TODO: This will be unnecessary once everything is a snapshot
    except Snapshot.DoesNotExist:
        snapshot = write_link_to_sql_index(link)

    ARCHIVE_METHODS = get_default_archive_methods()
    if methods:
        # Restrict to only the explicitly requested methods.
        ARCHIVE_METHODS = [
            method for method in ARCHIVE_METHODS
            if method[0] in methods
        ]

    out_dir = out_dir or Path(link.link_dir)
    try:
        is_new = not Path(out_dir).exists()
        if is_new:
            os.makedirs(out_dir)

        link = load_link_details(link, out_dir=out_dir)
        write_link_details(link, out_dir=out_dir, skip_sql_index=False)
        log_link_archiving_started(link, out_dir, is_new)
        link = link.overwrite(updated=datetime.now(timezone.utc))
        stats = {'skipped': 0, 'succeeded': 0, 'failed': 0}
        start_ts = datetime.now(timezone.utc)

        for method_name, should_run, method_function in ARCHIVE_METHODS:
            try:
                if method_name not in link.history:
                    link.history[method_name] = []

                if should_run(link, out_dir, overwrite):
                    log_archive_method_started(method_name)

                    result = method_function(link=link, out_dir=out_dir)

                    link.history[method_name].append(result)

                    # result.status is one of the stats keys above.
                    stats[result.status] += 1
                    log_archive_method_finished(result)
                    write_search_index(link=link, texts=result.index_texts)
                    ArchiveResult.objects.create(snapshot=snapshot, extractor=method_name, cmd=result.cmd, cmd_version=result.cmd_version, output=result.output, pwd=result.pwd, start_ts=result.start_ts, end_ts=result.end_ts, status=result.status)

                    # bump the updated time on the main Snapshot here, this is critical
                    # to be able to cache summaries of the ArchiveResults for a given
                    # snapshot without having to load all the results from the DB each time.
                    # (we use {Snapshot.id}-{Snapshot.updated} as the cache key and assume
                    # ArchiveResults are unchanged as long as the updated timestamp is unchanged)
                    snapshot.save()
                else:
                    # print('{black}    X {}{reset}'.format(method_name, **ANSI))
                    stats['skipped'] += 1
            except Exception as e:
                # Wrap so the failing method and URL show up in the traceback.
                raise Exception('Exception in archive_methods.save_{}(Link(url={}))'.format(
                    method_name,
                    link.url,
                )) from e

        # print('    ', stats)

        # Promote the newest extracted title when it is at least as long as
        # the current one; best-effort only, failures are ignored.
        try:
            latest_title = link.history['title'][-1].output.strip()
            if latest_title and len(latest_title) >= len(link.title or ''):
                link = link.overwrite(title=latest_title)
        except Exception:
            pass

        write_link_details(link, out_dir=out_dir, skip_sql_index=False)
        log_link_archiving_finished(link, link.link_dir, is_new, stats, start_ts)

    except KeyboardInterrupt:
        # Flush whatever we have to disk before propagating Ctrl-C.
        try:
            write_link_details(link, out_dir=link.link_dir)
        except:
            pass
        raise

    except Exception as err:
        print('    ! Failed to archive link: {}: {}'.format(err.__class__.__name__, err))
        raise

    return link
c45216d61b4a4fae8dec6a7829959211cb829813
24,272
import inspect
import os


def get_tests_dir(append_path=None):
    """
    Args:
        append_path: optional path to append to the tests dir path

    Return:
        The full path to the `tests` dir, so that the tests can be invoked
        from anywhere. Optionally `append_path` is joined after the `tests`
        dir when provided.
    """
    # __file__ of whoever called us — one frame up the stack.
    caller_file = inspect.stack()[1][1]
    tests_dir = os.path.abspath(os.path.dirname(caller_file))
    return os.path.join(tests_dir, append_path) if append_path else tests_dir
b7af8440b1835e862550b88ea781093b6cea54a6
24,273
def create_collection(metadata_url: str = METADATA_URL,
                      thumbnail_url: str = THUMBNAIL_URL) -> pystac.Collection:
    """Create a STAC Collection using AAFC Land Use metadata

    Args:
        metadata_url (str, optional): Metadata json provided by AAFC
        thumbnail_url (str, optional): URL of the collection thumbnail image

    Returns:
        pystac.Collection: pystac collection object
    """
    metadata = get_metadata(metadata_url)

    # AAFC acts in every provider role for this dataset.
    provider = Provider(
        name=metadata.provider,
        roles=[
            ProviderRole.HOST,
            ProviderRole.LICENSOR,
            ProviderRole.PROCESSOR,
            ProviderRole.PRODUCER,
        ],
        url=PROVIDER_URL,
    )

    extent = pystac.Extent(
        pystac.SpatialExtent([metadata.bbox_polygon]),
        pystac.TemporalExtent(
            [[metadata.datetime_start, metadata.datetime_end]]),
    )

    collection = pystac.Collection(
        id=LANDUSE_ID,
        title=metadata.title,
        description=metadata.description,
        providers=[provider],
        license=metadata.license_id,
        extent=extent,
        catalog_type=pystac.CatalogType.RELATIVE_PUBLISHED,
        keywords=KEYWORDS,
    )
    collection.add_link(
        Link(rel="license",
             target=metadata.license_url,
             title=metadata.license_title))

    # Add the metadata url and thumbnail url as assets
    collection.add_asset(
        "metadata",
        pystac.Asset(
            href=metadata_url,
            media_type=pystac.MediaType.JSON,
            roles=["metadata"],
            title="AAFC Land Use collection metadata",
        ),
    )
    collection.add_asset(
        "thumbnail",
        pystac.Asset(
            href=thumbnail_url,
            media_type=pystac.MediaType.PNG,
            roles=["thumbnail"],
            title="AAFC Land Use collection thumbnail",
        ),
    )

    # Label extension: the rasters carry classification labels.
    collection_label = LabelExtension.summaries(collection,
                                                add_if_missing=True)
    collection_label.label_type = [LabelType.RASTER]
    collection_label.label_tasks = [LabelTask.CLASSIFICATION]
    collection_label.label_properties = None
    collection_label.label_classes = [
        # TODO: The STAC Label extension JSON Schema is incorrect.
        # https://github.com/stac-extensions/label/pull/8
        # https://github.com/stac-utils/pystac/issues/611
        # When it is fixed, this should be None, not the empty string.
        LabelClasses.create(list(CLASSIFICATION_VALUES.values()), "")
    ]

    collection_proj = ProjectionExtension.summaries(collection,
                                                    add_if_missing=True)
    collection_proj.epsg = [metadata.epsg]

    # Declare the per-item assets (metadata JSON + land-use COG raster).
    collection_item_asset = ItemAssetsExtension.ext(collection,
                                                    add_if_missing=True)
    collection_item_asset.item_assets = {
        "metadata":
        AssetDefinition(
            dict(
                type=pystac.MediaType.JSON,
                roles=["metadata"],
                title="AAFC Land Use metadata",
            )),
        "landuse":
        AssetDefinition({
            "type": pystac.MediaType.COG,
            "roles": [
                "data",
                "labels",
                "labels-raster",
            ],
            "title": "AAFC Land Use COG",
            "raster:bands": [
                RasterBand.create(
                    nodata=0,
                    sampling=Sampling.AREA,
                    data_type=DataType.UINT8,
                    spatial_resolution=30,
                ).to_dict()
            ],
            "file:values": [{
                "values": [value],
                "summary": summary
            } for value, summary in CLASSIFICATION_VALUES.items()],
            "label:type": collection_label.label_type[0],
            "label:tasks": collection_label.label_tasks,
            "label:properties": None,
            "label:classes": [collection_label.label_classes[0].to_dict()],
            "proj:epsg": metadata.epsg,
        }),
    }

    return collection
97fa19b32b6b9556ad1a117a248721027cac1db0
24,274
def calculate_average_crossing_per_month_and_measure(num_of_months, list_with_agg_values):
    """Calculates the average crossings per month and per measure.

    Args:
        num_of_months: the number of months based on the frequency of each
            measure, saved as a dict (per-measure frequency) or an int
            (shared frequency for all measures).
        list_with_agg_values: rows of [Border, Date, Measure, aggregated
            value], sorted with the most recent date first.

    Returns:
        list_with_avg (list): rows extended with the running average
        crossing value per month and per measure.

    NOTE(review): the loop runs range(len-1, 0, -1), so index 0 (the most
    recent row) is never processed — confirm this is intentional.
    NOTE(review): `accumulation`/`counter` are only initialised when the
    modulo condition fires; if the first processed row misses it, the
    `accumulation += ...` line raises UnboundLocalError — verify inputs.
    """
    list_with_avg = []
    # Going through the list of aggregated values backwards:
    # the list was sorted with the most recent date up first, so hence we are
    # adding from the bottom up and not top down direction
    for i in range(len(list_with_agg_values) - 1, 0, -1):
        each_row = list_with_agg_values[i]
        # Now check whether the number of the months per measure is the same or not:
        # If it's not, we going to calculate the average for each measure's frequency
        if isinstance(num_of_months, dict):
            for key, value in num_of_months.items():
                if each_row[2] == key:
                    if i % value == 0:
                        # Start of a new averaging window: reset and emit 0.
                        accumulation, counter = 0, 0
                        each_row = each_row + [0]
                    else:
                        # Add up each of the previous months' values
                        each_row_before = list_with_agg_values[i + 1]
                        accumulation += each_row_before[3]
                        # Similarly add for each month to the counter
                        counter += 1
                        # For each row, get the average value of crossing based
                        # for each measure and border
                        each_row = each_row + [my_round(accumulation / counter)]
                    # And keep track in the list
                    list_with_avg.append(each_row)
        else:
            # Otherwise, if the frequency is the same for all of the measures
            if i % (num_of_months - 1) == 0:
                accumulation, counter = 0, 0
                each_row = each_row + [0]
            else:
                # Add up each of the previous months' values
                each_row_before = list_with_agg_values[i + 1]
                accumulation += each_row_before[3]
                # Similarly add for each month to the counter
                counter += 1
                # For each row, get the average value of crossing based for
                # each measure and border
                each_row = each_row + [my_round(accumulation / counter)]
            # And keep track in the list
            list_with_avg.append(each_row)
    return list_with_avg
750d1b944a4f8723a4f39fc2f92b42f1011ea9c7
24,275
import re
import os


def scan_album_folder(folder, file_list):
    """
    Renames all files in a folder.

    If all the files in the folder have the same Year and Album metadata,
    the folder itself will be renamed to the format "[YEAR] ALBUM".

    :param folder: path of the folder being scanned
    :param file_list: names of the files inside ``folder``
    :return: dict of counters ('found', 'renamed', 'unchanged', 'missing')
        plus 'folder_rename', either '' or an (old_path, new_path) tuple.

    NOTE(review): relies on module-level ``rename_file`` and
    ``YEAR_ENCLOSER`` defined elsewhere in the module.
    """
    folder_data = []
    folder_counts = {'found': 0, 'renamed': 0, 'unchanged': 0, 'missing': 0,
                     'folder_rename': ''}
    # NOTE(review): this check duplicates the one below but runs while
    # folder_data is still empty, so it can never trigger — dead code.
    if folder_data\
            and len(folder_data[0]) == 3\
            and folder_data[0][0]\
            and folder_data[0][1]\
            and all((x[0] == folder_data[0][0] and x[1] == folder_data[0][1]) for x in folder_data):
        pass
    # Rename every file, tallying the outcome reported for each one.
    for file in file_list:
        folder_d = rename_file(folder, file)
        if folder_d is not None:
            folder_counts[folder_d[2]] += 1
            folder_counts['found'] += 1
            folder_data.append(folder_d)
    # Rename the folder only when every file reported the same (year, album).
    if folder_data\
            and len(folder_data[0]) == 3\
            and folder_data[0][0]\
            and folder_data[0][1]\
            and all((x[0] == folder_data[0][0] and x[1] == folder_data[0][1]) for x in folder_data):
        folder_name = YEAR_ENCLOSER[0] + \
            folder_data[0][0] + YEAR_ENCLOSER[1] + ' ' + folder_data[0][1]
        # Strip the last path component to get the parent directory.
        parent_path = re.sub(r'[^\\/]+[\\/]?$', '', folder)
        if folder != '.' and folder != parent_path + folder_name:
            counter = 2
            base_dir = parent_path + folder_name
            # Drop any trailing dots from the candidate directory name.
            base_dir = re.search(r'(.*?)\.+$', base_dir)
            if base_dir is None:
                base_dir = parent_path + folder_name
            else:
                base_dir = base_dir.group(1)
            base_dir = base_dir.strip()
            # Probe "name", "name (2)", ... until a free directory is found.
            try_dir = base_dir
            while os.path.isdir(try_dir) and counter < 100:
                if try_dir == folder:
                    break
                try_dir = base_dir + ' (' + str(counter) + ')'
                counter += 1
            if try_dir != folder:
                folder_counts['folder_rename'] = (folder, try_dir)
    return folder_counts
24,276
from typing import List
from typing import Optional
from typing import Dict


def predict_with_inferer(
    images: Tensor, network, keys: List[str], inferer: Optional[SlidingWindowInferer] = None
) -> Dict[str, List[Tensor]]:
    """
    Predict a network's dict output through a sliding-window inferer.

    Unlike calling ``network(images)`` directly, routing through a
    SlidingWindowInferer lets large inputs be processed in patches.

    Args:
        images: input of the network, Tensor sized (B, C, H, W) or (B, C, H, W, D)
        network: a network that takes an image Tensor as input and outputs
            a dictionary Dict[str, List[Tensor]] or Dict[str, Tensor].
        keys: the keys in the output dict, should be network output keys or
            a subset of them.
        inferer: a SlidingWindowInferer to handle large inputs.

    Return:
        The predicted head_output from network, a Dict[str, List[Tensor]]
    """
    if inferer is None:
        raise ValueError("Please set inferer as a monai.inferers.inferer.SlidingWindowInferer(*)")
    # The inferer returns all heads flattened into one sequence of
    # len(keys) * num_levels tensors; slice it back into per-key lists.
    flat_outputs = inferer(images, _network_sequence_output, network, keys=keys)
    levels = len(flat_outputs) // len(keys)
    return {
        key: list(flat_outputs[levels * idx : levels * (idx + 1)])
        for idx, key in enumerate(keys)
    }
24,277
import re


def split_data(line):
    """Extract the numeric values that follow ':' in a comma-separated record.

    Each field of ``line`` (split on ", ") is scanned for a number that
    follows a colon plus one character, e.g. "temp: -3.5"; values are
    collected as floats.

    :param line: record string such as "a: 1.5, b: 2"
    :return: list of floats when more than one value was found, else []
    """
    # Compile once instead of per-field (the old code recompiled inside
    # the loop); matches a possibly negative number preceded by ':' and
    # one more character (usually a space).
    word_parse = re.compile(r''' ((?<=:.)-*[0-9]+\.*[0-9]*)''', re.X)
    data = []
    # Iterate the split fields directly — no numpy array needed.
    for item in line.split(", "):
        parts = word_parse.findall(item)
        if parts:
            data.append(float(parts[0]))
    return data if len(data) > 1 else []
8fcab989a6220ddccf653552b5e9eaf98bd83277
24,278
def show_outcome_group_global(request_ctx, id, **request_kwargs):
    """Fetch a single global outcome group from the API.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param id: (required) ID
    :type id: string
    :return: Show an outcome group
    :rtype: requests.Response (with OutcomeGroup data)
    """
    endpoint = '/v1/global/outcome_groups/{id}'.format(id=id)
    full_url = request_ctx.base_api_url + endpoint
    return client.get(request_ctx, full_url, **request_kwargs)
0e8d8c9411e3bc6d7cdbdede38cca65878dccb65
24,279
import hashlib


def md5sum(file: str) -> str:
    """
    Compute the MD5 hex digest of a file.

    Reads the file in fixed-size chunks so arbitrarily large files do not
    need to fit in memory (the old version read the whole file at once).

    :param file: filename of the file whose md5 is computed for
    :return: md5 hex digest string
    """
    md5_hash = hashlib.md5()
    with open(file, "rb") as stream:
        # iter() with a sentinel yields chunks until read() returns b"".
        for chunk in iter(lambda: stream.read(65536), b""):
            md5_hash.update(chunk)
    return md5_hash.hexdigest()
0ec81688aa298e73a064034760cdd1687b2561a4
24,280
def read_data(filetype, filename, prn):
    """Calls the appropriate position reader function based on the filetype.

    Looks up a function named ``filetype + '_data'`` in the module (or
    local) namespace and calls it with (filename, prn).

    :param filetype: prefix of the reader function to dispatch to
    :param filename: path passed through to the reader
    :param prn: identifier passed through to the reader
    :raises NotImplementedError: when no matching reader function exists
    """
    func_name = filetype + '_data'
    possibles = globals().copy()
    possibles.update(locals())
    func = possibles.get(func_name)
    if func is None:
        # BUG FIX: the old code concatenated None + str here, raising
        # TypeError; report the missing function's *name* instead.
        raise NotImplementedError(func_name + ' is not an implemented function.')
    return func(filename, prn)
91949a7cc1573a44ebb504b3a5542ff289b2100a
24,281
from typing import List
from typing import Tuple


def processContours(contours: List[float], contourpoints: List[List[float]], frame: pims.frame.Frame, debug=False) -> Tuple[List[List[float]], pims.frame.Frame]:
    """Append the centre of each contour's bounding box to ``contourpoints``.

    Parameters
    ----------
    contours : List[float]
        Contours to compute bounding boxes for.
    contourpoints : List[List[float]]
        Accumulator of [cx, cy] centres; extended in place.
    frame : pims.frame.Frame
        Frame from which the contours are from.
    debug : bool, optional
        When true, draw each bounding box onto ``frame``.

    Returns
    -------
    Tuple[List[List[float]], pims.frame.Frame]
        The (mutated) centre list and the frame.
    """
    for contour in contours:
        left, top, width, height = cv2.boundingRect(contour)
        centre = [left + (width / 2), top + (height / 2)]
        contourpoints.append(centre)
        if debug:
            cv2.rectangle(frame, (left, top),
                          (left + width, top + height), (0, 255, 0), 2)
    return contourpoints, frame
24,282
def simulate_games(num_games, switch, num_doors=3):
    """
    Simulate multiple games of the Monty Hall problem.

    Parameters:
        - num_games: Integer, the number of games you want to simulate.
        - switch: Boolean, whether or not your strategy is to switch doors
          after the reveal.
        - num_doors: Integer, the number of doors in the game. Default is 3
          for the classic game with 2 goats and 1 car.

    Returns:
        A string reporting the winning percentage over all games, e.g.
        'winning percentage: 33.40%'. (The previous docstring described
        the per-game 1/0 result of ``play``, not this function.)

    Raises:
        ValueError: if num_games is not an integer >= 1.
    """
    if not isinstance(num_games, int) or num_games < 1:
        raise ValueError('`num_games` must be an integer greater than or equal to 1.')
    wins = 0
    # play() contributes 1 for a win and 0 for a loss per game.
    for _ in range(num_games):
        wins += play(switch, num_doors)
    return f'winning percentage: {wins / num_games:.2%}'
24,283
from typing import Iterable
import functools
import operator


def prod(iterable: Iterable):
    """math.prod support for Python versions < v3.8.

    Multiplies all elements of ``iterable`` together, returning 1 for an
    empty iterable (matching ``math.prod``).
    """
    # FIX: the old annotation claimed an Iterable return; reduce() yields
    # a single scalar product, so the wrong annotation was dropped.
    return functools.reduce(operator.mul, iterable, 1)
be811e39b7dd70669fbfc84db5492b4c7383d68f
24,284
import subprocess


def compress_video(video_path):
    """
    Compress a video file in place with gzip.

    :param video_path: Path to the video; gzip replaces it with
        ``<video_path>.gz``.
    :return: True when gzip exited with status 0, otherwise False.
        (The previous docstring incorrectly claimed None.)
    """
    return subprocess.call(["gzip", video_path]) == 0
9159076bae502da7c863dc6ef16372a6e2da4161
24,285
def trim_resource(resource):
    """Strip surrounding whitespace and slashes from a resource path."""
    unwanted = " \t\n\r/"
    return resource.strip(unwanted)
5a9d9bbf6da72cf967eee1e9198d109f096e3e41
24,286
import requests


def wikipedia_request_page_from_geocoding(flatitude, flongitude):
    """
    Get list of wikipedia page identifiers related to the specified geocode.

    :param flatitude: latitude of the point of interest
    :param flongitude: longitude of the point of interest
    :return: list of geosearch result dicts (pageid, title, dist, ...);
        empty when nothing was found or the request failed.

    NOTE(review): depends on module-level constants (__WIKIPEDiA_URL__,
    __RADIUS_DEFAULT__, __GS_LIMIT_DEFAULT__) and logger ``lg`` defined
    elsewhere in the module.
    """
    places_list = []
    # MediaWiki expects "lat|lon" for the gscoord parameter.
    loc = "{}|{}".format(flatitude, flongitude)
    print(loc)
    parameters = {
        "action": "query",
        "list": "geosearch",
        "gscoord": loc,
        "gsradius": __RADIUS_DEFAULT__,
        "gslimit": __GS_LIMIT_DEFAULT__,
        "format": "json",
    }
    # API Request
    response = requests.get(url=__WIKIPEDiA_URL__, params=parameters)
    if response.status_code == 200:
        reply_dict = response.json()
        places_list = reply_dict['query']['geosearch']
        if places_list:
            for idx, place in enumerate(places_list):
                print(idx, "W#{}".format(place['pageid']), place['title'],
                      place['dist'], "m")
        else:
            print('address not found')
            lg.warning('address not found')
    else:
        print('mediawiki reply error')
        lg.warning('mediawiki reply error')
    del response
    return places_list
b61ea747c40f132d312e03c6d3b649e35f53430c
24,287
def globalBinarise(logger, img, thresh, maxval):
    """
    This function takes in a numpy array image and returns a corresponding
    mask that is a global binarisation on it based on a given threshold
    and maxval. Any elements in the array that is greater than or equals
    to the given threshold will be assigned maxval, else zero.

    Parameters
    ----------
    logger : unused; kept for interface compatibility with callers.
    img : {numpy.ndarray}
        The image to perform binarisation on.
    thresh : {int or float}
        The global threshold for binarisation.
    maxval : {np.uint8}
        The value assigned to an element that is greater than or equals
        to `thresh`.

    Returns
    -------
    binarised_img : {numpy.ndarray, dtype=np.uint8}
        A binarised image of {0, maxval}.
    """
    # Allocate up-front so the function can always return an array.
    # BUG FIX: previously the allocation was inside the try block, so a
    # failure left `binarised_img` unbound and the return raised NameError.
    binarised_img = np.zeros(img.shape, np.uint8)
    try:
        binarised_img[img >= thresh] = maxval
    except Exception as e:
        # logger.error(f'Unable to globalBinarise!\n{e}')
        print((f"Unable to globalBinarise!\n{e}"))
    return binarised_img
d16bcc8a78a62b5ec945c6e0ff245a10402d22f1
24,288
import os
import logging


def connect_to_db(schema='sys', database='', return_df=True):
    """Connect to the MySQL database and optionally fetch a table.

    Args:
        schema (str): MySQL table schema. Default to 'sys'.
        database (str): MySQL table name. Deafult to ''.
        return_df (bool): When True, also read ``schema.database`` into a
            DataFrame and return it.

    Returns:
        (db_tb, engine, session) when return_df is True, otherwise
        (engine, session).

    NOTE(review): the hostname/port are hard-coded and credentials come
    from .env via dotenv — consider moving the host to configuration.
    NOTE(review): the table name is interpolated directly into the SQL
    string; safe only while schema/database come from trusted code.
    """
    load_dotenv()
    db_user = os.getenv("db_user")
    db_password = os.getenv("db_password")
    db_host = 'traffic-accidents.c1npf904zyic.sa-east-1.rds.amazonaws.com'
    db_port = '3306'
    params = f'{db_user}:{db_password}@{db_host}:{db_port}/{schema}'
    try:
        engine = create_engine("mysql+mysqlconnector://%s" % params,
                               max_identifier_length=128, pool_size=1)
        engine.connect()
        Session = sessionmaker(bind=engine)
        session = Session()
    except Exception:
        # NOTE(review): on failure only a log line is emitted; the later
        # use of `session` then raises NameError — consider re-raising.
        logging.error("%s - Could not connect to database", database)
    if return_df == True:
        db_tb = pd.read_sql(f"SELECT * FROM {schema}.{database}", session.bind)
        return db_tb, engine, session
    else:
        return engine, session
24,289
def times_by_stencil(results):
    """Collects times of multiple results by stencils.

    Args:
        results: List of `Result` objects, all covering the same stencils.

    Returns:
        A tuple of lists (stencils, times).

    Raises:
        ValueError: if the results disagree on their stencil lists.
    """
    reference = results[0].stencils
    if any(reference != r.stencils for r in results):
        raise ValueError('All results must include the same stencils')
    collected = by_stencils(r.times_by_stencil() for r in results)
    return reference, collected
a304924f6f82e6611c9469a21f92592f67d7c84d
24,290
def get_bulk_and_slab(bulk, miller=[1,1,1], layers=4, vacuum=16):
    """Create a slab and conventional bulk cell from a bulk cell input

    Parameters
    ----------
    bulk : pymatgen structure
        pymatgen structure of the bulk material
    miller : list
        list of miller indices
    layers : int
        number of atomic layers
    vacuum : float, optional
        thickness of vacuum

    Returns
    -------
    oriented_primitive_bulk_o : pymatgen structure
        pymatgen structure of the bulk
    primitive_slab : pymatgen structure
        pymatgen structure of the slab
    """
    # NOTE: vacuum is given in unit planes here (in_unit_planes=True); the
    # vacuum thickness is adjusted again at the end of the workflow.
    # TODO: set an absolute thickness and compute how many layers that is,
    # keeping the total even so no atom sits exactly at z=0.5 and there is
    # always one central unrelaxed layer when doubling the cell.
    # NOTE(review): reorient_lattice=True had problems (wrong orthogonal
    # cell), so the reorientation is done by hand below via a new lattice.
    sl = SlabGenerator(bulk, miller, layers, vacuum, lll_reduce=True,
                       center_slab=True, in_unit_planes=True, primitive=True,
                       max_normal_search=None, reorient_lattice=False)
    slab = sl.get_slab()
    primitive_slab = slab.get_orthogonal_c_slab()
    # Shift in-plane so the topmost atom (largest fractional z) defines the
    # in-plane origin; only the xy components of the shift are kept.
    inplaneshift = primitive_slab.frac_coords[np.argmax(primitive_slab.frac_coords[:,2])]
    inplaneshift[2] = 0
    primitive_slab = Structure(
        Lattice.from_lengths_and_angles(
            primitive_slab.lattice.lengths, primitive_slab.lattice.angles),
        primitive_slab.species,
        primitive_slab.frac_coords-inplaneshift,
        validate_proximity=True, to_unit_cell=True,
        coords_are_cartesian=False,)
    # The bulk reference cell is neither primitive nor oriented like the slab.
    slab_bulkref = slab.oriented_unit_cell
    zgen = ZSLGenerator_mod()
    atoms = AseAtomsAdaptor.get_atoms(slab_bulkref)  # NOTE(review): unused — verify before removing
    res = list(zgen(slab_bulkref.lattice.matrix[:2,:], slab.lattice.matrix[:2,:], lowest=True))
    #print(res)
    # Attention: ZSLGenerator uses reduced_vectors (Zur), which may swap the
    # a and b vectors, so the reported transformation alone does not recover
    # the same supercell. Search all unimodular +-1/0 2x2 matrices `t` for
    # the one that maps the ZSL substrate transformation back onto the
    # bulk-reference lattice vectors.
    tests = [np.array(i) for i in list(combinations(list(product([1, 0, -1] , repeat = 2)), 2))
             if np.isclose(np.abs(np.linalg.det(np.array(i))), 1.)]
    for t in tests:
        tt = np.dot(t, np.dot(res[0]['substrate_transformation'], slab.lattice.matrix[:2,:]))
        if np.isclose(slab_bulkref.lattice.matrix[:2,:]-tt, 0).all():
            break
            # NOTE(review): the following lines were unreachable in the
            # original source (dead code after `break`); kept verbatim.
            inv = np.linalg.inv(np.dot(t, res[0]['substrate_transformation']))
            break
    # Build the integer back-transformation from the matched 2x2 `t`,
    # embedded into 3x3 with an identity z-row.
    backtrafomatrix = np.linalg.inv(
        np.array([t[0].tolist() + [0], t[1].tolist() + [0], [0,0,1]])).astype(int)
    sst = SupercellTransformation(backtrafomatrix)
    newbulkcell = sst.apply_transformation(slab_bulkref)
    t = res[0]['substrate_transformation']
    bstrafo = np.array([t[0].tolist() + [0], t[1].tolist() + [0], [0,0,1]])
    prim_bulk_cell = np.dot( np.linalg.inv(bstrafo), newbulkcell.lattice.matrix)
    # Here we find the in-plane primitive lattice vectors for the bulk cell.
    # The lattice is still oriented in xyz like the bulk, which is convenient:
    # we can get the exact rotation matrix w.r.t. the bulk conventional cell
    # (strain contributions could be implemented here). The atoms are still
    # not aligned in xyz; that alignment happens below.
    tests = Structure(Lattice(prim_bulk_cell),
                      [list(newbulkcell.species)[0]],
                      [newbulkcell.cart_coords[0]],
                      validate_proximity=True, to_unit_cell=True,
                      coords_are_cartesian=True)
    species = newbulkcell.species
    coords = newbulkcell.cart_coords
    s = tests.copy()
    # we add the other atoms; duplicates are skipped via proximity validation
    for i, sp in enumerate(species):
        try:
            s.insert(i, sp, coords[i],\
                     coords_are_cartesian=True,\
                     validate_proximity=True)
        except:
            pass
    oriented_primitive_bulk = s.get_sorted_structure()
    # put into cell
    oriented_primitive_bulk = Structure(oriented_primitive_bulk.lattice,
                                        oriented_primitive_bulk.species,
                                        oriented_primitive_bulk.cart_coords,
                                        validate_proximity=True, to_unit_cell=True,
                                        coords_are_cartesian=True)

    def test(matrix1, matrix2):
        # vecs: do the first/third vector lengths of both lattices match?
        # right: is matrix1 a right-handed lattice?
        vecs = (np.isclose(np.linalg.norm(matrix1[0]), np.linalg.norm(matrix2[0]))
                and np.isclose(np.linalg.norm(matrix1[2]), np.linalg.norm(matrix2[2])))
        r = np.cross(matrix1[0], matrix1[1])
        right = (np.dot(r, matrix1[2]) > 0)
        return vecs, right

    # Enumerate in-plane vector swaps/sign flips (times a +-1 z factor) until
    # the transformed bulk lattice matches the slab lattice and stays
    # right-handed.
    combinationslist = [[[1,0],[0,1]], [[-1,0],[0,1]], [[-1,0],[0,-1]], [[1,0],[0,-1]],\
                        [[0,1],[1,0]], [[0,-1],[1,0]], [[0,-1],[-1,0]], [[0,1],[-1,0]], ]
    for c in combinationslist:
        for c3 in [1,-1]:
            m = np.zeros((3,3))
            m[:2,:2] = np.array(c)
            m[2,2] = c3
            newm = np.dot(m, oriented_primitive_bulk.lattice.matrix)
            vecs, right = test(newm, primitive_slab.lattice.matrix)
            if vecs and right:
                break
        # NOTE(review): this `break` only exits the inner c3 loop; the outer
        # loop keeps running and overwrites `m` — confirm this is intended.
    sst = SupercellTransformation(m.astype(int))
    oriented_primitive_bulk = sst.apply_transformation(oriented_primitive_bulk)
    # This is the primitive bulk, with the surface spanned by components 0
    # and 1, but not yet oriented like the orthogonalized slab.
    primitive_slab_L = primitive_slab.lattice.matrix
    primitive_slab_LTM2 = np.cross(primitive_slab_L[0], primitive_slab_L[1])
    primitive_slab_LTM2 /= np.linalg.norm(primitive_slab_LTM2)
    # Fake lattice (unit-length z) used only to find the 3D rotation matrix;
    # the z component's length does not matter.
    primitive_slab_LT = [primitive_slab_L[0], primitive_slab_L[1], primitive_slab_LTM2]
    # The oriented primitive bulk is oriented like the slab, not like the
    # orthogonalized primitive slab lattice.
    oriented_primitive_bulk_L = oriented_primitive_bulk.lattice.matrix
    oriented_primitive_bulk_LTM2 = np.cross(oriented_primitive_bulk_L[0], oriented_primitive_bulk_L[1])
    oriented_primitive_bulk_LTM2 /= np.linalg.norm(oriented_primitive_bulk_LTM2)
    oriented_primitive_bulk_LT = [oriented_primitive_bulk_L[0], oriented_primitive_bulk_L[1], oriented_primitive_bulk_LTM2]
    # NOTE(review): it should be tested that `rot` is really a pure rotation
    # (the cross products enforce matching handedness, but lattice vector 1
    # could still correspond to lattice vector 2).
    rot = np.dot(np.linalg.inv(oriented_primitive_bulk_LT), primitive_slab_LT)
    print("THIS VALUE SHOULD BE 1 ALWAYS", np.linalg.det(rot))
    oriented_primitive_bulk_lattice = np.dot( oriented_primitive_bulk_L, rot )
    oriented_primitive_bulk_o = Structure(Lattice(oriented_primitive_bulk_lattice),
                                          oriented_primitive_bulk.species,
                                          oriented_primitive_bulk.frac_coords,
                                          validate_proximity=True, to_unit_cell=True,
                                          coords_are_cartesian=False)
    return oriented_primitive_bulk_o, primitive_slab
4a914dfba1ee4efea747464036b868a07311cb9d
24,291
def gogogo_figure(ipympl, figsize, ax=None):
    """
    gogogo the greatest function name of all.

    Returns a (figure, axes) pair: the pair belonging to *ax* when one is
    given, otherwise a freshly created figure (created with interactive
    output suppressed when *ipympl* is truthy).
    """
    # Reuse the caller's axes when provided.
    if ax is not None:
        return ax.get_figure(), ax
    if ipympl:
        # Suppress immediate interactive display while building the figure.
        with ioff:
            fig = figure(figsize=figsize)
            ax = fig.gca()
    else:
        fig = figure(figsize=figsize)
        ax = fig.gca()
    return fig, ax
750b75b669f233b833cd575cbf450de44b0ad910
24,292
def unzip6(xs):
    """
    unzip6 :: [(a, b, c, d, e, f)] -> ([a], [b], [c], [d], [e], [f])

    The unzip6 function takes a list of six-tuples and returns six lists,
    analogous to unzip.

    Note: the original implementation subscripted `re.L` (the regex LOCALE
    flag imported via `from re import L`), which raises TypeError; this
    version uses plain `zip(*xs)`.
    """
    # Empty input: zip(*[]) yields nothing, so return six empty lists.
    if not xs:
        return [], [], [], [], [], []
    a, b, c, d, e, f = (list(column) for column in zip(*xs))
    return a, b, c, d, e, f
04ac4aae355b82f1709479296239e4d197224975
24,293
import re


def grep(lines=None, expr=None, index=False):
    """
    Similar to the standard unix "grep" but run on a list of strings.
    Returns a list of the matching lines unless index=True is set,
    then it returns the indices.

    Parameters
    ----------
    lines : list
        The list of string lines to check.
    expr : str
        Scalar string expression to search for.
    index : bool, optional
        If this is ``True`` then the indices of matching lines will be
        returned instead of the actual lines. index is ``False`` by default.

    Returns
    -------
    out : list
        The list of matching lines or indices.

    Example
    -------

    Search for a string and return the matching lines:

    .. code-block:: python

        mlines = grep(lines,"hello")

    Search for a string and return the indices of the matching lines:

    .. code-block:: python

        index = grep(lines,"hello",index=True)

    """
    if lines is None:
        raise ValueError("lines must be input")
    if expr is None:
        raise ValueError("expr must be input")
    out = []
    # ndmin=1 lets a scalar string be treated as a one-element list.
    for cnt, line in enumerate(np.array(lines, ndmin=1)):
        if re.search(expr, line) is not None:
            # Collect either the line itself or its position.
            out.append(cnt if index else line)
    return out
aefbf15ba94e8ac2ceced3ed3958abb7e4a70163
24,294
from bs4 import BeautifulSoup
from typing import Dict


def get_table_map_from_text(sp: BeautifulSoup, keep_table_contents=True) -> Dict:
    """
    Generate table dict only
    :param sp: parsed document soup
    :param keep_table_contents: when True, extract and keep each table's content
    :return: dict mapping normalized TABREF ids to table metadata entries
    """
    table_map = dict()

    def _add_entry(node):
        """Register one table node in table_map and strip its rows."""
        # Normalize table id: the source uses 'uid', references use 'TABREF'.
        ref_id = node.get('id').replace('uid', 'TABREF')
        table_map[ref_id] = {
            "num": node.get('id-text', None),
            "text": None,  # placeholder
            "content": extract_table(node) if keep_table_contents else None,
            "ref_id": ref_id
        }
        # Rows are consumed into the entry; drop them from the tree.
        for row in node.find_all('row'):
            row.decompose()

    for flt in sp.find_all('float'):
        try:
            if flt.name and flt.get('name') == 'table' and flt.get('id'):
                _add_entry(flt)
        except AttributeError:
            print('Attribute error with table float: ', flt.name)
            continue

    for tab in sp.find_all('table'):
        try:
            # skip inline tables
            if tab.get('rend') == 'inline':
                continue
            # process them
            if tab.name and tab.get('id'):
                _add_entry(tab)
        except AttributeError:
            print('Attribute error with table: ', tab.name)
            continue

    return table_map
686cad1a219e53a4d5548bf55e5696da94bd7170
24,295
import os


def tag_copier(path, cliargs):
    """This is the tag copier worker function.
    It gets a path from the Queue and searches index for the
    same path and copies any existing tags (from index2)
    Updates index's doc's tag and tag_custom fields.

    path is a sequence of (fullpath, tag, tag_custom, doc_type).
    Returns True when no matching doc exists in the target index.
    """
    doclist = []
    # doc search (matching path) in index for existing tags from index2
    # filename
    f = os.path.basename(path[0])
    # parent path
    p = os.path.abspath(os.path.join(path[0], os.pardir))

    data = {
        "size": 1,
        "_source": ['tag', 'tag_custom'],
        "query": {
            "query_string": {
                "query": "filename: \"" + f + "\" AND path_parent: \"" + p + "\""
            }
        }
    }

    # doc_type is either 'directory' or 'file'; use it for both the search
    # and the update so they always agree.
    doc_type = 'directory' if path[3] == 'directory' else 'file'
    res = es.search(index=cliargs['index'], doc_type=doc_type, body=data,
                    request_timeout=config['es_timeout'])

    # mark task done if no matching path in index and continue
    if len(res['hits']['hits']) == 0:
        return True

    # existing tag in index2
    docid = res['hits']['hits'][0]['_id']

    # update tag and tag_custom fields in index
    d = {
        '_op_type': 'update',
        '_index': cliargs['index'],
        '_type': path[3],
        '_id': docid,
        'doc': {'tag': path[1], 'tag_custom': path[2]}
    }
    # The original branched on `path[3] is 'directory'` (string identity
    # comparison) but both branches appended identically — collapsed here.
    doclist.append(d)

    index_bulk_add(es, doclist, config, cliargs)
832a2acbb260835d278940efadd8b1f778baf821
24,296
def grainfromVertices(R=None,fname='shape.txt',mixed=False,eqv_rad=10.,rot=0.,radians=True,min_res=4):
    """
    This function generates a mesh0 from a text file containing a list of its
    vertices in normalised coordinates over a square grid of dimensions 1 x 1.
    Centre = (0,0) coordinates must be of the form:

    j   i
    x   x
    x   x
    x   x
    .   .
    .   .
    .   .

    and the last coordinate MUST be identical to the first. Additionally
    function will take an array R instead, of the same form.

    Args:
        R: array-like or None; vertex coordinates (j, i) per row; when None
            the vertices are read from `fname`
        fname: str; path to the vertex text file (comma-delimited, '#' comments)
        mixed: logical; partially filled cells on or off
        eqv_rad: float; equivalent radius the grain is scaled to (cells)
        rot: float; rotation of the grain (radians)
        radians: logical; when False, `rot` is interpreted as degrees
        min_res: int; Minimum resolution allowed for a grain

    Returns:
        mesh_: square array with filled cells, with value 1
    """
    if radians is not True:
        rot = rot*np.pi/180.
    assert eqv_rad > 0, "ERROR: Equivalent radius must be greater than 0!"

    # If no coords provided use filepath
    if R is None:
        J_ = np.genfromtxt(fname,comments='#',usecols=0,delimiter=',')
        I_ = np.genfromtxt(fname,comments='#',usecols=1,delimiter=',')
    # else use provided coords
    elif type(R) == list:
        R = np.array(R)
    if type(R) == np.ndarray:
        J_ = R[:,0]
        I_ = R[:,1]
    # if coords not yet normalised; normalise them onto the range -1. to 1.
    # NOTE(review): `np.amax(abs(I_)>1.)` takes the max of a boolean array
    # (truthy iff any element exceeds 1) — equivalent to any(), but written
    # inconsistently with the J_ test; verify intent before changing.
    if np.amax(abs(I_)>1.) or np.amax(abs(J_))>1.:
        MAXI = np.amax(I_)
        MINI = np.amin(I_)
        MAXJ = np.amax(J_)
        MINJ = np.amin(J_)
        diffI = MAXI - MINI
        diffJ = MAXJ - MINJ
        # scale coords onto whichever coordinates have the largest difference
        if diffI>diffJ:
            I_ = 2.*(I_-MINI)/(MAXI-MINI) - 1.
            J_ = 2.*(J_-MINI)/(MAXI-MINI) - 1.
        else:
            I_ = 2.*(I_-MINJ)/(MAXJ-MINJ) - 1.
            J_ = 2.*(J_-MINJ)/(MAXJ-MINJ) - 1.
    # last point MUST be identical to first; append to end if necessary
    if J_[0] != J_[-1]:
        J_ = np.append(J_,J_[0])
        I_ = np.append(I_,I_[0])

    # equivalent radius is known and polygon area is known
    # scale shape as appropriate
    radius = np.sqrt(polygon_area(I_,J_)/np.pi)
    lengthscale = eqv_rad/radius
    J_ *= lengthscale
    I_ *= lengthscale

    # rotate points according by angle rot
    theta = rot
    ct = np.cos(theta)
    st = np.sin(theta)
    J = J_*ct - I_*st
    I = J_*st + I_*ct

    # find max radii from centre and double it for max width
    radii = np.sqrt(I**2+J**2)
    maxwidth = int(2*np.amax(radii)+2)
    maxwidth = max(maxwidth,min_res)
    # keep the mesh width even so the grain centre falls between cells
    if maxwidth%2!=0: maxwidth+=1

    # Add double max rad + 1 for mini mesh dims
    mesh_ = np.zeros((maxwidth,maxwidth))

    # define ref coord as 0,0 and centre to mesh_ centre
    qx = 0.  # NOTE(review): qx/qy are set but never used — confirm vestigial
    qy = 0.
    y0 = float(maxwidth/2.)
    x0 = y0
    I += x0
    J += y0

    # Rasterize the polygon: a cell is filled if its centre lies inside;
    # in `mixed` mode, partially covered cells accumulate 0.01 per inside
    # sub-sample on a 10x10 sub-grid.
    path = mpath.Path(np.column_stack((I,J)))
    for i in range(maxwidth):
        for j in range(maxwidth):
            in_shape = path.contains_point([i+.5,j+.5])
            if in_shape and mixed == False: mesh_[i,j] = 1.
            elif in_shape and mixed == True:
                for ii in np.arange(i,i+1,.1):
                    for jj in np.arange(j,j+1,.1):
                        in_shape2 = path.contains_point([ii+.05,jj+.05])
                        if in_shape2: mesh_[i,j] += .01

    return mesh_
def get_block(blockidx, blocksz, obj):
    """
    Given obj, a list, return the intersection of
    obj[blockidx*blocksz:(blockidx+1)*blocksz]
    and obj

    Ex: get_block(2, 100, range(250) returns [200, 201, ..., 249]
    """
    start = blockidx * blocksz
    # Block starts beyond the data: nothing to return.
    if start > len(obj):
        return []
    stop = start + blocksz
    # Block is only partially covered: return whatever remains.
    if stop > len(obj):
        return obj[start:]
    return obj[start:stop]
8666cc30be23619a49f899beec17d3ba1f0fb357
24,298
import warnings


def RDS(net,waves,coupons,p,size,seeds,posseed,poswave):
    """Conducts respondent-driven sampling

    Input:
        net: network, networkx graph
        waves: maximum number of waves, integer (use 0 with poswave=True for contract tracing)
        coupons: number of coupons per respondent, integer
        p: probability of participation, float
        size: target sample size
        seeds: number of seeds
        posseed: whether the seed should be HIV-positive, boolean, requires node attribute
            'hiv_status' with values of 0 and 1 (positive) for net
        poswave: whether recruitment continues past wave limit for positive agents, boolean,
            requires node attribute 'hiv_status' with values of 0 and 1 (positive) for net
    Output:
        sampled: list of sampled nodes
    """
    # Check if HIV status is needed
    if posseed or poswave:
        # Check for missing HIV status node attribute
        if nx.get_node_attributes(net,"hiv_status")=={}:
            # Warn and degrade gracefully when the attribute is absent
            warnings.warn('Warning Message: no node attribute "hiv_status", posseed and poswave set to False')
            # Set posseed to False
            posseed=False
            # Set poswave to False
            poswave=False
    # Count number of nodes
    n=np.shape(net)[0]
    # Initialize sample: maps wave number -> list of nodes recruited that wave
    sample={}
    # Initialize list of already sampled agents
    sampled=[]
    # Check for HIV positive seed
    if posseed:
        # Choose seeds from HIV positive nodes
        seed=rand.choices([x for x,y in net.nodes(data=True) if y['hiv_status']==1],k=seeds)
    # Random seed
    else:
        # Choose seeds from all nodes
        seed=rand.choices(list(range(n)),k=seeds)
    # Store seeds as 0th wave
    sample[0]=seed
    # Add seed to list of sampled agents
    sampled=sampled+seed
    # Initialize wave counter
    wave=0
    # Initialize count of nodes sampled
    # NOTE(review): starts at 1 regardless of the number of seeds — confirm
    # whether this should be len(seed)
    nodes=1
    # Check for waves still to be completed, unsampled nodes, nodes sampled
    # in previous wave, and under target sample size
    while wave<waves and nodes<n and sample[wave]!=[] and nodes<size:
        # Increase wave counter
        wave=wave+1
        # Initialize list of nodes sampled in current wave
        sample[wave]=[]
        # loop through nodes sampled in previous wave
        for i in sample[wave-1]:
            # Identify neighbors of node i
            nbrs=list(net[i])
            # Remove already sampled nodes
            nbrs=list(set(nbrs)-set(sampled))
            # Initialize count of used coupons
            used=0
            # Check for unsampled nodes and remaining coupons
            while used<coupons and nbrs!=[]:
                # Sample one node from list of neighbors
                node=rand.choice(nbrs)
                # Probability check on node participation
                if np.random.uniform(0,1)<p:
                    # Add sampled node to list of nodes sampled during current wave
                    sample[wave]=sample[wave]+[node]
                    # Add sampled node to list of sampled nodes
                    sampled=sampled+[node]
                    # Increase counter for sampled nodes
                    nodes=nodes+1
                    # Increase count of used coupons
                    used=used+1
                    # Remove node from list of neighbors
                    nbrs.remove(node)
                else:
                    # Remove node from list of neighbors
                    nbrs.remove(node)
    # Check for continuing past final wave for HIV-positive agents
    if poswave:
        # Create network from last wave
        last=nx.subgraph(net,sample[wave])
        # Generate list of HIV-positive nodes in last wave
        positive=[x for x,y in last.nodes(data=True) if y['hiv_status']==1]
        # Check for HIV-positive nodes in last wave, unsampled nodes, and
        # nodes sampled in previous wave
        while positive!=[] and nodes<n and sample[wave]!=[]:
            wave=wave+1
            # Initialize list of nodes sampled in current wave
            sample[wave]=[]
            # loop through HIV-positive nodes sampled in previous wave
            for i in positive:
                # Identify neighbors of node i
                nbrs=list(net[i])
                # Remove already sampled nodes
                nbrs=list(set(nbrs)-set(sampled))
                # Initialize count of used coupons
                used=0
                # Check for unsampled nodes and remaining coupons
                while used<coupons and nbrs!=[]:
                    # Sample one node from list of neighbors
                    node=rand.choice(nbrs)
                    # Probability check on node participation
                    if np.random.uniform(0,1)<p:
                        # Add sampled node to list of nodes sampled during current wave
                        sample[wave]=sample[wave]+[node]
                        # Add sampled node to list of sampled nodes
                        sampled=sampled+[node]
                        # Increase counter for sampled nodes
                        nodes=nodes+1
                        # Increase count of used coupons
                        used=used+1
                        # Remove node from list of neighbors
                        nbrs.remove(node)
                    else:
                        # Remove node from list of neighbors
                        nbrs.remove(node)
            # Create network from last wave
            last=nx.subgraph(net,sample[wave])
            # Generate list of HIV-positive nodes in last wave
            positive=[x for x,y in last.nodes(data=True) if y['hiv_status']==1]
    return sampled
5480a85e9f160f988cff384306a90913a6eac905
24,299