Dataset columns: query (string, lengths 9 to 9.05k), document (string, lengths 10 to 222k), negatives (list, lengths 19 to 20), metadata (dict).
Download and save original photos and videos for all Photo objects (or just those that don't already have them). fetch_all (Boolean): fetch ALL photos/videos, even if we've already got them?
def _fetch_files(self, fetch_all): photos = Photo.objects.filter(user=self.account.user) if not fetch_all: photos = photos.filter(original_file="") error_messages = [] for photo in photos: try: self._fetch_and_save_file(photo=photo, media_type=...
[ "def downloading_all_photos(self):\n self.create_folder()\n pic_counter = 1\n for url_link in self.pic_url_list:\n print(pic_counter)\n pic_prefix_str = self.g_search_key + \"/\" + self.g_search_key + str(pic_counter)\n self.download_single_image(url_link.encode...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads a video or photo file and saves it to the Photo object.
def _fetch_and_save_file(self, photo, media_type): if media_type == "video": url = photo.remote_video_original_url # Accepted video formats: # https://help.yahoo.com/kb/flickr/sln15628.html # BUT, they all seem to be sent as video/mp4. acceptable_cont...
[ "def _download_and_save(cls, url, filepath):\n r = requests.get(url)\n r.raise_for_status()\n with filepath.open(mode='wb') as f:\n f.write(r.content)", "def __download_file(self):\n\n self.__logger.info('start download as %s', self.__args.output)\n\n video_file = ope...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the apartment by number. Returns None if not found.
def get_ap_by_number(bl, nr): # for ap in bl: # if get_nr(ap) == nr: # return ap bl = [ap for ap in bl if get_nr(ap) == nr] return bl[0] if len(bl) > 0 else None
[ "def find_phone_number(name):\n name = format_name(name)\n if name in contacts and name is not None: # if then name is in the dictionary and is formatted correctly\n return contacts[name] # return the matching phone number\n else:\n return None", "def get_ad_by_number(number: str):\n t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A parallel map function that reports on its progress. Applies `func` to every item of `iterable` and returns a list of the results. If `processes` is greater than one, a process pool is used to run the functions in parallel. `should_print_progress` is a boolean value that indicates whether a string 'N of M' should be pr...
def pmap(func, iterable, processes, should_print_progress, filter_=None, *args, **kwargs): global _current global _total _current = multiprocessing.Value('i', 0) _total = multiprocessing.Value('i', len(iterable)) func_and_args = [(func, arg, should_print_progress, filter_) for arg in iterable] ...
[ "def parallel_map(function, sequence, numcores=None, progressbar=False):\n if not callable(function):\n raise TypeError(\"input function '%s' is not callable\" % repr(function))\n\n if not numpy.iterable(sequence):\n raise TypeError(\"input '%s' is not iterable\" % repr(sequence))\n\n size = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Maximum number of batches to iterate over in an epoch.
def max_num_batches(self): return self._max_num_batches
[ "def max_iterations(self) -> int:\n return self._max_epochs", "def batchSize(self):\n x = int(ceil(self.p))\n return self.batchSizeList[x]", "def get_batch_size(self):\n return max(min(self.settings.get('batch_size', 100), 10000), 1)", "def num_batches(self):\n return math.c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the number of batches to iterate over.
def _update_num_batches(self): # maximum possible number of batches is equal to number of whole times # batch_size divides in to the number of data points which can be # found using integer division possible_num_batches = self.inputs.shape[0] // self.batch_size if self.max_num_ba...
[ "def update_batches(self):\n with self._commit_lock:\n self._update_batches_force()", "def batch_size(self, new_batch_size):\n\n self._batch_size = int(new_batch_size)", "def _update_batch_size(self):\n candidate = None\n for op in self.operations:\n op_batch_si...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starts a new epoch (pass through data), possibly shuffling first.
def new_epoch(self): self._curr_batch = 0 if self.shuffle_order: self.shuffle()
[ "def prepare_next_epoch(self, model, data, sess, epoch):\n raise NotImplementedError()", "def train_one_epoch(self, *args, **kwargs):\r\n raise NotImplementedError", "def pre_epoch(self):\n pass", "def start_epoch(self):\n self.epochs.append({\"error\":0,\"patterns\":[]})", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Opens a subprocess and executes the command. Prints the return code, output, and error message.
def execute_cmd(cmd): p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out, err = p.communicate() print("Return code:", p.returncode) print(out.rstrip(), err.rstrip())
[ "def call_command(*args):\n c = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, error = c.communicate()\n if c.returncode != 0:\n if error:\n print(error)\n print(\"Error running `%s'\" % ' '.join(args))\n return output", "def run_cmd(cmd):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the token scope from the user.
def __get_token_scope(self): print(self.df_scopes['scope']) while True: try: user_input = int(input('What is your token\'s scope? >> ')) except ValueError: print('Please enter an int. Try again.') continue if user_input ...
[ "def require_scope(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n global _user\n if _user.get('scope') is not None:\n result = f(*args, **kwargs)\n _user = None\n return result\n else:\n if _logger:\n _logger.warning('API call ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets info about the athlete.
def get_athlete(self): REQUEST_URL = self.API_BASE + 'athlete' r = requests.get(REQUEST_URL, headers=self.hd) return dict(r.json())
[ "def get_athlete_details(an_athlete):\n athlete_results = [] \n url = base_url + '/v3/athletes/{0}'.format(an_athlete)\n r = requests.get(url, headers=extra_headers)\n results = r.json()\n athlete_results = {'athlete_id': an_athlete,\n 'athlete_sex': results.get('sex'),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that receives the path of a Word file, analyzes it, searches for a pattern, and returns a pandas DataFrame with the information grouped according to the pattern
def leer_documento(nombre_archivo): # Analyze the Word file and store it in a string my_text = docx2txt.process(nombre_archivo) pattern=""" (Number\:\\t)(?P<ClaimNumber>.*)\\n{2} .* (Reference\:\\t)(?P<CrossReference>.*)\\n{2} .* (Name\:\\t)(?P<Name>.*)\\n{2} .* (Date\:\\...
[ "def load_docs(self, path):\n df = pd.read_csv(path)\n df['numero_PL'] = df['numero_fecha_PL'].apply(lambda x: x.split('-')[0][-4:]) # keeps only the last 4 digits of the PL number\n df['texto'] = df['texto'].apply(lambda x: self.cleanup_text(x))\n # print(df.head(10))\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that receives a DataFrame and splits the ClaimNumber information into two columns (Code1_mc and Code2_mi). It also splits Address into street address, county, state, and postal code
def separar_columnas(df): codigos= df['ClaimNumber'].str.split(expand=True) # Split ClaimNumber df['Code1_mc']=codigos[0] # First element is Code1_mc df['Code2_mi']=codigos[1] # Second element is Code2_mi. If absent, it is stored as empty df.drop(columns='ClaimNumber',inplace= True) # Remove colum...
[ "def from_dataframe(df, msc_bank=MSC.load(5)):\n input_, output_ = df.columns\n df_proc = pd.DataFrame(columns = [input_, output_])\n count = 0\n # code length will be the same among all the keys, so use the first\n code_length = len(msc_bank.sorted_keys[0])\n for index, ro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that replaces empty values with None, removes records that have information in DeathDate, and drops the DeathDate and CrossReference columns
def depurar_datos(df): for column in df.columns: df[column]=df[column].replace([''],[None]) df.drop(df[~df['DeathDate'].isnull()].index, inplace=True) df.drop(columns=['CrossReference','DeathDate'],inplace=True) return df
[ "def filter_null_dates(self):\n df = self.df[~pd.isnull(self.df['discharge_date_es'])] \n df = df[~pd.isnull(df['hospital_date_es'])] \n \n logging.info('Atalaia: Patients with NULL discharge dates and NULL hospital dates has been filtered out!')\n\n return df", "def clean_data...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that saves the received DataFrame to an Excel file and a CSV file
def guardar(df_limpio): df_limpio.to_excel('nydb.xlsx',sheet_name='test1') df_limpio.to_csv('nydb.csv')
[ "def _saveCSV( self ):", "def save_csv(self, df: pd.DataFrame, filename: str) -> None:\n fullname = self.absolute(filename)\n df.to_csv(fullname, index=False)", "def save_data_to_csv(data_frame, csv_path):\n\tdata_frame.to_csv(csv_path, index=False)", "def saveDataframe(df):\n current_date = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that controls the data-cleaning flow
def limpieza_datos(df): df=remover_empty(df) df=remover_strings(df) df=separar_columnas(df) df=depurar_datos(df) df=depurar_codigos(df) return df
[ "def set_get_datos(objeto_quipos,dic):\r\n if dic['NOMBRE']!='':\r\n objeto_quipos.set_nombre(dic['NOMBRE'])\r\n objeto_quipos.set_modelo(dic['MODELO'])\r\n objeto_quipos.set_serie(dic['SERIE'])\r\n objeto_quipos.set_ip(dic['IP'])\r\n objeto_quipos.set_usuario(dic['USUARIO'])\r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main function that controls the flow of reading documents, cleaning data, and saving
def main(mypath): # Create an empty DataFrame to concatenate the information returned by leer_documento df=pd.DataFrame(columns=['ClaimNumber','CrossReference','Name','BirthDate','DeathDate','Sex','Address']) # For each file found in mypath, read documents and concatenate records into df f...
[ "def leer_documento(nombre_archivo):\n #Analizar el archivo de word y almacenarlo en un string\n my_text = docx2txt.process(nombre_archivo)\n\n pattern=\"\"\"\n (Number\\:\\\\t)(?P<ClaimNumber>.*)\\\\n{2}\n .*\n (Reference\\:\\\\t)(?P<CrossReference>.*)\\\\n{2}\n .*\n (Name\\:\\\\t)(?P<Name>...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if 'line' only edits an image tag or if 'line' is not a changed line in a diff
def change_type(lines: List[str]) -> Change: added_images = [] removed_images = [] diff_lines = [] for line in lines[2:]: if not line.startswith("-") and not line.startswith("+"): # not a diff line, ignore it continue diff_lines.append(line) if len(diff_lin...
[ "def isLine(pixel1, changes):\n for pixel2 in changes.pixels:\n if abs(pixel1.x-pixel2.x) == cS and pixel1.y == pixel2.y:\n return True\n elif abs(pixel1.y-pixel2.y) == cS and pixel1.x == pixel2.x:\n return True\n return False", "def _start_of_patch(self, line, filename):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CTOR. callback: function called when a packet is received. name: instance name. input_signature: a gr.io_signature instance. output_signature: a gr.io_signature instance.
def __init__(self, callback, name, input_signature=gr.io_signature(1, 1, gr.sizeof_gr_complex), output_signature=gr.io_signature(0, 0, 0)): UHDRxPktArch.__init__(self, name=name, callback=callback, ...
[ "def __init__(__self__, *,\n filters: Optional[pulumi.Input[Sequence[pulumi.Input['PacketCaptureFilterArgs']]]] = None,\n maximum_bytes_per_packet: Optional[pulumi.Input[int]] = None,\n maximum_bytes_per_session: Optional[pulumi.Input[int]] = None,\n m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a cavity from a coupler and a mirror. It connects the W0 port of the mirror to both the E1 and W1 ports of the coupler, creating a resonant cavity.
def cavity( component: Component, coupler: Component = coupler, length: float = 0.1, gap: float = 0.2, wg_width: float = 0.5, ) -> Component: mirror = pp.call_if_func(component) coupler = pp.call_if_func(coupler, length=length, gap=gap, wg_width=wg_width) c = pp.Component() cr = c <...
[ "def __init__(self, position, serveur_local=None):\n\n self.client = Client(PORT, IP_SERVEUR)\n\n self.serveur_local = serveur_local\n\n if serveur_local == None:\n c1 = Carte(AS, PIQUE) #on s'en fout, elle apparait face cachee\n while True:\n c2 = self.clie...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of disallowed keywords in the query if any exist. Keywords are disallowed if they can lead to modifications of the
def _find_disallowed_keywords(query: str) -> list: disallowed = [ "alter", "call", "commit", "create", "delete", "drop", "explain", "grant", "insert", "lock", "merge", "rename", "revoke", "savepoint", ...
[ "def find_missing_keywords(keywords, text):\n found = set()\n for key in keywords:\n if key in text:\n found.add(key)\n return list(set(keywords) - found)", "def list_words_not_in_dictionary(input_words, spelling_dict):\n not_in_dict = []\n for word in input_words:\n if wor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load csv into sqlite database.
def import_csv_into_sqlite( csv_table_path: str, table_name: str, sqlite_db_path: str ) -> None: subprocess.run( [ 'sqlite3', '-separator', ',', sqlite_db_path, f".import {csv_table_path} {table_name}", ] )
[ "def populate_db(dbpath, csvfilename, tablename):\n conn = sqlite3.connect(dbpath)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n\n with open(csvfilename, 'rb') as csvfile:\n urls = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in urls:\n #row[0], row[1], r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a map of properties from a properties file
def getProperties(file): if isinstance(file, io.IOBase): properties = {} with file as f: for line in f: if "=" in line: name, value = line.split("=", 1) properties[name.strip()] = value.strip() f.close() return...
[ "def getPropertyMap(self, properties = None):\n if properties is None:\n properties = self.getProperties()\n\n rv = dict()\n for prefix in [\"omero\",\"Ice\"]:\n for k,v in properties.getPropertiesForPrefix(prefix).items():\n rv[k] = v\n return rv", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read SQL queries from a SQL file
def getSqls(file): if isinstance(file, io.IOBase): sqls = file.read().split("\n") file.close() return sqls
[ "def run_sql_file(db, sql_file_name, sql_dir=config.sql_dir):\r\n with open(sql_dir + sql_file_name) as sql_file:\r\n query = sql_file.read()\r\n return get_db_query_results(db, query)", "def get_sql_from_file(self, path):\n with open(self.make_sql_file_path(path), 'rb') as f:\n sql...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes Pearson correlation and its significance (using a t distribution) on a pandas.DataFrame. Ignores null values when computing significance. Based on
def p_corr(df1, df2): corr = df1.corr(df2) N = np.sum(df1.notnull()) t = corr*np.sqrt((N-2)/(1-corr**2)) p = 1-scipy.stats.t.cdf(abs(t),N-2) # one-tailed return corr, t, p # Here is a simple function for doing a number of pair-wise t-tests (later I can figure out how to generate ta...
[ "def pearsonr(df):\n r, p = nwise_apply(df, metrics.pearsonr, n=2, comm=True)\n return _dict_to_namedtuple(r, 'Pearsons_r'), _dict_to_namedtuple(p, 'p_value')", "def pearson_corr(matrix):\n (n, m) = matrix.shape\n\n DO = matrix - (np.sum(matrix, 0) / np.double(n))\n # note that mean row will be app...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply one step of gradient descent on the loss function `loss`, with stepsize `step_size`, and returns the updated parameters of the neural network.
def update_params(self, loss, step_size=0.5, first_order=False): grads = torch.autograd.grad(loss, self.parameters(), create_graph=not first_order) updated_params = OrderedDict() for (name, param), grad in zip(self.named_parameters(), grads): updat...
[ "def gradient_step(v: Vector, \n gradient: Vector, \n step_size: float) -> Vector:\n assert len(v) == len(gradient)\n return add(v, scalar_mulitply(step_size, gradient))", "def gradient_ascent(x, iterations, step, max_loss=None):\n for i in range(iterations):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start a SQLAlchemy session for the uhcsdb metadata store.
def uhcsdb_session(dbpath): engine = create_engine('sqlite:///' + dbpath) Base.metadata.bind = engine dbSession = sessionmaker(bind=engine) db = dbSession() try: yield db finally: db.close()
[ "def connect_to_database():\n engine = create_engine(app.config['DATABASE_URL'])\n Base.metadata.bind = engine\n db_session = sessionmaker(bind=engine)\n session = db_session()\n return session", "def init_dbsession(dbsession, tm=None): # pragma: no cover\n zope.sqlalchemy.register(dbsession, t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
As a user, I should be able to create a program.
def test_create_program(self): # Verify that visiting users cannot access the form self.assertLoginRequired('educacion_programcreate') self.login('admin', 'fakepass') # Verify that the form can be accessed. response = self.client_get('e...
[ "def create_computer():\r\n new_computer = input(\"| Enter the name of the Computer |\")\r\n adcomputer.ADComputer.create(new_computer, enable=True)\r\n return \"| Computer created |\"", "def test_view_can_create_program(self):\n # first create the org to own the program\n org_res = self.cl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Source metamodel, or ValueError if this metamodel is not registered yet (which should not happen).
def sourceMetamodel(self): #type: (MetamodelDependency) -> Metamodel try: from modelscripts.megamodels import Megamodel return Megamodel.theMetamodel(id=self.sourceId) except: raise ValueError( 'No target "%s" metamodel registered from %s' % ( ...
[ "def targetMetamodel(self):\n # type: (MetamodelDependency) -> Metamodel\n try:\n from modelscripts.megamodels import Megamodel\n\n return Megamodel.theMetamodel(id=self.targetId)\n except:\n raise ValueError(\n 'From \"%s\" metamodel not register...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Target metamodel, or ValueError if this metamodel is not registered yet (which should not happen).
def targetMetamodel(self): # type: (MetamodelDependency) -> Metamodel try: from modelscripts.megamodels import Megamodel return Megamodel.theMetamodel(id=self.targetId) except: raise ValueError( 'From "%s" metamodel not registered to %s' % ( ...
[ "def sourceMetamodel(self):\n #type: (MetamodelDependency) -> Metamodel\n try:\n from modelscripts.megamodels import Megamodel\n return Megamodel.theMetamodel(id=self.sourceId)\n except:\n raise ValueError(\n 'No target \"%s\" metamodel registered...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Model dependencies based on this metamodel dependency. This could raise a ValueError.
def modelDependencies(self): # type: (MetamodelDependency) -> List(ModelDependency) # could raise a ValueError from modelscripts.megamodels import Megamodel return Megamodel.modelDependencies( metamodelDependency=self)
[ "def _get_dependencies_for_model(self, app_label, model_name):\n dependencies = []\n model_state = self.to_state.models[app_label, model_name]\n for field in model_state.fields.values():\n if field.is_relation:\n dependencies.extend(\n self._get_depe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create sequences with syntax errors.
def syntax_error(smi: str) -> list: tokens = smi_tokenizer(smi) syn_sym = ["=", "#", "-", "(", ")"] # could also only use numbers present in the sequence random_value = random.random() if random_value < 0.1: # sequence starts with syntax token tokens.insert(0, random.choice(syn_sym))...
[ "def test_latex_invalid_consecutive_bin_end_after_other(self):\n with self.assertRaises(lamarksyntaxerror.LaMarkSyntaxError):\n ast = self._make_ast([\n lexertokens.BIN_START(\"{%latex%}\",0),\n lexertokens.BIN_END(\"{%end%}\",0),\n lexertokens.OTHER(\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a fragment to an atom with a full valence, with a single, double, or triple bond. The correct SMILES is changed to the correct SMILES + fragment, separated by a dot, or the bond order is raised by 1 or 2.
def valence_error(smiles, fragment): # get editable mol file of both core and fragment core = Chem.MolFromSmiles(smiles) corfrag = smiles random_value = random.random() if random_value < 0.5: # add fragment frag = Chem.MolFromSmiles(fragment) combo = Chem.CombineMols(core, fr...
[ "def add_atom(self, atom):\n return self.add_vertex(atom)", "def putFragment(self, bundle, key, fragment):\n pass", "def Struct_substitute_atom(bas,atom_old,atom_new,scheme):\n nat = len(bas)\n nsb = len(scheme)\n\n ia_sub = 0\n bas_new = []\n for ia in range(nat):\n at = bas[ia][0]\n x...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Introduces errors into valid SMILES
def introduce_error(smile, fragment, vocab, invalid_type: str = "all", num_errors: int = 1): corfrag = smile i = 0 while Chem.MolFromSmiles(smile) is not None and i < 20: i += 1 try: if invalid_type...
[ "def syntax_error(smi: str) -> list:\n tokens = smi_tokenizer(smi)\n syn_sym = [\"=\", \"#\", \"-\", \"(\", \")\"]\n # could also only use numbers present in the sequence\n random_value = random.random()\n if random_value < 0.1:\n # sequence starts with syntax token\n tokens.insert(0, r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a list of images from BGR format to RGB format.
def bgr_to_rgb(ims): out = [] for im in ims: out.append(im[:,:,::-1]) return out
[ "def to_grayscale(img_list):\n gray_list = []\n\n img_idxs = [i for i in range(len(img_list))]\n\n for idx, im in enumerate(img_list):\n if idx in img_idxs:\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n gray_list.append(gray)\n \n return gray_list", "def rgb_to_gray_i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the single join
def test_singleJoin(spark_test_session): first = spark_test_session.createDataFrame([{'first_id': 1, 'value': None}, {'first_id': 2, 'value': 2}]) second = spark_test_session.createDataFrame([{'second_id': 1, 'value': 1}, {'second_id': 2, 'value': 22}]) expected_df = first.join(second,first.fi...
[ "def test_join_pathed_tables_1(self, join_mock, build_on_clause_mock):\n join_mock.return_value = self.center\n build_on_clause_mock.return_value = \"onclause\"\n\n querying.join_pathed_tables(self.graph, self.table_pathing_1) \n\n build_on_clause_mock.assert_any_call(self.graph, \"cent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize each unit test by creating three patches a, b, and c with their contents equal to their filename in upper case.
def setUp(self): for patch_name in ['a', 'b', 'c']: with open(os.path.join(self.STASH_PATH, patch_name), 'w') as f: f.write(patch_name.upper())
[ "def test_file_creation(Nfiles):\n command = ('python specFit/demo/demo_preprocess_tiff.py '\n '--processed_dir {} --raw_dir {} --Nfiles {} --Nx {} --Ny {} --spectra_type {}')\\\n .format(processed_dir(Nfiles), raw_dir(Nfiles), Nfiles, Nx, Ny, spectra_type)\n os.system(command)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that it is possible to retrieve all stashed patches.
def test_get_patches(self): assert_equal(Stash.get_patches(), ['a', 'b', 'c'])
[ "def test_removing_patch(self):\n Stash.remove_patch('b')\n assert_equal(Stash.get_patches(), ['a', 'c'])\n\n Stash.remove_patch('c')\n assert_equal(Stash.get_patches(), ['a'])\n\n Stash.remove_patch('a')\n assert_equal(Stash.get_patches(), [])", "def test_get_patch(self)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that it is possible to remove stashed patches.
def test_removing_patch(self): Stash.remove_patch('b') assert_equal(Stash.get_patches(), ['a', 'c']) Stash.remove_patch('c') assert_equal(Stash.get_patches(), ['a']) Stash.remove_patch('a') assert_equal(Stash.get_patches(), [])
[ "def test_removing_non_existent_patch_raises_exception(self):\n assert_raises(StashException, Stash.remove_patch, 'd')", "def test_get_patches(self):\n assert_equal(Stash.get_patches(), ['a', 'b', 'c'])", "def test_create_empty_patch():\n _p = Patch('some_patch_name')", "def test_delete_cubby...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that removing a non existent patch raises an exception.
def test_removing_non_existent_patch_raises_exception(self): assert_raises(StashException, Stash.remove_patch, 'd')
[ "def test_removing_patch(self):\n Stash.remove_patch('b')\n assert_equal(Stash.get_patches(), ['a', 'c'])\n\n Stash.remove_patch('c')\n assert_equal(Stash.get_patches(), ['a'])\n\n Stash.remove_patch('a')\n assert_equal(Stash.get_patches(), [])", "def test_getting_non_exi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that it is possible to retrieve the contents of stashed patches.
def test_get_patch(self): assert_equal(Stash.get_patch('a'), 'A') assert_equal(Stash.get_patch('b'), 'B') assert_equal(Stash.get_patch('c'), 'C')
[ "def test_get_patches(self):\n assert_equal(Stash.get_patches(), ['a', 'b', 'c'])", "def test_removing_patch(self):\n Stash.remove_patch('b')\n assert_equal(Stash.get_patches(), ['a', 'c'])\n\n Stash.remove_patch('c')\n assert_equal(Stash.get_patches(), ['a'])\n\n Stash.r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that showing a non existent patch raises an exception.
def test_getting_non_existent_patch_raises_exception(self): assert_raises(StashException, Stash.get_patch, 'd')
[ "def test_patchNonExisting(self):\n self.monkeyPatcher.addPatch(self.testObject, 'nowhere',\n 'blow up please')\n self.assertRaises(AttributeError, self.monkeyPatcher.patch)", "def test_removing_non_existent_patch_raises_exception(self):\n assert_raises(Stas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that public transport mode is handled.
async def test_public_transport(hass: HomeAssistant) -> None: entry = MockConfigEntry( domain=DOMAIN, unique_id="0123456789", data={ CONF_ORIGIN_LATITUDE: float(ORIGIN_LATITUDE), CONF_ORIGIN_LONGITUDE: float(ORIGIN_LONGITUDE), CONF_DESTINATION_LATITUDE: fl...
[ "def test_transport_instantiation(sync_transport_no_abc):\n assert sync_transport_no_abc", "def test_PluggableTransport_init(self):\n pt = bridges.PluggableTransport()\n self.assertIsInstance(pt, bridges.PluggableTransport)", "def test_private_to_public(self):\r\n pass", "def test_publ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that no_attribution is handled.
async def test_no_attribution_response(hass: HomeAssistant) -> None: entry = MockConfigEntry( domain=DOMAIN, unique_id="0123456789", data={ CONF_ORIGIN_LATITUDE: float(ORIGIN_LATITUDE), CONF_ORIGIN_LONGITUDE: float(ORIGIN_LONGITUDE), CONF_DESTINATION_LATIT...
[ "def test_not_collected():\n assert False", "def test_html_with_no_visitors_planned(self):\n message = 'Você não possui entradas autorizadas.'\n self.assertContains(self.resp, message)", "def test_owner_no_ownership(self):\n self.assert_ownership(True)", "def testForMissingUserData(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a non-existent destination_entity_id is caught.
async def test_destination_entity_not_found( hass: HomeAssistant, caplog: pytest.LogCaptureFixture ) -> None: entry = MockConfigEntry( domain=DOMAIN, unique_id="0123456789", data={ CONF_ORIGIN_LATITUDE: float(ORIGIN_LATITUDE), CONF_ORIGIN_LONGITUDE: float(ORIGIN_L...
[ "def test_get_destination_no_destination(self):\n router_config = self.create_router_config()\n resp = yield self.post('/routers/', router_config)\n router_id = (yield resp.json())['result']['id']\n\n resp = yield self.get(\n '/routers/{}/destinations/bad-destination-id'.forma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an invalid state of the destination_entity_id is caught.
async def test_invalid_destination_entity_state( hass: HomeAssistant, caplog: pytest.LogCaptureFixture ) -> None: hass.states.async_set( "device_tracker.test", "test_state", ) entry = MockConfigEntry( domain=DOMAIN, unique_id="0123456789", data={ CONF_...
[ "def test_validate_invalid_transition_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.ntm1.transitions['q4'] = self.ntm1.transitions['q0']\n self.ntm1.validate()", "def test_user_cannot_create_flight_with_invalid_destination(self):\n\n self.testHelp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an invalid state of the origin_entity_id is caught.
async def test_invalid_origin_entity_state( hass: HomeAssistant, caplog: pytest.LogCaptureFixture ) -> None: hass.states.async_set( "device_tracker.test", "test_state", ) entry = MockConfigEntry( domain=DOMAIN, unique_id="0123456789", data={ CONF_ORIGI...
[ "async def test_restoring_invalid_entity_id(opp, opp_storage):\n entity = RestoreEntity()\n entity.opp = opp\n entity.entity_id = \"test.invalid__entity_id\"\n now = dt_util.utcnow().isoformat()\n opp_storage[STORAGE_KEY] = {\n \"version\": 1,\n \"key\": STORAGE_KEY,\n \"data\": ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test sensor restore state.
async def test_restore_state(hass: HomeAssistant) -> None: # Home assistant is not running yet hass.state = CoreState.not_running last_reset = "2022-11-29T00:00:00.000000+00:00" mock_restore_cache_with_extra_data( hass, [ ( State( "sensor.t...
[ "def test_restore_run(self):\n pass", "async def test_restore_state_uncoherence_case(hass: HomeAssistant) -> None:\n _mock_restore_cache(hass, temperature=20)\n\n calls = _setup_switch(hass, False)\n _setup_sensor(hass, 15)\n await _setup_climate(hass)\n await hass.async_block_till_done()\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that transit errors are correctly handled.
async def test_transit_errors( hass: HomeAssistant, caplog: pytest.LogCaptureFixture, exception, expected_message ) -> None: with patch( "here_transit.HERETransitApi.route", side_effect=exception(), ): entry = MockConfigEntry( domain=DOMAIN, unique_id="0123456...
[ "def test_transition_raises(self, caplog):\n\n def handle(event):\n raise NotImplementedError(\"Exception description\")\n\n self.ae = ae = AE()\n ae.add_supported_context(Verification)\n ae.add_requested_context(Verification)\n handlers = [(evt.EVT_FSM_TRANSITION, hand...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
tokenizes the command into atomic tokens
def tokenize(cmd): return cmd.strip().split(' ')
[ "def _redisize(cls, command):\n d = shlex.split(command)\n d[0] = d[0].upper()\n return Redisizer.tokens(d)", "def getTokens(command):\n command = str(command) # In case the command is unicode, which fails.\n f = cStringIO.StringIO(command)\n # tokens is a list of token tuples, each...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
canonicalizes the command and returns it
def canonicalize_command(tokens): command = tokens[0] canonical = NO_COMMAND_CANON for c in COMMANDS: if command in c: canonical = COMMANDS[c] break return canonical
[ "def canonical_command(cmd):\n fields = cmd.split('!')\n if fields[0] == 'check_nrpe2':\n return fields[1]\n else:\n return fields[0]", "def _normalize_cmd(command):\r\n command = command.rstrip(\"\\n\")\r\n command += \"\\n\"\r\n return command", "def _command_name_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Serializer patch method to get access to model instance. Triggers instance media dump by sending the `pre_dump` signal.
def get_dump_object(self, obj): pre_dump.send(sender=type(obj), instance=obj) return Serializer.get_dump_object(self, obj)
[ "def on_post_init(self, instance, sender, **kwargs):\n instance._original_values = self.serialize_object(instance)", "def dump_instance(instance):\n return json.dumps(model_to_dict(instance), cls=DjangoJSONEncoder)", "def get_serializer(self, *args, **kwargs):\n serializer = super().get_ser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display a List of Tracked Coins.
def coinList(coincodes): global config if coincodes == '*': print("You asked to list all tracked coins") else: print("You asked to list tracked coins like %s" % (coincodes)) try: print(config['Active']['Coins']) except KeyError: print("There are curre...
[ "def coins(request):\n return render(request, 'coins.html', {})", "def display_stats(coins: list) -> None:\n average_value = sum(coin.volume_usd for coin in coins) / len(coins)\n sorted_data = sorted(coins, key=lambda coin: coin.volume_usd)\n max_coin = sorted_data[-1]\n min_coin = sorted_data[0]\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete a Coin or set of Coins from the List of Tracked Coins.
def deleteCoins(coincodes): global appdatadir global config activecoins = "" print("You asked to delete coin(s) %s" % (coincodes)) if 'Active' not in config.sections(): print("No coins currently exist in configuration.") return if config.has_option('Active', 'Coins'): ac...
[ "def remove_coin(self, coin_symbol: str):\n self.wallet.pop(coin_symbol, None)\n self.dump()", "def current_user_saved_tracks_delete(self, tracks=None):\n tlist = []\n if tracks is not None:\n tlist = [self._get_id(\"track\", t) for t in tracks]\n return self._delete(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the last week of a set of Coins
def coinWeek(coincodes): global config coinlist = [] if coincodes == '*': print("You asked to list the last week for all tracked coins") coinlist = config.get('Active','Coins').split(' ') else: coinlist = coincodes print("You asked to list the last week...
[ "def week():", "def last_weeks_leaderborad(driver):\n # .click() selects the button on the webpage to change the leader board viewed\n elem = driver.find_element_by_xpath('//*[@class=\"button last-week\"]').click()\n text_out = driver.find_element_by_xpath('//*[@class=\"dense striped sortable\"]')\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the online status of the clients, and remove them when offline.
def check_status(self): while True: time.sleep(5) for i in range(len(self._agents['ip'])): if time.time() - self._agents['time'][i] > 12: ip = self._agents['ip'].pop(i) self._agents['port'].pop(i) self._ag...
[ "def free_unused_clients():\n _get_client.cache_clear()", "async def online(ctx):\r\n\tserver = ctx.message.server\r\n\tmembers = 0\r\n\tmembersOnline = 0\r\n\tfor member in server.members:\r\n\t\tmembers += 1\r\n\t\tif str(member.status).lower() == \"online\":\r\n\t\t\tmembersOnline += 1\r\n\tmsg = 'There are...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the GC data of a port.
def get_gc(self, ip, port, interface): res = self.request.request('get', ip, port, interface) if res.status_code == 200: response = json.loads(res.content.decode()) logger.debug(f'The GC data of the port {port} of the server {ip} is {response}') if response['code...
[ "def get_csdata(self) -> None:", "def gcs_data_sink(self) -> 'outputs.GcsDataResponse':\n return pulumi.get(self, \"gcs_data_sink\")", "def get_gc_content(self):\n c = self.sequence.count('C')\n g = self.sequence.count('G')\n return round((c + g) / self.length, 4)", "def get_gc_con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize with the quantities you want to interpolate. pOutput [nlev/nlay]: pressure output vertical grid. pInput [nprofiles, nlev/nlay]: pressure input grid. items (dictionary of arrays) [nprofiles, nlev/nlay]: items on the input grid.
def __init__(self, pOutput, pInput, items): self.pOutput = pOutput self.pInput = pInput self.items = items self.nv = pOutput.shape[0] self.nprof = pInput.shape[0] self.pOutputGrid = np.zeros([self.nprof,self.nv]) self.itemsInterp = {} for i in list(self.i...
[ "def InputVariables(parameters_dict, n_option = \"random\", nmin = 0.1, nmax = 0.2, m = 0.03):\n nx, ny = parameters_dict['nx'], parameters_dict['ny'] #retrieve grid size\n dx = parameters_dict['dx']\n\n # set cell initial distribution based on function input\n while n_option not in ['uniform', 'random'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do a log-linear interpolation.
def logLinear(self, x, xo, yo): logX = np.log(x) logXo = np.log(xo) logYo = np.log(yo) return np.exp(np.interp(logX, logXo, logYo))
[ "def viper_log_to_linear(value, **kwargs):\n\n value = np.asarray(value)\n\n return 10 ** ((1023 * value - 1023) / 500)", "def s_log_to_linear(value, **kwargs):\n\n return S_LOG_EOCF(value)", "def v_log_to_linear(value, **kwargs):\n\n return V_LOG_EOCF(value)", "def linear_to_viper_log(value, **kw...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply interpolation to all provided profiles.
def interpProfiles(self,method='crtm-wrap'): for i in list( self.items.keys() ): for ii in list(range(self.nprof)): if(self.items[i].ndim>2): # jacobian with profile, channels, nlevels for k in list(range(self.items[i].shape[1])): self.item...
[ "def interpolate_profiles(subargodb):\n\n all_tags_infos = tools.retrieve_infos_from_tag(subargodb.index)\n subargodb['WMO'] = all_tags_infos['WMO']\n subargodb['IDAC'] = all_tags_infos['IDAC']\n subargodb['IPROF'] = all_tags_infos['IPROF']\n \n subargo_to_interp = subargodb[(subargodb['FLAG'] == ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and fills a velib Station row in the DB using the provided values
def insertvelibstation(values): station = sqlModels.Station() station.station_number = values['number'] station.station_name = values['name'] station.contract_name = values['contract_name'] station.address = values['address'] station.banking = values['banking'] station.bonus = values['bonus'...
[ "def create_stations_db(self):\n newDf = self.df.loc[:, 'stop_name'].drop_duplicates()\n for i in newDf.index:\n lat = self.df[self.df[\"stop_name\"] == newDf[i]]['lat'].values[0]\n lon = self.df[self.df[\"stop_name\"] == newDf[i]]['lon'].values[0]\n add_station_db(sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and fills a weather information row in the DB using the provided values
def insertweather(values): weather = sqlModels.Weather() weather.weather_group = values['weather'][0]['main'] # TODO: is it the right data here? weather.temperature = values['main']['temp'] weather.pressure = values['main']['pressure'] weather.humidity_percentage = values['main']['humidity'] we...
[ "def _weather(args):\n weather.record(args.database, args.port, args.ssl, args.api_key, args.lat, args.lon)", "def fill_weather_db():\n\n # -- set the date range\n st = datetime.datetime(2013, 10, 1)\n en = datetime.datetime(2017, 10, 27)\n nday = (en - st).days + 1\n dlist = [d for d in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and fills a StationElevation information row in the DB using the provided values
def insertstationelevation(values, station_number, contract_name): elevation = sqlModels.StationElevation() elevation.station_number = station_number elevation.latitude = values['results'][0]['location']['lat'] elevation.longitude = values['results'][0]['location']['lng'] elevation.elevation = value...
[ "def set_elevation(self, elevation):\r\n self.obs.telel = float(elevation)\r\n self.ave.telaz = self.obs.telel\r\n self.hot.telaz = self.obs.telel\r\n self.cold.telaz = self.obs.telel\r\n self.ref.telaz = self.obs.telel\r\n print(\"Setting Elevation: %6.1f d\" % (self.obs.t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if this station's elevation record is already in the DB
def doeselevationexist(station_number, contract_name): count = sqlModels.StationElevation.select().where(sqlModels.StationElevation.station_number == station_number, sqlModels.StationElevation.contract_name == contract_name).count() return count >= 1
[ "def in_elevation_map(row: int, column: int, len_elevation_map: int) -> bool:\n if 0 <= row < len_elevation_map:\n if 0 <= column < len_elevation_map:\n return True\n return False\n return False", "def insertstationelevation(values, station_number, contract_name):\n elevation = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
emotive_speech(x, fs, typeOfEmotion): a caller module.
def emotive_speech(x,fs,typeOfEmotion): TIME_STAMPS = bp.process_variables(x,fs,CHUNK_SIZE)[0] CONSECUTIVE_BLOCKS = bp.process_variables(x,fs,CHUNK_SIZE)[1] fundamental_frequency_in_blocks = bp.batch_analysis(x,fs,CHUNK_SIZE)[0] voiced_samples = bp.batch_analysis(x,fs,CHUNK_SIZE)[1] rms = bp.batch_analysis(x,...
[ "def em_step(t, eng, fre):\n\t# TODO", "def detect_intent(self):\n self.grammars = \"builtin:speech/transcribe,builtin:dtmf/digits\" \n self.synth_and_recog()", "def main():\r\n \r\n\r\n record() \r\n #print('Loading model from file {}'.format(deepspeech-0.8.2-models.pbmm), file=sys.stde...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes edges and returns the distance between neighbours
def get_neighbour_dist(x, y, edges): return list(filter(lambda edge: (edge[0] == x and edge[1] == y) or (edge[0] == y and edge[1] == x), edges))[0][2]
[ "def get_edge_dist(self, i, j):\n return self.distance(self.nodes[i], self.nodes[j])", "def numConnectedEdges(*args, **kwargs):\n \n pass", "def getDistances(digraph, path):\r\n total_dist = 0\r\n outdoor_dist = 0\r\n for i in range(len(path) - 1):\r\n for node, edge in digraph....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render task's command into a string. Returns
def render_command(self): return ' '.join(self.command)
[ "def getCmdString(self,cmd):\n if hasattr(cmd,\"command\") and isinstance(cmd.command, Command):\n cmd.command = cmd.command.composeCmdString()\n return super(self.__class__,self).getCmdString(cmd)\n elif isinstance(cmd,list):\n cmdarr = []\n for c in cmd:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render a runner script according to the template.
def render_runner(self, template): modules = self.opts.get('modules') if modules is None: modules = '' else: modules = ' '.join(modules) contents = template % { 'command': self.render_command(), 'modules': modules} return co...
[ "def render_template(self, *args, **kwargs):\n return self.renderer.render(*args, **kwargs)", "def run(self):\n self.time_writer('AutoSpider run(): Running command from template...')\n\n if not os.path.exists(self.script_dir):\n os.mkdir(self.script_dir)\n\n # Get filepath a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a prefix for the runner file.
def get_runner_prefix(self): return self.__class__.__name__
[ "def prefix(self):\n return self._config['DEFAULT']['prefix']", "def get_prefix(testing_framework, inst_type):\n dirname = testing_framework.value\n if inst_type == INSTRUMENTATION_TYPE.METHOD:\n dirname += \"Method\"\n elif inst_type == INSTRUMENTATION_TYPE.TEST:\n dirname += \"Test...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When out of connections, block for timeout seconds, then raise
async def test_connection_pool_blocks_until_timeout(self, master_host): connection_kwargs = {"host": master_host[0]} async with self.get_pool( max_connections=1, timeout=0.1, connection_kwargs=connection_kwargs ) as pool: c1 = await pool.get_connection("_") s...
[ "def test_connection_timeout_raised(self):\n conn = LibcloudConnection(host=\"localhost\", port=8080, timeout=0.1)\n # use a not-routable address to test that the connection timeouts\n host = \"http://10.255.255.1\"\n with self.assertRaises(ConnectTimeout):\n conn.request(\"GE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When out of connections, block until another connection is released to the pool
async def test_connection_pool_blocks_until_conn_available(self, master_host): connection_kwargs = {"host": master_host[0], "port": master_host[1]} async with self.get_pool( max_connections=1, timeout=2, connection_kwargs=connection_kwargs ) as pool: c1 = await pool.get_c...
[ "def connection_pool_blocks_until_another_connection_released(self):\n pool = self.get_pool(max_connections=1, timeout=2)\n c1 = pool.get_connection('_')\n\n def target():\n time.sleep(0.1)\n pool.release(c1)\n\n Thread(target=target).start()\n start = time.t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If Redis raises a LOADING error, the connection should be disconnected and a BusyLoadingError raised
async def test_busy_loading_disconnects_socket(self, r): with pytest.raises(redis.BusyLoadingError): await r.execute_command("DEBUG", "ERROR", "LOADING fake message") if r.connection: assert not r.connection._reader
[ "def test_busy_loading_from_pipeline(self, r):\n pipe = r.pipeline()\n pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message')\n with pytest.raises(redis.BusyLoadingError):\n pipe.execute()\n pool = r.connection_pool\n assert not pipe.connection\n assert l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
BusyLoadingErrors should raise from Pipelines that execute a command immediately, like WATCH does.
async def test_busy_loading_from_pipeline_immediate_command(self, r): pipe = r.pipeline() with pytest.raises(redis.BusyLoadingError): await pipe.immediate_execute_command( "DEBUG", "ERROR", "LOADING fake message" ) pool = r.connection_pool assert n...
[ "def test_busy_loading_from_pipeline_immediate_command(self, r):\n pipe = r.pipeline()\n with pytest.raises(redis.BusyLoadingError):\n pipe.immediate_execute_command('DEBUG', 'ERROR',\n 'LOADING fake message')\n pool = r.connection_pool\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
BusyLoadingErrors should be raised from a pipeline execution regardless of the raise_on_error flag.
async def test_busy_loading_from_pipeline(self, r): pipe = r.pipeline() pipe.execute_command("DEBUG", "ERROR", "LOADING fake message") with pytest.raises(redis.BusyLoadingError): await pipe.execute() pool = r.connection_pool assert not pipe.connection assert l...
[ "def test_busy_loading_from_pipeline(self, r):\n pipe = r.pipeline()\n pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message')\n with pytest.raises(redis.BusyLoadingError):\n pipe.execute()\n pool = r.connection_pool\n assert not pipe.connection\n assert l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
READONLY errors get turned into ReadOnlyError exceptions
async def test_read_only_error(self, r): with pytest.raises(redis.ReadOnlyError): await r.execute_command("DEBUG", "ERROR", "READONLY blah blah")
[ "def isReadOnly():\n\n # XXX Note that this method doesn't really buy us much,\n # especially since we have to account for the fact that a\n # ostensibly non-read-only storage may be read-only\n # transiently. It would be better to just have read-only errors.", "def test_readonly(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A health check happens before the first [p]subscribe
async def test_health_check_in_pubsub_before_subscribe(self, r): p = r.pubsub() p.connection = await p.connection_pool.get_connection("_") p.connection.next_health_check = 0 with mock.patch.object( p.connection, "send_command", wraps=p.connection.send_command ) as m: ...
[ "async def test_health_check_in_pubsub_after_subscribed(self, r):\n p = r.pubsub()\n p.connection = await p.connection_pool.get_connection(\"_\")\n p.connection.next_health_check = 0\n with mock.patch.object(\n p.connection, \"send_command\", wraps=p.connection.send_command\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pubsub can handle a new subscribe when it's time to check the connection health
async def test_health_check_in_pubsub_after_subscribed(self, r): p = r.pubsub() p.connection = await p.connection_pool.get_connection("_") p.connection.next_health_check = 0 with mock.patch.object( p.connection, "send_command", wraps=p.connection.send_command ) as m: ...
[ "async def test_health_check_in_pubsub_before_subscribe(self, r):\n p = r.pubsub()\n p.connection = await p.connection_pool.get_connection(\"_\")\n p.connection.next_health_check = 0\n with mock.patch.object(\n p.connection, \"send_command\", wraps=p.connection.send_command\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Polling a pubsub connection that's subscribed will regularly check the connection's health.
async def test_health_check_in_pubsub_poll(self, r): p = r.pubsub() p.connection = await p.connection_pool.get_connection("_") with mock.patch.object( p.connection, "send_command", wraps=p.connection.send_command ) as m: await p.subscribe("foo") subscr...
[ "async def test_health_check_in_pubsub_before_subscribe(self, r):\n p = r.pubsub()\n p.connection = await p.connection_pool.get_connection(\"_\")\n p.connection.next_health_check = 0\n with mock.patch.object(\n p.connection, \"send_command\", wraps=p.connection.send_command\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look through our args object and set the proper default config values in our snap config, based on those args.
def process_init_args(args): if args.auto and not (args.control or args.compute): raise ValueError('A role (--compute or --control) must be specified ' ' when using --auto') if args.compute or args.control: config_set(**{'config.is-clustered': 'true'}) if args.comp...
[ "def init(args):\n Configuration.load_config(vars(args).get(\"config\"))", "def _get_args_defaults(env, args):\n defaults = {'height': _get_config_variable(env, 'height', 500),\n 'column_width': _get_config_variable(env, 'column_width', 40),\n 'res_days': _get_config_variab...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find and use the default network on a machine. Helper to find the default network on a machine, and configure MicroStack to use it in its default settings.
def set_network_info() -> None: try: ip, gate, cidr = default_network() except Exception: # TODO: more specific exception handling. log.exception( 'Could not determine default network info. ' 'Falling back on 10.20.20.1') return check('snapctl', 'set'...
[ "def GetDefaultWiredNetwork(self):\n profileList = self.config.sections()\n for profile in profileList:\n if misc.to_bool(self.config.get(profile, \"default\")):\n return profile\n return None", "def _network_choice(self) -> str:\n return self.config.getoption...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
createFolderHierarchy does... guess what.
def create_folder_hierarchy(self): this_dir = self.project_directory for d in [self.experiment_name, self.subject.initials]: try: this_dir = os.path.join(this_dir, d) os.mkdir(this_dir) except OSError: pass for p in ['raw',...
[ "def create_folder_structure(depth=2, sibling=2, parent=None):\n if depth > 0 and sibling > 0:\n depth_range = range(1, depth+1)\n depth_range.reverse()\n for d in depth_range:\n for s in range(1,sibling+1):\n name = \"folder: %s -- %s\" %(str(d), str(s)) \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
import_raw_data loops across edf_files and their respective aliases and copies and renames them into the raw directory.
def import_raw_data(self, edf_files, aliases): for (edf_file, alias,) in zip(edf_files, aliases): self.logger.info('importing file ' + edf_file + ' as ' + alias) ExecCommandLine('cp "' + edf_file + '" "' + os.path.join(self.base_directory, 'raw', alias + '.edf"'))
[ "def read_all_raw_files():\n pass", "def raw2processed(self):\n # start logger\n logger = logging.getLogger(__name__)\n logger.info('Splitting raw data into time series and ancillary part.')\n\n file_dir = os.path.join(self.raw_dir_csse, \"US\")\n # process\n for file ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
import_all_data loops across the aliases of the sessions and converts the respective edf files, adds them to the self.ho's hdf5 file.
def import_all_data(self, aliases): for alias in aliases: self.ho.add_edf_file(os.path.join(self.base_directory, 'raw', alias + '.edf')) self.ho.edf_message_data_to_hdf(alias=alias) self.ho.edf_gaze_data_to_hdf(alias=alias)
[ "def import_and_export(self):\n timer = gfs.Timer()\n timer.tic()\n print('\\nimporting all data:\\n')\n self.import_all_yaml()\n timer.toc()\n print('\\ndumping all data:\\n')\n self.dump_all_as_pickle()\n timer.toc()\n print('\\ndump successful\\n')",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an error counter. The errors from each worker thread are summed
def get_err_counter(self, name): return sum(self.get_counter(name))
[ "def error_count(self):\n\n return self._error_count", "def error_count(self):\n if self._is_root:\n err_cnt = self._error_count\n else:\n err_cnt = self._root_recorder.error_count\n return err_cnt", "def errors(self):\n return [thread.err for thread in s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare two directories recursively. Files in each directory are assumed to be equal if their names and contents are equal.
def compare_dir_trees(inputDir, baselineDir): dirs_cmp = filecmp.dircmp(inputDir, baselineDir) newFiles.append(dirs_cmp.left_only) if dirs_cmp.left_only else newFiles missingFiles.append(dirs_cmp.right_only) if dirs_cmp.right_only else missingFiles notComparableFiles.append(dirs_cmp.funny_files) if di...
[ "def diff_dir(path1, path2, root):\n\n paths1, paths2 = os.listdir(path1), os.listdir(path2)\n paths1.sort()\n paths2.sort()\n paths1, paths2 = iter(paths1), iter(paths2)\n\n def nxt(path, piter, subst=False):\n \"\"\" Return path, absolute path, path relative to root. \"\"\"\n\n # Get ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the edge histogram
def update_edge(self, vert1, vert2, size):
    key = self._construct_key(vert1, vert2)
    self._increment(self.edge_histogram, key, size)
[ "def update_edge(self, e):\n pass", "def out_edge_count(self):", "def append_edge(self, edge):", "def update_edge(self, cell: int, assignment: dict):\n if cell not in self.cell_map:\n return\n incident_block = self.cell_map[cell]['block']\n if 2*incident_block not in ass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pads the shorter vectors with zeros so that all of them have the same length
def pad_with_zeros(*args, dtype=float) -> tuple[np.ndarray]:
    maxlen = 0
    vv = []
    for v in args:
        vv.append(np.array(v, dtype=dtype))
        maxlen = len(v) if len(v) > maxlen else maxlen
    for i in range(len(vv)):
        if len(vv[i]) < maxlen:
            vv[i] = np.hstack((vv[i], np.zeros(maxle...
[ "def zero_pad(signal):\n power = log(len(signal), 2)\n power = ceil(power)\n\n if len(signal) == 2**power:\n return signal.copy()\n else:\n return np.concatenate(signal, np.zeros((2**power - len(signal), 1)))", "def MakeSameSize(signal_1, signal_2):\n len1 = signal_1.shape[0]\n len...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the distance between two n-dimensional points. The shorter coordinate vector gets padded with zeros.
def distance(point1: list | np.ndarray, point2: list | np.ndarray) -> float:
    v1, v2 = pad_with_zeros(point1, point2)
    return np.linalg.norm(v2 - v1)
[ "def distance_vector(self, n1, n2):\n n1 -= 1\n n2 -= 1\n dist = self.atoms[n2].position - self.atoms[n1].position\n return dist", "def distance(point1, point2):\n return point1.dist_to(point2)", "def distance(x, y):\n return np.linalg.norm(x - y)", "def distances(self, v1,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the angle between three n-dimensional points, with the angle measured at the first point.
def angle3p(point1: list | np.ndarray,
            point2: list | np.ndarray,
            point3: list | np.ndarray,
            out: str = "radians") -> float:
    p1, p2, p3 = pad_with_zeros(point1, point2, point3)
    v1 = p2 - p1
    v2 = p3 - p1
    angle = acos(max(min(np.dot(v1, v2) / (np.linalg.nor...
[ "def angle_between_three_points(point1, point2, point3):\n return angle_between_points(\n vector_subtraction(point1, point2), vector_subtraction(point3, point2))", "def angle(p1, p2, p3):\n try:\n return acos((dist(p2,p3)**2 + dist(p1,p2)**2 - dist(p1,p3)**2)\n / (2 * dist(p2,p3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the projection of point P on a line defined by points A and B
def project_point_to_line(P: list | np.ndarray,
                          A: list | np.ndarray,
                          B: list | np.ndarray) -> np.ndarray:
    p, a, b = pad_with_zeros(P, A, B)
    n = unit(b - a)
    return a + np.dot(p - a, n) * n
[ "def proj(a,b):\n return np.dot(a,b) * b / (np.linalg.norm(b)**2)", "def distance_point_to_line(P: list | np.ndarray,\n A: list | np.ndarray,\n B: list | np.ndarray) -> float:\n return distance(P, project_point_to_line(P, A, B))", "def project(self, poin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the projection of point P onto a plane defined by points A, B and C
def project_point_to_plane(P: list | np.ndarray,
                           A: list | np.ndarray,
                           B: list | np.ndarray,
                           C: list | np.ndarray) -> np.ndarray:  # returns a point, not a float
    p, a, b, c = pad_with_zeros(P, A, B, C)
    n1 = unit(b - a)
    n2 = unit(c - a)
    return a + np.dot(p - a,...
[ "def plane_projection(self, PP=None):\n PS = self.ambient_space()\n n = PS.dimension_relative()\n if n == 2:\n raise TypeError(\"this curve is already a plane curve\")\n C = self\n H = Hom(PS, PS)\n phi = H([PS.gens()[i] for i in range(n + 1)])\n for i in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the two closest points on two lines, each defined by two points. If the two points coincide, returns a single point and None, meaning the lines intersect.
def closest_line_to_line(A1: list | np.ndarray,
                         A2: list | np.ndarray,
                         B1: list | np.ndarray,
                         B2: list | np.ndarray) -> tuple[np.ndarray]:
    a1, a2, b1, b2 = pad_with_zeros(A1, A2, B1, B2)
    a = unit(a2 - a1)
    b = unit(b2 - b1)
    # firs...
[ "def intersect_line_line(line1_start, line1_end, line2_start, line2_end):\n x1, y1 = line1_start\n x2, y2 = line1_end\n u1, v1 = line2_start\n u2, v2 = line2_end\n\n try:\n b1 = (y2 - y1) / float(x2 - x1)\n except ZeroDivisionError:\n # line 1 is vertical, we'll approach that with a ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the distance from point P to a line defined by points A and B
def distance_point_to_line(P: list | np.ndarray,
                           A: list | np.ndarray,
                           B: list | np.ndarray) -> float:
    return distance(P, project_point_to_line(P, A, B))
[ "def distance(point, line):\n \n return point.y - line(point.x)", "def distPointToLine(point, line):\n\n [xp, yp] = point\n [a, c] = line\n b = -1\n\n return abs((a*xp + b*yp + c) / np.linalg.norm([a, b]))", "def perp_distance(point, line):\n \n return (point.y - line(point.x)) / math.sq...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the distance from point P to a plane defined by points A, B and C
def distance_point_to_plane(P: list | np.ndarray,
                            A: list | np.ndarray,
                            B: list | np.ndarray,
                            C: list | np.ndarray) -> float:
    return distance(P, project_point_to_plane(P, A, B, C))
[ "def project_point_to_plane(P: list | np.ndarray,\n A: list | np.ndarray,\n B: list | np.ndarray,\n C: list | np.ndarray) -> float:\n p, a, b, c = pad_with_zeros(P, A, B, C)\n n1 = unit(b - a)\n n2 = unit(c - a)\n return a + n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reverse Cuthill-McKee algorithm for reordering a matrix for smallest bandwidth. The square A matrix format is such that a 1 at position (i, j) means that node i is connected to variable j:

A = [[1. 0. 0. 0. 1. 0. 0. 0.]
     [0. 1. 1. 0. 0. 1. 0. 1.]
     [0. 1. 1. 0. 1. 0. 0. 0.]
     [0. 0. 0. 1. 0. 0. 1. 0.]
     [1. 0. 1. 0. 1. 0. 0. 0.]
     [0. ...
def reverse_cuthill_mckee(A: np.ndarray, reorder: bool = False):
    def getAdjacency(Mat: np.ndarray):
        """Return the adjacency (neighbor) list for each node."""
        adj = [0] * Mat.shape[0]
        for i in range(Mat.shape[0]):
            q = np.flatnonzero(Mat[i])
            q = list(q)
            ...
[ "def retrace_matrix(L, A, m, n):\n lcs_reverse = \"\"\n x = n\n y = m\n while x > 0 or y > 0:\n curr = L[y][x]\n if curr[1] == 2 and curr[2] == 1:\n lcs_reverse += A[y-1]\n x -= 1\n y -= 1\n elif curr[1] == 1:\n x -=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the MAC (Modal Assurance Criterion) value of two vectors: MAC = (A · B)^2 / ((A · A)(B · B))
def MAC(vector1: list | dict | np.ndarray,
        vector2: list | dict | np.ndarray) -> float:
    if type(vector1) is dict:
        A = np.array([v for k, v in vector1.items()], dtype=float).flatten()
    else:
        A = np.array(vector1, dtype=float).flatten()
    if type(vector2) is dict:  # fixed: was vector1, a copy-paste slip
        B = np.array([v for k...
[ "def amac(**args):\r\n # @todo : TEST ME\r\n if len(args) == 1:\r\n V1 = args[0]\r\n V2 = V1\r\n if len(args) == 2:\r\n V1 = args[0]\r\n V2 = args[1]\r\n else:\r\n raise(ValueError)\r\n\r\n (N1, M1) = V1.shape()\r\n (N2, M2) = V2.shape()\r\n\r\n M = np.ndarray...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads stock data from Quandl, computes daily returns based on the adjusted close, and returns a pandas DataFrame.
def getStockFromQuandl(symbol, name, start, end):
    import Quandl
    df = Quandl.get(symbol, trim_start=start, trim_end=end, authtoken="your token")
    df.columns.values[-1] = 'AdjClose'
    df.columns = df.columns + '_' + name
    df['Return_%s' % name] = df['AdjClose_%s' % name].pct_change...
[ "def getStockFromQuandl(symbol, name, start, end):\n import Quandl\n df = Quandl.get(symbol, trim_start = start, trim_end = end, authtoken=\"your token\")\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + name\n df['Return_%s' %name] = df['AdjClose_%s' %name].pct_change()\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies rolling-mean and delayed-return features to each DataFrame in the list.
def applyRollMeanDelayedReturns(datasets, delta):
    for dataset in datasets:
        columns = dataset.columns
        adjclose = columns[-2]
        returns = columns[-1]
        for n in delta:
            addFeatures(dataset, adjclose, returns, n)
    return datasets
[ "def applyRollMeanDelayedReturns(datasets, delta):\n for dataset in datasets:\n columns = dataset.columns\n adjclose = columns[-2]\n returns = columns[-1]\n for n in delta:\n addFeatures(dataset, adjclose, returns, n)\n\n return datasets", "def rolling_apply(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply a time lag to the return columns selected according to delta. The days to lag are contained in the lags list passed as an argument. Returns a NaN-free dataset obtained by cutting the lagged dataset at head and tail.
def applyTimeLag(dataset, lags, delta):
    dataset.Return_Out = dataset.Return_Out.shift(-1)
    maxLag = max(lags)
    columns = dataset.columns[::(2*max(delta)-1)]
    for column in columns:
        for lag in lags:
            newcolumn = column + str(lag)
            dataset[n...
[ "def applyTimeLag(dataset, lags, delta):\n\n dataset.Return_Out = dataset.Return_Out.shift(-1)\n maxLag = max(lags)\n\n columns = dataset.columns[::(2*max(delta)-1)]\n for column in columns:\n for lag in lags:\n newcolumn = column + str(lag)\n dataset[newcolumn] = dataset[co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs classification on daily returns using one of several algorithms, selected via 'method'.
method > string naming the algorithm
parameters > list of parameters passed to the classifier (if any)
fout > string with the name of the stock to be predicted
savemodel > boolean; if True, saves the model to a pickle file
def performClassification(X_train, y_train, X_test, y_test, method, parameters, fout, savemodel):
    if method == 'RF':
        return performRFClass(X_train, y_train, X_test, y_test, parameters, fout, savemodel)
    elif method == 'KNN':
        return performKNNClass(X_train, y_train, X_test, y_test,...
[ "def performClassification(X_train, y_train, X_test, y_test, method, parameters, fout, savemodel):\n\n if method == 'RF':\n return performRFClass(X_train, y_train, X_test, y_test, parameters, fout, savemodel)\n\n elif method == 'KNN':\n return performKNNClass(X_train, y_train, X_test, y_test, pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the positions DataFrame, based on the signals provided by the 'signals' DataFrame.
def generate_positions(self):
    positions = pd.DataFrame(index=self.signals.index).fillna(0.0)
    positions[self.symbol] = self.shares * self.signals['signal']
    return positions
[ "def generate_signals(self):\n signals = pd.DataFrame(index=self.bars.index)\n signals['signal'] = np.sign(np.random.randn(len(signals)))\n\n # The first five elements are set to zero in order to minimize\n # upstream Nan errors in the forecaster.\n signals['signal'][0:5] = 0.0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }