Dataset columns:
- query: string, lengths 9 to 9.05k
- document: string, lengths 10 to 222k
- negatives: list, lengths 19 to 20
- metadata: dict
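Each row below pairs a natural-language query with the code snippet it describes (document), a list of near-miss code snippets (negatives), and a metadata dict naming the training objective. As a quick orientation, here is a minimal loading sketch, assuming the Hugging Face datasets library; the dataset path "user/code-retrieval-triplets" is a placeholder, not the actual repository name.

from datasets import load_dataset

# A minimal sketch, assuming the `datasets` library is installed;
# "user/code-retrieval-triplets" is a placeholder path, not the real dataset id.
dataset = load_dataset("user/code-retrieval-triplets", split="train")

for row in dataset:
    query = row["query"]          # natural-language description (string)
    document = row["document"]    # the matching code snippet (string)
    negatives = row["negatives"]  # 19-20 non-matching snippets (list of strings)
    objective = row["metadata"]["objective"]  # declares the (query, document, negatives) triplet
    print(query[:60], len(negatives))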
Delete image belonging to an answer with answer_id.
def delete_a_image(answer_id):
    current_image = get_answer_image(answer_id)
    if current_image:
        remove_answer_image(answer_id)
        try:
            os.remove("static/uploads/" + current_image)
        except FileNotFoundError:
            pass
[ "def remove_answer_image(answer_id):\n SQL = \"\"\"UPDATE answer SET image = NULL WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = None\n db.run_statements(((SQL, data, fetch),))", "def remove_answer_and_get_q_id(answer_id):\n image_to_delete, question_id = get_answer_image_and_q_id(answer_id)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove answer image by updating database, setting image to NULL.
def remove_answer_image(answer_id):
    SQL = """UPDATE answer SET image = NULL WHERE id = %s;"""
    data = (answer_id,)
    fetch = None
    db.run_statements(((SQL, data, fetch),))
[ "def delete_a_image(answer_id):\n current_image = get_answer_image(answer_id)\n if current_image:\n remove_answer_image(answer_id)\n try:\n os.remove(\"static/uploads/\" + current_image)\n except FileNotFoundError:\n pass", "def db_delete_one_image(imgId):\n\tprint...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return question_id based on answer_id.
def get_question_id(answer_id):
    SQL = """SELECT question_id FROM answer WHERE id = %s;"""
    data = (answer_id,)
    fetch = "one"
    question_id = db.run_statements(((SQL, data, fetch),))[0][0]
    return question_id
[ "def get_answer_to_question(question_id):\n return Question.query.filter_by(id=question_id).first_or_404().answer", "def get_answer_by_id(answer_id):\n\n return Answer.query.get(answer_id)", "def _get_answer(self, answer_id):\n return self._translate_sent(self.answer_pool[answer_id])", "def get_a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark an answer accepted. Deselect any previously marked answer, since only one accepted answer may remain.
def mark_accepted_exclusively(answer_id, question_id):
    other_answer_ids = get_other_answer_ids(answer_id, question_id)
    if not other_answer_ids:
        other_answer_ids = (None,)
    SQL1 = """UPDATE answer SET accepted = true WHERE id = %s;"""
    data1 = (answer_id,)
    SQL2 = """UPDATE answer SET accepted ...
[ "def remove_accept_mark(answer_id):\n SQL = \"\"\"UPDATE answer SET accepted = false WHERE accepted = true and id = %s;\"\"\"\n data = (answer_id,)\n fetch = None\n db.run_statements(((SQL, data, fetch),))", "def accept_answer(self, post, user=None):\n assert post is None or post.topic == self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all answer ids for question with question_id, other than answer_id.
def get_other_answer_ids(answer_id, question_id):
    SQL = """SELECT id FROM answer WHERE question_id = %s AND id != %s;"""
    data = (question_id, answer_id)
    fetch = "col"
    other_answer_ids = db.run_statements(((SQL, data, fetch),))[0]
    return other_answer_ids
[ "def get_answer_ids(self):\n return # osid.id.IdList", "def get_forum_question_ids():\n return list(map(lambda q: q.id, get_forum_questions()))", "def answered_questions(self):\n return self.question_set.filter(\n status=question_constants.ANSWERED\n )", "def get_questions_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove accept mark from answer.
def remove_accept_mark(answer_id):
    SQL = """UPDATE answer SET accepted = false WHERE accepted = true and id = %s;"""
    data = (answer_id,)
    fetch = None
    db.run_statements(((SQL, data, fetch),))
[ "def clean_answer(self, answer):\n answer = answer.replace('\"', \"\")\n answer = re.sub(r'\\(.*?\\)', '', answer)\n if answer[0:4].lower() == \"the \":\n answer = answer[4:]\n if answer[0:3].lower() == \"an \":\n answer = answer[3:]\n if answer[0:2].lower() ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
``NullAgency.before_create`` returns the ``IObject`` passed to it with no modifications.
def test_before_create(self, obj):
    state = _KubernetesState()
    actual = NullAgency().before_create(state, obj)
    self.assertThat(actual, Equals(obj))
[ "def test_after_create(self, obj):\n state = _KubernetesState()\n actual = NullAgency().after_create(state, obj)\n self.assertThat(actual, Equals(obj))", "def before_create_object(self, data, view_kwargs):\n raise NotImplementedError", "def pre_create(cls, **kwargs):\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
``NullAgency.after_create`` returns the ``IObject`` passed to it with no modifications.
def test_after_create(self, obj):
    state = _KubernetesState()
    actual = NullAgency().after_create(state, obj)
    self.assertThat(actual, Equals(obj))
[ "def after_create_object(self, obj, data, view_kwargs):\n raise NotImplementedError", "def test_before_create(self, obj):\n state = _KubernetesState()\n actual = NullAgency().before_create(state, obj)\n self.assertThat(actual, Equals(obj))", "def post_creation(self):\n self.de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Any version other than ``None`` is interpreted as a string representation of an integer and incremented.
def test_incremented(self):
    version = _incrementResourceVersion(_incrementResourceVersion(None))
    updated = _incrementResourceVersion(version)
    self.expectThat(updated, IsInstance(unicode))
    self.expectThat(updated, AfterPreprocessing(int, Equals(int(version) + 1)))
[ "def inc_version(self, ref: str) -> int:\n if ref in self.varcounts:\n self.varcounts[ref] += 1\n return self.varcounts[ref]\n else:\n self.varcounts[ref] = 1\n return 1", "def increment_null(self):\n self.null_count += 1", "def autoincrement_vers...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Edit the nickname of the bot in all servers.
async def edit_nick(self, nick: str) -> None:
    for guild in self.guilds:
        await guild.get_member(self.user.id).edit(nick=nick)
[ "async def nickpoo(self, ctx, target: discord.Member):\n await target.edit(nick=\"\\U0001f4a9\")\n await ctx.send(\"Nickname changed successfully\")", "def set_nick_name(self, val):\n self.nick = val", "def rename(server, name):\r\n server.update(name)", "def rename(self, new_nickname)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test fahrenheit to kelvin
def testf_to_k(self):
    for deg_C, deg_F, deg_K in self.knownC_F_K:
        result = conversions.convertfarhenheittokelvin(deg_F)
        self.assertEqual(deg_K, result)
[ "def testConvertFahrenheitToKelvin(self): \n for val in self.known_values:\n from_val = val[1]\n expected_val = val[2]\n returned_val = c.convertFahrenheitToKelvin(from_val)\n self.assertEqual(returned_val,\n expected_val,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of arguments of the given function.
def _num_arguments(func: Callable) -> int:
    sig = signature(func)
    return len(sig.parameters)
[ "def number_of_arguments(func):\n if isinstance(func, functools.partial):\n total_args = len(inspect.signature(func.func).parameters)\n return total_args - len(func.args) - len(func.keywords)\n return len(inspect.signature(func).parameters)", "def getArgumentCount(self):\n return len(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator or method to register a callable as a given name.
def register(self, name: str, func: Optional[Callable] = None) -> Optional[Callable]:
    if name in self.callables.keys():
        logging.warning("Overriding callable of name '%s'.", name)

    def assign(func):
        if not callable(func):
            raise ValueError("Argument func must be call...
[ "def register(name, fn):\n return el.Dotted.register(name, fn)", "def register_binning(name: Optional[str] = None):\n\n def decorator(f: Callable) -> Callable:\n key = name or f.__name__[:-8]\n binning_methods[key] = f\n return f\n\n return decorator", "def register(name):\n\n d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a callable given its name.
def find(self, name: str) -> Optional[Callable]:
    return self.callables.get(name)
[ "def locate_qualified_function(qualified_name: str) -> Callable[[], Iterable[ET]]:\n if \".\" not in qualified_name:\n raise QueryException(\"Could not find a '.' in the function name, e.g. my.reddit.rexport.comments\")\n rdot_index = qualified_name.rindex(\".\")\n return locate_function(qualified_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the given datacontainer to the view. This is used in the file open method
def addDataContainer(self, datacontainer):
    if isinstance(datacontainer, DataHandling.DataContainer.DataContainer):
        self._datacontainer.append(datacontainer)

        if self.show_window:
            self.view.addFileToFilelist(datacontainer)
[ "def add_container(self, container):\n self.__container_list.append(container)", "def merge_container(self, container):\n logger.debug('Merging containers')\n print(type(self))\n\n self._add_to_container(\n container.data,\n container.electrode_positions, containe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all the datacontainers that are currently loaded. Returns the list of datacontainers.
def getDataContainerList(self):
    return self._datacontainer
[ "def list_containers(self):\r\n return list(self.iterate_containers())", "def containers():", "def containers(self):\n return SystemCommand(self.cmd.containers).output", "def get_all_containers(client):\n return [client.inspect_container(container[\"Id\"])\n for container in client...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace the datacontainer at the given index.
def replaceDataContainer(self, index, datacontainer):
    if (isinstance(datacontainer, DataHandling.DataContainer.DataContainer) and
            index >= 0 and index < len(self._datacontainer)):
        self._datacontainer[index] = datacontainer

        if self.show_window...
[ "def replace(self, index: int, dataset: Optional[_TypeMultiBlockLeaf]) -> None:\n name = self.get_block_name(index)\n self[index] = dataset\n self.set_block_name(index, name)", "def put(self, index: int, value: Any):\n self[index] = value", "def __setitem__(\n self,\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the datapoint of the background_datacontainer at the given index
def _getBackgroundDataPoint(self, background_datacontainer, index):
    if isinstance(background_datacontainer, DataHandling.DataContainer.DataContainer):
        return background_datacontainer.datapoints[index]
    elif isinstance(background_datacontainer, (list, tuple)):
        return backg...
[ "def datum(self, *index):\n data = self.get_data(None)\n if data is None:\n raise ValueError(\n \"ERROR: Can't return an element when there is no data array\"\n )\n\n return data.datum(*index)", "def get_data_point_info(self, data_info, index):\n\t\tfor da...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a human readable string of the indices in the list_data. The list_data is assumed to be a list full of integers. This function will
def zipIndices(self, list_data):
    # sort the list
    list_data.sort()

    # the return value
    zip_indices = ""

    # the last element in the list_data
    last_element = None

    # the index of the list_data when an element had been added
    added_i = None
    ...
[ "def print_indexes(my_list):\n\n # for i in range(len(my_list)):\n # print \"{} {}\".format(i, my_list[i]) \n\n # return\n\n # learned 'enumerate' function, i is the index position and item is the \n # element in that position in the list\n # print both of those elemetns\n\n for i, item in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an array which contains all the indices in the given indices_string. This is the inverse of the GraphWizard.zipIndices method. Parameters
def unzipIndices(self, indices_string):
    space_regexp = re.compile("\\s+")
    indices_string = space_regexp.sub("", indices_string)
    datapoints = []

    # split datapoints by ;
    indices_string = indices_string.split(";")

    for datapoint_range in indices_string:
        ...
[ "def indices():\n return [1.0, 3.0, 1.0, 3.0, 1.0]", "def split_indices(self, indices):\n out_ind = [[] for _ in range(self.num_patitions)]\n for key in indices:\n part = self.get_partition_index(key)\n ind = self.mapping_to_partition[part][key]\n out_ind[part].ap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the full range of the given iterable; this will return a zipString as received from Controller.zipIndices() for the indices of the given iterable.
def zipFullRange(self, iterable):
    try:
        l = len(iterable)
    except TypeError as e:
        return ""

    return self.zipIndices(list(range(0, l)))
[ "def get_slice(seq,start=0,stop=None,step=1):\n if stop == None:\n stop = len(seq)\n item = lambda i: seq[i]\n return map(item,xrange(start,stop,step))", "def list(self):\n return range(*self._vec)", "def Slice(iterable, start=None, *args, **kwargs):\n return itt.islice(iterable, start, *a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes all the list elements in the list_data that are the same. This supports only numeric lists. The threshold can be a numeric value which defines how much the list_data's elements can differ from the others and still count as the "same value"
def uniqueListThreshold(self, list_data, threshold):
    # prepare variables
    uniques = []
    full_list = {}

    # go through each list element
    for x in list_data:
        # tells whether the element has been found or not
        found = False
        ...
[ "def remove_list_by_fraction(source_lst):\n\tflags = [0]\n\ttemp = source_lst[0]\n\tfor i in range(len(source_lst)):\n\t\tif source_lst[i] != temp:\n\t\t\tflags.append(i)\n\t\t\ttemp = source_lst[i]\n\n\treturn flags", "def RemoveSmallRiderCountsForStation(counts, l1):\n try:\n l1p = []\n for l i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new background for the given datacontainer file by interpolating the background_datacontainer. The measurement_type tells whether the file is a M(T) or a M(H) measurement. The filepath is the target filepath to save the generated background in.
def createNewBackground(self, datacontainer, background_datacontainer, measurement_type, filepath = None):
    if not isinstance(filepath, str):
        filepath = "<temporary created background>"

    return DataHandling.calculation.createBackgroundDataContainer(
        datacontainer,...
[ "def create_backgrounds(\n outdir, background=None, genome=\"hg38\", size=200, custom_background=None\n):\n if background is None:\n background = [\"random\"]\n nr_sequences = {}\n\n # Create background for motif prediction\n if \"gc\" in background:\n pred_bg = \"gc\"\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cut all the datapoints of the given datacontainer with the given conditions. The conditions have to be a list of dicts; each dict has to have a key index which holds the key, and a min and/or a max index.
def cutDataPointRows(self, datacontainer, conditions):
    if isinstance(datacontainer, DataHandling.DataContainer.DataContainer):
        datacontainer = copy.deepcopy(datacontainer)
        datacontainer.addAttribute("Datapoints edited")

        for datapoint in datacontainer.dat...
[ "def apply_cuts(self, data, cuts):\n cut_mask = np.array([True], dtype=np.bool)\n for cut_key, [cut_low, cut_high] in cuts.items():\n if \"{reco}\" in cut_key:\n cut_key = cut_key.replace(\"{reco}\", self.reco)\n\n if cut_low is not None:\n cut_mask ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log the given message. There are (currently) 3 different log_types; they are all given in the View Constants. LOG_STATUSBAR will print the message in the status bar of the view, LOG_CONSOLE will log to the internal console, and LOG_DEBUG will also log to the console but this will only be displa...
def log(self, message, log_type = Constants.LOG_CONSOLE):
    self.view.log(message, log_type)
[ "def log_message(self) -> global___LogMessage:", "def log(self, msg=\"\", level=1):\n\n if self.log_level >= level:\n print(\"[%s] %s\" % (time.strftime(\"%I:%M.%S\"), msg))", "def _log(type: str, message: str, *args: t.Any, **kwargs: t.Any) -> None:\n global _logger\n\n if _logger is No...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pause or unpause the displaying of errors. The errors will be added to the internal collection and they will be displayed when the error displaying is unpaused again. Use with caution! This may hide errors so the user does not know what is going on!
def pauseErrorDisplay(self, pause):
    b = self._block_errors
    self._block_errors = (pause == True)

    if self._block_errors == False and b != self._block_errors:
        for error in self._error_collection:
            self.error(error[0], error[1], error[2])
        ...
[ "def clear_errors(self):\n self.epicsLive.clear_errors()", "def showErrors(self):\n self.log.error('There were {0} errors encountered while executing all operations:'.format(len(self.error_list)))\n for i, error in enumerate(self.error_list):\n self.log.error('[{0}] {1}'.format(i, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise an error message with the given error_type. The types are defined in the Constants. The error_details will be added to the error dialog; they can contain further information.
def error(self, message, error_type = Constants.NOTICE, error_details = None):
    if self._block_errors:
        self._error_collection.append((message, error_type, error_details, time.time()))
        return False

    error_string = "An Error"
    error_details_string = ""
    ...
[ "def error_type(self, error_type):\n allowed_values = [\"N/A\", \"QuerySyntaxError\", \"QueryExecutionError\", \"Timeout\"] # noqa: E501\n if (self._configuration.client_side_validation and\n error_type not in allowed_values):\n raise ValueError(\n \"Invalid v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the room data into the listView.
def listadohab(listhabitaciones):
    try:
        variables.listado = listar()
        listhabitaciones.clear()
        for registro in variables.listado:
            listhabitaciones.append(registro[0:3])
    except sqlite3.OperationalError as e:
        print(e)
        conexion.conex.rollback()
[ "def datalist(self,model):\n calender = {1:'Jan',2:'Feb',3:'Mar',4:'Apr',5:'May',6:'Jun',7:'Jul',8:'Aug',9:'Sep',10:'Oct',11:'Nov',12:'Dec'}\n data = model.objects.filter(owner=self.request.user).values().order_by('date')\n a= 0\n d= []\n \n for i in data:\n g={}\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the appointments for a given date.
def turno_centro_fecha(self, centro_id, fecha):
    return Turnos.query.filter(
        and_(Turnos.dia == fecha, Turnos.centro_id == centro_id)
    ).all()
[ "def rangoFechas():\n anio = int(strftime(\"%Y\", gmtime()))\n mes = int(strftime(\"%m\", gmtime()))\n l = []\n for x in [0]:\n \n diff = mes - x\n if diff <= 0:\n l.append([anio - 1, 12+ diff])\n else:\n l.append([anio, diff])\n return l", "def get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the block id by its start time.
def find_by_hora_inicio(self, hora):
    bloque = Bloque.query.filter_by(hora_inicio=hora).first()
    return bloque
[ "def siguiente(self,id):\n consulta = \"select * from socios m \" \\\n \"where m.idsocio = (select min(idsocio) from socios s \" \\\n \"where s.idsocio > %s);\"\n try:\n datos = AccesoDatos()\n cur = datos.conectar()\n cur.execute(co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the occupied blocks at a center on a given date.
def bloques_ocupados(self, centro_id, fecha):
    return (
        db.session.query(Bloque)
        .join(Bloque.turnos)
        .filter(
            and_(
                and_(Turnos.dia == fecha, Turnos.centro_id == centro_id),
                and_(Turnos.turno_id == Bloque.id, Turnos.esta...
[ "def pedidosPorPeriodo(self,fechaIni,fechaFin):", "def rangoFechas():\n anio = int(strftime(\"%Y\", gmtime()))\n mes = int(strftime(\"%m\", gmtime()))\n l = []\n for x in [0]:\n \n diff = mes - x\n if diff <= 0:\n l.append([anio - 1, 12+ diff])\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all the centers.
def all(self):
    centros = Centro.query.all()
    return centros
[ "def melhores_alunos_de_todos():\n mensagem = '========= MELHORES ALUNOS ========='\n print(f'\\n\\033[1;31m{mensagem}\\033[m')\n for nome_aluno in melhores_alunos:\n print(f\"\\033[0;34m{nome_aluno.center(len(mensagem))}\\033[m\")\n print(f'\\033[1;31m{\"=\" * len(mensagem)}\\033[m')", "def ce...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all the approved centers, paginated.
def aprobados_paginado(self, page, per_page):
    centros = Centro.query.filter(
        and_(Centro.estado == "Aceptado", Centro.activo == True)
    ).paginate(page=page, per_page=per_page, error_out=False)
    return centros
[ "def listaProyectos_a_iniciar(self,page=1):\n try:\n proy = DBSession.query(Proyecto).filter_by(iniciado=False).order_by(Proyecto.id_proyecto)\n usuario = DBSession.query(Usuario).filter_by(nombre_usuario=request.identity['repoze.who.userid']).first()\n proyectos=[]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates that the center does not already exist before adding it to the database.
def validate_centro_creation(self, nombre, direccion, municipio):
    centro = Centro.query.filter(
        and_(
            and_(Centro.municipio == municipio, Centro.direccion == direccion),
            Centro.nombre == nombre,
        )
    ).first()
    return centro
[ "def check_db_entry(self):\n raise NotImplementedError", "def testCampoNombreLineaBase(self):\n try:\n self.linea_base.nombre = None\n self.DBSession.flush()\n except IntegrityError:\n pass\n else:\n self.fail(\"Se esperaba un IntegrityError! Ver...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the center to published or unpublished (boolean).
def update_publicado(self, centro_id, publicado):
    centro = Centro().find_by_id(centro_id)
    if publicado == "True":
        centro.publicado = True
    else:
        centro.publicado = False
    db.session.commit()
[ "def IsPublic(self) -> bool:", "def IsNotPublic(self) -> bool:", "def find_center(self):\n return False", "def idudeddvivacanampragrhyam(self):\n self.Pragrhya = False\n # PMS: 1.1.11. IdUdeddvivacanam pragfhyam\n if self.External and self.Pada1 in pragrhya_set:\n self.Pragrhya = True", "def is_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes a center by setting it to activo=False.
def eliminar(self, id):
    turno = Turnos().turno_centro(id)
    for x in turno:
        x.estado = "CANCELADO"
    centro = Centro().find_by_id(id)
    centro.activo = False
    db.session.commit()
    return centro
[ "def eliminar_pieza(self, cantidad_a_eliminar):\n pass", "def suppressionfich(self):\n\n os.remove(self.chfich)", "def removerRoupa(self):\n\n id_roupa = super().validarInteiro(self=Cadastro, mensagem=\"Digite o id da roupa a ser pesquisada: \")\n \n super().executarQuery(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a queryset of regpersons and a search string; returns a filtered queryset with filters applied to the core_ids.
def search_core_ids(regpersons_queryset, search_string, as_of_date=None):
    core_id_fields = ['national_id', 'birth_reg_id', 'workforce_id', 'beneficiary_id']
    search_strings = tokenize_search_string(search_string)
    q_filter = Q()
    for search_string in search_strings:
        fo...
[ "def filter_persons(query, *, queryset=None):\n if queryset is None:\n queryset = models.Person.objects.all()\n\n if query:\n for token in query.strip().split(' '):\n queryset = queryset.filter(\n Q(last_name__icontains=token)\n | Q(first_name__icontains=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a queryset and a list of field names that the search string can act on.
def direct_field_search(queryset, field_names, search_string, as_of_date=None):
    # Split the string in case of first name, surname e.t.c
    search_strings = tokenize_search_string(search_string)
    q_filter = Q()
    for search_string in search_strings:
        for field in field_names:
            q_filter ...
[ "def _filter_by_multiple_fields(self, queryset, fields, value):\n\n query = Q()\n for field in fields:\n query |= Q(**{field + \"__icontains\": value})\n\n return queryset.filter(query).distinct()", "def filter_in_string(queryset: QuerySet, field_name: str, values: list):\n if n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render the main page with a list of experiment runs.
def show_runs():
    # return render_template("runs.html", runs=data.runs(), type=type)
    return render_template("runs.html", runs=[], type=type)
[ "def experiment():\n return render_template('experiment.html', array = array)", "def index():\r\n redis = _get_redis_connection()\r\n return render_template('split/index.html',\r\n experiments=Experiment.all(redis)\r\n )", "def render_index(request):\n all_setups = logic.get_all_se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Launch TensorBoard for a given run ID and log ID of that run.
def run_tensorboard(run_id, tflog_id):
    data = current_app.config["data"]
    # optimisticaly suppose the run exists...
    run = data.get_run(run_id)
    base_dir = Path(run["experiment"]["base_dir"])
    log_dir = Path(run["info"]["tensorflow"]["logdirs"][tflog_id])
    # TODO ugly!!!
    if log_dir.is_absolute():...
[ "def create(\n cls,\n tensorboard_run_id: str,\n tensorboard_experiment_name: str,\n tensorboard_id: Optional[str] = None,\n display_name: Optional[str] = None,\n description: Optional[str] = None,\n labels: Optional[Dict[str, str]] = None,\n project: Optional...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop all TensorBoard instances launched by Sacredboard.
def close_tensorboards():
    stop_all_tensorboards()
    return "Stopping tensorboard"
[ "def _tensorboard_kill(self):\n print('Closing current session of tensorboard.')\n if sys.platform == 'win32':\n os.system(\"taskkill /f /im tensorboard.exe\")\n elif sys.platform == 'linux':\n os.system('pkill tensorboard')\n else:\n print('No running i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a random first name.
def gen_first_name(ucase=2, lcase=2, gender=False):
    gen_name = {}
    if not gender:
        if random.randrange(1,100) > GENDER_BIAS:
            gender = 'f'
        else:
            gender = 'm'
    _name = None
    _male_name_seed = random.randrange(1, 90040)
    _female_name_seed = random.randrange(1500...
[ "def get_random_name():\n first_name = get_rnd('first_name')\n last_name = get_rnd('last_name')\n username = first_name[0:2] + last_name[0:6]\n return (\"%s\" % username.lower(), \"%s %s\" % (first_name, last_name))", "def create_random_surname(self):\n surname = ''\n for _ in range(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Internal function to randomly generate a gender.
def gen_random_gender(bias=GENDER_BIAS):
    _random = random.randrange(0, 99)
    if _random <= bias:
        return 'f'
    else:
        return 'm'
[ "def gender(self, auto=True):\n if auto:\n if (self.first_name.endswith('a')\n or self.first_name.endswith('ine')\n or self.first_name.endswith('ie')):\n return 'Female'\n return 'Male'\n return random.choice(['Male', 'Female'])", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a random email address based on the first and last names.
def gen_personal_email(first_name, last_name):
    domains = ['gmail.com', 'yahoo.com', 'hotmail.com', 'icloud.com', 'aol.com', 'outlook.com']
    domain_seed = random.randrange(0, len(domains))
    first_seed = random.randrange(0, 2)
    account = ''
    if first_seed == 0:
        account = '{0}.{1}@...
[ "def create_random_email(self):\n email = ''\n for _ in range(self.NAME_LENGTH):\n email += choice(ascii_letters)\n email += '@example.local'\n return email", "def generate_email(self, user_name):\n\t\tfirst_name, last_name = user_name.split()\n\t\treturn first_name.lower() ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a random address for the specified state.
def gen_address(state=None):
    if state:
        if state in STATE_ADDRESS_LIST:
            address_list_length = len(STATE_ADDRESS_LIST[state])
            state_seed = random.randrange(0, address_list_length - 1)
            # print(state_addresses[state])
            return STATE_ADDRESS_LIST[state][state_seed]
        ...
[ "def random_state(self, state):\n pass", "def generate_addresses(self, session):\n # fill a few foreign key dependencies\n session.add(context.State(\n short_name='WA',\n long_name='Washington'\n ))\n\n addresses = [\n context.Address(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a random employer for the specified state.
def gen_employer(state):
    # print(config.pickle_path)
    pkf = open(config.pickle_path +'employers.pkl', "rb")
    employers = pickle.load(pkf)
    pkf.close()
    sublist = [employer for employer in employers if employer['state'] == state]
    seed = random.randrange(0, len(sublist))
    return sublist[seed]
[ "def gen_address(state=None):\n if state:\n if state in STATE_ADDRESS_LIST:\n address_list_length = len(STATE_ADDRESS_LIST[state])\n state_seed = random.randrange(0, address_list_length - 1)\n # print(state_addresses[state])\n return STATE_ADDRESS_LIST[state][st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a business email address based on the person's name and employer.
def gen_business_email(first, last, company_name):
    company_fixed = company_name.replace('&', ' ').replace(' ', ' ')
    company_parts = company_fixed.split()
    if len(company_parts) > 3:
        company_domain = company_parts[0] + company_parts[1] #+ y[2]
    elif len(company_parts) > 1:
        company_domain =...
[ "def generate_email(self, user_name):\n\t\tfirst_name, last_name = user_name.split()\n\t\treturn first_name.lower() +last_name.lower() + \"@dummy.com\"", "def gen_personal_email(first_name, last_name):\n domains = ['gmail.com', 'yahoo.com', 'hotmail.com',\n 'icloud.com', 'aol.com', 'outlook.com']...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a set of dates. Passing birth_year ensures dates are chronological.
def gen_dates(birth_year=None):
    birthdate = None
    if birth_year:
        byear = random.randrange(birth_year - 5, birth_year + 5)
    else:
        byear = random.randrange(1944, 1992)
    birthdate = datetime.date(byear, random.randrange(1, 12), random.randrange(1, 28))
    wyear = random.randrange(byear +...
[ "def generate_selected_dates(year_from=2000, year_to=2020, doy_start=1, doy_end=-1):\n import calendar, time\n dates = []\n for year in range(year_from, year_to+1):\n if doy_end == -1:\n if calendar.isleap(year):\n end_day = 367\n else:\n end_day =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a random set of net worth, income and liquid asset data.
def gen_financials():
    net_worth = random.randrange(5, 33) * 10000
    liquid_assets = net_worth / random.randrange(1, 10)
    annual_income = random.randrange(7, 42) * 5000
    financials = {'net_worth' : net_worth, 'liquid_assets' : liquid_assets, 'annual_income' : annual_income}
    return fin...
[ "def generate(cls):\n account_id = random.randint(0, 10)\n amount = random.randint(0, 20000)\n auction_id = random.randint(0, 20)\n time_unit = random.randint(0, 100)\n return cls(account_id=account_id, amount=amount, auction_id=auction_id, time_unit=time_unit)", "def generate_r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a random phone number.
def gen_phone_number():
    area_code = random.randrange(100, 799)
    phone_1 = random.randrange(100, 999)
    phone_2 = random.randrange(1000, 9999)
    return str(area_code) + str(phone_1) + str(phone_2)
[ "def random_phone_generator():\n first = str(randint(100, 999))\n second = str(randint(1, 888)).zfill(3)\n last = (str(randint(1, 9998)).zfill(4))\n while last in ['1111', '2222', '3333', '4444', '5555', '6666', '7777', '8888']:\n last = (str(randint(1, 9998)).zfill(4))\n \n return '{}-{}-{...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a random bank account number between 9 and 14 characters in length. Some values have an appended text string.
def gen_bank_account(numeric_only = False):
    num_len = random.randrange(7, 12)
    upper_range = int(math.pow(10, num_len)-1)
    account_number = random.randrange(1, upper_range)
    if numeric_only:
        first_letter_seed = 22 #the percentage of account numbers with 1-2 initial letters.
        account_number_...
[ "def generation_account_number():\n return random.randrange(1111111111, 9999999999)", "def paymentcard_digit_gen():\n return uuid.uuid4().hex[:10]", "def card_digit_gen ():\n return uuid.uuid4().hex[:8]", "def gen_credit_card_number():\n return random.choice(CC_TYPES_ACTIVE)()", "def randstring(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a random 16 digit numeric value, common to Visa style credit card numbers.
def gen_credit_card_number():
    return random.choice(CC_TYPES_ACTIVE)()
[ "def rand16(self):\n \n data=self.EZSPtrans([0x49]);\n if data==None:\n print \"Insufficient random data.\";\n return 0;\n return ord(data[6])+(ord(data[7])<<8);", "def card_digit_gen ():\n return uuid.uuid4().hex[:8]", "def paymentcard_digit_gen():\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
iterative_levenshtein(s, t) -> ldist. ldist is the Levenshtein distance between the strings s and t. For all i and j, dist[i,j] will contain the Levenshtein distance between the first i characters of s and the first j characters of t.
def iterative_levenshtein(s, t):
    rows = len(s)+1
    cols = len(t)+1
    dist = [[0 for x in range(cols)] for x in range(rows)]
    # source prefixes can be transformed into empty strings
    # by deletions:
    for i in range(1, rows):
        dist[i][0] = i
    # target prefixes can be created from an empty source string
    # by inserting t...
[ "def levenshtein_distance_using_lexical_tree(lexical_tree, input_string, strategy=0, case_sensitive=0):", "def levenshtein_distance(str_1, str_2):\n return textdistance.levenshtein.normalized_similarity(str_1, str_2)", "def rel_levenshtein(s1, s2):\n maxlen = max(len(s1), len(s2))\n if maxlen > 0:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test fiducial averaging (preload_all = False).
def test_fiducials_10():
    peaks = {"x" : numpy.array([1.0, 2.0, 3.0]), "y" : numpy.array([1.0, 1.0, 1.0])}

    filename = "test_fiducials.hdf5"
    h5_name = storm_analysis.getPathOutputTest(filename)
    storm_analysis.removeFile(h5_name)

    # Write data.
    with saH5Py.SAH5Py(h5_name, is_existing ...
[ "def test(model, loaders, avg=True, device='cpu', loss_func=None):\n model.eval()\n\n acc_arr = []\n loss_arr = []\n\n for loader in loaders:\n loss, acc = test_dataset(model, loader, device, loss_func)\n acc_arr.append(acc)\n loss_arr.append(loss.item())\n\n model.train()\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Strips invalid characters from a filename and ensures that the file_length is less than `max_bytes` bytes.
def strip_invalid_filename_characters(filename: str, max_bytes: int = 200) -> str:
    filename = "".join([char for char in filename if char.isalnum() or char in "._- "])
    filename_len = len(filename.encode())
    if filename_len > max_bytes:
        while filename_len > max_bytes:
            if len(filename) == 0:...
[ "def make_filename_safe(filename):\n allowed_length = 255 # windows doesn't support more than 255 character filenames\n allowed_chars = string.ascii_letters + string.digits + \"~ -_.()\"\n safe_filename = ''.join(c for c in filename if c in allowed_chars)\n return safe_filename[:allowed_length]", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs async functions in sync scopes. Can be used in any scope.
def synchronize_async(func: Callable, *args, **kwargs) -> Any:
    return fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, **kwargs)  # type: ignore
[ "def async_test(func):\r\n\r\n def wrapper(*args, **kwargs):\r\n future = func(*args, **kwargs)\r\n asyncio.run(future)\r\n return wrapper", "async def run_sync(func: Callable[..., T], *args: Any) -> T:\n loop = asyncio.get_event_loop()\n return await loop.run_in_executor(None, func, *ar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert the JSON schema into a Python type hint.
def json_schema_to_python_type(schema: Any) -> str:
    type_ = get_type(schema)
    if type_ == {}:
        if "json" in schema["description"]:
            return "Dict[Any, Any]"
        else:
            return "Any"
    elif type_ == "null":
        return "None"
    elif type_ == "integer":
        return "int"
    ...
[ "def _simple_to_json_schema(_schema):\n if _schema is str:\n return {'type': 'string'}\n elif _schema is int:\n return {'type': 'integer'}\n elif _schema is float:\n return {'type': 'number'}\n elif _schema is bool:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse command-line arguments into a dictionary. Numeric values are automatically converted to the corresponding types.
def parse_args(sys_argv: List[str]) -> Dict[str, Union[str, int, float, bool]]:
    kwargs = {}  # type: Dict[str, Union[str, int, float, bool]]
    if len(sys_argv) > 1:
        for arg in sys_argv[1:]:
            k = arg.split("=")[0][2:]
            v = arg.split("=")[1]  # type: Union[str, int, float, bool]
            ...
[ "def _args_to_dict() -> Dict[str, str]:\r\n arguments = {}\r\n for argument in sys.argv[1:]:\r\n if '=' in argument:\r\n separated = argument.find('=')\r\n key, value = argument[:separated], argument[separated + 1:]\r\n arguments[key] = value\r\n return arguments", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a subscription to the NETCONF server. callback: user-defined callback function to be invoked when a notification arrives. errback: user-defined function to be invoked when an error occurs. manager: Manager object returned when the user connects to the NETCONF server, used to store connection info so ncclient can reconnect using tha...
def request(self, callback, errback, manager=None, retries=20, delay=1, stream=None, filter=None, start_time=None, stop_time=None):
    if callback is None:
        raise ValueError("Missing a callback function")
    if errback is None:
        raise ValueError("Missing a errback function")
    ...
[ "def callback(self, root, raw):\n tag, attrs = root\n if tag != qualify(\"notification\", NETCONF_NOTIFICATION_NS):\n self.user_errback(NotificationError(\"Received a message not of type notification\"))\n return\n notification = Notification(raw)\n self.reconnect_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called when a new RFC 5277 notification is received. The root argument allows the callback to determine whether the message is a notification. Here, root is a tuple of (tag, attributes) where tag is the qualified name of the root element and attributes is a dictionary of its attributes (also qualified names). raw will ...
def callback(self, root, raw):
    tag, attrs = root
    if tag != qualify("notification", NETCONF_NOTIFICATION_NS):
        self.user_errback(NotificationError("Received a message not of type notification"))
        return
    notification = Notification(raw)
    self.reconnect_time = notificat...
[ "def data_received_callback(self, raw: bytes):\n if raw:\n try:\n knxipframe = KNXIPFrame(self.xknx)\n knxipframe.from_knx(raw)\n knx_logger.debug(\"Received: %s\", knxipframe)\n self.handle_knxipframe(knxipframe)\n except Coul...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all of the Wishlists
def list_wishlists():
    app.logger.info("Request for wishlists")
    wishlists = []
    category = request.args.get("category")
    name = request.args.get("name")
    if category:
        wishlists = WishList.find_by_category(category)
    elif name:
        wishlists = WishList.find_by_name(name)
    else:
        ...
[ "def retrieve_all_wishlists(self, include_deleted=False):\n\n all_wishlists = []\n\n if include_deleted:\n # use a list comprehension to easily retrieve the dictionaries and merge them together into a JSON string\n all_wishlists = [{key: contents} for key, contents in self._wishl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a wishlist by id
def get_wishlist(wishlist_id):
    app.logger.info('Request for an wishlist')
    wl = WishList()
    found_wl = wl.find(wishlist_id)
    found_wl_serialized = found_wl.serialize()
    found_wl_id = str(found_wl_serialized['id'])
    app.logger.info(f'Returning item: {found_wl_id}')
    response_body = {
        'data'...
[ "def retrieve_wishlist(self, wishlist_id):\n\n if self._verify_wishlist_exists(wishlist_id):\n return json.dumps(self._wishlist_resources[wishlist_id], indent=4)\n else:\n raise WishlistNotFoundException", "def show_wishlist(wishlist_id: UUID):\n wishlist = list_wishlist_ser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete a Wishlist. This endpoint will delete a wishlist based on the id specified in the path.
def delete_wishlists(wishlist_id):
    app.logger.info(f'Request to delete wishlist with id: {wishlist_id}')
    wishlist = WishList.find(wishlist_id)
    if wishlist:
        wishlist.delete()
        app.logger.info(f'Wishlist with ID [{wishlist_id}] delete complete.')
    return make_response("ITS GONE!", status.HTTP_2...
[ "def delete_wishlist(wishlist_id: UUID):\n delete_wishlist_service.run(wishlist_id)\n return", "def delete_wishlist(self, wishlist_id):\n\n try:\n # even if a delete wishlist call was already made, this will just set the value to True again\n self._wishlist_resources[wishlist_id...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update a Wishlist. This endpoint will update a Wishlist based on the body that is posted.
def update_wishlists(wishlist_id):
    app.logger.info("Request to update wishlist with id: %s", wishlist_id)
    check_content_type("application/json")
    wishlist = WishList.find(wishlist_id)
    if not wishlist:
        raise NotFound("Wishlist with id '{}' was not found.".format(wishlist_id))
    wishlist.deseria...
[ "def update_wishlist(wishlist_id: UUID, wishlist: WishlistUpdateRequestBody):\n updated_wishlist = update_wishlist_service.run(\n wishlist_id=wishlist_id,\n title=wishlist.title,\n description=wishlist.description,\n )\n return model_to_dict(updated_wishlist)", "def update_watchlist(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all of the items in a wishlist
def get_items(wishlist_id):
    app.logger.info('Request for items in wishlist')
    #item = Item()
    items = []
    name = request.args.get("name")
    price = request.args.get("price")
    if name:
        items = Item.find_by_name(name)
    elif price:
        items = Item.find_by_price(price)
    else:
        ...
[ "def load_wishlist():\n #records = wishlist.GetRecords(1,300)\n\n row_query = ListQuery()\n row_query.start_index = str(1)\n rows_feed = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=wishlist.id.text.split('/')[-1])\n\n records = []\n\n for ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns one item in a wishlist
def get_item(wishlist_id, item_id):
    app.logger.info('Request for an item in wishlist')
    item = Item()
    found_item = item.find(item_id)
    found_item_serialized = found_item.serialize()
    found_item_id = str(found_item_serialized['id'])
    app.logger.info(f'Returning item: {found_item_id}')
    response_bo...
[ "def get_wishlist(wishlist_id):\n app.logger.info('Request for an wishlist')\n wl = WishList()\n found_wl = wl.find(wishlist_id)\n found_wl_serialized = found_wl.serialize()\n found_wl_id = str(found_wl_serialized['id'])\n app.logger.info(f'Returning item: {found_wl_id}')\n response_body = {\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an item in a wishlist
def create_item(wishlist_id):
    app.logger.info("Request to create an item in a wishlist")
    check_content_type("application/json")
    item = Item()
    item.deserialize(request.get_json())
    item.create()
    message = item.serialize()
    location_url = url_for("get_item", wishlist_id=item.wishlist_id, item_i...
[ "def create_wishlist(user_id: UUID, wishlist: WishlistRequestBody):\n new_wishlist = create_wishlist_service.run(\n user_id=user_id,\n title=wishlist.title,\n description=wishlist.description,\n )\n return model_to_dict(new_wishlist)", "def create_wishlist(self, name, user_id):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Competition between the given two competitors. Their ranks are updated as a result of the competition. If a winner_id is not specified, or an id that does not belong to one of the competitors is specified, the winner will be determined semi-randomly, with a winner chosen at random but weighted towards the competitor wit...
def compete(comp1, comp2, winner_id=None, decr_uncertainty=0.002, min_uncertainty=0.05):
    # Check that both competitors are valid.
    try:
        isValidCompetitor(comp1)
        isValidCompetitor(comp2)
    except:
        raise TypeError("Invalid competitor")

    # If a winner_id isn't specified, one is semi-randomly determine...
[ "def updateRanks(winner, loser, decr_uncertainty=0.002, min_uncertainty=0.05):\n # Check that both competitors are valid.\n try:\n isValidCompetitor(winner)\n isValidCompetitor(loser)\n except:\n raise TypeError(\"Invalid competitor\")\n\n # Determine the favored competitor.\n favored = None\n favore...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the given object is a valid competitor that includes an id and a rank. If the object is not valid, an error will be raised.
def isValidCompetitor(comp):
    if not isinstance(comp, dict):
        raise TypeError("Invalid object; competitor must be a dict")
    if not "id" in comp:
        raise KeyError("Invalid competitor; competitor must include an id")
    if not "rank" in comp:
        raise KeyError("Invalid competitor; competitor must include a rank")
    ...
[ "def valid_object(obj):\n return cmds.objExists(obj)", "def validate_object_id(object_id):\n\n rex = re.compile('^[A-Za-z0-9-]+$')\n if is_it_a_string(object_id):\n if not rex.match(object_id):\n msg = \"Object ID failed validation: {}\".format(object_id)\n raise CloudPassage...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the ranks of the winner and loser. The amount by which each competitor's rank changes is based on which competitor was favored. An optional uncertainty value is also checked to determine how much each rank is altered. If an uncertainty value is not specified, a value of 10% (0.10) will be used. Uncertainty values ...
def updateRanks(winner, loser, decr_uncertainty=0.002, min_uncertainty=0.05):
    # Check that both competitors are valid.
    try:
        isValidCompetitor(winner)
        isValidCompetitor(loser)
    except:
        raise TypeError("Invalid competitor")

    # Determine the favored competitor.
    favored = None
    favored_rank = 0
    unfa...
[ "def update_ranking(self):\n self.ranking = self.calculate_ranking()\n self.last_ranked_date = datetime.datetime.now()\n # TODO: should this save?\n self.save()", "def rankChange(rank1,rank2,PlayerOutcome):\n deltaWin = -48.0*(math.atan((rank1 - rank2)*1.0/1800.0))/math.pi + 24\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the compete() function and checks that the ranks are properly updated for each competitor. This is a manual test, meaning that with the exception of errors being raised, the test won't fail. Results will be output to the terminal for analysis.
def rankTest():
    # Set default rank and uncertainty.
    base = 1000
    rank = 500
    uncertainty = 0.15

    # Test Competitors.
    competitors = list()
    for i in range(1, 9):
        comp = {"id": "Player_{0}".format(i), "rank": (base + (rank * i)), "uncertainty": uncertainty}
        competitors.append(comp)

    print "\n\...
[ "def testRanks(self): # unit test for ranks 1-13\r\n \r\n for i in range(1,14):\r\n myCard = Card(i,'c') # create i of clubs\r\n self.assertEqual(myCard.rank(),i) # verifies that the card's rank is i\r", "def test_computer_loop(self):\n\n s = 0\n for i in range(100):\n gam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count newsletters for this group
def get_count_newsletters(self, obj):
    return obj.newsletter_set.count()
[ "def number_of_articles():", "def test_number_emails_per_all_users(self):\n jotd.settings.DAYCOUNT = 5\n jotd.store_messages()\n \n curs.execute(\"SELECT COUNT(*) FROM jotd_emails\")\n msg_count = curs.fetchone()[0]\n self.assertEqual((jotd.settings.DAYCOUNT * len(self.re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a file to the repository
def add_file(self, file_path):
    self._repo.index.add([str(file_path)])
[ "def test_add_file(self):\n filename = \"quux\"\n file_path = os.path.join(self.repo, filename)\n with salt.utils.files.fopen(file_path, \"w\") as fp_:\n fp_.write(\n salt.utils.stringutils.to_str(\n \"This is a test file named {}.\\n\".format(filena...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Are there changes that need to be committed?
def has_changes(self):
    return self._repo.is_dirty()
[ "def hasChanges(self):\n return self.changes", "def has_pending_changes(self):\n status = self._execute(['git', 'status', '--porcelain',\n '--untracked-files=no',\n '--ignore-submodules=dirty'])\n return status != ''", "def track...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Roll back repository to previous commit, removing untracked files
def rollback(self):
    self._repo.head.reset('HEAD~', working_tree=True)
    for f in self.untracked_files:
        os.remove(os.path.join(self._root, f))
[ "def rollback():\n with project():\n with update_changed_requirements():\n update = \"git checkout\" if env.git else \"hg up -C\"\n run(\"%s `cat last.commit`\" % update)\n with cd(join(static(), \"..\")):\n run(\"tar -xf %s\" % join(env.proj_path, \"last.tar\"))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get commit object relating to version
def get_commit(self, version):
    if version == 'latest':
        return self.latest_commit
    else:
        return Commit(self, self._repo.commit(version))
[ "def get_commit(self, seq_no):\n\n return self.commits[seq_no]", "def _version_by_commit_checksum(self, commit: str) -> Version:\n checksums = [version.checksum for version in self.versions]\n checksums.append(commit)\n checksums = sorted(checksums)\n candidate_checksum = checks...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Path of COMBINE manifest file
def manifest_path(self):
    return self.full_path(MANIFEST_FILENAME)
[ "def manifest_path(self):\n return os.path.join(self._dirname, 'build', 'manifest.txt')", "def get_qc_manifest_path():\n manifest_path = \"//allen/programs/braintv/workgroups/nc-ophys/visual_behavior/2020_cache/production_cache/manifest.json\"\n return manifest_path", "def conanfile(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate COMBINE manifest file for the repository index. Stages the manifest file for commit. Will overwrite an existing manifest.
def generate_manifest(self, master_filename=None):
    writer = ManifestWriter()
    for entry in sorted(e for (e, _) in self._repo.index.entries):
        writer.add_file(entry, is_master=entry == master_filename)
    writer.write(self.manifest_path)
    self.add_file(self.manifest_path)
[ "def write_manifest (self):\r\n self.execute(file_util.write_file,\r\n (self.manifest, self.filelist.files),\r\n \"writing manifest file '%s'\" % self.manifest)", "def create_manifest(output_dir, pipeline_name, paths):\n if not os.path.isdir(output_dir):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
All filenames in this commit
def filenames(self):
    return {blob.name for blob in self._commit.tree.blobs} | self.ephemeral_file_names
[ "def _get_list_of_committed_files():\n files = []\n # pylint: disable=E1103\n diff_index_cmd = 'git diff-index --cached %s' % _current_commit()\n output = subprocess.check_output(\n diff_index_cmd.split()\n )\n for result in output.split('\\n'):\n if result != '':\n result...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
All files in this commit
def files(self):
    return chain(self._commit.tree.blobs, self.ephemeral_files)
[ "def _get_list_of_committed_files():\n files = []\n # pylint: disable=E1103\n diff_index_cmd = 'git diff-index --cached %s' % _current_commit()\n output = subprocess.check_output(\n diff_index_cmd.split()\n )\n for result in output.split('\\n'):\n if result != '':\n result...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SHA of the commit
def sha(self):
    return self._commit.hexsha
[ "def get_commit_hash():\n return git.Repo().head.object.hexsha", "def get_git_commit_sha():\n\n return os.getenv(\"GIT_COMMIT\")", "def get_head_sha(self, path): # type: (str) -> str\n command = [\n 'git',\n 'rev-parse',\n 'HEAD',\n ]\n abspath = os.pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Author of the commit
def author(self):
    return self._commit.author
[ "def get_author(self, commit_hash):\n\n command = []\n command.append(COMMAND_GIT)\n command.append(OPTION_SHOW)\n command.append(\"-s\")\n command.append(\"--format=%cE\")\n command.append(commit_hash)\n\n std_out, std_err = self._run(command)\n\n author_emai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Datetime representation of commit timestamp
def timestamp(self):
    return datetime.fromtimestamp(self._commit.committed_date).replace(tzinfo=utc)
[ "def timestamp_to_str(commit_timestamp) -> str:\n date = datetime.fromtimestamp(float(str(commit_timestamp)[:-3]))\n d = date.strftime(\"%A, %B %d, %Y %H:%M\")\n return str(commit_timestamp) + \" (\" + d + \")\"", "def get_commit_date(commit):\n return commit['commit']['author']['date']", "def get_r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get name of master file on this commit, as defined by COMBINE manifest
def master_filename(self):
    reader = ManifestReader()
    for file_ in self.files:
        if file_.name == MANIFEST_FILENAME:
            reader.read(file_.data_stream)
    return reader.master_filename
[ "def cctFileName(self):\n p = os.path.basename(self.cctFilePath())\n return p", "def name(self):\n # `git rev-parse --show-toplevel` prints the path to the top-level\n # directory of the repository.\n return os.path.basename(\n self.run_git_cmd(['rev-parse', '--show-t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a git note to this commit
def add_note(self, note):
    cmd = self._repo._repo.git
    cmd.notes('--ref', self.NOTE_REF, 'add', '-f', '-m', note, self.sha)
[ "def write_note(data, commit='HEAD'):\n command = 'git notes --ref=%s add ' % NOTES_REF\n for k, v in data.iteritems():\n command = '%s -m \"%s: %s\"' % (command, k, v)\n run_command(command)", "def add_log_note(self, note):", "def add_note(self):\n note_id = __notes__.new_note()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the git note of this commit
def get_note(self):
    cmd = self._repo._repo.git
    try:
        return cmd.notes('--ref', self.NOTE_REF, 'show', self.sha)
    except GitCommandError:
        return None
[ "def read_note(commit='HEAD'):\n try:\n output, _ = run_command('git notes --ref=%s show' % NOTES_REF,\n trap_stdout=True, trap_stderr=True,\n output_on_error=False)\n except CalledProcessError:\n return {}\n\n data = {}\n for l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an ephemeral file as a git note.
def add_ephemeral_file(self, name, content=None, path=None):
    if name in self.filenames:
        raise ValueError("File name '{}' has already been used".format(name))
    cmd = self._repo._repo.git
    with TemporaryDirectory() as tmpdir:
        if path is None:
            # Write content t...
[ "def add_note(self, note):\n cmd = self._repo._repo.git\n cmd.notes('--ref', self.NOTE_REF, 'add', '-f', '-m', note, self.sha)", "def write_note(data, commit='HEAD'):\n command = 'git notes --ref=%s add ' % NOTES_REF\n for k, v in data.iteritems():\n command = '%s -m \"%s: %s\"' % (comm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the contents of an ephemeral file as a Blob object.
def get_ephemeral_file(self, name):
    cmd = self._repo._repo.git
    try:
        blob_hexsha = cmd.notes('--ref', self.FILE_REF_BASE + name, 'list', self.sha)
        binsha = binascii.a2b_hex(blob_hexsha)
        return Blob(self._repo._repo, binsha, path=name)
    except GitCommandError: ...
[ "def _get_blob(self):\n return self.bucket.get_blob(self.flowerpot_path)", "def get_blob(self):\n return Blob.Blob(self._internal.get_blob())", "def fetch_blob(server, uuid, instance, reference, as_json=False, *, session=None):\n r = session.get(f'{server}/api/node/{uuid}/{instance}/blobstore/{...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the names of any ephemeral files associated with this commit.
def list_ephemeral_files(self):
    cmd = self._repo._repo.git
    try:
        names_note = cmd.notes('--ref', self.FILE_LIST_REF, 'show', self.sha)
        return {n for n in names_note.split('\n') if n}
    except GitCommandError:
        return set()
[ "def filenames(self):\n return {blob.name for blob in self._commit.tree.blobs} | self.ephemeral_file_names", "def ephemeral_files(self):\n for name in self.ephemeral_file_names:\n yield self.get_ephemeral_file(name)", "def files(self):\n return chain(self._commit.tree.blobs, self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An iterable of `git.Blob` objects representing ephemeral files.
def ephemeral_files(self):
    for name in self.ephemeral_file_names:
        yield self.get_ephemeral_file(name)
[ "def files(self):\n return chain(self._commit.tree.blobs, self.ephemeral_files)", "def list_ephemeral_files(self):\n cmd = self._repo._repo.git\n try:\n names_note = cmd.notes('--ref', self.FILE_LIST_REF, 'show', self.sha)\n return {n for n in names_note.split('\\n') if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterable over immediate parents of this commit.
def parents(self): return (Commit(self._repo, parent) for parent in self._commit.parents)
[ "def ancestors(self):\n return (\n Commit(self._repo, parent)\n for parent in self._commit.iter_parents()\n )", "def get_parents(self):\n return []", "def parents(self, rev):\n self._scanparents(rev)\n return [r for _c, r in sorted(self._parents.get(rev, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterable over all ancestors of this commit.
def ancestors(self):
    return (
        Commit(self._repo, parent)
        for parent in self._commit.iter_parents()
    )
[ "def ancestors(self):\n stack = deque([self])\n parent = self.parent\n while parent:\n stack.appendleft(parent)\n parent = parent.parent\n return list(stack)", "def ancestors(self):\r\n ancestor_list = [self,]\r\n if self.superordinate is not None:\r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Grab the JARM fingerprint from the local 'possible_jarms' store. This should be run after setup.sh to output the possible configurations.
def get_jarm_from_local(tls_version, cipher):
    try:
        with open(config.paths['possible_jarms']) as _file:
            jarms = json.load(_file)
        for j in jarms:
            if any(c['tls_version'] == tls_version and c['cipher'] == cipher for c in jarms[j]['configs']):
                return j
    except:...
[ "def jar(self):\n\n\t\treturn self.tool_config.get('jar', default = 'minecraft_server.jar')", "def get_jamf_information():\n r = requests.get('{}/JSSResource/computers/serialnumber/{}'.format(API_URL, get_serial_number()),\n auth=(API_USER, API_PASS))\n xml_response = untangle.parse(r.te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all streamers from db
def get_all():
    # Get all streamers from db
    streamers = Streamer.query.all()
    # Serialize the data for the response
    streamer_schema = StreamerSchema(many=True)
    data = streamer_schema.dump(streamers)
    return data
[ "def get_streams(self):\n return self.result_set", "def entries(self):\n return self._streams", "def data_streams(self):\n return self._GetDataStreams()", "def make_list():\n list_of_streamers = []\n for st in loop:\n st = Streamer(name=st['name'], real_name=['real_name'], uid=st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all streamers matching the given username
def get_one(username):
    # Get matching streamers (if multiple platforms there could be more than one result)
    streamers = Streamer.query.filter(Streamer.username == username).all()
    # If no streamer found
    if len(streamers) == 0:
        # Try to fetch streamer from platform API
        verif_found = Fals...
[ "def search_stream(self, stream_name):\n rows = self.session.query(Stream.name).filter(Stream.name.ilike('%'+stream_name+'%')).all()\n if rows:\n return rows\n else:\n return []", "def get_viewers(streamer_obj) -> list:\r\n session = requests.Session()\r\n retry = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create streamer in database
def create_db(streamer):
    schema = StreamerSchema()
    new_streamer = schema.load(streamer, session=db.session)
    # Add the streamer to the database
    db.session.add(new_streamer)
    db.session.commit()
[ "def createDataStream(self):\n\n name = \"SL_MIXED_MUL_PY_12327\"\n self.datastream.set_name(name)\n\n timezone = \"GMT\"\n self.time.set_zone(timezone)\n\n timeIdentifier = \"time\"\n self.time.set_identifier(timeIdentifier)\n\n self.time.set_format(\"millis\")\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete streamers with matching username
def delete(username):
    # Find matching streamers
    existing_streamers = Streamer.query.filter(Streamer.username == username).all()
    if existing_streamers is not None:
        # Delete streamer for all platforms
        for streamer in existing_streamers:
            db.session.delete(streamer)
            db.sess...
[ "def remove(self):\n testing = Streamer(name=self)\n for test in loop:\n if test['name'] == testing.name:\n removed_streamer = {\n \"name\": testing.name,\n \"real_name\": testing.real_name,\n \"id\": testing.uid,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get LaTeX diffs of 'file' (comparing the actual copy with its youngest repository version)
def LaTeXDiff(target=None, source=None, env=None):
    print 'SOURCES :', source
    print 'TARGET :', target
    do_rev = env['DOREV']
    if do_rev:
        #
        # The command below seems not to work with me :-(
        # svn diff -r [REV] $SOURCEFILE | patch -R -p0 -o $TARGET
        # What follows is mor...
[ "def git_diff(filepath, since):\n html_diff = None\n commits = git_commits(filepath, since)\n if commits:\n cmd = ('git', '--no-pager', 'diff', commits[-1]+'^', '--',\n filepath)\n stdout, stderr = execute(cmd)\n\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
We return a dataset or datapoint with all attributes that have the given role
def __getattr__(
    self, role: str
) -> Union["algoneer.dataset.Dataset", "algoneer.dataset.Attribute"]:
    relevant_columns: List[str] = []
    for attribute in self._obj.attributes.values():
        if role in attribute.roles:
            relevant_columns.append(attribute.column)
    re...
[ "def data(self, column, role):\n return self.columns[column](self._project, role)", "def filter_granted(self, queryset):\n return Dataset.filter_by_user(self.request.user)", "def data(self, column, role):\n return self.columns[column](self._user, role)", "def skillsRelevantForRole(self, r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Randomly sample datapoints without labels.
def sample_unlabeled_data(n_samples):
    gen = ToyDataGenerator()
    xs, _ = gen.generate_uniform_dataset(n_samples)
    return xs
[ "def get_random_datapoint(self):\n random_datapoint = None\n if self.missing_label_placeholder is not None:\n random_datapoint = np.random.choice(\n len(self.y_[self.labeled_indices_]))\n else:\n random_datapoint = np.random.randint(low=0, high=len(self.y_))...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }