query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Get back a list of words for each message in the twitch table of the twitch database, and stem them if any argument is passed
def get_sentences(*arg): sql_query =""" SELECT edited_message FROM twitch; """ messages = pd.read_sql_query(sql_query,con) messages['edited_message']=messages['edited_message'].apply(lambda x:re.split('[^a-z0-9]',x.lower())) if len(arg)!=0: messages['edited_message']=messages['edited_message'].apply(english_ste...
[ "async def words(self, ctx):\n wcheck = await self.bot.pool.fetch(\"SELECT * FROM cursewords WHERE guildid = $1\", ctx.guild.id)\n if wcheck == []:\n await ctx.send(\"There are no blocked words in this server.\")\n return\n\n wordsstring = []\n\n for words in wcheck...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts features as follows: [...word clusters..., does it end in a question mark, caps ratio, symbol density, message length]
def extrace_advanced_features(word_vec,cluster_model,message,edited_message,*arg): number_of_clusters = len(cluster_model.cluster_centers_) vocab = word_vec.wv.vocab features = [0]*(number_of_clusters) words = re.split('[^a-z0-9]',edited_message.lower()) words = filter(lambda x: x != '', words) if len(arg)!=0: ...
[ "def msg_features( msg, spam_words, ham_words ):\n msg = msg.lower()\n sw = get_spam_words( msg, spam_words )\n \n base_dict = {\n \"length\" : get_length_bucket( len( msg ) ), \n \"contains_excl\" : ( \"!\" in msg ),\n \"many_excl\" : msg.count( \"!\" ) > 2,\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the user sends a no intent, there are two possible meanings: either the user is saying no, they will not hit the button, or the user is saying no, I would not like to play another game, despite that not being part of what Alexa is asking for. We handle this by keeping track of the current state of the session using the...
def no(): if session.attributes['state'] != DILEMMA_STATE: # If we haven't just asked them a dilemma, then quit the session! return quit_session() else: # If we just asked them a dilemma, then handle that correctly. return answer_question(False)
[ "def fallback_intent(handler_input):\n # type: (HandlerInput) -> Response\n session_attr = handler_input.attributes_manager.session_attributes\n\n if (\"game_state\" in session_attr and\n session_attr[\"game_state\"]==\"STARTED\"):\n speech_text = (\n \"The {} skill can't help ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When the user's session ends, send this response to the AVS
def session_ended(): return "", 200
[ "def response_done(self):\n self.session._res_done(self)", "def _end_sessioninfo(self):\n self.db[self.tag].endtime = self.msg.timestamp\n if self.sipmsg.method == \"CANCEL\":\n self.db[self.tag].status = \"CANCEL\"\n else:\n self.db[self.tag].status = \"ENDED\"",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a question ID and the user's response, notify the website, and get the most updated stats about the question.
def get_response_stats_data(question_id, user_response): webpage = 'https://willyoupressthebutton.com/{0}/'.format(question_id) if user_response: webpage += 'yes' else: webpage += 'no' webpage_content = get_webpage(webpage) soup = bs(webpage_content, 'html.parser') main_contai...
[ "def update_question(questid, userid):\n\n quest = db(current.db.question.id == questid).select().first()\n\n answers_per_level = 3\n\n # first step is to select the related user and question records their should\n # only ever be one of each of these and we update as much as possible here \n # becaus...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Commandline tool for gitcheck.check_repos
def main(): print("#") print("# [\u2713] = Committed [\u2717] = Dirty [?] = Not a git repository") print("#") if len(sys.argv) > 1: for path in sys.argv[1:]: print("# Checking {}".format(path)) gitcheck.check_repos(path) else: print("# Checking {}".format(o...
[ "def git( args ):\n run( *args )", "def check_repo_closure():\n configs = glob.glob(\n os.path.join(os.environ.get('SUITE'), '*reposync*.repo')\n )\n if not configs:\n raise RuntimeError(\"Could not find reposync config file.\")\n for config in configs:\n command = reposync_con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set all states back to zero using a given batch_size
def reset(self, batch_size): for layer in self.layers: layer.reset(batch_size=batch_size)
[ "def reset(self):\n self.losses = []\n self.batch_sizes = []", "def reset(self):\n self.batch = []\n self.batch_size_estimation = 0\n self.trust_batch_estimation = True", "def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self)....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to load commands from the given file. Returns (commands, max_x, max_y)
def load_gcode_commands(filename: str) -> [List[ESPPoint], float, float]: gcode = open(filename, "r") commands = [] # type: List[ESPPoint] last_move_point = ESPPoint() max_x = 0.0 max_y = 0.0 for file_line in gcode: line = file_line.rstrip().upper() if line.startswith('G1')...
[ "def load_game(filename):\n fin = open(filename)\n nextline = fin.readline()\n maxX, maxY = [int(token) for token in nextline.split()]\n gameArray = []\n for i in range(maxX):\n nextline = fin.readline()\n gameArray.append(nextline.split())\n fin.close()\n return gameArray", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replaces all characters after a certain count with XXXXX
def redact_after_count(text: str, count: int) -> str: return text[:count] + "XXXXX"  # assumed body: keep the first count characters and redact the rest
[ "def repeating_chars(text: str, *, chars: str, maxn: int = 1) -> str:\n return re.sub(r\"({}){{{},}}\".format(re.escape(chars), maxn + 1), chars * maxn, text)", "def replaceSharpWithPadding(string, index):\n if string.count(\"#\") == 0:\n string += \"#\"\n\n digit = str(index)\n while len(digit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replaces all characters after a certain symbol appears with XXXXX
def redact_after_symbol(text: str, symbol: str) -> str: return text.split(symbol, 1)[0] + symbol + "XXXXX" if symbol in text else text  # assumed body: redact everything after the first occurrence of symbol
[ "def prepareEmojiStrReplacement(emojiStr):\n replacement = emojiStr.lower().replace(\" \", \"_\")\n return \" ***\" + replacement + \"*** \"", "def replace_second_occcurance(str1):\n char = str1[0] #get the char\n print(char)\n str1 = str1.replace(char, '$') # replace the both r string\n print(str1)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the actual "color dictionary" data structure based on the specified parameters. Returns a defaultdict (from the collections library) for all the colors in the data file according to the specified parameters. The defaultdict was chosen (as opposed to a normal Python dictionary) for easier syntax when dealing wi...
def gen_color_dict(self) -> collections.defaultdict: color_dict = collections.defaultdict(list) for line in self.data: left, right = line.split(" contain ") container = clean_text(left) for contents in right.split(","): if not self.include_counts: ...
[ "def color_dict(dates):\n f_date = '%Y %#m %#d' if sys.platform == 'win32' else '%Y %-m %-d'\n cycle = cycledate(dates, f_date)\n ovul = ovulation(dates, f_date)\n c_dict = {}\n if cycle:\n d = {-5: 'gold2', -4: 'orange', -3: 'dark orange',\n -2: 'DarkOrange3', -1: 'OrangeRed3', 0:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Console script for crcnsget
def main(username,password,dataset): crcnsget.download(dataset,username,password)
[ "def test_pi18_fullcommand_MCHGCR(self):\n protocol = pi()\n result = protocol.get_full_command(\"MCHGCR\")\n expected = b'^P009MCHGCR\\r'\n # print(result)\n self.assertEqual(result, expected)", "def str_crc_example(self, crcdict, message=None):\n \n crclen = crcd...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return statements that will create a complete offerchain
def make_offer(cls, oc): return OfferChainStmt.get_create_stmts(oc)
[ "def get_create_stmts(cls, oc):\n uuid = OfferChainMetaDataStmt.uuid(meta=oc.get('metadata'), elig=oc.get('eligibility'), oc=oc)\n\n if not oc.get('id'):\n oc['id'] = binascii.crc32(uuid) & 0xffffffff\n cls.offerChainId = oc['id']\n\n stmts = []\n stmts.append('BEGIN')\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return statements that will create a complete offerchain. Iterate down through configuration to create lower level statements
def get_create_stmts(cls, oc): uuid = OfferChainMetaDataStmt.uuid(meta=oc.get('metadata'), elig=oc.get('eligibility'), oc=oc) if not oc.get('id'): oc['id'] = binascii.crc32(uuid) & 0xffffffff cls.offerChainId = oc['id'] stmts = [] stmts.append('BEGIN') stmts...
[ "def make_offer(cls, oc):\n return OfferChainStmt.get_create_stmts(oc)", "def get_create_stmts(cls, conf):\n stmts = []\n if conf is None:\n return stmts\n\n for c in conf:\n stmts.append(\"BEGIN\")\n stmts.append(\" SELECT 1 INTO TMP_VAR FROM ACTION W...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create stmts for the notifications. Ensure that the foreign key values exist as well. Create if need be.
def get_create_stmts(cls, conf): stmts = [] if conf is None: return stmts for c in conf: stmts.append("BEGIN") stmts.append(" SELECT 1 INTO TMP_VAR FROM ACTION WHERE NAME = '{NAME}';".format(NAME=c['action'])) stmts.append("EXCEPTION") ...
[ "def insert_notification_list_db(self, jsonData, recover_by, session):\n\n # NOTE: The notification item 'endTime' may have a NULL value.\n # reference : The Notification Spec for RecoveryController.\n # JSON decoder perform null -> None translation\n try:\n if not jsonD...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flip [to_swap] case each time it appears in phrase. >>> flip_case('Aaaahhh', 'a') 'aAAAhhh' >>> flip_case('Aaaahhh', 'A') 'aAAAhhh' >>> flip_case('Aaaahhh', 'h') 'AaaaHHH'
def flip_case(phrase, to_swap): swap_test = {to_swap.lower(), to_swap.upper()} phrase_flip = [ letter.swapcase() if letter in swap_test else letter for letter in phrase] return "".join(phrase_flip)
[ "def truecase(word: str, case_counter: Dict[str, int]):\n lcount = case_counter.get(word.lower(), 0)\n ucount = case_counter.get(word.upper(), 0)\n tcount = case_counter.get(word.title(), 0)\n if lcount == 0 and ucount == 0 and tcount == 0:\n return word #: we don't have enough information to ch...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract location vertices with given color
def copy_color_area(self, arr, color): arr_color = np.where(np.all(arr == color, axis=-1)) location_color = list(zip(arr_color[0], arr_color[1])) return location_color
[ "def pick_points_on_shape(self):\r\n a = self.a \r\n N = 81 # number of vertices\r\n t = np.linspace(-4,4,N)\r\n verts = np.zeros((N,2))\r\n verts[:,0] = a*(np.abs(t))**3 - 1.0\r\n verts[:,1] = t\r\n return t, verts", "def get_vertex(self, id_num):", "def g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the X and Y coordinates of a point, returns the Z coordinate that is in the plane.
def getZ(self, x, y): if self.perpVector[Z] == 0: return self.point[Z] return self.point[Z] + self.perpVector[X]*(self.point[X]-x) + self.perpVector[Y]*(self.point[Y]-y)
[ "def get_y(plane, x, z):\n p_a, p_b, p_c, p_d = plane\n y = (-p_a*x - p_c*z - p_d)/p_b\n return y", "def pointAtZ(z, point, vec):\n return point + vec * ((z-point.getZ()) / vec.getZ())", "def project_onto_plane(plane, point):\n pt = np.array(point).reshape(3, 1)\n pt[2, 0] = 1\n new_z = pla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
injectfind searches process memory for potentially injected code
def main(): process = flaredbg.get_process_obj() found = False for mbi in process.get_memory_map(): if mbi.is_executable() and mbi.is_private(): base_addr = mbi.BaseAddress size = mbi.RegionSize print '-' * 0x40 print "Path: %s Pid: ...
[ "def find_pattern_in_process_memory(pattern, pid, read_chunk=0xffff, start_addr=0, end_addr=0x7fffffff):\n found_addresses = []\n buf = ctypes.create_string_buffer(read_chunk)\n bytes_read = ctypes.c_size_t()\n process_handle = OpenProcess(PROCESS_ALL_ACCESS, False, pid)\n # scan memory\n for i in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all legal moves for the peg in the specified coordinate (r, c) as a list of coordinates the peg can jump to
def get_legal_actions(self, r: int, c: int): legal_actions = [] neighbors = self.board.get_neighbors(r, c) for node in neighbors: row = node[0] col = node[1] cell = self.board.cells[row][col] # If the current neighboring cell is filled ...
[ "def legal_moves(state, color):\n # TODO You have to write this\n get = []\n possible = []\n flag = 0\n ro = 0\n col = 0\n for row in state:\n for square in row:\n if square == color:\n get.append((ro, col))\n col += 1\n col = 0\n ro += ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Opens a connection to the RabbitMQ message bus, waits for messages and publishes them to all connected websockets.
async def consume_messages_from_bus(loop): connection = await rabbitmq.get_aio_connection(loop) async with connection: channel = await connection.channel() exchange = await channel.declare_exchange( '/messages/:POST', type=aio_pika.exchange.ExchangeType.FANOUT) queue = await ...
[ "async def connect_to_amqp(self):\n self._status = StatusCodes.CONNECTING\n rabbitmq_config = await self.get_rabbitmq_configuration()\n await self.set_queues_prefix_iostreams()\n self.amqp_connection = AMQPConnection(\n **rabbitmq_config, ioloop=self.loop, on_error_callback=se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate an attribute dictionary from an element
def attrib_parser(element, fields): attr_dict = {} # Fill attr_dict from element attributes but only attributes designated by field for attr in element.attrib: # takes elements specified in field if attr in fields: attr_dict[attr] = element.attrib[attr] ...
[ "def gather_all_attributes_for_element(driver, element):\n return driver.execute_script(\n '''\n var items = {};\n for (index = 0; index < arguments[0].attributes.length; ++index) {\n items[arguments[0].attributes[index].name] = arguments[0].attributes[index].value\n };\n return items;\n '''...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A special makeRecord method for the logger to convert msg and args into strings using the correct encoding if they are unicode strings. This function also makes sure we have at least a basic handler.
def make_record(self, name, level, fn, lno, msg, args, *_args, **_kwargs): if len(self.root.handlers) == 0: # create handler, we don't have one create_logger() # convert message to string msg = unicode_to_str(msg) # convert args to string args = tuple([ unicode_to_str(x) for x in ar...
[ "def _makeRecord(name, level, fn, lno, msg, args, exc_info,\n func=None, extra=None, sinfo=None):\n rv = _LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo)\n\n if extra is not None:\n for key in extra:\n if (key in [\"message\", \"asctime\"]) or (key in rv.__di...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the given string is too long.
def ValidateStringLenth(value, max_length=_MAX_STRING_LENGTH): if isinstance(value, basestring): if len(value) <= max_length: return True return False
[ "def _checkStringSize(self, lengthAsString):\n if len(lengthAsString) > self._maxLengthSize():\n raise NetstringParseError(self._TOO_LONG % (self.MAX_LENGTH,))", "def tweetswarm_string_validate(s):\n return s.__len__() < 140 and s.__len__() > 0", "def isLongString(obj):\n\treturn type(obj)=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that if ``max_size`` is specified, that ``self.client`` is running 6.1 or higher.
def _check_max_size(self, data): try: if 'max_size' in data['conditions']: version = get_version(self.client) if version < (6,1,0): raise ConfigurationError( 'Your version of elasticsearch ({0}) does not support ' ...
[ "def configure(self, maxsize):\n self.maxsize = maxsize", "def set_maximum_message_size(self, max_size):\n self._maximum_message_size = max_size", "def check_max_request_body_size(audit_options):\n default = audit_options['cinder-conf']['DEFAULT']\n oslo_middleware = audit_options['cinder-co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a body from conditions and settings
def body(self): retval = {} retval['conditions'] = self.conditions if self.settings: retval['settings'] = self.settings return retval
[ "def create_body(self, body=None, **kwargs):\r\n if self.locked:\r\n raise LockedError('Cannot create a body while simulating')\r\n\r\n if body is not None:\r\n body=copy(body)\r\n else:\r\n body=Body(**kwargs)\r\n\r\n self.add_body(body)\r\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log the results based on whether the index rolled over or not
def log_result(self, result): dryrun_string = '' if result['dry_run']: dryrun_string = 'DRY-RUN: ' self.loggit.debug('{0}Result: {1}'.format(dryrun_string, result)) rollover_string = '{0}Old index {1} rolled over to new index {2}'.format( dryrun_string, ...
[ "def log_cluster(self):\n pass", "def log(err):\r\n\tglobal count\r\n\tcount = count+1\r\n\tprint currentpoint+\": \"+err", "def compute_logs(self):\n raise Exception(\"Not implemented\")", "def test_low_cardinality_indexes(self):\n cluster = self.cluster\n time.sleep(1)\n c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A method to upload audio to the temp file folder
def upload_audio(request): audio = request.FILES.get("file") audio_temp = AudioTemp(position=audio) audio_temp.save() return JsonResponse({ "status": 0, "id": audio_temp.id, "url": audio_temp.position.name})
[ "def test_upload_temporary_file(self):\n pass", "def __upload_blob(self, path_to_audio_file):\n storage_client = storage.Client()\n\n bucket = storage_client.get_bucket(self.__bucket_name)\n blob = bucket.blob(self.__blob_path)\n blob.upload_from_filename(path_to_audio_file)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A method to upload a course. First of all, the course will be created in the course table according to the input parameters. Then the pictures will be created in the picture table.
def upload_course(request): status = 0 form = json.loads(request.POST.get("updateForm")) img_info = form["imgInfo"] profile_url = None for img in img_info: if img["start"] == 0: profile_url = img["id"] break try: profile = PictureTemp.objects.get(pk=profil...
[ "def _insert_pictrue_(image_list, course):\n try:\n for img in image_list:\n try:\n picture_id = int(img[\"id\"])\n picture = PictureTemp.objects.get(pk=picture_id).position\n pic = Picture()\n name = picture.name.split(\"/\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A method to edit a course. The inner idea is almost the same as the method `upload_course`, except for the deletion checks on pictures and audio.
def edit_course(request): try: status = 0 result = "" form = json.loads(request.POST.get("updateForm")) course_id = form["courseId"] img_info = form["imgInfo"] img_remove_list = form["imgRemoveList"] try: _update_pictures_(course_id, img_info, img_...
[ "def course_edit(request, course_id):\n\n course = Course.objects.get(id=course_id)\n courses = Course.objects.filter(owner=request.user)\n\n\n if request.method != 'POST':\n #Initial request; pre-fill form with the current entry.\n form = CourseForm(instance=course)\n else:\n form ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An inner method to update audio while editing courses.
def _update_audio_(course_id, audio_info): course = Course.objects.get(course_id=course_id) dir = audio_info["url"].split("/") if dir[-2] == "audio_temp": audio = AudioTemp.objects.get(pk=audio_info["id"]).position course.audio_url = File(audio, dir[-1]) audio.close() course....
[ "def upload_course(request):\n status = 0\n form = json.loads(request.POST.get(\"updateForm\"))\n img_info = form[\"imgInfo\"]\n profile_url = None\n for img in img_info:\n if img[\"start\"] == 0:\n profile_url = img[\"id\"]\n break\n try:\n profile = PictureTem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An inner method to insert pictures into the course.
def _insert_pictrue_(image_list, course): try: for img in image_list: try: picture_id = int(img["id"]) picture = PictureTemp.objects.get(pk=picture_id).position pic = Picture() name = picture.name.split("/") pic.post...
[ "def addImage(self, img) -> None:\n ...", "def OnInsertImg(self, ev):\n self.PlaceNewCard(\"Image\", pos=self.menu_position)", "def add_images(ibs, gpath_list):\n print('[ibs] add_images')\n print('[ibs] len(gpath_list) = %d' % len(gpath_list))\n #print('[ibs] gpath_list = %r'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An inner method to return course information
def _get_course_info_(course): try: course_info = { "courseTitle": course.course_name, "courseDescription": course.description, "courseContain": course.content, "messageOn": course.message_on, "price": course.price, "destroyTime": cours...
[ "def api_courses_get():\n\tpass", "def course(course_code):\n base_url = (\n f\"{settings.UQ_BASE_URL}/programs-courses/course.html?course_code={course_code}\"\n )\n soup = helpers.get_soup(base_url)\n\n if soup is None or soup.find(id=\"course-notfound\"):\n return None\n\n course_su...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an existing channel or create a new one.
def get_or_create(session, channel_id, name): channel = session.query(Channel).get(channel_id) if channel is None: channel = Channel(channel_id, name) session.add(channel) session.commit() return channel
[ "def get_or_create_channel(db: Connection, channel_id: Union[int, str], device_id: int=None) -> Optional[Channel]:\n channel = get_channel(db, channel_id)\n if channel is not None:\n return channel\n\n channel = create_channel(db, device_id, channel_id)\n if channel is None:\n raise System...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
exponent(num, power) raises num to the specified power. Power defaults to 2.
def exponent(num, power=2): return num ** power
[ "def power(number, exp=2):\n return number ** exp", "def exponentiate(self, base, exponent):\n result = float (base) ** float (exponent)\n return result", "def mod_exponential_2(num, power, mod):\n #This one doesn't do anything special just calculates the mod of and exponential\n remainde...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the setup for the human user. It prints the current scores, then goes into rolling for the current move. It incorporates a while loop with a nested if/else statement. It also handles the case where the user rolls a '1'.
def human_move(computer_score, human_score): print("current computer score",computer_score) print("current human score",human_score) print("current difference",abs(computer_score - human_score),""" """) Restart = True temp_score=0 while Restart: dieroll=ro...
[ "def gameRoll(self):\n print(\"---SHAKE SHAKE AND ROLL---\\n\")\n self.cup.roll()\n print(\"Goal is: {}\".format(self.balance)) #added after video\n self.__payout(self.cup.getSum())\n self.printBalance()", "def play_round(self):\n print(\"-\" * 30)\n\n points = 0\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Direct Implementation of Ackermann function recursive approach
def ackermann(m: int, n: int) -> int: if m == 0: return n + 1 elif n == 0: return ackermann(m - 1, 1) else: return ackermann(m - 1, ackermann(m, n - 1))
[ "def ackermann(m, n):\n if m == 0:\n return n+1\n if n == 0:\n return ackermann(m-1, 1)\n\n if (m, n) in cache:\n return cache[m, n]\n else:\n cache[m, n] = ackermann(m-1, ackermann(m, n-1))\n return cache[m, n]", "def ack_3(n):\n return 2**(n+3)-3", "def acknow...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the URL for a file contained in a given dataset by version (optional) and filepath.
def get_dataset_file(self, dataset_id, file_path, version = None): if version == None: return self._get_content_from_url(self.api_url + '/data_sets/' + str(dataset_id) + '/file/' + quote(file_path)) else: return self._get_content_from_url(self.api_url + '/data_sets/' + str(datase...
[ "def _getFileUrl(rooturl, filename):\n return rooturl + \"/\" + filename", "def url(self, filename):\n urlbase = self.config.get('base_url')\n return urljoin(urlbase, filename)", "def get_download_path(self, version='latest'):\n raise NotImplementedError", "def get_file_url(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves JSON representation of a PIF from a given dataset.
def get_pif(self, dataset_id, uid, version = None): if version == None: return self._get_content_from_url(self.api_url + '/datasets/' + str(dataset_id) + '/pif/' + str(uid)) else: return self._get_content_from_url(self.api_url + '/datasets/' + str(dataset_id) + '/version/' + str(...
[ "def get_dataset_http(selected_dataset, access_token):\n r = requests.get(f\"{PENNSIEVE_URL}/datasets/{selected_dataset}\", headers={\"Authorization\": f\"Bearer {access_token}\"})\n r.raise_for_status()\n\n return r.json()", "def getfdict(dataset, name):\n import simplejson as json\n from urllib i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method to create an S3 presigned URL from the JSON.
def _get_s3_presigned_url(input_json): url = input_json['url'] return url['scheme']+'://'+url['host']+url['path']+'?'+url['query']
[ "def _get_s3_presigned_put_url(s3_client, bucket, filepath, md5sum, lifetime_sec):\n # S3's PUT Object parameters:\n # https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html\n method = \"put_object\"\n fields = {\n \"Bucket\": bucket,\n \"Key\": filepath,\n }\n\n response...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method to generate the url for creating a new data set.
def _get_create_data_set_url(self): return self.api_url+'/data_sets/create_dataset'
[ "def _get_create_data_set_version_url(self, data_set_id):\n return self.api_url+'/data_sets/'+str(data_set_id)+'/create_dataset_version'", "def build_dataset_url(app, uuid, basename, aset, extension):\n return '/apps/%s/datasets/%s/%s.%s.%s' % (app, uuid, basename, aset, extension)", "def create_url(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method to generate the url for updating a data set.
def _get_update_data_set_url(self, data_set_id): return self.api_url+'/data_sets/'+str(data_set_id)+'/update'
[ "def __set_update_endpoint(self, dataset: str):\n self.__update_endpoint = 'http://localhost:3030/' + dataset + '/update'", "def _construct_update_url(self, path):\n return self.redmine.url + path", "def _taskUpdateUrl(self, task):\n return '/tasks/gci/task/update/%s' %task.key().id()", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new data set version.
def create_data_set_version(self, data_set_id): url = self._get_create_data_set_version_url(data_set_id) return requests.post(url, headers=self.headers)
[ "def new_dataset(dataset_name: str):\n icedata.template.generate_dataset(dataset_name)", "def create(ds_name, description, tsuid_list):\n\n tdm = TemporalDataMgr()\n return tdm.import_data_set(data_set_id=ds_name, description=description, tsuid_list=tsuid_list)", "def create_dataset(self, size)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method to generate the url for creating a new data set version.
def _get_create_data_set_version_url(self, data_set_id): return self.api_url+'/data_sets/'+str(data_set_id)+'/create_dataset_version'
[ "def _get_create_data_set_url(self):\n return self.api_url+'/data_sets/create_dataset'", "def build_dataset_url(app, uuid, basename, aset, extension):\n return '/apps/%s/datasets/%s/%s.%s.%s' % (app, uuid, basename, aset, extension)", "def create_url(self, URL):\r\n return '{0}{1}'.format(self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the MJD information for the image data array.
def set_mjd(self, mjd, dmjd=None): from numpy import isscalar, asarray if isscalar(mjd): mjd_arr = asarray([mjd], dtype="float64") else: mjd_arr = mjd.copy() self.data["mjd"] = mjd_arr
[ "def image_dilate(self, image_dilate):\n\n self._image_dilate = image_dilate", "def set_data_date(self, data_date):\n self.record['data_date'] = data_date", "def ymd_to_mjd(self,date):\n\t\ty,m,d = [int(n) for n in date.split('/')]\n\t\treturn Time(dt.datetime(y,m,d)).mjd", "def loadDataMJD():\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the frequency information for the image data array.
def set_freq(self, freq): from numpy import isscalar, asarray if isscalar(freq): freq_arr = asarray([freq], dtype="float64") else: freq_arr = freq.copy() self.data["freq"] = freq_arr
[ "def set_frequency(self, frequency):\r\n self.obs.centerFreqHz = float(frequency)\r\n self.ref.centerFreqHz = float(frequency)\r\n self.ave.centerFreqHz = float(frequency)\r\n self.hot.centerFreqHz = float(frequency)\r\n self.cold.centerFreqHz = float(frequency)\r\n deltaNu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a specified geometric model to the image
def add_geomodel(self, geomodel, idx=(0, 0, 0), inplace=False): if inplace: outimage = self else: outimage = self.copy() # get x,y coordinates xg, yg = self.get_xygrid(angunit="rad", twodim=True) # compute the intensity imarr = geomodel.I(xg, yg)...
[ "def add(name):\n ierr = c_int()\n lib.gmshModelAdd(\n c_char_p(name.encode()),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelAdd returned non-zero error code: \",\n ierr.value)", "def AddGeom(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convolve the image with an input geometrical model
def convolve_geomodel(self, geomodel, inplace=False): from numpy import real, asarray, unravel_index, conj from numpy.fft import fftshift, ifftshift, fft2, ifft2 if inplace: outimage = self else: outimage = self.copy() # get the array shape imarr...
[ "def convolve(self, img):", "def convolve(im, kernel):\n if (len(im.shape)==2):\n im = np.expand_dims(im, 2)\n H, W, B = im.shape\n imc = np.zeros((H, W, B))\n for band in range(B):\n imc[:, :, band] = sps.correlate2d(im[:, :, band], kernel, mode='same')\n return imc", "def Convolve(image, kernel):\r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Regrid the image (only in x and y coordinates) using the grid defined in the input template image.
def regrid(self, template, preconv=True, order=1): from numpy import arange, zeros, meshgrid, unravel_index, asarray from scipy.ndimage import map_coordinates # get image grid information dx0 = self.meta["dx"].val dy0 = self.meta["dy"].val ixr0 = self.meta["ixref"].val ...
[ "def reproject_image(image, template, outfile='auto', overwrite=True):\n\n import numpy as np\n from astropy.io import fits\n from reproject import reproject_interp\n\n print(\"Reprojecting \"+image+\" onto \"+template)\n print(\"This can take several minutes for large cubes.\")\n\n # load files\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a FITS Image in ehtim's format into an imdata.Image instance.
def load_fits_ehtim(cls, infits): import astropy.io.fits as pf from numpy import abs, deg2rad from astropy.coordinates import SkyCoord from ..util.units import DEG isfile = False if isinstance(infits, str): hdulist = pf.open(infits) isfile = True ...
[ "def get_im(self):\n # Clear out HDUList in case we fail\n self.HDUList = None\n if not self.CCDCamera.ImageReady:\n raise EnvironmentError('CCD Camera image is not ready')\n # For some reason, we can't get at the image array or its FITS\n # header through CCDCamera.Ima...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a FITS Image in CASA's format into an imdata.Image instance.
def load_fits_casa(cls, infits, imdtype=None): import astropy.io.fits as pf import numpy as np from numpy import abs, deg2rad, arange, array from numpy import float32, float64, int32, int16 from astropy.coordinates import SkyCoord from astropy.time import Time fro...
[ "def load_image(self, image, get_meta=False):\n reader = LoadImageCzi()\n image = reader.load_image(image, get_meta_data=True)\n log.info(\n \"Loaded file using aicsimage. File path: {}.\".format(\n image.get_meta(\"aics_filePath\")\n )\n )\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replaces all interfaces in a specified VLAN whose administrative mode is set to Access, moving them to a different VLAN. > current_vlan VLAN to match access ports currently configured in this VLAN > new_vlan VLAN to assign ports
def replace_interfaces_access_vlan(self, current_vlan, new_vlan, backup=True): all_interfaces = self.get_interfaces_switchport() current_interface_config = self.get_interfaces_config() interface_backup = [] config_set = [] for interface in all_interfaces: ...
[ "def replace_interfaces_voice_vlan(self, current_vlan, new_vlan, backup=True):\n\n all_interfaces = self.get_interfaces_switchport()\n\n current_interface_config = self.get_interfaces_config()\n interface_backup = []\n\n config_set = []\n\n for interface in all_interfaces:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replaces the voice VLAN for all interfaces configured with the specified current voice VLAN > current_vlan Current voice VLAN to match > new_vlan VLAN to assign ports
def replace_interfaces_voice_vlan(self, current_vlan, new_vlan, backup=True): all_interfaces = self.get_interfaces_switchport() current_interface_config = self.get_interfaces_config() interface_backup = [] config_set = [] for interface in all_interfaces: if interf...
[ "def replace_interfaces_access_vlan(self, current_vlan, new_vlan, backup=True):\n\n all_interfaces = self.get_interfaces_switchport()\n\n current_interface_config = self.get_interfaces_config()\n interface_backup = []\n \n config_set = []\n\n for interface in all_interfaces...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure an IP address on an interface. Need to improve this function for checking etc. > interface Interface syntax > ip IP address > mask Subnet mask
def set_interface_ip(self, interface, ip, mask): cmds = ['interface %s' %(interface), 'ip address %s %s' %(ip, mask)] output = self.iosapi.bcp_send_config_command(self.iosapi.netmiko_session, cmds) self.iosapi.bcp_log("info", "(%s) set_interface_ip() : Attempting to set interface %s IP" %(__na...
[ "def change_addr_interface(self, name, ip, mask):\n\n try:\n with self.ipdb_controller.interfaces[name] as iface:\n iface.add_ip(ip+'/'+mask)\n except Exception:\n logging.error('Cannot add port to interface')\n return", "def set_static_ip(self, ip, if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set interface mode to either Access or Trunk > interface Interface Syntax > mode access or trunk
def set_l2_interface_mode(self, interface, mode): modes = ['access', 'trunk'] if mode not in modes: print("%s is an Invalid mode... Valid modes are: %s" %(mode, modes)) self.iosapi.bcp_log("info", "(%s) set_l2_interface_mode() : Invalid mode %s for interface %s" %(__name__, mod...
[ "def _enable_and_set_mode(self, interface, mode):\n # Enable switching\n url = self._construct_url(interface)\n payload = '<switchport></switchport>'\n self._make_request('POST', url, data=payload,\n acceptable_error_codes=(409,))\n\n # Set the interface ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set Portfast on either Access port or Trunk port (Need to improve this function for additional checking) > interface Interface Syntax > enabled Enable Portfast
def set_l2_stp_portfast(self, interface, enabled=True, mode='access'): cmds = ['interface %s' %(interface)] if mode == 'access': cmds.append('spanning-tree portfast') else: cmds.append('spanning-tree portfast trunk') output = self.iosapi.bcp_send_config_command...
[ "def enable_ports(self):\n pass", "def enable_ports(module, internal_ports, task, msg):\n cli = pn_cli(module)\n clicopy = cli\n cli += ' switch-local port-config-show format enable no-show-headers '\n if 'off' in run_command(module, cli, task, msg).split():\n cli = clicopy\n cli ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns JSON type format for interface switchports
def get_interfaces_switchport(self): cmd = 'show interfaces switchport' interface_mapper = { 'Gi' : 'GigabitEthernet', 'Fa' : 'FastEthernet', 'TenGi' : 'TenGigabitEthernet' } output = self.iosapi.bcp_send_command(self.iosapi.netmiko_session, cmd...
[ "def _get_switch_port_dict(self):\n rec_dict = {'id': \"phy1234\",\n 'switch_id': \"test_switch1\",\n 'port_name': \"Tengig0/1\",\n 'lag_id': None}\n return rec_dict", "def ichipassay_Type(instance):\n return \"{} ({})\".format(instance.Typ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns JSON type format for show ip int brief
def get_ip_int_brief(self): cmd = 'show ip interface brief' output = self.iosapi.bcp_send_command(self.iosapi.netmiko_session, cmd) self.iosapi.bcp_log("info", "(%s) get_ip_int_brief() : Attempting to run show ip int brief" %(__name__)) return(self.iosapi.textfsm_extractor('cisco_ios_s...
[ "def show_ip(): #TODO\n pass", "def get_ip_details(ip, analyst):\n\n allowed_sources = user_sources(analyst)\n ip = IP.objects(ip=ip, source__name__in=allowed_sources).first()\n template = None\n args = {}\n if not ip:\n template = \"error.html\"\n error = ('Either no data exists f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the simple version of vector note and vote, just summing over all results for each (lang_stim, vowel, lang_indiv)
def get_v1_vectors(file_results_assimilation): f = open(file_results_assimilation, 'r') ind = f.readline().split(',') dico_all = {'english':{}, 'french':{}} for line in f: new_line = line.replace('\n', '').split(',') language_indiv = new_line[ind.index('language_indiv')] lang_sti...
[ "def _vectorize_and_add(self, text):\n words = text.split()\n sum = np.zeros(self.word_model.vector_size)\n for w in words:\n try:\n v = self.word_model[w]\n sum += v\n except:\n pass\n return sum", "def sum_lang(self):\n temp_d = {}\n for repo in self.__re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Broadcast where. Implements out[i] = new_val[i] * mask + old_val[i] * (1 - mask)
def _list_bcast_where(F, mask, new_val_l, old_val_l): return [F.broadcast_mul(new_val, mask) + F.broadcast_mul(old_val, 1 - mask) for new_val, old_val in zip(new_val_l, old_val_l)]
[ "def __apply_input_mask(self):\n with torch.no_grad():\n # apply the input mask\n for tid, in_tensor in enumerate(self.dummy_input):\n if isinstance(in_tensor, torch.Tensor) and self.in_masks[tid] is not None:\n # in_tensor.data = in_tensor.data * \\\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Generates the initial decoder states based on the encoder outputs.
def init_state_from_encoder(self, encoder_outputs, encoder_valid_length=None): raise NotImplementedError
[ "def init_state(self, src, memory_bank, encoder_final):\n def _fix_enc_hidden(hidden):\n # The encoder hidden is (layers*directions) x batch x dim.\n # We need to convert it to layers x batch x (directions*dim).\n assert self.bidirectional_encoder\n if self.bidire...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode the inputs given the states and valid sequence length.
def __call__(self, inputs, states=None, valid_length=None): #pylint: disable=arguments-differ return super(TransformerEncoder, self).__call__(inputs, states, valid_length)
[ "def _add_encoder(self, encoder_inputs, seq_len, num_layers=1):\r\n\t\twith tf.variable_scope('encoder'):\r\n\t\t cell_fw = self.get_rnn_cell(num_layers)\r\n\t\t cell_bw = self.get_rnn_cell(num_layers)\r\n\t\t (encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, encoder_inputs, d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
One-step-ahead decoding of the Transformer decoder.
def __call__(self, step_input, states): #pylint: disable=arguments-differ return super(TransformerDecoder, self).__call__(step_input, states)
[ "def decode(self, data):\n\t\traise NotImplementedError()", "def decode_step(self, prev_state, prev_tokens, **flags):\n \n prev_gru0_state, enc_seq, enc_mask, _ = prev_state\n attn, attn_prob = self.attn(enc_seq, prev_gru0_state, enc_mask)\n \n x = self.emb_out(prev_tokens)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate Source-to-Distortion Ratio improvement (SDRi).
def cal_SDRi(src_ref, src_est, mix): num = 0 new_sdr = 0 orig_sdr = 0 avg_SDRi = 0 for ref, est in zip (src_ref, src_est): num = num + 1 src_anchor = mix sdr, sir, sar, popt = bss_eval_sources(ref, est) new_sdr = new_sdr + sdr sdr0, sir0, sar0, popt0 = bss_eva...
[ "def compute_snr(self, doplot='online'):\n\n # Apply extinction\n self.apply_atmos_ext()\n # Apply frontend throughput\n self.apply_throughput_front()\n # Apply injection efficiency\n self.apply_injeff()\n # Apply backend throughput\n self.apply_throughput_bac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find and return all matches in the POST request. 'name' is a necessary parameter in the POST request.
def names_find(): if not (request.json and 'name' in request.json): abort(400) search = Search() names = search.total2(request.json['name']) logging.info(type(names)) if names: return jsonify({'names': names}), 201 else: abort(404)
[ "def search_match(value):\n names=[]\n\n for name,values in params.iteritems():\n for v in values:\n if v == value: \n names.append(name)\n\n return names", "def __load_fields_from_POST(self):\n\n self.request_url = self.data.request_url_root.split('?', 1)[0]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select the best model for self.this_word based on BIC score, for n between self.min_n_components and self.max_n_components
def select(self): warnings.filterwarnings("ignore", category=DeprecationWarning) largest_BIC = float("inf") # return value of highest average C.V best_model = self.base_model(self.n_constant) # the corresponding model with top score for n_components in range(self.min_n_compon...
[ "def select(self):\n\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n results = list()\n for num_components in range(self.min_n_components, self.max_n_components+1):\n try:\n model = self.base_model(num_components) #Build the model\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In edit mode, we create 3 buttons when 'b' is pressed on a selected character: one to adjust the horizontal speed, another to adjust the horizontal speed when sprinting, and the last one for the height of the jump.
def create_buttons(self): pos1 = [self.pos[0] + self.width + 10, self.pos[1] + self.height//2] pos2 = [self.pos[0]+self.width//2, self.pos[1]+self.height+10] pos3 = [self.pos[0], self.pos[1] + self.height] b1 = Button(self.game, pos1, 0, 0, 'speed : ', self.speed...
[ "def create_character(self):\n def on_face_default(button):\n \"\"\"Handle pressing SQUARE button.\"\"\"\n self.character_id = 0\n for b in self.character_buttons:\n self.objects.remove(b)\n self.mouse_handlers.remove(b.handle_mouse_event)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark a challenge as solved.
def mark_as_solved(self, solver_list, solve_date=None): self.is_solved = True self.solver = solver_list self.solve_date = solve_date or int(time.time())
[ "def unmark_as_solved(self):\n self.is_solved = False\n self.solver = None", "def wait_for_challenge_to_be_solved():\n input(\"Please solve the challenge. When done, press Enter to continue...\")", "def check_solve():\n data = request.form or request.get_json()\n\n team_id = str(data[\"te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unmark a challenge as solved.
def unmark_as_solved(self): self.is_solved = False self.solver = None
[ "def unflag(self):\n self._flagged = False", "def test_reset_unmask(self):\n module = CapaFactory.create(xml=self.common_shuffle_xml)\n get_request_dict = {CapaFactory.input_key(): 'mask_0'}\n module.submit_problem(get_request_dict)\n # On reset, 'old_state' should use unmasked ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the list of tags for this challenge by adding the given tag. Return True if a modification was made, False otherwise.
def add_tag(self, tag): dirty = False if tag not in self.tags and len(self.tags) < self.MAX_TAGS: # The tag doesn't exist and there's room to add it, let's do so self.tags.append(tag) dirty = True return dirty
[ "def add_tag(self, tag):\n self.tags = list(set(self.tags or []) | set([tag]))", "def tag_add(self, tag: str):\n self.tags.append(tag)", "def add_tag(self, tag: str) -> None:\n if tag in self._tag_dict:\n self._tag_dict[tag] += 1\n else:\n self._tag_dict[tag] = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the list of tags for this challenge by removing the given tag. Return True if a modification was made, False otherwise.
def remove_tag(self, tag): dirty = False if tag in self.tags: # The tag exists, let's remove it self.tags.remove(tag) dirty = True return dirty
[ "def remove_tag(self, tag):\n self.tags = list(set(self.tags or []) - set([tag]))", "def del_tag(self, tag):\n\n numbers = self.numbers_from_tag(tag)\n if len(numbers) > 0:\n return False\n\n try:\n # Delete links and tag\n self.link.filter(tag = tag).d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a player from the list of working players using a given Slack user ID.
def remove_player(self, user_id): try: del self.players[user_id] except KeyError: # TODO: Should we allow this to percolate up to the caller? pass
[ "def remove_player(self, player):\n try:\n self.players.remove(player)\n except ValueError:\n pass", "def remove_player(self, player_shot: Name):\n del self.players[player_shot]\n for name, player in self.players.items():\n player.remove_player(player_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new terminal.
def create(self): name, term = self.new_named_terminal() return self._finish_create(name, term)
[ "def create_terminal(name=None):\n if name is not None and name not in supported_terminals.keys():\n raise UnsupportedTerminal(\"%s is not a supported terminal type [%s]\" %\n (name, supported_terminals.keys()))\n if name == konsole:\n if not rocon_python_utils.system.whi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a list of all running terminals.
def list(self): models = [self.get_terminal_model(name) for name in self.terminals] # Update the metric below to the length of the list 'terms' TERMINAL_CURRENTLY_RUNNING_TOTAL.set( len(models) ) return models
[ "def get_terminals(self):\n return self.rules.getTerminals()", "def listPorts(self):\n ports = glob.glob('/dev/tty[A-Za-z]*')\n print(ports)", "def show_running_threads(self):\n\n running_threads = []\n for t in self.threads:\n if t.is_alive():\n runn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that terminal 'name' exists and raise 404 if not.
def _check_terminal(self, name): if name not in self.terminals: raise web.HTTPError(404, u'Terminal not found: %s' % name)
[ "def commandNotFound():\n print \"Doesn't exist, yo\"", "async def name_exists(self, name: str) -> bool:", "def room_not_exist_by_name(room_name):\n return Response(\"room %s is not exist\" % room_name, 400)", "def handle_404(client, nick, cmd):\n return ('`%s` is not a valid command. ' % cmd + \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the culler if 'cull_inactive_timeout' is greater than zero. Regardless of that value, set a flag that we've been here.
def _initialize_culler(self): if not self._initialized_culler and self.cull_inactive_timeout > 0: if self._culler_callback is None: loop = IOLoop.current() if self.cull_interval <= 0: # handle case where user set invalid value self.log.warning("In...
[ "def _cull(self):\n right_now = time.time()\n\n cull_from = -1\n for index in xrange(len(self._call_times)):\n if right_now - self._call_times[index].time >= self._seconds_per_timeunit:\n cull_from = index\n self._outstanding_calls -= self._call_times[in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disable our exception hook and restore to the system default.
def disable(): sys.excepthook = sys.__excepthook__
[ "def unhook_exception_ipdb():\n assert hasattr(_custom_exception_hook, 'origin_hook')\n sys.excepthook = _custom_exception_hook.origin_hook", "def uninstall_excepthook():\n\tsys.excepthook = sys.__excepthook__", "def hook_exception_ipdb():\n if not hasattr(_custom_exception_hook, 'origin_hook'):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a generic SSH connection and returns it.
def create_connection(self): ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) return ssh
[ "def createSshClient(self):\n try:\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(self.ip, port=self.port, username=self.username, password=self.password, allow_agent=False)\n channel = client.invoke_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a command on the master node (first IP in the list)
def run_on_master(self, command, wait=True): task = self.thread_pool.submit(self.run_on_node, self.public_ips[0], command) if wait: while not task.done(): continue return task.result() return task
[ "def run(self):\n\n self.logger.info('Master started...')\n\n self.run_command('build/master/master --procam-total %d' % len(self.hosts))", "def add_master_node(self, host_ip):\n\t\tself.swarm_manager.add_master_node(host_ip)", "def _run_nodetool_command(self, cmd, *args, **kwargs):\n return utils....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a command on all worker nodes
def run_on_workers(self, command, wait=True): tasks = [self.thread_pool.submit(self.run_on_node, worker, command) \ for worker in self.public_ips[1:]] if wait: while not all([i.done() for i in tasks]): continue return [i.result() for i in tasks] ...
[ "def run_on_all(self, command, wait=True):\n '''tasks = [self.thread_pool.submit(self.node_bash, node, command) \\\n for node in self.public_ips]'''\n tasks = [self.thread_pool.submit(self.run_on_node, worker, command) \\\n for worker in self.public_ips]\n if wai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a command on all nodes
def run_on_all(self, command, wait=True): '''tasks = [self.thread_pool.submit(self.node_bash, node, command) \ for node in self.public_ips]''' tasks = [self.thread_pool.submit(self.run_on_node, worker, command) \ for worker in self.public_ips] if wait: ...
[ "def run_on_workers(self, command, wait=True):\n tasks = [self.thread_pool.submit(self.run_on_node, worker, command) \\\n for worker in self.public_ips[1:]]\n if wait:\n while not all([i.done() for i in tasks]):\n continue\n return [i.result() for i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that returns the status and duration of the job
async def job_status(request): job_id = request.match_info['job_id'] try: chunks = await get_job_chunks_status(request.app['engine'], job_id) except JobNotFound as e: raise web.HTTPNotFound(text="Job '%s' not found" % job_id) from e now = datetime.datetime.now() def elapsed_time(s...
[ "def _job_status(job):\n\n if 'activeRuns' in job:\n return \"Running\"\n # short circuit will prevent failure\n elif 'schedules' not in job or not job['schedules']:\n return \"Unscheduled\"\n else:\n return \"Scheduled\"", "def status(self) -> 'outputs.JobStatusResponse':\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new error from a given error locale.
def __init__(self, error_locale, status_code=None): self.error_code = error_locale[0] self.error_message = error_locale[1] if status_code is not None: self.status_code = status_code
[ "def from_validation_error(\n cls, error: ValidationError, *, source_path: Optional[str] = None, error_on_unknown_field=False\n ):\n obj = cls.from_validation_messages(\n error.messages, data=error.data, error_on_unknown_field=error_on_unknown_field\n )\n if source_path:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dict form of the error.
def to_dict(self): return { 'error_code': self.error_code, 'error_message': self.error_message }
[ "def _get_error_dict(self, error_code, error_msg):\n return {\n 'error': error_code,\n 'error_msg': error_msg,\n }", "def _error_details(self):\n return ErrorDetails(\n protocol=self._protocol_error,\n noniterable_str=self._noniterable_str_error,\n typed...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that loads the CSO from the file into a dictionary.
def load_cso(): with open(CSO_PATH, 'r') as ontoFile: topics = {} topics_wu = {} broaders = {} narrowers = {} same_as = {} primary_labels = {} primary_labels_wu = {} ontology = co.reader(ontoFile, delimiter=';') for triple in ontology: ...
[ "def loadPhaseCal(self, phaseCalFile):\n #load the dictionary raw\n try:\n with open(phaseCalFile, \"r\") as stream:\n dataMap = yaml.safe_load(stream) #just assume its correct\n except IOError: \n return None\n return dataMap", "def load_cows(filename):\n cow_dict = {} # cow dic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that loads CSO. This file has been serialised using Pickle, allowing it to be loaded quickly.
def load_ontology_pickle(): check_ontology() fcso = pickle.load(open(CSO_PICKLE_PATH, "rb")) return fcso
[ "def load_cso():\n\n with open(CSO_PATH, 'r') as ontoFile:\n topics = {}\n topics_wu = {}\n broaders = {}\n narrowers = {}\n same_as = {}\n primary_labels = {}\n primary_labels_wu = {}\n ontology = co.reader(ontoFile, delimiter=';')\n\n for triple in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that loads both CSO and the Word2vec model. These two files have been serialised using Pickle, allowing them to be loaded quickly.
def load_ontology_and_model(): check_ontology() check_model() fcso = pickle.load(open(CSO_PICKLE_PATH, "rb")) fmodel = pickle.load(open(MODEL_PICKLE_PATH, "rb")) print("Computer Science Ontology and Word2vec model loaded.") return fcso, fmodel
[ "def load_word2vec_classification_model():\n with open(W2V_CLASSIFICATION_MODEL_LOC, \"rb\") as f:\n model = pickle.load(f)\n\n return model", "def load_word2vec():\n return Word2Vec.load(MODEL_SOURCE)", "def load_model(self, path):\n return gensim.models.word2vec.Word2Vec.load(path)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that loads both CSO and the cached Word2vec model. The ontology file has been serialised with Pickle. The cached model is a JSON file (a dictionary) containing all words in the corpus vocabulary with the corresponding CSO topics. The latter has been created to speed up the process of retrieving CSO topics given ...
def load_ontology_and_chached_model(): check_ontology() check_cached_model() fcso = pickle.load(open(CSO_PICKLE_PATH, "rb")) with open(CACHED_MODEL) as f: fmodel = json.load(f) print("Computer Science Ontology and cached model loaded.") return fcso, fmodel
[ "def load_ontology_and_model():\n\n check_ontology()\n check_model()\n\n fcso = pickle.load(open(CSO_PICKLE_PATH, \"rb\"))\n fmodel = pickle.load(open(MODEL_PICKLE_PATH, \"rb\"))\n\n print(\"Computer Science Ontology and Word2vec model loaded.\")\n return fcso, fmodel", "def load_word2vec():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that checks if the ontology is available. If not, it checks whether a CSV version exists and then creates the pickle file.
def check_ontology(): if not os.path.exists(CSO_PICKLE_PATH): print("Ontology pickle file is missing.") if not os.path.exists(CSO_PATH): print("The csv file of the Computer Science Ontology is missing. Attempting to download it now...") download_file(CSO_REMOTE_URL, CSO_PAT...
[ "def check_model():\n\n if not os.path.exists(MODEL_PICKLE_PATH):\n print('[*] Beginning model download from', MODEL_PICKLE_REMOTE_URL)\n download_file(MODEL_PICKLE_REMOTE_URL, MODEL_PICKLE_PATH)", "def finishedCSV():\n exceptionRaised = False\n try:\n # Tries to open the file\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that checks if the model is available. If not, it will attempt to download it from a remote location, typically hosted on the CSO Portal.
def check_model(): if not os.path.exists(MODEL_PICKLE_PATH): print('[*] Beginning model download from', MODEL_PICKLE_REMOTE_URL) download_file(MODEL_PICKLE_REMOTE_URL, MODEL_PICKLE_PATH)
[ "def check_cached_model():\n\n if not os.path.exists(CACHED_MODEL):\n print('[*] Beginning download of cached model from', CACHED_MODEL_REMOTE_URL)\n download_file(CACHED_MODEL_REMOTE_URL, CACHED_MODEL)", "def _download_model(self) -> None:\n if not self.interactive:\n if not se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that checks if the cached model is available. If not, it will attempt to download it from a remote location, typically hosted on the CSO Portal.
def check_cached_model(): if not os.path.exists(CACHED_MODEL): print('[*] Beginning download of cached model from', CACHED_MODEL_REMOTE_URL) download_file(CACHED_MODEL_REMOTE_URL, CACHED_MODEL)
[ "def check_model():\n\n if not os.path.exists(MODEL_PICKLE_PATH):\n print('[*] Beginning model download from', MODEL_PICKLE_REMOTE_URL)\n download_file(MODEL_PICKLE_REMOTE_URL, MODEL_PICKLE_PATH)", "def cache_exists(self):\n\n if not is_server(): # we only check the cache on the server\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that returns the primary (preferred) label for a topic, if this topic belongs to a cluster.
def get_primary_label(topic, primary_labels): try: topic = primary_labels[topic] except KeyError: pass return topic
[ "def predict_topic(self, document):\n if self.lda is None:\n print(\"ERROR in lda_topic_model.predict_topic(): Need to create_lda() before predicting topics.\")\n dict_lda = getattr(self.lda, 'id2word')\n lda_vector = self.lda[dict_lda.doc2bow(self.to_bow(document))]\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that climbs the ontology. This function might retrieve just the first broader topic or the whole branch up to the root.
def climb_ontology(cso, found_topics, climb_ont): all_broaders = {} inferred_topics = {} num_narrower = 1 if climb_ont == 'first': all_broaders = get_broader_of_topics(cso, found_topics, all_broaders) elif climb_ont == 'all': while True: """ recursively addi...
[ "def getRoot(wg):\n start = ''\n output = rdflib.URIRef(\"http://geographicknowledge.de/vocab/Workflow.rdf#output\")\n for s,p,o in (wg.triples((None, output, None))):\n start = s\n break\n objects = getNeighbours(wg,start)\n while (len(objects)>0):\n start = objects[0]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that returns all the broader topics for a given set of topics. It analyses the broader topics of both the topics initially found in the paper and the broader topics found at the previous iteration, incrementally providing a more comprehensive set of broader topics.
def get_broader_of_topics(cso, found_topics, all_broaders): topics = list(found_topics) + list(all_broaders.keys()) for topic in topics: if topic in cso['broaders']: broaders = cso['broaders'][topic] for broader in broaders: if broader in all_broaders: ...
[ "def get_topics(topics, mode='p', top=0.5):\n t = sorted(topics, key=lambda x: x[1], reverse=True)\n t2 = []\n s = 0\n i = 0\n if mode == 'p':\n while s < top and i < len(t):\n t2.append(t[i])\n s += t[i][1]\n i += 1\n elif mode == 'n':\n while i < to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that extracts the network from a given set of topics.
def get_network(cso, found_topics): if type(found_topics) is dict: list_of_topics = [] for key, value in found_topics.items(): list_of_topics += value list_of_topics = list(set(list_of_topics)) elif type(found_topics) is list: list_of_topics = found_topics from...
[ "def getNewsFromTopic(topic):\n articles = []\n mediagroups = rssUrlList[topic]\n for mediagroup, url in mediagroups.items():\n articles.extend(getNews(topic, mediagroup, url))\n return articles", "def parse_topics(self):\n #Client\n for item in self.client_config.get(\"topics\")...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that plots the network of topics. It mainly relies on networkx.
def plot_network(network): G = nx.DiGraph() labels = {} for node in network["nodes"]: G.add_node(node["label"]) labels[node["label"]] = r'$' + node["label"] + '$' for edge in network["edges"]: G.add_edge(edge["source"], edge["target"], kind=edge["kind"]) pos = nx.spring_la...
[ "def visualize(self):\n return nx.draw_networkx(self.graph_Hz)", "def plot_network(g) :\n rows, cols = np.where(g == 1)\n edges = zip(rows.tolist(), cols.tolist())\n gr = nx.DiGraph() # Calling the DIRECTED graph method\n gr.add_nodes_from(range(n))\n gr.add_edges_from(edges)\n # Add nod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that, for a given topic, returns its coverage. This coverage is computed based on how many of its descendants have been identified.
def get_coverage(cso, found_topics): coverage = {} if type(found_topics) is dict: list_of_topics = [] for key, value in found_topics.items(): list_of_topics += value list_of_topics = list(set(list_of_topics)) elif type(found_topics) is list: list_of_topics = fo...
[ "def quality_function(self, dict_of_clusters={}, medoids=np.ndarray(2)):\n quality = 0\n for node_i, cluster_i in dict_of_clusters.iteritems():\n quality += self.find_distance(source=node_i, target=medoids[cluster_i])\n\n return quality", "def conceptchaincover(self, uncovered=0.1)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the record matches the pattern, write to the matching filebase.
def write_if_match(self, record, pattern, time_str): # Find the compiled regex matching the pattern regex = self.compiled_filebase_map.get(pattern, None) if not regex: logging.error(f'System error: found no regex pattern matching "{pattern}"!') return None # If t...
[ "def write_filename(self, record, pattern, filename):\n\n # Are we currently writing to this file? If not, open/create it.\n if not filename == self.current_filename.get(pattern, None):\n logging.info('LogfileWriter opening new file: %s', filename)\n self.current_filename[pattern...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write record to filename. If it's the first time we're writing to this filename, create the appropriate FileWriter and insert it into the map for the relevant pattern.
def write_filename(self, record, pattern, filename): # Are we currently writing to this file? If not, open/create it. if not filename == self.current_filename.get(pattern, None): logging.info('LogfileWriter opening new file: %s', filename) self.current_filename[pattern] = filena...
[ "def write_if_match(self, record, pattern, time_str):\n # Find the compiled regex matching the pattern\n regex = self.compiled_filebase_map.get(pattern, None)\n if not regex:\n logging.error(f'System error: found no regex pattern matching \"{pattern}\"!')\n return None\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that matrices are equal if their elements are equal
def test_matrix_equality(self): m1 = matrices.Matrix(2, 2) m1.set_row(0, [1, 2]) m1.set_row(1, [1, 4]) m2 = matrices.Matrix(2, 2) m2.set_row(0, [1, 2]) m2.set_row(1, [1, 4]) self.assertTrue(m1 == m2) m2.set(1, 1, 50) self.assertFalse(m1 == m2)
[ "def test_eq(self):\n p = self.ab_pairs\n a = PairMatrix.empty(p)\n b = PairMatrix.empty(p)\n assert a is not b\n self.assertEqual(a, b)\n c = PairMatrix([1,2,3,4], p)\n d = PairMatrix([1,2,3,4], p)\n assert c is not d\n self.assertEqual(c, d)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }