Columns:
  query: string (length 9 to 9.05k)
  document: string (length 10 to 222k)
  negatives: list (length 19 to 20)
  metadata: dict
Get data from a URL as a Python dictionary
def get_data_as_dict(url):
    print(url)
    result = requests.get(url)
    data = json.loads(result.text)
    return data
[ "def fetch_data(url):\r\n response = requests.get(url)\r\n return response.json()", "def get_data(url):\n \n request_string = url \n r=requests.get(request_string)\n data = r.json() \n #print data.keys()\n meta, results = data[\"meta\"], data[\"results\"]\n \n return [meta, resu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draws the maze walls, the goal, and the ball at its starting location.
def drawMaze(self):
    self.mySquare = self.wallCanvas.create_rectangle(40, 40, 76, 76, fill="blue")
    self.goal = self.wallCanvas.create_rectangle(230, 250, 280, 300, fill="green", outline="green")
    text = self.wallCanvas.create_text(255, 275, text="GOAL", fill="white")
    wallBounds = [[0, 0, 1...
[ "def draw_maze(self):\n\n # plot the walls", "def draw_maze(self):\n self._display.fill(COLOURS['white'])\n \n for i in range(0, self.width + 1, 20):\n pdraw.line(self._display, COLOURS['black'], (i, 0), (i, self.height))\n for i in range(0, self.height + 1, 20):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks to see if the player's square is either touching a wall or touching the goal, and if so it marks the game as over and displays a gameover message. Note that find_overlapping always includes the square itself!
def checkForEndOfGame(self):
    # Find list of items on canvas that overlap with region of square
    (x1, y1, x2, y2) = self.wallCanvas.coords(self.mySquare)
    onItems = self.wallCanvas.find_overlapping(x1, y1, x2, y2)
    # If more than one overlaps, then the square is touching a wall or the goal ...
[ "def gameOverCheck(self, snake):\n gameOver = False\n headX = snake.snakeBody[-1][0]\n headY = snake.snakeBody[-1][1]\n\n if headX < 0:\n gameOver = True\n print(\"Collides with left Wall\")\n elif headY < 0:\n gameOver = True\n print(\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Callback function for the Quit button, closes the main window and ends the event loop.
def doQuit(self):
    self.mainWin2.destroy()
[ "def quit(self):\r\n self.root.quit()\r\n self.root.destroy()", "def shutdown_gui(self):\n Gtk.main_quit()", "def quit(self, event):\n self.Destroy()", "def close_window(_):\n root.destroy()", "def closeWindowCallback(self, event):\n\t\tself.EndModal(self.status)", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an Amazon Redshift cluster on AWS
def create_redshift_cluster(config, iam_role_arn, cluster_sg_id):
    try:
        response = redshift_client.describe_clusters(ClusterIdentifier=config.get('CLUSTER', 'CLUSTERIDENTIFIER'))
        print('Redshift Cluster already exists: ' + response['Clusters'][0]['ClusterIdentifier'])
        return None
    except:
        respon...
[ "def start_cluster(redshift, roleArn):\n global DWH_CLUSTER_TYPE, DWH_NODE_TYPE, DWH_NUM_NODES, \\\n DWH_DB, DWH_CLUSTER_IDENTIFIER, DWH_DB_USER, DWH_DB_PASSWORD\n print('Starting the cluster...')\n try:\n response = redshift.create_cluster( \n #HW\n ClusterTyp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
user submits an expense
def submit_expense(request):
    # TODO validation
    this_token = request.POST['token']
    this_user = User.objects.filter(token__token=this_token).get()
    if 'date' not in request.POST:
        now = datetime.now()
    Expense.objects.create(
        user=this_user,
        amount=request.POST['amount'],
        text=request.POST['t...
[ "def add_expense(user_name, expense_date, expense_amount, expense_description):\n\n return \"Expense added successfully!\"", "def test_expenses_post(self):\n pass", "def SUBMIT_EXPENSE_CATEGORY_ENTRY():\n print(\"DOING Expense CATEGORIES\")\n ASSET_INPUT = ExpenseCategory_ENTRY()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encodes a URL to a shortened URL.
def encode(self, longUrl: str) -> str:
    short = str(hash(longUrl))
    self.shorttolong[short] = longUrl
    return "http://tinyurl.com/" + short
[ "def encode(self, longUrl):\n global ID\n encoded = hex(ID).lstrip('0xX')\n url_map[ID] = longUrl\n ID += 1\n return 'http://shorturl.com/' + encoded", "def encode(self, longUrl: str) -> str:\n path = self.service.insert(longUrl)\n return 'https://a.com/' + path", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Torrents should be a list. If passed a string, it will be downloaded. file_filter will filter the files we want to download; this will be a list of strings that should match the file names.
def add_torrents(self, torrents, download_dir=None, file_filter=None):
    if not isinstance(torrents, (tuple, list)):
        assert isinstance(torrents, basestring)
        torrents = [torrents]
    if download_dir is None:
        download_dir = self.download_dir
    for torrent in torrents: ...
[ "def filter_torrent(self,torrent,file_filter):\r\n try:\r\n files_dict = self.wait_for_files(torrent,timeout=5*60)\r\n files = []\r\n for file_key in files_dict.keys():\r\n for file_name in file_filter:\r\n if re.match(file_name,files_dict[fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function will take a torrent and select only the relevant files on it; if there's nothing to use in it, it will remove it from the queue.
def filter_torrent(self, torrent, file_filter):
    try:
        files_dict = self.wait_for_files(torrent, timeout=5*60)
        files = []
        for file_key in files_dict.keys():
            for file_name in file_filter:
                if re.match(file_name, files_dict[file_key]['name'], r...
[ "def cleanup_completed_torrents(self):\r\n torrent_ids = self.transmission.list().keys()\r\n torrent_ids = filter(lambda my_id: self.check_torrent_name(self.transmission.get_torrent(my_id)._fields['name'].value),torrent_ids)\r\n # Now we have only our interesting torrents\r\n for my_id i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
At this point we'll query the Transmission database, get all appropriate files, and filter them. We'll also stop the torrent.
def cleanup_completed_torrents(self):
    torrent_ids = self.transmission.list().keys()
    torrent_ids = filter(lambda my_id: self.check_torrent_name(self.transmission.get_torrent(my_id)._fields['name'].value), torrent_ids)
    # Now we have only our interesting torrents
    for my_id in torrent_ids...
[ "def filter_torrent(self,torrent,file_filter):\r\n try:\r\n files_dict = self.wait_for_files(torrent,timeout=5*60)\r\n files = []\r\n for file_key in files_dict.keys():\r\n for file_name in file_filter:\r\n if re.match(file_name,files_dict[fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Imports a photo from a photo import schema
def import_photo(photo: PhotoImportSchema):
    pass
[ "def import_image(imgfn):\n r = rio.open(imgfn)\n metadata = r.meta.copy()\n img = r.read()\n \n return img, metadata", "def import_image(filepath, landmark_resolver=same_name, normalise=True):\n kwargs = {'normalise': normalise}\n return _import(filepath, image_types,\n lan...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save objects.
>>> Storage.save_objects(objects=[0, 1, 2], save_path='__cases/obj.pk')
True
def save_objects(objects, save_path):
    if os.path.exists(save_path):
        os.remove(save_path)
    with open(save_path, 'wb') as file:
        pickle.dump(objects, file, pickle.HIGHEST_PROTOCOL)
    return True
[ "def save_all(cls, objects):\n db.session.bulk_save_objects(objects)\n db.session.commit()", "def save_objects(path, frame, objects):\n full_path = path + str(frame) + '/'\n create_folder(full_path)\n cnt = 0\n for obj in objects:\n img = Image.fromarray(obj.cu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load objects.
>>> Storage.load_objects(saved_path='__cases/obj.pk')
[0, 1, 2]
def load_objects(saved_path):
    objects = None
    if os.path.exists(saved_path):
        with open(saved_path, 'rb') as file:
            objects = pickle.load(file)
    return objects
[ "def load_obj(load_dir):\r\n return pickle.load(open(load_dir, 'rb'))", "def loadPrefObjects():\n pass", "def load_obj(path):\n with open(path, \"rb\") as f:\n return pickle.load(f)", "def load_object(self, obj):\n pass", "def load_pickled_objects(name, bucket=CAMD_S3_BUCKET):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract inputs from features dictionary.
def _extract_input(self, feat_dict):
    sa_xyz = feat_dict['sa_xyz']
    sa_features = feat_dict['sa_features']
    assert len(sa_xyz) == len(sa_features)
    return sa_xyz, sa_features
[ "def get_input_features(self):\n input_features = self.get_input_example().columns\n\n return list(input_features)", "def input_features(self, x):\n x = self.add_on_layers(x)\n return x", "def _extract_features(self, preprocessed_inputs): \n preprocessed_inputs = shape_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if a substring occurs within a string.
def is_substring(string, substring):
    return substring in string
[ "def isSubstring(str1, str2):", "def findSubstring(self, s):\n\t\treturn self.traverse(s) is not None", "def checkSubstring(str1, str2):\n\tlen1 = len(str1)\n\tlen2 = len(str2)\n\tfor i in range(len2-len1+1):\n\t\tif str1 == str2[i:len1 + i]:\n\t\t\treturn True\n\treturn False", "def is_substring(string1, str...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if str2 is a rotation of str1 using only one call of is_substring.
def string_rotation(str1, str2):
    if len(str1) == len(str2):
        return is_substring(str1 + str1, str2)
    return False
[ "def rotateString(self, A: str, B: str) -> bool:\n return len(A) == len(B) and B in A + A", "def isSubstring(str1, str2):", "def checkSubstring(str1, str2):\n\tlen1 = len(str1)\n\tlen2 = len(str2)\n\tfor i in range(len2-len1+1):\n\t\tif str1 == str2[i:len1 + i]:\n\t\t\treturn True\n\treturn False", "def ca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
should 404 if no events for requested semester
def test_no_events(self, db, client):
    response = client.get(reverse("events:by-semester", args=["spring", 2099]))
    assert response.status_code == 404
[ "def events(request):\n try:\n if request.method == 'GET':\n events_list = Events.retrieve_all()\n if events_list is not []: # not empty list\n node_id = request.GET.get('node_id', '')\n user_id = request.GET.get('user_id', '')\n status =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overload the div operator for HerosStats objects to do a numpy array division on the data portion of the attributes.
def __div__(self, other):
    res = np.array(zip(*self.hero_and_value)[1]) / np.array(zip(*other.hero_and_value)[1], dtype=float)
    hero_and_value = zip(zip(*self.hero_and_value)[0], res)
    hs = HerosStats(self.stat_name, hero_and_value)
    return hs
[ "def _div_scalar(data=None, scalar=_Null, name=None, attr=None, out=None, **kwargs):\n return (0,)", "def _scatter_elemwise_div(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):\n return (0,)", "def _DivScalar(data=None, scalar=_Null, name=None, attr=None, out=None, **kwargs):\n return (0,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the heros attribute of this object.
def set_heros(self):
    heros = []
    for pair in self.hero_and_value:
        heros.append(pair[0])
    self.heros = heros
[ "def set_historico(self):\n\n pass", "def set_children(self, children) :\n self.__children = children", "def set(self, chore):\n\n # Just set using the node and dumped data\n\n self.redis.set(f\"/chore/{chore['id']}\", json.dumps(chore))", "def set_halo(self, halo_dict=None):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to return the list of hero-to-value mappings for a HerosStats instance.
def get_data(self):
    return map(lambda pair: pair[1], self.hero_and_value)
[ "def observed_stat(heroes):\n\n return ...", "def set_heros(self):\n heros = []\n for pair in self.hero_and_value:\n heros.append(pair[0])\n self.heros = heros", "def stats(self):\n for hero in self.heroes:\n if hero.deaths == 0:\n hero.deaths ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can implement a configurable callable behaviour by implementing the call(...) method. Of course, it is also backward compatible with a legacy __call__ override.
def __call__(self, *args, **kwargs):
    return self.call(*args, **kwargs)
[ "def call(fn, args=(), kwargs={}):\r\n return fn(*args, **kwargs)", "def __call__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def _do_call(call):\n try:\n return _CALL_CACHE[call]\n except KeyError:\n if callable(call[0]):\n result =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to fix fields in the vcf header
def fix_vcf_header(vcf_reader):
    # dbNSFP_clinvar_clnsig has an Integer type but sometimes it is a String, e.g. 2|2
    vcf_reader.infos['dbNSFP_clinvar_clnsig'] = pyvcf.parser._Info("dbNSFP_clinvar_clnsig", 1, "String", "Field 'clinvar_clnsig' from dbNSFP", None, None)
    return vcf_reader
[ "def set_info_header(vcf):\n vcf.infos = {\n 'IMPRECISE': py_vcf.parser._Info(\"IMPRECISE\", 0, \"Flag\", \"Imprecise structural variant\", \"NanoSV\", __version__),\n 'SVTYPE': py_vcf.parser._Info(\"SVTYPE\", 1, \"String\", \"Type of structural variant\", \"NanoSV\", __version__),\n 'SVMETH...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to get the sample name from the bam file
def get_sample_name(bamfile):
    header = bamfile.header
    if 'RG' in header:
        if type(header['RG']) is list:
            return header['RG'][0]['SM']
        else:
            return header['RG']['SM']
    return False
[ "def get_sample_name_from_bam(bam):\n\n sample_name = str(subprocess.check_output(\"%s view -H %s | egrep '^@RG'\"%(samtools, bam), shell=True)).split(\"ID:\")[1].split(\"\\\\t\")[0]\n \n return sample_name", "def strainName(self):\n basename = os.path.basename(self.bamfile)\n if basename.l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to check a pileup read. Returns True if the read needs to be kept and returns False if read can be skipped.
def check_pileupread(pileupread):
    if pileupread.alignment.is_duplicate:
        return False
    if pileupread.is_del:
        return False
    if pileupread.is_refskip:
        return False
    if not pileupread.query_position:
        return False
    if pileupread.alignment.mapq < args.mapq: ...
[ "def read_ok(read):\n if any([ord(c)-33 < _BASE_QUAL_CUTOFF for c in list(read.qual)]):\n return False\n else:\n return True", "def keep(self, read):\n\t\tif self.discard_trimmed and read.trimmed:\n\t\t\treturn False\n\t\tif self.discard_untrimmed and not read.trimmed:\n\t\t\tr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to check a record. Returns True if the record needs to be kept and returns False if record can be skipped.
def check_record(record):
    if record.QUAL < args.QUAL:
        return False
    if record.FILTER:
        return False
    return True
[ "def _check_record_ok(line):\n if 3 != len(line):\n logger.debug(\"Broken record: %s\", line)\n return False\n\n if line[1] is None or len(line[1]) == 0:\n logger.debug(\"Broken url: %s\", line)\n return False\n\n return True", "def has_record(self) -> bool:\n # since a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructor of the class. This method initializes the attributes needed to run the Algebraic version of PageRank algorithm. It uses the `tfgraph.TransitionMatrix` as transition matrix.
def __init__(self, sess: tf.Session, name: str, graph: Graph, beta: float,
             writer: tf.summary.FileWriter = None, is_sparse: bool = False) -> None:
    name = name + "_alg"
    T = TransitionMatrix(sess, name, graph)
    PageRank.__init__(self, sess, name, beta, T, writer, is_sparse)
[ "def __init__(self, alphabet, states, metric_function, reduced=False):\n self.states = states\n self.alphabet = alphabet\n self.previous_states = []\n self.next_states = []\n self.metric_function = metric_function\n self.setup_trellis(reduced)", "def __init__(self, prior,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The admin URL set has to be updated, or all newly registered models will be shown as disabled in the admin area.
def update_admin_urls():
    # Delete the old admin URLs
    old_pattern = None
    admin_regex = r'^admin/'
    project_urls = import_module(settings.ROOT_URLCONF)
    for url_item in project_urls.urlpatterns:
        try:
            if url_item.app_name == 'admin':
                old_pattern = url_item ...
[ "def get_admin_url(self, obj):\n info = (self.opts.app_label, self.opts.model_name)\n return reverse('admin:%s_%s_changelist' % info)", "def getAdminView(self):\n return include(admin.site.urls)", "def get_admin_urls_for_registration(self):\n urls = ()\n for instance in self.m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for humangenes_get
def test_humangenes_get(self):
    pass
[ "def test_humangenes_id_get(self):\n pass", "def test_get_all_histories_using_get(self):\n pass", "def test_uniformity_values(self, known_HU_dict):\n self.cbct.analyze()\n for key, roi in self.cbct.UN.ROIs.items():\n exp_val = known_HU_dict[key]\n meas_val = roi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for humangenes_id_get
def test_humangenes_id_get(self):
    pass
[ "def test_greenalgas_id_get(self):\n pass", "def test_musicals_id_get(self):\n pass", "def test_mousegenes_id_get(self):\n pass", "def test_vicars_id_get(self):\n pass", "def test_v1_supervision_identities_id_get(self):\n pass", "def test_murderers_id_get(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get XML File from ZIP.
def getXmlFileFromZip(file_path: str, zipfile_path: str):
    zipfile = zf.ZipFile(zipfile_path)
    file_string = zipfile.read(file_path)
    file_xml = lxml.etree.fromstring(file_string)
    return file_xml
[ "def extract_from_zip(self, url):\n try:\n r = requests.get(url)\n z = zipfile.ZipFile(io.BytesIO(r.content))\n for name in z.namelist():\n log.info(f\"XML extracted from zip file is {name}\")\n self.extracted_xml_name = name\n ex_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get File From ZIP.
def getFileFromZip(file_path: str, zipfile_path: str) -> bytes:
    zipfile = zf.ZipFile(zipfile_path)
    file = zipfile.read(file_path)
    return file
[ "def fetch_zip( filename, local_dir='.'):\n\n url = posixpath.join( URL_FILE_STORE, filename)\n path = os.path.join( local_dir, filename)\n if not os.path.exists( local_dir):\n os.makedirs( local_dir)\n\n if not os.path.exists(path):\n urllib.urlretrieve(url, path)\n\n Zip = zipfile.Zip...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the directory string from a path string.
def getDirectoryFromPath(path: str) -> str:
    path_temp = path.rpartition("/")
    new_path = path_temp[0] + path_temp[1]
    return new_path
[ "def parse_directory(path):\n p = pathlib.Path(path)\n\n if p.parts[0] == \"gs:\":\n return str(pathlib.Path(* p.parts[2:-1], p.stem)), str(p.parts[1])\n else:\n return str(p), None", "def first_dir(path_string):\n parts = path_string.split(os.path.sep)\n return parts[0]", "def _dir_from_url(valu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replaces the number in the path with the given number.
def changeFileNoInFilePath(path: str, fileNo: int) -> str:
    separator = r"[0-9]+\."
    splitted_path = re.split(separator, path, 1)
    new_path = splitted_path[0] + str(fileNo) + "." + splitted_path[1]
    return new_path
[ "def pat_number(self, number):\n self._pat_number(number)", "def parse_num(path):\n bn = path.basename\n if bn.startswith(prefix):\n try:\n return int(bn[len(prefix):])\n except ValueError:\n pass", "def replace_digits_with_paths(self, use...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moses BLEU is 0 when there are no matching 4-grams.
def test_less_than_four_bleu():
    hypotheses = ['a b c']
    references = [['a b c']]
    bleu = bleu_score(hypotheses, references)
    assert bleu == 0.0
[ "def unmatchedCount(sequence1, sequence2):\n # TODO: Write me\n if len(sequence1) == 0 or len(sequence2) == 0:\n return 0\n else:\n if matchingBase(sequence1[0]) == \"T\" and matchingBase(sequence2[0]) == \"A\":\n return 0 + unmatchedCount(sequence1[1:], sequence2[1:])\n eli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ordinary points are just pairs (x, y) where x and y are both between 0 and n-1.
def ordinary_points(n):
    return [(x, y) for x in range(n) for y in range(n)]
[ "def GenAdjacentPoints(origin):\n for i in [1, 0, -1]:\n for j in [-1, 0, 1]:\n if i == 0 and j == 0:\n continue\n yield Point(origin.x + j, origin.y + i)", "def get_adjacent(x, y):\n return [(x + 1, y), (x + 1, y + 1), (x + 1, y - 1),\n (x, y - 1), (x,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the vertical line with the specified x-coordinate in the finite projective plane of degree n; includes 'infinity infinity'.
def vertical_line(x, n):
    return [(x, y) for y in range(n)] + [u"∞"]
[ "def line_at_infinity(n):\n return points_at_infinity(n)", "def render_visible(V):\n\n # make V into list sorted by slope: O(nlogn)\n V = sorted(V, key=lambda l: l.m)\n X = visible_intersections(V)\n\n # add point beyond left end point to have a support point for the line\n # with smallest slope...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The line at infinity just contains the points at infinity.
def line_at_infinity(n):
    return points_at_infinity(n)
[ "def testPlotCurveInfinite(self):\n tests = {\n 'y all not finite': ([0, 1, 2], [numpy.inf, numpy.nan, -numpy.inf]),\n 'x all not finite': ([numpy.inf, numpy.nan, -numpy.inf], [0, 1, 2]),\n 'x some inf': ([0, numpy.inf, 2], [0, 1, 2]),\n 'y some inf': ([0, 1, 2], [...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialization code for autonomous mode may go here. Users may override this method for initialization code which will be called each time the robot enters autonomous mode, regardless of the selected autonomous mode. This can be useful for code that must be run at the beginning of a match.
def autonomousInit(self) -> None:
    pass
[ "def autonomousInit(self) -> None:\n ...", "def autonomous(self) -> None:\n\n self.__nt_put_mode(\"auto\")\n self.__nt_put_is_ds_attached(self.__is_ds_attached())\n\n self._on_mode_enable_components()\n\n try:\n self.autonomousInit()\n except:\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Periodic code for test mode should go here.
def testPeriodic(self) -> None:
    pass
[ "def testPeriodic(self) -> None:\n ...", "def run_post_test(self):\n pass", "def run_pre_test(self):\n pass", "def run(self,test_mode):\n\n self._start_profile_prompt()\n self.checker._check_directories()\n \n\n if test_mode == 'check':\n data_path = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Periodic code for all modes should go here. Users must override this method to utilize it, but it is not required. This function gets called last in each mode. You may use it for any code you need to run during all modes of the robot (e.g. NetworkTables updates). The default implementation will update SmartDashboard, Live...
def robotPeriodic(self) -> None:
    watchdog = self.watchdog
    self.__sd_update()
    watchdog.addEpoch("SmartDashboard")
    self.__lv_update()
    watchdog.addEpoch("LiveWindow")
    # self.__sf_update()
    # watchdog.addEpoch("Shuffleboard")
[ "def autonomous(self) -> None:\n\n self.__nt_put_mode(\"auto\")\n self.__nt_put_is_ds_attached(self.__is_ds_attached())\n\n self._on_mode_enable_components()\n\n try:\n self.autonomousInit()\n except:\n self.onException(forceReport=True)\n\n auto_funct...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MagicRobot will do The Right Thing and automatically load all autonomous mode routines defined in the autonomous folder.
def autonomous(self) -> None:
    self.__nt_put_mode("auto")
    self.__nt_put_is_ds_attached(self.__is_ds_attached())
    self._on_mode_enable_components()
    try:
        self.autonomousInit()
    except:
        self.onException(forceReport=True)
    auto_functions: Tuple[Callabl...
[ "def autonomousInit(self) -> None:\n ...", "def RegisterAutonomous(self):\n\t\t# These run the whole time to spin the wheel and make sure the tilt\n\t\t# statys at the right position\n\t\tscheduler.RegisterAutonomousTask(\"ShooterContinuous\", shooter.ShooterContinuous, scheduler.PARALLEL_TASK)\n\t\tschedu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run components and all periodic methods.
def _enabled_periodic(self) -> None:
    watchdog = self.watchdog
    for name, component in self._components:
        try:
            component.execute()
        except:
            self.onException()
        watchdog.addEpoch(name)
    self._do_periodics()
    for reset_dict, c...
[ "def run(self):\n for instance in self.component_instances:\n instance_thread = instance.spawn()\n instance_thread.start()", "def do_main(self):\n self.pool.spawn_n(self._periodic_runner)\n super(Manager, self).do_main()", "def robotPeriodic(self) -> None:\n wat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a histogram of each color channel in an RGB image.
def color_histo(img):
    # check if rgb(a):
    if img.shape[2] in (3, 4):
        # index the color planes (img[::1] etc. would slice rows, not channels)
        channels = (img[:, :, 0], img[:, :, 1], img[:, :, 2])
    elif img.shape[2] == 1:
        channels = (img[:, :, 0],)
    else:
        print("weird number of color channels going on: ", img.shape)
    # return channels:
    return (histogram(chan) for chan in channels)
[ "def analyze_color_histogram(img):\n color = ('b', 'g', 'r')\n hist = [] \n for i, col in enumerate(color):\n hist.append(cv2.calcHist([img],[i],None,[256],[0,256])) \n \n blue = hist[0]\n green = hist[1]\n red = hist[2]\n \n return blue, green, red", "def get_histogra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Blanks out any color in the image within a tolerance percentage of the given color.
def remove_color(colored_pic, rgb_color, tolerance=0.7):
    # surprisingly, a high tolerance works best for the training pic...
    img = colored_pic.copy()
    # create color tolerance limits based on rgb color
    rlims, glims, blims = ((rgb_color[i]*(1.0-tolerance), rgb_color[i]*(1+tolerance)) for i in range(3))
    # set to black w...
[ "def percentFilled(w, h, cnt):\n return cv2.contourArea(cnt) >= 0.7 * w * h", "def percentage_open(image):\n red_count = 0\n for i in range(128):\n for j in range(128):\n r, g, b = image.getpixel((i,j))\n if r > b:\n red_count += 1\n return float(red_count)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use the standard blob-detection function to plot circles surrounding segmented blobs.
def blobber(img):
    blobs = blob_dog(img, min_sigma=20, threshold=.1)
    blobs[:, 2] = blobs[:, 2] * sqrt(2)
    fig, ax = plt.subplots()
    ax.imshow(img, cmap="gray")
    for blob in blobs:
        y, x, r = blob
        c = plt.Circle((x, y), r, color="0.75", linewidth=2, fill=False)
        ax.add_patch(c)
[ "def blob_detection(post_masked_processed, draw):\n img = cv2.normalize(post_masked_processed, None, 0, 255, cv2.NORM_MINMAX)\n img = img.astype('uint8')\n img = cv2.medianBlur(img, 7)\n\n th2 = filters.threshold_sauvola(img)\n th2 = 255-th2\n th2 = th2.astype(\"uint8\")\n # Set our filtering p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for places that match query
def search():
    # parse query into a keyword array
    q = request.args.get("q")
    # parse query into an array
    q_array = q.split(" ")
    # remove any commas (if any)
    query = []
    for item in q_array:
        if item[len(item) - 1] == ",":
            item = item.replace(",", "")
        query.a...
[ "def get_potential_matches_from_address(self, address):", "def search_places(request):\n response = requests.get(f\"https://maps.googleapis.com/maps/api/place/textsearch/json?query={request}&key={places_key}\")\n formatted = response.json()\n lat = formatted['results'][0]['geometry']['location']['lat']\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replaces the old word with a new word in the trie.
def update_word(self, old_word, new_word) -> None:
    if self.__delitem__(old_word):
        self.add(new_word)
[ "def change_weight(mytrie, word, updatefunc):\r\n assert isinstance(word, str), \"The word to change weight should be a string.\"\r\n\r\n node = Trie(mytrie).searchTrie(word)\r\n # if word doesn't exist in trie or it is not a full word in trie\r\n if node is None or node.weight == -1:\r\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Appends the word to a prefix in the trie.
def append_to_word(self, prefix, word) -> None:
    # mark is_word to false
    self.__delitem__(prefix)
    # add/append the word
    self.add(prefix + word)
[ "def add(self, word):\n current = self\n for letter in word:\n current = current._children.setdefault(letter, Trie())\n current._endsHere = True", "def add(self, word: str):\n current_node = self.root\n for c in word:\n children = current_node.children\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Postprocess the model output predictions.
def postprocess_predictions(self, predictions: Prediction, metadata: Union[None, ProcessingMetadata]) -> Prediction:
    pass
[ "def postprocess(cls, output) -> \"OutputModel\":\n pass", "def postprocess_model_outputs(self, predictions, expected):\n expected[\"y\"] = expected[\"y\"].numpy()\n expected[\"display_ids\"] = expected[\"display_ids\"].numpy()\n\n return predictions.numpy(), expected", "def postproc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the equivalent photometric preprocessing module for this processing. A photometric preprocessing applies a transformation to the image pixels without changing the image size. This includes RGB -> BGR, standardization, normalization, etc. If a Processing subclass does not change pixel values, it should return an n...
def get_equivalent_photometric_module(self) -> Optional[nn.Module]:
    pass
[ "def get_image_preprocessor(self):\n image_size = self.model.get_image_size()\n input_data_type = tf.float32\n\n shift_ratio = 0\n if self.job_name:\n # shift_ratio prevents multiple workers from processing the same batch\n # during a step\n assert self.worker_hosts\n shift_ratio = f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Infer the output image shape from the processing.
def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:
    output_shape = None
    for p in self.processings:
        new_output_shape = p.infer_image_input_shape()
        if new_output_shape is not None:
            output_shape = new_output_shape
    return output_shape
[ "def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n return self.output_shape", "def output_shape(self):\n return None", "def output_shape_for(self, input_shape):\n # N1, C1, W1, H1 = input_shape\n # output_shape = (N1, self.n_classes, W1, H1)\n x = input_shape\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Infer the output image shape from the processing.
def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:
    return self.output_shape
[ "def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n output_shape = None\n for p in self.processings:\n new_output_shape = p.infer_image_input_shape()\n if new_output_shape is not None:\n output_shape = new_output_shape\n\n return output_shape"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processing parameters commonly used for training YoloX on COCO dataset.
def default_yolox_coco_processing_params() -> dict:
    image_processor = ComposeProcessing(
        [
            ReverseImageChannels(),
            DetectionLongestMaxSizeRescale((640, 640)),
            DetectionBottomRightPadding((640, 640), 114),
            ImagePermute((2, 0, 1)),
        ]
    )
    params =...
[ "def default_ppyoloe_coco_processing_params() -> dict:\n\n image_processor = ComposeProcessing(\n [\n ReverseImageChannels(),\n DetectionRescale(output_shape=(640, 640)),\n NormalizeImage(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),\n ImagePermu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processing parameters commonly used for training PPYoloE on COCO dataset.
def default_ppyoloe_coco_processing_params() -> dict:
    image_processor = ComposeProcessing(
        [
            ReverseImageChannels(),
            DetectionRescale(output_shape=(640, 640)),
            NormalizeImage(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
            ImagePermute(permutati...
[ "def process_epidemic_parameters(self):", "def default_yolox_coco_processing_params() -> dict:\n\n image_processor = ComposeProcessing(\n [\n ReverseImageChannels(),\n DetectionLongestMaxSizeRescale((640, 640)),\n DetectionBottomRightPadding((640, 640), 114),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the processing parameters for a pretrained model.
def get_pretrained_processing_params(model_name: str, pretrained_weights: str) -> dict:
    if pretrained_weights == "coco":
        if "yolox" in model_name:
            return default_yolox_coco_processing_params()
        elif "ppyoloe" in model_name:
            return default_ppyoloe_coco_processing_params()
    ...
[ "def get_preprocess(self) -> Dict:\n input_shape = get_input_shape(self.deploy_cfg)\n load_from_file = self.model_cfg.data.test.pipeline[0]\n model_cfg = process_model_config(self.model_cfg, [''], input_shape)\n preprocess = model_cfg.data.test.pipeline\n preprocess[0] = load_from...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the class with a test_name, a config file, a results database, and a file_handle_object. Test_name should correspond to a directory with the name 'test_name'.
def __init__(self, test_name, config_file, results_database, data_path=PERFORMERNAME, file_handle_object=None):
    self.__resultsdb = results_database
    self.__config_file_lines = config_file.read().split("\n")
    self.__test_name = test_name
    self.__fho = file_h...
[ "def __init__(self, test_name, data=None):\n\n self.test_name = test_name\n self.json_file = os.path.join(config.benchmark_location,\n self.test_name + '.json')\n if data is None:\n self.data = self.get()\n else:\n self.data = da...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles a new randomness seed appropriately.
def __handle_seed(self, randseed):
    sr.seed(int(randseed))
[ "def update_random_seed(self):\n iseed = self.run_card['iseed']\n if iseed == 0:\n randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'))\n iseed = int(randinit.read()[2:]) + 1\n randinit.close()\n randinit = open(pjoin(self.me_dir, 'SubProcesses', 'ran...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles a new test type appropriately.
def __handle_test_type(self, test_type):
    self.__test_type = igf.TEST_TYPES.value_to_number[test_type]
[ "def test_tool_types_create(self):\n pass", "def set_testtype(self, name):\n self.testID['TESTTYPE'] = name", "def test_instantiating_a_new_type_returns_expected_type():\n NewType = make_type(int, \"NewType\", [numeric.Minimum(0), numeric.Maximum(10)])\n instance = NewType(5)\n assert isi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles a new num_levels appropriately.
def __handle_num_levels(self, num_levels):
    self.__num_levels = int(num_levels)
[ "def npl_changed(self, value):\n self.levels_new = value", "def __set_levels(self):\n # only really needs to be called after all adding is done\n # max complexity is number of vertices\n\n i = 0\n for vertex in self.__graph_dict:\n if len(self.__in_graph_dict[vertex])...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles a new number of circuits appropriately.
def __handle_num_circuits(self, num_circuits):
    self.__num_circuits = int(num_circuits)
[ "def __make_circuits(self):\n # update the params if needed:\n self.__handle_new_params()\n # make self.__num_circuits circuits:\n for circuit_num in xrange(self.__num_circuits):\n # generate a random circuit:\n if self.__test_type == igf.TEST_TYPES.RANDOM:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles a new number of inputs appropriately.
def __handle_num_inputs(self, num_inputs):
    self.__num_inputs = int(num_inputs)
[ "def input_count(self, input_count):\n\n self._input_count = input_count", "def inputs_changed(self, inputs):\n if DEBUG:\n logger.info(\"* %s\" % binstring(inputs))\n self.inputs = inputs\n self.limits.check(inputs=self.inputs)", "def ask_numbers():", "def handle_inputs(self):\n u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the new params (consisting of K, L, D (or num_levels)).
def __handle_new_params(self):
    if self.__test_type == igf.TEST_TYPES.RANDOM:
        sec_param_text = ",".join(["L" + "=" + str(self.__L),
                                   "D" + "=" + str(self.__D),
                                   "K" + "=" + str(self.__K)])
    else:
        sec_param_...
[ "def _updateLevelSetParameters(self):\n parameters = LevelSetParameters()\n parameters.iterationNumber = self._iterationSpinBox.value\n parameters.inflation = self._inflationSlider.value\n parameters.attraction = self._attractionSlider.value\n parameters.curvature = self._curvatureSlider.value\n p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes in a path, and returns the same path relative to the appropriate directory for the test file.
def __get_testfile_path(self, path):
    path = os.path.relpath(path, os.path.join(self.__data_path, os.pardir))
    return path
[ "def subject_relative_path(path):\n directory = path\n subject = component_name(path)\n\n filename = os.path.basename(path)\n directory = os.path.dirname(path)\n parent = os.path.basename(directory)\n\n if re.match(r\"index(?:[-._](?:spec|unit|test|acceptance))?\\.jsx?$\", filename):\n if re.match(r\"__tes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates circuits with the current parameters
def __make_circuits(self):
    # update the params if needed:
    self.__handle_new_params()
    # make self.__num_circuits circuits:
    for circuit_num in xrange(self.__num_circuits):
        # generate a random circuit:
        if self.__test_type == igf.TEST_TYPES.RANDOM:
            gen...
[ "def generate_circuit(config: Dict[str, Any]):\n print(\"-\" * 80)\n print(f\"Creating circuit number\")\n\n n_qubits = random.randint(config[\"min_n_qubits\"], config[\"max_n_qubits\"])\n n_ops = random.randint(config[\"min_n_ops\"], config[\"max_n_ops\"])\n\n if (config[\"strategy_program_generatio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles writing a circuit, both to the circuit file and to the test file.
def __write_circuit(self, circ):
    # find the circuit id:
    self.__circuit_id = self.__resultsdb.get_next_circuit_id()
    # write the circuit to the results database:
    row = {t2s.CIRCUIT_TESTNAME: self.__test_name,
           t2s.CIRCUIT_CID: self.__circuit_id,
           t2s.CIRCUIT_PID...
[ "def _write_component(component_spec: ComponentSpec, output_path: str):\n component_spec.save(output_path)", "def test_write(self):\n reqs = Requirementz.from_lines(TEST_LINES)\n reqs.write(filename=TEST_FILE)", "def write_products(self):\n if self.has_option('write.pattern'):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles writing an input, both to the input file and to the test file.
def __write_input(self, inp):
    # find the input id:
    self.__input_id = self.__resultsdb.get_next_input_id()
    # write the input to the results database:
    row = {t2s.INPUT_TESTNAME: self.__test_name,
           t2s.INPUT_IID: self.__input_id,
           t2s.INPUT_CID: self.__circuit_id...
[ "def cmd_write_inp(self):\n self.ensure_base_path()\n\n self.log.debug(\"Writing inp file\")\n self.write_inp()\n\n self.cmd_write_bloominp()\n self.cmd_write_runid()", "def test_write_orbitize_input():\n input_file = os.path.join(orbitize.DATADIR, \"test_val.csv\")\n test...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read all atoms in pdb file
def get_all(self):
    with open(self.filename) as pdb:
        atoms = [atom(line) for line in pdb if line.startswith('ATOM')]
    return atoms
[ "def parse_pdb(path):\n\n pdb_dict = defaultdict(lambda: defaultdict(list))\n res_dict = defaultdict(list)\n with open(path) as o:\n lines = o.readlines()\n for line in lines:\n if line[:4] == 'ATOM':\n atom_info = process_atom_info(line)\n identifier = '{}{}'.format(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read only nucleic acid atoms in pdb file
def get_nucleic(self):
    with open(self.filename) as pdb:
        atoms = [atom(line) for line in pdb
                 if re.search(r'(^ATOM)\s*\S*\s*\S*\s*'
                              r'(DA5|DA3|DA|DT5|DT3|DT|DG5|DG3|DG|DC5|DC3|DC)', line)]
    return atoms
[ "def exercise_atom_xyz_9999():\n pdb_inp = pdb.input(source_info=None, lines=flex.split_lines(\"\"\"\\\nATOM 10849 C ILE A1445 50.977 77.127 41.547 1.00129.33 A\nATOM 10850 O ILE A1445 50.257 76.569 42.421 1.00129.33 A\nATOM 10851 OXT ILE A1445 50.752 78.273 41.078 1.001...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write selected atoms to pdb
def write(self, atoms, out=open('atoms.pdb', 'w')):
    out.write('REMARK generated by pdb.py\n')
    for atom in atoms:
        vals = (['ATOM', atom['atom_num'], atom['atom_name'],
                 atom['res_name'], atom['res_num'],
                 atom['x'], atom['y'], atom['z'], '1...
[ "def write_pdb(self, which = 1):\n n = which\n for model in self.structure:\n if n == which:\n print(\"MODEL%9s\"%which)\n n += 1\n else:\n print(\"ENDMDL\\nMODEL%9s\"%n)\n n += 1\n for atom in model:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Group atoms by helix strand. Set n as the start of residue numbering.
def strands(self, n=2):
    prev_atom = {'res_num': n}
    for i, atom in enumerate(self.atoms):
        if (atom['res_num'] != prev_atom['res_num']
                and atom['res_num'] == n):
            strand1 = self.atoms[0:i]
            strand2 = self.atoms[i:]
        prev_atom = at...
[ "def homotopy_group(self, n):\n if n not in ZZ or n < 2:\n raise ValueError(\"\"\"homotopy groups can only be computed\n for dimensions greater than 1\"\"\")\n lgens = __homotopy_list__(self._kenzo, n).python()\n if lgens is not None:\n trgens = [0 if i == 1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get base pairs from start to end, inclusive
def get_pairs(self, start, end):
    # Both strands are numbered 5' to 3'
    return [a for a in self.atoms if a['pair'] in range(start, end + 1)]
[ "def key_range_iterator(key, start=\"\", finish=\"\", count=100):\r\n return (key.clone(key=k) for k in key_range(key, start, finish, count))", "def inclusive_list(start, stop):\n return list(range(start, stop + 1))", "def location_range(start: int, end: int) -> Iterable[int]:\n step = 1\n if start ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Minimize rotation using ksh's best_rotation
def minimize(self):
    rotation = ksh.best_rotation(self.crdset)
    best = rotation[0].calc_all()
    self.res = best[1]
    self.phi = rotation[1]
    self.the = rotation[2]
    self.best = best
    return self.best
[ "def min_rotation(target_degrees, source_degrees):\n return (target_degrees - source_degrees + 180) % 360 - 180", "def GetRotOfLowerSymm(new_latt, old_latt, old_rot):\n \n # Q = np.transpose(np.dot(old_latt, np.linalg.inv(new_latt)))\n P = np.dot(new_latt, np.linalg.inv(old_latt)) # // horizontal ve...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write minimized fit to Molecule.fits
def write_minimize(self, Molecule):
    Molecule.fits.append(self.best)
    return Molecule.fits
[ "def write_fits(self, filename, moctool=''):\n datafile = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'data', 'MOC.fits')\n hdulist = fits.open(datafile)\n cols = fits.Column(name='NPIX', array=self._uniq(), format='1K')\n tbhdu = fits.BinTableHDU.from_columns(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__() should create an RAMSTKUser model.
def test_ramstkuser_create(test_common_dao):
    _session = test_common_dao.RAMSTK_SESSION(
        bind=test_common_dao.engine, autoflush=False, expire_on_commit=False)
    DUT = _session.query(RAMSTKUser).first()
    assert isinstance(DUT, RAMSTKUser)
    # Verify class attributes are properly initialized.
    asse...
[ "def _make_user_model(self):\n class User(UserBase):\n \"\"\"\n A User as defined by the response from Keystone.\n\n Note: This class is dynamically generated by :class:`FlaskKeystone`\n from the :class:`flask_keystone.UserBase` class.\n\n :param request...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use this function to determine the step size between two successive points in a profile. Starting from the last estimate of the profile parameters, it determines the optimal step size between the last estimate and the next one. The step is null for all parameters except the one whose profile is being computed, and is o...
def init_step_size(parameters, parameter_index, bounds, likelihood_function, likelihood_args,
                   d_par_init=0.1, d_likelihood=0.1, max_step=3, alpha=0.95):
    likelihood = likelihood_function(parameters, *likelihood_args)
    df = parameters.shape[0]  # number of parameters = number of degrees of freedom
    chi2_threshold = scipy.stat...
[ "def _stepSize(self, gradientHist=None, prevStepSize=None, recommend=None, **kwargs):\n # grad0 = gradientHist[-1][1]\n # grad1 = gradientHist[-2][1] if len(gradientHist) > 1 else None\n # FIXME try using the step directions instead\n step0 = prevStepSize[-1]['versor']\n if step0 is None:\n step...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function computes the Profile Likelihood of a dynamic model with respect to one of its parameters. Starting at the best-fit parameter set, it tries to increase the log Likelihood of the model up to the identifiability threshold on each side of the optimum, up to a certain extent. If it doesn't reach the identifiabi...
def Compute_Profile(parameters, parameter_index, likelihood_function, likelihood_args, bounds,
                    target_sample_size=100, max_sample_size=1000, d_par_init=0.002, max_step=10,
                    number_initial_guess_samples=30, alpha=0.95, verbose_success=False, verbose_error=False):
    chi2 = likelihood_function(parameters, *likelihood_args)
    df = par...
[ "def paramLikelihoodProfile(i=0,j=0,drug_type=0,n=20):\n print('parameter=',i,'patient=',j,'drug type =',drug_type)\n if drug_type == 0:\n reps = 1\n cost_reps =[None]*reps\n pat_data = [None]*2\n tot_dose = pk.CPT11_tot_dose[0]\n pat_data[0] = pk.CPT11[0]\n pat_data[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the Profile of a Likelihood function with respect to one of its parameters, this function extracts the borders of its confidence interval at a certain confidence level alpha.
def Confidence_Interval(Profile, parameter_index, alpha=0.95):
    df, number_points = Profile['Parameters'].shape  # number of parameters of the model and number of points in the profile
    opt_likelihood = np.min(Profile['Profile_Likelihood'])
    opt_index = np.argmin(Profile['Profile_Likelihood'])  # first index of an optimum ...
[ "def confidence_interval(self, alpha=0.9):\n m, _, _ = scipy.stats.bayes_mvs(\n [r[self.metric_name_] for _, r in self.results_], alpha=alpha)\n return m", "def confidence_interval(self, alpha=.05):\n return self.deltas_dist.percentiles([100 * alpha, 100 * (1-alpha)])", "def _li...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the profile stored in a file into a friendly format
def Read_Profile(input_file):
    Data = np.genfromtxt(input_file)
    Data = {'Parameters': Data[:-1], 'Profile_Likelihood': Data[-1]}
    return Data
[ "def parse_profile(self, file):\n profile_file = open(file, \"r\")\n try:\n for line in profile_file:\n line.strip()\n words = line.split(\"=\")\n if words[0] == \"name\":\n self.name = words[1].strip()\n elif wo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots the profile of a parameter.
def Plot_Profile(Profile, Parameter_index, alpha=0.95, show=True, output_file=None,
                 xtitle='', ytitle='', maintitle=''):
    plt.clf()
    df = Profile['Parameters'].shape[0]  # number of estimated parameters
    threshold = np.min(Profile['Profile_Likelihood']) + chi2.ppf(alpha, df)
    plt.plot(Profile['Parameters'][Parameter_index], P...
[ "def plot_likelihood_profile(self, parameter, ax=None, **kwargs):\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n ts_diff = self.likelihood_profiles[parameter][\"likelihood\"] - self.total_stat\n values = self.likelihood_profiles[parameter][\"values\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots the comparison of two profile likelihood curves for the same parameter
def Plot_Two_Profiles(Profile1, Profile2, Parameter_index, alpha=0.95, show=True, output_file=None,
                      xtitle='', ytitle='', label1='', label2='', maintitle=''):
    df = Profile1['Parameters'].shape[0]  # number of estimated parameters
    threshold1 = np.min(Profile1['Profile_Likelihood']) + chi2.ppf(alpha, df)
    threshold2 = np.min(Profil...
[ "def compare_plot( xsc1, xsc2, title=\"comparison plot\", legend1=\"first file\", legend2=\"second file\",\n saveFile=None, legendXY = (0.05, 0.95) ):\n from fudge.vis.matplotlib import plot2d\n import matplotlib.pyplot as plt\n\n if xsc1.domain() != xsc2.domain():\n xsc1, xsc2 = xsc1.mutuali...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look up a query in Freebase. The query should be a unicode string.
def lookup(self, query):
    key = query.encode('utf8')
    if key not in self.cache:
        self.cache[key] = self.fetch(key)
    return self.cache[key]
[ "def fqlQuery(self, query, callback):\n j = Json().put(u\"query\", query)\n self.callMethod(u\"fql.query\", j.getJavaScriptObject(), callback)", "def handle_wolframalpha_search(self, query):\n\n client = wolframalpha.Client(app_id)\n res = client.query(query)\n\n if len(res.pods...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do the actual communication with the remote API. The query here should already be encoded to UTF-8. Returns a triple: the first item is the name recognized by Freebase, the second is the most likely category, and the last is the score. The first two items in the returned value are unicode; the last item is a float.
def fetch(self, query):
    self.params['query'] = query
    url = SERVICE_URL + '?' + urllib.urlencode(self.params)
    response = json.loads(urllib.urlopen(url).read())
    for result in response['result'][0:2]:
        if 'notable' in result:
            name = result['name'] ...
[ "def handle_wolframalpha_search(self, query):\n\n client = wolframalpha.Client(app_id)\n res = client.query(query)\n\n if len(res.pods) > 0:\n string = \"\"\n pod = res.pods[1]\n if pod.text:\n string = pod.text\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructor for a Funder object. Can be called with no arguments, creating a Funder object with no attributes set, or with a dict of information to be set at object creation.
def __init__(self, information_dict=None):
    if information_dict is not None:
        super(Funder, self).__init__(**information_dict)
    else:
        super(Funder, self).__init__()
[ "def constructor(self, **kwargs):\n if len(kwargs) > 0:\n self.__dict__.update(kwargs)", "def __init__(self, *args):\n this = _coin.new_SoFieldData(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string representation of a funder object
def __str__(self):
    return "Funder #{id}: {name}".format(id=self.id, name=self.name)
[ "def __str__(self):\n\n out = \"\"\n\n # Include source object if set\n if self.source_object:\n out += \"({}/{}) -\".format(self.source_object.type.name, self.source_object.value)\n\n # Add fact type\n out += \"[{}\".format(self.type.name)\n\n # Add value if set...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate SW and AW kernels according to parameters and flavour. Kernel flags are set based on the flavour.
def abb_init_kernels(L, U, N, flavour, plotname):
    sw = ABBSW()
    aw = ABBAW()
    for k in [sw, aw]:
        k.L, k.U = L, U
        k.setup(MidPoint(), N)
        k.measurements('kernels/abb/%s.csv' % plotname, plotname)
        flags = flavours[flavour]
        for flag in flags:
            setattr(k, fla...
[ "def _compile_kernels(self) -> None:\n ...", "def _instantiate_kernels(self):\n self.kernels = {}\n for language in self.indus_languages:\n for code_shop in self.get_shops(language):\n self.kernels['{}_{}'.format(language, code_shop)] = DiamanKernel(language,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a texture cube as described by the supplied ``TextureDescription``.
def load(self):
    pos_x = self._load_face(self.meta.pos_x, face_name="pos_x")
    pos_y = self._load_face(self.meta.pos_y, face_name="pos_y")
    pos_z = self._load_face(self.meta.pos_z, face_name="pos_z")
    neg_x = self._load_face(self.meta.neg_x, face_name="neg_x")
    neg_y = self._load_face(...
[ "def make_cube_1(texture, texture_index): \n glBindTexture(GL_TEXTURE_2D,texture[texture_index])\t \n # Front Face (Each texture's corner is matched a quad's corner.) \n glBegin(GL_QUADS)\t \n\tglTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0)\t# Bottom Left Of The Texture and Quad \n\tglTex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates each face, ensuring components and size are the same.
def _validate(self, faces):
    components = faces[0].components
    data_size = len(faces[0].data)
    for face in faces:
        if face.components != components:
            raise ImproperlyConfigured(
                "Cubemap face textures have different number of components"
            ...
[ "def is_single_face_valid(img) -> int:\n # TODO stub\n return 0", "def __check_correctness_face(self, face):\n first_number_face = face[0, 0]\n for number in nditer(face):\n if first_number_face != number:\n return False\n return True", "def checkDegenerateFa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to print the legend according to invoice type.
def _get_legend(self, invoice):
    legend = _('This document is a printed representation of the CFDI')
    if invoice.journal_id.name.split('-')[1] == "NOTA DE CARGO":
        legend = _("Nota Cargo")
    else:
        if invoice.type == 'out_invoice':
            legend = _("Factura")
        else:
            legend = _("Nota Crédito")
    return legend +...
[ "def getLegendTitle(self):\n\n if self.outlookType.startswith( 'Cat' ):\n return 'Categorical Outlook Legend'\n elif self.outlookType.startswith( 'Prob' ):\n return 'Total Severe Probability Legend (in %)'\n return f'{self.outlookType} Probability Legend (in %)'", "def _get_legend(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
encode all captions into one large array, which will be 1-indexed. also produces label_start_ix and label_end_ix which store 1-indexed and inclusive (Lua-style) pointers to the first and last caption for each image in the dataset.
def encode_captions(imgs, params, wtoi): max_length = params['max_length'] # min_length = params['min_length'] N = len(imgs) M = sum(len(img['final_captions']) for img in imgs) # total number of captions label_arrays = [] label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one...
[ "def _populate_caption_data(self, data: Dict[str, Any], image_id: int) -> None:\n data[\"caption\"] = []\n annotation_ids = self.captions.getAnnIds(imgIds=image_id)\n if annotation_ids:\n annotations = self.captions.loadAnns(annotation_ids)\n for annotation in annotations:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate BM25 score between caption and sentences in the article; take into account the named entity rather than the pointer; remove numbers and stopwords.
def BM25_score(cap, sent, df_dict, stopwords, dataset): if dataset == 'breakingnews': N = 2423309 ave_sen_len = 20 else: N = 5953950 ave_sen_len = 20 k1 = 2.0 k2 = 1.0 b = 0.75 sent_tf = {} cap_tf = {} score = 0 cleaned_cap = [] # remove number and...
[ "def analyze_bbc():\n import nltk\n nltk.download('punkt')\n from nltk.tokenize import word_tokenize\n tokens = word_tokenize(article.text)\n # print(tokens)\n tokens = [w.lower() for w in tokens]\n import string\n table = str.maketrans(\"\",\"\", string.punctuation)\n stripped = [w.trans...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a boolean for whether the solar resource requires clearsky irradiance. Returns bool.
def clearsky(self): if self._clearsky is None: self._clearsky = False for v in self.inputs.values(): self._clearsky = any((self._clearsky, bool(v.get('clearsky', False)))) if self._clearsky: logger.debug('...
[ "def zone_resilient(self) -> Optional[bool]:\n return pulumi.get(self, \"zone_resilient\")", "def has_sres(self) -> bool:\n return self.check_sensi_orders((1,), MODE_RES)", "def will_have_clear(self):\n return weather.any_status_is(self.forecast.weathers, \"sun\", self._wc_registry)", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a boolean for whether bifacial solar analysis is being run. Returns bool.
def bifacial(self): if self._bifacial is None: self._bifacial = False for v in self.inputs.values(): bi_flags = ('bifaciality', 'spe_is_bifacial', 'cec_is_bifacial', '6par_is_bifacial') bi_bools = [bool(v.get(flag, 0)) for flag ...
[ "def is_blanc(self):\n \n return self.binning is None", "def is_solution(self):\n return self.state.is_solution()", "def is_beta(self):\n if self.connective in logic.DISJ:\n return True\n elif self.connective in logic.CONJ:\n return False\n elif se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a boolean for whether wind generation is considering icing. Returns bool.
def icing(self): if self._icing is None: self._icing = False for v in self.inputs.values(): self._icing = any((self._icing, bool(v.get('en_icing_cutoff', False)))) if self._icing: logger.debug('Icing analysis...
[ "def HasITD(self):\n return self.__has('ITD')", "def is_worth_it(self):\n return True if self.immersion - self.crew * Ship.crew_immersion_multiplier > 20 else False", "def HasIFC(self):\n return self.__has('IFC')", "def issiso(self):\n return self.ninputs == 1 and self.noutputs == ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resolution to downscale NSRDB resource to. Returns dict | None: Option for NSRDB resource downscaling to higher temporal resolution. The config expects a str entry in the Pandas frequency format, e.g. '5min', or a dict of downscaling kwargs
def downscale(self): if self._downscale is None: ds_list = [] for v in self.inputs.values(): ds_list.append(v.get('downscale', None)) self._downscale = ds_list[0] ds_list = list({str(x) for x in ds_list}) if len(ds_list) > 1: ...
[ "def regrid_downscale_static(self):\n # TODO: Get these next two from the environment if possible\n downscale_temp = True\n downscale_precip = True\n if downscale_temp or downscale_precip:\n elevation_difference = self._regrid_downscale_generate_elevation_difference()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the SAM input file(s) (JSON/JSON5/YAML/TOML) and return as a dictionary.
def inputs(self): if self._inputs is None: self._inputs = {} for key, config in self.items(): # key is ID (i.e. sam_param_0) that matches project points json # fname is the actual SAM config file name (with path) if isinstance(config, str):...
[ "def _read_input_file(self):\n with open(self.input_path) as input_file:\n return json.load(input_file)", "def input_files():\n input_patterns = {} # The input values and expected results\n\n # do traversal of input_files\n absolute_path_prefix = path.dirname(path.realpath(__file__)) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run checks on a SAM input JSON config.
def check(cls, config): c = cls(config) c._run_checks()
[ "def test_parse_config(self):\n with open(self.config_file_json, 'w+',\n encoding=\"utf-8\", errors=\"ignore\") as config_f:\n json.dump({\n 'analyzer': ['--analyzers', 'clangsa'],\n 'parse': ['--trim-path-prefix', '/workspace']},\n con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print braille data visually. (We don't use it in the final solution)
def print_braille_row(data): assert len(data) == NUM_COLS text = "" rows = ["", "", "", ""] for byte in data: byte = ord(byte) rows[0] += "O" if byte & (1 << 0) else "." rows[1] += "O" if byte & (1 << 1) else "." rows[2] += "O" if byte & (1 << 2) else "." row...
[ "def printBeskjed():\n print(\"Hvilken kolonne er tallet ditt i? (v/m/h) \") #Printer ut en beskjed.", "def panda(self):\n print\n print 32 * ' ' + \".;;.\"\n print 31 * ' ' + \"/;;;;\\ ___ .;;. \" + \\\n Fore.GREEN + \" |\\\\\" + Fore.RESET\n print 30 * '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
We get braille data and parse it as English. We use the 'en_nabcc' encoding from the BRLTTY project.
def parse_braille_row(data): assert len(data) == NUM_COLS text = "" for byte in data: byte = ord(byte) brl_chr = 0 brl_chr |= BRL_DOT1 if byte & (1 << 0) else 0 brl_chr |= BRL_DOT2 if byte & (1 << 1) else 0 brl_chr |= BRL_DOT3 if byte & (1 << 2) else 0 brl_ch...
[ "def __init__(self, encoding):\n self.trans = {}\n for char in 'ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ':\n self.trans[char] = 'A'\n for char in 'ȀǞ':\n self.trans[char] = 'Ä'\n self.trans['Ǻ'] = 'Å'\n self.trans['Ä'] = 'Ae'\n self.trans['Å'] = 'Aa'\n for char...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a URB_INTERRUPT packet from the EuroBraille device. Adapted from the 'handleSystemInformation' function.
def parse_interrupt(data): # Extract the data seqnum = struct.unpack("<B", data[0])[0] STX_index = 1 if data[1] == PAD: STX_index = 2 assert data[STX_index] == STX data_length = struct.unpack(">H",data[STX_index+1:STX_index+3])[0] assert data[data_length+STX_index+1] == ETX pack...
[ "def handle_ip(self, byte):\n self.log.debug('IAC IP: Interrupt Process')", "def handle_abort(self, byte):\n self.log.debug('IAC ABORT: Abort')", "def read_interrupt():\n#\n#--- read data\n#\n file = house_keeping + '/all_data'\n f = open(file, 'r')\n data = [line.strip() for line in f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the game is over, returns the player who won; if it is a draw, returns 'remi'; if the game is not over yet, returns False.
def konec_igre(plosca): # check rows vrstice_konec = preveri_vrstice(plosca) if vrstice_konec: return vrstice_konec # check columns stolpci_konec = preveri_vrstice(list(zip(*plosca))) # check rows of the transposed board if stolpci_konec: return stolpci_konec # check diagonals ...
[ "def __zavrsi (zadnji):\n\n self.__zavrsena = True\n\n if zadnji is not None:\n # \"Pocisti\" stol.\n while self.__stol:\n karta = self.__stol.pop()\n self.__igraci[zadnji]['skupljeno'] |= {karta}\n\n # Pronadi igra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }