Dataset columns: query (string, lengths 9 to 9.05k) · document (string, lengths 10 to 222k) · negatives (list, lengths 19 to 20) · metadata (dict)
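Each row below pairs a natural-language query with a positive code document and a list of hard negatives. A minimal sketch of iterating such rows, assuming the Hugging Face datasets library; the dataset path is a hypothetical placeholder:

from datasets import load_dataset

# "user/code-search-triplets" is a placeholder path, not the real dataset name.
ds = load_dataset("user/code-search-triplets", split="train")
row = ds[0]
print(row["query"])             # natural-language docstring
print(row["document"][:80])     # positive code snippet
print(len(row["negatives"]))    # 19-20 hard-negative snippets
print(row["metadata"])          # {"objective": {"triplet": [["query", "document", "negatives"]], ...}}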
Postprocess inference result to normalize probabilities and render with labels
def postprocess(self, data): if self.error is not None: return [self.error] # Iterating over inference results to render the normalized probabilities response = [] for inference_result in data: softmax_result = inference_result.softmax().asnumpy() for...
[ "def inference(input_layer, para_data, train_phase, keep_prob):", "def post_process_predictions(self, labels, scene):\n pass", "def postprocess(prediction):\n\n # pred, uncertainty = prediction\n pred = prediction\n\n # Validate. As an example, if the output is an int, check that it is positive....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get Mapbox access token from arg or environment
def _get_token(token=None): if token is not None: return token else: return os.environ.get("MAPBOX_ACCESS_TOKEN") or os.environ.get( "MapboxAccessToken" )
[ "def get_token_from_environment():\n auth_token = os.environ.get('PLATFORM_AUTH_TOKEN')\n if not auth_token:\n return None\n return {\n \"auth_token\": auth_token\n }", "def get_auth():\n return 'api_token=<your token goes here>&api_token_secret=<your secret>'", "def get_access_toke...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new tileset with a recipe. $ tilesets create <tileset_id>, where the tileset ID is in the form of username.handle, for example "mapbox.neattileset". The handle may only include "-" or "_" special characters.
def create( tileset, recipe, name=None, description=None, privacy=None, token=None, indent=None ): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/{1}?access_token={2}".format( mapbox_api, tileset, mapbox_token ) body = {} body["name"] = name or "" ...
[ "def create_tile_set():\n # pylint: disable=no-member\n print('Creating Tile Set...')\n for tile in TileSet: # enum of MapTile objs\n print(' - %s' % tile.value.name)\n gridrealm.DBS.add(tile.value)\n\n gridrealm.DBS.commit()\n print('...commited')", "def create_tile(cls, grs, tile, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publish your tileset. tilesets publish
def publish(tileset, token=None, indent=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/{1}/publish?access_token={2}".format( mapbox_api, tileset, mapbox_token ) r = requests.post(url) if r.status_code == 200: click.echo(json.dumps(r.json(),...
[ "def publish(self, settings, item):\n\n publisher = self.parent\n engine = publisher.engine\n document = item.properties[\"document\"]\n\n path = _document_path(document)\n item.properties[\"upload_path\"] = path\n item\n psdProject = PSDImage.open(path)\n\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View the current queue/processing/complete status of your tileset. tilesets status
def status(tileset, token=None, indent=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/{1}/status?access_token={2}".format( mapbox_api, tileset, mapbox_token ) r = requests.get(url) click.echo(json.dumps(r.json(), indent=indent))
[ "def queue_status(self):\n return self._queue_status", "def thread_status():\n global dataSession\n return jsonify(dict(status=('finished' if len(dataSession) > 1 else 'running')))", "def get_current_status(self):\n for ct, device in enumerate(self.LIST_OF_DEVICES): # for each device\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View all jobs for a particular tileset. tilesets jobs
def jobs(tileset, stage, token=None, indent=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/{1}/jobs?access_token={2}".format( mapbox_api, tileset, mapbox_token ) if stage: url = "{0}/tilesets/v1/{1}/jobs?stage={2}&access_token={3}".format( ...
[ "def all_jobs():\n\n jobs = Job.get_all()\n\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, False, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)", "def job(tileset, job_id, token=None, inde...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View a single job for a particular tileset. tilesets job
def job(tileset, job_id, token=None, indent=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/{1}/jobs/{2}?access_token={3}".format( mapbox_api, tileset, job_id, mapbox_token ) r = requests.get(url) click.echo(json.dumps(r.json(), indent=indent))
[ "def jobs(tileset, stage, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/jobs?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n if stage:\n url = \"{0}/tilesets/v1/{1}/jobs?stage={2}&access_token={3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all tilesets for an account. By default the response is a simple list of tileset IDs. If you would like an array of each tileset's information, use the verbose flag. tilesets list
def list(username, verbose, token=None, indent=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/{1}?access_token={2}".format( mapbox_api, username, mapbox_token ) r = requests.get(url) if r.status_code == 200: if verbose: for tile...
[ "async def TileMatrixSet_list(request: Request):\n return {\n \"tileMatrixSets\": [\n {\n \"id\": tms.name,\n \"title\": tms.name,\n \"links\": [\n {\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate a Recipe JSON document. tilesets validate-recipe
def validate_recipe(recipe, token=None, indent=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/validateRecipe?access_token={1}".format( mapbox_api, mapbox_token ) with open(recipe) as json_recipe: recipe_json = json.load(json_recipe) r ...
[ "def validate_recipe(recipe_data):\n recipe_errors = ValidateRecipe.__validator.validate_data(\n recipe_data, {\n \"name\": {\n \"required\": True,\n \"min\": 3,\n \"max\": 200\n },\n \"steps\": {...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View a tileset's recipe JSON. tilesets view-recipe
def view_recipe(tileset, token=None, indent=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/{1}/recipe?access_token={2}".format( mapbox_api, tileset, mapbox_token ) r = requests.get(url) if r.status_code == 200: click.echo(json.dumps(r.json(...
[ "def view_recipe(recipe_id):\n recipes = mongo.db.recipes\n my_recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(recipe_id)})\n return render_template('view_recipe.html', recipe=my_recipe)", "def view_recipe(request, recipe, **_kwargs):\n return render(request, \"deployments/disp_rec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update a Recipe JSON document for a particular tileset. tilesets update-recipe
def update_recipe(tileset, recipe, token=None, indent=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/{1}/recipe?access_token={2}".format( mapbox_api, tileset, mapbox_token ) with open(recipe) as json_recipe: recipe_json = json.load(json_recipe)...
[ "def test_full_update_recipe(self):\n\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n\n payload = {\n 'title': 'update spaghetti carhonara',\n 'time_miniutes': 32,\n 'price': 5.00\n }\n\n url = recipe_detail...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create/add a tileset source. tilesets add-source
def add_source(ctx, username, id, features, no_validation, token=None, indent=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = ( f"{mapbox_api}/tilesets/v1/sources/{username}/{id}?access_token={mapbox_token}" ) with tempfile.TemporaryFile() as file: for feature ...
[ "def add_source(self, source):\n return self._add(source)", "def _add_source(self, source: \"Source\") -> None:\n self.sources.append(source)", "def _add_source(self, source: _Source) -> None:\n\n self._sources.append(source)", "def from_source(self, source):\n self._technical_visi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View a Tileset Source's information. tilesets view-source
def view_source(username, id, token=None, indent=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/sources/{1}/{2}?access_token={3}".format( mapbox_api, username, id, mapbox_token ) r = requests.get(url) if r.status_code == 200: click.echo(jso...
[ "def view_sources(self):\r\n return self._view_sources", "def GetTileViewport(self):\n ...", "def list_sources(username, token=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}?access_token={2}\".format(\n mapbox_api, username...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete a Tileset Source + all of its files. tilesets delete-source
def delete_source(username, id, force, token=None): if not force: click.confirm( "Are you sure you want to delete {0} {1}?".format(username, id), abort=True ) mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/sources/{1}/{2}?access_token={3}".for...
[ "def delete(self, source):\n _source = self._source_prefix+source\n assert _source in self.cache.keys()\n del self.cache[_source]", "def cleanup_data(source_file = None):\n\n # Find temporary files.\n _temp_files = glob(TEMP_DIR + TEMP_FILENAME + \"*\")\n # Delete them.\n for _fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all Tileset Sources for an account. Response is an unordered array of sources. tilesets list-sources
def list_sources(username, token=None): mapbox_api = _get_api() mapbox_token = _get_token(token) url = "{0}/tilesets/v1/sources/{1}?access_token={2}".format( mapbox_api, username, mapbox_token ) r = requests.get(url) if r.status_code == 200: for source in r.json(): cl...
[ "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources:\")\n print(sources)\n return sources", "def get_sources(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mutes everyone that you are following
def auto_mute_following(): following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"]) muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)["ids"]) not_muted = following - muted # put user IDs of people you do not want to mute here users_keep_unmuted = set([]) # mute al...
[ "def follow_reciprocated(self, target):\n if random.randint(1, 1000) == 1: # 1 in 20 are public @replies\n self.tweet_user(target)\n else:\n try:\n self.dm_user(target)\n except:\n pass", "def follow_user(cls, user, following):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unmutes everyone that you have muted
def auto_unmute(): muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)["ids"]) # put user IDs of people you want to remain muted here users_keep_muted = set([]) # mute all for user_id in muted: if user_id not in users_keep_muted: t.mutes.users.destroy(...
[ "def unmute(self):\n if self.is_muted:\n self.toggle_mute()", "def unmute(self):\n return self.connection.delete_request(f\"/users/{self.id}/mute\")", "async def unmute(self, ctx, user: Redeemed):\n if member == None or member == ctx.message.author:\n await ctx.send(\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extend `unichr` for all possible Unicode values (n).
def unicode_char(n): try: return unichr(n) except ValueError: # Generate bytes object packed as int. bytes_object = struct.pack('i', n) # Return decoded w/ utf-32 codec. return bytes_object.decode('utf-32')
[ "def int_to_unichr(codepoint):\n if PY2:\n return unichr(codepoint)\n return chr(codepoint)", "def make_unicode():\r\n for num in range(300, 320):\r\n yield unichr(num)", "def safe_unichr(intval):\n try:\n return unichr(intval)\n except ValueError:\n # ValueError: unic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ``create_engine`` with invalid adapter.
def test_create_engine_no_adapters(): engine = create_engine("shillelagh://") with pytest.raises(ProgrammingError) as excinfo: Table("dummy://", MetaData(bind=engine), autoload=True) assert str(excinfo.value) == "Unsupported table: dummy://"
[ "def test_create_engine_no_adapters(registry: AdapterLoader) -> None:\n registry.clear()\n engine = create_engine(\"shillelagh://\")\n metadata = MetaData()\n metadata.reflect(engine)\n\n with pytest.raises(ProgrammingError) as excinfo:\n Table(\"dummy://\", metadata, autoload_with=engine)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Init the class object and simply pass either a Twitter object or a tweet's plain text to this method. The analysis will return a list indicating the polarity and subjectivity of the tweet.
def analyse(self, tweet): if (type(tweet) == dict): text = self.clean_tweet(self.to_text(tweet)) else: text = self.clean_tweet(tweet) analysis = TextBlob(text) polarity = analysis.polarity subjectivity = analysis.subjectivity res = [] #...
[ "def __init__(self):\n self.sentimentFromText = None #Sentiment object for containing the sentiment information associated with the provided text\n self.sentimentFromUrl = None #Sentiment object for containing the sentiment information associated with the provided url", "def analyze_tweets():", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Discrete variable to feature converter. var: value of the variable. varname: name of the variable. lims: range of the discretization. collapse: list with two binary vals; collapse all below lims[0] to lims[0] & collapse all above lims[1] to lims[1]. e.g., fdict = discVar2Feature(8, 'positive adjective', lims = [1...
def discVar2Feature( var, varname, lims = [1,3], collapse = [False, False], ctxt = 'Has'): vals = xrange(lims[0], lims[1]+1) keystr = ctxt + ' %s ' + varname fdict = {keystr % val:False for val in vals} if collapse[0] == True: if lims[0] > var: var = lims[0] #va...
[ "def discVar2FeatureOld( var, varname, lims = [1,5], collapse = [False, False], ctxt = 'contains'):\n nums = ['zero','one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten']\n \n vals = range(lims[0], lims[1]+1) \n \n #init fdict\n fdict = dict() \n for k, val in en...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Discrete variable to feature converter. var: value of the variable. varname: name of the variable. lims: range of the discretization. collapse: list with two binary vals; collapse all below lims[0] to lims[0] & collapse all above lims[1] to lims[1]. e.g., fdict = discVar2Feature(8, 'positive adjective', lims = [1...
def discVar2FeatureOld( var, varname, lims = [1,5], collapse = [False, False], ctxt = 'contains'): nums = ['zero','one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten'] vals = range(lims[0], lims[1]+1) #init fdict fdict = dict() for k, val in enumerate(vals...
[ "def discVar2Feature( var, varname, lims = [1,3], collapse = [False, False], ctxt = 'Has'):\n \n vals = xrange(lims[0], lims[1]+1) \n \n keystr = ctxt + ' %s ' + varname\n fdict = {keystr % val:False for val in vals} \n\n if collapse[0] == True:\n if lims[0] > var:\n var = lims[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if featureVals contains fkey. This is a check to see if a core feature function has been previously computed.
def haskey(featureVals, fkey): try: featureVals[fkey] except KeyError: return False #warn(HASKEYMSG % (fkey)) return True
[ "def check_features(self, data):\n\n # Get feature keys for the current session\n session_features = data\n # If there are no features in the feature set, record\n # the features from the current session\n if len(self.features) == 0:\n self.features = session_features\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
End detection, as described in Eq. (50) of S. Watanabe et al., "Hybrid CTC/Attention Architecture for End-to-End Speech Recognition".
def end_detect(ended_hyps, i, M=3, d_end=np.log(1 * np.exp(-10))): if len(ended_hyps) == 0: return False count = 0 best_hyp = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[0] for m in six.moves.range(M): # get ended_hyps with their length is i - m hyp_length = i - m ...
[ "def end_detect(ended_hyps, i, M=3, D_end=np.log(1 * np.exp(-10))):\n if len(ended_hyps) == 0:\n return False\n count = 0\n best_hyp = sorted(ended_hyps, key=lambda x: x[\"score\"], reverse=True)[0]\n for m in range(M):\n # get ended_hyps with their length is i - m\n hyp_length = i ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Geocode the addresses and build an address table
def build_addresses(self): from ambry.geo.geocoders import DstkGeocoder facilities = self.partitions.find(table='facilities') def address_gen(): for row in facilities.query("SELECT * FROM facilities"): address = "{}, {}, {} {}".format(row['dba_address1'], r...
[ "def add_geocode_addresses():\n\n for address in results:\n g = geocoder.google(str(address[1]), key=config.api_key)\n latitude.append(g.latlng[0])\n longitude.append(g.latlng[1])\n\n return latitude, longitude", "def fill_address(self, processes: int) -> None:\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the facilities_blockgroups crosswalk file to assign facilities to blockgroups.
def build_block_cross(self): from ambry.geo.util import find_geo_containment, find_containment from geoid import civick lr = self.init_log_rate(3000) def gen_bound(): boundaries = self.library.dep('blockgroups').partition # Note, ogc_fid is the pr...
[ "def task_output_block_groups():\n for dept in Department.list():\n yield {\n 'name': dept.name,\n 'file_dep': [dept.block_groups_path],\n 'targets': [dept.block_groups_output],\n 'actions': ['cp %(dependencies)s %(targets)s'],\n 'clean': True,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This will run through all of the web interface using Selenium. The input should be the full path to a PDB file. It will download the wrappers/bonds as PDB_NAME_wrappers.txt and PDB_NAME_bonds.txt.
def run_wrappa(browser, pdb_file): # Wrappa has 3 MB limit if os.path.getsize(pdb_file) > 3000000: logging.warn("%s is too large (size is %d), skipping", pdb_file, os.path.getsize(pdb_file)) return False if os.path.isfile(pdb_file[:-4] + "_bonds.txt"): logging.warn("%s has already b...
[ "def dl_pdb(url_dom, pdb_id, dom_sid):\n good_url = re.sub(r'(output=html)', 'output=txt', url_dom)\n\n print(\"Dowloading the good domain of \" + pdb_id + \".pdb from the SCOP \" +\n \"website...\")\n urlreq.urlretrieve(good_url, \"data/\" + dom_sid + '.pdb')\n print(\"Download finished !\\n\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic function to write .rst files and convert them to pdf/html. Accepts a report template and dictionary. Writes rst once with full paths for image files and generates a pdf, then strips leading path components and writes again, generating an html file that expects to live in the same directory as report images.
def write_workflow_report(workflow_name, report_template, report_dict): from os.path import exists, basename from subprocess import check_output # Plug the values into the template for the pdf file report_rst_text = report_template % report_dict # Write the rst file and convert to pdf report_p...
[ "def to_pdf(paper_dir, template_dir=None, use_shell_escape=False, flatten=False, keep_comments=False):\n template_dir = template_dir or scriptorium.CONFIG['TEMPLATE_DIR']\n\n paper_dir = os.path.abspath(paper_dir)\n if os.path.isdir(paper_dir):\n fname = paper_root(paper_dir)\n elif os.path.isfil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
S.feed(handle, consumer). Feed in a BLAST report for scanning. handle is a file-like object that contains the BLAST report. consumer is a Consumer object that will receive events as the report is scanned.
def feed(self, handle, consumer): if isinstance(handle, File.UndoHandle): uhandle = handle else: uhandle = File.UndoHandle(handle) # Try to fast-forward to the beginning of the blast report. read_and_call_until(uhandle, consumer.noevent, contains='BLAST') ...
[ "def feed(self, handle, consumer):\n\n consumer.start_record()\n self.num_pops = None\n self.num_loci = None\n self.loci_data = []\n \n data_org = int(handle.readline().rstrip())\n consumer.data_org(data_org)\n num_pops = int(handle.readline().rstrip()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
next(self) -> object. Return the next Blast record from the file. If no more records, return None.
def next(self): lines = [] query = False while 1: line = self._uhandle.readline() if not line: break # If I've reached the next one, then put the line back and stop. if lines and (line.startswith('BLAST') o...
[ "def next(self):\n return self.__file.next()", "def next(self):\n nxt = self.readentry()\n if nxt is None:\n raise StopIteration\n return nxt", "def next(self):\n next_data = self.load_next()\n if next_data is None:\n raise StopIteration\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute and retrieve data from standalone BLASTALL as handles (OBSOLETE). NOTE: This function is obsolete; you are encouraged to use the command line wrapper Bio.Blast.Applications.BlastallCommandline instead. Execute and retrieve data from blastall. blastcmd is the command used to launch the 'blastall' executable. program...
def blastall(blastcmd, program, database, infile, align_view='7', **keywds): _security_check_parameters(keywds) att2param = { 'matrix' : '-M', 'gap_open' : '-G', 'gap_extend' : '-E', 'nuc_match' : '-r', 'nuc_mismatch' : '-q', 'query_genetic_code' : '-Q', ...
[ "def rpsblast(blastcmd, database, infile, align_view=\"7\", **keywds):\n\n import warnings\n warnings.warn(\"This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastrpsCommandline instead.\", PendingDeprecationWarning)\n _security_check_parameters(keywds)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute and retrieve data from standalone BLASTPGP as handles (OBSOLETE). NOTE: This function is obsolete; you are encouraged to use the command line wrapper Bio.Blast.Applications.BlastpgpCommandline instead. Execute and retrieve data from blastpgp. blastcmd is the command used to launch the 'blastpgp' executable. database...
def blastpgp(blastcmd, database, infile, align_view='7', **keywds): import warnings warnings.warn("This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastpgpCommandline instead.", PendingDeprecationWarning) _security_check_parameters(keywds) att2param = {...
[ "def rpsblast(blastcmd, database, infile, align_view=\"7\", **keywds):\n\n import warnings\n warnings.warn(\"This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastrpsCommandline instead.\", PendingDeprecationWarning)\n _security_check_parameters(keywds)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute and retrieve data from standalone RPSBLAST as handles (OBSOLETE). NOTE: This function is obsolete; you are encouraged to use the command line wrapper Bio.Blast.Applications.RpsBlastCommandline instead. Execute and retrieve data from standalone RPSBLAST. blastcmd is the command used to launch the 'rpsblast' executabl...
def rpsblast(blastcmd, database, infile, align_view="7", **keywds): import warnings warnings.warn("This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastrpsCommandline instead.", PendingDeprecationWarning) _security_check_parameters(keywds) att2param...
[ "def blastall(blastcmd, program, database, infile, align_view='7', **keywds):\n\n _security_check_parameters(keywds)\n\n att2param = {\n 'matrix' : '-M',\n 'gap_open' : '-G',\n 'gap_extend' : '-E',\n 'nuc_match' : '-r',\n 'nuc_mismatch' : '-q',\n 'query_genetic_code' ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start BLAST and return handles for stdout and stderr (PRIVATE). Expects a command line wrapper object from Bio.Blast.Applications.
def _invoke_blast(cline): import subprocess, sys blast_cmd = cline.program_name if not os.path.exists(blast_cmd): raise ValueError("BLAST executable does not exist at %s" % blast_cmd) #We don't need to supply any piped input, but we setup the #standard input pipe anyway as a work around for ...
[ "def main():\n args = parse_args()\n if args.log is None:\n logfile = sys.stdout\n else:\n logfile = args.log\n logging.basicConfig(\n filename=logfile,\n level=logging.DEBUG,\n filemode='w',\n format='%(asctime)s %(message)s',\n d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a parser that tries to catch BlastErrors.
def __init__(self, bad_report_handle = None): self._bad_report_handle = bad_report_handle #self._b_parser = BlastParser() self._scanner = _Scanner() self._consumer = _BlastErrorConsumer()
[ "def __init__(self, parser: Any = None):", "def test_create_new_gerber_parser(self):\n parser = Gerber()\n assert parser != None", "def _parse(self):\n try:\n # parse token stream into abstract syntax tree (AST)\n self._ast = self._rule_container()\n\n except Pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a handle, attempting to diagnose errors.
def parse(self, handle): results = handle.read() try: self._scanner.feed(File.StringHandle(results), self._consumer) except ValueError, msg: # if we have a bad_report_file, save the info to it first if self._bad_report_handle: # send the info ...
[ "def _diagnose_error(self, handle, data_record):\n line = handle.readline()\n\n while line:\n # 'Searchingdone' instead of 'Searching......done' seems\n # to indicate a failure to perform the BLAST due to\n # low quality sequence\n if line.startswith('Search...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempt to diagnose an error in the passed handle.
def _diagnose_error(self, handle, data_record): line = handle.readline() while line: # 'Searchingdone' instead of 'Searching......done' seems # to indicate a failure to perform the BLAST due to # low quality sequence if line.startswith('Searchingdone'): ...
[ "def _validate_handle(self):\n\n if self._handle is None:\n raise DriverException('No file handle')\n\n if self._handle.value == win_constants.INVALID_HANDLE.value:\n raise DriverException(\n 'Failed to open {}. GetLastError(): {}'.format(\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decrease the dataset size by trimming the requested classes down to a smaller size.
def cut_classes(self, dataset, classes, max_size, label): # Cherry picked classes class_dfs = [] for c in classes: picked_data = dataset.loc[(dataset.loc[:,label] == c),:].reset_index(drop=True) class_dfs.append(picked_data.loc[0:min(len(picked_data), max_size),:]) ...
[ "def equalize_class_sizes(data, classes):\n\tclasssizes = bincount(classes)\n\tprint(classsizes)\n\tmin_size = min(classsizes[nonzero(classsizes)]) #ignore classes with 0 entries\n\tprint(min_size)\n\tfilter = zeros(classes.shape, dtype = bool)\n\tfor cls in range(1, NCLASSES + 1):\n\t\tthis_cls = where(classes == ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save prediction results to csv file for visualisation purposes.
def save_prediction(self, meta, y_pred, y, filename): df = pd.DataFrame(meta) df['y_pred'] = y_pred df['y'] = y print(df) df.loc[:, 'id'] = df.index self.df_to_csv(df, filename, store_header=False)
[ "def export_predictions(self):\n with open('prediction/submission.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n for i in range(len(self.test_predictions)):\n writer.writerow([str(i) + \", \" + self.test_predictions[i]])", "def export...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload all files from folder to bucket
def _upload_dir_to_bucket(self, path, ext_path): for file in os.listdir(path): self._upload_to_bucket(path+'/'+file, ext_path+'/'+file)
[ "def upload_files(bucket, src):\n\n for dir_path, dir_name, file_name in os.walk(src):\n dir_name = dir_name\n for name in file_name:\n upload_to_s3(bucket, (os.path.join(dir_path, name)))\n #click.echo(os.path.join(dir_path, name))", "def upload_files_s3(files, bucket):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload file to bucket if bucket is set and ext_filename is not None
def _upload_to_bucket(self, filename, ext_filename): if ext_filename is None: return if self.s3: self.bucket.upload_file(filename, ext_filename) logging.info('Uploaded {} to S3 with name {}'.format(filename, ext_filename)) if self.gs: try: ...
[ "def _upload_to_bucket(self, filename, ext_filename):\n if ext_filename is None:\n return\n\n if self.s3:\n raise ValueError('S3 not implemented')\n if self.gs:\n try:\n client = storage.Client()\n bucket = client.get_bucket(self.bu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download all files from bucket and save them to 'local_path'
def _download_dir_from_bucket(self, ext_path, local_path, force=False): if os.path.exists(local_path) and not force: logging.info('Path {} already exists. Not overwriting...'.format(local_path)) return if os.path.exists(local_path) and force: logging.info('Path {} alr...
[ "def copyFiles(self):\n # Using makedirs as it's recursive\n if not os.path.exists(self.LOCAL_PATH):\n os.makedirs(self.LOCAL_PATH)\n for key_list in self.bucket_list:\n key = str(key_list.key)\n # Get the log filename (L[-1] can be used to access the last item in a list).\n fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download file from bucket and save it to 'local_filename'
def _download_from_bucket(self, ext_filename, local_filename, force=False): if os.path.exists(local_filename) and not force: logging.info('File {} already exists. Not overwriting...'.format(local_filename)) return if os.path.exists(local_filename) and force: logging.i...
[ "def _download_from_bucket(self, ext_filename, local_filename, force=False):\n if os.path.exists(local_filename) and not force:\n logging.info('File {} already exists. Not overwriting...'.format(local_filename))\n return\n if os.path.exists(local_filename) and force:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report CV results and save them to file
def report_cv_results(self, results, scores=['score'], filename=None, n_top=5): res = "" for score in scores: res += "{}\n".format(score) res += "-------------------------------\n" for i in range(1, n_top + 1): candidates = np.flatnonzero(results['ran...
[ "def _save_results(self):\n iofuncs.write_scores(self.outdir/rc.report_input_scores, self.inputs, sort=True)\n iofuncs.write_scores(self.outdir/rc.report_result_scores, self.results, sort=True)", "def save_results(self):\n\n save_results_path = os.path.join(\n self.logs_path,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locate droplets in a (potentially periodic) data set on a Cartesian grid. This function locates droplets respecting periodic boundary conditions.
def _locate_droplets_in_mask_cartesian( grid: CartesianGridBase, mask: np.ndarray ) -> Emulsion: if mask.shape != grid.shape: raise ValueError( f"The shape {mask.shape} of the data is not compatible with the grid " f"shape {grid.shape}" ) # pad the array to simulate ...
[ "def _locate_droplets_in_mask_cylindrical(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n if grid.periodic[1]:\n # locate droplets respecting periodic boundary conditions in z-direction\n\n # pad the array to simulate periodic bounda...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
locates droplets in a binary data set on a spherical grid
def _locate_droplets_in_mask_spherical( grid: SphericalSymGridBase, mask: np.ndarray ) -> Emulsion: assert np.all(mask.shape == grid.shape) # locate clusters in the binary image labels, num_labels = ndimage.label(mask) if num_labels == 0: return Emulsion([], grid=grid) # locate cluster...
[ "def _locate_droplets_in_mask_cylindrical_single(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n # locate the individual clusters\n labels, num_features = ndimage.label(mask)\n if num_features == 0:\n return Emulsion([], grid=grid)\n\n # locate clusters on the symmetry axis\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
locate droplets in a data set on a single cylindrical grid
def _locate_droplets_in_mask_cylindrical_single( grid: CylindricalSymGrid, mask: np.ndarray ) -> Emulsion: # locate the individual clusters labels, num_features = ndimage.label(mask) if num_features == 0: return Emulsion([], grid=grid) # locate clusters on the symmetry axis object_slice...
[ "def _locate_droplets_in_mask_cylindrical(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n if grid.periodic[1]:\n # locate droplets respecting periodic boundary conditions in z-direction\n\n # pad the array to simulate periodic bounda...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locate droplets in a data set on a (periodic) cylindrical grid. This function locates droplets respecting periodic boundary conditions.
def _locate_droplets_in_mask_cylindrical( grid: CylindricalSymGrid, mask: np.ndarray ) -> Emulsion: assert np.all(mask.shape == grid.shape) if grid.periodic[1]: # locate droplets respecting periodic boundary conditions in z-direction # pad the array to simulate periodic boundary conditions...
[ "def _locate_droplets_in_mask_cartesian(\n grid: CartesianGridBase, mask: np.ndarray\n) -> Emulsion:\n if mask.shape != grid.shape:\n raise ValueError(\n f\"The shape {mask.shape} of the data is not compatible with the grid \"\n f\"shape {grid.shape}\"\n )\n\n # pad the ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locates droplets in the phase field. This uses a binarized image to locate clusters of large concentration in the phase field, which are interpreted as droplets. Basic quantities, like position and size, are determined for these clusters.
def locate_droplets( phase_field: ScalarField, threshold: Union[float, str] = 0.5, modes: int = 0, minimal_radius: float = 0, refine: bool = False, interface_width: Optional[float] = None, ) -> Emulsion: assert isinstance(phase_field, ScalarField) dim = phase_field.grid.dim # dimensiona...
[ "def _locate_droplets_in_mask_cylindrical(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n if grid.periodic[1]:\n # locate droplets respecting periodic boundary conditions in z-direction\n\n # pad the array to simulate periodic bounda...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Refines droplet parameters by fitting to the phase field. This function varies droplet parameters, like position, size, interface width, and potential perturbation amplitudes, until the overlap with the respective phase field region is maximized. Here, we use a constrained fitting routine.
def refine_droplet( phase_field: ScalarField, droplet: DiffuseDroplet, least_squares_params: Optional[Dict[str, Any]] = None, ) -> DiffuseDroplet: assert isinstance(phase_field, ScalarField) if least_squares_params is None: least_squares_params = {} if not isinstance(droplet, DiffuseDro...
[ "def phase_fit(dbf, phase_name, symmetry, subl_model, site_ratios, datasets, refdata, aliases=None):\n if not hasattr(dbf, 'varcounter'):\n dbf.varcounter = 0\n # First fit endmembers\n all_em_count = len(list(itertools.product(*subl_model)))\n endmembers = sorted(set(\n canonicalize(i, sy...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the structure factor associated with a field. Here, the structure factor is basically the power spectral density of the field `scalar_field`, normalized so that regridding or rescaling the field does not change the result.
def get_structure_factor( scalar_field: ScalarField, smoothing: Union[None, float, str] = "auto", wave_numbers: Union[Sequence[float], str] = "auto", add_zero: bool = False, ) -> Tuple[np.ndarray, np.ndarray]: logger = logging.getLogger(__name__) if not isinstance(scalar_field, ScalarField): ...
[ "def scalar_potential(field, coord_sys):\n\n # Check whether field is conservative\n if not is_conservative(field):\n raise ValueError(\"Field is not conservative\")\n if field == Vector.zero:\n return S.Zero\n # Express the field exntirely in coord_sys\n # Substitute coordinate variabl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and return a stub test.
def CreateStubTest(phases=None, params=None): # pylint: disable=invalid-name test_metadata = htftest.TestMetadata('foo') # pylint: disable=protected-access if params is not None: test_metadata._parameter_list = ( parameters.TestParameterList(params.parameters)) return htftest.HTFTest(test_metadata,...
[ "def CreateStubTest(phases=None): # pylint: disable=invalid-name\n test_metadata = phase_data.TestMetadata('foo')\n return phase_data.phase_data(test_metadata, phases or [])", "def create_stub(cls, proto_py_module, stub_name):\n\n return cls.create_stubs(proto_py_module, stub_name)", "def create_teste...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logic which should be executed for the given 'rsm_ctx'.
def handle(self, rsm_ctx): pass
[ "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT", "def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Starting executing for \"list\" operation for get usage ...'\n )\n\n execution_id = rsm_ctx.run_execution(wait=False)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether this handler supports the 'rsm_ctx' type. The instance type should be None.
def can_handle(self, rsm_ctx): return not rsm_ctx.instance.type
[ "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT", "def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )", "def can_handle(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logic which should be executed for the given 'rsm_ctx'. Writes a log message that the type is unsupported.
def handle(self, rsm_ctx): rsm_ctx.log( 'info', 'Node instance has type with is not supported by ' 'Resource Management Plugin. Skipping' )
[ "def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type", "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT", "def handle(self, rsm_ctx):\n pass", "def check_r_type(r):\n if type(r) is str:\n raise TypeError('Get Error message.')", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether this handler supports the 'rsm_ctx' type. The instance type should be NODE_TYPE_PROJECT.
def can_handle(self, rsm_ctx): return rsm_ctx.instance.type == NODE_TYPE_PROJECT
[ "def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )", "def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type", "def can_handle(self, rsm_ctx):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logic which should be executed for the given 'rsm_ctx'. Runs resolve_project on 'rsm_ctx'.
def handle(self, rsm_ctx): rsm_ctx.log('info', 'Processing of project started') rsm_ctx.resolve_project()
[ "def handle(self, rsm_ctx):\n pass", "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_PROJECT", "def process( self ):\n\n if self.driver_conf == None:\n self.__panic( \"Missing DriverConf.py\" , \"\"\"Could not find module DriverConf.py in the slither d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set a value via the resource management context instance.
def _set_value(rsm_ctx, value, value_type, resource_name=None): value_dict = {value_type: value} if resource_name: value_dict['resource_name'] = resource_name rsm_ctx.log('debug', 'Setting {}', value_dict) rsm_ctx.set_value(**value_dict)
[ "def _set(cls, context):\n cls._local._context_instance = context", "def set_resource_data(self, resource, meta):", "def set_attribute(self, context: ResourceCommandContext, obj_ref: str, attr_name: str, attr_value: str) -> None:\n self.handler.set_attribute(obj_ref, attr_name, attr_value)", "def pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether this handler supports the 'rsm_ctx' type. The instance type should be NODE_TYPE_QUOTA.
def can_handle(self, rsm_ctx): return rsm_ctx.instance.type == NODE_TYPE_QUOTA
[ "def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type", "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE", "def can_handle(self, rsm_ctx):\n return super(OpenstackQuotaHandler, self).can_handle(rsm_ctx) and \\\n SYSTEM_NAME_OPENSTACK ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logic which should be executed for the given 'rsm_ctx'. Processes the quota state from properties and runs set_value on 'rsm_ctx'.
def handle(self, rsm_ctx): self._process_runtime_properties( rsm_ctx, rsm_ctx.instance.runtime_properties, self.VALUE_TYPE_QUOTA )
[ "def handle(self, rsm_ctx):\n pass", "def handle(self, rsm_ctx):\n runtime_properties = rsm_ctx.get_execution_result()\n\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_run...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether this handler supports the 'rsm_ctx' type. The instance type should be NODE_TYPE_USAGE.
def can_handle(self, rsm_ctx): return rsm_ctx.instance.type == NODE_TYPE_USAGE
[ "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT", "def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type", "def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether this handler supports the 'rsm_ctx' type. The instance type should be NODE_TYPE_USAGE.
def can_handle(self, rsm_ctx): return rsm_ctx.instance.type == NODE_TYPE_USAGE
[ "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT", "def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type", "def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logic which should be executed for the given 'rsm_ctx'. Runs an execution on 'rsm_ctx'.
def handle(self, rsm_ctx): rsm_ctx.log( 'info', 'Starting executing for "list" operation for get usage ...' ) execution_id = rsm_ctx.run_execution(wait=False) rsm_ctx.log( 'info', 'Execution started with ID: {} ...'.format(execution_id) ...
[ "def handle(self, rsm_ctx):\n pass", "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT", "def handle(self, rsm_ctx):\n runtime_properties = rsm_ctx.get_execution_result()\n\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties af...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logic which should be executed for the given 'rsm_ctx'. Processes the state from properties and runs set_value on 'rsm_ctx'.
def handle(self, rsm_ctx): runtime_properties = rsm_ctx.get_execution_result() rsm_ctx.log( 'info', 'Got {} runtime_properties after execution', runtime_properties.keys() ) self._process_runtime_properties( rsm_ctx, runtime_pr...
[ "def handle(self, rsm_ctx):\n pass", "def handle(self, rsm_ctx):\n self._process_runtime_properties(\n rsm_ctx,\n rsm_ctx.instance.runtime_properties,\n self.VALUE_TYPE_QUOTA\n )", "def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the key is in the SUPPRESS list.
def _suppress(self, key): return key in self.SUPPRESS
[ "def get_possible_keys(self):", "async def _keys_list(self, ctx):\n keys = await self.conf.keys()\n if not keys:\n return await ctx.send(\"No API keys are currently registered\")\n message = f\"The following keys are currently registered: {humanize_list(list(map(inline, keys.keys()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Translate a key via the TRANSLATE dict.
def _translate(self, key): return self.TRANSLATE.get(key, key)
[ "def translate(key, dictionary):\n if key in dictionary:\n return dictionary[key]\n if key in dictionary.values():\n return key\n raise Exception(\"no entry {} in dictionary {}\".format(key, dictionary))", "def _make_trans_from_dict(translations):\n\n from_str = ''\n to_str = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether this handler supports the 'rsm_ctx' type. The instance type should be NODE_TYPE_QUOTA, with SYSTEM_NAME_OPENSTACK in 'system_name'.
def can_handle(self, rsm_ctx): return super(OpenstackQuotaHandler, self).can_handle(rsm_ctx) and \ SYSTEM_NAME_OPENSTACK in rsm_ctx.instance.system_name
[ "def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_QUOTA", "def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type", "def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether this handler supports the 'rsm_ctx' type. The instance type should be NODE_TYPE_RESULT.
def can_handle(self, rsm_ctx): return rsm_ctx.instance.type == NODE_TYPE_RESULT
[ "def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type", "def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )", "def can_handle(self, rsm_ctx):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logic which should be executed for the given 'rsm_ctx'. Dumps the state to runtime properties.
def handle(self, rsm_ctx): rsm_ctx.log( 'info', 'Dumping gathered data to runtime_properties of {} node instance', rsm_ctx.instance.id ) rsm_ctx.add_result_instance_id() rsm_ctx.set_runtime_properties({ 'data': rsm_ctx.dump() })
[ "def handle(self, rsm_ctx):\n pass", "def handle(self, rsm_ctx):\n runtime_properties = rsm_ctx.get_execution_result()\n\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_run...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the ticket ID either from the existing subject line or from the uid. If the Subject line contains an ID, it is taken; if it doesn't, a new one is generated.
def determine_ticket_ID(self): hashid = hashids.Hashids(salt=self.config.idSalt, alphabet=self.config.idAlphabet, min_length=self.config.idMinLength) # See if hashid is set in headers if self.parsed["X-Jicket-HashID"] is not None: self.tickethash = self.parsed["X-Jicket-HashID"] ...
[ "def _get_ticket_id(self, str):\n pat = r'^\\s*#(\\d+)'\n try:\n return int(re.search(pat, str).group(1))\n except:\n return 0", "def ticket_id(self):\n return self._ticket_id", "def create_ticket(self, ticket):\r\n ticket_url = self._zendesk_instance.cre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert text bodies to text that can be attached to an issue
def textfrombodies(self) -> str: type_priority = ["plain", "html", "other"] # TODO: Make configurable for texttype in type_priority: if texttype == "plain" and texttype in self.textbodies: """Text is plain, so it can be used verbatim""" return self.textbodie...
[ "def raw_body_text(self) -> str:\n return '\\n'.join([para.text for para in self.body_text])", "def generate_body(issue):\n markdown = \"### {}\\n\".format(issue.pop('title'))\n for k, v in issue.iteritems():\n markdown += \"- {}: {}\\n\".format(k, v)\n return markdown", "def bodyToInt(te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save the given TF session at PATH = "./model/tmpmodel"
def _save_model(graph_or_sess): if isinstance(graph_or_sess, tf.Graph): ops = graph_or_sess.get_operations() for op in ops: if 'variable' in op.type.lower(): raise ValueError('Please input a frozen graph (no variables). Or pass in the session object.') wit...
[ "def save_session(self):\n\n # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(self.FLAGS.model_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fix the markdown links based on the pages that we know.
def _fix_links(self, text, page_names): for n in page_names: text = text.replace(f"]({n})", f"]({n}.html)") text = text.replace(f"]({n}.md)", f"]({n}.html)") return text
[ "def fixLinks():", "def on_page_markdown(self, markdown, **kwargs):\n for autolink in self.config[\"autolinks\"]:\n markdown = replace_autolink_references(markdown, autolink[\"reference_prefix\"], autolink[\"target_url\"])\n\n return markdown", "def fix_links(self):\n\n # 1. Loop...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split the markdown into parts based on sections. Each part is either text or a tuple representing a section.
def _split(self): text = self.md self.parts = parts = [] self.headers = headers = [] lines = [] # Split in parts for line in text.splitlines(): if line.startswith(("# ", "## ", "### ", "#### ", "##### ")): # Finish pending lines ...
[ "def split_into_sections(text):\n headings_regex = re.compile(\n r'^={1,6}.*?={1,6}(?: *<!--.*?-->)?\\s*$', flags=re.M\n )\n sections = list()\n last_match_start = 0\n for match in headings_regex.finditer(text):\n match_start = match.start()\n if match_start > 0:\n sec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate a redirected error response. All the URL components should match the original redirect_uri, with the exception of the parameters, which should contain an 'error' and an 'error_description' field of the provided types.
def assertValidRedirect(self, response, redirect_uri, expected_status_code, **kwargs): self.assertEqual(expected_status_code, response.status_code) # Split the url into parts. location = response.headers.get('Location') location_url = urlparse.urlparse(locati...
[ "def handle_error(self, error, response):\n query_params = {\"error\": error.error}\n\n query = urlencode(query_params)\n\n location = \"%s?%s\" % (self.client.redirect_uri, query)\n\n response.status_code = 302\n response.body = \"\"\n response.add_header(\"Location\", loc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This test ensures that the authorize request against the oauth endpoint succeeds with expected values.
def test_valid_authorize_request(self): random_state = six.text_type(uuid.uuid4()) # Simple GET with various parameters response = self.get_json(path='/openid/authorize', expect_errors=True, state=random_state, ...
[ "def test_o_auth2_authorize(self):\n pass", "def test_oauth(self):\n oauth_headers = self._get_oauth_headers(self.user)\n self.client.logout()\n response = self.client.get(self.path(), **oauth_headers)\n assert response.status_code == 200\n body = {'user_id': 'staff', 'ac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that an invalid response_type redirects back to the redirect_uri and provides the expected error response.
def test_authorize_invalid_response_type(self): invalid_params = self.valid_params.copy() invalid_params['response_type'] = 'invalid_code' # Simple GET with invalid code parameters random_state = six.text_type(uuid.uuid4()) response = self.get_json(path='/openid/authorize', ...
[ "def test_http_error_raise_with_redirect(self):\n\n resp = self.r(\n HTTPError(http_status.HTTP_201_CREATED, redirect_url='http://google.com/')\n )\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertEqual(302, resp.status_code)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that a nonexistent response_type redirects back to the redirect_uri and provides the expected error response.
def test_authorize_no_response_type(self): invalid_params = self.valid_params.copy() del invalid_params['response_type'] # Simple GET with invalid code parameters random_state = six.text_type(uuid.uuid4()) response = self.get_json(path='/openid/authorize', ...
[ "def test_authorize_invalid_response_type(self):\n invalid_params = self.valid_params.copy()\n invalid_params['response_type'] = 'invalid_code'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/autho...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that an invalid scope redirects back to the redirect_uri and provides the expected error response.
def test_authorize_invalid_scope(self): invalid_params = self.valid_params.copy() invalid_params['scope'] = 'invalid_scope' # Simple GET with invalid code parameters random_state = six.text_type(uuid.uuid4()) response = self.get_json(path='/openid/authorize', ...
[ "def test_authorize_invalid_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n invalid_params['redirect_uri'] = 'not_a_valid_uri'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/auth...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that a nonexistent scope redirects back to the redirect_uri and provides the expected error response.
def test_authorize_no_scope(self): invalid_params = self.valid_params.copy() del invalid_params['scope'] # Simple GET with invalid code parameters random_state = six.text_type(uuid.uuid4()) response = self.get_json(path='/openid/authorize', expec...
[ "def test_authorize_invalid_scope(self):\n invalid_params = self.valid_params.copy()\n invalid_params['scope'] = 'invalid_scope'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that an invalid redirect_uri returns a 400 message with the appropriate error message encoded in the body of the response.
def test_authorize_invalid_redirect_uri(self): invalid_params = self.valid_params.copy() invalid_params['redirect_uri'] = 'not_a_valid_uri' # Simple GET with invalid code parameters random_state = six.text_type(uuid.uuid4()) response = self.get_json(path='/openid/authorize', ...
[ "def test_invalid_redirect_fails(client, idp):\n response = client.get(\"/login/{}?redirect=https://evil-site.net\".format(idp))\n assert response.status_code == 400", "def assertHttpBadRequest(self, response):\r\n self.assertEqual(response.status_code, 400)", "def test_http_error_raise_with_redire...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that a nonexistent redirect_uri returns a 400 message with the appropriate error message encoded in the body of the response.
def test_authorize_no_redirect_uri(self): invalid_params = self.valid_params.copy() del invalid_params['redirect_uri'] # Simple GET with invalid code parameters random_state = six.text_type(uuid.uuid4()) response = self.get_json(path='/openid/authorize', ...
[ "def test_authorize_invalid_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n invalid_params['redirect_uri'] = 'not_a_valid_uri'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/auth...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the mock response from the openid endpoint to either true or false.
def _mock_response(self, mock_post, valid=True): mock_post.return_value.status_code = 200 if valid: mock_post.return_value.content = \ 'is_valid:true\nns:http://specs.openid.net/auth/2.0\n' else: mock_post.return_value.content = \ 'is_vali...
[ "def test_update_true(self, mock_decorator):\n BceInstitutionRepository.create(\n uai='0802145Z', is_institution=False)\n response = self.client.put(\n '/api/bce_institutions/0802145Z',\n content_type='application/json',\n headers={'Authorization': 'Bearer t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This test ensures that the access token request may execute properly with a valid token.
def test_valid_access_request(self): # Generate a valid auth token with base.HybridSessionManager(): authorization_code = auth_api.authorization_code_save({ 'user_id': 2, 'state': 'test_state', 'code': 'test_valid_code' }) ...
[ "def test_get_access_token(self):\n pass", "def test_read_o_auth_access_token(self):\n pass", "def test_create_o_auth_access_token(self):\n pass", "def test_access_token_get(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('reque...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that a newly created access token is valid regardless of the timezone in which storyboard is installed.
def test_valid_access_token_time(self): # Store the old TZ info, if it exists. old_tz = None if 'TZ' in os.environ: old_tz = os.environ['TZ'] # Convert now into every possible timezone out there :) for name in self.tested_timezones: # Override the 'defa...
[ "def test_expired_access_token_time(self):\n\n expired = datetime.datetime.now(pytz.utc) - datetime.timedelta(\n minutes=6)\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This test ensures that an access token is seen as expired regardless of the timezone in which storyboard is installed.
def test_expired_access_token_time(self): expired = datetime.datetime.now(pytz.utc) - datetime.timedelta( minutes=6) # Store the old TZ info, if it exists. old_tz = None if 'TZ' in os.environ: old_tz = os.environ['TZ'] # Convert now into every possible ...
[ "def test_valid_access_token_time(self):\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Ove...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This test ensures that invalid grant_type parameters get the appropriate error response.
def test_invalid_grant_type(self): # Generate a valid auth token with base.HybridSessionManager(): authorization_code = auth_api.authorization_code_save({ 'user_id': 2, 'state': 'test_state', 'code': 'test_valid_code', 'expires...
[ "def testInvalidGrantType(self):\n request = self.generateValidTokenRequest(arguments={'grant_type': b'grantType\\xFF\\xFF'},\n authentication=self._VALID_CLIENT)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This test ensures that a valid refresh token can be converted into a valid access token, and cleans up after itself.
def test_valid_refresh_token(self): # Generate a valid access code with base.HybridSessionManager(): authorization_code = auth_api.authorization_code_save({ 'user_id': 2, 'state': 'test_state', 'code': 'test_valid_code' }) ...
[ "def test_refresh_token_returns_access_token(self):\n payload = {\n 'email': 'teste@email.com',\n 'password': '12345678'\n }\n res = self.client.post(TOKEN_URL, payload)\n\n refresh_token = res.data['refresh']\n old_access_token = res.data['access']\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This test ensures that an invalid refresh token cannot be converted into a valid access token.
def test_invalid_refresh_token(self): content_type = 'application/x-www-form-urlencoded' # Generate an auth and a refresh token. resp_1 = self.app.post('/v1/openid/token', params={ 'refresh_token': 'invalid_refresh_token', ...
[ "def test_mail_client_invalid_refresh_token_expired_access_token(self):\n self.mail_client._client._expires_at = 1\n self.mail_client.token['refresh_token'] = 'invalidrefreshtoken'\n with self.assertRaises(InvalidGrantError):\n self.mail_client.request('GET', urljoin(BASE_URL, MOCK_E...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test retrieving all players
def test_retrieve_players(self): Player.objects.create(name='Mayita', victories=0, defeats=0) Player.objects.create(name='Moiso', victories=0, defeats=0) res = self.client.get(PLAYERS_URL) players = Player.objects.all().order_...
[ "def test_get_players_from_initial_table(self, client):\n resp = client.get('/player')\n json_data = resp.get_json()\n assert resp.status_code == 200\n assert json_data['success'] == 'true'\n assert len(json_data['players']) == 15", "def test_gridironfootballplayers_get(self):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test creating a new player
def test_create_player_successful(self): payload = {'name': 'Mayita', 'victories': 0, 'defeats': 0} self.client.post(PLAYERS_URL, payload) print('PLAYERS_URL: ',PLAYERS_URL) exists = Player.objects.filter( name=payload['name'] ).exists() self.assertTrue(exist...
[ "def test_03_create_players(self):\n tournament_handler = TournamentHandler()\n\n players = [\n Player('FALLEN \"O VERDADEIRO\"', datetime.datetime.now()),\n Player('COLDZERA', datetime.datetime.now()),\n Player('FNX', datetime.datetime.now()),\n Player('DEV...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test creating a new player with invalid payload
def test_create_player_invalid(self): payload = {'name': ''} res = self.client.post(PLAYERS_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
[ "def test_player_created(self):\n res = self.client().post('api/v1/players/new', headers={'Content-Type': 'application/json'}, data=json.dumps(self.player))\n json_data = json.loads(res.data)\n self.assertTrue(json_data.get('jwt_token'))\n self.assertEqual(res.status_code, 201)", "def test_create_play...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the basic info of the current tree.
def info(self): return nx.info(self.tree) # def children(self): """ Return the children of the current node. """ # return self.left, self.right
[ "def _info(self):\n return self._server.get_node_info(self.name)", "def getInfo(self):\n\t\treturn self.info", "def tree(self):\n return self._tree", "def return_tree(self):\n\n return self.tree, self.ParentMap", "def __str__(self):\n\t\treturn self.getFileStructureFromNodeAndBeyond(self.ro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of operators based on a depth-first search algorithm. The order is from left to right.
def dfs_operators(self, node=None): if not node: node = self.get_nodes_from_position('root')[0] bfs_all_nodes = list(nx.dfs_edges(self.tree, node)) operators = [i for i, j in bfs_all_nodes] # operators = [i for i, j in bfs_all_nodes if self.tree.out_degree(j) == 0] ...
[ "def _search_brother_ops(self, graph, op_node):\n visited = [op_node.idx()]\n stack = []\n brothers = []\n for op in graph.next_ops(op_node):\n if (op.type() != 'conv2d') and (op.type() != 'fc') and (\n not op._is_bwd_op()):\n stack.append(op)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of nodes at the given position.
def get_nodes_from_position(self, position=None): return [nodes for nodes, positions in self.tree.nodes(data=True) if positions["position"] == position]
[ "def get_nodes(self):\n return self.node_list", "def get_nodes(self):\n\n nodes = []\n\n if not self.node:\n return nodes\n \n nodes.extend(self.node.left.get_nodes())\n nodes.append(self.node.vp)\n nodes.extend(self.node.right.get_nodes())\n\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a float value of the inclusion probability. pi_i = n / N, where n = |sequence| = |s| and N = |population| = |M|
def inclusion_probability(M, s): # initialize a dictionary to store events with inclusion probabilites s_with_inclusion_probabilites = {} # calculating the events of intersection and difference. V = [i for i in M.tree.nodes() if not (i.__contains__('parallel') or i.__contains__('series'))] s_in_M ...
[ "def calc_pi(n=100):\n\n hit = 0\n for i in range(1, n):\n x = random.uniform(-1, 1)\n y = random.uniform(-1, 1)\n if x**2 + y**2 <= 1:\n hit += 1\n return float(hit)*4/n", "def probability_of_generating_containing_events(M, s):\n\n # initialize the probabilities of gen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a float value of the probability of generating containing events. f(p_v, s) = product_{v in s}(p_v) * product_{v not in s}(1 - p_v)
def probability_of_generating_containing_events(M, s): # initialize the probabilities of generating containing events. f = 1 s_with_inclusion_probabilities = inclusion_probability(M, s) for v, p in s_with_inclusion_probabilities.items(): f *= p return f
[ "def inclusion_probability(M, s):\n\n # initialize a dictionary to store events with inclusion probabilites\n s_with_inclusion_probabilites = {}\n\n # calculating the events of intersection and difference.\n V = [i for i in M.tree.nodes() if not (i.__contains__('parallel') or i.__contains__('series'))]\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an int value of the number of completed extensions of the SP-order. We store the number of extensions of each subtree (sub-partial order) so that each node carries its partial-order information together with its current number of extensions.
def number_of_extensions(M, root=None): sp_order_formula = [i for i in M.series_partial_order_representation(root) if (i.__contains__('series') or i.__contains__('parallel'))] while sp_order_formula: # Extend the children of the current operator operator = sp_order_for...
[ "def get_number_of_parts(score): \n number_of_parts = 0\n for e in score.recurse().parts:\n number_of_parts = number_of_parts + 1\n\n return( number_of_parts ) # get_number_of_parts ", "def total_length(self):\n # YOUR CODE HERE\n return sum([path.total_length for path in self.su...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a random number of closes based on close_parens_probabilities. close_parens_probabilities defaults to [0.772, 0.206, 0.021, 0.001]. This is roughly equivalent to each selection coming from a binomial distribution with n=4 and p=1/16.
def generate_close_count(self): prob = random.random() close_probabilities = reductions( lambda i, j: i + j, self.close_parens_probabilities ) + [1.0] parens = 0 while prob > close_probabilities[1]: parens += 1 del close_probabilit...
[ "def weighted_random_choice(probs):\n # Verify probs is normalized\n assert np.sum(probs) == 1\n\n cs = np.cumsum(probs)\n\n r = np.random.random()\n\n ret = np.where(cs <= r)[0].shape[0]\n #print(ret)\n\n return ret", "def random_coefficients(self, n=3, max_range = 10):\n return np.ra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts an atom into a plush gene.
def atom_to_plush_gene(self, atom): is_literal = False proc_atom = None if callable(atom): # If it is callable, then it is likely a function that will # produce a literal. fn_element = atom() if callable(fn_element): # It's another function! ...
[ "def convert_atom(self, atom):\n\t\treturn atom", "def convert_protein_to_gene(self, identifier):\n gene_id = None\n if identifier.startswith('UniProtKB'):\n identifier = identifier.split(':', 1)[1]\n\n try:\n results = self.mygene_client.query(identifier, fields='HGNC')...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }