Dataset columns: query (string, lengths 9 – 9.05k), document (string, lengths 10 – 222k), negatives (list, lengths 19 – 20), metadata (dict)
This function returns the from_vegref values from the converted DAU data. Returns a list
def get_from_vegref(file_name_path):
    open_data = json.load(open(file_name_path))
    from_vegrefs = []

    for feature in open_data["features"]:
        properties = feature["properties"]
        from_vegrefs.append(str(properties["from_vegref"]))
    return convert_vegref(from_vegrefs)
[ "def get_to_vegref(file_name_path):\n open_data = json.load(open(file_name_path))\n to_vegrefs = []\n\n for feature in open_data[\"features\"]:\n properties = feature[\"properties\"]\n to_vegrefs.append(str(properties[\"to_vegref\"]))\n return convert_vegref(to_vegrefs)", "def get_refere...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list containing the to_vegref data from a JSON file.
def get_to_vegref(file_name_path):
    open_data = json.load(open(file_name_path))
    to_vegrefs = []

    for feature in open_data["features"]:
        properties = feature["properties"]
        to_vegrefs.append(str(properties["to_vegref"]))
    return convert_vegref(to_vegrefs)
[ "def get_from_vegref(file_name_path):\n\n open_data = json.load(open(file_name_path))\n from_vegrefs = []\n\n for feature in open_data[\"features\"]:\n properties = feature[\"properties\"]\n from_vegrefs.append(str(properties[\"from_vegref\"]))\n return convert_vegref(from_vegrefs)", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start SimpleHTTPServer as a background process from rootDir on the given component. The webserver will listen on port and if specified, output will be redirected to logDir.
def start( self, component, rootDir, port=8000, logDir=None ): retValue = main.TRUE self.rootDir = rootDir try: # Save component for this instance so other functions can use it self.component = component main.log.info( "Starting SimpleHTTPServer on " + compone...
[ "def start():\n port = cfg.web.port\n\n events.dispatcher.register_target(event_logger)\n\n logging.info('Starting web server: port=%d' % port)\n utils.DaemonThread(target=bottle.run,\n kwargs={'host': cfg.web.bind,\n 'port': cfg.web.port}).start()", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate custom metadata file in the root directory using the custom onosgenpartitions file which should also be located in the root directory. Note that this function needs to be run after the start function has been called for this instance.
def generateFile( self, nodes, equal=False, filename="cluster.json" ): retValue = main.TRUE try: if self.component.handle: assert self.component, "Component not specified. Please start the server first" assert self.rootDir, "Root directory not found" ...
[ "def post_build_write_partitions(self):\n import yaml\n\n if self.database.exists():\n partitions = [\n p.identity.name.partital_dict for p in self.partitions]\n\n else:\n partitions = []\n\n fn = self.filesystem.path('meta', 'partitions.yaml')\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function, considering a given headline, applies methods to generate new expressions based on proverbs and chosen words (computed by different methods).
def headline_generator_v2(headline, use_expressions, model, dict_forms_labels, dict_lemmas_labels, gen_method, headline_keywords=None, shorter_expressions=None): print("[START] ", headline, "["+gen_method+"]") all_generated_expressions = [] if not headline_keywords: print...
[ "def new_headline(head1, head2):\n print(head1, \" | \", head2)\n pn1 = get_ps(head1, list(['NNP', 'NNPS']))\n pn2 = get_ps(head2, list(['NNP', 'NNPS']))\n adj1 = get_ps(head1, list(['JJ']))\n adj2 = get_ps(head2, list(['JJ']))\n if random.choice([True, False]):\n pn_reps = list(zip(pn1, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Go to vault, get our login credentials and return a dict properly formatted for authenticating with the web site.
def get_login_credentials():
    hvac_client = get_hvac_client()
    login_credentials = hvac_client.secrets.kv.v1.read_secret(
        VAULT_SECRETS_PATH
    )
    return login_credentials["data"]
[ "def authenticate():\n\n if settings.user_login == 'read_only':\n log.error('Write access denied for read_only user.')\n sys.exit(1)\n else:\n log.info('Authenticating login: %s' % (settings.user_login))\n if settings.user_login == 'kaboom':\n password = 'password'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles the event that a new rte list has been published. If there is a rte with type 'unknown', further processing is canceled as the rte monitor does not deliver usable values yet
def __delegate_rte_list_received(self, rte_list): received_rte_list = list(rte_list.runtimeEvidences) # rte monitor cannot provide usable values, yet. if any((rte.type == 'unknown' or rte.type == 'omission') for rte in received_rte_list): if debug_mode: rospy.loginf...
[ "async def checkNew(self):\n if self.source:\n items = self.source.getRecent()\n items.reverse()\n if items:\n for item in items:\n if item.title not in self.cache:\n logger.info(f'New entry from {str(self.source)}: {it...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a relative path starting from the game_path.
def abs_path_to_game_path_rel(game_path, abs_path):
    if not game_path or not abs_path:
        return None
    rel_path = None
    i = abs_path.find(game_path)
    if i != -1 and i == 0:
        rel_path = abs_path[len(game_path):]
    return rel_path
[ "def rel_to_abs(path):\r\n current_dir = os.path.abspath(os.path.dirname(__file__))\r\n return os.path.join(current_dir, path)", "def makefullpath(path):\n try:\n os.makedirs(os.path.split(path)[0])\n except:\n pass", "def get_full_path(relative_path, package=\"chemper\"):\n if os.p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register the User/Poll Pair. Register the User/Poll pair and assign them a UUID that can be used to identify them in subsequent requests.
async def register_poll(user_credentials: JotFormCredentials): app_key, poll_id = user_credentials.appKey, user_credentials.pollID if possible_uuid := redis.get(f"{app_key}-{poll_id}"): return {"uuid": possible_uuid} # If the user is already registered # Do not re-register them. ...
[ "def register_user(self, user_info) -> Dict:\n raise NotImplementedError", "def register(self, nickname, passwordHash):\n # Checks in the DB that the nickname was not already used. If ok, create\n # the player in the DB.\n if self.playersColl.find_one({'nickname': nickname}) == None:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Proxy form submission requests Proxy the form submission requests to the JotForm API, return the responses verbatim. For more information on request and response formats,
async def proxy_submit_submission(uuid: str, submission: list[dict[str, Union[dict, list]]]): credentials = redis.get(uuid) if credentials is None: raise HTTPError(401, "Unauthorised request") app_key, poll_id = credentials.decode("utf-8").split("-") # Get back our credentials. ...
[ "async def process_post(self, form: dict) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('_BaseAgent.process_post: >>> form: {}'.format(form))\n\n validate_form(form, self.cfg.get('proxy-relay', False))\n\n if form['type'] == 'agent-nym-lookup':\n resp_proxy_j...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Proxy form question requests Proxy the requests to get all the questions of a form to the JotForm API, return the responses verbatim. For more information on request and response formats,
async def proxy_get_questions(uuid: str): credentials = redis.get(uuid) if credentials is None: raise HTTPError(401, "Unauthorised request.") app_key, poll_id = credentials.decode("utf-8").split( "-") # Get back user credentials. reply = get(f"https://api.jotform...
[ "def do_answer():\n global nextQuestion, responses\n answer = request.form['answer']\n responses.append(answer)\n nextQuestion += 1\n if nextQuestion >= len(survey.questions):\n return redirect(\"/thanks\")\n return redirect(f\"/questions/{nextQuestion}\")", "def __init__(self, question):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the poll stats. Fetch all the answers from the JotForm API and convert them to cumulative statistics.
async def get_poll_stats(uuid: str): credentials = redis.get(uuid) if credentials is None: raise HTTPError(401, "Unauthorised request.") app_key, poll_id = credentials.decode("utf-8").split( "-") # Get back user credentials. submissions = get_submissions(poll_id,...
[ "def get_results(poll):\n\n assert poll is not None, \"Invalid poll: None\"\n\n if not poll['closed']:\n return None\n\n results = {}\n\n # Get cached results\n results_db = get_entries('results', 'poll', poll['uid'])\n\n # If no cache, compute the results and store them\n if len(results...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts nordpool data into NVE format. Only cities with an assigned "area number" will be extracted. See constants.NP2PO.
def standardize_nordpool(df: pd.DataFrame) -> pd.DataFrame: std_rep = [] for _, row in df.iterrows(): for city, area_num in NP2PO.items(): data_item = { DATE: row['Date'], AREA_NUM: area_num, "Price": row[city], "Unit": "NOK/MWh...
[ "def pipeline():\n f = Dataset('/Users/akapadia/Scratch/SpaceApps/data/cru_vap_clim_1991-2000.nc', 'r')\n time = f.variables['time'][:]\n lons = f.variables['longitude'][:]\n lats = f.variables['latitude'][:]\n vap = numpy.array(f.variables['vap'][:])\n units = f.variables['vap'].units\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the links from assets (nordpool_files.yml)
def _load_nordpool_links() -> List[str]:
    data = yaml.safe_load(pkgutil.get_data(__name__, "assets/nordpool_files.yml"))
    return data.get('files', [])
[ "def links():\n links_list = tasks.json_list(os.path.join(pathlib.Path(__file__).parent.absolute(),'static/links.json'))\n return render_template('links.html',title='collegeSmart - Helpful Links',links=links_list)", "def load_links():\n # if .hn doesn't exist, return empty list\n if not os.path.isfile...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the cache key for assemble status. ``task`` must be one of the ``AssembleTask`` values. The scope can be the identifier of any model, such as the organization or project that this task is performed under. ``checksum`` should be the SHA1 hash of the main file that is being assembled.
def _get_cache_key(task, scope, checksum):
    return 'assemble-status:%s' % hashlib.sha1(b'%s|%s|%s' % (
        str(scope).encode('ascii'),
        checksum.encode('ascii'),
        task,
    )).hexdigest()
[ "def get_assemble_status(task, scope, checksum):\n cache_key = _get_cache_key(task, scope, checksum)\n rv = default_cache.get(cache_key)\n if rv is None:\n return None, None\n return tuple(rv)", "def hash_task(self, _task):\n ht = hashlib.blake2b(digest_size=12)\n ht.update(_task....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks the current status of an assembling task. Returns a tuple in the form ``(status, details)``, where ``status`` is the ChunkFileState, and ``details`` is either None or a string containing a notice or error message.
def get_assemble_status(task, scope, checksum):
    cache_key = _get_cache_key(task, scope, checksum)
    rv = default_cache.get(cache_key)
    if rv is None:
        return None, None
    return tuple(rv)
[ "def build_status():\r\n from SCons.Script import GetBuildFailures\r\n bf = GetBuildFailures()\r\n if bf:\r\n # bf is normally a list of build failures; if an element is None,\r\n # it's because of a target that scons doesn't know anything about.\r\n status = 'failed'\r\n failur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the status of an assembling task. It is cached for 10 minutes.
def set_assemble_status(task, scope, checksum, state, detail=None):
    cache_key = _get_cache_key(task, scope, checksum)
    default_cache.set(cache_key, (state, detail), 600)
[ "def update_task_state():\n task_data = json.loads(request.data)\n task_id = task_data['taskId']\n task_to_update = Task.query.get(task_id)\n\n if task_to_update: \n task_to_update.finished = not task_to_update.finished\n db.session.commit()\n \n return jsonify({})", "def update...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return percentage of background annotations.
def bg_perc(self):
    if self._bg_perc is None and self.task == 'preddet':
        return 0.0
    if self._bg_perc is None:
        return 1.0
    return self._bg_perc
[ "def getSegmentationProgessInPercentage(self) -> int:\n segmentedCount = self.getNumOfSegmented()\n float_Num = segmentedCount / self.getTotalNumImages()\n return int(float_Num * 100)", "def get_background(self):\r\n\r\n\t\tif len(self) > 0:\r\n\t\t\tglobal_bg = np.array([0] * len(self[0].bg)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return path of stored dataset images.
def orig_img_path(self):
    _dataset = 'VG' if 'VG' in self.dataset else self.dataset
    return osp.join(self.prerequisites_path, _dataset, 'images', '')
[ "def getDataPath(self, img): \n if self.__singleMode:\n return os.path.join(GG.utils.DATA_PATH, img)\n else:\n pathFile = os.path.join(GG.utils.LOCAL_DATA_PATH, img)\n if not os.path.isfile(pathFile):\n imgData = self.__system.getResource(img) \n if imgData:\n if not os....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return upper limit of examined relations in a train image.
def relations_per_img_limit(self): return 2000
[ "def MaximumThreshold(self) -> int:", "def upper_limit_points(self):\n return self._upper_limit_points", "def max_size(self):\n return self.info_sliced.largest_intermediate", "def getJointUpperLimit(self, jointName):\n return self.getJointInfo(jointName)[9]", "def get_upperbound(self) -...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the contents of poscar to filename.
def write_POSCAR(poscar,filename): global hashes f=StringIO() f.write("1.0\n") for i in range(3): f.write("{0[0]:>20.15f} {0[1]:>20.15f} {0[2]:>20.15f}\n".format( (poscar["lattvec"][:,i]).tolist())) f.write("{0}\n".format(" ".join(poscar["elements"]))) f.write("{0}\n"...
[ "def __writeToFile(self, score):\n with open(self.file, \"w\") as f:\n f.write(str(score))", "def save(self, filename):\n\t\tself.getZ().write(filename)", "def write_file(self, filepath, contents):\n with open(filepath, 'w') as f:\n f.write(contents.getvalue())", "def to_fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a copy of poscar with each atom iat displaced by a random amount (in Å) along its icoord-th Cartesian coordinate.
def move_atoms(poscar): nruter=copy.deepcopy(poscar) displist = np.array([0.,0,0]) ntot = nruter["positions"].shape[1] for iat in range(ntot): disp = (0.01 * np.random.randn(3) + 0.03) * random_sign() displist = np.vstack((displist,disp)) # will the vstack here drag the speed? ...
[ "def initiate_atoms_randomly(self,quantitiy):\n coords=[]\n for i in range(np.shape(self.grid)[0]):\n for j in range(np.shape(self.grid)[1]):\n coords.append([i,j])\n np.random.shuffle(coords)#shuffles created coords without repetitions\n for i in range(quantiti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process a batch of microversion tasks and commit them.
def process_data(max_tasks=DEFAULT_MAX_TASKS, allow_transient_errors=False): vcs = versions.VersionControlService() queue = taskqueue.Queue(TASKQUEUE_NAME) # The size of this list will be O(max changes of the same file path). # A new changeset is added for each change to the same file, within the set # of le...
[ "def _submit_tasks(\n self,\n taskgroup_uuid: uuid.UUID | None,\n endpoint_uuid: uuid.UUID,\n user_endpoint_config: dict | None,\n futs: list[ComputeFuture],\n tasks: list[_TaskSubmissionInfo],\n ):\n if taskgroup_uuid is None and self.task_group_id:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Like process_data, but with exponential backoff.
def process_data_with_backoff( timeout_seconds=DEFAULT_PROCESSING_TIMEOUT_SECONDS, max_tasks=DEFAULT_MAX_TASKS): results = utils.run_with_backoff( func=process_data, runtime=timeout_seconds, max_tasks=max_tasks, allow_transient_errors=True) for result in results: if result is not...
[ "def _procces_in_batch(self) -> None:\n if not self._handler:\n raise HandlerNotSet()\n\n start_at = time.time()\n buffer = self._wait_buffer_ready()\n elapsed_time = time.time() - start_at\n\n # When _wait_for_ready_buffer is stopped buffer could be empty\n # a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to terminate a student and change the state to 'terminate'.
def save_terminate(self): student_rec = self.env["student.student"].browse( self._context.get("active_id") ) student_rec.write( { "state": "terminate", "terminate_reason": self.reason, "active": False, } ...
[ "def terminate(self):\n self.clear_current_data()\n self.terminated = True", "def terminate(self) -> None:\r\n self.__state__ = JobState.TERMINATE\r\n log_to_console(self.get_echo())", "def save_terminate(self):\n student_obj = self.env['student.student'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the gradient of input samples.
def _gradient(self, inputs, labels): sens = Tensor(np.array([1.0], inputs.dtype)) # get grad of loss over x out_grad = self._loss_grad(Tensor(inputs), Tensor(labels), sens) if isinstance(out_grad, tuple): out_grad = out_grad[0] gradient = out_grad.asnumpy() i...
[ "def gradient(x):\n\t\tpass", "def _gradient(self, _x, _y):\n ### YOUR CODE HERE\n dl_dwx = self.softmax(_x) - _y\n dl_dx = np.matmul(_x.reshape(self.n_features,1), dl_dwx.reshape(1,self.k))\n _g = dl_dx\n return _g\n ### END YOUR CODE", "def grad_input(self, x):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test conversion of xml string to dict
def test_xml_to_dict(self):
    expected = self.data
    result = convert.xml_to_dict(self.xml)
    self.assertEqual(expected, result)
[ "def test_xmldict(self):\n xml = self.xml_data\n from_string = xmldict.xml_to_dict(xml)\n\n d = {'contact': {'fname': 'Joe', 'lname': 'Smith'},\n 'query': {'field': 'ass', 'where': 'ass'}}\n\n from_dict = xmldict.dict_to_xml(d)\n\n # print the dict created from xml str...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test conversion of dict to xml string
def test_dict_to_xml(self):
    expected = self.xml
    result = convert.dict_to_xml(self.data)
    self.assertEqual(expected, result)
[ "def test_xmldict(self):\n xml = self.xml_data\n from_string = xmldict.xml_to_dict(xml)\n\n d = {'contact': {'fname': 'Joe', 'lname': 'Smith'},\n 'query': {'field': 'ass', 'where': 'ass'}}\n\n from_dict = xmldict.dict_to_xml(d)\n\n # print the dict created from xml str...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test xml > dict > xml
def test_xml_reconvert(self):
    _dict = convert.xml_to_dict(self.xml)
    result = convert.dict_to_xml(_dict)
    expected = self.xml
    self.assertEqual(expected, result)
[ "def test_xmldict(self):\n xml = self.xml_data\n from_string = xmldict.xml_to_dict(xml)\n\n d = {'contact': {'fname': 'Joe', 'lname': 'Smith'},\n 'query': {'field': 'ass', 'where': 'ass'}}\n\n from_dict = xmldict.dict_to_xml(d)\n\n # print the dict created from xml str...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the timestamp currently stored in the SSM parameter.
def get_timestamp(self):
    param = self.client.get_parameter(Name=self.param_name)
    timestamp = param['Parameter']['Value']
    return timestamp
[ "def timestamp(self) -> datetime:\n return self.context['embryo'].get('timestamp')", "def _get_timestamp(self):\n return datetime.datetime.now()", "def timestamp(self) -> Decimal:\n return self.__dict__[\"timestamp\"]", "def timestamp(self) -> int:\n return self.summary[\"timestamp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an entry is added to the SiteConfigurationHistory model each time a new SiteConfiguration is added.
def test_site_configuration_post_save_receiver(self): # add SiteConfiguration to database site_configuration = SiteConfigurationFactory.create( site=self.site, ) # Verify an entry to SiteConfigurationHistory was added. site_configuration_history = SiteConfigurationHi...
[ "def test_site_configuration_post_update_receiver(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n site_configuration.site_values = {'test': 'test'}\n site_configuration.save()\n\n # Ve...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an entry is added to SiteConfigurationHistory each time a SiteConfiguration is updated.
def test_site_configuration_post_update_receiver(self): # add SiteConfiguration to database site_configuration = SiteConfigurationFactory.create( site=self.site, ) site_configuration.site_values = {'test': 'test'} site_configuration.save() # Verify an entry ...
[ "def test_site_configuration_post_save_receiver(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteCon...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an entry is NOT added to SiteConfigurationHistory each time a SiteConfiguration is updated with save_siteconfig_without_historical_record().
def test_site_configuration_post_update_receiver_with_skip(self): # Add SiteConfiguration to database. By default, the site_valutes field contains only "{}". site_configuration = SiteConfigurationFactory.create( site=self.site, ) # Update the SiteConfiguration we just creat...
[ "def test_site_configuration_post_save_receiver(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteCon...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an entry is not added to SiteConfigurationHistory if there is an error while saving SiteConfiguration.
def test_no_entry_is_saved_for_errors(self): # add SiteConfiguration to database site_configuration = SiteConfigurationFactory.create( site=self.site, ) # Verify an entry to SiteConfigurationHistory was added. site_configuration_history = SiteConfigurationHistory.obj...
[ "def test_site_configuration_post_save_receiver(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteCon...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that get_all_orgs returns all orgs from site configuration.
def test_get_all_orgs(self): expected_orgs = [self.test_config1['course_org_filter'], self.test_config2['course_org_filter']] # add SiteConfiguration to database SiteConfigurationFactory.create( site=self.site, site_values=self.test_config1 ) SiteConfigura...
[ "def test_get_all_orgs_returns_only_enabled(self):\n expected_orgs = [self.test_config2['course_org_filter']]\n # add SiteConfiguration to database\n SiteConfigurationFactory.create(\n site=self.site,\n site_values=self.test_config1,\n enabled=False,\n )\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that get_all_orgs returns only those orgs whose configurations are enabled.
def test_get_all_orgs_returns_only_enabled(self): expected_orgs = [self.test_config2['course_org_filter']] # add SiteConfiguration to database SiteConfigurationFactory.create( site=self.site, site_values=self.test_config1, enabled=False, ) Site...
[ "def test_get_all_orgs(self):\n expected_orgs = [self.test_config1['course_org_filter'], self.test_config2['course_org_filter']]\n # add SiteConfiguration to database\n SiteConfigurationFactory.create(\n site=self.site,\n site_values=self.test_config1\n )\n S...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A tuple (width,height) in pixels of a movie frame.
def frame_size(self): return self.display.width, self.display.height
[ "def frameSize(self):\n size = None\n if self.isVideo():\n if 'width' in self.__dict__ and 'height' in self.__dict__:\n try:\n size = (int(self.__dict__['width']),int(self.__dict__['height']))\n except Exception as e:\n pas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Grab the image information from the display and save as a movie frame. The keyword arguments are not being used in the subclass.
def grab_frame(self, **savefig_kwargs): try: image = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data() self._proc.stdin.write(image.get_data('RGBA', -4 * self.display.width)) except RuntimeError: ...
[ "def saveFrame(self, filename):\n\t\tself.frameList.append(filename)\n\t\tvisualizer = self.visualizer\n\t\timageType = self.imageType\n\t\tLogging.info(\"Saving screenshot to \", filename, kw = \"visualizer\")\n\t\tcomm = \"visualizer.getCurrentMode().saveSnapshot(filename)\"\n\t\teval(comm)", "def saveFrame(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calculates h_t = h_{t-1} + d * (w_t - a*f_t / (r*pi^2) - h_{t-1}) and appends it to the head history
def _update_head(self):
    wl = self.wl + self.seasonal_component()
    prev_h = self.head_history[-1]
    new_h = prev_h + self.d * (wl - self.flow_component() - prev_h)
    self.head_history.append(new_h)
[ "def updateMotionHistory(silhouette, mhi, timestamp, duration):\n pass", "def history_RewardTensor(env, h):\n StateHists = StateHistsIx(env, h)\n Zh = len(StateHists)\n dims = list(env.R.shape)\n dims[1] = Zh\n dims[-1] = Zh\n\n Rh = np.zeros(dims)\n for h, hist in enumerate(StateHists):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert timestep to datetime and return (month, weekday, hour)
def t_to_month_weekday_hour(self):
    dt = self.t0_datetime + timedelta(minutes=self.t * self.ts_size)
    return (int(dt.month), int(dt.weekday()), int(dt.hour))
[ "def transform_date(observation):\n \n date_ = observation.get(\"Date\")\n \n try:\n date = pd.Timestamp(date_)\n hour = date.hour\n month = date.month\n day_of_week = date.day_name()\n except:\n hour = 0\n month = 0\n day_of_week = '' \n\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
testing the surround() method
def test1(self): self.field.surround() self.assertEqual(self.field, [ [None, None, None, None, None], [None, 1 , 2 , 3 , None], [None, 4 , 5 , 6 , None], ...
[ "def test_surround_adds_two_extra_lines(self):\n label_surrounded_string = self.g._surround_with_label(\n self.graph_string,\n 100,\n 4,\n 0,\n 1.29,\n 2.5\n )\n assert len(label_surrounded_string.splitlines()) == len(self.graph_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show the image of each connector of a KSK
def show_KSK_images(self, index: int = None): KSK_name = "" if type(index) == int and 0 <= index < self.list_widget.count(): KSK_name = self.list_widget.item(index).text() self.list_widget.selectedItems().clear() self.list_widget.setCurrentRow(index) e...
[ "def show(self):\n plt.figure(randint(0, 256))\n plt.imshow(self.image,)\n plt.xticks([]), plt.yticks([])\n plt.show()", "def plot_pair_img_label(img,label,figsize=(8,4)):\n\n fig=plt.figure(figsize=figsize)\n columns = 2\n rows = 1\n\n\n fig.add_subplot(rows, columns, 1)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fill up the KSK list widget with KSK names
def fill_KSK_list_widget(self): self.list_widget.clear() search_query = self.search_box.text() KSK_names, dates = search_for_KSK(search_query) if self.cb.count() == 0: self.cb.addItem('Filter by date') self.cb.addItems({date for date in dates.keys()}) ...
[ "def _populateEntries(self):\n\n widgets = self.setting.getWidgetList()\n\n # we only need the list of names\n names = list(widgets.keys())\n names.sort()\n\n utils.populateCombo(self, names)", "def show_KSK_images(self, index: int = None):\r\n KSK_name = \"\"\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply correction factors (in place) to PM2.5 data in data_list
def applyCorrectionFactorsToList(data_list, pm25_key=None): # Open the file and get correction factors with open(getenv("CORRECTION_FACTORS_FILENAME")) as csv_file: read_csv = csv_reader(csv_file, delimiter=',') rows = [row for row in read_csv] header = rows[0] rows = rows[1...
[ "def apply_correction(data):\r\n \r\n \r\n arduinos = data.keys()\r\n \r\n temp_correction = {1: 0.09, 2: 0.10, 3: -0.02, 4: -0.23, 5: -0.20,\r\n 6: 0.05, 7: 0.15, 8: 0.12, 9: -0.10, 10: 0.11,\r\n 11: 0.0}#-0.08}\r\n temp_bias = 0.4896611061095239\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if latitude is valid
def verifyLatitude(lat:float) -> bool: return (-90 <= lat <= 90)
[ "def check_latitude(lat):\n if lat < -90 or lat > 90:\n raise ValueError(\"Latitude (%f) out of range [-90 - 90]\" % lat)", "def invalidLatitude(latitude):\n\ttry:\n\t\tlatitude = float(latitude)\n\texcept ValueError:\n\t\treturn True\n\n\tif (-90 <= latitude <= 90):\n\t\treturn False\n\telse:\n\t\tretu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if longitude is valid
def verifyLongitude(lon:float) -> bool: return (-180 <= lon <= 180)
[ "def check_longitude(lon):\n if lon < -180 or lon > 180:\n raise ValueError(\"Longitude (%f) out of range [-180, 180]\" % lon)", "def check_longitude(self, ds):\n ret_val = []\n\n recommended = 'degrees_east'\n acceptable = ['degree_east', 'degree_E', 'degrees_E', 'degreeE', 'degree...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if lat/lon are valid
def verifyLatLon(lat:float, lon:float) -> bool: return verifyLatitude(lat) and verifyLongitude(lon)
[ "def test_validate_coordinates():\n lat_less_than = (-91.00, 1.0)\n lat_more_than = (91.00, 1.0)\n lon_less_than = (1.00, -181.0)\n lon_more_than = (1.00, 181.0)\n\n assert validate_coordinates(lat_less_than) == [lat_less_than, \"latitude less than -90\"]\n assert validate_coordinates(lat_more_tha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if valid radius for Earth in kilometers
def verifyRadius(radius:float) -> bool: return (0 < radius < 6371)
[ "def check_range(lat_user, lon_user, lat_test, lon_test, radius):\n distance = haversine(lon_user,lat_user, lon_test, lat_test)\n if distance <= radius:\n return True\n else:\n return False", "def checkWithinRITRadius(latitude,longitude):\n radius_to_check=2 #2 miles radius\n RIT_cent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check list of devices (12-char HEX strings). Require ALL devices to be valid. This is intentional instead of filtering out bad IDs because the user might not notice that some devices are incorrect.
def verifyDeviceList(devices:[str]) -> bool: return all(map(verifyDeviceString, devices))
[ "def test_validate_list_true(self):\n subset_list = ['0064F', '0088E', '00890']\n self.assertTrue(\n self.utils.validate_list(self.data.device_list, subset_list))", "def check_devices(self) -> bool:\n\t\tpass", "def validate(self):\n\n bad = list()\n logger.debug('Loading ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse both radius and center arguments. If neither is specified, return None. If only one is specified, raise an error. If both are specified, return the pair as a tuple.
def argParseRadiusArgs(r:float, c:str): try: x = (argParseRadius(r), argParseCenter(c)) if all(x): return x elif not any(x): return None else: raise ArgumentError("Arguments 'radius' and 'center' must both be specified. Argument 'radius' must be ...
[ "def find_center_oval(x1, y1, x2, y2) -> tuple:\r\n return int((x1 + x2)/2), int((y1+y2)/2)", "def circle_from_points(p1, p2, p3):\n\n # center is intersection of bisectors of segments p1-p2 and p2-p3\n\n s = np.real((p3-p1)*np.conj(p3-p2)) / np.imag((p3-p1)*np.conj(p3-p2))\n center = (p1+p2)/2 + ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensures valid `docstatus` transition.
def allow_transition_from_0_to_2(self, docstatus): if self.docstatus > 2: frappe.throw(_('This document is currently queued for execution. Please try again'), title=_('Document Queued'), indicator='red') if not self.docstatus: self.docstatus = 0 if docstatus==0: if self.docstatus==0: self._acti...
[ "def assert_valid_status_transition(cls, old_status: str, new_status: str):\n for status in (old_status, new_status):\n cls.assert_valid_status(status)\n\n # Status can never transition to NEW.\n # All other transitions are valid, at least for now.\n if not (old_status == cls....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test string rotation returns false.
def test_string_rotration_false():
    from string_rotation import string_rotation
    assert string_rotation('hello', 'nothello') is False
[ "def test_string_rotation_true():\n from string_rotation import string_rotation\n assert string_rotation('hello', 'elloh') is True", "def string_rotation(str1, str2):\n if len(str1) == len(str2):\n return is_substring(str1+str1, str2)\n return False", "def IsRotation(a,b):\n\trotation = 0\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test string rotation returns true.
def test_string_rotation_true():
    from string_rotation import string_rotation
    assert string_rotation('hello', 'elloh') is True
[ "def test_string_rotration_false():\n from string_rotation import string_rotation\n assert string_rotation('hello', 'nothello') is False", "def string_rotation(str1, str2):\n if len(str1) == len(str2):\n return is_substring(str1+str1, str2)\n return False", "def IsRotation(a,b):\n\trotation =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare dict1 keys with dict2 keys and see if dict1 has extra keys compared to dict2
def compare_dict_keys(dict1, dict2): return dict1.keys() - dict2.keys()
[ "def cmp_dict(d1, d2, ignore_keys=[]):\n # https://stackoverflow.com/questions/10480806/compare-dictionaries-ignoring-specific-keys\n return {k: v for k, v in d1.items() if k not in ignore_keys} \\\n == {k: v for k, v in d2.items() if k not in ignore_keys}", "def compare_dicts_structure(dict1, dict2)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log in as Admin. Click on User Menu for dropdown. Click on 'Admin' on user menu. Click 'Stats'. Click 'Concept Coach'. The user is presented with Concept Coach Statistics (t2.07.01). Corresponds to... t2.07.01
def test_view_stats_admin(self): # t2.07.01 --> The user is presented with Concept Coach Statistics self.admin.login() self.admin.goto_admin_control() self.admin.sleep(5) self.admin.wait.until( expect.visibility_of_element_located( (By.PARTIAL_LINK_TEX...
[ "def test_admin_dashboard_page(self):\n response = self.client.get('/admin/')\n self.assertContains(\n response,\n '<h2>User graph</h2>',\n html=True,\n )\n self.assertContains(\n response,\n '<h2>...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves all the synonym names of a data collection (this list includes the original name).
def getDataTypeSynonyms(self, name):
    res = self.serv.getDataTypeSynonyms(name)
    return res
[ "def synonyms(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"synonyms\")", "def _get_pubchem_synonyms(self):\n syn_list = []\n url = PUBCHEM_SYNONYMS_PATH % self._get_cid()\n try:\n response = requests.get(url)\n except requests.except...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the physical location (URL) of a web page providing knowledge about a specific entity, using a specific resource.
def getLocation(self, uri, resource):
    res = self.serv.getLocation(uri, resource)
    return res
[ "def get_url_page(self, product):\n return product.get('url')", "def url_for(self, path_or_page):\n\n if isinstance(path_or_page, Page):\n return self.relpath(self._absolute_path(path_or_page.path), is_page=True)\n else:\n return self.relpath(path_or_page)", "def page_url(self, page_pk): ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves some information about these Web Services.
def getServicesInfo(self):
    res = self.serv.getServicesInfo()
    return res
[ "def get_services(self):\n xpath = [\"Services\", \"Service\"]\n return self.find_anywhere(xpath)", "def services(self):\n _log.debug('get service list')\n result = self._requestJSON('services', '')\n return self._getKey(result, 'name')", "def get_service_info(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Says whether a URI of a data collection is deprecated.
def isDeprecated(self, uri):
    res = self.serv.isDeprecated(uri)
    res = self._boolean_convertor(res)
    return res
[ "def _is_deprecated(self, api_node: doc_generator_visitor.ApiTreeNode):\n if doc_controls.is_deprecated(api_node.py_object):\n return True\n\n decorator_list = signature.extract_decorators(api_node.py_object)\n if any('deprecat' in dec for dec in decorator_list):\n docstring = getattr(api_node.py...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge consecutive Filter(x), Filter(y) blocks into Filter(x && y) block.
def merge_consecutive_filter_clauses(ir_blocks): if not ir_blocks: return ir_blocks new_ir_blocks = [ir_blocks[0]] for block in ir_blocks[1:]: last_block = new_ir_blocks[-1] if isinstance(last_block, Filter) and isinstance(block, Filter): new_ir_blocks[-1] = Filter( ...
[ "def _filter_chain(accumulated, additional):\n return lambda block_key: accumulated(block_key) and additional(block_key)", "def filter_fir_shared(clock, reset, x, y, b):\n assert isinstance(x, Samples)\n assert isinstance(y, Samples)\n\n ntaps = len(b)\n scnt = Signal(intbv(ntaps+1, min=0, max=ntap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate that the OutputContextVertex is correctly representable.
def validate(self):
    super(OutputContextVertex, self).validate()

    if self.location.field is not None:
        raise ValueError(u'Expected location at a vertex, but got: {}'.format(self.location))
[ "def validate(self, model_output_shape: Tuple, target_shape: Tuple) -> None:\n raise NotImplementedError", "def is_vertex(self) -> \"bool\":\n return self._value.getType() == Value.VVAL", "def is_vertex(ent):\n if isinstance(ent, tuple):\n if len(ent) in [2, 3]:\n return True\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lower ContextFieldExistence expressions into lower-level expressions.
def lower_context_field_existence(ir_blocks, query_metadata_table): def regular_visitor_fn(expression): """Expression visitor function that rewrites ContextFieldExistence expressions.""" if not isinstance(expression, ContextFieldExistence): return expression location_type = quer...
[ "def regular_visitor_fn(expression):\n if not isinstance(expression, ContextFieldExistence):\n return expression\n\n location_type = query_metadata_table.get_location_info(expression.location).type\n\n # Since this function is only used in blocks that aren't ConstructResult,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Expression visitor function that rewrites ContextFieldExistence expressions.
def regular_visitor_fn(expression): if not isinstance(expression, ContextFieldExistence): return expression location_type = query_metadata_table.get_location_info(expression.location).type # Since this function is only used in blocks that aren't ConstructResult, # the locat...
[ "def lower_context_field_existence(ir_blocks, query_metadata_table):\n def regular_visitor_fn(expression):\n \"\"\"Expression visitor function that rewrites ContextFieldExistence expressions.\"\"\"\n if not isinstance(expression, ContextFieldExistence):\n return expression\n\n loc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a map from simple optional locations to their inner location and traversed edge.
def extract_simple_optional_location_info( ir_blocks, complex_optional_roots, location_to_optional_roots): # Simple optional roots are a subset of location_to_optional_roots.values() (all optional roots) # We filter out the ones that are also present in complex_optional_roots. location_to_preceding_...
[ "def _build_default_map(self):\n\n return [[\"-\" for x in range(self.map_size[0])] \n for y in range(self.map_size[1])]", "def _iso_inv_map(d):\n _d = {}\n for src, d2 in d.items():\n for tgt, data in d2.items():\n if tgt is not None and src != tgt:\n if t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of IR blocks as a copy of the original, with EndOptional blocks removed.
def remove_end_optionals(ir_blocks):
    new_ir_blocks = []
    for block in ir_blocks:
        if not isinstance(block, EndOptional):
            new_ir_blocks.append(block)
    return new_ir_blocks
[ "def get_block_chain(self) -> List[Block]:\n return [Block(h, t) for h, t in self.chain.items()]", "def copy(self):\n new_list = CircularPositionalList()\n for e in self:\n new_list.add_last(e)\n return new_list", "def copy(self) -> \"SbProjector *\":\n return _coin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print MYHOMECMD version, build and date
def print_version():
    logger.debug("print_version")
    print "MYHOMECMD Version: " + __version__
    print __date__.replace('$', '')
    logger.debug("Exit 0")
    sys.exit(0)
[ "def version(self):\n# import subprocess\n# p = subprocess.Popen('ecl --version', shell=True, stdin=subprocess.PIPE,\n# stdout = subprocess.PIPE, stderr=subprocess.PIPE)\n# return AsciiArtString(p.stdout.read())\n return \"Version information is given by lisp....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
find_tty_usb('10c4', 'ea60') -> '/dev/ttyACM0'
def find_tty_usb(idVendor, idProduct): # trouver l'idVendor et l'idProduct grace a lsusb -v # Note: if searching for a lot of pairs, it would be much faster to search # for the enitre lot at once instead of going over all the usb devices # each time. for dnbase in os.listdir('/sys/bus/usb/devices'...
[ "def connect():\n for (COMMS_CHANNEL, NAME, deviceId) in serial.tools.list_ports.comports():\n if re.match(r\"^\\s*USB VID:PID=0*2341:0*3e\\b\", deviceId, re.I):\n f = serial.Serial(COMMS_CHANNEL, 115200, timeout=60)\n return f\n raise RuntimeError(\"Could not locate arduino seria...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a batch from a list of trajectories and their information.
def construct_batch( self, trajs: List[Dict[str, List[Tuple[Graph, GraphAction]]]], cond_info: float, rewards: float, ) -> gd.Batch: torch_graphs = [ self.ctx.graph_to_data(i[0]) for tj in trajs for i in tj["traj"] ] actions = [ self.c...
[ "def split(self):\n trajectories = []\n start = 0\n for i, length in enumerate(self.lengths):\n stop = start + length\n traj = TrajectoryBatch(\n env_spec=self.env_spec,\n observations=self.observations[start:stop],\n last_obser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the image on which to perform the centroiding given a fits file containing an exposure with multiple groups. In the case of multiple integrations in the exposure, use only the first, and produce a single TA image.
def make_ta_image(infile, ext=0, useframes=3, save=False, silent=False): # Read in data. Convert to floats with fits.open(infile) as h: data = h[ext].data head = h[ext].header data = data * 1. shape = data.shape #pdb.set_trace() if len(shape) <= 2: raise Ru...
[ "def create_ana_images(self):\n log.debug(\"start\")\n os.chdir(self._p_analysis_tmp)\n exif_attributes=self._exif_attributes\n exif_attributes=\" \".join([\"-\"+a for a in exif_attributes])\n\n # quiet option suppreses regular output\n cmd_exif=ImageAnalyzer.CMD_EXIFTOOL_J...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply flat field to TA image. Assume the flat has the format matching those to be used on board by GENTALOCATE. Pixel values are multiplied by 1000 relative to traditional flat field files. (i.e. flat is normalized to a value of 1000). Bad pixels have a value of 65535. Bad pixels receive a value that is interpolated fr...
def apply_flat_field(image, flat): # Make sure flat field values are floats flat = flat * 1. # Find bad pixels and set to NaN bad = flat == 65535 print("Found {} bad pixels in the flat.".format(np.sum(bad))) flat[bad] = np.nan # Apply flat image /= (flat/1000.) # Use surro...
[ "def apply_flat_field(science, flat):\n\n # Extract subarray from reference data, if necessary\n if reffile_utils.ref_matches_sci(science, flat):\n flat_data = flat.data\n flat_dq = flat.dq\n else:\n log.info(\"Extracting matching subarray from flat\")\n sub_flat = get_subarray_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
HSV values in [0, 1). Returns [r, g, b] values from 0 to max inclusive
def hsv_to_rgb(h, s, v, max): h_i = int(h * 6) f = h * 6 - h_i p = v * (1 - s) q = v * (1 - f * s) t = v * (1 - (1 - f) * s) if h_i == 0: r, g, b = v, t, p elif h_i == 1: r, g, b = q, v, p elif h_i == 2: r, g, b = p, v, t elif h_i == 3: r, g, b = p, q,...
[ "def num_to_hsv(value):\n return cv2.cvtColor(np.uint8([[num_to_bgr(value)]]), cv2.COLOR_BGR2HSV)", "def hsv_to_rgb(h, s, v):\n if s == 0.0:\n return v, v, v\n\n i = int(h * 6.0) # XXX assume int() truncates!\n\n f = (h * 6.0) - i\n p, q, t = v * (1.0 - s), v * (1.0 - s * f), v * (1.0 - s *...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flush the given logs time frame, so that all of its entries are printed through the given output.
def _flush_frame(logs, output): for timestamp in sorted(logs): entries = logs[timestamp] (level, color, pkrid, process, source, logger, log) = entries[0] try: lcolor = LEVEL_COLORS[level] except KeyError: lcolor = LEVEL_COLORS['E'] lcolor = 16 + 36 * l...
[ "def flush(self) -> None:\n for handler in self.logger.handlers:\n handler.flush()", "def flush():\n\n if stdout != NULL:\n fflush(stdout)", "def flush(self):\n self._output_flush()", "def flush():\n actual_flush()", "def _flush_streams(self):\n sys.stdout.fl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a list of examples of pairs of tokens into the corresponding indices according to the given Vocabulary.
def examples_to_indices( examples: list[tuple[str, str]], vocab: Vocabulary ) -> list[tuple[int, int]]: # zip(*...) "unzips" the list of tuples into a tuple of lists targets, contexts = zip(*examples) target_indices = vocab.tokens_to_indices(targets) context_indices = vocab.tokens_to_indices(context...
[ "def tokens_from_index_list(index_list, id2vocab):\n token_list = []\n for i in range(len(index_list)):\n if index_list[i] > len(id2vocab)-1:\n token_list.append(\"<UNK>\")\n else:\n token_list.append(id2vocab[index_list[i]])\n return token_list", "def sentences2idx(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a given array into a min heap
def create_min_heap(self, arr):
    n = len(arr)
    # last n/2 elements will be leaf nodes (CBT property) hence already min heaps
    # loop from n/2 to 0 index and convert each index node into min heap
    for i in range(int(n / 2), -1, -1):
        self.min_heapify(i, arr, n)
[ "def min_heapify(arr):\n parent = ((len(arr) - 1) - 1) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1", "def heapify(self, array):\n\n heap = MinHeap()\n heap.list = array\n\n if len(heap.list) > 0:\n height = math.floor(math.log2(len(heap.lis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assuming sub trees are already min heaps, converts tree rooted at current indx into a min heap.
def min_heapify(self, indx, arr, size): # Get index of left and right child of indx node left_child = indx * 2 + 1 right_child = indx * 2 + 2 smallest = indx # check what is the smallest value node in indx, left child and right child if left_child < size: if...
[ "def min_heapify(arr):\n parent = ((len(arr) - 1) - 1) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1", "def repair_heap(array, start_index, heap_size):\n\n # Check given given parameter data type.\n if not type(array) == list:\n raise TypeError('array must be a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the language column and create a new key for it in structure_by_section, while creating a file_answer to be able to plot it later in the analysis
def create_language_section(self, df, structure_by_section): path_to_language = os.path.join('../survey_creation', self.year, self.country, 'listAnswers', 'languages.csv') list_of_languages = self.df['startlanguage. Start language'].unique() if len(list_of_languages) > 1: with open(...
[ "def load_wiktionary(configuration, verbose=0):\n\n df = pandas.read_csv(configuration['wiktionary_translations_path'],\n sep='\\t', usecols=['ID', 'Concept_ID', 'Concept', 'Languoid', 'Language_name', 'Form'])\n\n\n if verbose:\n print()\n print('number of available lang...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a column has no values at all (all NaN), the column is removed to avoid problems later in the analysis
def remove_empty_column(self, df): return df.dropna(axis=1, how='all')
[ "def drop_nan(df, col='mrn'):\n return df[pd.notnull(df[col])]", "def remove_nan_and_zeroes_from_columns(df, variable):\n filtered_df = df[(df[variable].notnull()) & (df[variable]>0)]\n return filtered_df", "def del_missings(data_frame):\n data_frame = data_frame.replace('unk...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove rows that are not the appropriate country
def remove_not_right_country(self, df): # Use the package pycountry to get the language from the country code if len(self.country) == 2: if self.country == 'uk': country = pycountry.countries.get(alpha_2='GB'.upper()) else: country = pycountry.coun...
[ "def filtercountry(df, country):\n mask = df.country.eq(country)\n return df[mask]", "def iatas_without_country():\n codes_w_country = []\n for v in IATAS_BY_COUNTRIES.values():\n codes_w_country += v\n\n if not len(codes_w_country) == len(set(codes_w_country)):\n print(f\"Total codes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use the option `section_nbr_to_keep_after` set up in the config file to know which section is considered as the proof that the participant dropped out and did not reply. It uses the label `Last page` to know which last page the participant reached
def dropping_dead_participant(self, df): return self.df.loc[self.df['lastpage. Last page']> self.section_nbr_to_keep_after]
[ "def exit_last_section() -> None:\n ContextPrinter.self.headers = ContextPrinter.self.headers[:-1]", "def __goToLastPage(self):\n try:\n pagination_tag = self.soup.find('div', 'pagenav')\n if not pagination_tag:\n return\n uri = None\n last_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Some questions may not have any answer. If the unique value of that question is array([nan]), the question is dropped
def dropping_empty_question(self, df): return self.df.dropna(axis=1, how='all')
[ "def test_nans_replaced():\n a = rem.fix_missing()\n new_list = [x for sublist in a for x in sublist]\n unique_vals = set(new_list)\n try:\n None not in unique_vals\n '?' in unique_vals\n print \"NaNs removed, matrix has appropriate missing data symbols.\"\n except:\n rais...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Various cleaning of white spaces in column names. This function can be extended if other forms of errors are found later
def cleaning_columns_white_space(self, df): return df.rename(columns=lambda x: self.cleaning_some_white_space(x))
[ "def tidy_data(df):\n\n ##clean up column headings\n df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')", "def clean_cols(data):\n clean_col_map = {x: x.lower().strip() for x in list(data)}\n return data.rename(index=str, columns=clean_col_m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Y/N questions and the Likert questions can be grouped together to have one plot for each. The Likert questions need to be checked on their answer_format to avoid mixing different types of Likert scales
def grouping_likert_yn(group_question): group_survey_q, group_original_question = list(), list() previous_answer_format = None previous_file_answer = None previous_order_question = None file_answer = None for q in group_question: cu...
[ "def scores_vs_rating():\n\n rating_comparison = {\n 1: [], 2: [], 3: [], 4: [], 5: []\n }\n\n rating_key = \"like_rating_specific\"\n\n for user, session in Session.get_users_with_surveys():\n\n boundary = HistogramBoundary(user)\n\n survey = user.get_survey()\n\n for playli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When there is an option for 'Other', the column contains the value typed by the participants. However, to plot later, it is better to recode all these values as for the other items, then duplicate these values in another column with the tag [Other Raw] to keep the information for later. There are two cases when [OTHE...
def duplicating_other(self, df): for col in df.columns: if col[-7:] == '[Other]': # Duplicate the column df['[OTHER_RAW]. '+ col] = df[col] # Replace all the values with 'Yes' df[col] = df[col].apply(lambda x: 'Yes' if not pd.isnull(x) ...
[ "def choice_col(self):\n return 'chosen'", "def tidy_dic():\n #defining path for data\n fname = os.path.join(data_path, \"gardner_time_to_catastrophe_dic_tidy.csv\")\n\n #read csv\n df = pd.read_csv(fname)\n\n # Since just True or False on a plot legend doesn't make much sense, we'll create ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stores training points x_train and their corresponding labels w_train, and estimates the a priori probabilities p(w_i) for each class w_i.
def fit(self, x_train, w_train): # Store examples. self.x_train = x_train self.w_train = w_train # Estimate a priori probabilities p(wi) for each class wi. self.p_w = DataLoader.compute_a_priori(w_train) self.num_classes = len(self.p_w) return self
[ "def learn(self, Xtrain, ytrain):\n\n ### YOUR CODE HERE\n if self.params['usecolumnones']:\n self.numfeatures = Xtrain.shape[1]\n \n else:\n self.numfeatures = Xtrain.shape[1] - 1\n\n ### END YOUR CODE\n\n origin_shape = (self.numclasses, self.nu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs KNN prediction/estimation for each point x in x_set. Returns an array containing the predicted classes for each input point.
def predict(self, x_set): def classify(x): # Pick top-voted label among the k nearest neighbors. label_votes = self.knn_label_votes(x) return max(label_votes, key=label_votes.get) return np.array(list(map(classify, x_set)))
[ "def classify(self, testing_set):\n class_predictions = []\n class_actuals = []\n\n for attribute in testing_set.itertuples(index=False): # iterates through each attribute in the testing set\n class_probabilities = []\n actual_class = None\n\n for category in s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the a posteriori probability p(wi|x) for each class wi by dividing the number of votes of each label among the k nearest neighbors by K.
def compute_a_posteriori(self, x): # Compute label votes for k nearest neighbors. knn_label_votes = self.knn_label_votes(x) # p(wi|x) = num_votes(wi)/K. Map label index into probability. return np.array(list(map( lambda label: knn_label_votes.get(label, 0) / float(self.K), range(self.num_classes))))
[ "def knn(p, data, k):\n \n \"\"\" Steps:\n 1. Iterate through samples in data and store the \n distance from p in the dictionary \"distance\"; key is the \n distance, value is the sample.\n 2. Creat a sorted list of samples according to ascending\n order of the dist...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the k nearest neighbors and counts their labels. Returns a dict mapping each label to its count.
def knn_label_votes(self, x): # Evaluate the distance L2 of x to all training points. dist = np.linalg.norm(x - self.x_train, axis=1) # Compute the indices of the k nearest points (with respect to x_train). # Use negative distances to force min-heap behave like a max-heap. nearest_k_indices = ...
[ "def predictkNNLabelsReg(closest_neighbors, y_train):\n total = 0;\n for i in range(len(closest_neighbors)):\n total = total + y_train[closest_neighbors[i]][0];\n LabelPrediction = total/len(closest_neighbors)\n return LabelPrediction", "def neighbor_counts(living):\n n = collections.Counter...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test delete movies endpoint
def test_delete_movies(self): response = self.client.delete('/movies/1') body = json.loads(response.data.decode()) self.assertEqual(response.status_code, 200) self.assertEqual(body['message'], 'Movie Successfully deleted.')
[ "def test_delete_movie(self):\n response = self.client.delete('/movies/0')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertEqual(body['message'], \"resource not found\")", "def test_delete_valid(self):\n response = self.app....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test post movies endpoint
def test_post_movies(self): body = { "release_date": "2020/06/11", "title": "test" } response = self.client.post('/movies', content_type='application/json', data=json.dumps(body)) body = json....
[ "def test_post_movie(self):\n body = {\"release_date\": \"2020/06/11\"}\n response = self.client.post('/movies',\n content_type='application/json',\n data=json.dumps(body))\n body = json.loads(response.data.decode())\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test update movies endpoint
def test_patch_movies(self): body = { "title": "patch" } response = self.client.patch('/movies/2', content_type='application/json', data=json.dumps(body)) body = json.loads(response.data.decode()) ...
[ "def test_update_video_watched(self):\n response = self.client.open(\n '/api/video',\n method='PUT')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def edit_movies():\n movie_id = int(request.args.get('id', 0))\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test delete movies endpoint with a nonexistent id
def test_delete_movie(self): response = self.client.delete('/movies/0') body = json.loads(response.data.decode()) self.assertEqual(response.status_code, 404) self.assertEqual(body['message'], "resource not found")
[ "def test_delete_movies(self):\n response = self.client.delete('/movies/1')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['message'], 'Movie Successfully deleted.')", "def test_api_videos_id_delete(self):\n pass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test post movies endpoint without a title
def test_post_movie(self): body = {"release_date": "2020/06/11"} response = self.client.post('/movies', content_type='application/json', data=json.dumps(body)) body = json.loads(response.data.decode()) self.assertEqu...
[ "def test_post_movies(self):\n body = {\n \"release_date\": \"2020/06/11\",\n \"title\": \"test\"\n }\n response = self.client.post('/movies',\n content_type='application/json',\n data=json.dumps(body))\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disables secure boot on the node if it is enabled. This method checks whether secure boot is enabled on the node; if it is, it disables it and returns True.
def _disable_secure_boot(task): cur_sec_state = False try: cur_sec_state = sdflex_common.get_secure_boot_mode(task) except exception.SDFlexOperationNotSupported: LOG.debug('Secure boot mode is not supported for node %s', task.node.uuid) else: if cur_sec_state: ...
[ "def disable_secure_boot_if_supported(task):\n try:\n sdflex_common.update_secure_boot_mode(task, False)\n # We need to handle SDFlexOperationNotSupported exception so that if\n # the user has incorrectly specified the Node capability\n # 'secure_boot' to a node that does not have that capability...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether directed LAN boot is requested
def is_directed_lanboot_requested(node): directed_lanboot_requested = ( str(node.driver_info.get('enable_directed_lanboot', 'false')).lower()) return directed_lanboot_requested == 'true'
[ "def has_lldp_cli(self):\n cmd = self.cli(\"show config dynamic\", ignore_errors=True)\n return \"enable lldp\" in cmd", "def is_lcd_reachable():\n\n response = requests.get(NODE_INFO_ENDPOINT)\n return True if response.status_code == 200 else False", "def iswitch_initialized():\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disables secure boot on the node; does not raise if secure boot is not supported.
def disable_secure_boot_if_supported(task): try: sdflex_common.update_secure_boot_mode(task, False) # We need to handle SDFlexOperationNotSupported exception so that if # the user has incorrectly specified the Node capability # 'secure_boot' to a node that does not have that capability and #...
[ "def _disable_secure_boot(task):\n cur_sec_state = False\n try:\n cur_sec_state = sdflex_common.get_secure_boot_mode(task)\n except exception.SDFlexOperationNotSupported:\n LOG.debug('Secure boot mode is not supported for node %s',\n task.node.uuid)\n else:\n if cur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepares the boot of Ironic ramdisk using PXE. This method prepares the boot of the deploy or rescue ramdisk after reading relevant information from the node's driver_info and instance_info.
def prepare_ramdisk(self, task, ramdisk_params): if task.node.provision_state in (states.DEPLOYING, states.RESCUING, states.CLEANING, states.INSPECTING): prepare_node_for_deploy(task) if not http_utils.is_http_boot_requested(task.node): su...
[ "def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n\n # Label indicating a deploy or rescue operation being carried out on\n # the node, 'deploy' or 'rescue'. Unless the node is in a rescue like\n # state, the mode is set to 'deploy', indicating deploy operation is\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepares the boot of instance. This method prepares the boot of the instance after reading relevant information from the node's instance_info. In case of UEFI HTTP Boot, it switches to UEFI HTTP config. In case of localboot, it cleans up the PXE config. In case of 'boot from volume', it updates the iSCSI info onto SDFl...
def prepare_instance(self, task): # Need to enable secure boot, if being requested. # update_secure_boot_mode checks and enables secure boot only if the # deploy has requested secure boot boot_option = deploy_utils.get_boot_option(task.node) if boot_option != "kickstart": ...
[ "def prepare_instance(self, task):\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n boot_option = deploy_utils.get_boot_option(node)\n if boot_option != \"kickstart\":\n boot_mode_utils.configure_secure_boot_if_needed(task)\n\n ins...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans up the boot of the instance. This method cleans up the PXE / HTTP environment that was set up for booting the instance. It unlinks the instance kernel/ramdisk in the node's directory under tftproot / httproot and removes its PXE config / HTTP config. In case of Directed LAN Boot / UEFI HTTP Boot, BIOS settings are reset....
def clean_up_instance(self, task): manager_utils.node_power_action(task, states.POWER_OFF) disable_secure_boot_if_supported(task) node = task.node if (is_directed_lanboot_requested(node) or http_utils.is_http_boot_requested(node)): # In this cleaning step it ...
[ "def clean_up_instance(self, task):\n manager_utils.node_power_action(task, states.POWER_OFF)\n disable_secure_boot_if_supported(task)\n\n node = task.node\n\n sdflex_common.reset_network_setting_dhcpless_boot(node)\n image_utils.cleanup_iso_image(task)", "def wipe_puppet(self):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the SdflexRedfish Virtual Media Boot interface.
def __init__(self): super(SdflexRedfishVirtualMediaBoot, self).__init__() if not sushy: raise ironic_exception.DriverLoadError( driver='sdfelx-redfish', reason=_('Unable to import the sushy library'))
[ "def init(verbose):\n\n\tif verbose:\n\t\tlog.basicConfig(format=\"%(levelname)s: %(message)s\", level=log.DEBUG)\n\telse:\n\t\tlog.basicConfig(format=\"%(levelname)s: %(message)s\")\n\n\tlog.info(\"Initializing SmartSpa subsystems.\")\n\n\tglobal real_time_config\n\treal_time_config = Config(\"real_time\")\n\n\tin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the boot iso image name for a given node.
def _get_iso_image_name(node): return "boot-%s.iso" % node.uuid
[ "def get_name() -> str:\n return platform.node()", "def get_img_name(self):\n\n name = self.img\n idx = name.rindex(\".\")\n return name[:idx]", "def get_img_name(shortcut):\n name = \"Chess_{}{}t45.svg\".format(shortcut.lower(),\n \"l\" if sho...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }