Dataset schema (column, type, observed size range):
- query: string, 9 to 9.05k characters
- document: string, 10 to 222k characters
- negatives: list of strings, 19 to 20 items
- metadata: dict
Get the points to estimate head pose sideways
def head_pose_points(image, rotation_vector, translation_vector, camera_matrix): rear_size = 1 rear_depth = 0 front_size = image.shape[1] front_depth = front_size*2 val = [rear_size, rear_depth, front_size, front_depth] point_2d = get_2d_points(image, rotation_vector, translation_vector, camera_...
[ "def get_pose(self):\n\n (success, rvec, tvec) = cv2.solvePnP(self.model_points, self.image_points,\n self.camera_matrix, self.dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)\n '''\n The OpenCV Solve PnP method computes the rotation and translation vectors...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of capabilities for the given user.
def getCapabilities4User(session_key, user=None): roles = [] capabilities = [] # Get user info if user is not None: logger.debug('Retrieving role(s) for current user: %s', user) userEntities = entity.getEntities('authentication/users/%s' % user, count=-1, sessio...
[ "def capabilities(self):\n return []", "def capabilities(self) -> Mapping[str, str]:\n return pulumi.get(self, \"capabilities\")", "def get_capabilities(self) -> List[str]:\n return list(self._get_controller().capabilities)", "def capabilities(self) -> Sequence['outputs.SkuCapabilityRespo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of the review statuses in a dictionary with the key set to the label (or stanza, if the label is undefined).
def refreshStatusLabelMap(self, session_key, force_refresh=False): if force_refresh or self.status_label_map is None: logger.debug("Reloading the review statuses list") reviewStatusesEntities = entity.getEntities('alerts/reviewstatuses', count=-1, sessionKey=session_key) self...
[ "def filter_labels(self, labels, status=None):\n labels_f = []\n if status is not None and isinstance(status, list):\n for l in labels:\n if \"label_status\" not in l.keys():\n print(l)\n elif l[\"label_status\"] in status:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the urgency override state.
def isUrgencyOverrideAllowed(session_key): notable_en = entity.getEntity(NotableEventUpdate.LOG_REVIEW_REST_URL, 'notable_editing', namespace=NotableEventUpdate.DEFAULT_NAMESPACE, owner=NotableEvent...
[ "def compliance_state(self):\n return self._compliance_state", "def compliance_state(self) -> str:\n return pulumi.get(self, \"compliance_state\")", "def get_flux_urgency(cls, urgency) -> int:\n raise NotImplementedError()", "def priority(self, state):\r\n return -1 * self.heuristi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the required length of the comment.
def commentLengthRequired(session_key): comment_en = entity.getEntity(NotableEventUpdate.LOG_REVIEW_REST_URL, 'comment', namespace=NotableEventUpdate.DEFAULT_NAMESPACE, owner=NotableEventUpdate.DEFA...
[ "def num_comments(self):\n return len(self.comments)", "def comment_count(self):\n return self._comment_count", "def comments_count(self) -> int:\n return pulumi.get(self, \"comments_count\")", "def get_comments_num(self): \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the status ID of the default systemwide review status.
def getDefaultStatus(session_key): # Get the list of statuses logger.debug("Getting the default status") statuses_list = entity.getEntities(NotableEventUpdate.REVIEW_STATUSES_REST_URL, namespace=NotableEventUpdate.DEFAULT_NAMESPACE, ...
[ "def get_default_status(self):\n return self.bot_data_file[\"bot_status\"][\"defaultStatus\"]", "def get_default_invoice_status(self) -> str:\n return InvoiceStatus.APPROVED.value", "def review_status(self) -> str:\n if len(self.source_code_comments) == 1:\n return self.source_co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Refresh the list of correlation searches from splunkd via REST.
def refreshCorrelationSearches(self, session_key): logger.debug("Reloading the correlation searches") self.correlation_searches = entity.getEntities('alerts/correlationsearches', count=-1, sessionKey=session_key) self.correlation_search_info = {k: {'rule_name': v['rule_name'], 'default_status': ...
[ "def refresh_my_clients(self):\n\n try:\n client_search_response = self.clients.get_clients(page_size=1000)\n self.my_clients = client_search_response['_embedded']['clients']\n except (RequestFailed, StatusCodeError, MaxRetryError):\n raise", "def refresh(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an audit record for a list of updated events.
def create_audit_records(self, status_records, session_key): uri = '/services/receivers/simple' getargs = {'index': '_audit', 'sourcetype': 'incident_review', 'output_mode': 'json'} # Double list-comprehension: # a. Comma-separate the fields in each record, replacing "None" with the ...
[ "def update_events_in_database(self):\n for i in range(0, len(self._event_id_list), 1):\n e_id = self._event_id_list[i] # DB ID\n e_ind = self._event_index_list[i] # Index of the event\n e_db = Event.objects.get(id=e_id) # Event as stored in the DB\n e_db...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the search results for the given search ID.
def getSearchResults(searchID, session_key): job = splunk.search.getJob(searchID, sessionKey=session_key) if not job.isDone: raise SearchNotDoneException("Search is not done, search must be completed before results can be processed") if job.reportSearch: logger.warn("T...
[ "async def retrieve_search_results(self):\n return await self.retrieve_ranked_content_hashes(self.search_results_key)", "async def get_search_results(search_string: str):\n database = get_db()\n result = []\n search_string = search_string.lower()\n search_strings = search_utils.preprocess_searc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the status of the events that match a search with the given ID.
def setStatusBySearchID(self, searchID, urgency, status, comment, newOwner, reviewTime, capabilities, session_key, currentUser=None, force_refresh=False, rule_ids_to_change=None, existing_statuses=None): # This class instance will record the number of events successfully changed status_change_meta = Lo...
[ "def update_status(self, id, status):\n sql = f\"UPDATE incidences SET status = \\'{status}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def set_match_id(match_id):\n conn = get_connect()\n conn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the status of the events with the given rule IDs
def setStatusByIDs(self, rule_ids, urgency, status, comment, newOwner, reviewTime, session_key, currentUser=None, existing_statuses=None): # This class provides information on the operations performed status_change_meta = LogReviewStatusChanges() # Make sure the comment is the minimum length (...
[ "def set_rules(rules):\n\n global _rules\n\n _rules = rules", "def update_rules(dataset: str, rules: List[Rule]):\n rules = [rule._convert_to_labeling_rule() for rule in rules]\n api.active_api().update_dataset_labeling_rules(dataset, rules)", "async def set_rules(self, ctx: discord.ext.commands.con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates 1/ from fit data f and possibly stretch exponent s, with errors if given
def calculate_f(f, s = None, f_err = None, s_err = None, scale = 1000): if s is None: return f, f_err else: f0 = f * s / gamma(1./s) if (f_err is not None) and (s_err is not None): sigma = np.sqrt(f_err ** 2 + ((s + polygamma(0, 1/s))/s/gamma(1/s)* s_err)**2) else: ...
[ "def ForceFitPowerlaw(p0, f, x, model='h'):\n hertz = ['h', 'H', 'hertz', 'Hertz']\n sneddon = ['s', 'S', 'sneddon', 'Sneddon']\n if model in hertz:\n model = 3./2\n def erf(p, f, x, model):\n return f - p[0]*np.power(x,model)\n elif model in sneddon:\n model = 2.\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process detectors from a stream of text data
def readTextStream( self, stream, sourcename=None, postcheck=True, strict=True ): if not isinstance(stream, io.TextIOBase): raise TypeError("Stream is not a source of text data") elif not stream.readable(): raise AttributeError("Stream is not readable") detec...
[ "def run(self, stream):\n \n # Run any preparations that need to exist before the handler runs\n self.prepare()\n \n for line in stream:\n if self.terminate_early:\n break \n\n line = line.rstrip('\\n')\n \n if line...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return workflow history of this context, for all workflows in its chain. Taken from plone_scripts/getWorkflowHistory.py
def workflowHistory(self, complete=True): context = aq_inner(self.context) # Since switching to DCWorkflow's getInfoFor, we rely on its # permission checks. #if not (_checkPermission('Request review', context) or # _checkPermission('Review portal content', context)): #...
[ "async def get_workflow_execution_history(\n self, *, request: GetWorkflowExecutionHistoryRequest\n ) -> GetWorkflowExecutionHistoryResponse:\n\n return await self._unary_unary(\n \"/temporal.api.workflowservice.v1.WorkflowService/GetWorkflowExecutionHistory\",\n request,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a URI template using the url_key and constants from the API definition found in const.py
def build_uri_template(url_key: str) -> URITemplate: _skeleton = ''.join([API_PATH['base'], API_PATH[url_key]]) _template = URITemplate(_skeleton) return _template
[ "def uri_template(app, **kwargs):\n assert len(kwargs) == 1\n\n endpoint = kwargs.keys()[0]\n parameters = kwargs.values()[0]\n\n for url in app.url_map.iter_rules():\n if url.endpoint == endpoint:\n break\n else:\n return ''\n\n ut = url.rule\n\n for param, replacement...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
/items/{type}/{no}/supersets (see Bricklink API)
def get_supersets(self, itemid: str, itemtypeid: str)->dict: self.__validate(itemid=itemid, itemtype=itemtypeid) url = build_uri_template('get_supersets').expand(type=itemtypeid, no=itemid) logger.info("Getting supersets: {}".format(url)) data = self._get_data(url) return data
[ "def get_subsets(self, itemid: str, itemtypeid: str)->dict:\n self.__validate(itemid=itemid, itemtype=itemtypeid)\n url = build_uri_template('get_subsets').expand(type=itemtypeid, no=itemid)\n logger.info(\"Getting subsets: {}\".format(url))\n data = self._get_data(url)\n return d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is used to get a set inventory: /items/{type}/{no}/subsets (see Bricklink API)
def get_subsets(self, itemid: str, itemtypeid: str)->dict: self.__validate(itemid=itemid, itemtype=itemtypeid) url = build_uri_template('get_subsets').expand(type=itemtypeid, no=itemid) logger.info("Getting subsets: {}".format(url)) data = self._get_data(url) return data
[ "def get_subsets(self, project):\n serializer = SubsetSerializer(project.subsets.all(), many=True)\n return serializer.data", "def list_subsets(self, workspace_unique_id=None, user_id=None, request=None):\n# print('list_subsets_request', request)\n subset_list = []\n# subset_uui...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic function to extract a list from a binary file.
def read_list_bin(file_name): try: extracted_list = [] with open(file_name, "rb") as binary_file: extracted_list = pickle.load(binary_file) return extracted_list except FileNotFoundError: print("File not found: ",file_name) except Exception as e: print(typ...
[ "def load_list_from_file(filepath):\n listdump = []\n with open(filepath, 'rb') as infile:\n listdump = pickle.load(infile)\n \n return listdump", "def load_list(list):\n with open(\"../data/{}.txt\".format(list), \"rb\") as file:\n the_list = pickle.load(file)\n return the_lis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic function to write a list to a binary file (replace content).
def write_list_bin(inserted_list, file_name): try: with open(file_name, "wb") as binary_file: pickle.dump(inserted_list, binary_file) except Exception as e: print(type(e), e) sys.exit()
[ "def list_to_file(path, the_list):\n content = '\\n'.join(the_list)\n\n with open(file_path(path), 'w', encoding='utf-8') as f:\n f.write(content)", "def write_list_to_file(filepath, listdump):\n if filepath:\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic function to check if an index already exists in a list of AutoBaseObject.
def index_already_there(index, given_list): # check if ID already exists already_there = False if len(given_list)>0: for item in given_list: if isinstance(item, AutoBaseObject): if item.ID == index: already_there = True break ...
[ "def __contains__(self, item: Any) -> bool:\n return item in self.item_to_index", "def has(self, index):\n raise NotImplementedError()", "def _check_indexes(cls, document: dict) -> bool:\n criteria = [\n field_name\n for field_name in cls._get_index_fields(IndexType.Ot...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic function to get an indexed entry from a list of AutoBaseObject.
def get_indexed_item_from_list(index, given_list): returned_item = None if len(given_list)>0: for item in given_list: if isinstance(item, AutoBaseObject): if item.ID == index: returned_item = item break else: ...
[ "def __getitem__(self, indx):\n\n return self._lEntities[indx]", "def find_index(self, obj):\n return self.model.indexlist[obj]", "def __getitem__(self, uid):\n return self.entities[uid]", "def __getitem__(self, index) -> 'DXFEntity':\n return self.entities[index]", "def item(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic function to get an indexed entry from a list of AutoBaseObject stored in a binary file.
def get_indexed_item_from_file(index, file_name): list_in_file = read_list_bin(file_name) return get_indexed_item_from_list(index, list_in_file)
[ "def get_indexed_item_from_list(index, given_list):\n\n returned_item = None\n\n if len(given_list)>0:\n for item in given_list:\n if isinstance(item, AutoBaseObject):\n if item.ID == index:\n returned_item = item\n break\n else...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run currently selected test code. Common code runs here, specific code is invoked through test_code_list and test_code_ID. Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code.
def run_test_code(self, *test_code_args, **test_code_kwargs): try: # here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement, # specific monitoring of VNF, trigger stop code from challenge def time1 = datetime.now() # get time ...
[ "def test_code001(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code001 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case code number 001.
def test_code001(self, *test_code_args, **test_code_kwargs): print("This is test_code001 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
[ "def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case code number 003.
def test_code003(self, *test_code_args, **test_code_kwargs): print("This is test_code003 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
[ "def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case code number 004.
def test_code004(self, *test_code_args, **test_code_kwargs): print("This is test_code004 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
[ "def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case code number 006.
def test_code006(self, *test_code_args, **test_code_kwargs): print("This is test_code006 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
[ "def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case code number 007.
def test_code007(self, *test_code_args, **test_code_kwargs): print("This is test_code007 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
[ "def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case code number 008.
def test_code008(self, *test_code_args, **test_code_kwargs): print("This is test_code008 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
[ "def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case code number 009.
def test_code009(self, *test_code_args, **test_code_kwargs): print("This is test_code009 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
[ "def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case code number 010.
def test_code010(self, *test_code_args, **test_code_kwargs): print("This is test_code010 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='')
[ "def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')", "def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to initialize test definition data.
def init_test_definitions(): test_definitions = [] # add info to list in memory, one by one, following signature values test_def_ID = 5 test_def_name = "VM failure impact on virtual firewall (vFW VNF)" test_def_challengeDefID = 5 test_def_testCaseID = 5 test_def_VNFIDs = [1] test_def_as...
[ "def setUpTestData(cls):\n pass", "def make_test_data(self):\n import data", "def test_init(self):\n pass", "def test_init(self, testdata: TestData) -> None:\n for data in testdata['observation_type']:\n observation_type = ObservationType(**data)\n for key, va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run currently selected challenge code, start portion. Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code.
def run_start_challenge_code(self, *chall_code_args, **chall_code_kwargs): try: code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1 # invoke corresponding start method, via index self.start_challenge_code_list[code_index](*chall_code_args, **chall_code...
[ "def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')", "def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run currently selected challenge code, stop portion. Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code.
def run_stop_challenge_code(self, *chall_code_args, **chall_code_kwargs): try: code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1 # invoke corresponding stop method, via index self.stop_challenge_code_list[code_index](*chall_code_args, **chall_code_kwa...
[ "def run_test_code(self, *test_code_args, **test_code_kwargs):\n try:\n # here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement,\n # specific monitoring of VNF, trigger stop code from challenge def\n\n time1 = datetime.now() #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start Challenge code number 001.
def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs): print("This is start_challenge_code001 from ChallengeDefinition #",self.ID, sep='')
[ "def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')", "def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop Challenge code number 001.
def stop_challenge_code001(self, *chall_code_args, **chall_code_kwargs): print("This is stop_challenge_code001 from ChallengeDefinition #",self.ID, sep='')
[ "def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')", "def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code009 from ChallengeDefiniti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start Challenge code number 004.
def start_challenge_code004(self, *chall_code_args, **chall_code_kwargs): print("This is start_challenge_code004 from ChallengeDefinition #",self.ID, sep='')
[ "def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')", "def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop Challenge code number 004.
def stop_challenge_code004(self, *chall_code_args, **chall_code_kwargs): print("This is stop_challenge_code004 from ChallengeDefinition #",self.ID, sep='')
[ "def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')", "def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code009 from ChallengeDefiniti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start Challenge code number 006.
def start_challenge_code006(self, *chall_code_args, **chall_code_kwargs): print("This is start_challenge_code006 from ChallengeDefinition #",self.ID, sep='')
[ "def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')", "def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop Challenge code number 006.
def stop_challenge_code006(self, *chall_code_args, **chall_code_kwargs): print("This is stop_challenge_code006 from ChallengeDefinition #",self.ID, sep='')
[ "def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')", "def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefiniti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start Challenge code number 008.
def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs): print("This is start_challenge_code008 from ChallengeDefinition #",self.ID, sep='')
[ "def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')", "def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop Challenge code number 008.
def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs): print("This is stop_challenge_code008 from ChallengeDefinition #",self.ID, sep='')
[ "def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')", "def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code009 from ChallengeDefiniti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start Challenge code number 009.
def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs): print("This is start_challenge_code009 from ChallengeDefinition #",self.ID, sep='')
[ "def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')", "def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop Challenge code number 009.
def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs): print("This is stop_challenge_code009 from ChallengeDefinition #",self.ID, sep='')
[ "def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')", "def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code008 from ChallengeDefiniti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start Challenge code number 010.
def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs): print("This is start_challenge_code010 from ChallengeDefinition #",self.ID, sep='')
[ "def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')", "def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop Challenge code number 010.
def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs): print("This is stop_challenge_code010 from ChallengeDefinition #",self.ID, sep='')
[ "def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')", "def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code008 from ChallengeDefiniti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to initialize challenge definition data.
def init_challenge_definitions(): challenge_defs = [] # add info to list in memory, one by one, following signature values chall_def_ID = 5 chall_def_name = "VM failure" chall_def_challengeType = ChallengeType.CLOUD_COMPUTE_FAILURE chall_def_recipientID = 1 chall_def_impactedCloudResourcesI...
[ "def initializeData(self, problem, random, structure):\n raise NotImplementedError", "def initialize_data():\n description = \"This is a platform to openly collaborate with others to \" \\\n \"build and launch a startup. We have rethought the way \" \\\n \"companies are...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to initialize metric definition data.
def init_metric_definitions(): metric_definitions = [] # add info to list in memory, one by one, following signature values metric_def_ID = 1 metric_def_name = "Recovery Time" metric_def_info = "Measures time taken by ONAP to restore a VNF" metric_definitions.append(RecoveryTimeDef(metric_def_I...
[ "def init_metrics(self):\n raise NotImplementedError()", "def _metric_init(self): # pragma: no cover\n raise NotImplementedError('_metric_init() must be implemented by %r' % self)", "def init_metrics(self):\n\n self.metrics = {}\n\n self.metrics['train_loss'] = np.zeros(0)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to initialize physical resource data.
def init_physical_resources(): test_physical_resources = [] # add info to list in memory, one by one, following signature values phys_resrc_ID = 1 phys_resrc_name = "small-cavium-1" phys_resrc_info = "Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS" phys_...
[ "def __init__(self, addr: DataBaseAddress,\n initial_resource_list: Optional[List[BellResource]]=None) -> None:\n super().__init__(addr)\n for r in (initial_resource_list or []):\n self.create_resource(r)", "def __init__(self, resource_type_dict):\n # name\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to initialize cloud virtual resource data.
def init_cloud_virtual_resources(): test_cldvirt_resources = [] # add info to list in memory, one by one, following signature values cldvirtres_ID = 1 cldvirtres_name = "nova-compute-1" cldvirtres_info = "nova VM in Arm pod" cldvirtres_IPAddress = "50.60.70.80" cldvirtres_URL = "http://50.6...
[ "def __init__(__self__,\n resource_name: str,\n args: VirtualHardDiskArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def pre_virtual_machine_create(self, resource_dict):\n pass", "def init(self):\n self.caller_id = MasterUser....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to initialize VNFs and e2e Services data.
def init_VNFs_Services(): test_VNFs_Services = [] # add info to list in memory, one by one, following signature values vnf_serv_ID = 1 vnf_serv_name = "vCPE-1" vnf_serv_info = "virtual CPE in Arm pod" vnf_serv_IPAddress = "5.4.3.2" vnf_serv_URL = "http://5.4.3.2:8080" vnf_serv_related_p...
[ "def _init_services(self) -> None:\n pass", "def initializeSolutionVectors(self, runtimeMemoryManager, solutionDataFile):\n\n \"\"\" Initialize Solution Vectors \"\"\"\n\n print(\"\"\" Initialize Solution Vectors \"\"\")\n\n self.dsetGroup = solutionDataFile.create_group('Heart')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append a string to a list of strings and add a timestamp.
def append_to_list(self, string_to_append): if type(string_to_append)==str: current_time = datetime.now() self.__string_list.append(string_to_append) self.__timestamp_list.append(current_time) # timestamp will have the same index as string else: print("ap...
[ "def add(self, timestamp):\n self.total_count += 1\n self.times.append(timestamp)", "def append(self, timedelta, message):\n try:\n timedelta_list = self[timedelta]\n except KeyError:\n self[timedelta] = []\n timedelta_list = self[timedelta]\n ti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of strings with timestamps as prefixes (not showing microseconds).
def get_timestamped_strings(self): ret_list = [] i = 0 while i < len(self.__string_list): ret_list.append(self.__timestamp_list[i].strftime("%Y-%m-%d %H:%M:%S")+" "+self.__string_list[i]) i += 1 return ret_list
[ "def get_date_time_strings(self) -> List[str]:\n return_list: List[str] = []\n for occ in self.occurrences:\n s: str = Event.occurrence_to_string(occ)\n return_list.append(s)\n return return_list", "def get_timestamped_metric_values_as_strings(self):\n ret_list = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic function to dump all Challenge Execution data in a CSV file.
def write_to_csv(self): dump_list = [] # add rows one by one, each as a list, even if only 1 element dump_list.append(["challenge execution ID",self.ID]) dump_list.append(["challenge execution name",self.name]) dump_list.append(["challenge definition ID",self.challenge_def_ID...
[ "def output_ctfd_csv(challenges: List[Challenge]): # TODO: unused\n chal_file = open(\"challenges.csv\", \"w\")\n chal_file.write(f\"id,name,description,max_attempts,value,category,type,state,requirements\\n\")\n for challenge in challenges:\n chal_file.write(repr(challenge) + \"\\n\")\n chal_fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append a metric value (MetricValue) to the list. MetricValue already has a timestamp attribute.
def append_to_list(self, metric_value_to_append): if type(metric_value_to_append)==MetricValue: self.__metric_value_list.append(metric_value_to_append) else: print("appended object must be a MetricValue, metric_value_to_append=",metric_value_to_append) sys.exit() # s...
[ "def _add_single_metric(self, timestamp, metric_name, value):\n # note that this method is built this way to make it possible to\n # support live-refreshing charts in Bokeh at some point in the future.\n self._data[\"timestamp\"].append(timestamp)\n self._data[\"metric_name\"].append(met...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of strings with metric values and timestamps as prefixes (not showing microseconds). Also show the metric def ID in parentheses.
def get_timestamped_metric_values_as_strings(self): ret_list = [] i = 0 while i < len(self.__metric_value_list): ret_list.append(self.__metric_value_list[i].timestamp.strftime("%Y-%m-%d %H:%M:%S") + " " + str(self.__metric_value_list[i].value) + ...
[ "def metrics_names(self):\n return []", "def metric_names():\n return ['total', 'exe', 'fetch']", "def _gen_metrics(metrics:[(str, float)]) -> str:\n metrics = tuple(metrics)\n metric_name_size = max((len(m) for m, _ in metrics))\n for metric, value in metrics:\n if isinstance(valu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic function to dump all Test Execution data in a CSV file.
def write_to_csv(self): dump_list = [] # add rows one by one, each as a list, even if only 1 element dump_list.append(["test execution ID",self.ID]) dump_list.append(["test execution name",self.name]) dump_list.append(["test definition ID",self.test_def_ID]) test_def_...
[ "def Dump():\n with open(path.join(MAIN_PATH, INST), \"wb\") as f:\n writer = csv.writer(f, delimiter=\",\")\n\n for inst in instances:\n writer.writerow(inst)\n \n with open(path.join(MAIN_PATH, \"test_instances.csv\"), \"wb\") as f:\n writer = csv.writer(f, delimit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save the code and configuration file. During training we may modify the code, which becomes problematic when we try to extract embeddings using the old model with the new code. So we save the code when we train the model and use the saved copy to extract embeddings.
def save_codes_and_config(cont, model, config): if cont: # If we want to continue the model training, we need to check the existence of the checkpoint. if not os.path.isdir(os.path.join(model, "nnet")) or not os.path.isdir(os.path.join(model, "codes")): sys.exit("To continue training the...
[ "def save(self):\n if self._cache_file:\n self._logger.info(f\"Saving calculated embeddings\")\n with open(self._cache_file, \"wb\") as out_file:\n pickle.dump(self._embeddings, out_file)", "def save_embeddings(self):\n output = open(PREVIOUSLY_USED_CHAR_EMBEDDIN...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the pretrained model and copy to the target model as the initial version.
def get_pretrain_model(pretrain_model, target_model, checkpoint='-1'): if not os.path.isfile(os.path.join(pretrain_model, "checkpoint")): sys.exit("[ERROR] Cannot find checkpoint in %s." % pretrain_model) ckpt = tf.train.get_checkpoint_state(pretrain_model) model_checkpoint_path = ckpt.model_checkp...
[ "def get_pretrained_model(self, destination):\n url = 'http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz'\n os.system(\"curl -o ssd_mobilenet_v1_coco_2018_01_28.tar.gz {0}\".format(url))\n with tarfile.open(\"ssd_mobilenet_v1_coco_2018_01_28.tar.gz\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load learning rate from a saved file
def load_lr(filename): learning_rate_array = [] with open(filename, "r") as f: for line in f.readlines(): _, lr = line.strip().split(" ") learning_rate_array.append(float(lr)) return learning_rate_array
[ "def load_weights(self, filename):", "def loadweights(self, filename):", "def load(self):\n latest = tf.train.latest_checkpoint(self.checkpoint_dir)\n self.model.load_weights(latest)", "def init_lr(self):\n if isinstance(self.config.train.lr, str):\n self.learning_rate = 0.02\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load valid loss from a saved file
def load_valid_loss(filename): min_loss = ValidLoss() with open(filename, "r") as f: for line in f.readlines(): epoch, loss = line.strip().split(" ")[:2] epoch = int(epoch) loss = float(loss) if loss < min_loss.min_loss: min_loss.min_loss =...
[ "def load_good(self):\n try:\n good = pu.good_model_file_by_loss(self.get('model_dir'))\n self.load(good)\n except:\n pass # Before a model has been saved, this would raise an exception", "def load_loss() :\n#{{{\n assert settings.RANK == 0\n\n try :\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute pairwise EER using cosine similarity. The EER is estimated by interp1d and brentq, so it is not the exact value and may be a little different each time.
def compute_cos_pairwise_eer(embeddings, labels, max_num_embeddings=1000): embeddings /= np.sqrt(np.sum(embeddings ** 2, axis=1, keepdims=True) + 1e-12) num_embeddings = embeddings.shape[0] if num_embeddings > max_num_embeddings: # Downsample the embeddings and labels step = num_embeddings /...
[ "def compute_EER(self, FAR, FRR):\r\n print('Computing EER')\r\n distance = abs(FAR - FRR)\r\n min_distance = min(distance)\r\n idx = np.where(distance == min_distance)\r\n return np.mean((FAR[idx] + FRR[idx]) / 2)", "def compute_cos_pairwise_eer(embeddings, labels, max_num_embe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether part of the string s appears in the list.
def substring_in_list(s, varlist): if varlist is None: return False is_sub = False for v in varlist: if v in s: is_sub = True break return is_sub
[ "def list_in_string(self, stringlist, string):\n for lstring in stringlist:\n if lstring in string:\n return True\n return False", "def nameContains(self, s):\n name = self.name.lower()\n if isinstance(s, list):\n return any(n.lower() in name for n in s)\n else:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a summary for activations given the endpoints.
def activation_summaries(endpoints): sum = [] with tf.name_scope('summaries'): for act in endpoints.values(): tensor_name = act.op.name sum.append(tf.summary.histogram(tensor_name + '/activations', act)) # sum.append(tf.summary.scalar(tensor_name + '/sparsity', tf.nn....
[ "def _activation_summary(x):\n # session. This helps the clarity of presentation on tensorboard.\n tf.summary.histogram(x.op.name + '/activations', x)\n tf.summary.scalar(x.op.name + '/sparsity', tf.nn.zero_fraction(x))", "def _activation_summary(x):\n # Remove 'tower_[0-9]/' from the name in case thi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Executes SSM document for given document name and input parameters.
def execute(self, document_name, input_params): if self._document_exists(document_name): self.logger.info("Executing SSM document [%s] with parameters: [%s]", document_name, input_params) # Executing SSM document execution_id = self.ssm_client.start_automation_execution( ...
[ "def execute_ssm_automation(ssm_document, ssm_document_name, cfn_output_params, cfn_installed_alarms, ssm_test_cache,\n ssm_input_parameters):\n parameters = ssm_document.parse_input_parameters(cfn_output_params, cfn_installed_alarms, ssm_test_cache,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the final SSM document execution status; if the status is InProgress or Pending, it waits until the SSM document execution is completed.
def wait_for_execution_completion(self, execution_id, document_name=None): # Fetch ssm execution status status = self._get_execution_status(execution_id, document_name) # Wait for execution to be completed while status == 'InProgress' or status == 'Pending' or status == 'Cancelling' or ...
[ "def _get_execution_status(self, execution_id, document_name=None):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n # TODO(semiond): we can remove document name as parameter, can take it by execution id.\n document_name = docu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns SSM document step output for given execution id, step name and output key.
def get_step_output(self, execution_id, step_name, output_key): execution = self.ssm_client.get_automation_execution( AutomationExecutionId=execution_id ) step_executions = execution['AutomationExecution']['StepExecutions'] step = self._get_step_by_name(step_executions, step_...
[ "def _extract_output(self, output, step, task, ret, task_output):\n pass", "def _get_step_output_uri(self, step):\n # parse in reverse order, in case there are multiple -output args\n args = step.args()\n for i, arg in reversed(list(enumerate(args[:-1]))):\n if arg == '-outp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancels the SSM document execution and waits until the SSM execution triggered by the 'TriggerRollback' step is completed.
def cancel_execution_with_rollback(self, execution_id: str): execution_url = self.get_execution_url(execution_id) try: self.logger.info("Canceling SSM execution: {}".format(execution_url)) self.ssm_client.stop_automation_execution(AutomationExecutionId=execution_id, Type='Cancel'...
[ "def cancel(self):\n self.session.rollback()", "def test_cancel_scheduled_batch_job(self):\n pass", "def abort_document_runs_on_delete(sender, instance, using, **kwargs):\n if instance.cloudfactorydocumentrun_set.exists():\n tasks = importlib.import_module('myhpom.tasks')\n for ru...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns SSM document execution status for given execution id.
def _get_execution_status(self, execution_id, document_name=None): execution = self.ssm_client.get_automation_execution( AutomationExecutionId=execution_id ) # TODO(semiond): we can remove document name as parameter, can take it by execution id. document_name = document_name ...
[ "def get_workflow_status(self, execution_id):\n workflow_status = self.execution_db.session.query(WorkflowStatus).filter_by(execution_id=execution_id).first()\n if workflow_status:\n return workflow_status.status\n else:\n logger.error(\"Workflow execution id {} does not e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns execution step status for given execution id and step name.
def _get_execution_step_status(self, execution_id, step_name): execution = self.ssm_client.get_automation_execution( AutomationExecutionId=execution_id ) step_executions = execution['AutomationExecution']['StepExecutions'] step = self._get_step_by_name(step_executions, step_n...
[ "def get_successfully_executed_steps_by_order(self, execution_id):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n step_executions = execution['AutomationExecution']['StepExecutions']\n step_names: List = []\n if step_e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns successfully executed steps by order of their execution
def get_successfully_executed_steps_by_order(self, execution_id): execution = self.ssm_client.get_automation_execution( AutomationExecutionId=execution_id ) step_executions = execution['AutomationExecution']['StepExecutions'] step_names: List = [] if step_executions: ...
[ "def executed(self):\n return all(step.achieved for step in self.steps)", "def _validate_execution_order(self, results):\n # Adapted from\n # http://ipython.org/ipython-doc/dev/parallel/dag_dependencies.html\n self._logger.write(\"Validating execution order... \")\n for node in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns SSM document step by given status.
def _get_step_by_status(self, steps, status): if steps: for s in steps: if s['StepStatus'] == status: return s
[ "def select_step_with_status(status, steps):\n for step in steps:\n assert isinstance(step, model.Step), \"TYPE-MISMATCH: \"+\\\n \"step.class={0}\".format(step.__class__.__name__)\n if step.status == status:\n return step\n # -- OTHERWISE: No step with the give...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns SSM document step by a given name.
def _get_step_by_name(self, steps, step_name): if steps: for s in steps: if s['StepName'] == step_name: return s
[ "def step_by_name( self, name):\n\n# print( \"Looking for {}\".format( name ))\n assert name in self._step_index, \"No step named '{}'\".format( name )\n\n return self._steps[ self._step_index[ name ]]", "def find_step(self, name):\r\n for step_dict in self.all_steps:\r\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if an SSM document with the given name exists, False otherwise.
def _document_exists(self, document_name): return len(self.ssm_client.list_document_versions(Name=document_name)['DocumentVersions']) >= 1
[ "def check_if_doc_exists(self, file_name: str):\n return self.es.exists(index=self.index, id=file_name)", "def documentExists(self, id):\n uri = \"/%s/%s\" % (self.name, urllib.quote_plus(id))\n docExists = False\n try:\n self.makeRequest(uri, {}, 'HEAD')\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the SSM document execution step URL.
def get_execution_step_url(self, execution_id: str, step_name: str, steps: [] = None) -> str: if not steps or len(steps) < 1: execution = self.ssm_client.get_automation_execution(AutomationExecutionId=execution_id) steps = execution['AutomationExecution']['StepExecutions'] step ...
[ "def run_url(self) -> str:\n return self._run_url", "def invoke_url(self) -> pulumi.Output[str]:\n return self.stage.invoke_url # type: ignore[no-any-return]", "def run_url(self):\r\n\r\n return self.metadata[\"origin\"][\"runurl\"]", "def runbook_url(self) -> pulumi.Output[Optional[str]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns SSM document step execution sequence index
def _get_step_execution_index(self, step_executions: [], step_name): index = 1 for step_execution in step_executions: if step_name == step_execution['StepName']: return index index += 1
[ "def get_sequence_index(self):\n\t\treturn call_sdk_function('PrlBootDev_GetSequenceIndex', self.handle)", "def get_current_step_index(self) -> Optional[int]:", "def step_index(self, step):\n return self.steps.index(step)", "def get_step_idx(self, step_id: str) -> int:\n return self.step_id2idx....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Eviction filings broken down on a week-by-week basis.
def weekly(evictiondata): evictions_per_week = {} for index, row in evictiondata.iterrows(): if row['week_date'] not in evictions_per_week.keys(): evictions_per_week[row['week_date']] = row['filings_2020'] else: evictions_per_week[row['week_date']] += row['filings_2...
[ "def weekly():", "def weekly():\n\n response = {}\n\n # 0..6 => Sunday..Saturday\n for i in range(7):\n hours = []\n interactions = 0\n\n for j in range(25):\n try:\n wfile = open(common.stats_path + '/weekly-average/' + str(i) + '/' + str(j))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visualizes the week-by-week eviction data as a graph.
def graphify(evictions_per_week): weeks = [] for week in evictions_per_week.keys(): if '2020' in week: weeks.append(week) evictions_filed = [] for week in weeks: evictions_filed.append(evictions_per_week[week]) plt.figure(figsize=(50, 10)) plt.plot(weeks, evi...
[ "def visualize_days():\n\tdata_file = parse(MY_FILE, \",\") #uses the parse function above\n\t#returns a dict where it sums total values for each key\n\t#in this case, keys are dayofweek, values are count of incidents\n\t#count of incidents\n\t\n\tcounter = Counter(item[\"DayOfWeek\"] for item in data_file) #counte...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Graphs the baseline eviction data of 2015-2016 in the same format.
def graph_baseline(evictiondata, weeks): base_evictions_per_week = {} for index, row in evictiondata.iterrows(): if row['week_date'] not in base_evictions_per_week.keys(): base_evictions_per_week[row['week_date']] = row['filings_avg'] elif row['GEOID'] != 'sealed': ...
[ "def cross_analyze(evictions_filed, base_evictions_filed, weeks):\r\n plt.figure(figsize=(50, 10))\r\n plt.plot(weeks, evictions_filed, label = '2020')\r\n plt.plot(weeks, base_evictions_filed, label = '2015-2016')\r\n plt.xlabel('Date', fontsize = 25)\r\n plt.ylabel('Evictions filed', fontsize = 25)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cross-analyzes the baseline with 2020's eviction data. NOTE: Requires you to run the above functions.
def cross_analyze(evictions_filed, base_evictions_filed, weeks): plt.figure(figsize=(50, 10)) plt.plot(weeks, evictions_filed, label = '2020') plt.plot(weeks, base_evictions_filed, label = '2015-2016') plt.xlabel('Date', fontsize = 25) plt.ylabel('Evictions filed', fontsize = 25) plt.title...
[ "def emissions_baseline(self):\n baseline = DataFrame(columns=[\"CO2\", \"NOx\", \"PM10\", \"PM2.5\", \"SO2\"])\n baseline = baseline.append(year_1(self.plant.emissions()))\n baseline = baseline.append(year_1(self.plant.fuel_reseller().emissions()))\n baseline = baseline.append(year_1(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a switch exists for the device.
def _switch_exist(lge_device: LGEDevice, switch_desc: ThinQSwitchEntityDescription) -> bool: if switch_desc.value_fn is not None: return True feature = switch_desc.key if feature in lge_device.available_features: return True return False
[ "def get_switch(self):\n\n svc = \"urn:upnp-org:serviceId:SwitchPower1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n \n status = self.get_variable(svc, \"Status\")\n return status == 1", "def is_port_switch_available(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if entity is available.
def available(self) -> bool: is_avail = True if self.entity_description.available_fn is not None: is_avail = self.entity_description.available_fn(self._wrap_device) return self._api.available and is_avail
[ "def available(self) -> bool:\n return super().available and (\n self.coordinator.data.get(self.entity_description.key) is not None\n )", "def _enabled_entity_exists(self) -> bool:\n return self.entity_exists(self._enabled_toggle_entity_id)", "def has_entity(self, entity):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get current switch state
def _get_switch_state(self): if self.entity_description.value_fn is not None: return self.entity_description.value_fn(self._wrap_device) if self._api.state: feature = self.entity_description.key return self._api.state.device_features.get(feature) return None
[ "def getState(self): \n return self.tello.get_current_state()", "def get_current_state(self):\n return self.robot.get_current_state()", "def read_switch(self):\n switch = GPIO.input(SWITCH_PIN)\n\n if (switch == 0):\n switch=1\n else:\n switch=0\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a List of Tensors and returns a List of mask Tensor with 1 if the input was all zeros (on dimension 2) and 0 otherwise. This is used in the Attention layer to mask the padding observations.
def get_zero_entities_mask(entities: List[torch.Tensor]) -> List[torch.Tensor]: with torch.no_grad(): if exporting_to_onnx.is_exporting(): with warnings.catch_warnings(): # We ignore a TracerWarning from PyTorch that warns that doing # shape[n].item() will cause ...
[ "def build_attention_mask(input_ids): \n attention_masks = [] \n\n # 1 for input and 0 for pad\n for seq in input_ids: \n attention_masks.append([float(i>0) for i in seq])\n\n return attention_masks", "def convert_paddings_to_mask(paddings: JTensor,\n dtype: jnp.dtyp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the configuration file that manages the raw data. conf is a dictionary.
def load_config_raw_data(conf): path = Path(conf["conf_raw_data"]) with open(path) as f: txt = f.read() conf = json.loads(txt) return conf
[ "def load_config(self):\n self.data.read(self.path)", "def _load_cfg(self):\n try:\n with open(self._file_path) as conf:\n self._json_cfg = json.load(conf)\n except:\n print('Failed to load configuration from:', self._file_path)\n raise", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the table specified by the name 'table' (string) as a pandas DataFrame. The name must match one of the keys in the conf raw data file.
def load_raw_table(conf, table): confrd = load_config_raw_data(conf) path_table = Path(confrd[table]["path"]) sep = confrd[table]["sep"] encoding = confrd[table]["encoding"] df = pd.read_csv(path_table, sep=sep, encoding=encoding) return df
[ "def read_table(table_f):\n df = pandas.read_csv(table_f, sep=\"\\t\", header=0, index_col=0)\n\n # convert potentially numerical row names into strings\n df.index = [str(i) for i in df.index]\n\n return df", "def read_table(file_name: Union[str, Path], **kwargs):\n\tfile_name = Path(file_name)\n\text...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the contents of the HydroShare iRODS userspace.
def ils(self): cmd = Popen(['ils'], stdout=PIPE, stderr=STDOUT, shell=True) stdout = cmd.communicate()[0].decode('ascii') if cmd.returncode != 0: print('Failed to fetch irods file list: %s' % stdout) return [] return [s.replace('C-', '').strip() for s in ...
[ "def list():\n rino.remote.list()", "def list():\n data = getInstaData()\n return render_template(\"list.html\", data=data)", "def list_users(self):\n lines = output_lines(self.exec_psql('\\\\du'))\n return [line.split('|') for line in lines]", "def showData(self, from_):\n out = sub...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints help for a specified tool.
def print_specific_help(tool_name): if tool_name not in AvailableCommands.commands: print 'Command is not supported: {0}'.format(tool_name) return cmd = AvailableCommands.commands[tool_name] print 'Usage of {0}:'.format(cmd.name) print '\nAccepted input types:\n{0}'.format(str(li...
[ "def tool_help( self, trans, id ):\n toolbox = self.get_toolbox()\n tool = toolbox.tools_by_id.get(id, '')\n yield \"<html><body>\"\n if not tool:\n yield \"Unkown tool id '%s'\" % id\n elif tool.help:\n yield tool.help\n else:\n yield \"No ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates command line objects to compress/decompress a workflow.
def generate_compression_command_line_objects(dir_stack, command_line_parameters): # Generate command lines threads = [] thread_sizes = [] first_d = True for d in dir_stack: if first_d: first_d = False continue if not os.path.isdir(d.path): continue...
[ "def cli(yamlfile, **args):\n print(LogicProgramGenerator(yamlfile, **args).serialize(**args))", "def main():\n\n import sys\n import os\n\n command = sys.argv[1]\n if 'generate-source' == command:\n serialization_composite_h_file = os.path.abspath(sys.argv[2])\n elif 'generate-test' == command:\n s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates commands to execute workflow for each input file.
def generate_command_line_objects(input_file_parameters, dir_stack, auto_split_workflows): workflows = [] prev_number_of_ids_per_command = None prev_command_had_output_dir = True first_command = True # Bools for splitting workflow. Separate values for automatically splitting workflow and #...
[ "def get_command(self, input_files):\r\n return []", "def process(args):\n manifest = Manifest()\n manifest.check_all()\n if args.batch:\n workflow_batch(manifest)\n else:\n workflow_single(manifest, args.process_id, args.procedures,\n args.files, args.id, a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reports which commands have not been run successfully. Commands found in the staplefile are compared with files found in the directory stack to identify which commands have failed.
def validate_run_results(input_file_parameters, dir_stack): prev_command_had_output_dir = True dir_stack_index = -1 command_index = 0 for current_command in input_file_parameters.commands: # Skip over SPLIT commands if current_command == 'SPLIT': continue co...
[ "def test_execute_commands_stop_on_first_failure(self):\n # All commands succeed.\n exp = (True, [])\n log_f = TemporaryFile(prefix=self.prefix, suffix='.txt')\n obs = _execute_commands(['echo foo', 'echo bar'], log_f, 1, True)\n self.assertEqual(obs, exp)\n\n exp = (\"Comm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the output in simple shell script format. The default format is a shell script file containing the command lines.
def write_default(workflows, output_dir): # Calculate the total number of commands number_of_commands = 0 for workflow in workflows: number_of_commands += sum(map(len, workflow)) # Create command line strings i = 0 out_lines = ['echo Started executing shell script at:', 'date'...
[ "def WriteShellFileHeader(f):\n Writeln(f, '!/bin/sh')", "def makeScript(self):\n \n script = 'echo \"----- JobTransform shell script starts ---------\"\\n'\n script += '#! /bin/bash\\n'\n script += 'set +e\\n'\n script += self.indicateThisIsRTT() \n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the output in LSF job array format. Creates sub shell scripts that contain the workflow for each input file separately. After this, a main shell script containing the LSF configuration is created; it is responsible for starting the sub shells as separate processes.
def write_lsf(workloads, input_file_parameters, command_line_parameters): workload_index = 0 workload_zfill_amount = len(str(len(workloads))) workload_file_paths = [] for workload in workloads: # Each workflow part will have separate file to submit to TORQUE with # sbatch command. ...
[ "def write_unix(workloads, input_file_parameters, command_line_parameters):\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n background_process_list = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the output in Sun Grid Engine job array submission format. Creates sub shell scripts that contain the workflow for each input file separately. After this, a main shell script containing the SGE configuration is created; it is responsible for starting the sub shells as separate processes.
def write_sge(workloads, input_file_parameters, command_line_parameters): validate_resource_manager_parameters( input_file_parameters.resource_manager_params, ['# -o', '# -e', '# -t']) workload_index = 0 workload_zfill_amount = len(str(len(workloads))) workload_file_paths = [] ...
[ "def gen_jobs(fpath, num_runs, netid):\n\n run = \"\"\n run += \"import sys\\n\"\n run += \"import subprocess\\n\"\n run += \"cmd_array = (\"\n for i in range(num_runs):\n run += \"r\\\"python test.py %d\\\"\" % i\n run += \",\\n\"\n\n run += \")\\n\"\n run += \"p = subprocess.Pop...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the output in SLURM array job format. Creates sub shell scripts that contain the workflow for each input file separately. After this, a main shell script containing the SLURM configuration is created; it is responsible for starting the sub shells as separate processes.
def write_slurm(workloads, input_file_parameters, command_line_parameters): workload_index = 0 workload_zfill_amount = len(str(len(workloads))) workload_file_paths = [] for workload in workloads: # Each workflow part will have separate file to submit to SLURM with # sbatch command....
[ "def write_sge(workloads, input_file_parameters, command_line_parameters):\r\n validate_resource_manager_parameters(\r\n input_file_parameters.resource_manager_params,\r\n ['# -o', '# -e', '# -t'])\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the output in TORQUE multiple job submission format. Creates sub shell scripts that contain the workflow for each input file separately. After this, a main shell script containing the TORQUE configuration is created; it is responsible for starting the sub shells as separate processes.
def write_torque(workloads, input_file_parameters, command_line_parameters): validate_resource_manager_parameters( input_file_parameters.resource_manager_params, ['#PBS -k', '#PBS -N', '#PBS -d', '#PBS -e', '#PBS -t']) workload_index = 0 workload_zfill_amount = len(str(len(workloads))...
[ "def write_unix(workloads, input_file_parameters, command_line_parameters):\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n background_process_list = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes a parallelized workflow by using UNIX run background feature (&). Creates sub shell scripts that contain the workflow for each input file separately. After this main shell script is written, where each workflow is set to run as background process by using the shell & character. Workflow parts are separated by wa...
def write_unix(workloads, input_file_parameters, command_line_parameters): workload_index = 0 workload_zfill_amount = len(str(len(workloads))) background_process_list = [] for workload in workloads: # Each workflow part will have separate file to submit to TORQUE with # sbatch co...
[ "def write_slurm(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to SLURM with\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test that all ssh kwargs are not excluded from kwargs when preparing the SSH opts
def test_ssh_kwargs(test_opts): opt_key = test_opts[0] opt_value = test_opts[1] # Is the kwarg in salt.utils.parsers? in_parser = test_opts[2] opts = { "eauth": "auto", "username": "test", "password": "test", "client": "ssh", "tgt": "localhost", "fun"...
[ "def ssh_args(self):", "def _BuildSshOptions(self, batch, ask_key, use_cluster_key,\n strict_host_check, private_key=None, quiet=True,\n port=None):\n options = [\n \"-oEscapeChar=none\",\n \"-oHashKnownHosts=no\",\n \"-oGlobalKnownHostsFile=%s\" % pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test expand_target when the host is not included in the roster data
def test_expand_target_no_host(opts, tmp_path): host = "127.0.0.1" user = "test-user@" opts["tgt"] = user + host roster = """ localhost: 127.0.0.1 """ roster_file = str(tmp_path / "test_roster_no_host") with salt.utils.files.fopen(roster_file, "w") as fp: salt.utils.yaml...
[ "def test_update_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test update_targets when the host is an IP address
def test_update_targets_ip_address(opts): host = "127.0.0.1" user = "test-user@" opts["tgt"] = user + host with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)): client = ssh.SSH(opts) assert opts["tgt"] == user + host client._update_targets() assert opts...
[ "def test_update_targets_dns(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }