Columns: query (string, 9 to 9.05k characters), document (string, 10 to 222k characters), negatives (list of 19-20 strings), metadata (dict). Each row below is listed in that order: query, document, negatives, metadata.
Retrieve the most recent rebalance submission's approval status
def get_rebalance_approval_status(self) -> str: last_approval = GsIndexApi.last_rebalance_approval(self.id) return get(last_approval, 'status')
[ "def _calculate_approval(self):\n from reviewboard.extensions.hooks import ReviewRequestApprovalHook\n\n approved = True\n failure = None\n\n if self.shipit_count == 0:\n approved = False\n failure = 'The review request has not been marked \"Ship It!\"'\n eli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancel the most recent rebalance submission Usage Cancel the basket's most recent rebalance submission if it has not yet been approved Examples Cancel the basket's most recent rebalance submission >>> from gs_quant.markets.baskets import Basket >>> >>> basket = Basket.get("GSMBXXXX") >>> basket.cancel_rebalance() See a...
def cancel_rebalance(self) -> Dict: return GsIndexApi.cancel_rebalance(self.id, CustomBasketsRebalanceAction.default_instance())
[ "def request_cancel(self, *args, **kwargs) -> None:\n self.connection.request_cancel_workflow_execution(self.domain.name, self.workflow_id, run_id=self.run_id)", "def cancel_request(self, requestid):\n # TODO: return to SedmDb.py because of how much sql \"understanding\" it requires?\n self.d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve corporate actions for a basket across a date range
def get_corporate_actions(self, start: dt.date = DateLimit.LOW_LIMIT.value, end: dt.date = dt.date.today() + dt.timedelta(days=10), ca_type: List[CorporateActionType] = CorporateActionType.to_list()) -> pd.DataFrame: where...
[ "def apply_corporate_actions(stock, corporate):\n stock[\"Date\"] = pd.to_datetime(stock[\"Date\"])\n corporate[\"Ex Date\"] = pd.to_datetime(\n corporate[\"Ex Date\"], errors='coerce')\n # corporate[\"BC Start Date\"] = pd.to_datetime(corporate[\"BC Start Date\"],errors='coerce')\n # corporate[\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve fundamentals data for a basket across a date range
def get_fundamentals(self, start: dt.date = DateLimit.LOW_LIMIT.value, end: dt.date = dt.date.today(), period: DataMeasure = DataMeasure.ONE_YEAR.value, direction: DataMeasure = DataMeasure.FORWARD.value, ...
[ "def request_fundamentals(stock_index):\n items = [\n ['l1', 'Last Price'],\n ['y', 'Dividend Yield'],\n ['r', 'Price/Earnings'],\n ['e', 'Earnings/Share'],\n ['b4', 'Book Value'],\n ['j', '52 week low'],\n ['k', '52 week high'],\n ['j1', 'Market Cap'],\n ['j4', 'EBITDA'],\n ['p5', ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve basket's live date Usage Retrieve basket's live date Examples >>> from gs_quant.markets.baskets import Basket >>> >>> basket = Basket.get("GSMBXXXX") >>> basket.get_live_date()
def get_live_date(self) -> Optional[dt.date]: return self.__live_date
[ "def time_to_live(self) -> Optional[str]:\n return pulumi.get(self, \"time_to_live\")", "def getnow(self):\n print()\n print(\"Current date:\")\n print(datetime.date.today())", "def test_get_product_live_time_details_success():\n expected_data = {\n 'product_id': 12,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve url to basket's product page in Marquee Usage Retrieve url to basket's product page in Marquee Examples >>> from gs_quant.markets.baskets import Basket >>> >>> basket = Basket.get("GSMBXXXX") >>> basket.get_url()
def get_url(self) -> str: env = '-dev' if 'dev' in get(GsSession, 'current.domain', '') else '' env = '-qa' if 'qa' in get(GsSession, 'current.domain', '') else env return f'https://marquee{env}.gs.com/s/products/{self.id}/summary'
[ "def get_url_page(self, product):\n return product.get('url')", "def getProductUrl(productId):\r\n return baseUrl + productId", "def item_url(self):\n return self.get_url(item=True)", "def getQueueURL():\n q = SQS.get_queue_url(QueueName='RestaurantRequest').get(QUEUE_URL)\n logger.debu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and schedule a new factor risk report for your basket
def add_factor_risk_report(self, risk_model_id: str, fx_hedged: bool): payload = CustomBasketRiskParams(risk_model=risk_model_id, fx_hedged=fx_hedged) return GsIndexApi.update_risk_reports(payload)
[ "def scrum_report(ctx): # pylint: disable=unused-argument\n generate_scrum_report()", "def generate_report():", "def ticket(self,args,groupby='nite'):\n try:\n args.dataframe\n except:\n print(\"Must specify input data!\")\n sys.exit(1)\n \n if args...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete an existing factor risk report for your basket
def delete_factor_risk_report(self, risk_model_id: str): payload = CustomBasketRiskParams(risk_model=risk_model_id, delete=True) return GsIndexApi.update_risk_reports(payload)
[ "def delete(self, crash_report_id):\n pass", "def test_delete_report(self):\n report = dict(_id=\"report_uuid\")\n self.database.reports.find_one.return_value = report\n self.assertEqual(dict(ok=True), delete_report(\"report_uuid\", self.database))", "def delete_record():", "def re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If basket should be backcasted using the current composition
def default_backcast(self) -> Optional[bool]: return self.__default_backcast
[ "def process_basket(self, basket: BaseBasket, request: HttpRequest) -> None:", "def test_copyBasket(self):\n basket1 = self.createBasket()\n basket1.addItem(\"beans\")\n basket1.addItem(\"spaghetti hoops\")\n\n basket2 = self.createBasket()\n basket2.copyFrom(basket1)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the basket is flagship (internal only)
def flagship(self) -> Optional[bool]: return self.__flagship
[ "def is_shippable(sender, **kwargs):\n rental_item = kwargs.get('instance')\n if rental_item.id:\n return\n\n if rental_item.shipping_method:\n rental_item.is_shippable = True", "def is_flagged(self, key: Key) -> bool:\n return self.get_rank(key) == Rank.FLAG", "def is_gift(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initial price the basket should start ticking at
def initial_price(self) -> Optional[float]: return self.__initial_price
[ "def set_next_price(bundle_item):\r\n prev_price = bundle_item", "def _get_base_price(self) -> int:\n pass", "def set_prev_price(bundle_item):\r\n prev_price = bundle_item", "def get_base_price(self):\n # in progress\n # day = datetime.date.weekday()\n # print day\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the basket should be published to Bloomberg
def publish_to_bloomberg(self) -> Optional[bool]: return self.__publish_to_bloomberg
[ "def has_published_version(self, xblock):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def is_available_bsin(cls, bsin):\n try:\n cls.objects.get(bsin=bsin)\n return False\n except Brand.DoesNotExist:\n return True", "def PublishingTo3DDwf(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the basket should be published to Factset
def publish_to_factset(self) -> Optional[bool]: return self.__publish_to_factset
[ "def publish_to_bloomberg(self) -> Optional[bool]:\n return self.__publish_to_bloomberg", "def is_published(self):\n return self.article.stage == STAGE_PUBLISHED", "def is_satisfied(self, item: Product):", "def is_basket_empty(basket):\n if not basket:\n return True\n return False",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the basket should be published to Reuters
def publish_to_reuters(self) -> Optional[bool]: return self.__publish_to_reuters
[ "def publish_to_bloomberg(self) -> Optional[bool]:\n return self.__publish_to_bloomberg", "def requires_republish(self) -> bool:\n return typing.cast(\n bool,\n self._properties.get(\"requiresRepublish\"),\n )", "def publish_to_factset(self) -> Optional[bool]:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If updates require both an edit and a rebalance, the rebalance will not be scheduled until (and unless) the edit report succeeds
def __edit_and_rebalance(self, edit_inputs: CustomBasketsEditInputs, rebal_inputs: CustomBasketsRebalanceInputs) -> CustomBasketsResponse: _logger.info('Current update request requires multiple reports. Your rebalance request will be submitted \ once the edit r...
[ "def run_scheduler(self, cr, uid, context=None): \n self.update_crm(cr, uid, context)\n return True", "def test_successful_update(self):\n\n manager = SchedulerManager()\n manager.sync_with_database()", "def if_trigger_update(self):\n\n if self.status != st.ComponentStatus.O...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Current basket settings for existing basket
def __populate_current_attributes_for_existing_basket(self, gs_asset: GsAsset): self.__clone_parent_id = get(gs_asset, 'parameters.cloneParentId') self.__default_backcast = get(gs_asset, 'parameters.defaultBackcast') self.__description = get(gs_asset, 'description') self.__flagship = get...
[ "def setup_basket(self, basket: BaseBasket, request: HttpRequest) -> None:", "def show_product(self):\n return self.baskets", "def GetCurrentSettings():\n return Setting.get_by_id('current_settings')", "def get_shipping_settings(self):\n return self.client.execute(\"product/get-shipping-setti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Default basket settings prior to creation
def __populate_default_attributes_for_new_basket(self, **kwargs): self.__allow_ca_restricted_assets = get(kwargs, 'allow_ca_restricted_assets') self.__allow_limited_access_assets = get(kwargs, 'allow_limited_access_assets') self.__clone_parent_id = get(kwargs, 'clone_parent_id') self.__c...
[ "def setup_basket(self, basket: BaseBasket, request: HttpRequest) -> None:", "def __populate_current_attributes_for_existing_basket(self, gs_asset: GsAsset):\n self.__clone_parent_id = get(gs_asset, 'parameters.cloneParentId')\n self.__default_backcast = get(gs_asset, 'parameters.defaultBackcast')\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build and deploy the project and give the log to Search4Ejb
def add_components(cls, project_path): old_path = os.getcwd() os.chdir(project_path) # print("begin mvn clean package"+absoluteProjectPath) # subprocess.call(["mvn", "clean", "package"], shell = True) # print("end mvn clean package") print("Veuillez deployer l'ear") ...
[ "def deploy_eis_app():", "def build():\n if not os.path.exists(\"build\"):\n os.mkdir(\"build\")\n local(\"date >> build/log\")\n local(\"python setup.py sdist >> build/log\")\n local(\"python setup.py bdist_wheel >> build/log\")", "def main():\n \n jobInfo = jenkinsBase(jenkinsUrl)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that get_repo_data raises ValueError exceptions for invalid input
def test_input_validation(self): with self.assertRaises(ValueError): get_repo_data(" ") with self.assertRaises(ValueError): get_repo_data("nonExistentUserID") self.assertTrue(get_repo_data("derobertsw"))
[ "def test_get_repo_data(self):\n self.assertEqual(get_repo_data(\"derobertsw\"),\n [('GitHubAPI567', 4), ('home', 2), ('ssw567_hw2_triangle', 9), ('Student-Repository', 30)])", "def test_repo_get_contents(self):\n pass", "def test_invalid_language(self):\n datasite =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that get_repo_data returns correct data
def test_get_repo_data(self): self.assertEqual(get_repo_data("derobertsw"), [('GitHubAPI567', 4), ('home', 2), ('ssw567_hw2_triangle', 9), ('Student-Repository', 30)])
[ "def test_repo_get_contents(self):\n pass", "def test_api_v3_repositories_get(self):\n pass", "def test_LocalRepo_get_data():\n\n # Create module to upload\n local_repo = cpenv.LocalRepo(\"test_modules\", data_path(\"modules\"))\n spec = local_repo.find(\"testmod-0.2.0\")[0]\n\n data =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize logging for the repository update. Logging is quite important as a full global update can take many hours and be totally unattended (run from cron, etc.), so if something goes wrong we need to be able to find out what went wrong.
def init_logging(log_folder_path): # first try to make sure the logging folder actually exists if not utils.createFolderPath(log_folder_path): print("ERROR: failed to create logging folder in: %s" % log_folder_path) return else: print("initializing logging") # create main logger...
[ "def init_logger(self):\n logger.Reinitialize(level=self.log_level, logToFileAtSpecifiedPath=self.log_file)", "def _init_logger(self):\n #self._logger = logger_factory.make_logger(__name__)", "def logging_init():\n # Default logging levels. These can be overridden when the config file is lo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move through the whole hit_id list and attempt to expire the HITs
def expire_all_unassigned_hits(self): for hit in view.all(): if not hit.complete and hit.hit_id in self.hit_ids: print(hit.hit_id) mturk_utils.expire_hit(mturk_config['is_sandbox'], hit.hit_id)
[ "def expire_and_dispose_hits(\n client: MTurkClient, hits: List[Dict[str, Any]], quiet: bool = False\n) -> List[Dict[str, Any]]:\n non_disposed_hits = []\n for h in tqdm(hits, disable=quiet):\n try:\n client.delete_hit(HITId=h[\"HITId\"])\n except Exception as e:\n clien...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Approve work for a given assignment through the mturk client.
def approve_work(self, assignment_id, override_rejection=False): client = mturk_utils.get_mturk_client(mturk_config['is_sandbox']) assignment_status = None approve_attempt_num = 0 if assignment_status != SUBMIT_STATUS and approve_attempt_num < APPROVE_TIME_LIMIT: try: ...
[ "def approve_work(client: MTurkClient, assignment_id: str, override_rejection: bool = False) -> None:\n try:\n client.approve_assignment(AssignmentId=assignment_id, OverrideRejection=override_rejection)\n except Exception as e:\n logger.exception(\n f\"Approving MTurk assignment faile...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Give a worker a particular qualification.
def give_worker_qualification(self, worker_id, qual_name, qual_value=None): qual_id = mturk_utils.find_or_create_qualification(qual_name, 'Worker has done this task', mturk_config['is_sandbox']) ...
[ "def give_worker_qualification(\n client: MTurkClient,\n worker_id: str,\n qualification_id: str,\n value: Optional[int] = None,\n) -> None:\n if value is not None:\n client.associate_qualification_with_worker(\n QualificationTypeId=qualification_id,\n WorkerId=worker_id,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SurveyQuestionGroupScore - a model defined in Swagger
def __init__(self): self.swagger_types = { 'question_group_id': 'str', 'total_score': 'float', 'max_total_score': 'float', 'marked_na': 'bool', 'question_scores': 'list[SurveyQuestionScore]' } self.attribute_map = { 'questi...
[ "def _get_scores(self, obj):\n if not hasattr(obj, '_scores'):\n obj._scores = QuestionScore.objects.filter(question=obj)\n\n return obj._scores", "def score_answer(self, answer, answer_spec):\n raise NotImplementedError", "def question_scores(self):\n return self._questio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the question_group_id of this SurveyQuestionGroupScore.
def question_group_id(self): return self._question_group_id
[ "def get_group_idx(self) -> int:\n return self.group_idx", "def question_group_id(self, question_group_id):\n \n self._question_group_id = question_group_id", "def task_group_id(self):\n return self._task_group_id", "def google_group_id(self) -> str:\n return pulumi.get(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the question_group_id of this SurveyQuestionGroupScore.
def question_group_id(self, question_group_id): self._question_group_id = question_group_id
[ "def campaign_group_id(self, campaign_group_id):\n\n self._campaign_group_id = campaign_group_id", "def role_group_id(self, role_group_id):\n\n self._role_group_id = role_group_id", "def question_group_id(self):\n return self._question_group_id", "def set_group(self, group: t.Optional[jan...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the total_score of this SurveyQuestionGroupScore. Score of all questions in the group
def total_score(self): return self._total_score
[ "def score(self):\n return score_by_team()[self]", "def _cv_total_score(self):\n scores = self.scores\n numbers = self.number_predicted\n total = sum(numbers)\n number_correct = sum([s*n for s,n in zip(scores,numbers)])\n total_score = number_correct / total\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the total_score of this SurveyQuestionGroupScore. Score of all questions in the group
def total_score(self, total_score): self._total_score = total_score
[ "def max_total_score(self, max_total_score):\n \n self._max_total_score = max_total_score", "def _cv_total_score(self):\n scores = self.scores\n numbers = self.number_predicted\n total = sum(numbers)\n number_correct = sum([s*n for s,n in zip(scores,numbers)])\n to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the max_total_score of this SurveyQuestionGroupScore. Maximum possible score of all questions in the group
def max_total_score(self): return self._max_total_score
[ "def get_max_score(self):\n return sum(self.maxpoints.values())", "def max_total_score(self, max_total_score):\n \n self._max_total_score = max_total_score", "def max_raw_score(self):\n if self._max_raw_score is None:\n self._max_raw_score = self.matrix.max(axis=0).sum()\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the max_total_score of this SurveyQuestionGroupScore. Maximum possible score of all questions in the group
def max_total_score(self, max_total_score): self._max_total_score = max_total_score
[ "def max_total_score(self):\n return self._max_total_score", "def total_score(self, total_score):\n \n self._total_score = total_score", "def max_total_recipients(self, max_total_recipients):\n\n self._max_total_recipients = max_total_recipients", "def get_max_score(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the marked_na of this SurveyQuestionGroupScore.
def marked_na(self): return self._marked_na
[ "def marked_na(self, marked_na):\n \n self._marked_na = marked_na", "def notna(self) -> npt.NDArray[np.bool_]:\n return ~self.isna()", "def nan(self):\r\n\t\treturn float(\"nan\")", "def nan(self, x):\n return math.isnan(x)", "def get_labeled_mask(self):\n return ~np.fromi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the marked_na of this SurveyQuestionGroupScore.
def marked_na(self, marked_na): self._marked_na = marked_na
[ "def marked_na(self):\n return self._marked_na", "def mark_empty_groups(self, mark):\n self._mark_empty_groups(mark)", "def _fill_nans(dataset, val):\n for k in dataset.keys():\n dataset.values[np.isnan(dataset.values)] = val", "def _fill_null(self, df):\n invalid_jobs =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the question_scores of this SurveyQuestionGroupScore.
def question_scores(self): return self._question_scores
[ "def _get_scores(self, obj):\n if not hasattr(obj, '_scores'):\n obj._scores = QuestionScore.objects.filter(question=obj)\n\n return obj._scores", "def _get_scores(self):\n a = numpy.array([x['scores'] for x in self.results])\n return a", "def score_division(self) -> List[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the question_scores of this SurveyQuestionGroupScore.
def question_scores(self, question_scores): self._question_scores = question_scores
[ "def qm_score(self, qm_score):\n self._qm_score = qm_score", "def score_division(self, score_division: List[float]):\n\n self._score_division = score_division", "def question_scores(self):\n return self._question_scores", "def set_input_score(self, score):\n pass", "def reset_sco...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set parameters for animal species.
def set_animal_parameters(species, params): if species == "Herbivore": Herbivore.set_parameters(params) if species == "Carnivore": Carnivore.set_parameters(params)
[ "def set_animal_parameters(self, species, params):", "def test_set_animal_parameters_callable(self):\n params = {}\n self.biosim.set_animal_parameters('Herbivore', params)", "def set_parameters(cls, params):\n # cls.params_dict.update(params)\n for parameter in params:\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set parameters for landscape type.
def set_landscape_parameters(landscape, params): if landscape == "L": Lowland.set_parameters(params) elif landscape == "H": Highland.set_parameters(params) else: raise ValueError('Lowland and Highland are the' 'only ones that can h...
[ "def set_landscape_parameters(self, landscape, params):", "def test_set_landscape_parameters(self):\n params1 = {'alpha': 0.4}\n params2 = {'f_max': 500}\n self.biosim.set_landscape_parameters('S', params1)\n self.biosim.set_landscape_parameters('J', params2)\n assert Savannah.a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a population to each cell on the island
def add_population(self, population): for cell_coord in population: x, y = cell_coord.get('loc') self.island[x][y].place_animals(cell_coord.get('pop'))
[ "def add_population(self, population):", "def appendPopulation(self, population):\n for index in range(population.size()):\n gene = population.getGene(index)\n fitness = population.getFitness(index)\n self.appendGene(gene, fitness)", "def populateCells(self):\n for...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the number of animals throughout the island and sums them up
def heat_num_animals(self): total_herbivores = sum(sum(self.heatmap_of_population()['Herbivore'])) total_carnivores = sum(sum(self.heatmap_of_population()['Carnivore'])) animal_count_dict = {"Herbivore": total_herbivores, "Carnivore": total_carnivores} return animal_count_dict
[ "def num_animals(self):\n val_sum = 0\n for key, values in self.heat_num_animals.items():\n val_sum += values\n return val_sum", "def num_animals(self):", "def num_animals_per_species(self):", "def population(self):\n return sum([len(s) for s in self.__species])", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Total number of animals on island.
def num_animals(self): val_sum = 0 for key, values in self.heat_num_animals.items(): val_sum += values return val_sum
[ "def num_animals(self):", "def num_animals_per_species(self):", "def heat_num_animals(self):\n total_herbivores = sum(sum(self.heatmap_of_population()['Herbivore']))\n total_carnivores = sum(sum(self.heatmap_of_population()['Carnivore']))\n animal_count_dict = {\"Herbivore\": total_herbivor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a keyword argument 'level' to the augmentation function. The level policy is a function that translates a level value to the appropriate arguments of the augmentation.
def randaug(level_policy): def decorator(aug): @functools.wraps(aug) def wrapper(image, *args, **kwargs): if 'level' in kwargs: args, kwargs = level_policy(image, kwargs['level']) return aug(image, *args, **kwargs) else: return aug(image, *args, **kwargs) ...
[ "def set_new_level(self, level):\r\n\r\n self.property_set(\"level\",\r\n Sample(0, int(level), unit=\"%\"))", "def addLevel(self, level: 'SoNode') -> \"void\":\n return _coin.SoVRMLLOD_addLevel(self, level)", "def SetLevel(self, level):\n self.level = level", "def wi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns image with an extra channel set to all 1s.
def _wrap(image): ones = torch.ones_like(image[:,:1,:,:]) return torch.cat([image, ones], 1)
[ "def make_binary_image(im):", "def add_alpha_channel(img):\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n _, alpha = cv2.threshold(gray_img, 25, 255, cv2.THRESH_BINARY)\n \n b, g, r = cv2.split(img)\n rgba = [b, g, r, alpha]\n alpha_img = cv2.merge(rgba, 4)\n \n return al...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unwraps an image produced by wrap by filling each channel with the replacement values wherever the wrapper channel is zero.
def _unwrap(image, replace): image, alpha = image[:,:3,...], image[:,3:4,...] b, c, h, w = image.shape replace = replace.to(image.dtype) replace = replace.to(image.device) if replace.dim() == 2: replace = replace.view(-1,c,1,1) else: replace = replace.view(-1,1,1,1) return torch.where(alpha ...
[ "def _wrap_image(self, im, border=7):\n # We should throw an exception if the image is smaller than 'border', since at this point\n # this process doesn't make sense.\n if im.bounds.xmax - im.bounds.xmin < border:\n raise RuntimeError(\"Periodic wrapping does not work with images thi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a constant to all values below the threshold. The constant should be between -128 and 128.
def solarize_add(image, constant, threshold=128): added_image = image.to(torch.int64) + constant added_image = torch.clamp(added_image, 0, 255) return torch.where(image >= threshold, image, added_image)
[ "def thresholding(self, thval=130):\n self.thval = thval\n self.temp_img[self.temp_img < thval] = thval", "def setWhiteThreshold(self, value) -> None:\n ...", "def assign_thresholds(self):\n self.thresholds=np.hstack([2.0,np.flip(np.linspace(0.,1.,10000))])", "def _apply_signed_thr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements autocontrast from PIL using torch ops.
def auto_contrast(image, cutoff, grayscale=True): w, h = image.shape[-2:] if grayscale: reference = VF.rgb_to_grayscale(image) else: reference = image hist = uint8_histc(reference) hist = hist.cumsum(-1) hist = hist / hist[...,-1:] if cutoff: lo = (hist <= cutoff).sum(-1) hi = 256.0 - (...
[ "def adjust_contrast(image):\n\n # 0.5 <= alpha <= 2.0\n # These values found empirically\n alpha = 0.5 + 1.5 * random.random()\n image = cv2.convertScaleAbs(image, alpha=alpha, beta=0)\n\n return image", "def predict_single():\n path = 'outputs/gray/img-8-epoch-29.jpg'\n img = Image.open(pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count the number of missing values in a vector
def count_missing(vec): #print('hello') null_vec = isna(vec) return sum(null_vec)
[ "def null_count_alt(df):\n x = [test_df[col].isna().sum() for col in test_df.columns]\n y = 0\n for _ in x:\n y += _\n return y", "def nonzero_count(my_list):\n counter = 0\n for value in my_list:\n if value != 0:\n counter += 1\n return counter", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the starting or ending index of targetDate if it exists in the loaded data.
def find_date_index(self, targetDate: datetime.date, starting: bool = True) -> int: if type(targetDate) == datetime: targetDate = targetDate.date() if starting: iterator = list(enumerate(self.data)) else: iterator = reversed(list(enumerate(self.data))) ...
[ "def _get_index_for_date(self, the_date):\n date_ordinal = the_date.toordinal()\n index = 2 * (date_ordinal - self.start_date_ordinal)\n return (int(index))", "def get_index(start_date, end_date):\n delta_days = (end_date - start_date).days + 1\n index = [\n (start_date + timedel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns index of start date based on startDate argument.
def get_start_index(self, startDate: datetime.date) -> int: if startDate: startDateIndex = self.find_date_index(startDate) if startDateIndex == -1: raise IndexError("Date not found.") else: return startDateIndex else: return...
[ "def _get_index_for_date(self, the_date):\n date_ordinal = the_date.toordinal()\n index = 2 * (date_ordinal - self.start_date_ordinal)\n return (int(index))", "def find_date_index(self, targetDate: datetime.date, starting: bool = True) -> int:\n if type(targetDate) == datetime:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns index of end date based on endDate argument.
def get_end_index(self, endDate: datetime.date) -> int: if endDate: endDateIndex = self.find_date_index(endDate, starting=False) if endDateIndex == -1: raise IndexError("Date not found.") elif endDateIndex < 1: raise IndexError("You need at lea...
[ "def get_index(start_date, end_date):\n delta_days = (end_date - start_date).days + 1\n index = [\n (start_date + timedelta(day)).strftime(JSON_DATE_FORMAT)\n for day in xrange(delta_days)\n ]\n return index", "def get_end_day(self) -> int:\n return self.end_date.day", "def get_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the current backtester price and period based on index provided.
def set_indexed_current_price_and_period(self, index: int): self.currentPeriod = self.data[index] self.currentPrice = self.data[index]['open']
[ "def exit_backtest(self, index: int = None):\n if index is None:\n index = self.endDateIndex\n\n self.currentPeriod = self.data[index]\n self.currentPrice = self.currentPeriod['close']\n\n if self.inShortPosition:\n self.buy_short(\"Exited short position because bac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Auxiliary function to set current period and price to price provided.
def set_priced_current_price_and_period(self, price): self.currentPeriod = { 'date_utc': None, 'open': price, 'close': price, 'high': price, 'low': price } self.currentPrice = price
[ "def set_indexed_current_price_and_period(self, index: int):\n self.currentPeriod = self.data[index]\n self.currentPrice = self.data[index]['open']", "def update_price(self, company: Company):\n pass", "def do_setPrice(self, args):\n weekday = input(\"Enter weekday price: \")\n we...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets stop loss equal to the counter provided.
def set_stop_loss_counter(self, counter: int): self.stopLossCounter = self.initialStopLossCounter = counter
[ "def reset_smart_stop_loss(self):\n self.stopLossCounter = self.initialStopLossCounter", "def stop(self, iterations):\n self.stop_count += iterations", "def setStopSweep(self,stop):\r\n self.isSIUnit(stop)\r\n self.stopFreqSweep = stop", "def set_stop_criterion(self,criterion):\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets smart stop loss and sets it equal to initial stop loss counter.
def reset_smart_stop_loss(self): self.stopLossCounter = self.initialStopLossCounter
[ "def set_stop_loss_counter(self, counter: int):\n self.stopLossCounter = self.initialStopLossCounter = counter", "def _reset(self):\n self.monitor_op = lambda a, b: (a - b) < -self.epsilon\n self.best_loss = 1e15\n self.cooldown_counter = 0\n self.wait = 0", "def reset(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ends a backtest by exiting out of a position if needed.
def exit_backtest(self, index: int = None): if index is None: index = self.endDateIndex self.currentPeriod = self.data[index] self.currentPrice = self.currentPeriod['close'] if self.inShortPosition: self.buy_short("Exited short position because backtest ended.")...
[ "def end_test(self):", "def back(self):\n self.position -= 1\n if self.position > len(self.document.characters):\n raise CursorTopError", "def ending(self):\n if self.view_index:\n self.view_index = self.end()", "def end_while_true(self):\n seen_close = 0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simulate a long hold position if no strategies are provided.
def simulate_hold(self, testLength: int, divisor: int, thread=None): for index in range(self.startDateIndex, self.endDateIndex, divisor): if thread and not thread.running: raise RuntimeError("Backtest was canceled.") self.currentPeriod = self.data[index] self...
[ "def func(self):\r\n rand = random.random()\r\n if rand < 0.5:\r\n self.caller.msg(\"You nudge at the lid. It seems stuck.\")\r\n elif 0.5 <= rand < 0.7:\r\n self.caller.msg(\"You move the lid back and forth. It won't budge.\")\r\n else:\r\n self.caller.m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles trailing prices based on the current price.
def handle_trailing_prices(self): if self.longTrailingPrice is not None and self.currentPrice > self.longTrailingPrice: self.longTrailingPrice = self.currentPrice if self.shortTrailingPrice is not None and self.currentPrice < self.shortTrailingPrice: self.shortTrailingPrice = sel...
[ "def _adjust_price(self):\n\n # Go through each topping and add the money amount for topping\n topping_additional_money = 0\n for topping in self._toppings:\n topping_additional_money += topping.getPrice()\n\n self._price = self._base_price + topping_additional_money", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns stop loss for short position.
def _get_short_stop_loss(self) -> Union[float, None]: if self.lossStrategy == TRAILING: return self.shortTrailingPrice * (1 + self.lossPercentageDecimal) elif self.lossStrategy == STOP: return self.sellShortPrice * (1 + self.lossPercentageDecimal) elif self.lossStrategy i...
[ "def get_stop_loss(self) -> Union[float, None]:\n self.handle_trailing_prices()\n if self.inShortPosition:\n self.previousStopLoss = self._get_short_stop_loss()\n return self.previousStopLoss\n elif self.inLongPosition:\n self.previousStopLoss = self._get_long_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns stop loss for long position.
def _get_long_stop_loss(self) -> Union[float, None]: if self.lossStrategy == TRAILING: return self.longTrailingPrice * (1 - self.lossPercentageDecimal) elif self.lossStrategy == STOP: return self.buyLongPrice * (1 - self.lossPercentageDecimal) elif self.lossStrategy is No...
[ "def get_stop_loss(self) -> Union[float, None]:\n self.handle_trailing_prices()\n if self.inShortPosition:\n self.previousStopLoss = self._get_short_stop_loss()\n return self.previousStopLoss\n elif self.inLongPosition:\n self.previousStopLoss = self._get_long_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns stop loss value.
def get_stop_loss(self) -> Union[float, None]: self.handle_trailing_prices() if self.inShortPosition: self.previousStopLoss = self._get_short_stop_loss() return self.previousStopLoss elif self.inLongPosition: self.previousStopLoss = self._get_long_stop_loss() ...
[ "def train_loss(self):\n return self._train_loss", "def get_stop_probability(self) -> float:", "def get_last_train_loss(self) -> float:\n loss = self.training_loss\n self.training_loss = 0\n return loss", "def _get_long_stop_loss(self) -> Union[float, None]:\n if self.lossStrate...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns net balance with current price of coin being traded. It factors in the current balance, the amount shorted, and the amount owned.
def get_net(self) -> float: return self.coin * self.currentPrice - self.coinOwed * self.currentPrice + self.balance
[ "def getClientBalance(self, client, bot_config):\n currency = str(bot_config['currency'])\n pair = currency[len(currency)-4:len(currency)]\n if(pair == 'USDT'):\n balance = client.get_asset_balance(asset='USDT')\n else:\n balance = client.get_asset_balance(asset='BT...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns trend based on the strategies provided.
def get_trend(self) -> Union[int, None]: trends = [strategy.trend for strategy in self.strategies.values()] if len(trends) == 0: return None if all(trend == BEARISH for trend in trends): return BEARISH elif all(trend == BULLISH for trend in trends): ...
[ "def _calculate_trends(\n history: list[OrderedDict], measurements_to_use: int\n) -> dict[str, Any]:\n if measurements_to_use == -1:\n index_range = np.arange(0, len(history))\n else:\n index_range = np.arange(0, measurements_to_use)\n\n measured_attributes = set().union(*(d.keys() for d i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints out strategies provided in configuration.
def print_strategies(self): for strategyName, strategy in self.strategies.items(): print(f'\t{get_label_string(strategyName)}: {strategy.get_params()}')
[ "def show_strategy_menu(self):\n self.show_content(\"Please select a strategy for your opponent \"\n \"from the numbered list.\")\n self.show_content(\"\\t1 - The Rock: always plays 'rock'\")\n self.show_content(\"\\t2 - You'll never guess!\")\n self.show_content...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints out backtest results.
def print_backtest_results(self, stdout=None): previous_stdout = sys.stdout if stdout is not None: # Temporarily redirects output to stdout provided. sys.stdout = stdout print("\nBacktest results:") print(f'\tSymbol: {"Unknown/Imported Data" if self.symbol is None else self...
[ "def print_results(self):\n self._write_term('\\nPassed {} of {} tests, {} failed.\\n'.format(self.passes,self.numTests,self.fails))", "def print_test_results(self):\n for n, (name, result) in enumerate(zip(self._test_names, self._test_results)):\n print('Test {} ({}): {}'.format(n+1, nam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints out all the trades conducted so far.
def print_trades(self, stdout=sys.__stdout__): previous_stdout = sys.stdout if stdout is not None: # Temporarily redirects output to stdout provided. sys.stdout = stdout print("\nTrades made:") for trade in self.trades: print(f'\t{trade["date"].strftime("%Y-%m-%...
[ "def print_all(self):\n print(\"Models:\", self._models)\n print(\"Scores:\", self._scores)\n print(\"MSE:\", self._mse)\n print()", "def print_backtest_results(self, stdout=None):\n previous_stdout = sys.stdout\n if stdout is not None: # Temporarily redirects output to ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a default backtest result file name.
def get_default_result_file_name(self): backtestResultsFolder = 'Backtest Results' symbol = 'Imported' if not self.symbol else self.symbol dateString = datetime.now().strftime("%Y-%m-%d_%H-%M") resultFile = f'{symbol}_backtest_results_{"_".join(self.interval.lower().split())}-{dateString...
[ "def default_test_results_location():\n return os.path.join(repo_root(), \"test-results\")", "def get_default_filename(cls) -> str:\n return cls.__open('default_filename')", "def getDefaultBackupFile(self):\n def getName(index=0):\n \"\"\"\n Try to create an unique backup ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes backtest results to resultFile provided. If none is provided, it'll write to a default file name.
def write_results(self, resultFile=None) -> str: currentPath = os.getcwd() if not resultFile: resultFile = self.get_default_result_file_name() with open(resultFile, 'w') as f: self.print_configuration_parameters(f) self.print_backtest_results(f) ...
[ "def write_results(filename):", "def save_results(self):\n\n # Save the results\n self.output_results = '{}_results.dat'.format(self.method)\n with open(self.output_results, 'w') as f:\n f.write(str(self.result))\n print('Results file saved to {}'.format(self.output_results)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Top k most frequently occurring words in the given file
def top_k_freq_words(self,file_names,top_k,seperator="#|#",return_word_only=True): c = Counter() for file_name in file_names: print ("Reading file ",file_name) with codecs.open(file_name, 'r',encoding='utf8') as fp: for each_line in fp: each_li...
[ "def kTopWords(k,list):\r\n # Check at this point if the list is empty. If so, then the the program is exited.\r\n if list == []:\r\n print(\"Unable to continue:\\n1. Writing.txt is empty or\\n2. There is no word remaining after preprocessing.\")\r\n sys.exit()\r\n\r\n # Make sure the list pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the top k words, return word2vec vectors for that file. top_k_words = [word1, word2, ...]
def top_k_word2vec(self,word2vec_file_name,top_k_words,word2vec_dimension,new_file_name): #word2vec = pd.read_csv("../../temp_results/a.txt",sep=' ', header=None, skiprows=range(1)) model = models.KeyedVectors.load_word2vec_format(word2vec_file_name, binary=False) filtered_vectors = model[top_k_...
[ "def get_top_k_words(words, stopwords, k):\n # create dict of unique words to count them down\n un_words = {}\n for x in words:\n if x not in stopwords:\n if not un_words.__contains__(x):\n un_words[x] = 1\n else:\n un_words[x] += 1\n un_words =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the item object based on the string name
def get_item(name): for item in globals().values(): if isinstance(item, MarketItem) and item.name == name: return item raise Exception("Invaid item '{}'".format(name))
[ "def get_item_by_name(self, partialname):\n for item in self.items:\n itemobj=globalitemloader.get_item(item)\n if partialname.lower() in itemobj.name.lower():\n return itemobj\n return None", "def get_item(self, item_name):\n if len(self.items) > 0: ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise any number to the power of three
def my_power_three(number): return number ** 3
[ "def times_three(num):\n\n return num*3", "def f7():\n x_pow3 = mul_functions(identity(), mul_functions(identity(), identity()))\n return inverse(x_pow3)", "def pow_4_of(number):\n pow2 = pow_2_of(number)\n return pow2 * pow2", "def triple(number):\n return number * 3", "def cube(num):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the mirrors from the mirrorlist file.
def read(self): p = compile('Server = {url}\n') with open(self.path) as f: for line in f: r = p.parse(line) if r: self.mirrors.append(r.named['url']) return self.mirrors
[ "def get_list_of_mirrors(file_type, file_path, mirrors_dict):\n\n # Checking if all the arguments have appropriate format.\n formats.RELPATH_SCHEMA.check_match(file_path)\n formats.MIRRORDICT_SCHEMA.check_match(mirrors_dict)\n sslib_formats.NAME_SCHEMA.check_match(file_type)\n\n # Verify 'file_type' is support...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy the provided mirrorlist into the chroot and refresh pacman databases.
def copy(self, path='/etc/pacman.d/mirrorlist'): copy2(path, self.path) self.chroot.refresh() self.read()
[ "def _sync_databases(self):\n host, port = self._src_mc.primary\n self._logger.info('[%s] sync databases from %s:%d' % (self._current_process_name, host, port))\n exclude_dbnames = ['admin', 'local']\n for dbname in self._src_mc.database_names():\n if dbname not in exclude_dbn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the given mirror as the chroot's only mirror and refresh pacman databases.
def set(self, mirror, write=True): self.mirrors = [mirror] if write: self.write()
[ "def has_mirror(self, mirror):\n\n if not isinstance(mirror, RepositoryURI):\n mirror = misc.url_affix_trailing_slash(mirror)\n return mirror in self.mirrors", "def remove_mirror(self, mirror):\n\n if not isinstance(mirror, RepositoryURI):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append the given mirror to the chroot's mirrorlist and refresh pacman databases.
def add(self, mirror, write=True): self.mirrors.append(mirror) if write: self.write()
[ "def add_mirror(self, mirror, priority=None, ssl_cert=None,\n ssl_key=None):\n\n def dup_check(mirror):\n if self.has_mirror(mirror):\n raise api_errors.DuplicateRepositoryMirror(\n mirror)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the mirror to the Arch Linux Archive repository of the given date.
def set_date(self, date, write=True): date_url = f'{date:/%Y/%m/%d}' mirror = Mirrorlist.archive_url + date_url + '/$repo/os/$arch' self.set(mirror, write) return mirror
[ "def mirror_deb(self, repo_name, alias=None):\n print 'Mirror creation start'\n if alias is None:\n alias = repo_name\n cmds = []\n\n # create mirrors\n for conf in self.DISRS['deb']:\n cmds.append([\n 'aptly', 'mirror', 'create', \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make the chroot using mkarchroot.
def make(self): if not self.working_dir.exists(): self.working_dir.mkdir(parents=True) cmd = ['mkarchroot', str(self.root), 'base-devel', 'devtools'] cmdlog.run(cmd)
[ "def create_chroot_arch(path: str, packages: str = \"base\"):\n print(\"Creating arch at {0}\".format(path))\n os.makedirs(path, exist_ok=False)\n ctr_create(\"docker.io/library/archlinux:latest\", path, \"sed -i 's/^#ParallelDownloads/ParallelDownloads/g' /etc/pacman.conf && pacman -Sy --noconfirm --neede...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run pacman with the given flags in the chroot.
def pacman(self, flags): cmdlog.run(['arch-nspawn', str(self.root), 'pacman', flags])
[ "def pacman(args, confirm=False):\n write_stdin(['sudo', 'pacman', *args], confirm and [] or repeat('y\\n'))", "def run_chrooted(self, call_args, *args, **kwargs):\n assert self.chroot_prepared\n run((\"chroot\", self.mountpoint) + call_args, *args, **kwargs)", "def run_foam():\n os.chdir('cfd')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the chroot with `pacman Syu`.
def update(self): if not self.exists(): self.make() else: self.pacman('-Syuu')
[ "def scp_sysroot(ctx, user, host):\n _scp_dir(user, host, \"llvm-sysroot\")", "def update():\n update_code()\n update_env()\n symlink()\n set_current()\n permissions()", "def unsafechroot():\n arch_chroot_into(distro_name, btrfs_dev_uuid)", "def update_fetch(self):\n Popen([\"mount...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a package in the chroot using makechrootpkg.
def makepkg(self, pkgbuild, deps=[]): if not self.exists(): self.make() pkgbuild.update() cmd = ['makechrootpkg', '-cr', str(self.working_dir)] for d in deps: cmd += ['-I', d] cmd += ['--', '-s'] with cwd(pkgbuild.builddir): return cmdl...
[ "def package(ctx):\n ctx.run(\"rm -rf dist\")\n ctx.run(\"python setup.py sdist\")", "def create_chroot_arch(path: str, packages: str = \"base\"):\n print(\"Creating arch at {0}\".format(path))\n os.makedirs(path, exist_ok=False)\n ctr_create(\"docker.io/library/archlinux:latest\", path, \"sed -i '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract news from user input keywords
def get_news_from_keywords(keywords): keywords = rf.process_keywords(keywords) news_data = get_all_news_entries() news_entries = [] for title, description, link, date, named_entities, processed in news_data.values(): if all((k in named_entities) for k in keywords): news_entries.app...
[ "def keyword_articles():\r\n # To retrieve user's keyword, use \"keyword.get()\".\r\n # Retrieve the keyword news.\r\n keyword_news = api_call(\"http://newsapi.org/v2/top-headlines?language=en&q=\" + keyword.get() + \"&apiKey=c0cbc3a185e84d60bf612e355c9a2760\")\r\n\r\n # Retrieve the keyword news articl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a number, returns true if it is prime
def es_primo(numero): # Comparo el numero con cada uno de sus anteriores excepto el 1 y él mismo. for i in range(2, numero-1): # Si el resto es 0, no es primo. if (numero % i == 0): return False return True
[ "def primo(numero):\n\tfor i in range(2, numero, 1):\n\t\tif numero%i==0:\n\t\t\treturn False\n\tif numero==1:\n\t\treturn False\n\treturn True", "def is_natural(num):\n if(not (num % 3) or not (num % 5)):\n return num\n else:\n return 0", "def est_premier(nombre):\n # si le nombre est in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of the prime and perfect numbers between 1 and the given number
def primos_y_perfectos(numero): # Lista de nºs entre 1 y el numero dado: numeros = [i for i in range(1, numero)] # Listas donde almacenaré los numeros que haya: primos = [] perfectos = [] for numero in numeros: # Primos: if(es_primo(numero)): primos.appen...
[ "def ex7_PerfectNumber():\n N1 = int(input())\n N2 = int(input())\n\n def perfectNumber(N1, N2):\n result = []\n while N1 < N2:\n i = 1\n divisors = []\n while i < N1:\n if N1 % i == 0:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively returns the sum from 1 to the given number
def sumatorio(numero): # Caso base if (numero == 1): return numero # Caso general else: return sumatorio(numero-1) + numero
[ "def firstnsum(n):\n\treturn sum(range(n+1))", "def amicable(n):\r\n \"*** YOUR CODE HERE ***\"\r\n while True:\r\n n = n + 1\r\n m = sum_of_divisor(n) \r\n if m != n and sum_of_divisor(m) == n:\r\n break\r\n\r\n return n", "def sum_to(n):\n running_sum = 0\n\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the query for inserting values
def create_insert_query(self) -> None: build = BuildInjectQuery() self.query = build.build_query_insert(table=self.table, key_duplicate=True, list_values=self.list_values)
[ "def insert(self, sql):", "def insert(self, table, fields): \n field_keys = ', '.join(fields.keys())\n _fields = '\\',\\''.join(fields.values())\n return self.query(\"INSERT INTO {} ({}) VALUES ({})\", (field_keys, _fields), table)", "def _getInsertRowRequest(self, dictValues):\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the TS in terms of energy, normal mode displacement, and IRC. Populates the ``TS.ts_checks`` dictionary. Note that the 'freq' check is done in Scheduler.check_negative_freq() and not here.
def check_ts(reaction: 'ARCReaction', verbose: bool = True, job: Optional['JobAdapter'] = None, checks: Optional[List[str]] = None, rxn_zone_atom_indices: Optional[List[int]] = None, ): checks = checks or list() for entry in checks: if ent...
[ "def check_ts_energy(reaction: 'ARCReaction',\n verbose: bool = True,\n ) -> None:\n # Check whether E0 values are already known, e.g. from Arkane species YAML files\n check_rxn_e0(reaction=reaction)\n if reaction.ts_species.ts_checks['E0']:\n return\n\n r_e_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the TS species passes all checks other than ones specified in ``exemptions``.
def ts_passed_all_checks(species: 'ARCSpecies', exemptions: Optional[List[str]] = None, verbose: bool = False, ) -> bool: exemptions = exemptions or list() for check, value in species.ts_checks.items(): if check not in exemptions...
[ "def test_chk_species_fail(self):\n pass", "def check_exon(self):\n global check, error_details\n for row_index, row in self.primer_df.iterrows():\n for char in str(row['Exon']):\n if char in specials:\n check += 1\n error = \"Sp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the TS electronic energy is above both reactant and product wells in a ``reaction``. Sets the respective energy parameter 'e_elect' in the ``TS.ts_checks`` dictionary.
def check_ts_energy(reaction: 'ARCReaction', verbose: bool = True, ) -> None: # Check whether E0 values are already known, e.g. from Arkane species YAML files check_rxn_e0(reaction=reaction) if reaction.ts_species.ts_checks['E0']: return r_e_elect = None ...
[ "def check_rxn_e0(reaction: 'ARCReaction',\n verbose: bool = True,\n ) -> Optional[bool]:\n if reaction.ts_species.ts_checks['E0']:\n return True\n r_e0 = sum_list_entries([r.e0 for r in reaction.r_species],\n multipliers=[reaction.get_species_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the E0 values between wells and a TS in a ``reaction`` using ZPE from statmech.
def compute_and_check_rxn_e0(reaction: 'ARCReaction', species_dict: dict, project_directory: str, kinetics_adapter: str, output: dict, sp_level: 'Level', ...
[ "def check_rxn_e0(reaction: 'ARCReaction',\n verbose: bool = True,\n ) -> Optional[bool]:\n if reaction.ts_species.ts_checks['E0']:\n return True\n r_e0 = sum_list_entries([r.e0 for r in reaction.r_species],\n multipliers=[reaction.get_species_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the E0 values between wells and a TS in a ``reaction``, assuming that E0 values are available.
def check_rxn_e0(reaction: 'ARCReaction', verbose: bool = True, ) -> Optional[bool]: if reaction.ts_species.ts_checks['E0']: return True r_e0 = sum_list_entries([r.e0 for r in reaction.r_species], multipliers=[reaction.get_species_count(speci...
[ "def check_ts_energy(reaction: 'ARCReaction',\n verbose: bool = True,\n ) -> None:\n # Check whether E0 values are already known, e.g. from Arkane species YAML files\n check_rxn_e0(reaction=reaction)\n if reaction.ts_species.ts_checks['E0']:\n return\n\n r_e_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the normal mode displacement by identifying bonds that break and form and comparing them to the expected RMG template, if available.
def check_normal_mode_displacement(reaction: 'ARCReaction', job: Optional['JobAdapter'], amplitudes: Optional[Union[float, List[float]]] = None, ): if job is None: return if reaction.family is None: ...
[ "def test_d_regularization_dims(self):\n template_rxn_map = self.family.get_reaction_matches(thermo_database=self.database.thermo, estimate_thermo=False)\n\n for entry in self.family.groups.entries.values():\n if entry.children == []:\n continue\n # set of violatio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine whether a bond breaks or forms in a TS. Note that ``bond`` and all bond entries in ``dmat_bonds_1/2`` must already be sorted from small to large indices.
def determine_changing_bond(bond: Tuple[int, ...], dmat_bonds_1: List[Tuple[int, int]], dmat_bonds_2: List[Tuple[int, int]], ) -> Optional[str]: if len(bond) != 2 or any(not isinstance(entry, int) for entry in bond): raise V...
[ "def isin_bond(self):\n return 'bond' in self.flags", "def has_bond(self, id):\n for row in self._group.bonds.where(\n 'id == value', condvars={'value': id}):\n return True\n return False", "def same_bond_topology(bt1, bt2):\n natoms = len(bt1.atom)\n if len(bt2....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invalidate rotors in which both pivots are included in the reactive zone.
def invalidate_rotors_with_both_pivots_in_a_reactive_zone(reaction: 'ARCReaction', job: 'JobAdapter', rxn_zone_atom_indices: Optional[List[int]] = None, ...
[ "def rotate_box(self):\r\n self._rotate = not self._rotate", "def combineRotations(circuit,wires):\n #initialize a list to store the indices that will be deleted after the loop to not affect the stored sequence of indices\n indicesToPop=[]\n for j in range(len(wires)):\n i=0\n n=len(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the reaction zone atom indices by parsing normal mode displacement.
def get_rxn_zone_atom_indices(reaction: 'ARCReaction', job: 'JobAdapter', ) -> List[int]: freqs, normal_mode_disp = parser.parse_normal_mode_displacement(path=job.local_path_to_output_file, ...
[ "def get_atom_indices(self):\n return (range(1, self.natm+1), self)", "def positions(self):\n return get_positions(as_numpy=True).reshape((self.natom, 3))", "def find_indices(universe, atoms, molname, natoms):\n indices = []\n atoms = np.array(atoms)\n mol_atoms = universe.atoms[universe....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the root mean squares of the normal mode displacements. Use atom mass weights if ``reaction`` is given.
def get_rms_from_normal_mode_disp(normal_mode_disp: np.ndarray, freqs: np.ndarray, reaction: Optional['ARCReaction'] = None, ) -> List[float]: mode_index = get_index_of_abs_largest_neg_freq(freqs) nmd = normal_...
[ "def rms(x):\n import numpy as np\n return np.sqrt(np.mean(x*x))", "def compute_norm_mode(mode, molecule):\n norm = 0.0\n for iatom, displacement in enumerate(mode.displacements):\n for xyz in displacement:\n norm += xyz**2 * molecule.masses[iatom]/U_TO_AMU\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the index of the |largest| negative frequency.
def get_index_of_abs_largest_neg_freq(freqs: np.ndarray) -> Optional[int]: if not len(freqs) or all(freq > 0 for freq in freqs): return None return list(freqs).index(min(freqs))
[ "def argmax(array: list) -> int:\n index, value = max(enumerate(array), key=lambda x: x[1])\n return index", "def indexOfMax(list):\r\n max = -np.Infinity\r\n index = 0\r\n i = 0\r\n for value in list:\r\n if value > max:\r\n max = value\r\n index = i\r\n i +=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of atoms that are expected to have the largest normal mode displacement for the TS (considering all families). This is a wrapper for ``get_rxn_normal_mode_disp_atom_number()``. It is theoretically possible that TSGuesses of the same species will belong to different families.
def get_expected_num_atoms_with_largest_normal_mode_disp(normal_mode_disp_rms: List[float], ts_guesses: List['TSGuess'], reaction: Optional['ARCReaction'] = None, ...
[ "def get_rxn_normal_mode_disp_atom_number(rxn_family: Optional[str] = None,\n reaction: Optional['ARCReaction'] = None,\n rms_list: Optional[List[float]] = None,\n ) -> int:\n default = 3\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of atoms expected to have the largest normal mode displacement per family. If ``rms_list`` is given, also include atoms with an RMS value close to the lowest RMS still considered.
def get_rxn_normal_mode_disp_atom_number(rxn_family: Optional[str] = None, reaction: Optional['ARCReaction'] = None, rms_list: Optional[List[float]] = None, ) -> int: default = 3 if rms_lis...
[ "def get_expected_num_atoms_with_largest_normal_mode_disp(normal_mode_disp_rms: List[float],\n ts_guesses: List['TSGuess'],\n reaction: Optional['ARCReaction'] = None,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether two lists of bonds are equal.
def _check_equal_bonds_list(bonds_1: List[Tuple[int, int]], bonds_2: List[Tuple[int, int]], ) -> bool: if len(bonds_1) != len(bonds_2): return False if all(bond in bonds_2 for bond in bonds_1): return True return False
[ "def test_equivalent(self):\n for order1 in self.orderList:\n for order2 in self.orderList:\n bond1 = Bond(None, None, order=order1)\n bond2 = Bond(None, None, order=order2)\n if order1 == order2:\n self.assertTrue(bond1.equivalent(bo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the number of imaginary frequencies makes sense. Theoretically, a TS should only have one "large" imaginary frequency; however, additional imaginary frequencies are allowed if they are very small in magnitude. This method does not consider the normal mode displacement check.
def check_imaginary_frequencies(imaginary_freqs: Optional[List[float]]) -> bool: if imaginary_freqs is None: # Freqs haven't been calculated for this TS guess, do consider it as an optional candidate. return True if len(imaginary_freqs) == 0: return False if len(imaginary_freqs) == 1...
[ "def is_real(self):\n return self.degrees().count(2) == self.number_of_irreducible_components()", "def has_complex_result(self):\n tfq_sup = self._model.metadata.time_freq_support\n if not tfq_sup:\n return False\n if tfq_sup.complex_frequencies == None:\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Boolean. Checks whether custom dates are set for the desired spend on the account
def has_custom_dates(self): # return self.desired_spend_start_date != None and self.desired_spend_end_date != None return False # Temporarily disabling this feature
[ "def requirements(self):\n if datetime.datetime.today().day == 15:\n return True\n return False", "def checkDateOptions(options: Dict, timeStart: datetime, timeStop: datetime) -> bool:\n # now check the user provided dates\n if options[\"start\"] and options[\"start\"] > timeStop:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }