query | document | negatives | metadata
|---|---|---|---|
Includes two new traits in this version. | def test_includes_two_new_traits(self):
new_traits = factories.SourceTraitFactory.create_batch(
2, source_dataset__source_study_version=self.study_version_3)
result = self.study_version_3.get_new_sourcetraits()
for new_trait in new_traits:
self.assertIn(new_trait, result) | [
"def combine_traits(self, my_traits, other_traits):\n raise NotImplementedError()",
"def traits(self):\r\n state = self.state\r\n domain = state.domain\r\n features = state.attributes\r\n \r\n t = [Trait(state) for Trait in trait.TRAITS\r\n if Trait.support... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Does not show a new trait in a more recent study version. | def test_intermediate_version_no_new_current_traits(self):
new_trait_v3 = factories.SourceTraitFactory.create(
source_dataset__source_study_version=self.study_version_3)
result = self.study_version_2.get_new_sourcetraits()
self.assertNotIn(new_trait_v3, result) | [
"def test_no_updated_traits(self):\n result = self.study_version_3.get_new_sourcetraits()\n for trait in self.source_traits_v3:\n self.assertNotIn(trait, result)",
"def test_includes_one_new_trait(self):\n new_trait = factories.SourceTraitFactory.create(\n source_dataset... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SourceDataset.objects.current() does not return deprecated traits. | def test_current_queryset_method(self):
current_dataset = factories.SourceDatasetFactory.create()
deprecated_dataset = factories.SourceDatasetFactory.create()
deprecated_dataset.source_study_version.i_is_deprecated = True
deprecated_dataset.source_study_version.save()
self.assert... | [
"def test_current_queryset_method(self):\n current_trait = factories.SourceTraitFactory.create()\n deprecated_trait = factories.SourceTraitFactory.create()\n deprecated_trait.source_dataset.source_study_version.i_is_deprecated = True\n deprecated_trait.source_dataset.source_study_version... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_name_link_html returns a string. | def test_get_name_link_html(self):
dataset = factories.SourceDatasetFactory.create()
self.assertIsInstance(dataset.get_name_link_html(), str) | [
"def test_get_name_link_html(self):\n study = factories.StudyFactory.create()\n self.assertIsInstance(study.get_name_link_html(), str)",
"def get_html_name(self):\n name_str = self._rdl_name_\n if name_str is None:\n return None\n return rdlformatcode.rdlfc_to_html(na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_name_link_html includes an mdash when description is blank. | def test_get_name_link_html_mdash_for_blank_description(self):
dataset = factories.SourceDatasetFactory.create(i_dbgap_description='')
self.assertIsInstance(dataset.get_name_link_html(), str)
        self.assertIn('—', dataset.get_name_link_html()) | [
 "def test_get_name_link_html_blank_description(self):\n        trait = factories.HarmonizedTraitFactory.create(i_description='')\n        self.assertIsInstance(trait.get_name_link_html(), str)\n        self.assertIn('—', trait.get_name_link_html())",
"def test_get_name_link_html_truncates_long_description(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_name_link_html truncates a long description. | def test_get_name_link_html_truncates_long_description(self):
desc = 'my dataset description with many words'
dataset = factories.SourceDatasetFactory.create(i_dbgap_description=desc)
self.assertIsInstance(dataset.get_name_link_html(), str)
self.assertIn('my dataset', dataset.get_name_li... | [
"def test_get_name_link_html_truncates_long_description(self):\n desc = 'my trait description with many words'\n trait = factories.SourceTraitFactory.create(i_description=desc)\n self.assertIsInstance(trait.get_name_link_html(), str)\n self.assertIn('my trait', trait.get_name_link_html(m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_latest_version returns itself if the dataset is the most recent and an old version exists. | def test_get_latest_version_is_most_recent_with_same_version(self):
study = factories.StudyFactory.create()
deprecated_study_version = factories.SourceStudyVersionFactory.create(study=study, i_is_deprecated=True)
deprecated_dataset = factories.SourceDatasetFactory.create(source_study_version=dep... | [
"def get_latest_version(self):\n # Test if dataset path already exists\n dset_path = self.path(f_part=False, version=False, root=True)\n if os.path.isdir(dset_path) and os.listdir(dset_path):\n # Get and sort all existing dataset versions\n versions = sorted([v for v in os... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
You can save a HarmonizedTraitSet object. | def test_model_saving(self):
harmonized_trait_set = factories.HarmonizedTraitSetFactory.create()
self.assertIsInstance(
models.HarmonizedTraitSet.objects.get(pk=harmonized_trait_set.pk), models.HarmonizedTraitSet) | [
"def test_harmonized_trait_set_factory_create(self):\n harmonized_trait_set = factories.HarmonizedTraitSetFactory.create()\n self.assertIsInstance(harmonized_trait_set, models.HarmonizedTraitSet)",
"def test_model_saving(self):\n harmonized_trait = factories.HarmonizedTraitFactory.create()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
You can save a HarmonizedTraitSetVersion object. | def test_model_saving(self):
harmonized_trait_set_version = factories.HarmonizedTraitSetVersionFactory.create()
self.assertIsInstance(
models.HarmonizedTraitSetVersion.objects.get(pk=harmonized_trait_set_version.pk),
models.HarmonizedTraitSetVersion) | [
"def test_adding_component_harmonized_trait_set_versions(self):\n component_harmonized_trait_set_versions = factories.HarmonizedTraitSetVersionFactory.create_batch(5)\n harmonized_trait = factories.HarmonizedTraitFactory.create(\n component_harmonized_trait_set_versions=component_harmonized... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_component_html returns a string. | def test_get_component_html(self):
htsv = factories.HarmonizedTraitSetVersionFactory.create()
self.assertIsInstance(htsv.get_component_html(), str) | [
"def inner_html(self):\r\n return self.delegate.InnerHtml",
"def get_inner_html(self, locator):\n js = \"this.browserbot.findElement('%s').innerHTML\" % locator\n return self.execute_javascript(js).strip()",
"def outer_html(self):\r\n return self.delegate.OuterHtml",
"def html(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
You can save a HarmonizationUnit object. | def test_model_saving(self):
harmonization_unit = factories.HarmonizationUnitFactory.create()
self.assertIsInstance(models.HarmonizationUnit.objects.get(pk=harmonization_unit.pk), models.HarmonizationUnit) | [
"def saveUnit( unit, filename ):\n unit.initialize()\n unit.generator = None\n filename += '.lre'\n with open( filename, 'w' ) as file:\n pickle.dump( unit, file )\n print 'Saved unit'",
"def test_create(self):\n harmonization_unit = factories.HarmonizationUnitFactory.create()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adding associated component_source_traits works. | def test_adding_component_source_traits(self):
global_study = factories.GlobalStudyFactory.create()
component_source_traits = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study__global_study=global_study)
harmonization_unit = factories.Harmoniza... | [
"def test_adding_component_source_traits(self):\n global_study = factories.GlobalStudyFactory.create()\n component_source_traits = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study__global_study=global_study)\n harmonized_trait = factories.Ha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adding associated component_batch_traits works. | def test_adding_component_batch_traits(self):
global_study = factories.GlobalStudyFactory.create()
component_batch_traits = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study__global_study=global_study)
harmonization_unit = factories.Harmonizati... | [
"def test_adding_component_batch_traits(self):\n global_study = factories.GlobalStudyFactory.create()\n component_batch_traits = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study__global_study=global_study)\n harmonized_trait = factories.Harm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adding associated component_age_traits works. | def test_adding_component_age_traits(self):
global_study = factories.GlobalStudyFactory.create()
component_age_traits = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study__global_study=global_study)
harmonization_unit = factories.HarmonizationUn... | [
"def test_adding_component_batch_traits(self):\n global_study = factories.GlobalStudyFactory.create()\n component_batch_traits = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study__global_study=global_study)\n harmonized_trait = factories.Harm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adding associated component_harmonized_trait_set_versions works. | def test_adding_component_harmonized_trait_set_versions(self):
component_harmonized_trait_set_versions = factories.HarmonizedTraitSetVersionFactory.create_batch(5)
harmonization_unit = factories.HarmonizationUnitFactory.create(
component_harmonized_trait_set_versions=component_harmonized_tra... | [
"def test_adding_component_harmonized_trait_set_versions(self):\n component_harmonized_trait_set_versions = factories.HarmonizedTraitSetVersionFactory.create_batch(5)\n harmonized_trait = factories.HarmonizedTraitFactory.create(\n component_harmonized_trait_set_versions=component_harmonized... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returned queryset of source traits is correct. | def test_get_all_source_traits(self):
source_traits = factories.SourceTraitFactory.create_batch(6)
hu = factories.HarmonizationUnitFactory.create(
component_age_traits=source_traits[0:2], component_batch_traits=source_traits[2:4],
component_source_traits=source_traits[4:])
... | [
"def test_current_queryset_method(self):\n current_trait = factories.SourceTraitFactory.create()\n deprecated_trait = factories.SourceTraitFactory.create()\n deprecated_trait.source_dataset.source_study_version.i_is_deprecated = True\n deprecated_trait.source_dataset.source_study_version... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returned list of linked studies is correct. | def test_get_source_studies(self):
global_study = factories.GlobalStudyFactory.create()
studies = factories.StudyFactory.create_batch(2, global_study=global_study)
traits1 = factories.SourceTraitFactory.create_batch(3, source_dataset__source_study_version__study=studies[0])
traits2 = fac... | [
"def get_studies(self, dry_run=False):\n\n if self.verbose:\n print(\"GET STUDIES\")\n resp = self.do_get(self.api_base + \"studies\", dry_run=dry_run)\n if dry_run:\n print(\"DRY_RUN:: Returned dummy names.\")\n resp[c_STUDIES_RET_KEY] = [\"DRY_RUN 1\", \"DRY_R... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_component_html returns a string. | def test_get_component_html(self):
htsv = factories.HarmonizedTraitSetVersionFactory.create()
self.assertIsInstance(htsv.get_component_html(), str) | [
"def inner_html(self):\r\n return self.delegate.InnerHtml",
"def get_inner_html(self, locator):\n js = \"this.browserbot.findElement('%s').innerHTML\" % locator\n return self.execute_javascript(js).strip()",
"def outer_html(self):\r\n return self.delegate.OuterHtml",
"def html(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
You can save a SourceTrait object. | def test_model_saving(self):
source_trait = factories.SourceTraitFactory.create()
self.assertIsInstance(models.SourceTrait.objects.get(pk=source_trait.pk), models.SourceTrait) | [
"def _save(self):\n\t\twith open(self.path, 'w') as f:\n\t\t\tf.write(self.source)",
"def save(self, world): # pragma: no cover\n\t\traise NotImplementedError",
"def test_source_trait_factory_create(self):\n source_trait = factories.SourceTraitFactory.create()\n self.assertIsInstance(source_trait,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_name_link_html truncates a long description. | def test_get_name_link_html_truncates_long_description(self):
desc = 'my trait description with many words'
trait = factories.SourceTraitFactory.create(i_description=desc)
self.assertIsInstance(trait.get_name_link_html(), str)
self.assertIn('my trait', trait.get_name_link_html(max_popove... | [
"def test_get_name_link_html_truncates_long_description(self):\n desc = 'my dataset description with many words'\n dataset = factories.SourceDatasetFactory.create(i_dbgap_description=desc)\n self.assertIsInstance(dataset.get_name_link_html(), str)\n self.assertIn('my dataset', dataset.ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SourceTrait.objects.current() does not return deprecated traits. | def test_current_queryset_method(self):
current_trait = factories.SourceTraitFactory.create()
deprecated_trait = factories.SourceTraitFactory.create()
deprecated_trait.source_dataset.source_study_version.i_is_deprecated = True
deprecated_trait.source_dataset.source_study_version.save()
... | [
"def test_intermediate_version_no_new_current_traits(self):\n new_trait_v3 = factories.SourceTraitFactory.create(\n source_dataset__source_study_version=self.study_version_3)\n result = self.study_version_2.get_new_sourcetraits()\n self.assertNotIn(new_trait_v3, result)",
"def trai... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Archived tags and non archived tags linked to the trait are where they should be. | def test_archived_tags_and_non_archived_tags(self):
trait = factories.SourceTraitFactory.create()
archived = TaggedTraitFactory.create(archived=True, trait=trait)
non_archived = TaggedTraitFactory.create(archived=False, trait=trait)
self.assertIn(archived.tag, trait.all_tags.all())
... | [
"def test_multiple_archived_tags(self):\n trait = factories.SourceTraitFactory.create()\n archived = TaggedTraitFactory.create_batch(5, archived=True, trait=trait)\n non_archived = TaggedTraitFactory.create_batch(6, archived=False, trait=trait)\n for tagged_trait in archived:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The properties archived_traits and non_archived_traits are QuerySets. | def test_archived_tags_and_non_archived_tags_are_querysets(self):
# These need to be querysets to behave similarly to tag.traits and trait.all_tags.
trait = factories.SourceTraitFactory.create()
archived = TaggedTraitFactory.create(archived=True, trait=trait)
non_archived = TaggedTraitFa... | [
"def get_queryset(self):\n return super().get_queryset().filter(_category=DisplayCategory.ARCHIVED)",
"def archived_descendants(self):\n return self.get_descendants().filter(is_archived=True)",
"def get_queryset(self):\n return ApplcationQuerySet(self.model).exclude(status=STATUS.deleted)",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Archived tags show up in the archived_tags property with multiple tagged traits of each type. | def test_multiple_archived_tags(self):
trait = factories.SourceTraitFactory.create()
archived = TaggedTraitFactory.create_batch(5, archived=True, trait=trait)
non_archived = TaggedTraitFactory.create_batch(6, archived=False, trait=trait)
for tagged_trait in archived:
self.ass... | [
"def test_archived_tags_and_non_archived_tags(self):\n trait = factories.SourceTraitFactory.create()\n archived = TaggedTraitFactory.create(archived=True, trait=trait)\n non_archived = TaggedTraitFactory.create(archived=False, trait=trait)\n self.assertIn(archived.tag, trait.all_tags.all... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Nonarchived tags show up in the non_archived_tags property with multiple of each type. | def test_multiple_non_archived_tags(self):
trait = factories.SourceTraitFactory.create()
archived = TaggedTraitFactory.create_batch(5, archived=True, trait=trait)
non_archived = TaggedTraitFactory.create_batch(6, archived=False, trait=trait)
for tagged_trait in non_archived:
... | [
"def test_archived_tags_and_non_archived_tags(self):\n trait = factories.SourceTraitFactory.create()\n archived = TaggedTraitFactory.create(archived=True, trait=trait)\n non_archived = TaggedTraitFactory.create(archived=False, trait=trait)\n self.assertIn(archived.tag, trait.all_tags.all... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the method to get all of the trait's tags. | def test_tags(self):
trait = factories.SourceTraitFactory.create()
tagged_traits = TaggedTraitFactory.create_batch(10, trait=trait)
self.assertListEqual(list(trait.all_tags.all()), list(Tag.objects.all())) | [
"def test_organization_tags_get(self):\n pass",
"def tags(self) -> List:",
"def test_resource_tag_resource_find_tags_get(self):\n pass",
"def get_traits(self) -> list:",
"def get_all_tags(self):\n return self.scenario.get_all_tags()",
"def test_repo_list_tags(self):\n pass",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_latest_version returns itself if the trait is the most recent. | def test_get_latest_version_is_most_recent(self):
trait = factories.SourceTraitFactory.create()
self.assertEqual(trait.get_latest_version(), trait) | [
"def test_get_latest_version_is_most_recent_with_same_version(self):\n study = factories.StudyFactory.create()\n deprecated_study_version = factories.SourceStudyVersionFactory.create(study=study, i_is_deprecated=True)\n deprecated_trait = factories.SourceTraitFactory.create(\n source... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_latest_version returns itself if the trait is the most recent and an old version exists. | def test_get_latest_version_is_most_recent_with_same_version(self):
study = factories.StudyFactory.create()
deprecated_study_version = factories.SourceStudyVersionFactory.create(study=study, i_is_deprecated=True)
deprecated_trait = factories.SourceTraitFactory.create(
source_dataset_... | [
"def test_get_latest_version_is_most_recent(self):\n trait = factories.SourceTraitFactory.create()\n self.assertEqual(trait.get_latest_version(), trait)",
"def get_latest_version(self, did, has_version=None):\n raise NotImplementedError(\"TODO\")",
"def latest_stable_version():\n data = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns None if there is no previous study version. | def test_get_previous_version_no_previous_study_version(self):
study = factories.StudyFactory.create()
now = timezone.now()
study_version = factories.SourceStudyVersionFactory.create(
study=study, i_version=1, i_date_added=now - timedelta(hours=1))
newer_study_version = facto... | [
"def test_get_previous_version_no_previous_version(self):\n study = factories.StudyFactory.create()\n source_study_version_1 = factories.SourceStudyVersionFactory.create(study=study, i_version=1)\n source_study_version_2 = factories.SourceStudyVersionFactory.create(study=study, i_version=2)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns None if the source trait doesn't exist in the previous version. | def test_get_previous_version_previous_version_no_trait(self):
study = factories.StudyFactory.create()
now = timezone.now()
previous_study_version = factories.SourceStudyVersionFactory.create(
study=study, i_version=1, i_date_added=now - timedelta(hours=1))
study_version = fa... | [
"def test_get_previous_version_no_previous_study_version(self):\n study = factories.StudyFactory.create()\n now = timezone.now()\n study_version = factories.SourceStudyVersionFactory.create(\n study=study, i_version=1, i_date_added=now - timedelta(hours=1))\n newer_study_versi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
You can save a HarmonizedTrait object. | def test_model_saving(self):
harmonized_trait = factories.HarmonizedTraitFactory.create()
self.assertIsInstance(models.HarmonizedTrait.objects.get(pk=harmonized_trait.pk), models.HarmonizedTrait) | [
"def test_model_saving(self):\n harmonized_trait_set = factories.HarmonizedTraitSetFactory.create()\n self.assertIsInstance(\n models.HarmonizedTraitSet.objects.get(pk=harmonized_trait_set.pk), models.HarmonizedTraitSet)",
"def test_create(self):\n harmonized_trait = factories.Harm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adding associated component_source_traits works. | def test_adding_component_source_traits(self):
global_study = factories.GlobalStudyFactory.create()
component_source_traits = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study__global_study=global_study)
harmonized_trait = factories.HarmonizedT... | [
"def test_adding_component_source_traits(self):\n global_study = factories.GlobalStudyFactory.create()\n component_source_traits = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study__global_study=global_study)\n harmonization_unit = factories.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adding associated component_batch_traits works. | def test_adding_component_batch_traits(self):
global_study = factories.GlobalStudyFactory.create()
component_batch_traits = factories.SourceTraitFactory.create_batch(
5, source_dataset__source_study_version__study__global_study=global_study)
harmonized_trait = factories.HarmonizedTra... | [
"def test_adding_component_batch_traits(self):\n global_study = factories.GlobalStudyFactory.create()\n component_batch_traits = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study__global_study=global_study)\n harmonization_unit = factories.Ha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adding associated component_harmonized_trait_set_versions works. | def test_adding_component_harmonized_trait_set_versions(self):
component_harmonized_trait_set_versions = factories.HarmonizedTraitSetVersionFactory.create_batch(5)
harmonized_trait = factories.HarmonizedTraitFactory.create(
component_harmonized_trait_set_versions=component_harmonized_trait_s... | [
"def test_adding_component_harmonized_trait_set_versions(self):\n component_harmonized_trait_set_versions = factories.HarmonizedTraitSetVersionFactory.create_batch(5)\n harmonization_unit = factories.HarmonizationUnitFactory.create(\n component_harmonized_trait_set_versions=component_harmon... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adding associated harmonization_units works. | def test_adding_harmonization_units(self):
htrait_set_version = factories.HarmonizedTraitSetVersionFactory.create()
harmonization_units = factories.HarmonizationUnitFactory.create_batch(
5, harmonized_trait_set_version=htrait_set_version)
harmonized_trait = factories.HarmonizedTraitF... | [
"def test_create_with_harmonization_units(self):\n harmonization_units = factories.HarmonizationUnitFactory.create_batch(10)\n harmonized_trait = factories.HarmonizedTraitFactory.create(harmonization_units=harmonization_units)\n self.assertEqual(harmonization_units, list(harmonized_trait.harmon... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_name_link_html includes an mdash when description is blank. | def test_get_name_link_html_blank_description(self):
trait = factories.HarmonizedTraitFactory.create(i_description='')
self.assertIsInstance(trait.get_name_link_html(), str)
        self.assertIn('—', trait.get_name_link_html()) | [
 "def test_get_name_link_html_mdash_for_blank_description(self):\n        dataset = factories.SourceDatasetFactory.create(i_dbgap_description='')\n        self.assertIsInstance(dataset.get_name_link_html(), str)\n        self.assertIn('—', dataset.get_name_link_html())",
"def test_get_name_link_html_truncate... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
HarmonizedTrait.objects.current() does not return deprecated traits. | def test_current_queryset_method(self):
current_trait = factories.HarmonizedTraitFactory.create()
deprecated_trait = factories.HarmonizedTraitFactory.create()
deprecated_trait.harmonized_trait_set_version.i_is_deprecated = True
deprecated_trait.harmonized_trait_set_version.save()
... | [
"def traits(self):\r\n state = self.state\r\n domain = state.domain\r\n features = state.attributes\r\n \r\n t = [Trait(state) for Trait in trait.TRAITS\r\n if Trait.supported(domain, features)]\r\n return t",
"def get_traits(self) -> list:",
"def test_cu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
HarmonizedTrait.objects.non_unique_keys() does not return unique key traits. | def test_non_unique_keys_queryset_method(self):
non_uk_trait = factories.HarmonizedTraitFactory.create(i_is_unique_key=False)
uk_trait = factories.HarmonizedTraitFactory.create(i_is_unique_key=True)
self.assertIn(non_uk_trait, models.HarmonizedTrait.objects.non_unique_keys())
self.assert... | [
"def get_all_keys(ds):\n keys = []\n for d in ds:\n for k in d.keys():\n if not( k in keys):\n keys.append(k)\n return keys",
"def keys(self):\n for item in self.table:\n if item:\n yield item.key",
"def _key_set(self):\n return se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
You can save a SourceTraitEncodedValue object. | def test_model_saving(self):
source_trait_encoded_value = factories.SourceTraitEncodedValueFactory.create()
self.assertIsInstance(
models.SourceTraitEncodedValue.objects.get(pk=source_trait_encoded_value.pk),
models.SourceTraitEncodedValue) | [
"def test_source_trait_encoded_value_factory_create(self):\n source_trait_encoded_value = factories.SourceTraitEncodedValueFactory.create()\n self.assertIsInstance(source_trait_encoded_value, models.SourceTraitEncodedValue)",
"def test_source_trait_encoded_value_factory_build(self):\n source_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
You can save a HarmonizedTraitEncodedValue object. | def test_model_saving(self):
harmonized_trait_encoded_value = factories.HarmonizedTraitEncodedValueFactory.create()
self.assertIsInstance(
models.HarmonizedTraitEncodedValue.objects.get(pk=harmonized_trait_encoded_value.pk),
models.HarmonizedTraitEncodedValue) | [
"def test_harmonized_trait_encoded_value_factory_create(self):\n harmonized_trait_encoded_value = factories.HarmonizedTraitEncodedValueFactory.create()\n self.assertIsInstance(harmonized_trait_encoded_value, models.HarmonizedTraitEncodedValue)",
"def test_harmonized_trait_encoded_value_factory_build... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if sampleunit is single end. | def is_single_end(sample, unit):
return pd.isnull(units.loc[(sample, unit), "fq2"]) | [
"def is_unit(self):\n if self.is_one() or (-self).is_one():\n return True\n if self.is_zero(): # now 0 != 1\n return False\n raise NotImplementedError",
"def is_single_trajectory(self) -> bool:\n return not any(self[SampleBatch.TERMINATEDS][:-1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Denote sample name and platform in read group. | def get_read_group(wildcards):
return r"-R '@RG\tID:{sample}\tSM:{sample}\tPL:{platform}'".format(sample=wildcards.sample, platform=units.loc[(wildcards.sample, wildcards.unit), "platform"]) | [
"def get_read_group(wildcards):\n return r\"-R '@RG\\tID:{run}\\tSM:{sample}-{condition}\\tPL:{platform}'\".format(\n sample=wildcards.sample,\n condition=wildcards.condition,\n run=units.loc[(wildcards.sample, wildcards.unit, wildcards.condition), \"fq1\"].split(\"/\")[-1].split(\".\")[0],\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if this output stream uses hardware acceleration. | def is_hardware_accelerated(self) -> bool:
if self.codec:
return self.codec.is_hardware_accelerated()
return False | [
"def is_accelerable(self):\n if self._kernel_params:\n return True\n else:\n return False",
"def has_speed_configuration(self) -> bool:\n return self.data_template.get_speed_config(self.info.platform_data) is not None",
"def accelerate2_d_video_enabled(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if the output format is restricted to DASH protocol | def is_dash_only(self) -> bool:
if self.codec is not None:
return self.codec.get_output_format() == 'webm'
return False | [
"def _checkFormat(self,packetData,responseFormat):\n pass\n #build the custom regex from the responseFormat\n variables = self._getVariables(responseFormat)\n #replace the variables (!variableName!)\n for variable in variables:\n responseFormat = responseFormat.replace(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get methods from macro server | def __getattr__(self, name):
return getattr(self.macro_server, name) | [
"def get_rpc_methods(ctx, request):\n fill_response_header(ctx)\n resp = AutoConfigServer._handle_tr069_message(ctx, request)\n return resp",
"def methods(self):\n return self.client.fldigi.list()",
"def get_macros(self):\n name = self.__class__.__name__\n if na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
takes a symmetrical connectivity matrix (e.g., numpy array) and a list of roi_names (strings) returns data frame with roi_names as index and column names e.g. r1 r2 r3 r4 r1 0.0 0.3 0.7 0.2 r2 0.3 0.0 0.6 0.5 r3 0.7 0.6 0.0 0.9 r4 0.2 0.5 0.9 0.0 | def _get_con_df(raw_mat, roi_names):
# sanity check if matrix is symmetrical
assert np.allclose(raw_mat, raw_mat.T), "matrix not symmetrical"
np.fill_diagonal(raw_mat, 0)
con_df = pd.DataFrame(raw_mat, index=roi_names, columns=roi_names)
return con_df | [
"def add_row_and_columns_id(names, moc_matrix):\n df = pd.DataFrame(moc_matrix, columns=names, index=names)\n return df",
"def transform_listeArcs_mat_adjacence(liste_noeuds_matIni, listeArcs, oriente):\n matE = pd.DataFrame( index = liste_noeuds_matIni, columns = liste_noeuds_matIni)\n ##print (\"col... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests we can serve take_action_template. | def test_take_action_template(self):
new_campaign = Campaign(campaign_url='test.com')
new_campaign.save()
response = self.client.get('/takeaction/?campaign_id=%s' % (new_campaign.id))
self.assertEqual('takeaction.html', response.templates[0].name) | [
"def test_templates(self):\n\t\tpass",
"def test_post_get_prepper_template(self):\n pass",
"def test_template_permission_sets_post(self):\n pass",
"def test_otoroshi_controllers_adminapi_templates_controller_create_from_template_simple(self):\n pass",
"def test_post_apply_prepper_templa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test we can render button.html | def test_button_html_success(self):
new_campaign = Campaign(campaign_url='test.com')
new_campaign.save()
response = self.client.get('/button/?campaign_id=%s' % (new_campaign.id))
#import ipdb; ipdb.set_trace()
self.assertEqual('button.html', response.templates[0].name) | [
"def test_index_and_base_templates_html(self):\n\n with app.test_client() as client:\n resp = client.get('/')\n html = resp.get_data(as_text=True)\n\n self.assertIn('<form action=\"/game-board\" id=\"begin-game\">', html)\n self.assertIn('<script src=\"https://unp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of all the nouns in a string. | def get_nouns(text):
if type(text) == list:
text = ' '.join(text)
is_noun = lambda x: 'NN' in x
tokens = tokenize(text)
nouns = [word for (word, pos) in nltk.pos_tag(tokens) if is_noun(pos)]
return nouns | [
"def nouns(self):\n return self._nouns",
"def find_nouns(text):\n\n import nltk # pylint: disable=import-outside-toplevel\n\n tagged = nltk.pos_tag(nltk.word_tokenize(text.strip()), tagset=\"universal\")\n tagged_nouns = filter(tagged_word_is_noun, tagged)\n return map(lambda tagged: tagged[0]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tokenize a list of reviews. | def tokenize_2(review_list):
texts_list = []
for doc in tqdm(review_list):
# Parse the doc into tokens
tokenizer = RegexpTokenizer(r'\w+')
raw = doc.lower()
tokens = tokenizer.tokenize(raw)
# Remove stop words
en_stop = stopwords.words('english')
stopped... | [
"def __tokenize_all_reviews(self, cached_path):\n\n self.reviews_tokenized = [\n self.tokenizer.tokenize_sentence(i) for i in self.reviews\n ]\n\n # save tokenized reviews to cache to speedup build process\n with open(cached_path, \"w\") as fp:\n json.dump(self.revi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test get books a recipe is in | def test_get_books(self):
self.create_recipe()
recipe = Recipes.objects.last()
url = '/0/recipes/%i/books' % recipe.pk
book = Book.objects.create(name='Book 1', chef=self.user)
book.add_recipe(recipe)
user2 = self.create_user('2')
book_other = Book.objects.crea... | [
"def test_retrieve_recipes(self):\n pass",
"def test_retrieve_recipe(self):\n pass",
"def test_list_recipe(self):\n col = baker.make_recipe(\"makeReports.college\", active=True)\n colI = baker.make_recipe(\"makeReports.college\",active=False)\n response = self.client.get(rever... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
draw an octree as an STLSurface | def drawTree2(myscreen,t,color=red,opacity=0.2):
tlist = pyocl.octree2trilist(t)
surf = STLSurf(triangleList=tlist)
surf.SetColor(color)
surf.SetOpacity(opacity)
myscreen.addActor(surf) | [
"def displaySurface(objects, flipNormals=bool, xRay=bool, twoSidedLighting=bool):\n pass",
"def printview(self, obj):\n req = self.osm.get_allocation()\n widthpx = req.width\n heightpx = req.height\n prt = CairoPrintSave(widthpx, heightpx, self.osm.do_draw, self.osm)\n prt.ru... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set surface to wireframe | def SetWireframe(self):
self.GetProperty().SetRepresentationToWireframe() | [
"def setWireframeShader(self, shader):\n self._wireframe_shader = shader",
"def setNoLightWireframeShader(self, shader):\n self._nolight_wireframe_shader = shader",
"def wireframeShader(self):\n return self._wireframe_shader",
"def surface(self):\n if self._surface is None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set surface rendering on | def SetSurface(self):
self.GetProperty().SetRepresentationToSurface() | [
"def display(self,surface):\n\n if self._shown:\n \"\"\"the 0,0 values mean the upper left corner of the surface\"\"\"\n self._image.display(0,0,self._surf)\n surface.blit(self._surf, self._rect)\n self.displaySubwidgets(surface)",
"def draw_on_screen(self, scree... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set position on screen | def SetPos(self, pos):
self.SetDisplayPosition(pos[0], pos[1]) | [
"def setPosition(self, x, y):\n\n\t\tself.rect.left = x\n\t\tself.rect.top = y",
"def set_player_position(self, position):",
"def set_position(self, position):\n self.mediaplayer.set_position(position / 1000.0)",
"def update_position(self, x, y):\n self.x = x\n self.y = y",
"def _set_po... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets chrome options for Selenium. Chrome options for headless browser are enabled. 1. Explicitly saying that this is a headless application with headless 2. Explicitly bypassing the security level in Docker with nosandbox. Apparently as the Docker daemon always runs as a root user, Chrome crashes. 3. Explicitly disabling t... | def set_chrome_options(self) -> Options:
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_prefs = {}
chrome_options.experimental_options["prefs"] ... | [
"def set_chrome_options():\n chrome_options = webdriver.ChromeOptions()\n # chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument(\"--disable-dev-shm-usage\")\n chrome_options.add_argument(\"--disable-blink-features=AutomationControl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A function that creates a function to perform when you Select Player | def __choose_player(self, lanel, player, choose):
def clicking():
"""
Function that occurs when you press Select.
Coverage of the image is more relevant, and updating of the
dictionary is under review.
If you have already selected two - close t... | [
"def playerSelection(player):\n print('\\nIs player {} a human or computer?'.format(player))\n print('1. Enter 1 if Human')\n print('2. Enter 2 if Computer')\n\n return makeChoice()",
"def player_choose(self) -> None:\n print(\"(1) Rock\\n(2) Paper\\n(3) Scissors\")\n self.human_choice =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Change the value of every pixel by following x_n = 0.5x_p^2 where x_n is the new value and x_p is the original value | def change_value(image):
out = None
### YOUR CODE HERE
out = 0.5 * image ** 2
### END YOUR CODE
return out | [
"def adjust_pixel(x):\n if x <= 200:\n x = max(0, x - 32)\n return min(x, 128)\n else:\n return 255",
"def set_pixel(x, y, value):\n st7567.set_pixel(x, y, value)",
"def SetPixel(self, index: 'itkIndex2', value: 'float const &') -> \"void\":\n return _itk... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return image decomposed to just the lab channel specified | def lab_decomposition(image, channel):
lab = color.rgb2lab(image)
out = None
### YOUR CODE HERE
if channel == 'L':
out = lab[..., 0]
elif channel == 'A':
out = lab[..., 1]
elif channel == 'B':
out = lab[..., 2]
else:
print('Input channel is not RGB!')
... | [
"def pull_out_L_channel(img_lab):\n img_l = img_lab[:, :, 0]\n return img_l",
"def cvt_color_2_lab(frame):\n img_rgb = (frame[:, :, [2, 1, 0]] * 1.0 / 255).astype(np.float32)\n img_lab = cv.cvtColor(img_rgb, cv.COLOR_RGB2Lab)\n return img_lab",
"def extract_blue(image):\n # Since blue is the f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return image decomposed to just the hsv channel specified | def hsv_decomposition(image, channel='H'):
hsv = color.rgb2hsv(image)
out = None
### YOUR CODE HERE
if channel == 'H':
out = hsv[..., 0]
elif channel == 'S':
out = hsv[..., 1]
elif channel == 'V':
out = hsv[..., 2]
else:
print('Input channel is not RGB!')
... | [
"def r2h(img):\n return cv.cvtColor(img,cv.COLOR_RGB2HSV)",
"def to_hsv(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)",
"def hsvSpace(imagePath):\n img=cv2.imread (imagePath)\n return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)",
"def _get_threashold_image_hsv(self, img, debug=False):\n imgHS... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return image which is the left of image1 and right of image 2 excluding the specified channels for each image | def mix_images(image1, image2, channel1, channel2):
out = None
### YOUR CODE HERE
h = image1.shape[0]
w = image1.shape[1]
r1, g1, b1 = image1[:,:int(w/2),0], image1[:,:int(w/2),1], image1[:,:int(w/2),2]
r2, g2, b2 = image2[:,int(w/2):,0], image2[:,int(w/2):,1], image2[:,int(w/2):,2]
if chan... | [
"def two_image_filter( image1, image2 ):\n pass",
"def get_grey_images(img1, img2):\n img1_grey = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img2_grey = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n return img1_grey, img2_grey",
"def merge_left(img1, img2, h, matches_lst, x_shift_cum=0, y_shift_cum=0):\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Define Config Command Mode | def config_mode(self):
pass | [
"def setCmdConfig(self, config, mode = 'raw'):\n self.mode = PPU.MOVE\n self.modeParam = 0\n if mode == 'raw':\n self.cmdPan_Raw = config[0]\n self.cmdTilt_Raw = config[1]\n self.cmdExt_Raw = config[2]\n elif mode == 'rad/prct':\n self.setCmdRa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return HTTP headers required for the Mandrill API. | def _get_headers():
return {"content-type": "application/json", "user-agent": "Mandrill-Python/1.0.57"} | [
"def get_headers():\n return {\n 'Content-Type': 'application/json',\n 'Authorization': get_basic_auth()\n }",
"def auth_headers():\n headers = {\n 'X-Auth-User': config.craton_creds['username'],\n 'X-Auth-Token': config.craton_creds['password'],\n 'X-Auth-Project': con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Restructure project so that directories of form WeekxSxYEARTopics are changed to YEARSxWeekxTopics so file explorers list them in order. There are also randomly named .md files; if only one of them exists in bottom level directories, rename them to README.md so it renders on GitHub. | def re_structure_project(absoluate_current_dir):
files = os.listdir(absoluate_current_dir)
directories = [f for f in files if f not in IGNORED and os.path.isdir(
os.path.join(absoluate_current_dir, f))]
if directories: # Not a bottom level directory.
for directory in directories:
... | [
"def fix_contents(dir_path):\n # regexp to match human readable doc name\n re_html_title = re.compile(r'<title>(.+)&mdash')\n\n # move to the temp dir\n os.chdir(dir_path)\n\n for doc_section_dir in os.listdir(os.curdir):\n if os.path.isdir(doc_section_dir):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test admin policy works. | def test_admin_policy() -> None:
# Make sure it's valid
POLICY_SCHEMA(system_policies.ADMIN_POLICY)
perms = PolicyPermissions(system_policies.ADMIN_POLICY, None)
assert perms.check_entity("light.kitchen", "read")
assert perms.check_entity("light.kitchen", "control")
assert perms.check_entity("l... | [
"def test_admin_page(self):\n response = self.client.get('/admin', follow=True)\n self.assertContains(response, \"Django admin\", status_code=200)",
"def test_administrated_by(self):\n self.assertTrue(self.tool.administrated_by(self.user1))\n self.assertTrue(self.tool.administrated_by(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test user policy works. | def test_user_policy() -> None:
# Make sure it's valid
POLICY_SCHEMA(system_policies.USER_POLICY)
perms = PolicyPermissions(system_policies.USER_POLICY, None)
assert perms.check_entity("light.kitchen", "read")
assert perms.check_entity("light.kitchen", "control")
assert perms.check_entity("ligh... | [
"def test_admin_policy() -> None:\n # Make sure it's valid\n POLICY_SCHEMA(system_policies.ADMIN_POLICY)\n\n perms = PolicyPermissions(system_policies.ADMIN_POLICY, None)\n assert perms.check_entity(\"light.kitchen\", \"read\")\n assert perms.check_entity(\"light.kitchen\", \"control\")\n assert p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test read only policy works. | def test_read_only_policy() -> None:
# Make sure it's valid
POLICY_SCHEMA(system_policies.READ_ONLY_POLICY)
perms = PolicyPermissions(system_policies.READ_ONLY_POLICY, None)
assert perms.check_entity("light.kitchen", "read")
assert not perms.check_entity("light.kitchen", "control")
assert not p... | [
"def test_readonly(self):\n self.assertTrue(self.fs.getmeta().get('read_only'),\n 'Filesystem is not read-only.')\n\n with self.assertRaises(errors.ResourceReadOnly):\n self.fs.makedir('newdir')\n with self.assertRaises(errors.ResourceReadOnly):\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load obj with give label from hidden state directory | def load(self, label):
objloc = '{0}/{1}'.format(self.statedir, label)
try:
obj = pickle.load(open(objloc, 'r'))
except (KeyError, IndexError, EOFError):
obj = open(objloc, 'r').read()
try:
obj = float(obj)
except ValueError:
... | [
"def load_obj(load_dir):\r\n return pickle.load(open(load_dir, 'rb'))",
"def load_object(self, obj):\n pass",
"def load_obj(name):\r\n with open(name + '.pkl', 'rb') as f:\r\n return pickle.load(f)",
"def load_label(self, idx):\n\t\"\"\"\n label_400 = scipy.io.loadmat('{}/trainval/{... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Hack to get name of notebook as python obj 'nbname'. Does not work with 'run all' | def getnbname():
display(Javascript("""IPython.notebook.kernel.execute("nbname = " + "\'"+IPython.notebook.notebook_name+"\'");""")) | [
"def get_nb_name(d=None) -> str:\n try:\n nb_name = d['__vsc_ipynb_file__']\n return nb_name # VSCode\n except:\n try: \n _, path = _find_nb()\n if path:\n return path.name\n else:\n return\n except: \n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get first TTL pulse time from 100_ADC1.continuous file to synchronize Expo trials with Openephys recording | def get_first_sync_time_openephys(filepath):
d = pyopenephys.openephys_tools.loadContinuous(filepath=filepath)
for i, b in enumerate(d['data'] > 10000):
if b:
return i / float(d['header']['sampleRate'])
return None | [
"def get_first_sync_time_spikeglx(filepath):\n rec = SpikeGLXRecordingExtractor(file_path=filepath)\n xx, yy = rec.get_ttl_events()\n rate = rec.get_sampling_frequency()\n return xx[0] / rate",
"def first_get_times(self, header, seq=True):\n times = header[:, -3] / np.float(self.nperpacket) \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get first TTL pulse time from spikeglx file to synchronize Expo trials with SpikeGLX recording | def get_first_sync_time_spikeglx(filepath):
rec = SpikeGLXRecordingExtractor(file_path=filepath)
xx, yy = rec.get_ttl_events()
rate = rec.get_sampling_frequency()
return xx[0] / rate | [
"def get_first_sync_time_openephys(filepath):\n d = pyopenephys.openephys_tools.loadContinuous(filepath=filepath)\n for i, b in enumerate(d['data'] > 10000):\n if b:\n return i / float(d['header']['sampleRate'])\n return None",
"def first_get_times(self, header, seq=True):\n ti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return a list of FileSnapshot objects from given directory. Not recursive. | def get_snapshots(root):
snapshots = {}
for f in os.listdir(root):
filepath = os.path.join(root, f)
if os.path.isfile(filepath):
if f.startswith("."):
continue
snapshot = FileSnapshot(filepath)
snapshots[filepath] = snapshot
return snapshot... | [
"def collect_files_from_dir(directory, prefix=\"\", suffix=\"\", recursive=True):\n files = []\n _collect_files_from_dir(directory, prefix, suffix, recursive, files)\n return files",
"def get_entries(self, dir):\n return os.listdir(dir)",
"def all_snap_changes(archive_dir):\n # type: (str) ->... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return a new shade of this color, brightened or darkened by factor | def shade(self, factor):
newr = min(floor(self.r * factor), 255)
newg = min(floor(self.g * factor), 255)
newb = min(floor(self.b * factor), 255)
newa = self.a
return Color(newr, newg, newb, newa) | [
"def darkness_multiplier(self):\n radius = self.light_distance()\n if radius == 0: return 255\n base_alpha = self.base_alpha()\n multiplier = math.pow( ( float( 250.0/base_alpha ) ), float( 1.0/radius ) )\n return multiplier",
"def modulate(self, hue=0, saturation=0, lightness=0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Flush the ARP cache for each mininet host in the list cli ... a bigtest cli in mininet mode host_list a list of host to query, e.g., [ 'h1', 'h4' ] | def mininetFlushArpCache(cli, host_list):
for h in host_list:
cli.runCmd("%s ip neigh flush all" % h) | [
"def mininetGetArpCache(cli, host_list):\n res = dict()\n for h in host_list:\n arp_str = cli.runCmd(\"%s ip neigh show\" % h)\n arp_str = arp_str.replace('\\\\r', '').replace('\\\\n', '\\n')\n arp_arr = arp_str.split(\"\\n\")\n entries = dict()\n for entry in arp_arr[2:]:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Query the ARP cache for each mininet host in host_list and | def mininetGetArpCache(cli, host_list):
res = dict()
for h in host_list:
arp_str = cli.runCmd("%s ip neigh show" % h)
arp_str = arp_str.replace('\\r', '').replace('\\n', '\n')
arp_arr = arp_str.split("\n")
entries = dict()
for entry in arp_arr[2:]:
# first two... | [
"def mininetFlushArpCache(cli, host_list):\n for h in host_list:\n cli.runCmd(\"%s ip neigh flush all\" % h)",
"def _GetHostsFromArpTable(self, hosts):\n for (mac, ip4, iface) in self._ParseArpTable():\n ip4 = tr.helpers.NormalizeIPAddr(ip4)\n mac = mac.lower()\n host = hosts.get(mac, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate UTC time difference in seconds between 2 controllers Unreliable if response time is slow. This is meant to be a sanity check for NTP. Or to be used when NTP is not used. | def calculateControllerTimeDifference(controller1, controller2, absolute=True):
ju1 = controller1.restGet("system/clock/utc")
ju2 = controller2.restGet("system/clock/utc")
def convertUnicodeKeyToString(ju):
js = {}
for i in ju:
if str(i) in ["tz", "tzinfo", "microsecond"]: contin... | [
"def compare_times(one_time, another_time):\n another_time_dt = datetime.strptime(another_time, \"%Y-%m-%dT%H:%M:%SZ\")\n diff = one_time - another_time_dt\n return diff.seconds",
"def tz_difference():\n date_one = date.strptime(date.now(pytztz('Asia/Krasnoyarsk')).strftime('%d/%m/%Y %H:%M'),'%d/%m/%... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of reifications of var from the solutions of goal (only the first n solutions, if n given). | def run(var, goal, n=None):
solns = gen_solutions(var, goal)
return list(solns if n is None else islice(solns, 0, n)) | [
"def alternatives(self, n):\n return []",
"def generate_problem_renaming_variations(problem, target_n):\n\n variations = []\n variations_hash_set = {}\n for _ in range(target_n):\n variation = generate_problem_renaming(problem)\n if variation:\n variation_hash = str(variation)\n if varia... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Succeed when goal1 succeeds or goal2 succeeds (not sharing any new substitutions between the two). | def either(goal1, goal2):
return lambda s: interleave((goal1(s), goal2(s))) | [
"def check_same(comment, first, second, localMsg, localResults):\n if first == second:\n localResults['pass'] += 1\n else:\n localResults['fail'] += 1\n print('FAILED '+comment)\n print(localMsg)\n print('')",
"def test_two_args(self):\n self.assertTrue(is_testable(), NOT_TESTABLE)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return val filled out by substitution s enough that the result is not a bound variable; it's either a nonvariable or unbound. | def substitute(val, s):
while is_var(val):
for svar, sval in substitutions(s):
if val is svar:
val = sval
break
else:
break
return val | [
"def get_substitute_variable(val: str, var_dict: dict) -> Any:\n var_marker = 'var:'\n if val.startswith(var_marker):\n var_name = val[len(var_marker):]\n\n if var_name in var_dict:\n return var_dict[var_name]\n\n return None",
"def varsub(val):\n _dbg('varsub(): starting with... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return s plus minimal extensions to make the vals u and v equal mod substitution, if possible; else None. | def unify(u, v, s):
u = substitute(u, s)
v = substitute(v, s)
if u is v:
return s
elif is_var(u):
return (extend_unchecked if is_var(v) else extend)(u, v, s)
elif is_var(v):
return extend(v, u, s)
elif is_tuple(u) and is_tuple(v) and len(u) == len(v):
for ui, vi i... | [
"def unify(u, v, s):\n s = s if isinstance(s, Substitution) else Substitution(s)\n u = s.walk(u)\n v = s.walk(v)\n if u == v:\n return s\n if isinstance(u, Var):\n return s.assoc(u, v)\n if isinstance(v, Var):\n return s.assoc(v, u)\n if isinstance(u, tuple) and isinstance(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clears the context for the given key. | def clear_context(self, key: str):
del self.global_context[key] | [
"def clear(self, key):\n cache_key = self.get_cache_key(key)\n self._cache.pop(cache_key, None)\n return self",
"def delete(self, key):\n del self.dict[key]",
"def erase(self,key):\n Loader.capi.cppcms_capi_session_erase(self.d,key.encode())\n self.check()",
"def clea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Translates all the keys in a dictionary. Useful for translating the i18n property that exists for some lovely.io packages. | def _translate_dict(self, dictionary: dict, context=None) -> dict:
for k, v in dictionary.items():
if isinstance(v, str):
dictionary[k] = self._translate_text(v, None, None, context)
return dictionary | [
"def translate_dict(item: str, trans: dict) -> str:\n return trans[item.lower()]",
"def translate_from_dictionary(ciphertext, translate):\n plaintext = ''\n for character in ciphertext:\n plaintext += translate[character]\n\n return plaintext",
"def _translate_fields(self, root):\n for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
creates unit_of_work and inserts it into the DB | def _insert_uow(self, process_name, start_timeperiod, end_timeperiod, start_id, end_id):
uow = UnitOfWork()
uow.process_name = process_name
uow.timeperiod = start_timeperiod
uow.start_id = str(start_id)
uow.end_id = str(end_id)
uow.start_timeperiod = start_timeperiod
... | [
"def test_new_unit_creation(self):\n local_user = self.create_and_return_local_user()\n course_id = new_course({\n 'teacher' : local_user.key.id(),\n 'title' : 'foo course',\n 'body' : 'hey look mom',\n })\n unit_id = new_unit({\n 'course' ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
method creates and publishes a unit_of_work. it also handles DuplicateKeyError and attempts recovery | def insert_and_publish_uow(self, process_name, start_timeperiod, end_timeperiod, start_id, end_id):
is_duplicate = False
try:
uow = self._insert_uow(process_name, start_timeperiod, end_timeperiod, start_id, end_id)
except DuplicateKeyError as e:
is_duplicate = True
... | [
"def _insert_uow(self, process_name, start_timeperiod, end_timeperiod, start_id, end_id):\n uow = UnitOfWork()\n uow.process_name = process_name\n uow.timeperiod = start_timeperiod\n uow.start_id = str(start_id)\n uow.end_id = str(end_id)\n uow.start_timeperiod = start_time... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
method that takes care of processing job records in STATE_EMBRYO state | def _process_state_embryo(self, job_record):
pass | [
"def _process_state_final_run(self, job_record):\n pass",
"def _process_state_skipped(self, job_record):\n pass",
"def handle_submitted_jobs(self):\n\n try:\n cart = self.cart\n\n if cart['cart_status']['cart_status_id'] == STATUS_INPROCESS:\n return\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
method that takes care of processing job records in STATE_IN_PROGRESS state | def _process_state_in_progress(self, job_record):
pass | [
"def _process_state_skipped(self, job_record):\n pass",
"def _process_state_final_run(self, job_record):\n pass",
"def handle_submitted_jobs(self):\n\n try:\n cart = self.cart\n\n if cart['cart_status']['cart_status_id'] == STATUS_INPROCESS:\n return\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
method takes care of processing job records in STATE_FINAL_RUN state | def _process_state_final_run(self, job_record):
pass | [
"def _process_state_skipped(self, job_record):\n pass",
"def _finalize_job(cls, mapreduce_spec, mapreduce_state):\n config = util.create_datastore_write_config(mapreduce_spec)\n queue_name = util.get_queue_name(mapreduce_spec.params.get(\n model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE))\n d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
method takes care of processing job records in STATE_FINAL_SKIPPED state | def _process_state_skipped(self, job_record):
pass | [
"def _process_state_final_run(self, job_record):\n pass",
"def complete_unprocessed_file(job: Job, job_repo: JobRepository, client_file: ClientFile, flag: str) -> None:\n # alert about the flag and halt the file process\n logger.info(\"validate_file_structure is FALSE. As this is an unexpected config... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
method will trigger job processing only if _all_ dependencies are in STATE_PROCESSED; it will transfer the current job into STATE_SKIPPED if any dependency is in STATE_SKIPPED | def manage_job_with_blocking_dependencies(self, job_record, run_on_active_timeperiod):
composite_state = self.timetable.dependent_on_composite_state(job_record)
assert isinstance(composite_state, NodesCompositeState)
if composite_state.all_processed:
self.manage_job(job_record)
... | [
"def start_ready_jobs(self):\n for case in self.cases:\n for job in case['jobs']:\n if job.status != JobStatus.VALID:\n continue\n if len(self.running_jobs) >= self.max_running_jobs:\n msg = 'running {} of {} jobs, waiting for que... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
compute the vector whose ith element is the mean of the ith elements of the input vectors | def vector_mean(vectors):
n = len(vectors)
return scalar_multiply(1/n, vector_sum(vectors)) | [
"def vector_mean(*args):\n sz = len(args)\n mean_vector = [0.0 for _ in range(len(args[0]))]\n for input_vector in args:\n mean_vector = [a+b for a, b in zip(mean_vector, input_vector)]\n mean_vector = [a / sz for a in mean_vector]\n return mean_vector",
"def vectors_mean(self, vectors):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
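vector_mean above depends on vector_sum and scalar_multiply, which are not shown in the row. A self-contained sketch of how they are commonly defined (the definitions below are assumptions in the spirit of the surrounding code), with a small check:

```python
def vector_add(v, w):
    # componentwise sum of two vectors
    return [v_i + w_i for v_i, w_i in zip(v, w)]

def vector_sum(vectors):
    # sum a list of vectors componentwise
    result = vectors[0]
    for vector in vectors[1:]:
        result = vector_add(result, vector)
    return result

def scalar_multiply(c, v):
    # multiply every element of v by the scalar c
    return [c * v_i for v_i in v]

def vector_mean(vectors):
    n = len(vectors)
    return scalar_multiply(1 / n, vector_sum(vectors))

assert vector_mean([[1, 2], [3, 6]]) == [2.0, 4.0]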
returns a num_rows x num_cols matrix whose (i,j)th entry is entry_fn(i, j) | def make_matrix(num_rows, num_cols, entry_fn):
return [[entry_fn(i , j)
for j in range(num_cols)]
for i in range(num_rows)] | [
"def make_matrix(num_rows, num_cols, entry_fn):\n return [[entry_fn(i, j) for j in range(num_cols)] for i in range(num_rows)]",
"def row(m, n):\r\n return m[n]",
"def map_mat(fn, mat):\n res_mat = Mat(mat.cols, mat.rows)\n for (x, y), v in mat:\n res_mat.set_cell(x, y, fn(v))\n return res_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
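A quick, illustrative use of make_matrix; the identity-matrix example below is not part of the row, it is just the usual way this helper is exercised:

```python
def make_matrix(num_rows, num_cols, entry_fn):
    return [[entry_fn(i, j) for j in range(num_cols)] for i in range(num_rows)]

def is_diagonal(i, j):
    # 1s on the diagonal, 0s everywhere else
    return 1 if i == j else 0

identity = make_matrix(3, 3, is_diagonal)
assert identity == [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
```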
Maximum Influence In-Arborescence function. All the paths to v with propagation probability above theta | def miia(v, theta, grph):
u_list = []
for u in range(grph.number_of_nodes()):
if v == u: continue
path, score = mip(u, v, grph)
if score > theta:
u_list.append(path)
return u_list | [
"def mioa(u, theta, grph):\n v_list = []\n for v in range(grph.number_of_nodes()):\n if v == u: continue\n path, score = mip(u, v, grph)\n if score > theta:\n v_list.append(path)\n return v_list",
"def best_party_without_emp(v):\n if v.g >= 0:\n return v.g\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Maximum Influence Out-Arborescence function. All the paths from u with propagation probability above theta | def mioa(u, theta, grph):
v_list = []
for v in range(grph.number_of_nodes()):
if v == u: continue
path, score = mip(u, v, grph)
if score > theta:
v_list.append(path)
return v_list | [
"def _u_1_Lee09(self, theta, u, rho, theta_max=None):\n if u <= rho:\n return 0. * theta\n out = np.zeros_like(theta)\n mask = (theta <= theta_max)\n if np.any(mask):\n ucos = u * np.cos(theta[mask])\n out[mask] = ucos - np.sqrt(rho * rho - u * u + ucos**... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the in-neighbors of u in miia | def in_neighbors(u, miia):
result_set = []
for path in miia:
if u in path and path[0] != u:
result_set.append(path[path.index(u) - 1])
return result_set | [
"def neighbors(self, u):\r\n if not u in self.vertices:\r\n return\r\n for v in self.vertices[u]:\r\n yield (v, self.vertices[u][v])",
"def neighbor_indices(self):",
"def getNeighbours(self, user=None, limit=None):\n pass",
"def calculateNeighbours(mat):\n N = mat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
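in_neighbors above treats miia as a collection of paths, each path a list of node ids as produced by the miia row. A self-contained example of the intended behavior, under that assumed representation:

```python
def in_neighbors(u, miia):
    result_set = []
    for path in miia:
        if u in path and path[0] != u:
            result_set.append(path[path.index(u) - 1])
    return result_set

# Two paths ending at node 4: 1 -> 2 -> 4 and 3 -> 4.
paths = [[1, 2, 4], [3, 4]]
assert in_neighbors(4, paths) == [2, 3]
assert in_neighbors(1, paths) == []   # 1 only appears as a path start
```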
Updates a City object | def update_city(city_id):
city = storage.get(City, city_id)
if city is None:
abort(404)
new_obj = request.get_json()
if new_obj is None:
abort(400, "Not a JSON")
for key, value in new_obj.items():
if key not in ["id", "state_id", "created_at", "updated_at"]:
            setattr(city, key, value)
    storage.save()
    return jsonify(city.to_dict()), 200 | [
"def update_city(city_id):\n city = storage.get(City, city_id)\n if city is None:\n abort(404)\n req = request.get_json()\n if req is None:\n abort(400, \"Not a JSON\")\n for key, value in req.items():\n if key in ['id', 'created_at', 'updated_at']:\n continue\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates metadata pickle for fast access to post metadata. The goal is to avoid reading all the .md files in full every time we want to do some operation that requires all the metadata. | def RegenerateMetaData():
# Get the posts.
posts = os.listdir(Settings.Settings.webRoot + "/posts/")
# Create meta data dictionary.
metaInfo = {}
# Grouped by tag. Key is tag, value is list of post md files with tag.
metaInfo["byTag"] = {... | [
"def update_metadata(self):\n for element in self.elements:\n self.meta_data[element.name] = element.meta_data()",
"def update_metadata(self, metadata):\n if metadata:\n self._metadata.update(metadata)\n self.to_swap_dir()",
"def update_metadata(self, metadata):\r\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructor, takes input/output dimensions and dropout rate | def __init__(self, in_feats, out_feats,
dropout_rate, bias=True):
super(LinearCustomDropout, self).__init__(
in_feats, out_feats, bias)
self.dropout_mask = torch.nn.Dropout(p=dropout_rate)
return None | [
"def __init__(self, dimensions, dropout=0.5):\n super(NodeDistributor, self).__init__()\n self.dimensions = dimensions\n self.dropout = dropout\n self.linear = torch.nn.Linear(self.dimensions, self.dimensions)",
"def __init__(self, p=0.5, name='dropout_layer'):\n assert 0. <= p ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
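The LinearCustomDropout row shows only __init__; the super() call suggests the class subclasses torch.nn.Linear. The sketch below is a hypothetical reconstruction (forward() and the way dropout_mask is applied are assumptions), followed by a usage line:

```python
import torch

class LinearCustomDropout(torch.nn.Linear):
    # Hypothetical reconstruction: only __init__ appears in the row above,
    # so forward() and the use of dropout_mask are assumptions.
    def __init__(self, in_feats, out_feats, dropout_rate, bias=True):
        super(LinearCustomDropout, self).__init__(in_feats, out_feats, bias)
        self.dropout_mask = torch.nn.Dropout(p=dropout_rate)

    def forward(self, x):
        # apply the affine map, then dropout
        return self.dropout_mask(super().forward(x))

layer = LinearCustomDropout(8, 4, dropout_rate=0.5)
out = layer(torch.randn(2, 8))   # shape: (2, 4)
```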
The constraints on `className` are the same as those on the first arguments of `namedtuple()` and `type()`. `length` is a nonnegative integer. The position names are guaranteed to be sensibly set, if `positionNames` is either a boolean or an iterator that yields valid identifiers. Possible | def mathtuple(className, length, positionNames=True):
if not (length >= 0 and isinstance(length, Integral)):
raise ValueError('"length" must be a non-negative integer')
fmtStr = '_{{:0{:d}}}'.format(len(str(length-1)))
if positionNames is True:
if length <= 3:
reifiedSyms... | [
"def namedtuple(typename, field_names, verbose=False, rename=False):\n\n # Validate the field names. At the user's option, either generate an error\n # message or automatically replace the field name with a valid name.\n if isinstance(field_names, basestring):\n field_names = field_names.replace(',... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a set of configured Google Home instances. | def configured_google_homes(hass):
return {
entry.data.get(CONF_NAME) for entry in hass.config_entries.async_entries(DOMAIN)
} | [
"def get_jenkins_instances(worker_fleets=False):\n gqlapi = gql.get_api()\n query = Template(JENKINS_INSTANCES_QUERY).render(worker_fleets=worker_fleets)\n return gqlapi.query(query)[\"instances\"]",
"def all_instances(self):\n _logger.debug('%s', where_am_i())\n instances = []\n for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The organizations.networks.index endpoint should be correct. | def test_organization_networks_index(self):
self.assertEqual(
"https://dashboard.meraki.com/api/v0/organizations/"
+ ORGANIZATION_ID
+ "/networks"
, MerakiAPI(KEY)
.organizations(ORGANIZATION_ID)
.networks()
.lazy()
... | [
"def test_organization_networks_show(self):\n self.assertEqual(\n \"https://dashboard.meraki.com/api/v0/organizations/\"\n + ORGANIZATION_ID\n + \"/networks/\"\n + NETWORK_ID\n , MerakiAPI(KEY)\n .organizations(ORGANIZATION_ID)\n .n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |