| body<br>stringlengths 26 to 98.2k | body_hash<br>int64 -9,222,864,604,528,158,000 to 9,221,803,474B | docstring<br>stringlengths 1 to 16.8k | path<br>stringlengths 5 to 230 | name<br>stringlengths 1 to 96 | repository_name<br>stringlengths 7 to 89 | lang<br>stringclasses 1 value | body_without_docstring<br>stringlengths 20 to 98.2k |
|---|---|---|---|---|---|---|---|
| def NormalizeFieldTypeName(field_fqn):<br>'Normalize a fully qualified field type name, e.g.\n\n .envoy.foo.bar.\n\n Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.\n\n Args:\n field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.\n Return: Normalized type name.\n '<br>if fie... | -589,018,630,924,515,800 | Normalize a fully qualified field type name, e.g.<br>.envoy.foo.bar.<br>Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.<br>Args:<br>field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.<br>Return: Normalized type name. | tools/protodoc/protodoc.py | NormalizeFieldTypeName | Gsantomaggio/envoy | python | def NormalizeFieldTypeName(field_fqn):<br>'Normalize a fully qualified field type name, e.g.\n\n .envoy.foo.bar.\n\n Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.\n\n Args:\n field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.\n Return: Normalized type name.\n '<br>if fie... |
| def NormalizeTypeContextName(type_name):<br>'Normalize a type name, e.g.\n\n envoy.foo.bar.\n\n Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.\n\n Args:\n type_name: a name from a TypeContext.\n Return: Normalized type name.\n '<br>return NormalizeFieldTypeName(QualifyTypeName(type_name)) | 6,082,845,560,899,143,000 | Normalize a type name, e.g.<br>envoy.foo.bar.<br>Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.<br>Args:<br>type_name: a name from a TypeContext.<br>Return: Normalized type name. | tools/protodoc/protodoc.py | NormalizeTypeContextName | Gsantomaggio/envoy | python | def NormalizeTypeContextName(type_name):<br>'Normalize a type name, e.g.\n\n envoy.foo.bar.\n\n Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.\n\n Args:\n type_name: a name from a TypeContext.\n Return: Normalized type name.\n '<br>return NormalizeFieldTypeName(QualifyTypeName(type_name)) |
| def FormatEmph(s):<br>'RST format a string for emphasis.'<br>return ('*%s*' % s) | 5,429,408,957,495,039,000 | RST format a string for emphasis. | tools/protodoc/protodoc.py | FormatEmph | Gsantomaggio/envoy | python | def FormatEmph(s):<br>return ('*%s*' % s) |
| def FormatFieldType(type_context, field):<br>'Format a FieldDescriptorProto type description.\n\n Adds cross-refs for message types.\n TODO(htuch): Add cross-refs for enums as well.\n\n Args:\n type_context: contextual information for message/enum/field.\n field: FieldDescriptor proto.\n Return: RST formatte... | 5,157,686,213,921,965,000 | Format a FieldDescriptorProto type description.<br>Adds cross-refs for message types.<br>TODO(htuch): Add cross-refs for enums as well.<br>Args:<br>type_context: contextual information for message/enum/field.<br>field: FieldDescriptor proto.<br>Return: RST formatted field type. | tools/protodoc/protodoc.py | FormatFieldType | Gsantomaggio/envoy | python | def FormatFieldType(type_context, field):<br>'Format a FieldDescriptorProto type description.\n\n Adds cross-refs for message types.\n TODO(htuch): Add cross-refs for enums as well.\n\n Args:\n type_context: contextual information for message/enum/field.\n field: FieldDescriptor proto.\n Return: RST formatte... |
| def StripLeadingSpace(s):<br>'Remove leading space in flat comment strings.'<br>return MapLines((lambda s: s[1:]), s) | 4,293,869,762,082,076,700 | Remove leading space in flat comment strings. | tools/protodoc/protodoc.py | StripLeadingSpace | Gsantomaggio/envoy | python | def StripLeadingSpace(s):<br>return MapLines((lambda s: s[1:]), s) |
| def FileCrossRefLabel(msg_name):<br>'File cross reference label.'<br>return ('envoy_api_file_%s' % msg_name) | -2,295,307,785,000,013,600 | File cross reference label. | tools/protodoc/protodoc.py | FileCrossRefLabel | Gsantomaggio/envoy | python | def FileCrossRefLabel(msg_name):<br>return ('envoy_api_file_%s' % msg_name) |
| def MessageCrossRefLabel(msg_name):<br>'Message cross reference label.'<br>return ('envoy_api_msg_%s' % msg_name) | 1,987,750,585,387,631,900 | Message cross reference label. | tools/protodoc/protodoc.py | MessageCrossRefLabel | Gsantomaggio/envoy | python | def MessageCrossRefLabel(msg_name):<br>return ('envoy_api_msg_%s' % msg_name) |
| def EnumCrossRefLabel(enum_name):<br>'Enum cross reference label.'<br>return ('envoy_api_enum_%s' % enum_name) | -7,816,271,191,061,070,000 | Enum cross reference label. | tools/protodoc/protodoc.py | EnumCrossRefLabel | Gsantomaggio/envoy | python | def EnumCrossRefLabel(enum_name):<br>return ('envoy_api_enum_%s' % enum_name) |
| def FieldCrossRefLabel(field_name):<br>'Field cross reference label.'<br>return ('envoy_api_field_%s' % field_name) | 2,143,585,757,202,854 | Field cross reference label. | tools/protodoc/protodoc.py | FieldCrossRefLabel | Gsantomaggio/envoy | python | def FieldCrossRefLabel(field_name):<br>return ('envoy_api_field_%s' % field_name) |
| def EnumValueCrossRefLabel(enum_value_name):<br>'Enum value cross reference label.'<br>return ('envoy_api_enum_value_%s' % enum_value_name) | -1,175,846,287,253,893,400 | Enum value cross reference label. | tools/protodoc/protodoc.py | EnumValueCrossRefLabel | Gsantomaggio/envoy | python | def EnumValueCrossRefLabel(enum_value_name):<br>return ('envoy_api_enum_value_%s' % enum_value_name) |
| def FormatAnchor(label):<br>'Format a label as an Envoy API RST anchor.'<br>return ('.. _%s:\n\n' % label) | -4,015,010,840,871,764,500 | Format a label as an Envoy API RST anchor. | tools/protodoc/protodoc.py | FormatAnchor | Gsantomaggio/envoy | python | def FormatAnchor(label):<br>return ('.. _%s:\n\n' % label) |
| def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest):<br>'Format a FieldDescriptorProto as RST definition list item.\n\n Args:\n outer_type_context: contextual information for enclosing message.\n type_context: contextual information for message/enum/field.\n field:... | -7,184,781,647,353,333,000 | Format a FieldDescriptorProto as RST definition list item.<br>Args:<br>outer_type_context: contextual information for enclosing message.<br>type_context: contextual information for message/enum/field.<br>field: FieldDescriptorProto.<br>protodoc_manifest: tools.protodoc.Manifest for proto.<br>Returns:<br>RST formatted definition... | tools/protodoc/protodoc.py | FormatFieldAsDefinitionListItem | Gsantomaggio/envoy | python | def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest):<br>'Format a FieldDescriptorProto as RST definition list item.\n\n Args:\n outer_type_context: contextual information for enclosing message.\n type_context: contextual information for message/enum/field.\n field:... |
| def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest):<br>'Format a DescriptorProto as RST definition list.\n\n Args:\n type_context: contextual information for message/enum/field.\n msg: DescriptorProto.\n protodoc_manifest: tools.protodoc.Manifest for proto.\n\n Returns:\n RST formatt... | 4,753,135,101,386,673,000 | Format a DescriptorProto as RST definition list.<br>Args:<br>type_context: contextual information for message/enum/field.<br>msg: DescriptorProto.<br>protodoc_manifest: tools.protodoc.Manifest for proto.<br>Returns:<br>RST formatted definition list item. | tools/protodoc/protodoc.py | FormatMessageAsDefinitionList | Gsantomaggio/envoy | python | def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest):<br>'Format a DescriptorProto as RST definition list.\n\n Args:\n type_context: contextual information for message/enum/field.\n msg: DescriptorProto.\n protodoc_manifest: tools.protodoc.Manifest for proto.\n\n Returns:\n RST formatt... |
| def FormatEnumValueAsDefinitionListItem(type_context, enum_value):<br>'Format a EnumValueDescriptorProto as RST definition list item.\n\n Args:\n type_context: contextual information for message/enum/field.\n enum_value: EnumValueDescriptorProto.\n\n Returns:\n RST formatted definition list item.\n '<br>a... | -5,338,880,447,968,518,000 | Format a EnumValueDescriptorProto as RST definition list item.<br>Args:<br>type_context: contextual information for message/enum/field.<br>enum_value: EnumValueDescriptorProto.<br>Returns:<br>RST formatted definition list item. | tools/protodoc/protodoc.py | FormatEnumValueAsDefinitionListItem | Gsantomaggio/envoy | python | def FormatEnumValueAsDefinitionListItem(type_context, enum_value):<br>'Format a EnumValueDescriptorProto as RST definition list item.\n\n Args:\n type_context: contextual information for message/enum/field.\n enum_value: EnumValueDescriptorProto.\n\n Returns:\n RST formatted definition list item.\n '<br>a... |
| def FormatEnumAsDefinitionList(type_context, enum):<br>'Format a EnumDescriptorProto as RST definition list.\n\n Args:\n type_context: contextual information for message/enum/field.\n enum: DescriptorProto.\n\n Returns:\n RST formatted definition list item.\n '<br>return ('\n'.join((FormatEnumValueAsDefin... | 2,359,914,086,906,887,000 | Format a EnumDescriptorProto as RST definition list.<br>Args:<br>type_context: contextual information for message/enum/field.<br>enum: DescriptorProto.<br>Returns:<br>RST formatted definition list item. | tools/protodoc/protodoc.py | FormatEnumAsDefinitionList | Gsantomaggio/envoy | python | def FormatEnumAsDefinitionList(type_context, enum):<br>'Format a EnumDescriptorProto as RST definition list.\n\n Args:\n type_context: contextual information for message/enum/field.\n enum: DescriptorProto.\n\n Returns:\n RST formatted definition list item.\n '<br>return ('\n'.join((FormatEnumValueAsDefin... |
| def FormatProtoAsBlockComment(proto):<br>'Format a proto as a RST block comment.\n\n Useful in debugging, not usually referenced.\n '<br>return (('\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto))) + '\n') | -4,856,622,305,351,986,000 | Format a proto as a RST block comment.<br>Useful in debugging, not usually referenced. | tools/protodoc/protodoc.py | FormatProtoAsBlockComment | Gsantomaggio/envoy | python | def FormatProtoAsBlockComment(proto):<br>'Format a proto as a RST block comment.\n\n Useful in debugging, not usually referenced.\n '<br>return (('\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto))) + '\n') |
| def max_positions(self):<br>'Return None to allow model to dictate max sentence length allowed'<br>return None | -3,937,559,029,037,020,700 | Return None to allow model to dictate max sentence length allowed | pytorch_translate/tasks/pytorch_translate_multi_task.py | max_positions | Meteorix/translate | python | def max_positions(self):<br>return None |
def __init__(self, authtoken, authurl, user, key, tenant_name, auth_version, container_name, temp_url_key, temp_url_key2, connection_retry_count, chosen_temp_url_key):
'Init routine.'
self.requests = requests
self.authurl = authurl
self.preauthtoken = authtoken
self.user = user
self.key = key
... | -1,212,660,782,708,376,800 | Init routine. | src/config/fabric-ansible/ansible-playbooks/library/swift_fileutil.py | __init__ | edwinpjacques/contrail-controller | python | def __init__(self, authtoken, authurl, user, key, tenant_name, auth_version, container_name, temp_url_key, temp_url_key2, connection_retry_count, chosen_temp_url_key):
self.requests = requests
self.authurl = authurl
self.preauthtoken = authtoken
self.user = user
self.key = key
self.auth_ver... |
| @pytest.mark.level0<br>@pytest.mark.platform_arm_ascend_training<br>@pytest.mark.platform_x86_ascend_training<br>@pytest.mark.env_onecard<br>def test_run(self):<br>'\n run case.#\n :return:\n '<br>self.common_run(self.testarg) | 784,258,713,962,435,200 | run case.#<br>:return: | tests/st/ops/ascend/vector/test_expm1_001.py | test_run | mindspore-ai/akg | python | @pytest.mark.level0<br>@pytest.mark.platform_arm_ascend_training<br>@pytest.mark.platform_x86_ascend_training<br>@pytest.mark.env_onecard<br>def test_run(self):<br>'\n run case.#\n :return:\n '<br>self.common_run(self.testarg) |
| def test_run_rpc_cloud(self):<br>'\n run case.#\n :return:\n '<br>self.common_run(self.testarg_rpc_cloud) | 3,985,071,734,288,005,600 | run case.#<br>:return: | tests/st/ops/ascend/vector/test_expm1_001.py | test_run_rpc_cloud | mindspore-ai/akg | python | def test_run_rpc_cloud(self):<br>'\n run case.#\n :return:\n '<br>self.common_run(self.testarg_rpc_cloud) |
| def teardown(self):<br>'\n clean environment\n :return:\n '<br>self._log.info('============= {0} Teardown============'.format(self.casename))<br>return | -5,374,107,392,887,249,000 | clean environment<br>:return: | tests/st/ops/ascend/vector/test_expm1_001.py | teardown | mindspore-ai/akg | python | def teardown(self):<br>'\n clean environment\n :return:\n '<br>self._log.info('============= {0} Teardown============'.format(self.casename))<br>return |
async def async_step_user(self, user_input: (dict[(str, Any)] | None)=None) -> FlowResult:
'Handle the initial step.'
errors: dict[(str, str)] = {}
if (self._options is None):
coordinator = (await get_coordinator(self.hass))
if ((not coordinator.last_update_success) or (coordinator.data is N... | -8,851,942,273,044,763,000 | Handle the initial step. | homeassistant/components/coronavirus/config_flow.py | async_step_user | bimmbo/core | python | async def async_step_user(self, user_input: (dict[(str, Any)] | None)=None) -> FlowResult:
errors: dict[(str, str)] = {}
if (self._options is None):
coordinator = (await get_coordinator(self.hass))
if ((not coordinator.last_update_success) or (coordinator.data is None)):
return ... |
def get_titletext_for_role(self, role):
'\n Get a short title briefly describing the given ``role``.\n Remember that the role is n Period.\n '
period = role
return period | -5,426,291,248,196,443,000 | Get a short title briefly describing the given ``role``.
Remember that the role is n Period. | devilry/devilry_admin/views/period/crinstance_period.py | get_titletext_for_role | aless80/devilry-django | python | def get_titletext_for_role(self, role):
'\n Get a short title briefly describing the given ``role``.\n Remember that the role is n Period.\n '
period = role
return period |
def get_devilryrole_for_requestuser(self):
'\n Get the devilryrole for the requesting user on the current\n period (request.cradmin_instance).\n\n The return values is the same as for\n :meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period... | 3,701,731,663,110,944,300 | Get the devilryrole for the requesting user on the current
period (request.cradmin_instance).
The return values is the same as for
:meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`,
exept that this method raises ValueError if it does not find a role. | devilry/devilry_admin/views/period/crinstance_period.py | get_devilryrole_for_requestuser | aless80/devilry-django | python | def get_devilryrole_for_requestuser(self):
'\n Get the devilryrole for the requesting user on the current\n period (request.cradmin_instance).\n\n The return values is the same as for\n :meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period... |
def get_text_index_string(self, text):
'\n Return a string of text containing part-of-speech, lemma pairs.\n '
if (len(text) <= 2):
text_without_punctuation = text.translate(self.punctuation_table)
if (len(text_without_punctuation) >= 1):
text = text_without_punctuation... | 7,772,530,436,711,282,000 | Return a string of text containing part-of-speech, lemma pairs. | app/chatterbot/tagging.py | get_text_index_string | Jack2313/WeChatterBot | python | def get_text_index_string(self, text):
'\n \n '
if (len(text) <= 2):
text_without_punctuation = text.translate(self.punctuation_table)
if (len(text_without_punctuation) >= 1):
text = text_without_punctuation
document = self.nlp(text)
if (len(text) <= 2):
... |
def my_worker_splitter(urls):
'Split urls per worker\n Selects a subset of urls based on Torch get_worker_info.\n Used as a shard selection function in Dataset.\n replaces wds.split_by_worker'
urls = [url for url in urls]
assert isinstance(urls, list)
worker_info = torch.utils.data.get_worker_i... | 8,415,511,833,976,402,000 | Split urls per worker
Selects a subset of urls based on Torch get_worker_info.
Used as a shard selection function in Dataset.
replaces wds.split_by_worker | test_train_mp_wds_local.py | my_worker_splitter | mlexample/gcspytorchimagenet | python | def my_worker_splitter(urls):
'Split urls per worker\n Selects a subset of urls based on Torch get_worker_info.\n Used as a shard selection function in Dataset.\n replaces wds.split_by_worker'
urls = [url for url in urls]
assert isinstance(urls, list)
worker_info = torch.utils.data.get_worker_i... |
def my_node_splitter(urls):
'Split urls_ correctly per accelerator node\n :param urls:\n :return: slice of urls_\n '
rank = xm.get_ordinal()
num_replicas = xm.xrt_world_size()
urls_this = urls[rank::num_replicas]
return urls_this | -7,302,456,343,695,276,000 | Split urls_ correctly per accelerator node
:param urls:
:return: slice of urls_ | test_train_mp_wds_local.py | my_node_splitter | mlexample/gcspytorchimagenet | python | def my_node_splitter(urls):
'Split urls_ correctly per accelerator node\n :param urls:\n :return: slice of urls_\n '
rank = xm.get_ordinal()
num_replicas = xm.xrt_world_size()
urls_this = urls[rank::num_replicas]
return urls_this |
| def testFieldDescriptionUpdatedEvent(self):<br>'Test FieldDescriptionUpdatedEvent'<br>pass | 7,405,285,144,728,888,000 | Test FieldDescriptionUpdatedEvent | python/test/test_field_description_updated_event.py | testFieldDescriptionUpdatedEvent | dlens/dlxapi | python | def testFieldDescriptionUpdatedEvent(self):<br>pass |
| def __attrs_post_init__(self) -> None:<br>'Register events.'<br>self.register_event(self.on_selected) | -4,143,778,683,437,158,400 | Register events. | earwax/menus/menu_item.py | __attrs_post_init__ | chrisnorman7/earwax | python | def __attrs_post_init__(self) -> None:<br>self.register_event(self.on_selected) |
| def get_title(self) -> Optional[str]:<br>'Return the proper title of this object.\n\n If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable,\n its return value will be returned.\n '<br>if callable(self.title):<br>return self.title()<br>return self.title | 2,710,188,890,296,733,000 | Return the proper title of this object.<br>If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable,<br>its return value will be returned. | earwax/menus/menu_item.py | get_title | chrisnorman7/earwax | python | def get_title(self) -> Optional[str]:<br>'Return the proper title of this object.\n\n If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable,\n its return value will be returned.\n '<br>if callable(self.title):<br>return self.title()<br>return self.title |
| def on_selected(self) -> None:<br>'Handle this menu item being selected.'<br>pass | -2,197,262,279,189,630,500 | Handle this menu item being selected. | earwax/menus/menu_item.py | on_selected | chrisnorman7/earwax | python | def on_selected(self) -> None:<br>pass |
def all_discrete(comp_dists):
'\n Determine if all distributions in comp_dists are discrete\n '
if isinstance(comp_dists, Distribution):
return isinstance(comp_dists, Discrete)
else:
return all((isinstance(comp_dist, Discrete) for comp_dist in comp_dists)) | -2,878,885,385,310,497,000 | Determine if all distributions in comp_dists are discrete | pymc3/distributions/mixture.py | all_discrete | himkt/pymc3 | python | def all_discrete(comp_dists):
'\n \n '
if isinstance(comp_dists, Distribution):
return isinstance(comp_dists, Discrete)
else:
return all((isinstance(comp_dist, Discrete) for comp_dist in comp_dists)) |
| def __init__(self):<br>'\n todo: this. what fields does it need?\n '<br>pass | -683,415,658,845,593,700 | todo: this. what fields does it need? | src/models/Massey.py | __init__ | alhart2015/march-madness | python | def __init__(self):<br>'\n \n '<br>pass |
| def rank(self) -> List[Team]:<br>'\n Given a matrix, create a power ranking of the teams\n '<br>pass | 1,653,216,557,902,713,300 | Given a matrix, create a power ranking of the teams | src/models/Massey.py | rank | alhart2015/march-madness | python | def rank(self) -> List[Team]:<br>'\n \n '<br>pass |
| def predict_bracket(self) -> Bracket:<br>'\n Given a ranking of the teams, and the draw for the bracket, predict who wins and stuff\n '<br>pass | 2,216,859,857,913,330,200 | Given a ranking of the teams, and the draw for the bracket, predict who wins and stuff | src/models/Massey.py | predict_bracket | alhart2015/march-madness | python | def predict_bracket(self) -> Bracket:<br>'\n \n '<br>pass |
| @staticmethod<br>def from_file(filename: str) -> Massey:<br>'\n todo: docs\n todo: weighting param?\n\n parse teams and games from file\n create matrix from teams and games\n '<br>pass | 4,295,603,889,069,199,400 | todo: docs<br>todo: weighting param?<br>parse teams and games from file<br>create matrix from teams and games | src/models/Massey.py | from_file | alhart2015/march-madness | python | @staticmethod<br>def from_file(filename: str) -> Massey:<br>'\n todo: docs\n todo: weighting param?\n\n parse teams and games from file\n create matrix from teams and games\n '<br>pass |
def linear_warmup_and_cosine_protocol(f_values: Tuple[(float, float, float)], x_milestones: Tuple[(int, int, int, int)]):
'\n There are 5 regions:\n 1. constant at f0 for x < x0\n 2. linear increase from f0 to f1 for x0 < x < x1\n 3. constant at f1 for x1 < x < x2\n 4. cosine protocol from f1 to f2 f... | 5,977,864,187,232,458,000 | There are 5 regions:
1. constant at f0 for x < x0
2. linear increase from f0 to f1 for x0 < x < x1
3. constant at f1 for x1 < x < x2
4. cosine protocol from f1 to f2 for x2 < x < x3
5. constant at f2 for x > x3
If you want a linear_ramp followed by a cosine_decay only simply set:
1. x0=0 (to eliminate the first consta... | src/tissue_purifier/models/_optim_scheduler.py | linear_warmup_and_cosine_protocol | broadinstitute/tissue_purifier | python | def linear_warmup_and_cosine_protocol(f_values: Tuple[(float, float, float)], x_milestones: Tuple[(int, int, int, int)]):
'\n There are 5 regions:\n 1. constant at f0 for x < x0\n 2. linear increase from f0 to f1 for x0 < x < x1\n 3. constant at f1 for x1 < x < x2\n 4. cosine protocol from f1 to f2 f... |
@torch.no_grad()
def step(self, closure=None):
'Performs a single optimization step.\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
with torch.enable_grad():
... | -4,350,163,514,070,495,000 | Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss. | src/tissue_purifier/models/_optim_scheduler.py | step | broadinstitute/tissue_purifier | python | @torch.no_grad()
def step(self, closure=None):
'Performs a single optimization step.\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
with torch.enable_grad():
... |
def self_generate(output_filename, filename='iso3166-1.csv'):
'\n The following code can be used for self-generation of this file.\n\n It requires a UTF-8 CSV file containing the short ISO name and two letter\n country code as the first two columns.\n '
import csv
import re
countries = []
... | 7,161,760,376,718,209,000 | The following code can be used for self-generation of this file.
It requires a UTF-8 CSV file containing the short ISO name and two letter
country code as the first two columns. | django_countries/data.py | self_generate | Bounder/django-countries | python | def self_generate(output_filename, filename='iso3166-1.csv'):
'\n The following code can be used for self-generation of this file.\n\n It requires a UTF-8 CSV file containing the short ISO name and two letter\n country code as the first two columns.\n '
import csv
import re
countries = []
... |
def test_positive_without_instance_attr(self):
' Test if the target class without a singleton attribute. '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
pass
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.'... | -2,292,913,109,432,330,800 | Test if the target class without a singleton attribute. | test/ut/test_decorator_common_singleton.py | test_positive_without_instance_attr | shiroyuki/Tori | python | def test_positive_without_instance_attr(self):
' '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
pass
self.assertTrue(True, 'Singleton Class: Passed the initialization as expected.')
except SingletonInitializationException:
... |
def test_positive_using_decorator_with_primitive_parameters(self):
' Test if the target class without a singleton attribute but using a decorator with primitive parameters. '
try:
@singleton(10)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, ... | 377,191,850,248,781,300 | Test if the target class without a singleton attribute but using a decorator with primitive parameters. | test/ut/test_decorator_common_singleton.py | test_positive_using_decorator_with_primitive_parameters | shiroyuki/Tori | python | def test_positive_using_decorator_with_primitive_parameters(self):
' '
try:
@singleton(10)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, init_number):
super(self.__class__, self).__init__()
self.number = init... |
def test_positive_for_normal_singleton_with_parameters(self):
' Positive test for @singleton with parameters provided for the constructor '
try:
class SampleDependencyInjection(object):
pass
sample_di = SampleDependencyInjection()
@singleton(sample_di)
class SuperDu... | 3,776,731,983,615,224,300 | Positive test for @singleton with parameters provided for the constructor | test/ut/test_decorator_common_singleton.py | test_positive_for_normal_singleton_with_parameters | shiroyuki/Tori | python | def test_positive_for_normal_singleton_with_parameters(self):
' '
try:
class SampleDependencyInjection(object):
pass
sample_di = SampleDependencyInjection()
@singleton(sample_di)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def _... |
def test_negative_for_normal_singleton_with_class_reference(self):
' Negative test for @singleton with class_reference provided for the constructor '
try:
class SampleDependencyInjection(object):
pass
@singleton(SampleDependencyInjection)
class SuperDummyClass(TestDecorator... | -1,487,090,186,829,476,600 | Negative test for @singleton with class_reference provided for the constructor | test/ut/test_decorator_common_singleton.py | test_negative_for_normal_singleton_with_class_reference | shiroyuki/Tori | python | def test_negative_for_normal_singleton_with_class_reference(self):
' '
try:
class SampleDependencyInjection(object):
pass
@singleton(SampleDependencyInjection)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_in... |
def test_positive_for_singleton_with(self):
' Positive test for @singleton_with(*args, **kwargs) '
try:
class SampleDependencyInjection(object):
pass
@singleton_with(SampleDependencyInjection)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
... | -7,476,538,399,266,750,000 | Positive test for @singleton_with(*args, **kwargs) | test/ut/test_decorator_common_singleton.py | test_positive_for_singleton_with | shiroyuki/Tori | python | def test_positive_for_singleton_with(self):
' '
try:
class SampleDependencyInjection(object):
pass
@singleton_with(SampleDependencyInjection)
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
def __init__(self, dependency_injection):
... |
def test_negative_with_existed_singleton_instance(self):
' Test if the target class is with null singleton attribute. '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
_singleton_instance = None
def __init__(self):
sup... | 7,762,687,949,380,224,000 | Test if the target class is with null singleton attribute. | test/ut/test_decorator_common_singleton.py | test_negative_with_existed_singleton_instance | shiroyuki/Tori | python | def test_negative_with_existed_singleton_instance(self):
' '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
_singleton_instance = None
def __init__(self):
super(self.__class__, self).__init__()
self.assertTru... |
def test_negative_with_unexpected_instance_attr(self):
' Test if the target class has already had an attribute `_singleton_instance` but it is not compatible. '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
_singleton_instance = {}
... | -3,037,646,351,450,630,000 | Test if the target class has already had an attribute `_singleton_instance` but it is not compatible. | test/ut/test_decorator_common_singleton.py | test_negative_with_unexpected_instance_attr | shiroyuki/Tori | python | def test_negative_with_unexpected_instance_attr(self):
' '
try:
@singleton
class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
_singleton_instance = {}
def __init__(self):
super(self.__class__, self).__init__()
self.assertTrue(Fa... |
| def match(given, definition):<br>'Returns true if a given argument matches the definition.'<br>if isinstance(definition, Variable):<br>return True<br>return (definition == given) | 758,407,112,162,061,200 | Returns true if a given argument matches the definition. | lib/statement.py | match | bendmorris/beaver | python | def match(given, definition):<br>if isinstance(definition, Variable):<br>return True<br>return (definition == given) |
def replace(self, *varsets):
'Checks each part of the statement against defined variables. If any\n matches are found, the statement is updated. If the statement is a function\n call, a new set of statements is returned; otherwise, None is returned.'
matched = False
subj = self.subject
if ... | 1,030,962,870,911,389,000 | Checks each part of the statement against defined variables. If any
matches are found, the statement is updated. If the statement is a function
call, a new set of statements is returned; otherwise, None is returned. | lib/statement.py | replace | bendmorris/beaver | python | def replace(self, *varsets):
'Checks each part of the statement against defined variables. If any\n matches are found, the statement is updated. If the statement is a function\n call, a new set of statements is returned; otherwise, None is returned.'
matched = False
subj = self.subject
if ... |
def __init__(self, id, full_shard_id_list, root_tip):
' Empty full_shard_id_list means root '
if isinstance(id, bytes):
self.id = id
else:
self.id = bytes(id, 'ascii')
self.full_shard_id_list = full_shard_id_list
self.root_tip = root_tip | 3,254,462,930,744,925,700 | Empty full_shard_id_list means root | quarkchain/cluster/rpc.py | __init__ | QuarkChain/pyquarkchain | python | def __init__(self, id, full_shard_id_list, root_tip):
' '
if isinstance(id, bytes):
self.id = id
else:
self.id = bytes(id, 'ascii')
self.full_shard_id_list = full_shard_id_list
self.root_tip = root_tip |
def __init__(self, id, full_shard_id_list):
' Empty slave_id and full_shard_id_list means root '
if isinstance(id, bytes):
self.id = id
else:
self.id = bytes(id, 'ascii')
self.full_shard_id_list = full_shard_id_list | 5,757,208,166,312,482,000 | Empty slave_id and full_shard_id_list means root | quarkchain/cluster/rpc.py | __init__ | QuarkChain/pyquarkchain | python | def __init__(self, id, full_shard_id_list):
' '
if isinstance(id, bytes):
self.id = id
else:
self.id = bytes(id, 'ascii')
self.full_shard_id_list = full_shard_id_list |
def fitnessFunction(chromosome):
'\n Given a "chromosome", this function must determine its fitness score\n The fitness score should be a floating point value. If the fitness is zero or smaller\n then the chromosome will not be allowed to "reproduce"\n '
a = chromosome['a']
b = chromosome['b'... | 302,749,153,459,521,800 | Given a "chromosome", this function must determine its fitness score
The fitness score should be a floating point value. If the fitness is zero or smaller
then the chromosome will not be allowed to "reproduce" | examples/EquationSolver_simple.py | fitnessFunction | littley/pyvolution | python | def fitnessFunction(chromosome):
'\n Given a "chromosome", this function must determine its fitness score\n The fitness score should be a floating point value. If the fitness is zero or smaller\n then the chromosome will not be allowed to "reproduce"\n '
a = chromosome['a']
b = chromosome['b'... |
def equation_checker(equation):
'\n Check equation for balanced parentheses\n\n Args:\n equation(string): String form of equation\n Returns:\n bool: Return if parentheses are balanced or not\n '
opening_parenthesis = Stack()
closing_parenthesis = Stack()
for _ in equation:
... | 4,243,911,488,360,488,000 | Check equation for balanced parentheses
Args:
equation(string): String form of equation
Returns:
bool: Return if parentheses are balanced or not | 3. data_structures/stack/balanced_parantheses.py | equation_checker | m-01101101/udacity-datastructures-algorithms | python | def equation_checker(equation):
'\n Check equation for balanced parentheses\n\n Args:\n equation(string): String form of equation\n Returns:\n bool: Return if parentheses are balanced or not\n '
opening_parenthesis = Stack()
closing_parenthesis = Stack()
for _ in equation:
... |
| def _equation_checker(equation):<br>'\n Check equation for balanced parentheses\n '<br>return (equation.count('(') == equation.count(')')) | -2,766,931,488,167,891,000 | Check equation for balanced parentheses | 3. data_structures/stack/balanced_parantheses.py | _equation_checker | m-01101101/udacity-datastructures-algorithms | python | def _equation_checker(equation):<br>'\n \n '<br>return (equation.count('(') == equation.count(')')) |
@inject
def load(bento_tag: t.Union[(str, Tag)], tags: t.Optional[t.List[str]]=None, options: t.Optional['tf_ext.SaveOptions']=None, load_as_hub_module: t.Optional[bool]=None, model_store: 'ModelStore'=Provide[BentoMLContainer.model_store]) -> t.Union[('tf_ext.AutoTrackable', 'tf_ext.Module', 'HubModule', 'KerasLayer')... | -5,209,461,294,918,116,000 | Load a model from BentoML local modelstore with given name.
Args:
bento_tag (:code:`Union[str, Tag]`):
Tag of a saved model in BentoML local modelstore.
tags (:code:`str`, `optional`, defaults to `None`):
A set of strings specifying the graph variant to use, if loading from a v1 module.
opt... | bentoml/_internal/frameworks/tensorflow_v2.py | load | almirb/BentoML | python | @inject
def load(bento_tag: t.Union[(str, Tag)], tags: t.Optional[t.List[str]]=None, options: t.Optional['tf_ext.SaveOptions']=None, load_as_hub_module: t.Optional[bool]=None, model_store: 'ModelStore'=Provide[BentoMLContainer.model_store]) -> t.Union[('tf_ext.AutoTrackable', 'tf_ext.Module', 'HubModule', 'KerasLayer')... |
@inject
def import_from_tfhub(identifier: t.Union[(str, 'HubModule', 'KerasLayer')], name: t.Optional[str]=None, labels: t.Optional[t.Dict[(str, str)]]=None, custom_objects: t.Optional[t.Dict[(str, t.Any)]]=None, metadata: t.Optional[t.Dict[(str, t.Any)]]=None, model_store: 'ModelStore'=Provide[BentoMLContainer.model_s... | 5,087,025,802,375,021,000 | Import a model from `Tensorflow Hub <https://tfhub.dev/>`_ to BentoML modelstore.
Args:
identifier (:code:`Union[str, tensorflow_hub.Module, tensorflow_hub.KerasLayer]`): Identifier accepts
two type of inputs:
- if `type` of :code:`identifier` either of type :code:`tensorflow_hub.Module` (**leg... | bentoml/_internal/frameworks/tensorflow_v2.py | import_from_tfhub | almirb/BentoML | python | @inject
def import_from_tfhub(identifier: t.Union[(str, 'HubModule', 'KerasLayer')], name: t.Optional[str]=None, labels: t.Optional[t.Dict[(str, str)]]=None, custom_objects: t.Optional[t.Dict[(str, t.Any)]]=None, metadata: t.Optional[t.Dict[(str, t.Any)]]=None, model_store: 'ModelStore'=Provide[BentoMLContainer.model_s... |
@inject
def save(name: str, model: t.Union[('PathType', 'tf_ext.KerasModel', 'tf_ext.Module')], *, signatures: t.Optional['tf_ext.ConcreteFunction']=None, options: t.Optional['tf_ext.SaveOptions']=None, labels: t.Optional[t.Dict[(str, str)]]=None, custom_objects: t.Optional[t.Dict[(str, t.Any)]]=None, metadata: t.Optio... | -6,741,630,064,561,893,000 | Save a model instance to BentoML modelstore.
Args:
name (:code:`str`):
Name for given model instance. This should pass Python identifier check.
model (:code:`Union[keras.Model, tf.Module, path-like objects]`):
Instance of model to be saved
labels (:code:`Dict[str, str]`, `optional`, default... | bentoml/_internal/frameworks/tensorflow_v2.py | save | almirb/BentoML | python | @inject
def save(name: str, model: t.Union[('PathType', 'tf_ext.KerasModel', 'tf_ext.Module')], *, signatures: t.Optional['tf_ext.ConcreteFunction']=None, options: t.Optional['tf_ext.SaveOptions']=None, labels: t.Optional[t.Dict[(str, str)]]=None, custom_objects: t.Optional[t.Dict[(str, t.Any)]]=None, metadata: t.Optio... |
def load_runner(tag: t.Union[(str, Tag)], *, predict_fn_name: str='__call__', device_id: str='CPU:0', name: t.Optional[str]=None, partial_kwargs: t.Optional[t.Dict[(str, t.Any)]]=None) -> '_TensorflowRunner':
'\n Runner represents a unit of serving logic that can be scaled horizontally to\n maximize throughpu... | -1,421,393,638,133,696,500 | Runner represents a unit of serving logic that can be scaled horizontally to
maximize throughput. `bentoml.tensorflow.load_runner` implements a Runner class that
wrap around a Tensorflow model, which optimize it for the BentoML runtime.
Args:
tag (:code:`Union[str, Tag]`):
Tag of a saved model in BentoML l... | bentoml/_internal/frameworks/tensorflow_v2.py | load_runner | almirb/BentoML | python | def load_runner(tag: t.Union[(str, Tag)], *, predict_fn_name: str='__call__', device_id: str='CPU:0', name: t.Optional[str]=None, partial_kwargs: t.Optional[t.Dict[(str, t.Any)]]=None) -> '_TensorflowRunner':
'\n Runner represents a unit of serving logic that can be scaled horizontally to\n maximize throughpu... |
def test_no_defaults(self, Manifest):
'\n LibreOffice does not use the Default element\n '
xml = '\n <Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">\n <Override PartName="/_rels/.rels" ContentType="application/vnd.openxmlformats-package.relationships+x... | -4,868,845,727,821,458,000 | LibreOffice does not use the Default element | openpyxl/packaging/tests/test_manifest.py | test_no_defaults | chenc2/openpyxl | python | def test_no_defaults(self, Manifest):
'\n \n '
xml = '\n <Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">\n <Override PartName="/_rels/.rels" ContentType="application/vnd.openxmlformats-package.relationships+xml"/>\n </Types>\n '
node ... |
def my_id(self) -> bytes32:
'If node has public cert use that one for id, if not use private.'
if (self.p2p_crt_path is not None):
pem_cert = x509.load_pem_x509_certificate(self.p2p_crt_path.read_bytes(), default_backend())
else:
pem_cert = x509.load_pem_x509_certificate(self._private_cert_p... | -4,359,501,893,485,975,000 | If node has public cert use that one for id, if not use private. | greenberry/server/server.py | my_id | GreenBerry-Network/greenberry-blockchain | python | def my_id(self) -> bytes32:
if (self.p2p_crt_path is not None):
pem_cert = x509.load_pem_x509_certificate(self.p2p_crt_path.read_bytes(), default_backend())
else:
pem_cert = x509.load_pem_x509_certificate(self._private_cert_path.read_bytes(), default_backend())
der_cert_bytes = pem_cert... |
async def garbage_collect_connections_task(self) -> None:
'\n Periodically checks for connections with no activity (have not sent us any data), and removes them,\n to allow room for other peers.\n '
while True:
(await asyncio.sleep(600))
to_remove: List[WSGreenBerryConnectio... | 692,973,664,444,766,700 | Periodically checks for connections with no activity (have not sent us any data), and removes them,
to allow room for other peers. | greenberry/server/server.py | garbage_collect_connections_task | GreenBerry-Network/greenberry-blockchain | python | async def garbage_collect_connections_task(self) -> None:
'\n Periodically checks for connections with no activity (have not sent us any data), and removes them,\n to allow room for other peers.\n '
while True:
(await asyncio.sleep(600))
to_remove: List[WSGreenBerryConnectio... |
async def start_client(self, target_node: PeerInfo, on_connect: Callable=None, auth: bool=False, is_feeler: bool=False) -> bool:
'\n Tries to connect to the target node, adding one connection into the pipeline, if successful.\n An on connect method can also be specified, and this will be saved into th... | 1,556,936,749,418,891,500 | Tries to connect to the target node, adding one connection into the pipeline, if successful.
An on connect method can also be specified, and this will be saved into the instance variables. | greenberry/server/server.py | start_client | GreenBerry-Network/greenberry-blockchain | python | async def start_client(self, target_node: PeerInfo, on_connect: Callable=None, auth: bool=False, is_feeler: bool=False) -> bool:
'\n Tries to connect to the target node, adding one connection into the pipeline, if successful.\n An on connect method can also be specified, and this will be saved into th... |
def from_numpy(X: np.ndarray, dist_type_schema: Dict[(int, str)]=None, lasso_beta: float=0.0, ridge_beta: float=0.0, use_bias: bool=False, hidden_layer_units: Iterable[int]=None, w_threshold: float=None, max_iter: int=100, tabu_edges: List[Tuple[(int, int)]]=None, tabu_parent_nodes: List[int]=None, tabu_child_nodes: Li... | 2,413,458,060,328,271,400 | Learn the `StructureModel`, the graph structure with lasso regularisation
describing conditional dependencies between variables in data presented as a numpy array.
Based on DAGs with NO TEARS.
@inproceedings{zheng2018dags,
author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.},
bookti... | causalnex/structure/pytorch/notears.py | from_numpy | mkretsch327/causalnex | python | def from_numpy(X: np.ndarray, dist_type_schema: Dict[(int, str)]=None, lasso_beta: float=0.0, ridge_beta: float=0.0, use_bias: bool=False, hidden_layer_units: Iterable[int]=None, w_threshold: float=None, max_iter: int=100, tabu_edges: List[Tuple[(int, int)]]=None, tabu_parent_nodes: List[int]=None, tabu_child_nodes: Li... |
def from_pandas(X: pd.DataFrame, dist_type_schema: Dict[(Union[(str, int)], str)]=None, lasso_beta: float=0.0, ridge_beta: float=0.0, use_bias: bool=False, hidden_layer_units: Iterable[int]=None, max_iter: int=100, w_threshold: float=None, tabu_edges: List[Tuple[(str, str)]]=None, tabu_parent_nodes: List[str]=None, tab... | -1,140,084,792,508,126,500 | Learn the `StructureModel`, the graph structure describing conditional dependencies between variables
in data presented as a pandas dataframe.
The optimisation is to minimise a score function :math:`F(W)` over the graph's
weighted adjacency matrix, :math:`W`, subject to the a constraint function :math:`h(W)`,
where :m... | causalnex/structure/pytorch/notears.py | from_pandas | mkretsch327/causalnex | python | def from_pandas(X: pd.DataFrame, dist_type_schema: Dict[(Union[(str, int)], str)]=None, lasso_beta: float=0.0, ridge_beta: float=0.0, use_bias: bool=False, hidden_layer_units: Iterable[int]=None, max_iter: int=100, w_threshold: float=None, tabu_edges: List[Tuple[(str, str)]]=None, tabu_parent_nodes: List[str]=None, tab... |
| def build_treeprocessors(md_instance, **kwargs):<br>' Build the default treeprocessors for Markdown. '<br>treeprocessors = odict.OrderedDict()<br>treeprocessors['inline'] = InlineProcessor(md_instance)<br>treeprocessors['prettify'] = PrettifyTreeprocessor(md_instance)<br>return treeprocessors | 2,951,572,762,128,452,600 | Build the default treeprocessors for Markdown. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | build_treeprocessors | Con-Mi/lambda-packs | python | def build_treeprocessors(md_instance, **kwargs):<br>' '<br>treeprocessors = odict.OrderedDict()<br>treeprocessors['inline'] = InlineProcessor(md_instance)<br>treeprocessors['prettify'] = PrettifyTreeprocessor(md_instance)<br>return treeprocessors |
| def isString(s):<br>" Check if it's string "<br>if (not isinstance(s, util.AtomicString)):<br>return isinstance(s, str)<br>return False | -7,252,295,634,798,910,000 | Check if it's string | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | isString | Con-Mi/lambda-packs | python | def isString(s):<br>" "<br>if (not isinstance(s, util.AtomicString)):<br>return isinstance(s, str)<br>return False |
def run(self, root):
'\n Subclasses of Treeprocessor should implement a `run` method, which\n takes a root ElementTree. This method can return another ElementTree \n object, and the existing root ElementTree will be replaced, or it can \n modify the current tree and return None.\n ... | -3,420,337,297,505,160,000 | Subclasses of Treeprocessor should implement a `run` method, which
takes a root ElementTree. This method can return another ElementTree
object, and the existing root ElementTree will be replaced, or it can
modify the current tree and return None. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | run | Con-Mi/lambda-packs | python | def run(self, root):
'\n Subclasses of Treeprocessor should implement a `run` method, which\n takes a root ElementTree. This method can return another ElementTree \n object, and the existing root ElementTree will be replaced, or it can \n modify the current tree and return None.\n ... |
| def __makePlaceholder(self, type):<br>' Generate a placeholder '<br>id = ('%04d' % len(self.stashed_nodes))<br>hash = (util.INLINE_PLACEHOLDER % id)<br>return (hash, id) | -8,206,074,628,519,016,000 | Generate a placeholder | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __makePlaceholder | Con-Mi/lambda-packs | python | def __makePlaceholder(self, type):<br>' '<br>id = ('%04d' % len(self.stashed_nodes))<br>hash = (util.INLINE_PLACEHOLDER % id)<br>return (hash, id) |
def __findPlaceholder(self, data, index):
'\n Extract id from data string, start from index\n\n Keyword arguments:\n\n * data: string\n * index: index, from which we start search\n\n Returns: placeholder id and string index, after the found placeholder.\n \n '
m ... | -6,930,052,867,000,450,000 | Extract id from data string, start from index
Keyword arguments:
* data: string
* index: index, from which we start search
Returns: placeholder id and string index, after the found placeholder. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __findPlaceholder | Con-Mi/lambda-packs | python | def __findPlaceholder(self, data, index):
'\n Extract id from data string, start from index\n\n Keyword arguments:\n\n * data: string\n * index: index, from which we start search\n\n Returns: placeholder id and string index, after the found placeholder.\n \n '
m ... |
| def __stashNode(self, node, type):<br>' Add node to stash '<br>(placeholder, id) = self.__makePlaceholder(type)<br>self.stashed_nodes[id] = node<br>return placeholder | 321,642,995,561,102,700 | Add node to stash | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __stashNode | Con-Mi/lambda-packs | python | def __stashNode(self, node, type):<br>' '<br>(placeholder, id) = self.__makePlaceholder(type)<br>self.stashed_nodes[id] = node<br>return placeholder |
def __handleInline(self, data, patternIndex=0):
'\n Process string with inline patterns and replace it\n with placeholders\n\n Keyword arguments:\n\n * data: A line of Markdown text\n * patternIndex: The index of the inlinePattern to start with\n\n Returns: String with plac... | 6,206,016,853,998,460,000 | Process string with inline patterns and replace it
with placeholders
Keyword arguments:
* data: A line of Markdown text
* patternIndex: The index of the inlinePattern to start with
Returns: String with placeholders. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __handleInline | Con-Mi/lambda-packs | python | def __handleInline(self, data, patternIndex=0):
'\n Process string with inline patterns and replace it\n with placeholders\n\n Keyword arguments:\n\n * data: A line of Markdown text\n * patternIndex: The index of the inlinePattern to start with\n\n Returns: String with plac... |
def __processElementText(self, node, subnode, isText=True):
"\n Process placeholders in Element.text or Element.tail\n of Elements popped from self.stashed_nodes.\n\n Keywords arguments:\n\n * node: parent node\n * subnode: processing node\n * isText: bool variable, True - ... | 73,273,833,939,332,340 | Process placeholders in Element.text or Element.tail
of Elements popped from self.stashed_nodes.
Keywords arguments:
* node: parent node
* subnode: processing node
* isText: bool variable, True - it's text, False - it's tail
Returns: None | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __processElementText | Con-Mi/lambda-packs | python | def __processElementText(self, node, subnode, isText=True):
"\n Process placeholders in Element.text or Element.tail\n of Elements popped from self.stashed_nodes.\n\n Keywords arguments:\n\n * node: parent node\n * subnode: processing node\n * isText: bool variable, True - ... |
def __processPlaceholders(self, data, parent):
'\n Process string with placeholders and generate ElementTree tree.\n\n Keyword arguments:\n\n * data: string with placeholders instead of ElementTree elements.\n * parent: Element, which contains processing inline data\n\n Returns: l... | 4,380,680,562,387,093,500 | Process string with placeholders and generate ElementTree tree.
Keyword arguments:
* data: string with placeholders instead of ElementTree elements.
* parent: Element, which contains processing inline data
Returns: list with ElementTree elements with applied inline patterns. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __processPlaceholders | Con-Mi/lambda-packs | python | def __processPlaceholders(self, data, parent):
'\n Process string with placeholders and generate ElementTree tree.\n\n Keyword arguments:\n\n * data: string with placeholders instead of ElementTree elements.\n * parent: Element, which contains processing inline data\n\n Returns: l... |
def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
'\n Check if the line fits the pattern, create the necessary\n elements, add it to stashed_nodes.\n\n Keyword arguments:\n\n * data: the text to be processed\n * pattern: the pattern to be checked\n * patt... | -3,648,883,033,776,715,300 | Check if the line fits the pattern, create the necessary
elements, add it to stashed_nodes.
Keyword arguments:
* data: the text to be processed
* pattern: the pattern to be checked
* patternIndex: index of current pattern
* startIndex: string index, from which we start searching
Returns: String with placeholders ins... | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | __applyPattern | Con-Mi/lambda-packs | python | def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
'\n Check if the line fits the pattern, create the necessary\n elements, add it to stashed_nodes.\n\n Keyword arguments:\n\n * data: the text to be processed\n * pattern: the pattern to be checked\n * patt... |
def run(self, tree):
'Apply inline patterns to a parsed Markdown tree.\n\n Iterate over ElementTree, find elements with inline tag, apply inline\n patterns and append newly created Elements to tree. If you don\'t\n want to process your data with inline paterns, instead of normal string,\n ... | 8,504,789,209,487,850,000 | Apply inline patterns to a parsed Markdown tree.
Iterate over ElementTree, find elements with inline tag, apply inline
patterns and append newly created Elements to tree. If you don't
want to process your data with inline paterns, instead of normal string,
use subclass AtomicString:
node.text = markdown.AtomicSt... | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | run | Con-Mi/lambda-packs | python | def run(self, tree):
'Apply inline patterns to a parsed Markdown tree.\n\n Iterate over ElementTree, find elements with inline tag, apply inline\n patterns and append newly created Elements to tree. If you don\'t\n want to process your data with inline paterns, instead of normal string,\n ... |
def _prettifyETree(self, elem):
' Recursively add linebreaks to ElementTree children. '
i = '\n'
if (util.isBlockLevel(elem.tag) and (elem.tag not in ['code', 'pre'])):
if (((not elem.text) or (not elem.text.strip())) and len(elem) and util.isBlockLevel(elem[0].tag)):
elem.text = i
... | -4,502,805,878,882,343,000 | Recursively add linebreaks to ElementTree children. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | _prettifyETree | Con-Mi/lambda-packs | python | def _prettifyETree(self, elem):
' '
i = '\n'
if (util.isBlockLevel(elem.tag) and (elem.tag not in ['code', 'pre'])):
if (((not elem.text) or (not elem.text.strip())) and len(elem) and util.isBlockLevel(elem[0].tag)):
elem.text = i
for e in elem:
if util.isBlockLevel(... |
| def run(self, root):<br>' Add linebreaks to ElementTree root object. '<br>self._prettifyETree(root)<br>brs = root.getiterator('br')<br>for br in brs:<br>if ((not br.tail) or (not br.tail.strip())):<br>br.tail = '\n'<br>else:<br>br.tail = ('\n%s' % br.tail) | -3,843,561,070,745,513,500 | Add linebreaks to ElementTree root object. | Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py | run | Con-Mi/lambda-packs | python | def run(self, root):<br>' '<br>self._prettifyETree(root)<br>brs = root.getiterator('br')<br>for br in brs:<br>if ((not br.tail) or (not br.tail.strip())):<br>br.tail = '\n'<br>else:<br>br.tail = ('\n%s' % br.tail) |
def add_goal_to_payload(func):
"Adds a goal to payload for 't-goal' key."
@functools.wraps(func)
def wrapper_inject_goal(ack: Ack, payload: Dict[(str, Any)], context: BoltContext):
try:
content = GoalContent(content=payload['state']['values'][CREATE_GOAL_INPUT_BLOCK][CREATE_GOAL_INPUT][... | -3,697,957,113,407,593,000 | Adds a goal to payload for 't-goal' key. | teamiclink/slack/view_goal_create.py | add_goal_to_payload | e1004/teamiclink | python | def add_goal_to_payload(func):
@functools.wraps(func)
def wrapper_inject_goal(ack: Ack, payload: Dict[(str, Any)], context: BoltContext):
try:
content = GoalContent(content=payload['state']['values'][CREATE_GOAL_INPUT_BLOCK][CREATE_GOAL_INPUT]['value']).content
except Validatio... |
def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_func=None):
'\n @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...\n all args must be either tuple or list\n '
interrupt_trigger = [True]
max_progress = len(args)
if (max_... | 3,892,368,910,339,873,300 | @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
all args must be either tuple or list | yt_dlp/downloader/fragment.py | download_and_append_fragments_multiple | 9Fork/yt-dlp | python | def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_func=None):
'\n @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...\n all args must be either tuple or list\n '
interrupt_trigger = [True]
max_progress = len(args)
if (max_... |
def createFiles(self, fileContents, path, repMap=None, repMaps=None):
'repMap: single map for all files\n repMaps: a dict, with the filenames as the keys'
if ((repMap is not None) and (repMaps is not None)):
raise AllInOneError('createFiles can only take repMap or repMaps (or neither), not bot... | -1,049,011,689,998,130,600 | repMap: single map for all files
repMaps: a dict, with the filenames as the keys | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | createFiles | 4quarks/cmssw | python | def createFiles(self, fileContents, path, repMap=None, repMaps=None):
'repMap: single map for all files\n repMaps: a dict, with the filenames as the keys'
if ((repMap is not None) and (repMaps is not None)):
raise AllInOneError('createFiles can only take repMap or repMaps (or neither), not bot... |
def __init__(self, valName, alignment, config):
'\n This method adds additional items to the `self.general` dictionary\n which are only needed for validations using datasets.\n \n Arguments:\n - `valName`: String which identifies individual validation instances\n - `alignme... | -8,920,607,103,725,009,000 | This method adds additional items to the `self.general` dictionary
which are only needed for validations using datasets.
Arguments:
- `valName`: String which identifies individual validation instances
- `alignment`: `Alignment` instance to validate
- `config`: `BetterConfigParser` instance which includes the
... | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | __init__ | 4quarks/cmssw | python | def __init__(self, valName, alignment, config):
'\n This method adds additional items to the `self.general` dictionary\n which are only needed for validations using datasets.\n \n Arguments:\n - `valName`: String which identifies individual validation instances\n - `alignme... |
def createCrabCfg(self, path, crabCfgBaseName):
'\n Method which creates a `crab.cfg` for a validation on datasets.\n \n Arguments:\n - `path`: Path at which the file will be stored.\n - `crabCfgBaseName`: String which depends on the actual type of\n va... | -6,388,444,098,593,256,000 | Method which creates a `crab.cfg` for a validation on datasets.
Arguments:
- `path`: Path at which the file will be stored.
- `crabCfgBaseName`: String which depends on the actual type of
validation calling this method. | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | createCrabCfg | 4quarks/cmssw | python | def createCrabCfg(self, path, crabCfgBaseName):
'\n Method which creates a `crab.cfg` for a validation on datasets.\n \n Arguments:\n - `path`: Path at which the file will be stored.\n - `crabCfgBaseName`: String which depends on the actual type of\n va... |
@abstractmethod
def plottingscriptname(cls):
'override with a classmethod' | -5,638,076,648,143,060,000 | override with a classmethod | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | plottingscriptname | 4quarks/cmssw | python | @abstractmethod
def plottingscriptname(cls):
|
@abstractmethod
def plottingscripttemplate(cls):
'override with a classmethod' | -1,264,907,108,993,485,600 | override with a classmethod | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | plottingscripttemplate | 4quarks/cmssw | python | @abstractmethod
def plottingscripttemplate(cls):
|
@abstractmethod
def plotsdirname(cls):
'override with a classmethod' | -2,828,980,906,335,749,000 | override with a classmethod | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | plotsdirname | 4quarks/cmssw | python | @abstractmethod
def plotsdirname(cls):
|
@abstractmethod
def getsummaryitems(cls, folder):
'override with a classmethod that returns a list of SummaryItems\n based on the plots saved in folder' | 5,501,817,813,577,775,000 | override with a classmethod that returns a list of SummaryItems
based on the plots saved in folder | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | getsummaryitems | 4quarks/cmssw | python | @abstractmethod
def getsummaryitems(cls, folder):
'override with a classmethod that returns a list of SummaryItems\n based on the plots saved in folder' |
@abstractmethod
def comparealignmentsname(cls):
'classmethod' | -6,744,047,084,482,538,000 | classmethod | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | comparealignmentsname | 4quarks/cmssw | python | @abstractmethod
def comparealignmentsname(cls):
|
@abstractmethod
def presentationsubsections(cls):
'classmethod' | -1,757,039,647,671,784,700 | classmethod | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | presentationsubsections | 4quarks/cmssw | python | @abstractmethod
def presentationsubsections(cls):
|
def __init__(self, name, values, format=None, latexname=None, latexformat=None):
'\n name: name of the summary item, goes on top of the column\n values: value for each alignment (in order of rows)\n format: python format string (default: {:.3g}, meaning up to 3 sign... | 9,029,562,683,415,845,000 | name: name of the summary item, goes on top of the column
values: value for each alignment (in order of rows)
format: python format string (default: {:.3g}, meaning up to 3 significant digits)
latexname: name in latex form, e.g. if name=sigma you might want latexname=\sigma (default: name)
latexforma... | Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py | __init__ | 4quarks/cmssw | python | def __init__(self, name, values, format=None, latexname=None, latexformat=None):
'\n name: name of the summary item, goes on top of the column\n values: value for each alignment (in order of rows)\n format: python format string (default: {:.3g}, meaning up to 3 sign... |
def __init__(self):
'Constructor.\n '
self.filters = {} | -9,051,548,567,641,834,000 | Constructor. | pylith/apps/ConfigSearchApp.py | __init__ | Shengduo/pylith | python | def __init__(self):
'\n '
self.filters = {} |
def main(self, **kwargs):
'Main entry point.\n\n Keyword arguments:\n searchpath (str), default: "."\n Search path for .cfg files.\n display (str), default: "all"\n List of metadata to display in search results.\n keywords (str), default: None\n ... | 1,933,451,545,898,552,600 | Main entry point.
Keyword arguments:
searchpath (str), default: "."
Search path for .cfg files.
display (str), default: "all"
List of metadata to display in search results.
keywords (str), default: None
Comma delimited list of keywords for filtering search results.
features (str... | pylith/apps/ConfigSearchApp.py | main | Shengduo/pylith | python | def main(self, **kwargs):
'Main entry point.\n\n Keyword arguments:\n searchpath (str), default: "."\n Search path for .cfg files.\n display (str), default: "all"\n List of metadata to display in search results.\n keywords (str), default: None\n ... |
def _set_filters(self, options):
'Set filters for display from command line option.\n\n Args:\n options (argsparse.Namespace)\n Command line options.\n '
if options.keywords:
self.filters['keywords'] = string_to_list(options.keywords)
if options.features:
... | -6,979,188,438,464,539,000 | Set filters for display from command line option.
Args:
options (argsparse.Namespace)
Command line options. | pylith/apps/ConfigSearchApp.py | _set_filters | Shengduo/pylith | python | def _set_filters(self, options):
'Set filters for display from command line option.\n\n Args:\n options (argsparse.Namespace)\n Command line options.\n '
if options.keywords:
self.filters['keywords'] = string_to_list(options.keywords)
if options.features:
... |
def _apply_filters(self, metadata):
'Apply filters to metadata.\n\n Args:\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n \n Returns: (bool)\n True if metadata meets filter requirements, False otherwise.\n '
if ('keywords' in... | 2,894,900,722,805,184,500 | Apply filters to metadata.
Args:
metadata (pylith.utils.SimulationMetadata)
Simulation metadata.
Returns: (bool)
True if metadata meets filter requirements, False otherwise. | pylith/apps/ConfigSearchApp.py | _apply_filters | Shengduo/pylith | python | def _apply_filters(self, metadata):
'Apply filters to metadata.\n\n Args:\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n \n Returns: (bool)\n True if metadata meets filter requirements, False otherwise.\n '
if ('keywords' in... |
def _apply_filters_incompatible(self, metadata):
'Apply filters to metadata to find incompatible parameter files.\n\n Args:\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n \n Returns: (bool)\n True if metadata is incompatible with filte... | -2,360,490,715,105,342,000 | Apply filters to metadata to find incompatible parameter files.
Args:
metadata (pylith.utils.SimulationMetadata)
Simulation metadata.
Returns: (bool)
True if metadata is incompatible with filter requirements, False otherwise. | pylith/apps/ConfigSearchApp.py | _apply_filters_incompatible | Shengduo/pylith | python | def _apply_filters_incompatible(self, metadata):
'Apply filters to metadata to find incompatible parameter files.\n\n Args:\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n \n Returns: (bool)\n True if metadata is incompatible with filte... |
def _display_metadata(self, filename, metadata, options):
'Print metadata to stdout.\n\n Args:\n filename (str)\n Name of simulation .cfg file.\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n options (list of str)\n ... | -8,794,836,628,804,046,000 | Print metadata to stdout.
Args:
filename (str)
Name of simulation .cfg file.
metadata (pylith.utils.SimulationMetadata)
Simulation metadata.
options (list of str)
List of metadata to display. | pylith/apps/ConfigSearchApp.py | _display_metadata | Shengduo/pylith | python | def _display_metadata(self, filename, metadata, options):
'Print metadata to stdout.\n\n Args:\n filename (str)\n Name of simulation .cfg file.\n metadata (pylith.utils.SimulationMetadata)\n Simulation metadata.\n options (list of str)\n ... |
def _parse_command_line(self):
'Parse command line arguments.\n\n Returns (argsparse.Namespace)\n Command line arguments.\n '
DESCRIPTION = 'Application for searching PyLith .cfg parameter files.'
parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.Argu... | -7,304,533,193,928,211,000 | Parse command line arguments.
Returns (argsparse.Namespace)
Command line arguments. | pylith/apps/ConfigSearchApp.py | _parse_command_line | Shengduo/pylith | python | def _parse_command_line(self):
'Parse command line arguments.\n\n Returns (argsparse.Namespace)\n Command line arguments.\n '
DESCRIPTION = 'Application for searching PyLith .cfg parameter files.'
parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.Argu... |
def list_user_usage(self, **kwargs):
'\n A list of usage user entries\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(res... | 505,787,424,465,485,060 | A list of usage user entries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_user_usage(callback=callback_... | purity_fb/purity_fb_1dot6/apis/usage_users_api.py | list_user_usage | unixtreme/purity_fb_python_client | python | def list_user_usage(self, **kwargs):
'\n A list of usage user entries\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(res... |
def list_user_usage_with_http_info(self, **kwargs):
'\n A list of usage user entries\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callba... | 5,569,943,251,025,442,000 | A list of usage user entries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_user_usage_with_http_info(cal... | purity_fb/purity_fb_1dot6/apis/usage_users_api.py | list_user_usage_with_http_info | unixtreme/purity_fb_python_client | python | def list_user_usage_with_http_info(self, **kwargs):
'\n A list of usage user entries\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callba... |
def run_and_check_derivs(self, prob, tol=1e-05, verbose=False):
'Runs check_partials and compares to analytic derivatives.'
prob.run_model()
derivs = prob.check_partials(out_stream=None)
for i in derivs['comp'].keys():
if verbose:
print('Checking derivative pair:', i)
if (der... | 9,059,856,035,833,121,000 | Runs check_partials and compares to analytic derivatives. | openmdao/components/tests/test_meta_model_structured_comp.py | run_and_check_derivs | JustinSGray/OpenMDAO | python | def run_and_check_derivs(self, prob, tol=1e-05, verbose=False):
prob.run_model()
derivs = prob.check_partials(out_stream=None)
for i in derivs['comp'].keys():
if verbose:
print('Checking derivative pair:', i)
if (derivs['comp'][i]['J_fwd'].sum() != 0.0):
rel_err ... |
def run_and_check_derivs(self, prob, tol=1e-05, verbose=False):
'Runs check_partials and compares to analytic derivatives.'
prob.run_model()
derivs = prob.check_partials(method='cs', out_stream=None)
for i in derivs['comp'].keys():
if verbose:
print('Checking derivative pair:', i)
... | 3,383,656,209,822,107,600 | Runs check_partials and compares to analytic derivatives. | openmdao/components/tests/test_meta_model_structured_comp.py | run_and_check_derivs | JustinSGray/OpenMDAO | python | def run_and_check_derivs(self, prob, tol=1e-05, verbose=False):
prob.run_model()
derivs = prob.check_partials(method='cs', out_stream=None)
for i in derivs['comp'].keys():
if verbose:
print('Checking derivative pair:', i)
if (derivs['comp'][i]['J_fwd'].sum() != 0.0):
... |
@property
def keys(self):
'Names of columns.\n\n A tuple of strings that indicate the names of columns.\n '
raise NotImplementedError | 7,768,638,038,528,339,000 | Names of columns.
A tuple of strings that indicate the names of columns. | pytorch_pfn_extras/dataset/tabular/tabular_dataset.py | keys | HiroakiMikami/pytorch-pfn-extras | python | @property
def keys(self):
'Names of columns.\n\n A tuple of strings that indicate the names of columns.\n '
raise NotImplementedError |