| body (string) | body_hash (int64) | docstring (string) | path (string) | name (string) | repository_name (string) | lang (string) | body_without_docstring (string) |
|---|---|---|---|---|---|---|---|
def field_isomorphism(a, b, **args):
'Construct an isomorphism between two number fields.'
if (not all((isinstance(_, AlgebraicField) for _ in (a, b)))):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if (a == b):
return a.unit.rep.all_coeffs()
n = a.minpoly.d... | 4,375,923,129,808,113,000 | Construct an isomorphism between two number fields. | diofant/polys/numberfields.py | field_isomorphism | diofant/diofant | python | def field_isomorphism(a, b, **args):
if (not all((isinstance(_, AlgebraicField) for _ in (a, b)))):
raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')
if (a == b):
return a.unit.rep.all_coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if (a.domain =... |
def test_login_required(self):
'Test that login required for retrieving tags'
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED) | -5,907,001,046,836,814,000 | Test that login required for retrieving tags | app/recipe/tests/test_tags_api.py | test_login_required | deborahoni/recipe-app-api | python | def test_login_required(self):
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED) |
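The tag API tests in this file reference `TAGS_URL`, `self.client`, and `self.user` without showing how they are set up. A minimal sketch of the scaffolding such a DRF test module typically assumes; the URL name, credentials, and import paths here are assumptions, not taken from the repository:

```python
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient

from core.models import Tag                 # assumed app layout, used by the tests below
from recipe.serializers import TagSerializer  # assumed module path

TAGS_URL = reverse('recipe:tag-list')  # assumed router/view name


class PrivateTagsApiTests(TestCase):
    """Scaffolding sketch for the authenticated tag API tests."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            'test@example.com', 'password123'
        )
        self.client = APIClient()
        # force_authenticate skips the token flow so only the tag
        # endpoints themselves are exercised by each test method
        self.client.force_authenticate(self.user)
```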
def test_retrieve_tags(self):
'Test retrieving tags'
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res... | -3,301,511,521,326,451,700 | Test retrieving tags | app/recipe/tests/test_tags_api.py | test_retrieve_tags | deborahoni/recipe-app-api | python | def test_retrieve_tags(self):
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.H... |
def test_tags_limited_to_user(self):
'Test that tags returned are for authenticated user'
user2 = get_user_model().objects.create_user('example@example.com', 'testpass')
Tag.objects.create(user=user2, name='Tasty')
tag = Tag.objects.create(user=self.user, name='Just Food')
res = self.client.get(TAGS... | -6,624,375,918,940,996,000 | Test that tags returned are for authenticated user | app/recipe/tests/test_tags_api.py | test_tags_limited_to_user | deborahoni/recipe-app-api | python | def test_tags_limited_to_user(self):
user2 = get_user_model().objects.create_user('example@example.com', 'testpass')
Tag.objects.create(user=user2, name='Tasty')
tag = Tag.objects.create(user=self.user, name='Just Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.H... |
def test_create_tag_successful(self):
'Test creating a new tag'
payload = {'name': 'Simple'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(user=self.user, name=payload['name']).exists()
self.assertTrue(exists) | 6,388,799,538,000,849,000 | Test creating a new tag | app/recipe/tests/test_tags_api.py | test_create_tag_successful | deborahoni/recipe-app-api | python | def test_create_tag_successful(self):
payload = {'name': 'Simple'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(user=self.user, name=payload['name']).exists()
self.assertTrue(exists) |
def test_create_tag_invalid(self):
'Test creating a new tag with invalid payload'
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) | -5,803,848,339,914,584,000 | Test creating a new tag with invalid payload | app/recipe/tests/test_tags_api.py | test_create_tag_invalid | deborahoni/recipe-app-api | python | def test_create_tag_invalid(self):
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) |
def test_retrieve_tags_assigned_to_recipes(self):
'Test filtering tags by those assigned to recipes'
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(title='Coriander eggs on toast', time_minutes=10, price=... | -7,826,462,116,507,644,000 | Test filtering tags by those assigned to recipes | app/recipe/tests/test_tags_api.py | test_retrieve_tags_assigned_to_recipes | deborahoni/recipe-app-api | python | def test_retrieve_tags_assigned_to_recipes(self):
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(title='Coriander eggs on toast', time_minutes=10, price=5.0, user=self.user)
recipe.tags.add(tag1)
... |
def test_retrieve_tags_assigned_unique(self):
'Test filtering tags by assigned returns unique items'
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(title='Pancakes', time_minutes=5, price=3.0, user=self.user)
... | 1,714,289,436,581,793,000 | Test filtering tags by assigned returns unique items | app/recipe/tests/test_tags_api.py | test_retrieve_tags_assigned_unique | deborahoni/recipe-app-api | python | def test_retrieve_tags_assigned_unique(self):
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(title='Pancakes', time_minutes=5, price=3.0, user=self.user)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.c... |
def test_missing_name(self):
'\n every question needs a name (or alias of name)\n '
self.assertPyxformXform(name='invalidcols', ss_structure={'survey': [{'type': 'text', 'label': 'label'}]}, errored=True, error__contains=['no name']) | 8,243,894,922,048,404,000 | every question needs a name (or alias of name) | pyxform/tests_v1/test_sheet_columns.py | test_missing_name | medic/pyxform | python | def test_missing_name(self):
'\n \n '
self.assertPyxformXform(name='invalidcols', ss_structure={'survey': [{'type': 'text', 'label': 'label'}]}, errored=True, error__contains=['no name']) |
def test_column_case(self):
'\n Ensure that column name is case insensitive\n '
self.assertPyxformXform(name='mixedcasecolumns', md='\n | Survey | | | |\n | | Type | name | Label |\n | | text | Name | ... | 1,118,352,065,668,415,700 | Ensure that column name is case insensitive | pyxform/tests_v1/test_sheet_columns.py | test_column_case | medic/pyxform | python | def test_column_case(self):
'\n \n '
self.assertPyxformXform(name='mixedcasecolumns', md='\n | Survey | | | |\n | | Type | name | Label |\n | | text | Name | the name |\n | | int... |
def test_value_and_name(self):
"\n confirm that both 'name' and 'value' columns of choice list work\n "
for name_alias in ['name', 'value']:
self.assertPyxformXform(name='aliases', md=('\n | survey | | | |\n | |... | -3,907,622,770,719,091,000 | confirm that both 'name' and 'value' columns of choice list work | pyxform/tests_v1/test_sheet_columns.py | test_value_and_name | medic/pyxform | python | def test_value_and_name(self):
"\n \n "
for name_alias in ['name', 'value']:
self.assertPyxformXform(name='aliases', md=('\n | survey | | | |\n | | type | name | label |\n ... |
def add(self, log_prob, next_word_prob):
' increments counters for the sum of log probs of current word and next\n word (given context ending at current word). Since the next word might be at the end of the example,\n or it might be not counted because it is not an ending subword unit,\n ... | -5,975,998,106,554,211,000 | increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
or it might be not counted because it is not an ending subword unit,
also keeps track of how many of those we have seen | fairseq_cli/eval_lm.py | add | liangan1/fairseq | python | def add(self, log_prob, next_word_prob):
' increments counters for the sum of log probs of current word and next\n word (given context ending at current word). Since the next word might be at the end of the example,\n or it might be not counted because it is not an ending subword unit,\n ... |
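The truncated `add` method above belongs to a small per-word statistics accumulator. A minimal sketch of such a class written from the docstring alone; the attribute names are assumptions:

```python
class WordStat:
    """Accumulates log-probability statistics for a single word type."""

    def __init__(self, word, is_bpe=False):
        self.word = word
        self.is_bpe = is_bpe
        self.log_prob = 0.0          # sum of log p(word | context)
        self.next_word_prob = 0.0    # sum of log p(next word | context ending at this word)
        self.count = 0               # occurrences of the word itself
        self.missing_next_words = 0  # positions where the next word could not be counted

    def add(self, log_prob, next_word_prob):
        # next_word_prob is None when the word ends the example or the next
        # token is not an ending subword unit
        if next_word_prob is not None:
            self.next_word_prob += next_word_prob
        else:
            self.missing_next_words += 1
        self.log_prob += log_prob
        self.count += 1
```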
def __str__(self):
'\n The smallest python literal representation of a string\n\n :rtype: str\n\n '
if (self._s == ''):
return ''
if (len(self.quote) == 1):
s = self.to_short()
else:
s = self.to_long()
try:
eval(((self.quote + s) + self.quote))
... | 1,014,733,903,730,512,800 | The smallest python literal representation of a string
:rtype: str | src/python_minifier/ministring.py | __str__ | clbarnes/python-minifier | python | def __str__(self):
'\n The smallest python literal representation of a string\n\n :rtype: str\n\n '
if (self._s == ''):
    return ''
if (len(self.quote) == 1):
s = self.to_short()
else:
s = self.to_long()
try:
eval(((self.quote + s) + self.quote))
ex... |
def __str__(self):
'\n The smallest python literal representation of a string\n\n :rtype: str\n\n '
if (self._b == b''):
return ''
if (len(self.quote) == 1):
s = self.to_short()
else:
s = self.to_long()
assert (eval(((('b' + self.quote) + s) + self.quote)... | -7,563,030,493,167,593,000 | The smallest python literal representation of a string
:rtype: str | src/python_minifier/ministring.py | __str__ | clbarnes/python-minifier | python | def __str__(self):
'\n The smallest python literal representation of a string\n\n :rtype: str\n\n '
if (self._b == b''):
    return ''
if (len(self.quote) == 1):
s = self.to_short()
else:
s = self.to_long()
assert (eval(((('b' + self.quote) + s) + self.quote)) ==... |
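Both `__str__` bodies above are truncated; they choose between a short and a long (triple-quoted) literal form and check that the result round-trips through `eval`. A simplified, self-contained illustration of the same idea for plain one-line strings, which is not the minifier's actual algorithm:

```python
def smallest_literal(s: str) -> str:
    """Return the shorter of the single- and double-quoted literals for s.

    Assumes s is a single line with no control characters.
    """
    single = "'" + s.replace("\\", "\\\\").replace("'", "\\'") + "'"
    double = '"' + s.replace("\\", "\\\\").replace('"', '\\"') + '"'
    best = single if len(single) <= len(double) else double
    assert eval(best) == s  # same round-trip check the minifier performs
    return best


print(smallest_literal('say "hi"'))   # 'say "hi"'
print(smallest_literal("it's fine"))  # "it's fine"
```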
def test_upload_file(self):
'Integration test for the QuizSubmissionFilesAPI.upload_file method.'
pass | 3,796,981,798,016,427,000 | Integration test for the QuizSubmissionFilesAPI.upload_file method. | py3canvas/tests/quiz_submission_files.py | test_upload_file | tylerclair/py3canvas | python | def test_upload_file(self):
pass |
def dict_to_data(d: dict) -> dict:
'Recursively calls to_data on dict'
return {key: to_data(d[key]) for key in d} | -4,699,255,432,109,351,000 | Recursively calls to_data on dict | cyber_sdk/util/json.py | dict_to_data | SaveTheAles/cyber.py | python | def dict_to_data(d: dict) -> dict:
return {key: to_data(d[key]) for key in d} |
def to_data(self) -> Any:
'Converts the object to its JSON-serializable Python data representation.'
return dict_to_data(copy.deepcopy(self.__dict__)) | -8,924,841,611,247,736,000 | Converts the object to its JSON-serializable Python data representation. | cyber_sdk/util/json.py | to_data | SaveTheAles/cyber.py | python | def to_data(self) -> Any:
return dict_to_data(copy.deepcopy(self.__dict__)) |
def to_json(self) -> str:
'Marshals the object into a stringified JSON serialization. Keys are first sorted\n and the JSON rendered removes all unnecessary whitespace.\n\n Returns:\n str: JSON string representation\n '
return json.dumps(self.to_data(), sort_keys=True, separators=(... | -9,101,289,065,471,352,000 | Marshals the object into a stringified JSON serialization. Keys are first sorted
and the JSON rendered removes all unnecessary whitespace.
Returns:
str: JSON string representation | cyber_sdk/util/json.py | to_json | SaveTheAles/cyber.py | python | def to_json(self) -> str:
'Marshals the object into a stringified JSON serialization. Keys are first sorted\n and the JSON rendered removes all unnecessary whitespace.\n\n Returns:\n str: JSON string representation\n '
return json.dumps(self.to_data(), sort_keys=True, separators=(... |
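Taken together, `dict_to_data`, `to_data`, and `to_json` form a small mixin for JSON-serializable SDK objects. A self-contained sketch of the pattern with a usage example; the `Coin` class is illustrative, not from the SDK:

```python
import copy
import json
from typing import Any


def to_data(x: Any) -> Any:
    """Recursively convert objects to plain JSON-serializable data."""
    if isinstance(x, dict):
        return {key: to_data(value) for key, value in x.items()}
    if isinstance(x, list):
        return [to_data(item) for item in x]
    if hasattr(x, "to_data"):
        return x.to_data()
    return x


class JSONSerializable:
    def to_data(self) -> Any:
        return to_data(copy.deepcopy(self.__dict__))

    def to_json(self) -> str:
        # sorted keys plus tight separators give a canonical, whitespace-free string
        return json.dumps(self.to_data(), sort_keys=True, separators=(",", ":"))


class Coin(JSONSerializable):
    def __init__(self, denom: str, amount: int):
        self.denom = denom
        self.amount = amount


print(Coin("boot", 100).to_json())  # {"amount":100,"denom":"boot"}
```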
def _set_artifact_properties(artifact: types.Artifact, properties: Optional[Dict[(str, Any)]], custom_properties: Optional[Dict[(str, Any)]]):
'Sets properties and custom_properties to the given artifact.'
if (properties is not None):
for (key, value) in properties.items():
setattr(artifact,... | 8,432,907,034,830,028,000 | Sets properties and custom_properties to the given artifact. | tfx/dsl/components/common/importer.py | _set_artifact_properties | SunitRoy2703/tfx | python | def _set_artifact_properties(artifact: types.Artifact, properties: Optional[Dict[(str, Any)]], custom_properties: Optional[Dict[(str, Any)]]):
if (properties is not None):
for (key, value) in properties.items():
setattr(artifact, key, value)
if (custom_properties is not None):
f... |
def _prepare_artifact(metadata_handler: metadata.Metadata, uri: str, properties: Dict[(str, Any)], custom_properties: Dict[(str, Any)], reimport: bool, output_artifact_class: Type[types.Artifact], mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]) -> types.Artifact:
"Prepares the Importer's output artif... | -617,003,016,772,959,400 | Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties... | tfx/dsl/components/common/importer.py | _prepare_artifact | SunitRoy2703/tfx | python | def _prepare_artifact(metadata_handler: metadata.Metadata, uri: str, properties: Dict[(str, Any)], custom_properties: Dict[(str, Any)], reimport: bool, output_artifact_class: Type[types.Artifact], mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]) -> types.Artifact:
"Prepares the Importer's output artif... |
def generate_output_dict(metadata_handler: metadata.Metadata, uri: str, properties: Dict[(str, Any)], custom_properties: Dict[(str, Any)], reimport: bool, output_artifact_class: Type[types.Artifact], mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]=None) -> Dict[(str, List[types.Artifact])]:
"Generates... | -1,837,062,771,133,348,600 | Generates importer's output dict.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the... | tfx/dsl/components/common/importer.py | generate_output_dict | SunitRoy2703/tfx | python | def generate_output_dict(metadata_handler: metadata.Metadata, uri: str, properties: Dict[(str, Any)], custom_properties: Dict[(str, Any)], reimport: bool, output_artifact_class: Type[types.Artifact], mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]=None) -> Dict[(str, List[types.Artifact])]:
"Generates... |
def __init__(self, source_uri: str, artifact_type: Type[types.Artifact], reimport: Optional[bool]=False, properties: Optional[Dict[(str, Union[(str, int)])]]=None, custom_properties: Optional[Dict[(str, Union[(str, int)])]]=None):
'Init function for the Importer.\n\n Args:\n source_uri: the URI of the resou... | -3,162,124,771,600,060,400 | Init function for the Importer.
Args:
source_uri: the URI of the resource that needs to be registered.
artifact_type: the type of the artifact to import.
reimport: whether or not to re-import as a new artifact if the URI has
been imported in before.
properties: Dictionary of properties for the imported Art... | tfx/dsl/components/common/importer.py | __init__ | SunitRoy2703/tfx | python | def __init__(self, source_uri: str, artifact_type: Type[types.Artifact], reimport: Optional[bool]=False, properties: Optional[Dict[(str, Union[(str, int)])]]=None, custom_properties: Optional[Dict[(str, Union[(str, int)])]]=None):
'Init function for the Importer.\n\n Args:\n source_uri: the URI of the resou... |
@property
def outputs(self) -> Dict[(str, Any)]:
'Output Channel dict that contains imported artifacts.'
return self._output_dict | -4,582,220,960,787,554,000 | Output Channel dict that contains imported artifacts. | tfx/dsl/components/common/importer.py | outputs | SunitRoy2703/tfx | python | @property
def outputs(self) -> Dict[(str, Any)]:
return self._output_dict |
def on_transaction_end(session):
'\n Decorator for a function which should run after a top-level transaction ended.\n\n Transactions that are either implicitly or explicitly committed or rolled back will be\n closed at the end of a Pyramid view. This is here for cleaning up caches so that\n code after t... | -8,933,014,282,677,295,000 | Decorator for a function which should run after a top-level transaction ended.
Transactions that are either implicitly or explicitly committed or rolled back will be
closed at the end of a Pyramid view. This is here for cleaning up caches so that
code after the view, exception views for example, will not be able to ac... | h/util/db.py | on_transaction_end | Brahim109/h | python | def on_transaction_end(session):
'\n Decorator for a function which should run after a top-level transaction ended.\n\n Transactions that are either implicitly or explicitly committed or rolled back will be\n closed at the end of a Pyramid view. This is here for cleaning up caches so that\n code after t... |
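The docstring describes a decorator that fires a callback once the outermost transaction closes. A sketch of how this can be built on SQLAlchemy's session events; it is an illustration, not necessarily the project's exact implementation:

```python
from sqlalchemy import event


def on_transaction_end(session):
    """Run the decorated callback after each top-level transaction ends."""

    def decorator(func):
        def handler(_session, transaction):
            # fire only for the outermost transaction, not for savepoints
            # or nested transactions
            if transaction.parent is None:
                func()

        event.listen(session, "after_transaction_end", handler)
        return func

    return decorator


# usage sketch: clear a per-request cache whenever the transaction closes
# @on_transaction_end(session)
# def flush_cache():
#     cache.clear()
```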
def load(path, num_cpu=16):
'Load act function that was returned by learn function.\n\n Parameters\n ----------\n path: str\n path to the act function pickle\n num_cpu: int\n number of cpus to use for executing the policy\n\n Returns\n -------\n act: ActWrapper\n function t... | 4,148,967,975,659,665,000 | Load act function that was returned by learn function.
Parameters
----------
path: str
path to the act function pickle
num_cpu: int
number of cpus to use for executing the policy
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions. | baselines/deepq/simple.py | load | hyperdo/python2-baselines | python | def load(path, num_cpu=16):
'Load act function that was returned by learn function.\n\n Parameters\n ----------\n path: str\n path to the act function pickle\n num_cpu: int\n number of cpus to use for executing the policy\n\n Returns\n -------\n act: ActWrapper\n function t... |
def learn(env, q_func, lr=0.0005, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prio... | -7,016,482,687,221,096,000 | Train a deepq model.
Parameters
-------
env : gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
... | baselines/deepq/simple.py | learn | hyperdo/python2-baselines | python | def learn(env, q_func, lr=0.0005, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prio... |
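The `learn` signature above is the usual OpenAI Baselines entry point. A hedged usage sketch in the style of the project's CartPole example; the model builder and hyperparameters are assumptions and depend on this older Baselines API:

```python
import gym
from baselines import deepq


def main():
    env = gym.make("CartPole-v0")
    model = deepq.models.mlp([64])  # small fully-connected Q-network (assumed helper)
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
    )
    act.save("cartpole_model.pkl")  # ActWrapper.save is excerpted further down


if __name__ == "__main__":
    main()
```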
def save(self, path):
'Save model to a pickle located at `path`'
with tempfile.TemporaryDirectory() as td:
U.save_state(os.path.join(td, 'model'))
arc_name = os.path.join(td, 'packed.zip')
with zipfile.ZipFile(arc_name, 'w') as zipf:
for (root, dirs, files) in os.walk(td):
... | 1,592,568,679,855,833,900 | Save model to a pickle located at `path` | baselines/deepq/simple.py | save | hyperdo/python2-baselines | python | def save(self, path):
with tempfile.TemporaryDirectory() as td:
U.save_state(os.path.join(td, 'model'))
arc_name = os.path.join(td, 'packed.zip')
with zipfile.ZipFile(arc_name, 'w') as zipf:
for (root, dirs, files) in os.walk(td):
for fname in files:
... |
def _generate_detections_v1(boxes: tf.Tensor, scores: tf.Tensor, attributes: Optional[Mapping[(str, tf.Tensor)]]=None, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, soft_nms_sigma: Optional[float]=None):
'Generates the final detections given... | -4,265,795,419,135,677,000 | Generates the final detections given the model outputs.
The implementation unrolls the batch dimension and process images one by one.
It required the batch dimension to be statically known and it is TPU
compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]... | official/vision/beta/modeling/layers/detection_generator.py | _generate_detections_v1 | 915067906/models | python | def _generate_detections_v1(boxes: tf.Tensor, scores: tf.Tensor, attributes: Optional[Mapping[(str, tf.Tensor)]]=None, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, soft_nms_sigma: Optional[float]=None):
'Generates the final detections given... |
def _generate_detections_per_image(boxes: tf.Tensor, scores: tf.Tensor, attributes: Optional[Mapping[(str, tf.Tensor)]]=None, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, soft_nms_sigma: Optional[float]=None):
'Generates the final detection... | 8,376,549,356,609,634,000 | Generates the final detections per image given the model outputs.
Args:
boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which
box predictions on all feature levels. The N is the number of total
anchors on all levels.
scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks c... | official/vision/beta/modeling/layers/detection_generator.py | _generate_detections_per_image | 915067906/models | python | def _generate_detections_per_image(boxes: tf.Tensor, scores: tf.Tensor, attributes: Optional[Mapping[(str, tf.Tensor)]]=None, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, soft_nms_sigma: Optional[float]=None):
'Generates the final detection... |
def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
'Selects top_k scores and indices for each class.\n\n Args:\n scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which\n stacks class logit outputs on all feature levels. The N is the number of\n total anchor... | -961,165,520,036,506,900 | Selects top_k scores and indices for each class.
Args:
scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model.
pre_nms... | official/vision/beta/modeling/layers/detection_generator.py | _select_top_k_scores | 915067906/models | python | def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
'Selects top_k scores and indices for each class.\n\n Args:\n scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which\n stacks class logit outputs on all feature levels. The N is the number of\n total anchor... |
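A simplified sketch of what `_select_top_k_scores` has to do: keep the `pre_nms_num_detections` highest-scoring anchors independently for every class, using `tf.math.top_k`. Shapes follow the docstring; this is an illustration rather than the library code:

```python
import tensorflow as tf


def select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
    """scores_in: [batch, N, num_classes] -> per-class top-k scores and anchor indices."""
    # move the anchor axis last so top_k runs over anchors for each class
    scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])  # [batch, classes, N]
    top_k_scores, top_k_indices = tf.math.top_k(
        scores_trans, k=pre_nms_num_detections, sorted=True
    )
    # back to [batch, k, num_classes] so downstream code can gather boxes per class
    return (tf.transpose(top_k_scores, perm=[0, 2, 1]),
            tf.transpose(top_k_indices, perm=[0, 2, 1]))
```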
def _generate_detections_v2(boxes: tf.Tensor, scores: tf.Tensor, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100):
'Generates the final detections given the model outputs.\n\n This implementation unrolls classes dimension while using the tf.wh... | -413,168,451,629,743,200 | Generates the final detections given the model outputs.
This implementation unrolls classes dimension while using the tf.while_loop
to implement the batched NMS, so that it can be parallelized at the batch
dimension. It should give better performance comparing to v1 implementation.
It is TPU compatible.
Args:
boxes... | official/vision/beta/modeling/layers/detection_generator.py | _generate_detections_v2 | 915067906/models | python | def _generate_detections_v2(boxes: tf.Tensor, scores: tf.Tensor, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100):
'Generates the final detections given the model outputs.\n\n This implementation unrolls classes dimension while using the tf.wh... |
def _generate_detections_batched(boxes: tf.Tensor, scores: tf.Tensor, pre_nms_score_threshold: float, nms_iou_threshold: float, max_num_detections: int):
'Generates detected boxes with scores and classes for one-stage detector.\n\n The function takes output of multi-level ConvNets and anchor boxes and\n generates... | 133,743,826,918,885,950 | Generates detected boxes with scores and classes for one-stage detector.
The function takes output of multi-level ConvNets and anchor boxes and
generates detected boxes. Note that this used batched nms, which is not
supported on TPU currently.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` ... | official/vision/beta/modeling/layers/detection_generator.py | _generate_detections_batched | 915067906/models | python | def _generate_detections_batched(boxes: tf.Tensor, scores: tf.Tensor, pre_nms_score_threshold: float, nms_iou_threshold: float, max_num_detections: int):
'Generates detected boxes with scores and classes for one-stage detector.\n\n The function takes output of multi-level ConvNets and anchor boxes and\n generates... |
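The batched variant delegates to TensorFlow's fused op. A minimal sketch using `tf.image.combined_non_max_suppression`; the real generator also normalizes and clips boxes before and after this call:

```python
import tensorflow as tf


def generate_detections_batched(boxes, scores,
                                pre_nms_score_threshold=0.05,
                                nms_iou_threshold=0.5,
                                max_num_detections=100):
    """boxes: [batch, N, num_classes, 4] or [batch, N, 1, 4]; scores: [batch, N, num_classes]."""
    nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
        tf.image.combined_non_max_suppression(
            boxes,
            scores,
            max_output_size_per_class=max_num_detections,
            max_total_size=max_num_detections,
            iou_threshold=nms_iou_threshold,
            score_threshold=pre_nms_score_threshold,
            clip_boxes=False,
        )
    )
    return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
```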
def __init__(self, apply_nms: bool=True, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, nms_version: str='v2', use_cpu_nms: bool=False, soft_nms_sigma: Optional[float]=None, **kwargs):
'Initializes a detection generator.\n\n Args:\n a... | -2,313,276,793,241,696,000 | Initializes a detection generator.
Args:
apply_nms: A `bool` of whether or not apply non maximum suppression.
If False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the ... | official/vision/beta/modeling/layers/detection_generator.py | __init__ | 915067906/models | python | def __init__(self, apply_nms: bool=True, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, nms_version: str='v2', use_cpu_nms: bool=False, soft_nms_sigma: Optional[float]=None, **kwargs):
'Initializes a detection generator.\n\n Args:\n a... |
def __call__(self, raw_boxes: tf.Tensor, raw_scores: tf.Tensor, anchor_boxes: tf.Tensor, image_shape: tf.Tensor, regression_weights: Optional[List[float]]=None, bbox_per_class: bool=True):
'Generates final detections.\n\n Args:\n raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`\n ... | -3,888,970,305,849,663,000 | Generates final detections.
Args:
raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`
representing the class-specific box coordinates relative to anchors.
raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]`
representing the class logits before applying score activiation... | official/vision/beta/modeling/layers/detection_generator.py | __call__ | 915067906/models | python | def __call__(self, raw_boxes: tf.Tensor, raw_scores: tf.Tensor, anchor_boxes: tf.Tensor, image_shape: tf.Tensor, regression_weights: Optional[List[float]]=None, bbox_per_class: bool=True):
'Generates final detections.\n\n Args:\n raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`\n ... |
def __init__(self, apply_nms: bool=True, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, nms_version: str='v1', use_cpu_nms: bool=False, soft_nms_sigma: Optional[float]=None, **kwargs):
'Initializes a multi-level detection generator.\n\n Ar... | 3,301,950,544,126,950,400 | Initializes a multi-level detection generator.
Args:
apply_nms: A `bool` of whether or not apply non maximum suppression. If
False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `fl... | official/vision/beta/modeling/layers/detection_generator.py | __init__ | 915067906/models | python | def __init__(self, apply_nms: bool=True, pre_nms_top_k: int=5000, pre_nms_score_threshold: float=0.05, nms_iou_threshold: float=0.5, max_num_detections: int=100, nms_version: str='v1', use_cpu_nms: bool=False, soft_nms_sigma: Optional[float]=None, **kwargs):
'Initializes a multi-level detection generator.\n\n Ar... |
def _decode_multilevel_outputs(self, raw_boxes: Mapping[(str, tf.Tensor)], raw_scores: Mapping[(str, tf.Tensor)], anchor_boxes: tf.Tensor, image_shape: tf.Tensor, raw_attributes: Optional[Mapping[(str, tf.Tensor)]]=None):
'Collects dict of multilevel boxes, scores, attributes into lists.'
boxes = []
scores ... | 6,158,989,498,364,521,000 | Collects dict of multilevel boxes, scores, attributes into lists. | official/vision/beta/modeling/layers/detection_generator.py | _decode_multilevel_outputs | 915067906/models | python | def _decode_multilevel_outputs(self, raw_boxes: Mapping[(str, tf.Tensor)], raw_scores: Mapping[(str, tf.Tensor)], anchor_boxes: tf.Tensor, image_shape: tf.Tensor, raw_attributes: Optional[Mapping[(str, tf.Tensor)]]=None):
boxes = []
scores = []
if raw_attributes:
attributes = {att_name: [] for ... |
def __call__(self, raw_boxes: Mapping[(str, tf.Tensor)], raw_scores: Mapping[(str, tf.Tensor)], anchor_boxes: tf.Tensor, image_shape: tf.Tensor, raw_attributes: Optional[Mapping[(str, tf.Tensor)]]=None):
'Generates final detections.\n\n Args:\n raw_boxes: A `dict` with keys representing FPN levels and value... | 2,912,919,324,711,744,500 | Generates final detections.
Args:
raw_boxes: A `dict` with keys representing FPN levels and values
representing box tenors of shape `[batch, feature_h, feature_w,
num_anchors * 4]`.
raw_scores: A `dict` with keys representing FPN levels and values
representing logit tensors of shape `[batch, feature_h,... | official/vision/beta/modeling/layers/detection_generator.py | __call__ | 915067906/models | python | def __call__(self, raw_boxes: Mapping[(str, tf.Tensor)], raw_scores: Mapping[(str, tf.Tensor)], anchor_boxes: tf.Tensor, image_shape: tf.Tensor, raw_attributes: Optional[Mapping[(str, tf.Tensor)]]=None):
'Generates final detections.\n\n Args:\n raw_boxes: A `dict` with keys representing FPN levels and value... |
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
'Check if it is a valid MacOS plist file name.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n plist_name (str): name of the plist.\n ... | -4,250,799,335,899,952,600 | Check if it is a valid MacOS plist file name.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
plist_name (str): name of the plist.
top_level (dict[str, object]): plist top-level key. | plaso/parsers/plist_plugins/launchd.py | Process | ddm1004/plaso | python | def Process(self, parser_mediator, plist_name, top_level, **kwargs):
'Check if it is a valid MacOS plist file name.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n plist_name (str): name of the plist.\n ... |
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
'Extracts launchd information from the plist.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n top_level (Optional[dict[str: object]]): ... | -2,750,604,028,025,562,000 | Extracts launchd information from the plist.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
top_level (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. | plaso/parsers/plist_plugins/launchd.py | GetEntries | ddm1004/plaso | python | def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
'Extracts launchd information from the plist.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n top_level (Optional[dict[str: object]]): ... |
def _write_value(self, value):
'Sets motor value between [-1, 1]'
if (abs(value) <= 0.05):
value = 0.0
mapped_value = int((1300.0 * ((self.alpha * value) + self.beta)))
speed = min(max(mapped_value, (- 1300)), 1300)
self._motor.setSpeed(speed) | -5,645,086,418,558,018,000 | Sets motor value between [-1, 1] | jetbot/motor.py | _write_value | vstoneofficial/jetbot-mecanum | python | def _write_value(self, value):
if (abs(value) <= 0.05):
value = 0.0
mapped_value = int((1300.0 * ((self.alpha * value) + self.beta)))
speed = min(max(mapped_value, (- 1300)), 1300)
self._motor.setSpeed(speed) |
def _release(self):
'Stops motor by releasing control'
self._motor.setSpeed(0) | -3,456,137,181,658,730,500 | Stops motor by releasing control | jetbot/motor.py | _release | vstoneofficial/jetbot-mecanum | python | def _release(self):
self._motor.setSpeed(0) |
def index():
" Module's Home Page "
try:
module_name = settings.modules[module].name_nice
except:
module_name = T('Disaster Victim Identification')
table = s3db.dvi_body
total = db((table.deleted == False)).count()
itable = s3db.dvi_identification
query = ((((table.deleted ==... | -8,932,594,271,398,314,000 | Module's Home Page | controllers/dvi.py | index | andygimma/eden | python | def index():
" "
try:
module_name = settings.modules[module].name_nice
except:
module_name = T('Disaster Victim Identification')
table = s3db.dvi_body
total = db((table.deleted == False)).count()
itable = s3db.dvi_identification
query = ((((table.deleted == False) & (itable.... |
def recreq():
' Recovery Requests List '
table = s3db.dvi_recreq
table.person_id.default = s3_logged_in_person()
def prep(r):
if (r.interactive and (not r.record)):
table.status.readable = False
table.status.writable = False
table.bodies_recovered.readable = ... | 3,506,701,886,621,570,000 | Recovery Requests List | controllers/dvi.py | recreq | andygimma/eden | python | def recreq():
' '
table = s3db.dvi_recreq
table.person_id.default = s3_logged_in_person()
def prep(r):
if (r.interactive and (not r.record)):
table.status.readable = False
table.status.writable = False
table.bodies_recovered.readable = False
tabl... |
def morgue():
' Morgue Registry '
morgue_tabs = [(T('Morgue Details'), ''), (T('Bodies'), 'body')]
rheader = S3ResourceHeader([[(T('Morgue'), 'name')]], tabs=morgue_tabs)
def prep(r):
s3db.gis_location_filter(r)
if (r.interactive and r.id and (not r.component)):
field = r.ta... | -1,926,710,972,500,599,600 | Morgue Registry | controllers/dvi.py | morgue | andygimma/eden | python | def morgue():
' '
morgue_tabs = [(T('Morgue Details'), ''), (T('Bodies'), 'body')]
rheader = S3ResourceHeader([[(T('Morgue'), 'name')]], tabs=morgue_tabs)
def prep(r):
s3db.gis_location_filter(r)
if (r.interactive and r.id and (not r.component)):
field = r.table.obsolete
... |
def body():
' Dead Bodies Registry '
gender_opts = s3db.pr_gender_opts
gender_opts[1] = T('unknown')
btable = s3db.dvi_body
itable = s3db.dvi_identification
status = request.get_vars.get('status', None)
if (status == 'unidentified'):
query = ((itable.deleted == False) & (itable.statu... | 4,108,203,930,432,968,000 | Dead Bodies Registry | controllers/dvi.py | body | andygimma/eden | python | def body():
' '
gender_opts = s3db.pr_gender_opts
gender_opts[1] = T('unknown')
btable = s3db.dvi_body
itable = s3db.dvi_identification
status = request.get_vars.get('status', None)
if (status == 'unidentified'):
query = ((itable.deleted == False) & (itable.status == 3))
ids... |
def person():
' Missing Persons Registry (Match Finder) '
table = s3db.pr_person
s3.crud_strings['pr_person'].update(title_display=T('Missing Person Details'), title_list=T('Missing Persons'), label_list_button=T('List Missing Persons'), msg_list_empty=T('No Persons found'), msg_no_match=T('No Persons curre... | 7,416,614,646,194,750,000 | Missing Persons Registry (Match Finder) | controllers/dvi.py | person | andygimma/eden | python | def person():
' '
table = s3db.pr_person
s3.crud_strings['pr_person'].update(title_display=T('Missing Person Details'), title_list=T('Missing Persons'), label_list_button=T('List Missing Persons'), msg_list_empty=T('No Persons found'), msg_no_match=T('No Persons currently reported missing'))
s3db.confi... |
def dvi_match_query(body_id):
'\n Get a query for candidate matches between the missing\n persons registry and a dead body\n\n @param body_id: the dvi_body record ID\n '
ptable = s3db.pr_person
ntable = s3db.pr_note
btable = s3db.dvi_body
query = ((((ptable.deleted == False) ... | -398,216,051,305,224,960 | Get a query for candidate matches between the missing
persons registry and a dead body
@param body_id: the dvi_body record ID | controllers/dvi.py | dvi_match_query | andygimma/eden | python | def dvi_match_query(body_id):
'\n Get a query for candidate matches between the missing\n persons registry and a dead body\n\n @param body_id: the dvi_body record ID\n '
ptable = s3db.pr_person
ntable = s3db.pr_note
btable = s3db.dvi_body
query = ((((ptable.deleted == False) ... |
def tooltip():
' Ajax Tooltips '
formfield = request.vars.get('formfield', None)
if formfield:
response.view = ('pr/ajaxtips/%s.html' % formfield)
return dict() | 1,023,201,173,793,866,200 | Ajax Tooltips | controllers/dvi.py | tooltip | andygimma/eden | python | def tooltip():
' '
formfield = request.vars.get('formfield', None)
if formfield:
response.view = ('pr/ajaxtips/%s.html' % formfield)
return dict() |
def _get_example_figures():
'Create two example figures.'
fig1 = plt.plot([1, 2], [1, 2])[0].figure
fig2 = plt.plot([3, 4], [3, 4])[0].figure
return [fig1, fig2] | 6,730,110,490,721,703,000 | Create two example figures. | mne/tests/test_report.py | _get_example_figures | NataKozh/mne-python | python | def _get_example_figures():
fig1 = plt.plot([1, 2], [1, 2])[0].figure
fig2 = plt.plot([3, 4], [3, 4])[0].figure
return [fig1, fig2] |
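For context, the figures returned above are what the tests feed into `mne.Report`. A hedged sketch of the (older) public API these tests exercise; file names are placeholders:

```python
import matplotlib.pyplot as plt
from mne import Report

fig1 = plt.plot([1, 2], [1, 2])[0].figure
fig2 = plt.plot([3, 4], [3, 4])[0].figure

report = Report()
report.add_figs_to_section([fig1, fig2],
                           captions=['line a', 'line b'],
                           section='examples')
report.save('report.html', open_browser=False, overwrite=True)
```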
@pytest.mark.slowtest
@testing.requires_testing_data
def test_render_report():
'Test rendering -*.fif files for mne report.'
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')... | 3,057,919,541,365,986,000 | Test rendering -*.fif files for mne report. | mne/tests/test_report.py | test_render_report | NataKozh/mne-python | python | @pytest.mark.slowtest
@testing.requires_testing_data
def test_render_report():
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_r... |
@testing.requires_testing_data
def test_report_raw_psd_and_date():
'Test report raw PSD and DATE_NONE functionality.'
with pytest.raises(TypeError, match='dict'):
Report(raw_psd='foo')
tempdir = _TempDir()
raw = read_raw_fif(raw_fname).crop(0, 1.0).load_data()
raw_fname_new = op.join(tempdir... | -7,349,168,060,396,265,000 | Test report raw PSD and DATE_NONE functionality. | mne/tests/test_report.py | test_report_raw_psd_and_date | NataKozh/mne-python | python | @testing.requires_testing_data
def test_report_raw_psd_and_date():
with pytest.raises(TypeError, match='dict'):
Report(raw_psd='foo')
tempdir = _TempDir()
raw = read_raw_fif(raw_fname).crop(0, 1.0).load_data()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
raw.save(raw_fname_new)
... |
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_render_add_sections():
'Test adding figures/images to section.'
tempdir = _TempDir()
report = Report(subjects_dir=subjects_dir)
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, captions=['evoked respon... | -1,994,883,436,208,848,600 | Test adding figures/images to section. | mne/tests/test_report.py | test_render_add_sections | NataKozh/mne-python | python | @testing.requires_testing_data
@requires_mayavi
@traits_test
def test_render_add_sections():
tempdir = _TempDir()
report = Report(subjects_dir=subjects_dir)
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, captions=['evoked response'], scale=1.2, image_format='svg')
... |
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_mayavi
@traits_test
@requires_nibabel()
def test_render_mri():
'Test rendering MRI for mne report.'
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for (a, b) in [[trans_fname, trans_fname_new]]:
shutil.copy... | -644,899,421,329,662,000 | Test rendering MRI for mne report. | mne/tests/test_report.py | test_render_mri | NataKozh/mne-python | python | @pytest.mark.slowtest
@testing.requires_testing_data
@requires_mayavi
@traits_test
@requires_nibabel()
def test_render_mri():
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for (a, b) in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_... |
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
'Test rendering MRI without BEM for mne report.'
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T... | 7,342,466,567,199,111,000 | Test rendering MRI without BEM for mne report. | mne/tests/test_report.py | test_render_mri_without_bem | NataKozh/mne-python | python | @testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fnam... |
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
'Test adding html str to mne report.'
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
(caption, section) = ('html', 'html_section')
report.add_... | -7,173,137,651,665,221,000 | Test adding html str to mne report. | mne/tests/test_report.py | test_add_htmls_to_section | NataKozh/mne-python | python | @testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
(caption, section) = ('html', 'html_section')
report.add_htmls_to_section(html, caption, secti... |
def test_add_slider_to_section():
'Test adding a slider with a series of images to mne report.'
tempdir = _TempDir()
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = _get_example_figures()
report.add_slider_to_section(figs, sect... | 890,989,121,652,424,200 | Test adding a slider with a series of images to mne report. | mne/tests/test_report.py | test_add_slider_to_section | NataKozh/mne-python | python | def test_add_slider_to_section():
tempdir = _TempDir()
report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = _get_example_figures()
report.add_slider_to_section(figs, section=section, title='my title')
assert (report.fnames[0] =... |
def test_validate_input():
'Test Report input validation.'
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.', 'Second letter of the alphabet', 'Third letter of the alphabet']
pytest.raises(Va... | 7,231,881,813,550,490,000 | Test Report input validation. | mne/tests/test_report.py | test_validate_input | NataKozh/mne-python | python | def test_validate_input():
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.', 'Second letter of the alphabet', 'Third letter of the alphabet']
pytest.raises(ValueError, report._validate_inpu... |
@requires_h5py
def test_open_report():
'Test the open_report function.'
tempdir = _TempDir()
hdf5 = op.join(tempdir, 'report.h5')
fig1 = _get_example_figures()[0]
with open_report(hdf5, subjects_dir=subjects_dir) as report:
assert (report.subjects_dir == subjects_dir)
assert (report.... | -46,408,802,336,081,170 | Test the open_report function. | mne/tests/test_report.py | test_open_report | NataKozh/mne-python | python | @requires_h5py
def test_open_report():
tempdir = _TempDir()
hdf5 = op.join(tempdir, 'report.h5')
fig1 = _get_example_figures()[0]
with open_report(hdf5, subjects_dir=subjects_dir) as report:
assert (report.subjects_dir == subjects_dir)
assert (report._fname == hdf5)
report.a... |
def test_remove():
'Test removing figures from a report.'
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'figure1', 'mysection')
r.add_slider_to_section([fig1, fig2], title='figure1', section='othersection')
r.add_figs_to_section(fig2, 'figure1', 'mysection')
... | 66,209,351,956,649,530 | Test removing figures from a report. | mne/tests/test_report.py | test_remove | NataKozh/mne-python | python | def test_remove():
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'figure1', 'mysection')
r.add_slider_to_section([fig1, fig2], title='figure1', section='othersection')
r.add_figs_to_section(fig2, 'figure1', 'mysection')
r.add_figs_to_section(fig2, 'figure2',... |
def test_add_or_replace():
'Test replacing existing figures in a report.'
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'othersection')
r.... | -6,487,673,268,119,802,000 | Test replacing existing figures in a report. | mne/tests/test_report.py | test_add_or_replace | NataKozh/mne-python | python | def test_add_or_replace():
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'othersection')
r.add_figs_to_section(fig2, 'nonduplicate', 'mys... |
def test_scraper(tmpdir):
'Test report scraping.'
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'a', 'mysection')
r.add_figs_to_section(fig2, 'b', 'mysection')
app = Bunch(builder=Bunch(srcdir=str(tmpdir), outdir=op.join(str(tmpdir), '_build', 'html')))
scrap... | 3,745,007,383,396,112,000 | Test report scraping. | mne/tests/test_report.py | test_scraper | NataKozh/mne-python | python | def test_scraper(tmpdir):
r = Report()
(fig1, fig2) = _get_example_figures()
r.add_figs_to_section(fig1, 'a', 'mysection')
r.add_figs_to_section(fig2, 'b', 'mysection')
app = Bunch(builder=Bunch(srcdir=str(tmpdir), outdir=op.join(str(tmpdir), '_build', 'html')))
scraper = _ReportScraper()
... |
def __init__(self, config_ref=None, data_secret_name=None, local_vars_configuration=None):
'IoXK8sClusterV1alpha4MachineSpecBootstrap - a model defined in OpenAPI'
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configurati... | -7,458,339,454,898,926,000 | IoXK8sClusterV1alpha4MachineSpecBootstrap - a model defined in OpenAPI | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | __init__ | mariusgheorghies/python | python | def __init__(self, config_ref=None, data_secret_name=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config_ref = None
self._data_secret_name = None
sel... |
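The generated model's `__init__` and `to_dict` rely on two class-level maps that the excerpt does not show. A sketch of what OpenAPI-generator output typically defines for this model; the values are inferred from the property names, so treat them as assumptions:

```python
class IoXK8sClusterV1alpha4MachineSpecBootstrap(object):
    # python attribute -> declared OpenAPI type, iterated by to_dict()
    openapi_types = {
        'config_ref': 'IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef',
        'data_secret_name': 'str',
    }

    # python attribute -> JSON key as it appears on the wire (camelCase)
    attribute_map = {
        'config_ref': 'configRef',
        'data_secret_name': 'dataSecretName',
    }
```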
@property
def config_ref(self):
'Gets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n\n\n :return: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :rtype: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef\n '
return self._... | -6,719,715,699,018,144,000 | Gets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:return: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:rtype: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | config_ref | mariusgheorghies/python | python | @property
def config_ref(self):
'Gets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n\n\n :return: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :rtype: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef\n '
return self._... |
@config_ref.setter
def config_ref(self, config_ref):
'Sets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap.\n\n\n :param config_ref: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :type: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef\n '
... | 4,942,017,845,807,972,000 | Sets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap.
:param config_ref: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:type: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | config_ref | mariusgheorghies/python | python | @config_ref.setter
def config_ref(self, config_ref):
'Sets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap.\n\n\n :param config_ref: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n :type: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef\n '
... |
@property
def data_secret_name(self):
'Gets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n\n DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501\n\n :return: The d... | -2,117,952,424,190,382,300 | Gets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501
:return: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstr... | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | data_secret_name | mariusgheorghies/python | python | @property
def data_secret_name(self):
'Gets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501\n\n DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501\n\n :return: The d... |
@data_secret_name.setter
def data_secret_name(self, data_secret_name):
'Sets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap.\n\n DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501\n\n ... | 60,570,540,045,614,510 | Sets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap.
DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501
:param data_secret_name: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBoots... | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | data_secret_name | mariusgheorghies/python | python | @data_secret_name.setter
def data_secret_name(self, data_secret_name):
'Sets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap.\n\n DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501\n\n ... |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
e... | 8,442,519,487,048,767,000 | Returns the model properties as a dict | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | to_dict | mariusgheorghies/python | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
... |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | to_str | mariusgheorghies/python | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | __repr__ | mariusgheorghies/python | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap)):
return False
return (self.to_dict() == other.to_dict()) | 7,289,287,128,892,294,000 | Returns true if both objects are equal | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | __eq__ | mariusgheorghies/python | python | def __eq__(self, other):
if (not isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap)):
return False
return (self.to_dict() == other.to_dict()) |
def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap)):
return True
return (self.to_dict() != other.to_dict()) | -403,530,282,509,678,700 | Returns true if both objects are not equal | kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | __ne__ | mariusgheorghies/python | python | def __ne__(self, other):
if (not isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap)):
return True
return (self.to_dict() != other.to_dict()) |
def register_wrapper(wrapper, cls_or_obj):
'register_wrapper\n\n :param wrapper: A wrapper of all kinds of providers\n :param cls_or_obj: A class or class name or object instance in data/data.py\n '
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = (cls_or_ob... | -9,188,843,668,770,969,000 | register_wrapper
:param wrapper: A wrapper of all kinds of providers
:param cls_or_obj: A class or class name or object instance in data/data.py | qlib/data/data.py | register_wrapper | Tirbo06/qlib | python | def register_wrapper(wrapper, cls_or_obj):
'register_wrapper\n\n :param wrapper: A wrapper of all kinds of providers\n :param cls_or_obj: A class or class name or object instance in data/data.py\n '
if isinstance(cls_or_obj, str):
cls_or_obj = get_cls_from_name(cls_or_obj)
obj = (cls_or_ob... |
def register_all_wrappers():
'register_all_wrappers'
logger = get_module_logger('data')
_calendar_provider = get_provider_obj(C.calendar_provider)
if (getattr(C, 'calendar_cache', None) is not None):
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
registe... | -5,182,649,215,750,496,000 | register_all_wrappers | qlib/data/data.py | register_all_wrappers | Tirbo06/qlib | python | def register_all_wrappers():
logger = get_module_logger('data')
_calendar_provider = get_provider_obj(C.calendar_provider)
if (getattr(C, 'calendar_cache', None) is not None):
_calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
register_wrapper(Cal, _calendar_provider)
logge... |
@abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq='day', future=False):
'Get calendar of certain market in given time range.\n\n Parameters\n ----------\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n... | 3,577,133,685,186,970,600 | Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
----------
list
calen... | qlib/data/data.py | calendar | Tirbo06/qlib | python | @abc.abstractmethod
def calendar(self, start_time=None, end_time=None, freq='day', future=False):
'Get calendar of certain market in given time range.\n\n Parameters\n ----------\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n... |
def locate_index(self, start_time, end_time, freq, future):
'Locate the start time index and end time index in a calendar under certain frequency.\n\n Parameters\n ----------\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n ... | 1,755,067,797,088,190,200 | Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
... | qlib/data/data.py | locate_index | Tirbo06/qlib | python | def locate_index(self, start_time, end_time, freq, future):
'Locate the start time index and end time index in a calendar under certain frequency.\n\n Parameters\n ----------\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n ... |
def _get_calendar(self, freq, future):
'Load calendar using memcache.\n\n Parameters\n ----------\n freq : str\n frequency of read calendar file\n future : bool\n whether including future trading day\n\n Returns\n -------\n list\n lis... | 2,789,606,126,967,036,400 | Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file
future : bool
whether including future trading day
Returns
-------
list
list of timestamps
dict
dict composed by timestamp as key and index as value for fast search | qlib/data/data.py | _get_calendar | Tirbo06/qlib | python | def _get_calendar(self, freq, future):
'Load calendar using memcache.\n\n Parameters\n ----------\n freq : str\n frequency of read calendar file\n future : bool\n whether including future trading day\n\n Returns\n -------\n list\n lis... |
def _uri(self, start_time, end_time, freq, future=False):
    'Get the uri of calendar generation task.'
    return hash_args(start_time, end_time, freq, future) | 5,857,268,485,102,351,000 | Get the uri of calendar generation task. | qlib/data/data.py | _uri | Tirbo06/qlib | python | def _uri(self, start_time, end_time, freq, future=False):
    return hash_args(start_time, end_time, freq, future)
@staticmethod
def instruments(market='all', filter_pipe=None):
"Get the general config dictionary for a base market adding several dynamic filters.\n\n Parameters\n ----------\n market : str\n market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500\n filter_pipe... | 5,346,680,155,164,997,000 | Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base ... | qlib/data/data.py | instruments | Tirbo06/qlib | python | @staticmethod
def instruments(market='all', filter_pipe=None):
"Get the general config dictionary for a base market adding several dynamic filters.\n\n Parameters\n ----------\n market : str\n market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500\n filter_pipe... |
@abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq='day', as_list=False):
'List the instruments based on a certain stockpool config.\n\n Parameters\n ----------\n instruments : dict\n stockpool config\n start_time : str\n ... | -7,773,960,003,251,321,000 | List the instruments based on a certain stockpool config.
Parameters
----------
instruments : dict
stockpool config
start_time : str
start of the time range
end_time : str
end of the time range
as_list : bool
return instruments as list or dict
Returns
-------
dict or list
instruments list or dicti... | qlib/data/data.py | list_instruments | Tirbo06/qlib | python | @abc.abstractmethod
def list_instruments(self, instruments, start_time=None, end_time=None, freq='day', as_list=False):
'List the instruments based on a certain stockpool config.\n\n Parameters\n ----------\n instruments : dict\n stockpool config\n start_time : str\n ... |
@abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
'Get feature data.\n\n Parameters\n ----------\n instrument : str\n a certain instrument\n field : str\n a certain field of feature\n start_time : str\n start of ... | -4,321,086,077,266,639,400 | Get feature data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of... | qlib/data/data.py | feature | Tirbo06/qlib | python | @abc.abstractmethod
def feature(self, instrument, field, start_time, end_time, freq):
'Get feature data.\n\n Parameters\n ----------\n instrument : str\n a certain instrument\n field : str\n a certain field of feature\n start_time : str\n start of ... |
@abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq='day'):
'Get Expression data.\n\n Parameters\n ----------\n instrument : str\n a certain instrument\n field : str\n a certain field of feature\n start_time : str\... | -5,795,559,039,258,244,000 | Get Expression data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data... | qlib/data/data.py | expression | Tirbo06/qlib | python | @abc.abstractmethod
def expression(self, instrument, field, start_time=None, end_time=None, freq='day'):
'Get Expression data.\n\n Parameters\n ----------\n instrument : str\n a certain instrument\n field : str\n a certain field of feature\n start_time : str\... |
@abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq='day'):
'Get dataset data.\n\n Parameters\n ----------\n instruments : list or dict\n list/dict of instruments or dict of stockpool config\n fields : list\n list of featu... | 8,839,626,730,823,916,000 | Get dataset data.
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
Returns
----------
pd.DataFrame
... | qlib/data/data.py | dataset | Tirbo06/qlib | python | @abc.abstractmethod
def dataset(self, instruments, fields, start_time=None, end_time=None, freq='day'):
'Get dataset data.\n\n Parameters\n ----------\n instruments : list or dict\n list/dict of instruments or dict of stockpool config\n fields : list\n list of featu... |
def _uri(self, instruments, fields, start_time=None, end_time=None, freq='day', disk_cache=1, **kwargs):
'Get task uri, used when generating rabbitmq task in qlib_server\n\n Parameters\n ----------\n instruments : list or dict\n list/dict of instruments or dict of stockpool config\n ... | 7,055,993,701,715,980,000 | Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time... | qlib/data/data.py | _uri | Tirbo06/qlib | python | def _uri(self, instruments, fields, start_time=None, end_time=None, freq='day', disk_cache=1, **kwargs):
'Get task uri, used when generating rabbitmq task in qlib_server\n\n Parameters\n ----------\n instruments : list or dict\n list/dict of instruments or dict of stockpool config\n ... |
@staticmethod
def get_instruments_d(instruments, freq):
    '\n Parse different types of input instruments to output instruments_d\n Wrong format of input instruments will lead to exception.\n\n '
    if isinstance(instruments, dict):
        if ('market' in instruments):
            instruments_d ... | 6,823,630,086,228,314,000 | Parse different types of input instruments to output instruments_d
Wrong format of input instruments will lead to exception. | qlib/data/data.py | get_instruments_d | Tirbo06/qlib | python | @staticmethod
def get_instruments_d(instruments, freq):
    '\n Parse different types of input instruments to output instruments_d\n Wrong format of input instruments will lead to exception.\n\n '
    if isinstance(instruments, dict):
        if ('market' in instruments):
            instruments_d ...
@staticmethod
def get_column_names(fields):
    '\n Get column names from input fields\n\n '
    if (len(fields) == 0):
        raise ValueError('fields cannot be empty')
    fields = fields.copy()
    column_names = [str(f) for f in fields]
    return column_names | 8,591,165,258,190,278,000 | Get column names from input fields | qlib/data/data.py | get_column_names | Tirbo06/qlib | python | @staticmethod
def get_column_names(fields):
    '\n \n\n '
    if (len(fields) == 0):
        raise ValueError('fields cannot be empty')
    fields = fields.copy()
    column_names = [str(f) for f in fields]
    return column_names
@staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
'\n Load and process the data, return the data set.\n - default using multi-kernel method.\n\n '
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
if (C.maxtaskspe... | -8,502,041,657,564,014,000 | Load and process the data, return the data set.
- default using multi-kernel method. | qlib/data/data.py | dataset_processor | Tirbo06/qlib | python | @staticmethod
def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
'\n Load and process the data, return the data set.\n - default using multi-kernel method.\n\n '
normalize_column_names = normalize_cache_fields(column_names)
data = dict()
if (C.maxtaskspe... |
@staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
"\n Calculate the expressions for one instrument, return a df result.\n If the expression has been calculated before, load from cache.\n\n return value: A data frame with index 'datetime... | 2,355,530,893,085,278,000 | Calculate the expressions for one instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns. | qlib/data/data.py | expression_calculator | Tirbo06/qlib | python | @staticmethod
def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
"\n Calculate the expressions for one instrument, return a df result.\n If the expression has been calculated before, load from cache.\n\n return value: A data frame with index 'datetime... |
@property
def _uri_cal(self):
    'Calendar file uri.'
    if self.remote:
        return os.path.join(C.mount_path, 'calendars', '{}.txt')
    else:
        return os.path.join(C.provider_uri, 'calendars', '{}.txt') | 1,566,966,008,109,715,000 | Calendar file uri. | qlib/data/data.py | _uri_cal | Tirbo06/qlib | python | @property
def _uri_cal(self):
    if self.remote:
        return os.path.join(C.mount_path, 'calendars', '{}.txt')
    else:
        return os.path.join(C.provider_uri, 'calendars', '{}.txt')
def _load_calendar(self, freq, future):
'Load original calendar timestamp from file.\n\n Parameters\n ----------\n freq : str\n frequency of read calendar file\n\n Returns\n ----------\n list\n list of timestamps\n '
if future:
fname... | -8,847,209,010,704,079,000 | Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file
Returns
----------
list
list of timestamps | qlib/data/data.py | _load_calendar | Tirbo06/qlib | python | def _load_calendar(self, freq, future):
'Load original calendar timestamp from file.\n\n Parameters\n ----------\n freq : str\n frequency of read calendar file\n\n Returns\n ----------\n list\n list of timestamps\n '
if future:
fname... |
@property
def _uri_inst(self):
    'Instrument file uri.'
    return os.path.join(C.provider_uri, 'instruments', '{}.txt') | 437,905,685,548,133,440 | Instrument file uri. | qlib/data/data.py | _uri_inst | Tirbo06/qlib | python | @property
def _uri_inst(self):
    return os.path.join(C.provider_uri, 'instruments', '{}.txt')
@property
def _uri_data(self):
    'Static feature file uri.'
    if self.remote:
        return os.path.join(C.mount_path, 'features', '{}', '{}.{}.bin')
    else:
        return os.path.join(C.provider_uri, 'features', '{}', '{}.{}.bin') | 5,267,282,713,068,252,000 | Static feature file uri. | qlib/data/data.py | _uri_data | Tirbo06/qlib | python | @property
def _uri_data(self):
    if self.remote:
        return os.path.join(C.mount_path, 'features', '{}', '{}.{}.bin')
    else:
        return os.path.join(C.provider_uri, 'features', '{}', '{}.{}.bin')
@staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq='day'):
'\n This method is used to prepare the expression cache for the client.\n Then the client will load the data from expression cache by itself.\n\n '
instruments_d = DatasetProvider.get_inst... | 4,493,967,503,467,954,700 | This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself. | qlib/data/data.py | multi_cache_walker | Tirbo06/qlib | python | @staticmethod
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq='day'):
'\n This method is used to prepare the expression cache for the client.\n Then the client will load the data from expression cache by itself.\n\n '
instruments_d = DatasetProvider.get_inst... |
@staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
    "\n If the expressions of one instrument haven't been calculated before,\n calculate it and write it into expression cache.\n\n "
    for field in column_names:
        ExpressionD.expression(inst, field, start_time... | -864,899,502,299,588,600 | If the expressions of one instrument haven't been calculated before,
calculate it and write it into expression cache. | qlib/data/data.py | cache_walker | Tirbo06/qlib | python | @staticmethod
def cache_walker(inst, start_time, end_time, freq, column_names):
    "\n If the expressions of one instrument haven't been calculated before,\n calculate it and write it into expression cache.\n\n "
    for field in column_names:
        ExpressionD.expression(inst, field, start_time...
def features(self, instruments, fields, start_time=None, end_time=None, freq='day', disk_cache=None):
'\n disk_cache : int\n whether to skip(0)/use(1)/replace(2) disk_cache\n\n This function will try to use cache method which has a keyword `disk_cache`,\n and will use provider method... | 2,457,201,586,131,732,500 | disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
This function will try to use cache method which has a keyword `disk_cache`,
and will use provider method if a type error is raised because the DatasetD instance
is a provider class. | qlib/data/data.py | features | Tirbo06/qlib | python | def features(self, instruments, fields, start_time=None, end_time=None, freq='day', disk_cache=None):
'\n disk_cache : int\n whether to skip(0)/use(1)/replace(2) disk_cache\n\n This function will try to use cache method which has a keyword `disk_cache`,\n and will use provider method... |
def _uri(self, type, **kwargs):
'_uri\n The server hope to get the uri of the request. The uri will be decided\n by the dataprovider. For ex, different cache layer has different uri.\n\n :param type: The type of resource for the uri\n :param **kwargs:\n '
if (type == 'calendar... | 6,238,780,568,753,580,000 | _uri
The server hope to get the uri of the request. The uri will be decided
by the dataprovider. For ex, different cache layer has different uri.
:param type: The type of resource for the uri
:param **kwargs: | qlib/data/data.py | _uri | Tirbo06/qlib | python | def _uri(self, type, **kwargs):
'_uri\n The server hope to get the uri of the request. The uri will be decided\n by the dataprovider. For ex, different cache layer has different uri.\n\n :param type: The type of resource for the uri\n :param **kwargs:\n '
if (type == 'calendar... |
def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
'features_uri\n\n Return the uri of the generated cache of features/dataset\n\n :param disk_cache:\n :param instruments:\n :param fields:\n :param start_time:\n :param end_time:\n ... | -5,710,456,833,990,194,000 | features_uri
Return the uri of the generated cache of features/dataset
:param disk_cache:
:param instruments:
:param fields:
:param start_time:
:param end_time:
:param freq: | qlib/data/data.py | features_uri | Tirbo06/qlib | python | def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
'features_uri\n\n Return the uri of the generated cache of features/dataset\n\n :param disk_cache:\n :param instruments:\n :param fields:\n :param start_time:\n :param end_time:\n ... |
def load_in_chunks(path, chunk_size=1024):
    'Lazy function (generator) to read a file piece by piece.\n Default chunk size: 1k.'
    with open(path, 'rb') as file_object:
        while True:
            data = file_object.read(chunk_size)
            if (not data):
                break
            (yield data) | -2,405,785,985,499,538,400 | Lazy function (generator) to read a file piece by piece.
Default chunk size: 1k. | conans/client/rest/uploader_downloader.py | load_in_chunks | AKhranovskiy/conan | python | def load_in_chunks(path, chunk_size=1024):
    'Lazy function (generator) to read a file piece by piece.\n Default chunk size: 1k.'
    with open(path, 'rb') as file_object:
        while True:
            data = file_object.read(chunk_size)
            if (not data):
                break
            (yield data)
def download_chunks(file_handler=None, ret_buffer=None):
    'Write to a buffer or to a file handler'
    chunk_size = (1024 if (not file_path) else (1024 * 100))
    download_size = 0
    last_progress = None
    for data in response.iter_content(chunk_size):
        download_size += len(data)
        if (ret_buffer i... | -8,814,911,293,651,486,000 | Write to a buffer or to a file handler | conans/client/rest/uploader_downloader.py | download_chunks | AKhranovskiy/conan | python | def download_chunks(file_handler=None, ret_buffer=None):
    chunk_size = (1024 if (not file_path) else (1024 * 100))
    download_size = 0
    last_progress = None
    for data in response.iter_content(chunk_size):
        download_size += len(data)
        if (ret_buffer is not None):
            ret_buffer.exte...
def get(self, link_id):
'Get link resource.\n\n .. :quickref: Link collection.\n\n **Example request**:\n\n .. sourcecode:: http\n\n GET /links/1 HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n **Example response**:\n\n ... | -2,436,602,847,577,292,300 | Get link resource.
.. :quickref: Link collection.
**Example request**:
.. sourcecode:: http
GET /links/1 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
... | slicr/resources/links.py | get | travisbyrum/slicr | python | def get(self, link_id):
'Get link resource.\n\n .. :quickref: Link collection.\n\n **Example request**:\n\n .. sourcecode:: http\n\n GET /links/1 HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n **Example response**:\n\n ... |
@use_args(link_args)
def post(self, args):
'Create shortened link.\n\n .. :quickref: Link collection.\n\n **Example request**:\n\n .. sourcecode:: http\n\n POST /links HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n {\n ... | -9,147,935,309,359,684,000 | Create shortened link.
.. :quickref: Link collection.
**Example request**:
.. sourcecode:: http
POST /links HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"url": "https://www.google.com"
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 201 OK
Vary... | slicr/resources/links.py | post | travisbyrum/slicr | python | @use_args(link_args)
def post(self, args):
'Create shortened link.\n\n .. :quickref: Link collection.\n\n **Example request**:\n\n .. sourcecode:: http\n\n POST /links HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n {\n ... |