| body stringlengths 26 98.2k | body_hash int64 -9,222,864,604,528,158,000 9,221,803,474B | docstring stringlengths 1 16.8k | path stringlengths 5 230 | name stringlengths 1 96 | repository_name stringlengths 7 89 | lang stringclasses 1 value | body_without_docstring stringlengths 20 98.2k |
|---|---|---|---|---|---|---|---|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, ErrorDetails)):
return False
return (self.__dict__ == other.__dict__) | 7,013,632,968,773,976,000 | Returns true if both objects are equal | asposewordscloud/models/error_details.py | __eq__ | rizwanniazigroupdocs/aspose-words-cloud-python | python | def __eq__(self, other):
if (not isinstance(other, ErrorDetails)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | asposewordscloud/models/error_details.py | __ne__ | rizwanniazigroupdocs/aspose-words-cloud-python | python | def __ne__(self, other):
return (not (self == other)) |
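The two rows above show the equality idiom that generated SDK models such as `ErrorDetails` use: `__eq__` compares `__dict__` after a type check, and `__ne__` negates `__eq__` so the two can never disagree. A minimal self-contained sketch of the pattern — the field names below are illustrative stand-ins, not the real aspose-words-cloud model:

```python
class ErrorDetails:
    """Illustrative stand-in; the generated class carries more fields."""

    def __init__(self, request_id=None, error_date_time=None):
        self.request_id = request_id
        self.error_date_time = error_date_time

    def __eq__(self, other):
        # Only objects of the same model type can compare equal,
        # and then only when every attribute matches.
        if not isinstance(other, ErrorDetails):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Delegating to __eq__ keeps the two methods consistent.
        return not self == other


assert ErrorDetails("r1", "2020-01-01") == ErrorDetails("r1", "2020-01-01")
assert ErrorDetails("r1", "2020-01-01") != ErrorDetails("r2", "2020-01-01")
```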
@profile
@login_required
def post(self):
'\n Called when saving data from the annotator client\n '
data = request.get_json(force=True)
image = data.get('image')
dataset = data.get('dataset')
image_id = image.get('id')
image_model = ImageModel.objects(id=image_id).first()
if (im... | 223,986,864,701,691,170 | Called when saving data from the annotator client | coco-annotator/backend/webserver/api/annotator.py | post | Cheol-H-Jeong/Deep-POC-2019 | python | @profile
@login_required
def post(self):
'\n \n '
data = request.get_json(force=True)
image = data.get('image')
dataset = data.get('dataset')
image_id = image.get('id')
image_model = ImageModel.objects(id=image_id).first()
if (image_model is None):
return ({'success': F... |
@profile
@login_required
def get(self, image_id):
' Called when loading from the annotator client '
image = ImageModel.objects(id=image_id).exclude('events').first()
if (image is None):
return ({'success': False, 'message': 'Could not load image'}, 400)
dataset = current_user.datasets.filter(id=... | 4,471,360,595,673,237,500 | Called when loading from the annotator client | coco-annotator/backend/webserver/api/annotator.py | get | Cheol-H-Jeong/Deep-POC-2019 | python | @profile
@login_required
def get(self, image_id):
' '
image = ImageModel.objects(id=image_id).exclude('events').first()
if (image is None):
return ({'success': False, 'message': 'Could not load image'}, 400)
dataset = current_user.datasets.filter(id=image.dataset_id).first()
if (dataset is ... |
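The two annotator rows above are Flask-RESTful resource methods; note the convention of returning a `(payload, status_code)` tuple on failure. A minimal stand-alone resource using the same convention — the model lookup is faked with a dict, since the real code queries MongoEngine's `ImageModel`:

```python
from flask import Flask, request
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

IMAGES = {1: {"id": 1, "file_name": "cat.png"}}  # stand-in for ImageModel


class Annotator(Resource):
    def get(self, image_id):
        image = IMAGES.get(image_id)
        if image is None:
            # Same shape as the row above: payload plus HTTP status.
            return {"success": False, "message": "Could not load image"}, 400
        return {"success": True, "image": image}

    def post(self):
        data = request.get_json(force=True)
        image = data.get("image") or {}
        if IMAGES.get(image.get("id")) is None:
            return {"success": False, "message": "Image does not exist"}, 400
        return {"success": True}


api.add_resource(Annotator, "/annotator", "/annotator/<int:image_id>")

if __name__ == "__main__":
    app.run()
```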
def __init__(self, **kwargs):
'\n Initializes a new UpdateConnectionFromAmazonS3 object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.UpdateConnectionFromAmazonS3.model_type` attribute\n of this class is ``AMAZON_S3_CONNECTION`` and it should not b... | 2,299,845,921,030,368,500 | Initializes a new UpdateConnectionFromAmazonS3 object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.UpdateConnectionFromAmazonS3.model_type` attribute
of this class is ``AMAZON_S3_CONNECTION`` and it should not be changed.
The following keyword arguments are support... | src/oci/data_integration/models/update_connection_from_amazon_s3.py | __init__ | pabs3/oci-python-sdk | python | def __init__(self, **kwargs):
'\n Initializes a new UpdateConnectionFromAmazonS3 object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.UpdateConnectionFromAmazonS3.model_type` attribute\n of this class is ``AMAZON_S3_CONNECTION`` and it should not b... |
@property
def access_key(self):
'\n Gets the access_key of this UpdateConnectionFromAmazonS3.\n\n :return: The access_key of this UpdateConnectionFromAmazonS3.\n :rtype: oci.data_integration.models.SensitiveAttribute\n '
return self._access_key | -34,331,669,729,738,812 | Gets the access_key of this UpdateConnectionFromAmazonS3.
:return: The access_key of this UpdateConnectionFromAmazonS3.
:rtype: oci.data_integration.models.SensitiveAttribute | src/oci/data_integration/models/update_connection_from_amazon_s3.py | access_key | pabs3/oci-python-sdk | python | @property
def access_key(self):
'\n Gets the access_key of this UpdateConnectionFromAmazonS3.\n\n :return: The access_key of this UpdateConnectionFromAmazonS3.\n :rtype: oci.data_integration.models.SensitiveAttribute\n '
return self._access_key |
@access_key.setter
def access_key(self, access_key):
'\n Sets the access_key of this UpdateConnectionFromAmazonS3.\n\n :param access_key: The access_key of this UpdateConnectionFromAmazonS3.\n :type: oci.data_integration.models.SensitiveAttribute\n '
self._access_key = access_key | -474,915,086,494,389,300 | Sets the access_key of this UpdateConnectionFromAmazonS3.
:param access_key: The access_key of this UpdateConnectionFromAmazonS3.
:type: oci.data_integration.models.SensitiveAttribute | src/oci/data_integration/models/update_connection_from_amazon_s3.py | access_key | pabs3/oci-python-sdk | python | @access_key.setter
def access_key(self, access_key):
'\n Sets the access_key of this UpdateConnectionFromAmazonS3.\n\n :param access_key: The access_key of this UpdateConnectionFromAmazonS3.\n :type: oci.data_integration.models.SensitiveAttribute\n '
self._access_key = access_key |
@property
def secret_key(self):
'\n Gets the secret_key of this UpdateConnectionFromAmazonS3.\n\n :return: The secret_key of this UpdateConnectionFromAmazonS3.\n :rtype: oci.data_integration.models.SensitiveAttribute\n '
return self._secret_key | 7,734,419,076,322,159,000 | Gets the secret_key of this UpdateConnectionFromAmazonS3.
:return: The secret_key of this UpdateConnectionFromAmazonS3.
:rtype: oci.data_integration.models.SensitiveAttribute | src/oci/data_integration/models/update_connection_from_amazon_s3.py | secret_key | pabs3/oci-python-sdk | python | @property
def secret_key(self):
'\n Gets the secret_key of this UpdateConnectionFromAmazonS3.\n\n :return: The secret_key of this UpdateConnectionFromAmazonS3.\n :rtype: oci.data_integration.models.SensitiveAttribute\n '
return self._secret_key |
@secret_key.setter
def secret_key(self, secret_key):
'\n Sets the secret_key of this UpdateConnectionFromAmazonS3.\n\n :param secret_key: The secret_key of this UpdateConnectionFromAmazonS3.\n :type: oci.data_integration.models.SensitiveAttribute\n '
self._secret_key = secret_key | -7,769,865,444,699,896,000 | Sets the secret_key of this UpdateConnectionFromAmazonS3.
:param secret_key: The secret_key of this UpdateConnectionFromAmazonS3.
:type: oci.data_integration.models.SensitiveAttribute | src/oci/data_integration/models/update_connection_from_amazon_s3.py | secret_key | pabs3/oci-python-sdk | python | @secret_key.setter
def secret_key(self, secret_key):
'\n Sets the secret_key of this UpdateConnectionFromAmazonS3.\n\n :param secret_key: The secret_key of this UpdateConnectionFromAmazonS3.\n :type: oci.data_integration.models.SensitiveAttribute\n '
self._secret_key = secret_key |
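The four accessor rows above all follow the same generated-SDK idiom: a read-only `@property` backed by a private `_name` attribute, plus a matching setter. A compact sketch of that idiom, with field names taken from the rows above (the real OCI class is auto-generated and has more machinery):

```python
class UpdateConnectionFromAmazonS3Sketch:
    """Illustrative subset of the generated accessor pattern."""

    def __init__(self):
        self._access_key = None
        self._secret_key = None

    @property
    def access_key(self):
        # The getter simply exposes the private backing attribute.
        return self._access_key

    @access_key.setter
    def access_key(self, access_key):
        # The setter is a trivial assignment here; generated SDKs
        # sometimes add validation for enum-typed fields.
        self._access_key = access_key


conn = UpdateConnectionFromAmazonS3Sketch()
conn.access_key = "AKIA..."  # placeholder value, not a real key
print(conn.access_key)
```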
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id=None):
'Train the model.'
if (args.local_rank in [(- 1), 0]):
tb_writer = SummaryWriter()
args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
print(f'Local... | 8,833,718,236,150,706,000 | Train the model. | third_party/ridayesh_run_tag.py | train | rohanshah13/cloud-emea-copy | python | def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id=None):
if (args.local_rank in [(- 1), 0]):
tb_writer = SummaryWriter()
args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
print(f'Local Rank = {args.loca... |
def _find_all_hints_in_graph_def(session):
'Look at the current default graph and return a list of LiteFuncCall objs.\n\n Args:\n session: A TensorFlow session that contains the graph to convert.\n Returns:\n a list of `LifeFuncCall` objects in the form\n\n '
func_calls = _collections.defaultdict(_Lite... | 7,412,164,229,717,128,000 | Look at the current default graph and return a list of LiteFuncCall objs.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
a list of `LiteFuncCall` objects in the form | tensorflow/contrib/lite/python/op_hint.py | _find_all_hints_in_graph_def | 188080501/tensorflow | python | def _find_all_hints_in_graph_def(session):
'Look at the current default graph and return a list of LiteFuncCall objs.\n\n Args:\n session: A TensorFlow session that contains the graph to convert.\n Returns:\n a list of `LifeFuncCall` objects in the form\n\n '
func_calls = _collections.defaultdict(_Lite... |
def _tensor_name_base(full_tensor_name):
'Removes the device assignment code from a tensor.\n\n e.g. _tensor_name_base("foo:3") => "foo"\n\n Args:\n full_tensor_name: A tensor name that is annotated with a device placement\n (this is what tensor flow introspection gives).\n Returns:\n A name without a... | -9,004,534,146,274,701,000 | Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any device assignment. | tensorflow/contrib/lite/python/op_hint.py | _tensor_name_base | 188080501/tensorflow | python | def _tensor_name_base(full_tensor_name):
'Removes the device assignment code from a tensor.\n\n e.g. _tensor_name_base("foo:3") => "foo"\n\n Args:\n full_tensor_name: A tensor name that is annotated with a device placement\n (this is what tensor flow introspection gives).\n Returns:\n A name without a... |
def convert_op_hints_to_stubs(session):
'Converts a graphdef with LiteOp hints into stub operations.\n\n This is used to prepare for toco conversion of complex intrinsic usages.\n\n Args:\n session: A TensorFlow session that contains the graph to convert.\n Returns:\n A new graphdef with all ops contained ... | 545,267,334,812,460,350 | Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Args:
session: A TensorFlow session that contains the graph to convert.
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right... | tensorflow/contrib/lite/python/op_hint.py | convert_op_hints_to_stubs | 188080501/tensorflow | python | def convert_op_hints_to_stubs(session):
'Converts a graphdef with LiteOp hints into stub operations.\n\n This is used to prepare for toco conversion of complex intrinsic usages.\n\n Args:\n session: A TensorFlow session that contains the graph to convert.\n Returns:\n A new graphdef with all ops contained ... |
def __init__(self, function_name, **kwargs):
'Create a OpHint.\n\n Args:\n function_name: Name of the function (the custom op name in tflite)\n **kwargs: Keyword arguments of any constant attributes for the function.\n '
self._function_name = function_name
self._unique_function_id = _uuid.uu... | 2,070,700,012,877,376,300 | Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
**kwargs: Keyword arguments of any constant attributes for the function. | tensorflow/contrib/lite/python/op_hint.py | __init__ | 188080501/tensorflow | python | def __init__(self, function_name, **kwargs):
'Create a OpHint.\n\n Args:\n function_name: Name of the function (the custom op name in tflite)\n **kwargs: Keyword arguments of any constant attributes for the function.\n '
self._function_name = function_name
self._unique_function_id = _uuid.uu... |
def add_inputs(self, *args):
"Add a sequence of inputs to the function invocation.\n\n Args:\n *args: List of inputs to be converted (should be Tf.Tensor).\n Returns:\n Wrapped inputs (identity standins that have additional metadata). These\n are also are also tf.Tensor's.\n "
def augme... | -2,426,469,873,050,694,700 | Add a sequence of inputs to the function invocation.
Args:
*args: List of inputs to be converted (should be Tf.Tensor).
Returns:
Wrapped inputs (identity standins that have additional metadata). These
are also tf.Tensor's. | tensorflow/contrib/lite/python/op_hint.py | add_inputs | 188080501/tensorflow | python | def add_inputs(self, *args):
"Add a sequence of inputs to the function invocation.\n\n Args:\n *args: List of inputs to be converted (should be Tf.Tensor).\n Returns:\n Wrapped inputs (identity standins that have additional metadata). These\n are also are also tf.Tensor's.\n "
def augme... |
def add_outputs(self, *args):
"Add a sequence of outputs to the function invocation.\n\n Args:\n *args: List of outputs to be converted (should be tf.Tensor).\n Returns:\n Wrapped outputs (identity standins that have additional metadata). These\n are also tf.Tensor's.\n "
def augmented_... | -7,205,941,043,342,234,000 | Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's. | tensorflow/contrib/lite/python/op_hint.py | add_outputs | 188080501/tensorflow | python | def add_outputs(self, *args):
"Add a sequence of outputs to the function invocation.\n\n Args:\n *args: List of outputs to be converted (should be tf.Tensor).\n Returns:\n Wrapped outputs (identity standins that have additional metadata). These\n are also tf.Tensor's.\n "
def augmented_... |
def extract(infile):
'\n Merges bioindex.tsv with the infile (balanced data),\n finds the volsplit.zip location for each bio file and \n extracts the files into secure_volume/holding_folder.\n '
bioindex = pd.read_csv('/media/secure_volume/index/bioindex.tsv', sep='\t')
balanced_bioindex = pd.re... | -1,507,047,250,928,302,000 | Merges bioindex.tsv with the infile (balanced data),
finds the volsplit.zip location for each bio file and
extracts the files into secure_volume/holding_folder. | code/extract_balanced.py | extract | afcarl/biographies | python | def extract(infile):
'\n Merges bioindex.tsv with the infile (balanced data),\n finds the volsplit.zip location for each bio file and \n extracts the files into secure_volume/holding_folder.\n '
bioindex = pd.read_csv('/media/secure_volume/index/bioindex.tsv', sep='\t')
balanced_bioindex = pd.re... |
@task
@with_validation
def generate(directory=None):
'\n Generate configuration files.\n '
for conffiles in iter_conffiles(directory):
status("Generating templates for '{environment}' and '{role}'", environment=conffiles.environment, role=conffiles.role)
conffiles.generate() | -2,122,800,150,191,893,500 | Generate configuration files. | confab/generate.py | generate | locationlabs/confab | python | @task
@with_validation
def generate(directory=None):
'\n \n '
for conffiles in iter_conffiles(directory):
status("Generating templates for '{environment}' and '{role}'", environment=conffiles.environment, role=conffiles.role)
conffiles.generate() |
def test_overdue_habit(datasett):
"\n please note the 'double tt' for datasett. This stands to differentiate\n the functional test data from the data used for unit tests.\n habit 1 is the overdue habit since its added first in the func/conftest\n module.\n :param datasett: from func/conftest\n :re... | 1,522,588,135,354,832,000 | please note the 'double tt' for datasett. This stands to differentiate
the functional test data from the data used for unit tests.
habit 1 is the overdue habit since its added first in the func/conftest
module.
:param datasett: from func/conftest
:return: | tests/func/test_complete_habit.py | test_overdue_habit | takavarasha-desire/habittracker1_1 | python | def test_overdue_habit(datasett):
"\n please note the 'double tt' for datasett. This stands to differentiate\n the functional test data from the data used for unit tests.\n habit 1 is the overdue habit since its added first in the func/conftest\n module.\n :param datasett: from func/conftest\n :re... |
def test_a_habit_due_for_completion(datasett):
'\n habit 2 is the due habit since its added second in the func/conftest\n module.\n :param datasett: from func/conftest\n :return:\n '
session = datasett
complete(2, session)
result = session.query(HabitHistory.streak).filter((HabitHistory.h... | 3,921,509,153,605,490,000 | habit 2 is the due habit since its added second in the func/conftest
module.
:param datasett: from func/conftest
:return: | tests/func/test_complete_habit.py | test_a_habit_due_for_completion | takavarasha-desire/habittracker1_1 | python | def test_a_habit_due_for_completion(datasett):
'\n habit 2 is the due habit since its added second in the func/conftest\n module.\n :param datasett: from func/conftest\n :return:\n '
session = datasett
complete(2, session)
result = session.query(HabitHistory.streak).filter((HabitHistory.h... |
def __init__(self, *, hass, logger, domain, platform_name, platform, scan_interval, entity_namespace, async_entities_added_callback):
'Initialize the entity platform.\n\n hass: HomeAssistant\n logger: Logger\n domain: str\n platform_name: str\n scan_interval: timedelta\n en... | -3,546,419,058,523,400,000 | Initialize the entity platform.
hass: HomeAssistant
logger: Logger
domain: str
platform_name: str
scan_interval: timedelta
entity_namespace: str
async_entities_added_callback: @callback method | homeassistant/helpers/entity_platform.py | __init__ | crazyfish1111/home-assistant | python | def __init__(self, *, hass, logger, domain, platform_name, platform, scan_interval, entity_namespace, async_entities_added_callback):
'Initialize the entity platform.\n\n hass: HomeAssistant\n logger: Logger\n domain: str\n platform_name: str\n scan_interval: timedelta\n en... |
def _get_parallel_updates_semaphore(self):
'Get or create a semaphore for parallel updates.'
if (self.parallel_updates_semaphore is None):
self.parallel_updates_semaphore = asyncio.Semaphore((self.parallel_updates if self.parallel_updates else 1), loop=self.hass.loop)
return self.parallel_updates_se... | 2,508,172,302,676,324,400 | Get or create a semaphore for parallel updates. | homeassistant/helpers/entity_platform.py | _get_parallel_updates_semaphore | crazyfish1111/home-assistant | python | def _get_parallel_updates_semaphore(self):
if (self.parallel_updates_semaphore is None):
self.parallel_updates_semaphore = asyncio.Semaphore((self.parallel_updates if self.parallel_updates else 1), loop=self.hass.loop)
return self.parallel_updates_semaphore |
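The semaphore row above lazily creates one `asyncio.Semaphore` per platform so that at most `parallel_updates` entity updates run at once. The gist of that throttling pattern, reduced to plain asyncio — the class name and the update body are illustrative, not Home Assistant's actual code (which also passed the now-deprecated `loop=` argument):

```python
import asyncio


class Platform:
    def __init__(self, parallel_updates=2):
        self.parallel_updates = parallel_updates
        self.parallel_updates_semaphore = None

    def _get_parallel_updates_semaphore(self):
        # Created on first use so it binds to the running event loop.
        if self.parallel_updates_semaphore is None:
            self.parallel_updates_semaphore = asyncio.Semaphore(
                self.parallel_updates if self.parallel_updates else 1)
        return self.parallel_updates_semaphore

    async def update_entity(self, name):
        # At most `parallel_updates` bodies run concurrently.
        async with self._get_parallel_updates_semaphore():
            await asyncio.sleep(0.1)  # stand-in for the real I/O update
            print(f"updated {name}")


async def main():
    p = Platform(parallel_updates=2)
    await asyncio.gather(*(p.update_entity(f"e{i}") for i in range(5)))


asyncio.run(main())
```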
async def async_setup(self, platform_config, discovery_info=None):
'Set up the platform from a config file.'
platform = self.platform
hass = self.hass
@callback
def async_create_setup_task():
'Get task to set up platform.'
if getattr(platform, 'async_setup_platform', None):
... | 6,370,612,533,691,341,000 | Set up the platform from a config file. | homeassistant/helpers/entity_platform.py | async_setup | crazyfish1111/home-assistant | python | async def async_setup(self, platform_config, discovery_info=None):
platform = self.platform
hass = self.hass
@callback
def async_create_setup_task():
'Get task to set up platform.'
if getattr(platform, 'async_setup_platform', None):
return platform.async_setup_platform(... |
async def async_setup_entry(self, config_entry):
'Set up the platform from a config entry.'
self.config_entry = config_entry
platform = self.platform
@callback
def async_create_setup_task():
'Get task to set up platform.'
return platform.async_setup_entry(self.hass, config_entry, se... | 75,007,586,245,701,980 | Set up the platform from a config entry. | homeassistant/helpers/entity_platform.py | async_setup_entry | crazyfish1111/home-assistant | python | async def async_setup_entry(self, config_entry):
self.config_entry = config_entry
platform = self.platform
@callback
def async_create_setup_task():
'Get task to set up platform.'
return platform.async_setup_entry(self.hass, config_entry, self._async_schedule_add_entities)
retur... |
async def _async_setup_platform(self, async_create_setup_task, tries=0):
'Set up a platform via config file or config entry.\n\n async_create_setup_task creates a coroutine that sets up platform.\n '
logger = self.logger
hass = self.hass
full_name = '{}.{}'.format(self.domain, self.platfor... | -8,883,834,158,884,943,000 | Set up a platform via config file or config entry.
async_create_setup_task creates a coroutine that sets up platform. | homeassistant/helpers/entity_platform.py | _async_setup_platform | crazyfish1111/home-assistant | python | async def _async_setup_platform(self, async_create_setup_task, tries=0):
'Set up a platform via config file or config entry.\n\n async_create_setup_task creates a coroutine that sets up platform.\n '
logger = self.logger
hass = self.hass
full_name = '{}.{}'.format(self.domain, self.platfor... |
def _schedule_add_entities(self, new_entities, update_before_add=False):
'Schedule adding entities for a single platform, synchronously.'
run_callback_threadsafe(self.hass.loop, self._async_schedule_add_entities, list(new_entities), update_before_add).result() | 7,908,124,374,192,280,000 | Schedule adding entities for a single platform, synchronously. | homeassistant/helpers/entity_platform.py | _schedule_add_entities | crazyfish1111/home-assistant | python | def _schedule_add_entities(self, new_entities, update_before_add=False):
run_callback_threadsafe(self.hass.loop, self._async_schedule_add_entities, list(new_entities), update_before_add).result() |
@callback
def _async_schedule_add_entities(self, new_entities, update_before_add=False):
'Schedule adding entities for a single platform async.'
self._tasks.append(self.hass.async_add_job(self.async_add_entities(new_entities, update_before_add=update_before_add))) | 6,827,352,441,585,063,000 | Schedule adding entities for a single platform async. | homeassistant/helpers/entity_platform.py | _async_schedule_add_entities | crazyfish1111/home-assistant | python | @callback
def _async_schedule_add_entities(self, new_entities, update_before_add=False):
self._tasks.append(self.hass.async_add_job(self.async_add_entities(new_entities, update_before_add=update_before_add))) |
def add_entities(self, new_entities, update_before_add=False):
'Add entities for a single platform.'
if update_before_add:
self.logger.warning("Call 'add_entities' with update_before_add=True only inside tests or you can run into a deadlock!")
run_coroutine_threadsafe(self.async_add_entities(list(ne... | -443,141,501,391,420,860 | Add entities for a single platform. | homeassistant/helpers/entity_platform.py | add_entities | crazyfish1111/home-assistant | python | def add_entities(self, new_entities, update_before_add=False):
if update_before_add:
self.logger.warning("Call 'add_entities' with update_before_add=True only inside tests or you can run into a deadlock!")
run_coroutine_threadsafe(self.async_add_entities(list(new_entities), update_before_add), self... |
async def async_add_entities(self, new_entities, update_before_add=False):
'Add entities for a single platform async.\n\n This method must be run in the event loop.\n '
if (not new_entities):
return
hass = self.hass
device_registry = (await hass.helpers.device_registry.async_get_re... | -4,472,886,937,978,459,600 | Add entities for a single platform async.
This method must be run in the event loop. | homeassistant/helpers/entity_platform.py | async_add_entities | crazyfish1111/home-assistant | python | async def async_add_entities(self, new_entities, update_before_add=False):
'Add entities for a single platform async.\n\n This method must be run in the event loop.\n '
if (not new_entities):
return
hass = self.hass
device_registry = (await hass.helpers.device_registry.async_get_re... |
async def _async_add_entity(self, entity, update_before_add, entity_registry, device_registry):
'Add an entity to the platform.'
if (entity is None):
raise ValueError('Entity cannot be None')
entity.hass = self.hass
entity.platform = self
if (hasattr(entity, 'async_update') and (not self.par... | 530,176,300,249,078,340 | Add an entity to the platform. | homeassistant/helpers/entity_platform.py | _async_add_entity | crazyfish1111/home-assistant | python | async def _async_add_entity(self, entity, update_before_add, entity_registry, device_registry):
if (entity is None):
raise ValueError('Entity cannot be None')
entity.hass = self.hass
entity.platform = self
if (hasattr(entity, 'async_update') and (not self.parallel_updates)):
entity.... |
async def async_reset(self):
'Remove all entities and reset data.\n\n This method must be run in the event loop.\n '
if (self._async_cancel_retry_setup is not None):
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
if (not self.entities):
return
... | -510,075,945,936,083,100 | Remove all entities and reset data.
This method must be run in the event loop. | homeassistant/helpers/entity_platform.py | async_reset | crazyfish1111/home-assistant | python | async def async_reset(self):
'Remove all entities and reset data.\n\n This method must be run in the event loop.\n '
if (self._async_cancel_retry_setup is not None):
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
if (not self.entities):
return
... |
async def async_remove_entity(self, entity_id):
'Remove entity id from platform.'
(await self.entities[entity_id].async_remove())
if ((self._async_unsub_polling is not None) and (not any((entity.should_poll for entity in self.entities.values())))):
self._async_unsub_polling()
self._async_uns... | -7,593,386,608,796,709,000 | Remove entity id from platform. | homeassistant/helpers/entity_platform.py | async_remove_entity | crazyfish1111/home-assistant | python | async def async_remove_entity(self, entity_id):
(await self.entities[entity_id].async_remove())
if ((self._async_unsub_polling is not None) and (not any((entity.should_poll for entity in self.entities.values())))):
self._async_unsub_polling()
self._async_unsub_polling = None |
async def _update_entity_states(self, now):
'Update the states of all the polling entities.\n\n To protect from flooding the executor, we will update async entities\n in parallel and other entities sequential.\n\n This method must be run in the event loop.\n '
if self._process_update... | 7,350,641,399,040,290,000 | Update the states of all the polling entities.
To protect from flooding the executor, we will update async entities
in parallel and other entities sequential.
This method must be run in the event loop. | homeassistant/helpers/entity_platform.py | _update_entity_states | crazyfish1111/home-assistant | python | async def _update_entity_states(self, now):
'Update the states of all the polling entities.\n\n To protect from flooding the executor, we will update async entities\n in parallel and other entities sequential.\n\n This method must be run in the event loop.\n '
if self._process_update... |
@callback
def async_create_setup_task():
'Get task to set up platform.'
if getattr(platform, 'async_setup_platform', None):
return platform.async_setup_platform(hass, platform_config, self._async_schedule_add_entities, discovery_info)
return hass.loop.run_in_executor(None, platform.setup_platform, h... | 9,092,128,761,817,666,000 | Get task to set up platform. | homeassistant/helpers/entity_platform.py | async_create_setup_task | crazyfish1111/home-assistant | python | @callback
def async_create_setup_task():
if getattr(platform, 'async_setup_platform', None):
return platform.async_setup_platform(hass, platform_config, self._async_schedule_add_entities, discovery_info)
return hass.loop.run_in_executor(None, platform.setup_platform, hass, platform_config, self._sc... |
@callback
def async_create_setup_task():
'Get task to set up platform.'
return platform.async_setup_entry(self.hass, config_entry, self._async_schedule_add_entities) | -284,641,014,274,873,100 | Get task to set up platform. | homeassistant/helpers/entity_platform.py | async_create_setup_task | crazyfish1111/home-assistant | python | @callback
def async_create_setup_task():
return platform.async_setup_entry(self.hass, config_entry, self._async_schedule_add_entities) |
async def setup_again(now):
'Run setup again.'
self._async_cancel_retry_setup = None
(await self._async_setup_platform(async_create_setup_task, tries)) | -514,513,532,165,713,860 | Run setup again. | homeassistant/helpers/entity_platform.py | setup_again | crazyfish1111/home-assistant | python | async def setup_again(now):
self._async_cancel_retry_setup = None
(await self._async_setup_platform(async_create_setup_task, tries)) |
@with_cupy_rmm
def fit(self, X):
'\n Fit a multi-node multi-GPU KMeans model\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Training data to cluster.\n\n '
data = DistributedDataHandler.create(X, client=self.client)
self.datatype =... | 7,721,958,996,140,420,000 | Fit a multi-node multi-GPU KMeans model
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Training data to cluster. | python/cuml/dask/cluster/kmeans.py | fit | Chetank99/cuml | python | @with_cupy_rmm
def fit(self, X):
'\n Fit a multi-node multi-GPU KMeans model\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Training data to cluster.\n\n '
data = DistributedDataHandler.create(X, client=self.client)
self.datatype =... |
def fit_predict(self, X, delayed=True):
'\n Compute cluster centers and predict cluster index for each sample.\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n Returns\n -------\n result: Dask cuDF DataF... | 6,022,462,453,244,419,000 | Compute cluster centers and predict cluster index for each sample.
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing predictions | python/cuml/dask/cluster/kmeans.py | fit_predict | Chetank99/cuml | python | def fit_predict(self, X, delayed=True):
'\n Compute cluster centers and predict cluster index for each sample.\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n Returns\n -------\n result: Dask cuDF DataF... |
def predict(self, X, delayed=True):
'\n Predict labels for the input\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to do a lazy prediction (and return Delayed ob... | -6,130,491,462,909,309,000 | Predict labels for the input
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to do a lazy prediction (and return Delayed objects) or an
eagerly executed one.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Arr... | python/cuml/dask/cluster/kmeans.py | predict | Chetank99/cuml | python | def predict(self, X, delayed=True):
'\n Predict labels for the input\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to do a lazy prediction (and return Delayed ob... |
def fit_transform(self, X, delayed=True):
'\n Calls fit followed by transform using a distributed KMeans model\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to e... | 2,970,504,870,052,390,000 | Calls fit followed by transform using a distributed KMeans model
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to execute as a delayed task or eager.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Dis... | python/cuml/dask/cluster/kmeans.py | fit_transform | Chetank99/cuml | python | def fit_transform(self, X, delayed=True):
'\n Calls fit followed by transform using a distributed KMeans model\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to e... |
def transform(self, X, delayed=True):
'\n Transforms the input into the learned centroid space\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to execute as a dela... | -7,165,475,942,176,801,000 | Transforms the input into the learned centroid space
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to execute as a delayed task or eager.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed obj... | python/cuml/dask/cluster/kmeans.py | transform | Chetank99/cuml | python | def transform(self, X, delayed=True):
'\n Transforms the input into the learned centroid space\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to execute as a dela... |
@with_cupy_rmm
def score(self, X):
'\n Computes the inertia score for the trained KMeans centroids.\n\n Parameters\n ----------\n X : dask_cudf.Dataframe\n Dataframe to compute score\n\n Returns\n -------\n\n Inertial score\n '
scores = self._ru... | 5,906,948,693,175,010,000 | Computes the inertia score for the trained KMeans centroids.
Parameters
----------
X : dask_cudf.Dataframe
Dataframe to compute score
Returns
-------
Inertia score | python/cuml/dask/cluster/kmeans.py | score | Chetank99/cuml | python | @with_cupy_rmm
def score(self, X):
'\n Computes the inertia score for the trained KMeans centroids.\n\n Parameters\n ----------\n X : dask_cudf.Dataframe\n Dataframe to compute score\n\n Returns\n -------\n\n Inertial score\n '
scores = self._ru... |
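Taken together, the six KMeans rows form the usual estimator surface (`fit`, `fit_predict`, `predict`, `fit_transform`, `transform`, `score`) over Dask collections. A hedged end-to-end sketch: it assumes a machine with NVIDIA GPUs plus the `dask-cuda`, `dask`, `cupy`, and `cuml` packages, and the exact constructor keywords may differ across cuml versions:

```python
import cupy
import dask.array as da
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from cuml.dask.cluster import KMeans

cluster = LocalCUDACluster()   # one Dask worker per visible GPU
client = Client(cluster)

# CuPy-backed Dask array of random points; the docstrings above say
# Dask cuDF DataFrames are accepted as well.
X = da.random.random((10_000, 16), chunks=(2_500, 16)).map_blocks(cupy.asarray)

km = KMeans(n_clusters=8, client=client)
km.fit(X)
labels = km.predict(X)         # lazy by default (delayed=True)
print(labels.compute()[:10])
print(km.score(X))             # inertia-based score of the fitted centroids
```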
def parse_rec(filename):
' Parse a PASCAL VOC xml file '
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find(... | -1,181,628,649,275,111,700 | Parse a PASCAL VOC xml file | eval.py | parse_rec | FLyingLSJ/ssd.pytorch | python | def parse_rec(filename):
' '
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
... |
def get_output_dir(name, phase):
'Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n A canonical path is built using the name from an imdb and a network\n (if not None).\n '
filedir = os.path.join(name, phase)
if (not os.path.exists(... | -4,561,549,611,072,020,500 | Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None). | eval.py | get_output_dir | FLyingLSJ/ssd.pytorch | python | def get_output_dir(name, phase):
'Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n A canonical path is built using the name from an imdb and a network\n (if not None).\n '
filedir = os.path.join(name, phase)
if (not os.path.exists(... |
def voc_ap(rec, prec, use_07_metric=True):
' ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:True).\n '
if use_07_metric:
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
... | -5,061,982,948,125,241,000 | ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:True). | eval.py | voc_ap | FLyingLSJ/ssd.pytorch | python | def voc_ap(rec, prec, use_07_metric=True):
' ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:True).\n '
if use_07_metric:
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
... |
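The `voc_ap` row computes average precision; with `use_07_metric=True` it uses the PASCAL VOC 2007 11-point interpolation, which the truncated body begins to show. A self-contained version of just that 11-point branch:

```python
import numpy as np


def voc_ap_07(rec, prec):
    """11-point interpolated AP: the mean of the best precision at
    recall >= t, for t in {0.0, 0.1, ..., 1.0}."""
    ap = 0.0
    for t in np.arange(0.0, 1.1, 0.1):
        if np.sum(rec >= t) == 0:
            p = 0.0                     # no detections reach this recall
        else:
            p = np.max(prec[rec >= t])  # best precision at recall >= t
        ap += p / 11.0
    return ap


rec = np.array([0.2, 0.4, 0.6, 0.8])
prec = np.array([1.0, 0.9, 0.7, 0.5])
print(voc_ap_07(rec, prec))  # ~0.6545
```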
def voc_eval(detpath, annopath, imagesetfile, classname, cachedir, ovthresh=0.5, use_07_metric=True):
"rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n ... | 562,733,316,720,542,660 | rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
... | eval.py | voc_eval | FLyingLSJ/ssd.pytorch | python | def voc_eval(detpath, annopath, imagesetfile, classname, cachedir, ovthresh=0.5, use_07_metric=True):
"rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n ... |
def __init__(self, minconn, maxconn, *args, **kwargs):
"Initialize the connection pool.\n\n New 'minconn' connections are created immediately calling 'connfunc'\n with given parameters. The connection pool will support a maximum of\n about 'maxconn' connections. \n "
self.minc... | 1,293,587,767,893,814,000 | Initialize the connection pool.
New 'minconn' connections are created immediately calling 'connfunc'
with given parameters. The connection pool will support a maximum of
about 'maxconn' connections. | lexis/Lib/site-packages/psycopg2/pool.py | __init__ | ALEXIS2ES/sherom-Serve | python | def __init__(self, minconn, maxconn, *args, **kwargs):
"Initialize the connection pool.\n\n New 'minconn' connections are created immediately calling 'connfunc'\n with given parameters. The connection pool will support a maximum of\n about 'maxconn' connections. \n "
self.minc... |
def _connect(self, key=None):
"Create a new connection and assign it to 'key' if not None."
conn = psycopg2.connect(*self._args, **self._kwargs)
if (key is not None):
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn | 5,585,987,887,297,364,000 | Create a new connection and assign it to 'key' if not None. | lexis/Lib/site-packages/psycopg2/pool.py | _connect | ALEXIS2ES/sherom-Serve | python | def _connect(self, key=None):
conn = psycopg2.connect(*self._args, **self._kwargs)
if (key is not None):
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn |
def _getkey(self):
'Return a new unique key.'
self._keys += 1
return self._keys | -2,913,718,119,693,489,700 | Return a new unique key. | lexis/Lib/site-packages/psycopg2/pool.py | _getkey | ALEXIS2ES/sherom-Serve | python | def _getkey(self):
self._keys += 1
return self._keys |
def _getconn(self, key=None):
"Get a free connection and assign it to 'key' if not None."
if self.closed:
raise PoolError('connection pool is closed')
if (key is None):
key = self._getkey()
if (key in self._used):
return self._used[key]
if self._pool:
self._used[key] ... | -1,052,344,869,246,796,800 | Get a free connection and assign it to 'key' if not None. | lexis/Lib/site-packages/psycopg2/pool.py | _getconn | ALEXIS2ES/sherom-Serve | python | def _getconn(self, key=None):
if self.closed:
raise PoolError('connection pool is closed')
if (key is None):
key = self._getkey()
if (key in self._used):
return self._used[key]
if self._pool:
self._used[key] = conn = self._pool.pop()
self._rused[id(conn)] = k... |
def _putconn(self, conn, key=None, close=False):
'Put away a connection.'
if self.closed:
raise PoolError('connection pool is closed')
if (key is None):
key = self._rused.get(id(conn))
if (not key):
raise PoolError('trying to put unkeyed connection')
if ((len(self._pool) < se... | 1,155,863,612,707,922,400 | Put away a connection. | lexis/Lib/site-packages/psycopg2/pool.py | _putconn | ALEXIS2ES/sherom-Serve | python | def _putconn(self, conn, key=None, close=False):
if self.closed:
raise PoolError('connection pool is closed')
if (key is None):
key = self._rused.get(id(conn))
if (not key):
raise PoolError('trying to put unkeyed connection')
if ((len(self._pool) < self.minconn) and (not clo... |
def _closeall(self):
'Close all connections.\n\n Note that this can lead to some code fail badly when trying to use\n an already closed connection. If you call .closeall() make sure\n your code can deal with it.\n '
if self.closed:
raise PoolError('connection pool is closed')... | 433,966,829,568,226,200 | Close all connections.
Note that this can lead to some code fail badly when trying to use
an already closed connection. If you call .closeall() make sure
your code can deal with it. | lexis/Lib/site-packages/psycopg2/pool.py | _closeall | ALEXIS2ES/sherom-Serve | python | def _closeall(self):
'Close all connections.\n\n Note that this can lead to some code fail badly when trying to use\n an already closed connection. If you call .closeall() make sure\n your code can deal with it.\n '
if self.closed:
raise PoolError('connection pool is closed')... |
def __init__(self, minconn, maxconn, *args, **kwargs):
'Initialize the threading lock.'
import threading
AbstractConnectionPool.__init__(self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock() | 8,024,484,810,999,034,000 | Initialize the threading lock. | lexis/Lib/site-packages/psycopg2/pool.py | __init__ | ALEXIS2ES/sherom-Serve | python | def __init__(self, minconn, maxconn, *args, **kwargs):
import threading
AbstractConnectionPool.__init__(self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock() |
def getconn(self, key=None):
"Get a free connection and assign it to 'key' if not None."
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release() | 6,270,094,374,509,713,000 | Get a free connection and assign it to 'key' if not None. | lexis/Lib/site-packages/psycopg2/pool.py | getconn | ALEXIS2ES/sherom-Serve | python | def getconn(self, key=None):
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release() |
def putconn(self, conn=None, key=None, close=False):
'Put away an unused connection.'
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release() | -2,805,035,333,017,517,600 | Put away an unused connection. | lexis/Lib/site-packages/psycopg2/pool.py | putconn | ALEXIS2ES/sherom-Serve | python | def putconn(self, conn=None, key=None, close=False):
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release() |
def closeall(self):
'Close all connections (even the one currently in use.)'
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release() | 8,940,636,885,304,963,000 | Close all connections (even the one currently in use.) | lexis/Lib/site-packages/psycopg2/pool.py | closeall | ALEXIS2ES/sherom-Serve | python | def closeall(self):
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release() |
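The pool rows above implement psycopg2's classic pattern: `_connect` either keys a connection to a client or appends it to the free list, and `ThreadedConnectionPool` simply wraps the abstract `_getconn`/`_putconn`/`_closeall` in a lock. Typical client-side usage — the DSN below is a placeholder:

```python
from psycopg2.pool import ThreadedConnectionPool

# 2 connections opened eagerly, at most 10 in total.
pool = ThreadedConnectionPool(
    2, 10, dsn="dbname=test user=postgres password=secret")  # placeholder DSN

conn = pool.getconn()
try:
    with conn.cursor() as cur:
        cur.execute("SELECT 1")
        print(cur.fetchone())
finally:
    pool.putconn(conn)  # return it; pass close=True to discard instead

pool.closeall()         # also closes connections currently in use
```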
def __init__(self, minconn, maxconn, *args, **kwargs):
'Initialize the threading lock.'
import warnings
warnings.warn('deprecated: use ZPsycopgDA.pool implementation', DeprecationWarning)
import threading
AbstractConnectionPool.__init__(self, minconn, maxconn, *args, **kwargs)
self._lock = threa... | -4,742,599,862,310,846,000 | Initialize the threading lock. | lexis/Lib/site-packages/psycopg2/pool.py | __init__ | ALEXIS2ES/sherom-Serve | python | def __init__(self, minconn, maxconn, *args, **kwargs):
import warnings
warnings.warn('deprecated: use ZPsycopgDA.pool implementation', DeprecationWarning)
import threading
AbstractConnectionPool.__init__(self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
import _thread a... |
def getconn(self):
'Generate thread id and return a connection.'
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release() | 7,005,839,141,883,069,000 | Generate thread id and return a connection. | lexis/Lib/site-packages/psycopg2/pool.py | getconn | ALEXIS2ES/sherom-Serve | python | def getconn(self):
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release() |
def putconn(self, conn=None, close=False):
'Put away an unused connection.'
key = self.__thread.get_ident()
self._lock.acquire()
try:
if (not conn):
conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release() | 2,892,461,049,250,483,700 | Put away an unused connection. | lexis/Lib/site-packages/psycopg2/pool.py | putconn | ALEXIS2ES/sherom-Serve | python | def putconn(self, conn=None, close=False):
key = self.__thread.get_ident()
self._lock.acquire()
try:
if (not conn):
conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release() |
def closeall(self):
'Close all connections (even the one currently in use.)'
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release() | 8,940,636,885,304,963,000 | Close all connections (even the one currently in use.) | lexis/Lib/site-packages/psycopg2/pool.py | closeall | ALEXIS2ES/sherom-Serve | python | def closeall(self):
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release() |
def from_ppc(ppc, f_hz=50, validate_conversion=False, **kwargs):
'\n This function converts pypower case files to pandapower net structure.\n\n INPUT:\n\n **ppc** : The pypower case file.\n\n OPTIONAL:\n\n **f_hz** (float, 50) - The frequency of the network.\n\n **validate_conversion**... | -607,897,207,075,075,600 | This function converts pypower case files to pandapower net structure.
INPUT:
**ppc** : The pypower case file.
OPTIONAL:
**f_hz** (float, 50) - The frequency of the network.
**validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion.
For running the validation, t... | pandapower/converter/pypower/from_ppc.py | from_ppc | BaraaUniKassel/pandapower | python | def from_ppc(ppc, f_hz=50, validate_conversion=False, **kwargs):
'\n This function converts pypower case files to pandapower net structure.\n\n INPUT:\n\n **ppc** : The pypower case file.\n\n OPTIONAL:\n\n **f_hz** (float, 50) - The frequency of the network.\n\n **validate_conversion**... |
def validate_from_ppc(ppc_net, net, pf_type='runpp', max_diff_values={'bus_vm_pu': 1e-06, 'bus_va_degree': 1e-05, 'branch_p_mw': 1e-06, 'branch_q_mvar': 1e-06, 'gen_p_mw': 1e-06, 'gen_q_mvar': 1e-06}, run=True):
'\n This function validates the pypower case files to pandapower net structure conversion via a c... | -2,964,167,797,680,866,300 | This function validates the pypower case files to pandapower net structure conversion via a comparison of loadflow calculation results. (Hence the opf cost conversion is not validated.)
INPUT:
**ppc_net** - The pypower case file, which must already contain the pypower powerflow
results or pypower must... | pandapower/converter/pypower/from_ppc.py | validate_from_ppc | BaraaUniKassel/pandapower | python | def validate_from_ppc(ppc_net, net, pf_type='runpp', max_diff_values={'bus_vm_pu': 1e-06, 'bus_va_degree': 1e-05, 'branch_p_mw': 1e-06, 'branch_q_mvar': 1e-06, 'gen_p_mw': 1e-06, 'gen_q_mvar': 1e-06}, run=True):
'\n This function validates the pypower case files to pandapower net structure conversion via a c... |
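The two converter rows pair naturally: `from_ppc` builds the pandapower net, and `validate_from_ppc` compares power-flow results between the two representations. A hedged sketch using a PYPOWER built-in case — it assumes the `pypower` and `pandapower` packages are installed, and `case9` is one of PYPOWER's standard example cases:

```python
from pypower.case9 import case9
from pandapower.converter import from_ppc, validate_from_ppc

ppc = case9()                 # standard 9-bus PYPOWER case
net = from_ppc(ppc, f_hz=50)  # convert to a pandapower network

# With run=True this runs pandapower's runpp (and pypower's power flow)
# and checks bus/branch/gen results against the default tolerances.
ok = validate_from_ppc(ppc, net, pf_type="runpp")
print("conversion matches power flow results:", ok)
```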
def _get_zh_a_page_count() -> int:
'\n 所有股票的总页数\n http://vip.stock.finance.sina.com.cn/mkt/#hs_a\n :return: 需要抓取的股票总页数\n :rtype: int\n '
res = requests.get(zh_sina_a_stock_count_url)
page_count = (int(re.findall(re.compile('\\d+'), res.text)[0]) / 80)
if isinstance(page_count, int):
... | 5,514,657,700,420,927,000 | Total number of pages for all stocks
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
:return: total number of pages to crawl
:rtype: int
'\n 所有股票的总页数\n http://vip.stock.finance.sina.com.cn/mkt/#hs_a\n :return: 需要抓取的股票总页数\n :rtype: int\n '
res = requests.get(zh_sina_a_stock_count_url)
page_count = (int(re.findall(re.compile('\\d+'), res.text)[0]) / 80)
if isinstance(page_count, int):
... |
def stock_zh_a_spot() -> pd.DataFrame:
'\n 从新浪财经-A股获取所有A股的实时行情数据, 重复运行本函数会被新浪暂时封 IP\n http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk\n :return: pandas.DataFrame\n symbol code name trade pricechange changepercent buy 0 sh600000 600000 浦发银行 12.920 -0.030 -0... | -3,537,146,474,981,795,300 | Fetch real-time quotes for all A-share stocks from Sina Finance; running this function repeatedly will get your IP temporarily banned by Sina
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: pandas.DataFrame
symbol code name trade pricechange changepercent buy 0 sh600000 600000 浦发银行 12.920 -0.030 -0.232 12.920
1 sh600004 600004 白云机场 18.110 -0.370 ... | akshare/stock/zh_stock_a_sina.py | stock_zh_a_spot | fellowfun/akshare | python | def stock_zh_a_spot() -> pd.DataFrame:
'\n 从新浪财经-A股获取所有A股的实时行情数据, 重复运行本函数会被新浪暂时封 IP\n http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk\n :return: pandas.DataFrame\n symbol code name trade pricechange changepercent buy 0 sh600000 600000 浦发银行 12.920 -0.030 -0... |
def stock_zh_a_daily(symbol: str='sz000613', adjust: str='qfq') -> pd.DataFrame:
'\n 新浪财经-A股-个股的历史行情数据, 大量抓取容易封IP\n :param symbol: sh600000\n :type symbol: str\n :param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; hfq-factor: 返回前复权因子\n :type adjust: str\n :return: s... | 1,219,581,013,612,928,500 | Historical daily quotes for an individual A-share stock from Sina Finance; heavy scraping easily gets an IP temporarily banned
:param symbol: sh600000
:type symbol: str
:param adjust: empty by default: return unadjusted data; qfq: return forward-adjusted data; hfq: return back-adjusted data; hfq-factor: return back-adjustment factors; qfq-factor: return forward-adjustment factors
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame | akshare/stock/zh_stock_a_sina.py | stock_zh_a_daily | fellowfun/akshare | python | def stock_zh_a_daily(symbol: str='sz000613', adjust: str='qfq') -> pd.DataFrame:
'\n 新浪财经-A股-个股的历史行情数据, 大量抓取容易封IP\n :param symbol: sh600000\n :type symbol: str\n :param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; hfq-factor: 返回前复权因子\n :type adjust: str\n :return: s... |
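The three akshare rows form one pipeline: `_get_zh_a_page_count` sizes the crawl, `stock_zh_a_spot` pages through real-time quotes, and `stock_zh_a_daily` pulls one symbol's history. Typical usage of the two public functions — results depend on Sina's live endpoints, and heavy use can get an IP temporarily banned, per the docstrings above:

```python
import akshare as ak

# Daily history for sh600000 (Pudong Development Bank),
# with forward-adjusted (qfq) prices.
daily_df = ak.stock_zh_a_daily(symbol="sh600000", adjust="qfq")
print(daily_df.tail())

# Snapshot of real-time quotes for all A-shares; this crawls many
# pages, so call it sparingly.
spot_df = ak.stock_zh_a_spot()
print(spot_df.head())
```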
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None) -> None:
'Old way of setting up HomematicIP Cloud lights.'
pass | -2,221,725,257,671,890,000 | Old way of setting up HomematicIP Cloud lights. | homeassistant/components/homematicip_cloud/light.py | async_setup_platform | 0x00-0xFF/home-assistant | python | async def async_setup_platform(hass, config, async_add_entities, discovery_info=None) -> None:
pass |
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities) -> None:
'Set up the HomematicIP Cloud lights from a config entry.'
hap = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]]
entities = []
for device in hap.home.devices:
if isinstance(device... | 481,496,749,042,861,000 | Set up the HomematicIP Cloud lights from a config entry. | homeassistant/components/homematicip_cloud/light.py | async_setup_entry | 0x00-0xFF/home-assistant | python | async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities) -> None:
hap = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]]
entities = []
for device in hap.home.devices:
if isinstance(device, AsyncBrandSwitchMeasuring):
entities.append(... |
def _convert_color(color: tuple) -> RGBColorState:
'\n Convert the given color to the reduced RGBColorState color.\n\n RGBColorStat contains only 8 colors including white and black,\n so a conversion is required.\n '
if (color is None):
return RGBColorState.WHITE
hue = int(color[0])
... | 3,999,648,746,070,601,000 | Convert the given color to the reduced RGBColorState color.
RGBColorStat contains only 8 colors including white and black,
so a conversion is required. | homeassistant/components/homematicip_cloud/light.py | _convert_color | 0x00-0xFF/home-assistant | python | def _convert_color(color: tuple) -> RGBColorState:
'\n Convert the given color to the reduced RGBColorState color.\n\n RGBColorStat contains only 8 colors including white and black,\n so a conversion is required.\n '
if (color is None):
return RGBColorState.WHITE
hue = int(color[0])
... |
def __init__(self, hap: HomematicipHAP, device) -> None:
'Initialize the light device.'
super().__init__(hap, device) | 4,148,022,420,929,488,400 | Initialize the light device. | homeassistant/components/homematicip_cloud/light.py | __init__ | 0x00-0xFF/home-assistant | python | def __init__(self, hap: HomematicipHAP, device) -> None:
super().__init__(hap, device) |
@property
def is_on(self) -> bool:
'Return true if device is on.'
return self._device.on | -2,283,132,927,271,933,000 | Return true if device is on. | homeassistant/components/homematicip_cloud/light.py | is_on | 0x00-0xFF/home-assistant | python | @property
def is_on(self) -> bool:
return self._device.on |
async def async_turn_on(self, **kwargs) -> None:
'Turn the device on.'
(await self._device.turn_on()) | 2,166,206,960,677,107,000 | Turn the device on. | homeassistant/components/homematicip_cloud/light.py | async_turn_on | 0x00-0xFF/home-assistant | python | async def async_turn_on(self, **kwargs) -> None:
(await self._device.turn_on()) |
async def async_turn_off(self, **kwargs) -> None:
'Turn the device off.'
(await self._device.turn_off()) | 155,385,039,799,394,780 | Turn the device off. | homeassistant/components/homematicip_cloud/light.py | async_turn_off | 0x00-0xFF/home-assistant | python | async def async_turn_off(self, **kwargs) -> None:
(await self._device.turn_off()) |
@property
def device_state_attributes(self) -> Dict[(str, Any)]:
'Return the state attributes of the generic device.'
state_attr = super().device_state_attributes
current_power_w = self._device.currentPowerConsumption
if (current_power_w > 0.05):
state_attr[ATTR_CURRENT_POWER_W] = round(current_... | -3,098,059,166,993,918,000 | Return the state attributes of the generic device. | homeassistant/components/homematicip_cloud/light.py | device_state_attributes | 0x00-0xFF/home-assistant | python | @property
def device_state_attributes(self) -> Dict[(str, Any)]:
state_attr = super().device_state_attributes
current_power_w = self._device.currentPowerConsumption
if (current_power_w > 0.05):
state_attr[ATTR_CURRENT_POWER_W] = round(current_power_w, 2)
state_attr[ATTR_TODAY_ENERGY_KWH] = ... |
def __init__(self, hap: HomematicipHAP, device) -> None:
'Initialize the dimmer light device.'
super().__init__(hap, device) | 4,226,430,284,465,216,000 | Initialize the dimmer light device. | homeassistant/components/homematicip_cloud/light.py | __init__ | 0x00-0xFF/home-assistant | python | def __init__(self, hap: HomematicipHAP, device) -> None:
super().__init__(hap, device) |
@property
def is_on(self) -> bool:
'Return true if device is on.'
return ((self._device.dimLevel is not None) and (self._device.dimLevel > 0.0)) | -6,862,420,167,665,377,000 | Return true if device is on. | homeassistant/components/homematicip_cloud/light.py | is_on | 0x00-0xFF/home-assistant | python | @property
def is_on(self) -> bool:
return ((self._device.dimLevel is not None) and (self._device.dimLevel > 0.0)) |
@property
def brightness(self) -> int:
'Return the brightness of this light between 0..255.'
return int(((self._device.dimLevel or 0.0) * 255)) | 4,879,828,942,923,381,000 | Return the brightness of this light between 0..255. | homeassistant/components/homematicip_cloud/light.py | brightness | 0x00-0xFF/home-assistant | python | @property
def brightness(self) -> int:
return int(((self._device.dimLevel or 0.0) * 255)) |
@property
def supported_features(self) -> int:
'Flag supported features.'
return SUPPORT_BRIGHTNESS | -7,275,260,559,451,487,000 | Flag supported features. | homeassistant/components/homematicip_cloud/light.py | supported_features | 0x00-0xFF/home-assistant | python | @property
def supported_features(self) -> int:
return SUPPORT_BRIGHTNESS |
async def async_turn_on(self, **kwargs) -> None:
'Turn the light on.'
if (ATTR_BRIGHTNESS in kwargs):
(await self._device.set_dim_level((kwargs[ATTR_BRIGHTNESS] / 255.0)))
else:
(await self._device.set_dim_level(1)) | 5,651,431,970,317,736,000 | Turn the light on. | homeassistant/components/homematicip_cloud/light.py | async_turn_on | 0x00-0xFF/home-assistant | python | async def async_turn_on(self, **kwargs) -> None:
if (ATTR_BRIGHTNESS in kwargs):
(await self._device.set_dim_level((kwargs[ATTR_BRIGHTNESS] / 255.0)))
else:
(await self._device.set_dim_level(1)) |
async def async_turn_off(self, **kwargs) -> None:
'Turn the light off.'
(await self._device.set_dim_level(0)) | 904,547,101,540,762,200 | Turn the light off. | homeassistant/components/homematicip_cloud/light.py | async_turn_off | 0x00-0xFF/home-assistant | python | async def async_turn_off(self, **kwargs) -> None:
(await self._device.set_dim_level(0)) |
def __init__(self, hap: HomematicipHAP, device, channel: int) -> None:
'Initialize the dimmer light device.'
self.channel = channel
if (self.channel == 2):
super().__init__(hap, device, 'Top')
else:
super().__init__(hap, device, 'Bottom')
self._color_switcher = {RGBColorState.WHITE: ... | -936,554,559,333,744,100 | Initialize the dimmer light device. | homeassistant/components/homematicip_cloud/light.py | __init__ | 0x00-0xFF/home-assistant | python | def __init__(self, hap: HomematicipHAP, device, channel: int) -> None:
self.channel = channel
if (self.channel == 2):
super().__init__(hap, device, 'Top')
else:
super().__init__(hap, device, 'Bottom')
self._color_switcher = {RGBColorState.WHITE: [0.0, 0.0], RGBColorState.RED: [0.0, ... |
@property
def is_on(self) -> bool:
'Return true if device is on.'
return ((self._func_channel.dimLevel is not None) and (self._func_channel.dimLevel > 0.0)) | -6,904,967,177,971,977,000 | Return true if device is on. | homeassistant/components/homematicip_cloud/light.py | is_on | 0x00-0xFF/home-assistant | python | @property
def is_on(self) -> bool:
return ((self._func_channel.dimLevel is not None) and (self._func_channel.dimLevel > 0.0)) |
@property
def brightness(self) -> int:
'Return the brightness of this light between 0..255.'
return int(((self._func_channel.dimLevel or 0.0) * 255)) | -5,342,752,628,957,432,000 | Return the brightness of this light between 0..255. | homeassistant/components/homematicip_cloud/light.py | brightness | 0x00-0xFF/home-assistant | python | @property
def brightness(self) -> int:
return int(((self._func_channel.dimLevel or 0.0) * 255)) |
@property
def hs_color(self) -> tuple:
'Return the hue and saturation color value [float, float].'
simple_rgb_color = self._func_channel.simpleRGBColorState
return self._color_switcher.get(simple_rgb_color, [0.0, 0.0]) | 6,329,802,148,743,832,000 | Return the hue and saturation color value [float, float]. | homeassistant/components/homematicip_cloud/light.py | hs_color | 0x00-0xFF/home-assistant | python | @property
def hs_color(self) -> tuple:
simple_rgb_color = self._func_channel.simpleRGBColorState
return self._color_switcher.get(simple_rgb_color, [0.0, 0.0]) |
@property
def device_state_attributes(self) -> Dict[(str, Any)]:
'Return the state attributes of the generic device.'
state_attr = super().device_state_attributes
if self.is_on:
state_attr[ATTR_COLOR_NAME] = self._func_channel.simpleRGBColorState
return state_attr | -7,103,013,381,797,680,000 | Return the state attributes of the generic device. | homeassistant/components/homematicip_cloud/light.py | device_state_attributes | 0x00-0xFF/home-assistant | python | @property
def device_state_attributes(self) -> Dict[(str, Any)]:
state_attr = super().device_state_attributes
if self.is_on:
state_attr[ATTR_COLOR_NAME] = self._func_channel.simpleRGBColorState
return state_attr |
@property
def name(self) -> str:
'Return the name of the generic device.'
return f'{super().name} Notification' | 9,124,239,975,491,450,000 | Return the name of the generic device. | homeassistant/components/homematicip_cloud/light.py | name | 0x00-0xFF/home-assistant | python | @property
def name(self) -> str:
return f'{super().name} Notification' |
@property
def supported_features(self) -> int:
'Flag supported features.'
return (SUPPORT_BRIGHTNESS | SUPPORT_COLOR) | 8,128,663,612,521,723,000 | Flag supported features. | homeassistant/components/homematicip_cloud/light.py | supported_features | 0x00-0xFF/home-assistant | python | @property
def supported_features(self) -> int:
return (SUPPORT_BRIGHTNESS | SUPPORT_COLOR) |
@property
def unique_id(self) -> str:
'Return a unique ID.'
return f'{self.__class__.__name__}_{self.post}_{self._device.id}' | -2,511,959,092,211,002,000 | Return a unique ID. | homeassistant/components/homematicip_cloud/light.py | unique_id | 0x00-0xFF/home-assistant | python | @property
def unique_id(self) -> str:
return f'{self.__class__.__name__}_{self.post}_{self._device.id}' |
async def async_turn_on(self, **kwargs) -> None:
'Turn the light on.'
hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
simple_rgb_color = _convert_color(hs_color)
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
if (not kwargs):
brightness = 255
brightness = max(10, brightnes... | -8,156,840,869,278,348,000 | Turn the light on. | homeassistant/components/homematicip_cloud/light.py | async_turn_on | 0x00-0xFF/home-assistant | python | async def async_turn_on(self, **kwargs) -> None:
hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
simple_rgb_color = _convert_color(hs_color)
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
if (not kwargs):
brightness = 255
brightness = max(10, brightness)
dim_level = (... |
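The _convert_color helper called above maps a hue/saturation tuple onto one of the discrete RGBColorState values the notification light supports. Its body does not appear in this slice, so the bucketing below is an assumed reconstruction, not the integration's exact thresholds:

def convert_color_sketch(hs_color):
    # Hypothetical hue bucketing; the real _convert_color lives in light.py.
    if hs_color is None:
        return 'WHITE'
    hue, saturation = hs_color
    if saturation < 5:
        return 'WHITE'  # near-zero saturation reads as white
    if hue < 30 or hue >= 330:
        return 'RED'
    if hue < 90:
        return 'YELLOW'
    if hue < 150:
        return 'GREEN'
    if hue < 210:
        return 'TURQUOISE'
    if hue < 270:
        return 'BLUE'
    return 'PURPLE'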
async def async_turn_off(self, **kwargs) -> None:
'Turn the light off.'
simple_rgb_color = self._func_channel.simpleRGBColorState
transition = kwargs.get(ATTR_TRANSITION, 0.5)
(await self._device.set_rgb_dim_level_with_time(channelIndex=self.channel, rgb=simple_rgb_color, dimLevel=0.0, onTime=0, rampTim... | -6,279,083,896,082,220,000 | Turn the light off. | homeassistant/components/homematicip_cloud/light.py | async_turn_off | 0x00-0xFF/home-assistant | python | async def async_turn_off(self, **kwargs) -> None:
simple_rgb_color = self._func_channel.simpleRGBColorState
transition = kwargs.get(ATTR_TRANSITION, 0.5)
(await self._device.set_rgb_dim_level_with_time(channelIndex=self.channel, rgb=simple_rgb_color, dimLevel=0.0, onTime=0, rampTime=transition)) |
@property
def exists(self):
'\n checks if the db exists and logs it\n\n Returns\n -------\n bool\n bool if the file exists or not\n '
if os.path.isfile(self.db_loc):
log.info('database at %s, does EXIST', self.db_loc)
return Tru... | 1,824,685,546,315,325,000 | checks if the db exists and logs it
Returns
-------
bool
bool if the file exists or not | antipetros_discordbot/utility/gidsql/db_action_base.py | exists | official-antistasi-community/Antipetros_Discord_Bot | python | @property
def exists(self):
'\n checks if the db exists and logs it\n\n Returns\n -------\n bool\n bool if the file exists or not\n '
if os.path.isfile(self.db_loc):
log.info('database at %s, does EXIST', self.db_loc)
return Tru... |
@property
def exists(self):
'\n checks if the db exists and logs it\n\n Returns\n -------\n bool\n bool if the file exists or not\n '
if os.path.isfile(self.db_loc):
log.info('database at %s, does EXIST', self.db_loc)
return Tru... | 1,824,685,546,315,325,000 | checks if the db exists and logs it
Returns
-------
bool
bool if the file exists or not | antipetros_discordbot/utility/gidsql/db_action_base.py | exists | official-antistasi-community/Antipetros_Discord_Bot | python | @property
def exists(self):
'\n checks if the db exists and logs it\n\n Returns\n -------\n bool\n bool if the file exists or not\n '
if os.path.isfile(self.db_loc):
log.info('database at %s, does EXIST', self.db_loc)
return Tru... |
def discounted_reverse_cumsum(data, gamma: float):
'\n Use a linear filter to compute the reverse discounted cumulative sum.\n\n .. note::\n `scipy.signal.lfilter` assumes an initialization with 0 by default.\n\n :param data: input data with samples along the 0 axis (e.g. time series)\n :param ga... | -5,288,915,096,824,507,000 | Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step | mushroom_rl/core/parallelization_tools/step_sequence.py | discounted_reverse_cumsum | nifunk/GNNMushroomRL | python | def discounted_reverse_cumsum(data, gamma: float):
'\n Use a linear filter to compute the reverse discounted cumulative sum.\n\n .. note::\n `scipy.signal.lfilter` assumes an initialization with 0 by default.\n\n :param data: input data with samples along the 0 axis (e.g. time series)\n :param ga... |
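The docstring's note points at the classic scipy.signal.lfilter trick: running a first-order IIR filter over the time-reversed input computes y[t] = data[t] + gamma * y[t+1]. A sketch consistent with that description (the row's body is truncated, so this is a reconstruction rather than the verbatim function):

import numpy as np
from scipy import signal

def discounted_reverse_cumsum_sketch(data, gamma):
    # Filtering the reversed array with b=[1], a=[1, -gamma] yields the
    # reverse discounted cumulative sum once the result is reversed back.
    data = np.asarray(data, dtype=np.float64)
    return signal.lfilter([1.0], [1.0, -gamma], data[::-1])[::-1]

# With gamma = 0.5 and rewards [1, 1, 1] this prints [1.75, 1.5, 1.0].
print(discounted_reverse_cumsum_sketch([1.0, 1.0, 1.0], 0.5))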
def discounted_value(rollout: StepSequence, gamma: float):
'\n Compute the discounted state values for one rollout.\n\n :param rollout: input data\n :param gamma: temporal discount factor\n :return: state values for every time step in the rollout\n '
rewards = [step.reward for step in rollout]
... | 3,926,704,981,727,231,500 | Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout | mushroom_rl/core/parallelization_tools/step_sequence.py | discounted_value | nifunk/GNNMushroomRL | python | def discounted_value(rollout: StepSequence, gamma: float):
'\n Compute the discounted state values for one rollout.\n\n :param rollout: input data\n :param gamma: temporal discount factor\n :return: state values for every time step in the rollout\n '
rewards = [step.reward for step in rollout]
... |
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str]='torch'):
'\n Compute the discounted state values for multiple rollouts.\n\n :param rollouts: input data\n :param gamma: temporal discount factor\n :param data_format: data format of the given rollouts\n :return: ... | 645,887,553,901,988,900 | Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
:param data_format: data format of the given rollouts
:return: state values for every time step in the rollouts (concatenated sequence across rollouts) | mushroom_rl/core/parallelization_tools/step_sequence.py | discounted_values | nifunk/GNNMushroomRL | python | def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str]='torch'):
'\n Compute the discounted state values for multiple rollouts.\n\n :param rollouts: input data\n :param gamma: temporal discount factor\n :param data_format: data format of the given rollouts\n :return: ...
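Judging from the docstring, the multi-rollout variant applies the per-rollout cumsum separately (so discounting never crosses an episode boundary), concatenates the results, and honors the data_format switch. A hedged sketch reusing the function from the previous snippet:

import numpy as np
import torch

def discounted_values_sketch(reward_sequences, gamma, data_format='torch'):
    # reward_sequences: one reward list per rollout; output is one flat sequence.
    per_rollout = [discounted_reverse_cumsum_sketch(r, gamma) for r in reward_sequences]
    values = np.concatenate(per_rollout)
    return torch.from_numpy(values) if data_format == 'torch' else values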
def gae_returns(rollout: StepSequence, gamma: float=0.99, lamb: float=0.95):
"\n Compute returns using generalized advantage estimation.\n\n .. seealso::\n [1] J. Schulmann, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using\n Generalized Advantage Estimation'... | 4,842,705,186,051,923,000 | Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
:param lamb: disco... | mushroom_rl/core/parallelization_tools/step_sequence.py | gae_returns | nifunk/GNNMushroomRL | python | def gae_returns(rollout: StepSequence, gamma: float=0.99, lamb: float=0.95):
"\n Compute returns using generalized advantage estimation.\n\n .. seealso::\n [1] J. Schulmann, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using\n Generalized Advantage Estimation'... |
def __init__(self, rollout, index):
'\n Constructor\n\n :param rollout: `StepSequence` object to which this step belongs\n :param index: index of this step in the rollout\n '
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout | -7,175,570,219,185,015,000 | Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout | mushroom_rl/core/parallelization_tools/step_sequence.py | __init__ | nifunk/GNNMushroomRL | python | def __init__(self, rollout, index):
'\n Constructor\n\n :param rollout: `StepSequence` object to which this step belongs\n :param index: index of this step in the rollout\n '
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout |
def __init__(self, *, complete: Optional[bool]=True, rollout_info=None, data_format: Optional[str]=None, done: Optional[np.ndarray]=None, continuous: Optional[bool]=True, rollout_bounds=None, rewards: Sequence, observations: Sequence, actions: Sequence, **data):
"\n Constructor\n\n :param complete: `F... | -5,813,278,499,522,838,000 | Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param... | mushroom_rl/core/parallelization_tools/step_sequence.py | __init__ | nifunk/GNNMushroomRL | python | def __init__(self, *, complete: Optional[bool]=True, rollout_info=None, data_format: Optional[str]=None, done: Optional[np.ndarray]=None, continuous: Optional[bool]=True, rollout_bounds=None, rewards: Sequence, observations: Sequence, actions: Sequence, **data):
"\n Constructor\n\n :param complete: `F... |
@property
def data_format(self) -> str:
" Get the name of data format ('torch' or 'numpy'). "
return self._data_format | -3,737,586,975,972,980,700 | Get the name of data format ('torch' or 'numpy'). | mushroom_rl/core/parallelization_tools/step_sequence.py | data_format | nifunk/GNNMushroomRL | python | @property
def data_format(self) -> str:
" "
return self._data_format |
@property
def data_names(self) -> Sequence[str]:
' Get the list of data attribute names. '
return self._data_names | 7,636,364,652,369,576,000 | Get the list of data attribute names. | mushroom_rl/core/parallelization_tools/step_sequence.py | data_names | nifunk/GNNMushroomRL | python | @property
def data_names(self) -> Sequence[str]:
' '
return self._data_names |
@property
def rollout_count(self):
' Count the number of sub-rollouts inside this step sequence. '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
return (len(self._rollout_bounds) - 1) | -8,265,467,451,147,833,000 | Count the number of sub-rollouts inside this step sequence. | mushroom_rl/core/parallelization_tools/step_sequence.py | rollout_count | nifunk/GNNMushroomRL | python | @property
def rollout_count(self):
' '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
return (len(self._rollout_bounds) - 1) |
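The bookkeeping behind rollout_count is a prefix sum of sub-rollout lengths: N concatenated sub-rollouts are delimited by N + 1 cumulative offsets. A small illustration (the lengths are made up):

import numpy as np

lengths = [5, 3, 7]                        # three concatenated sub-rollouts
rollout_bounds = np.cumsum([0] + lengths)  # array([ 0,  5,  8, 15])
rollout_count = len(rollout_bounds) - 1    # 3, matching the property above
slices = [slice(a, b) for a, b in zip(rollout_bounds[:-1], rollout_bounds[1:])]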