Dataset schema:
_id: string (length 2-7)
title: string (length 1-88)
partition: string (3 classes)
text: string (length 75-19.8k)
language: string (1 class)
meta_information: dict
q18700
_setup_network
train
def _setup_network():
    """Setup platform specific network settings"""
    global wlan
    global secret

    if sys.platform in PYCOM:
        # Update secret as tuple with wlan mode for PyCom port.
        wlan = network.WLAN(network.WLAN.STA)
        secret = (network.WLAN.WPA2, settings.WIFI_PASSWORD)
    else:
        # default micropython wlan settings
        wlan = network.WLAN(network.STA_IF)
        secret = settings.WIFI_PASSWORD
python
{ "resource": "" }
q18701
_wifi_connect
train
def _wifi_connect():
    """Connects to WIFI"""
    if not wlan.isconnected():
        wlan.active(True)
        print("NETWORK: connecting to network %s..." % settings.WIFI_SSID)
        wlan.connect(settings.WIFI_SSID, secret)
        while not wlan.isconnected():
            print("NETWORK: waiting for connection...")
            utime.sleep(1)
    print("NETWORK: Connected, network config: %s" % repr(wlan.ifconfig()))
python
{ "resource": "" }
q18702
disable_ap
train
def disable_ap():
    """Disables any access point"""
    wlan = network.WLAN(network.AP_IF)
    wlan.active(False)
    print("NETWORK: Access Point disabled.")
python
{ "resource": "" }
q18703
HomieNode.get_property_id_from_set_topic
train
def get_property_id_from_set_topic(self, topic):
    """Return the property id from topic as integer"""
    topic = topic.decode()
    return int(topic.split("/")[-3].split("_")[-1])
python
{ "resource": "" }
q18704
HomieDevice.add_node
train
def add_node(self, node):
    """add a node class of HomieNode to this device"""
    self.nodes.append(node)
    # add node_ids
    try:
        if node.node_id != b"$stats":
            self.node_ids.append(node.node_id)
    except NotImplementedError:
        raise
    except Exception:
        print("ERROR: getting Node")
python
{ "resource": "" }
q18705
HomieDevice.subscribe_topics
train
def subscribe_topics(self):
    """subscribe to all registered device and node topics"""
    base = self.topic
    subscribe = self.mqtt.subscribe

    # device topics
    subscribe(b"/".join((base, b"$stats/interval/set")))
    subscribe(b"/".join((self.settings.MQTT_BASE_TOPIC, b"$broadcast/#")))

    # node topics
    nodes = self.nodes
    for node in nodes:
        for topic in node.subscribe:
            topic = b"/".join((base, topic))
            # print('MQTT SUBSCRIBE: {}'.format(topic))
            subscribe(topic)
            self.topic_callbacks[topic] = node.callback
python
{ "resource": "" }
q18706
HomieDevice.publish_properties
train
def publish_properties(self):
    """publish device and node properties"""
    publish = self.publish

    # device properties
    publish(b"$homie", b"3.0.1")
    publish(b"$name", self.settings.DEVICE_NAME)
    publish(b"$state", b"init")
    publish(b"$fw/name", b"Microhomie")
    publish(b"$fw/version", __version__)
    publish(b"$implementation", bytes(sys.platform, "utf-8"))
    publish(b"$localip", utils.get_local_ip())
    publish(b"$mac", utils.get_local_mac())
    publish(b"$stats", b"interval,uptime,freeheap")
    publish(b"$stats/interval", self.stats_interval)
    publish(b"$nodes", b",".join(self.node_ids))

    # node properties
    for node in self.nodes:
        try:
            for propertie in node.get_properties():
                if propertie:
                    publish(*propertie)
        except NotImplementedError:
            raise
        except Exception as error:
            self.node_error(node, error)
python
{ "resource": "" }
q18707
HomieDevice.publish_data
train
def publish_data(self):
    """publish node data if node has updates"""
    self.publish_device_stats()
    publish = self.publish

    # node data
    for node in self.nodes:
        try:
            if node.has_update():
                for data in node.get_data():
                    publish(*data)
        except NotImplementedError:
            raise
        except Exception as error:
            self.node_error(node, error)
python
{ "resource": "" }
q18708
HomieDevice.start
train
def start(self):
    """publish device and node properties, run forever"""
    self.publish_properties()
    self.subscribe_topics()
    gc.collect()
    self.set_state("ready")

    while True:
        try:
            if not utils.wlan.isconnected():
                utils.wifi_connect()

            # publish device data
            self.publish_data()

            # check for new mqtt messages
            self.mqtt.check_msg()

            idle()
            sleep(1)
        except KeyboardInterrupt:
            self.set_state("disconnected")
            self.mqtt.disconnect()
python
{ "resource": "" }
q18709
array
train
def array(shape, dtype=_np.float64, autolock=False):
    """Factory method for shared memory arrays supporting all numpy dtypes."""
    assert _NP_AVAILABLE, "To use the shared array object, numpy must be available!"
    if not isinstance(dtype, _np.dtype):
        dtype = _np.dtype(dtype)
    # Not bothering to translate the numpy dtypes to ctype types directly,
    # because they're only partially supported. Instead, create a byte ctypes
    # array of the right size and use a view of the appropriate datatype.
    shared_arr = _multiprocessing.Array(
        "b", int(_np.prod(shape) * dtype.alignment), lock=autolock
    )
    with _warnings.catch_warnings():
        # For more information on why this is necessary, see
        # https://www.reddit.com/r/Python/comments/j3qjb/parformatlabpool_replacement
        _warnings.simplefilter("ignore", RuntimeWarning)
        data = _np.ctypeslib.as_array(shared_arr).view(dtype).reshape(shape)
    return data
python
{ "resource": "" }
q18710
Parallel.print
train
def print(cls, *args, **kwargs):
    """Print synchronized."""
    # pylint: disable=protected-access
    with _shared._PRINT_LOCK:
        print(*args, **kwargs)
        _sys.stdout.flush()
python
{ "resource": "" }
q18711
Parallel.range
train
def range(self, start, stop=None, step=1):
    """
    Get the correctly distributed parallel chunks.

    This corresponds to using the OpenMP 'static' schedule.
    """
    self._assert_active()
    if stop is None:
        start, stop = 0, start
    full_list = range(start, stop, step)
    per_worker = len(full_list) // self._num_threads
    rem = len(full_list) % self._num_threads
    schedule = [
        per_worker + 1 if thread_idx < rem else per_worker
        for thread_idx in range(self._num_threads)
    ]
    # pylint: disable=undefined-variable
    start_idx = _functools.reduce(
        lambda x, y: x + y, schedule[: self.thread_num], 0
    )
    end_idx = start_idx + schedule[self._thread_num]
    return full_list[start_idx:end_idx]
python
{ "resource": "" }
q18712
Parallel.xrange
train
def xrange(self, start, stop=None, step=1):
    """
    Get an iterator for this thread's chunk of work.

    This corresponds to using the OpenMP 'dynamic' schedule.
    """
    self._assert_active()
    if stop is None:
        start, stop = 0, start
    with self._queuelock:
        pool_loop_reached = max(self._thread_loop_ids)
        # Get this loop id.
        self._thread_loop_ids[self._thread_num] += 1
        loop_id = self._thread_loop_ids[self._thread_num]
        if pool_loop_reached < loop_id:
            # No thread reached this loop yet. Set up the queue.
            for idx in range(start, stop, step):
                self._dynamic_queue.put(idx)
    # Iterate.
    return _QueueIterator(self._dynamic_queue, loop_id, self)
python
{ "resource": "" }
q18713
Parallel.iterate
train
def iterate(self, iterable, element_timeout=None):
    """
    Iterate over an iterable.

    The iterator is executed in the host thread. The threads dynamically
    grab the elements. The iterator elements must hence be picklable to be
    transferred through the queue. If there is only one thread, no special
    operations are performed. Otherwise, effectively n-1 threads are used
    to process the iterable elements, and the host thread is used to
    provide them. You can specify a timeout for the clients to adhere to.
    """
    self._assert_active()
    with self._queuelock:
        # Get this loop id.
        self._thread_loop_ids[self._thread_num] += 1
        loop_id = self._thread_loop_ids[self._thread_num]
    # Iterate.
    return _IterableQueueIterator(
        self._iter_queue, loop_id, self, iterable, element_timeout
    )
python
{ "resource": "" }
q18714
configure
train
def configure():
    """
    Configure information about Databricks account and default behavior.

    Configuration is stored in a `.apparatecfg` file. A config file must
    exist before this package can be used, and can be supplied either
    directly as a text file or generated using this configuration tool.
    """
    config = _load_config(CFG_FILE)
    _update_value(
        config,
        'host',
        'Databricks host (e.g. https://my-organization.cloud.databricks.com)',
        is_sensitive=False,
    )
    _update_value(
        config,
        'token',
        'Databricks API token',
        is_sensitive=True,
    )
    _update_value(
        config,
        'prod_folder',
        'Databricks folder for production libraries',
        is_sensitive=False,
    )
    with open(CFG_FILE, 'w+') as f:
        config.write(f)
python
{ "resource": "" }
q18715
load_library
train
def load_library(filename, match, folder, token, host):
    """
    upload an egg to the Databricks filesystem.

    Parameters
    ----------
    filename: string
        local location of file to upload
    match: FilenameMatch object
        match object with library_type, library_name, and version
    folder: string
        Databricks folder to upload to
        (e.g. '/Users/htorrence@shoprunner.com/')
    token: string
        Databricks API key
    host: string
        Databricks host (e.g. https://my-organization.cloud.databricks.com)

    Side Effects
    ------------
    uploads egg to Databricks
    """
    with open(filename, 'rb') as file_obj:
        res = requests.post(
            host + '/api/1.2/libraries/upload',
            auth=('token', token),
            data={
                'libType': match.lib_type,
                'name': '{0}-{1}'.format(match.library_name, match.version),
                'folder': folder,
            },
            files={'uri': file_obj},
        )
    if res.status_code != 200:
        raise APIError(res)
python
{ "resource": "" }
q18716
get_job_list
train
def get_job_list(logger, match, library_mapping, token, host):
    """
    get a list of jobs using the major version of the given library

    Parameters
    ----------
    logger: logging object
        configured in cli_commands.py
    match: FilenameMatch object
        match object with suffix
    library_mapping: dict
        first element of get_library_mapping output
    token: string
        Databricks API key
    host: string
        Databricks host (e.g. https://my-organization.cloud.databricks.com)

    Returns
    -------
    list of dictionaries containing the job id, job name,
    and library path for each job
    """
    res = requests.get(
        host + '/api/2.0/jobs/list',
        auth=('token', token),
    )
    if res.status_code == 200:
        job_list = []
        if len(res.json()['jobs']) == 0:
            return []
        for job in res.json()['jobs']:
            logger.debug('job: {}'.format(job['settings']['name']))
            if 'libraries' in job['settings'].keys():
                for library in job['settings']['libraries']:
                    if match.suffix in library.keys():
                        try:
                            # if in prod_folder, mapping turns uri into name
                            job_library_uri = basename(library[match.suffix])
                            job_match = library_mapping[job_library_uri]
                        except KeyError:
                            logger.debug(
                                'not in library map: {}'
                                .format(job_library_uri)
                            )
                        else:
                            if match.replace_version(job_match, logger):
                                job_list.append({
                                    'job_id': job['job_id'],
                                    'job_name': job['settings']['name'],
                                    'library_path': library[match.suffix],
                                })
                            else:
                                logger.debug(
                                    'not replacable: {}'
                                    .format(job_match.filename)
                                )
                    else:
                        logger.debug(
                            'no matching suffix: looking for {}, found {}'
                            .format(match.suffix, str(library.keys()))
                        )
        return job_list
    else:
        raise APIError(res)
python
{ "resource": "" }
q18717
get_library_mapping
train
def get_library_mapping(logger, prod_folder, token, host):
    """
    returns a pair of library mappings, the first mapping library uri to a
    library name for all libraries in the production folder, and the second
    mapping library name to info for libraries in the production folder
    with parsable versions

    Parameters
    ----------
    logger: logging object
        configured in cli_commands.py
    prod_folder: string
        name of folder in Databricks UI containing production libraries
    token: string
        Databricks API key
    host: string
        Databricks account url
        (e.g. https://fake-organization.cloud.databricks.com)

    Returns
    -------
    dictionary mapping a library uri to a library name
    dictionary mapping library UI path to base name, major version,
    minor version, and id number
    """
    res = requests.get(
        host + '/api/1.2/libraries/list',
        auth=('token', token),
    )
    if res.status_code == 200:
        library_list = res.json()
        library_map = {}
        id_nums = {}
        for library in library_list:
            status_res = requests.get(
                host + '/api/1.2/libraries/status?libraryId={}'
                .format(library['id']),
                auth=('token', token),
            )
            if status_res.status_code == 200:
                library_info = status_res.json()
                # only do any of this for libraries in the production folder
                if library_info['folder'] != prod_folder:
                    logger.debug(
                        'excluded folder: {} in {}, not prod folder ({})'
                        .format(
                            library_info['name'],
                            library_info['folder'],
                            prod_folder,
                        )
                    )
                    continue
                if library_info['libType'] == 'python-egg':
                    full_name = library_info['name'] + '.egg'
                elif library_info['libType'] == 'java-jar':
                    full_name = library_info['name'] + '.jar'
                else:
                    logger.debug(
                        'excluded library type: {} is of libType {}, '
                        'not jar or egg'
                        .format(
                            library_info['name'],
                            library_info['libType'],
                        )
                    )
                    continue
                try:
                    name_match = FileNameMatch(full_name)
                    # map uri to name match object
                    library_map[library_info['files'][0]] = name_match
                    # map name to name match object and id number
                    # we'll need the id number to clean up old libraries
                    id_nums[library_info['name']] = {
                        'name_match': name_match,
                        'id_num': library_info['id'],
                    }
                except FileNameError:
                    logger.debug(
                        'FileNameError: {} file name is not parsable'
                        .format(full_name)
                    )
            else:
                raise APIError(status_res)
        return library_map, id_nums
    else:
        raise APIError(res)
python
{ "resource": "" }
q18718
update_job_libraries
train
def update_job_libraries(
    logger,
    job_list,
    match,
    new_library_path,
    token,
    host,
):
    """
    update libraries on jobs using same major version

    Parameters
    ----------
    logger: logging object
        configured in cli_commands.py
    job_list: list of strings
        output of get_job_list
    match: FilenameMatch object
        match object with suffix
    new_library_path: string
        path to library in dbfs (including uri)
    token: string
        Databricks API key with admin permissions
    host: string
        Databricks account url
        (e.g. https://fake-organization.cloud.databricks.com)

    Side Effects
    ------------
    jobs now require updated version of library
    """
    for job in job_list:
        get_res = requests.get(
            host + '/api/2.0/jobs/get?job_id={}'.format(job['job_id']),
            auth=('token', token),
        )
        if get_res.status_code == 200:
            job_specs = get_res.json()  # copy current job specs
            settings = job_specs['settings']
            job_specs.pop('settings')
            new_libraries = []
            for lib in settings['libraries']:
                if (
                    match.suffix in lib.keys()
                    and lib[match.suffix] == job['library_path']
                ):
                    # replace entry for old library path with new one
                    new_libraries.append({match.suffix: new_library_path})
                else:
                    new_libraries.append(lib)
            settings['libraries'] = new_libraries
            job_specs['new_settings'] = settings
            post_res = requests.post(
                host + '/api/2.0/jobs/reset',
                auth=('token', token),
                data=json.dumps(job_specs),
            )
            if post_res.status_code != 200:
                raise APIError(post_res)
        else:
            raise APIError(get_res)
python
{ "resource": "" }
q18719
FileNameMatch.replace_version
train
def replace_version(self, other, logger):
    """
    True if self can safely replace other

    based on version numbers only - snapshot and branch tags are ignored
    """
    if other.library_name != self.library_name:
        logger.debug(
            'not replacable: {} != {} ({})'
            .format(other.library_name, self.library_name, other.filename)
        )
        return False
    elif int(other.major_version) != int(self.major_version):
        logger.debug(
            'not replacable: {} != {} ({})'
            .format(
                int(self.major_version),
                int(other.major_version),
                other.filename,
            )
        )
        return False
    elif float(other.minor_version) >= float(self.minor_version):
        logger.debug(
            'not replacable: {} >= {} ({})'
            .format(
                other.minor_version,
                self.minor_version,
                other.filename,
            )
        )
        return False
    else:
        return True
python
{ "resource": "" }
q18720
_resolve_input
train
def _resolve_input(variable, variable_name, config_key, config):
    """
    Resolve input entered as option values with config values

    If option values are provided (passed in as `variable`), then they are
    returned unchanged. If `variable` is None, then we first look for a
    config value to use. If no config value is found, then raise an error.

    Parameters
    ----------
    variable: string or numeric
        value passed in as input by the user
    variable_name: string
        name of the variable, for clarity in the error message
    config_key: string
        key in the config whose value could be used to fill in the variable
    config: ConfigParser
        contains keys/values in .apparatecfg
    """
    if variable is None:
        try:
            variable = config.get(PROFILE, config_key)
        except NoOptionError:
            raise ValueError((
                'no {} found - either provide a command line argument or '
                'set up a default by running `apparate configure`'
            ).format(variable_name))
    return variable
python
{ "resource": "" }
q18721
upload
train
def upload(path, token, folder):
    """
    The egg that the provided path points to will be uploaded to Databricks.
    """
    config = _load_config(CFG_FILE)
    token = _resolve_input(token, 'token', 'token', config)
    folder = _resolve_input(folder, 'folder', 'prod_folder', config)
    update_databricks(
        logger, path, token, folder, update_jobs=False, cleanup=False
    )
python
{ "resource": "" }
q18722
upload_and_update
train
def upload_and_update(path, token, cleanup):
    """
    The egg that the provided path points to will be uploaded to
    Databricks. All jobs which use the same major version of the library
    will be updated to use the new version, and all versions of this
    library in the production folder with the same major version and a
    lower minor version will be deleted.

    Unlike `upload`, `upload_and_update` does not ask for a folder because
    it relies on the production folder specified in the config. This is to
    protect against accidentally updating jobs to versions of a library
    still in testing/development.

    All egg names already in Databricks must be properly formatted with
    versions of the form <name>-0.0.0.
    """
    config = _load_config(CFG_FILE)
    token = _resolve_input(token, 'token', 'token', config)
    folder = _resolve_input(None, 'folder', 'prod_folder', config)
    update_databricks(
        logger, path, token, folder, update_jobs=True, cleanup=cleanup
    )
python
{ "resource": "" }
q18723
parse_sas_token
train
def parse_sas_token(sas_token):
    """Parse a SAS token into its components.

    :param sas_token: The SAS token.
    :type sas_token: str
    :rtype: dict[str, str]
    """
    sas_data = {}
    token = sas_token.partition(' ')[2]
    fields = token.split('&')
    for field in fields:
        key, value = field.split('=', 1)
        sas_data[key.lower()] = value
    return sas_data
python
{ "resource": "" }
q18724
EventData.offset
train
def offset(self):
    """
    The offset of the event data object.

    :rtype: ~azure.eventhub.common.Offset
    """
    try:
        return Offset(self._annotations[EventData.PROP_OFFSET].decode('UTF-8'))
    except (KeyError, AttributeError):
        return None
python
{ "resource": "" }
q18725
EventData.enqueued_time
train
def enqueued_time(self):
    """
    The enqueued timestamp of the event data object.

    :rtype: datetime.datetime
    """
    timestamp = self._annotations.get(EventData.PROP_TIMESTAMP, None)
    if timestamp:
        return datetime.datetime.utcfromtimestamp(float(timestamp) / 1000)
    return None
python
{ "resource": "" }
q18726
EventData.partition_key
train
def partition_key(self):
    """
    The partition key of the event data object.

    :rtype: bytes
    """
    try:
        return self._annotations[self._partition_key]
    except KeyError:
        return self._annotations.get(EventData.PROP_PARTITION_KEY, None)
python
{ "resource": "" }
q18727
EventData.partition_key
train
def partition_key(self, value):
    """
    Set the partition key of the event data object.

    :param value: The partition key to set.
    :type value: str or bytes
    """
    annotations = dict(self._annotations)
    annotations[self._partition_key] = value
    header = MessageHeader()
    header.durable = True
    self.message.annotations = annotations
    self.message.header = header
    self._annotations = annotations
python
{ "resource": "" }
q18728
EventData.application_properties
train
def application_properties(self, value):
    """
    Application defined properties on the message.

    :param value: The application properties for the EventData.
    :type value: dict
    """
    self._app_properties = value
    properties = dict(self._app_properties)
    self.message.application_properties = properties
python
{ "resource": "" }
q18729
EventData.body_as_str
train
def body_as_str(self, encoding='UTF-8'):
    """
    The body of the event data as a string if the data is of a
    compatible type.

    :param encoding: The encoding to use for decoding message data.
     Default is 'UTF-8'
    :rtype: str or unicode
    """
    data = self.body
    try:
        return "".join(b.decode(encoding) for b in data)
    except TypeError:
        return six.text_type(data)
    except:  # pylint: disable=bare-except
        pass
    try:
        return data.decode(encoding)
    except Exception as e:
        raise TypeError(
            "Message data is not compatible with string type: {}".format(e))
python
{ "resource": "" }
q18730
EventData.body_as_json
train
def body_as_json(self, encoding='UTF-8'):
    """
    The body of the event loaded as a JSON object, if the data is
    compatible.

    :param encoding: The encoding to use for decoding message data.
     Default is 'UTF-8'
    :rtype: dict
    """
    data_str = self.body_as_str(encoding=encoding)
    try:
        return json.loads(data_str)
    except Exception as e:
        raise TypeError(
            "Event data is not compatible with JSON type: {}".format(e))
python
{ "resource": "" }
q18731
Offset.selector
train
def selector(self):
    """
    Creates a selector expression of the offset.

    :rtype: bytes
    """
    operator = ">=" if self.inclusive else ">"
    if isinstance(self.value, datetime.datetime):
        timestamp = (calendar.timegm(self.value.utctimetuple()) * 1000) + \
            (self.value.microsecond / 1000)
        return ("amqp.annotation.x-opt-enqueued-time {} '{}'".format(
            operator, int(timestamp))).encode('utf-8')
    if isinstance(self.value, six.integer_types):
        return ("amqp.annotation.x-opt-sequence-number {} '{}'".format(
            operator, self.value)).encode('utf-8')
    return ("amqp.annotation.x-opt-offset {} '{}'".format(
        operator, self.value)).encode('utf-8')
python
{ "resource": "" }
q18732
EventHubConfig.get_client_address
train
def get_client_address(self):
    """
    Returns the full AMQPS address used to connect to the Event Hub.

    :rtype: str
    """
    return "amqps://{}:{}@{}.{}:5671/{}".format(
        urllib.parse.quote_plus(self.policy),
        urllib.parse.quote_plus(self.sas_key),
        self.sb_name,
        self.namespace_suffix,
        self.eh_name)
python
{ "resource": "" }
q18733
EventHubConfig.get_rest_token
train
def get_rest_token(self):
    """
    Returns an auth token for making calls to eventhub REST API.

    :rtype: str
    """
    uri = urllib.parse.quote_plus(
        "https://{}.{}/{}".format(
            self.sb_name, self.namespace_suffix, self.eh_name))
    sas = self.sas_key.encode('utf-8')
    expiry = str(int(time.time() + 10000))
    string_to_sign = ('{}\n{}'.format(uri, expiry)).encode('utf-8')
    signed_hmac_sha256 = hmac.HMAC(sas, string_to_sign, hashlib.sha256)
    signature = urllib.parse.quote(
        base64.b64encode(signed_hmac_sha256.digest()))
    return 'SharedAccessSignature sr={}&sig={}&se={}&skn={}' \
        .format(uri, signature, expiry, self.policy)
python
{ "resource": "" }
q18734
Sender.send
train
def send(self, event_data):
    """
    Sends an event data and blocks until acknowledgement is received or
    operation times out.

    :param event_data: The event to be sent.
    :type event_data: ~azure.eventhub.common.EventData
    :raises: ~azure.eventhub.common.EventHubError if the message fails to
     send.
    :return: The outcome of the message send.
    :rtype: ~uamqp.constants.MessageSendResult
    """
    if self.error:
        raise self.error
    if not self.running:
        raise ValueError("Unable to send until client has been started.")
    if event_data.partition_key and self.partition:
        raise ValueError(
            "EventData partition key cannot be used with a partition sender.")
    event_data.message.on_send_complete = self._on_outcome
    try:
        self._handler.send_message(event_data.message)
        if self._outcome != constants.MessageSendResult.Ok:
            raise Sender._error(self._outcome, self._condition)
    except errors.MessageException as failed:
        error = EventHubError(str(failed), failed)
        self.close(exception=error)
        raise error
    except (errors.TokenExpired, errors.AuthenticationException):
        log.info("Sender disconnected due to token error. Attempting reconnect.")
        self.reconnect()
    except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
        if shutdown.action.retry and self.auto_reconnect:
            log.info("Sender detached. Attempting reconnect.")
            self.reconnect()
        else:
            log.info("Sender detached. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
    except errors.MessageHandlerError as shutdown:
        if self.auto_reconnect:
            log.info("Sender detached. Attempting reconnect.")
            self.reconnect()
        else:
            log.info("Sender detached. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
    except Exception as e:
        log.info("Unexpected error occurred (%r). Shutting down.", e)
        error = EventHubError("Send failed: {}".format(e))
        self.close(exception=error)
        raise error
    else:
        return self._outcome
python
{ "resource": "" }
q18735
Sender.transfer
train
def transfer(self, event_data, callback=None):
    """
    Transfers an event data and notifies the callback when the operation
    is done.

    :param event_data: The event to be sent.
    :type event_data: ~azure.eventhub.common.EventData
    :param callback: Callback to be run once the message has been sent.
     This must be a function that accepts two arguments.
    :type callback: callable[~uamqp.constants.MessageSendResult,
     ~azure.eventhub.common.EventHubError]
    """
    if self.error:
        raise self.error
    if not self.running:
        raise ValueError("Unable to send until client has been started.")
    if event_data.partition_key and self.partition:
        raise ValueError(
            "EventData partition key cannot be used with a partition sender.")
    if callback:
        event_data.message.on_send_complete = \
            lambda o, c: callback(o, Sender._error(o, c))
    self._handler.queue_message(event_data.message)
python
{ "resource": "" }
q18736
Sender._on_outcome
train
def _on_outcome(self, outcome, condition):
    """
    Called when the outcome is received for a delivery.

    :param outcome: The outcome of the message delivery - success or
     failure.
    :type outcome: ~uamqp.constants.MessageSendResult
    """
    self._outcome = outcome
    self._condition = condition
python
{ "resource": "" }
q18737
AzureStorageCheckpointLeaseManager.initialize
train
def initialize(self, host):
    """
    The EventProcessorHost can't pass itself to the
    AzureStorageCheckpointLeaseManager constructor because it is still
    being constructed. Do other initialization here also because it might
    throw and hence we don't want it in the constructor.
    """
    self.host = host
    self.storage_client = BlockBlobService(
        account_name=self.storage_account_name,
        account_key=self.storage_account_key,
        sas_token=self.storage_sas_token,
        endpoint_suffix=self.endpoint_suffix,
        connection_string=self.connection_string,
        request_session=self.request_session)
    self.consumer_group_directory = (
        self.storage_blob_prefix + self.host.eh_config.consumer_group)
python
{ "resource": "" }
q18738
AzureStorageCheckpointLeaseManager.get_checkpoint_async
train
async def get_checkpoint_async(self, partition_id):
    """
    Get the checkpoint data associated with the given partition.
    Could return None if no checkpoint has been created for that partition.

    :param partition_id: The partition ID.
    :type partition_id: str
    :return: Given partition checkpoint info, or `None` if none has been
     previously stored.
    :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint
    """
    lease = await self.get_lease_async(partition_id)
    checkpoint = None
    if lease:
        if lease.offset:
            checkpoint = Checkpoint(
                partition_id, lease.offset, lease.sequence_number)
    return checkpoint
python
{ "resource": "" }
q18739
AzureStorageCheckpointLeaseManager.create_lease_store_if_not_exists_async
train
async def create_lease_store_if_not_exists_async(self):
    """
    Create the lease store if it does not exist, do nothing if it does
    exist.

    :return: `True` if the lease store already exists or was created
     successfully; errors are logged and re-raised.
    :rtype: bool
    """
    try:
        await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.create_container,
                self.lease_container_name))
    except Exception as err:  # pylint: disable=broad-except
        _logger.error("%r", err)
        raise err
    return True
python
{ "resource": "" }
q18740
AzureStorageCheckpointLeaseManager.get_lease_async
train
async def get_lease_async(self, partition_id):
    """
    Return the lease info for the specified partition.
    Can return None if no lease has been created in the store for the
    specified partition.

    :param partition_id: The partition ID.
    :type partition_id: str
    :return: lease info for the partition, or `None`.
    :rtype: ~azure.eventprocessorhost.lease.Lease
    """
    try:
        blob = await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.get_blob_to_text,
                self.lease_container_name,
                partition_id))
        lease = AzureBlobLease()
        lease.with_blob(blob)

        async def state():
            """
            Allow lease to curry storage_client to get state
            """
            try:
                loop = asyncio.get_event_loop()
                res = await loop.run_in_executor(
                    self.executor,
                    functools.partial(
                        self.storage_client.get_blob_properties,
                        self.lease_container_name,
                        partition_id))
                return res.properties.lease.state
            except Exception as err:  # pylint: disable=broad-except
                _logger.error("Failed to get lease state %r %r",
                              err, partition_id)

        lease.state = state
        return lease
    except Exception as err:  # pylint: disable=broad-except
        _logger.error("Failed to get lease %r %r", err, partition_id)
python
{ "resource": "" }
q18741
AzureStorageCheckpointLeaseManager.create_lease_if_not_exists_async
train
async def create_lease_if_not_exists_async(self, partition_id):
    """
    Create in the store the lease info for the given partition, if it does
    not exist. Do nothing if it does exist in the store already.

    :param partition_id: The ID of a given partition.
    :type partition_id: str
    :return: the existing or newly-created lease info for the partition.
    :rtype: ~azure.eventprocessorhost.lease.Lease
    """
    return_lease = None
    try:
        return_lease = AzureBlobLease()
        return_lease.partition_id = partition_id
        serializable_lease = return_lease.serializable()
        json_lease = json.dumps(serializable_lease)
        _logger.info(
            "Creating Lease %r %r %r",
            self.lease_container_name,
            partition_id,
            json.dumps({k: v for k, v in serializable_lease.items()
                        if k != 'event_processor_context'}))
        await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.create_blob_from_text,
                self.lease_container_name,
                partition_id,
                json_lease))
    except Exception:  # pylint: disable=broad-except
        try:
            return_lease = await self.get_lease_async(partition_id)
        except Exception as err:  # pylint: disable=broad-except
            _logger.error("Failed to create lease %r", err)
            raise err
    return return_lease
python
{ "resource": "" }
q18742
AzureStorageCheckpointLeaseManager.delete_lease_async
train
async def delete_lease_async(self, lease):
    """
    Delete the lease info for the given partition from the store.
    If there is no stored lease for the given partition, that is treated
    as success.

    :param lease: The stored lease to be deleted.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    """
    await self.host.loop.run_in_executor(
        self.executor,
        functools.partial(
            self.storage_client.delete_blob,
            self.lease_container_name,
            lease.partition_id,
            lease_id=lease.token))
python
{ "resource": "" }
q18743
AzureStorageCheckpointLeaseManager.acquire_lease_async
train
async def acquire_lease_async(self, lease):
    """
    Acquire the lease on the desired partition for this
    EventProcessorHost. Note that it is legal to acquire a lease that is
    already owned by another host. Lease-stealing is how partitions are
    redistributed when additional hosts are started.

    :param lease: The stored lease to be acquired.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    :return: `True` if the lease was acquired successfully, `False` if not.
    :rtype: bool
    """
    retval = True
    new_lease_id = str(uuid.uuid4())
    partition_id = lease.partition_id
    try:
        if asyncio.iscoroutinefunction(lease.state):
            state = await lease.state()
        else:
            state = lease.state()
        if state == "leased":
            if not lease.token:
                # We reach here in a race condition: when this instance of
                # EventProcessorHost scanned the lease blobs, this partition
                # was unowned (token is empty) but between then and now,
                # another instance of EPH has established a lease
                # (getLeaseState() is LEASED). We normally enforce that we
                # only steal the lease if it is still owned by the instance
                # which owned it when we scanned, but we can't do that when
                # we don't know who owns it. The safest thing to do is just
                # fail the acquisition. If that means that one EPH instance
                # gets more partitions than it should, rebalancing will take
                # care of that quickly enough.
                retval = False
            else:
                _logger.info("ChangingLease %r %r",
                             self.host.guid, lease.partition_id)
                await self.host.loop.run_in_executor(
                    self.executor,
                    functools.partial(
                        self.storage_client.change_blob_lease,
                        self.lease_container_name,
                        partition_id,
                        lease.token,
                        new_lease_id))
                lease.token = new_lease_id
        else:
            _logger.info("AcquiringLease %r %r",
                         self.host.guid, lease.partition_id)
            lease.token = await self.host.loop.run_in_executor(
                self.executor,
                functools.partial(
                    self.storage_client.acquire_blob_lease,
                    self.lease_container_name,
                    partition_id,
                    self.lease_duration,
                    new_lease_id))
        lease.owner = self.host.host_name
        lease.increment_epoch()
        # check if this solves the issue
        retval = await self.update_lease_async(lease)
    except Exception as err:  # pylint: disable=broad-except
        _logger.error("Failed to acquire lease %r %r %r",
                      err, partition_id, lease.token)
        return False
    return retval
python
{ "resource": "" }
q18744
AzureStorageCheckpointLeaseManager.release_lease_async
train
async def release_lease_async(self, lease):
    """
    Give up a lease currently held by this host. If the lease has been
    stolen, or expired, releasing it is unnecessary, and will fail if
    attempted.

    :param lease: The stored lease to be released.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    :return: `True` if the lease was released successfully, `False` if not.
    :rtype: bool
    """
    lease_id = None
    try:
        _logger.info("Releasing lease %r %r",
                     self.host.guid, lease.partition_id)
        lease_id = lease.token
        released_copy = AzureBlobLease()
        released_copy.with_lease(lease)
        released_copy.token = None
        released_copy.owner = None
        released_copy.state = None
        await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.create_blob_from_text,
                self.lease_container_name,
                lease.partition_id,
                json.dumps(released_copy.serializable()),
                lease_id=lease_id))
        await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.release_blob_lease,
                self.lease_container_name,
                lease.partition_id,
                lease_id))
    except Exception as err:  # pylint: disable=broad-except
        _logger.error("Failed to release lease %r %r %r",
                      err, lease.partition_id, lease_id)
        return False
    return True
python
{ "resource": "" }
q18745
AzureStorageCheckpointLeaseManager.update_lease_async
train
async def update_lease_async(self, lease):
    """
    Update the store with the information in the provided lease. It is
    necessary to currently hold a lease in order to update it. If the
    lease has been stolen, or expired, or released, it cannot be updated.
    Updating should renew the lease before performing the update to avoid
    lease expiration during the process.

    :param lease: The stored lease to be updated.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    :return: `True` if the update was performed successfully, `False` if
     not.
    :rtype: bool
    """
    if lease is None:
        return False
    if not lease.token:
        return False
    _logger.debug("Updating lease %r %r", self.host.guid, lease.partition_id)

    # First, renew the lease to make sure the update will go through.
    if await self.renew_lease_async(lease):
        try:
            await self.host.loop.run_in_executor(
                self.executor,
                functools.partial(
                    self.storage_client.create_blob_from_text,
                    self.lease_container_name,
                    lease.partition_id,
                    json.dumps(lease.serializable()),
                    lease_id=lease.token))
        except Exception as err:  # pylint: disable=broad-except
            _logger.error("Failed to update lease %r %r %r",
                          self.host.guid, lease.partition_id, err)
            raise err
    else:
        return False
    return True
python
{ "resource": "" }
q18746
EventHubClient.from_sas_token
train
def from_sas_token(cls, address, sas_token, eventhub=None, **kwargs):
    """Create an EventHubClient from an existing auth token or token
    generator.

    :param address: The Event Hub address URL
    :type address: str
    :param sas_token: A SAS token or function that returns a SAS token.
     If a function is supplied, it will be used to retrieve subsequent
     tokens in the case of token expiry. The function should take no
     arguments.
    :type sas_token: str or callable
    :param eventhub: The name of the EventHub, if not already included in
     the address URL.
    :type eventhub: str
    :param debug: Whether to output network trace logs to the logger.
     Default is `False`.
    :type debug: bool
    :param http_proxy: HTTP proxy settings. This must be a dictionary with
     the following keys: 'proxy_hostname' (str value) and 'proxy_port'
     (int value). Additionally the following keys may also be present:
     'username', 'password'.
    :type http_proxy: dict[str, Any]
    :param auth_timeout: The time in seconds to wait for a token to be
     authorized by the service. The default value is 60 seconds. If set to
     0, no timeout will be enforced from the client.
    :type auth_timeout: int
    """
    address = _build_uri(address, eventhub)
    return cls(address, sas_token=sas_token, **kwargs)
python
{ "resource": "" }
q18747
EventHubClient.from_connection_string
train
def from_connection_string(cls, conn_str, eventhub=None, **kwargs):
    """Create an EventHubClient from a connection string.

    :param conn_str: The connection string.
    :type conn_str: str
    :param eventhub: The name of the EventHub, if the EntityName is not
     included in the connection string.
    :type eventhub: str
    :param debug: Whether to output network trace logs to the logger.
     Default is `False`.
    :type debug: bool
    :param http_proxy: HTTP proxy settings. This must be a dictionary with
     the following keys: 'proxy_hostname' (str value) and 'proxy_port'
     (int value). Additionally the following keys may also be present:
     'username', 'password'.
    :type http_proxy: dict[str, Any]
    :param auth_timeout: The time in seconds to wait for a token to be
     authorized by the service. The default value is 60 seconds. If set to
     0, no timeout will be enforced from the client.
    :type auth_timeout: int
    """
    address, policy, key, entity = _parse_conn_str(conn_str)
    entity = eventhub or entity
    address = _build_uri(address, entity)
    return cls(address, username=policy, password=key, **kwargs)
python
{ "resource": "" }
q18748
EventHubClient.from_iothub_connection_string
train
def from_iothub_connection_string(cls, conn_str, **kwargs):
    """
    Create an EventHubClient from an IoTHub connection string.

    :param conn_str: The connection string.
    :type conn_str: str
    :param debug: Whether to output network trace logs to the logger.
     Default is `False`.
    :type debug: bool
    :param http_proxy: HTTP proxy settings. This must be a dictionary with
     the following keys: 'proxy_hostname' (str value) and 'proxy_port'
     (int value). Additionally the following keys may also be present:
     'username', 'password'.
    :type http_proxy: dict[str, Any]
    :param auth_timeout: The time in seconds to wait for a token to be
     authorized by the service. The default value is 60 seconds. If set to
     0, no timeout will be enforced from the client.
    :type auth_timeout: int
    """
    address, policy, key, _ = _parse_conn_str(conn_str)
    hub_name = address.split('.')[0]
    username = "{}@sas.root.{}".format(policy, hub_name)
    password = _generate_sas_token(address, policy, key)
    client = cls(
        "amqps://" + address,
        username=username,
        password=password,
        **kwargs)
    client._auth_config = {  # pylint: disable=protected-access
        'iot_username': policy,
        'iot_password': key,
        'username': username,
        'password': password}
    return client
python
{ "resource": "" }
q18749
EventHubClient.create_properties
train
def create_properties(self):  # pylint: disable=no-self-use
    """
    Format the properties with which to instantiate the connection.
    This acts like a user agent over HTTP.

    :rtype: dict
    """
    properties = {}
    properties["product"] = "eventhub.python"
    properties["version"] = __version__
    properties["framework"] = "Python {}.{}.{}".format(*sys.version_info[0:3])
    properties["platform"] = sys.platform
    return properties
python
{ "resource": "" }
q18750
EventHubClient.add_receiver
train
def add_receiver(
        self, consumer_group, partition, offset=None, prefetch=300,
        operation=None, keep_alive=30, auto_reconnect=True):
    """
    Add a receiver to the client for a particular consumer group and
    partition.

    :param consumer_group: The name of the consumer group.
    :type consumer_group: str
    :param partition: The ID of the partition.
    :type partition: str
    :param offset: The offset from which to start receiving.
    :type offset: ~azure.eventhub.common.Offset
    :param prefetch: The message prefetch count of the receiver.
     Default is 300.
    :type prefetch: int
    :param operation: An optional operation to be appended to the hostname
     in the source URL. The value must start with `/` character.
    :type operation: str
    :rtype: ~azure.eventhub.receiver.Receiver
    """
    path = self.address.path + operation if operation else self.address.path
    source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format(
        self.address.hostname, path, consumer_group, partition)
    handler = Receiver(
        self, source_url, offset=offset, prefetch=prefetch,
        keep_alive=keep_alive, auto_reconnect=auto_reconnect)
    self.clients.append(handler)
    return handler
python
{ "resource": "" }
q18751
EventHubClient.add_sender
train
def add_sender(self, partition=None, operation=None, send_timeout=60,
               keep_alive=30, auto_reconnect=True):
    """
    Add a sender to the client to send EventData objects to an EventHub.

    :param partition: Optionally specify a particular partition to send
     to. If omitted, the events will be distributed to available
     partitions via round-robin.
    :type partition: str
    :param operation: An optional operation to be appended to the hostname
     in the target URL. The value must start with `/` character.
    :type operation: str
    :param send_timeout: The timeout in seconds for an individual event to
     be sent from the time that it is queued. Default value is 60 seconds.
     If set to 0, there will be no timeout.
    :type send_timeout: int
    :param keep_alive: The time interval in seconds between pinging the
     connection to keep it alive during periods of inactivity. The default
     value is 30 seconds. If set to `None`, the connection will not be
     pinged.
    :type keep_alive: int
    :param auto_reconnect: Whether to automatically reconnect the sender
     if a retryable error occurs. Default value is `True`.
    :rtype: ~azure.eventhub.sender.Sender
    """
    target = "amqps://{}{}".format(self.address.hostname, self.address.path)
    if operation:
        target = target + operation
    handler = Sender(
        self, target, partition=partition, send_timeout=send_timeout,
        keep_alive=keep_alive, auto_reconnect=auto_reconnect)
    self.clients.append(handler)
    return handler
python
{ "resource": "" }
q18752
EventHubClientAsync._create_auth
train
def _create_auth(self, username=None, password=None):
    """
    Create an ~uamqp.authentication.cbs_auth_async.SASTokenAuthAsync
    instance to authenticate the session.

    :param username: The name of the shared access policy.
    :type username: str
    :param password: The shared access key.
    :type password: str
    """
    if self.sas_token:
        token = self.sas_token() if callable(self.sas_token) else self.sas_token
        try:
            expiry = int(parse_sas_token(token)['se'])
        except (KeyError, TypeError, IndexError):
            raise ValueError("Supplied SAS token has no valid expiry value.")
        return authentication.SASTokenAsync(
            self.auth_uri, self.auth_uri, token,
            expires_at=expiry,
            timeout=self.auth_timeout,
            http_proxy=self.http_proxy)

    username = username or self._auth_config['username']
    password = password or self._auth_config['password']
    if "@sas.root" in username:
        return authentication.SASLPlain(
            self.address.hostname, username, password,
            http_proxy=self.http_proxy)
    return authentication.SASTokenAsync.from_shared_access_key(
        self.auth_uri, username, password,
        timeout=self.auth_timeout,
        http_proxy=self.http_proxy)
python
{ "resource": "" }
q18753
EventHubClientAsync.get_eventhub_info_async
train
async def get_eventhub_info_async(self):
    """
    Get details on the specified EventHub async.

    :rtype: dict
    """
    alt_creds = {
        "username": self._auth_config.get("iot_username"),
        "password": self._auth_config.get("iot_password")}
    try:
        mgmt_auth = self._create_auth(**alt_creds)
        mgmt_client = AMQPClientAsync(
            self.mgmt_target, auth=mgmt_auth, debug=self.debug)
        await mgmt_client.open_async()
        mgmt_msg = Message(application_properties={'name': self.eh_name})
        response = await mgmt_client.mgmt_request_async(
            mgmt_msg,
            constants.READ_OPERATION,
            op_type=b'com.microsoft:eventhub',
            status_code_field=b'status-code',
            description_fields=b'status-description')
        eh_info = response.get_data()
        output = {}
        if eh_info:
            output['name'] = eh_info[b'name'].decode('utf-8')
            output['type'] = eh_info[b'type'].decode('utf-8')
            output['created_at'] = datetime.datetime.fromtimestamp(
                float(eh_info[b'created_at']) / 1000)
            output['partition_count'] = eh_info[b'partition_count']
            output['partition_ids'] = [
                p.decode('utf-8') for p in eh_info[b'partition_ids']]
        return output
    finally:
        await mgmt_client.close_async()
python
{ "resource": "" }
q18754
EventHubClientAsync.add_async_receiver
train
def add_async_receiver(
        self, consumer_group, partition, offset=None, prefetch=300,
        operation=None, keep_alive=30, auto_reconnect=True, loop=None):
    """
    Add an async receiver to the client for a particular consumer group
    and partition.

    :param consumer_group: The name of the consumer group.
    :type consumer_group: str
    :param partition: The ID of the partition.
    :type partition: str
    :param offset: The offset from which to start receiving.
    :type offset: ~azure.eventhub.common.Offset
    :param prefetch: The message prefetch count of the receiver.
     Default is 300.
    :type prefetch: int
    :param operation: An optional operation to be appended to the hostname
     in the source URL. The value must start with `/` character.
    :type operation: str
    :rtype: ~azure.eventhub.async_ops.receiver_async.ReceiverAsync
    """
    path = self.address.path + operation if operation else self.address.path
    source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format(
        self.address.hostname, path, consumer_group, partition)
    handler = AsyncReceiver(
        self, source_url, offset=offset, prefetch=prefetch,
        keep_alive=keep_alive, auto_reconnect=auto_reconnect, loop=loop)
    self.clients.append(handler)
    return handler
python
{ "resource": "" }
q18755
Checkpoint.from_source
train
def from_source(self, checkpoint):
    """
    Creates a new Checkpoint from an existing checkpoint.

    :param checkpoint: Existing checkpoint.
    :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint
    """
    self.partition_id = checkpoint.partition_id
    self.offset = checkpoint.offset
    self.sequence_number = checkpoint.sequence_number
python
{ "resource": "" }
q18756
AzureBlobLease.with_blob
train
def with_blob(self, blob):
    """
    Init Azure Blob Lease with existing blob.
    """
    content = json.loads(blob.content)
    self.partition_id = content["partition_id"]
    self.owner = content["owner"]
    self.token = content["token"]
    self.epoch = content["epoch"]
    self.offset = content["offset"]
    self.sequence_number = content["sequence_number"]
    self.event_processor_context = content.get("event_processor_context")
python
{ "resource": "" }
q18757
AzureBlobLease.with_source
train
def with_source(self, lease):
    """
    Init Azure Blob Lease from existing.
    """
    super().with_source(lease)
    self.offset = lease.offset
    self.sequence_number = lease.sequence_number
python
{ "resource": "" }
q18758
AzureBlobLease.is_expired
train
async def is_expired(self):
    """
    Check and return Azure Blob Lease state using Storage API.
    """
    if asyncio.iscoroutinefunction(self.state):
        current_state = await self.state()
    else:
        current_state = self.state()
    if current_state:
        return current_state != "leased"
    return False
python
{ "resource": "" }
q18759
PartitionPump.run
train
def run(self):
    """
    Makes pump sync so that it can be run in a thread.
    """
    self.loop = asyncio.new_event_loop()
    self.loop.run_until_complete(self.open_async())
python
{ "resource": "" }
q18760
PartitionPump.set_pump_status
train
def set_pump_status(self, status):
    """
    Updates pump status and logs update to console.
    """
    self.pump_status = status
    _logger.info("%r partition %r", status, self.lease.partition_id)
python
{ "resource": "" }
q18761
PartitionPump.set_lease
train
def set_lease(self, new_lease):
    """
    Sets a new partition lease to be processed by the pump.

    :param new_lease: The lease to set.
    :type new_lease: ~azure.eventprocessorhost.lease.Lease
    """
    if self.partition_context:
        self.partition_context.lease = new_lease
        self.partition_context.event_processor_context = \
            new_lease.event_processor_context
python
{ "resource": "" }
q18762
PartitionPump.open_async
train
async def open_async(self):
    """
    Opens partition pump.
    """
    self.set_pump_status("Opening")
    self.partition_context = PartitionContext(
        self.host,
        self.lease.partition_id,
        self.host.eh_config.client_address,
        self.host.eh_config.consumer_group,
        self.loop)
    self.partition_context.lease = self.lease
    self.partition_context.event_processor_context = \
        self.lease.event_processor_context
    self.processor = self.host.event_processor(self.host.event_processor_params)
    try:
        await self.processor.open_async(self.partition_context)
    except Exception as err:  # pylint: disable=broad-except
        # If the processor won't create or open, only thing we can do here
        # is pass the buck. Null it out so we don't try to operate on it
        # further.
        await self.process_error_async(err)
        self.processor = None
        self.set_pump_status("OpenFailed")

    # If Open Async Didn't Fail call OnOpenAsync
    if self.pump_status == "Opening":
        await self.on_open_async()
python
{ "resource": "" }
q18763
PartitionPump.close_async
train
async def close_async(self, reason):
    """
    Safely closes the pump.

    :param reason: The reason for the shutdown.
    :type reason: str
    """
    self.set_pump_status("Closing")
    try:
        await self.on_closing_async(reason)
        if self.processor:
            _logger.info("PartitionPumpInvokeProcessorCloseStart %r %r %r",
                         self.host.guid,
                         self.partition_context.partition_id,
                         reason)
            await self.processor.close_async(self.partition_context, reason)
            _logger.info("PartitionPumpInvokeProcessorCloseStart %r %r",
                         self.host.guid,
                         self.partition_context.partition_id)
    except Exception as err:  # pylint: disable=broad-except
        await self.process_error_async(err)
        _logger.error("%r %r %r", self.host.guid,
                      self.partition_context.partition_id, err)
        raise err
    if reason == "LeaseLost":
        try:
            _logger.info("Lease Lost releasing ownership")
            await self.host.storage_manager.release_lease_async(
                self.partition_context.lease)
        except Exception as err:  # pylint: disable=broad-except
            _logger.error("%r %r %r", self.host.guid,
                          self.partition_context.partition_id, err)
            raise err
    self.set_pump_status("Closed")
python
{ "resource": "" }
q18764
PartitionPump.process_events_async
train
async def process_events_async(self, events):
    """
    Process pump events.

    :param events: List of events to be processed.
    :type events: list[~azure.eventhub.common.EventData]
    """
    if events:
        # Synchronize to serialize calls to the processor. The handler is
        # not installed until after OpenAsync returns, so
        # ProcessEventsAsync cannot conflict with OpenAsync. There could be
        # a conflict between ProcessEventsAsync and CloseAsync, however.
        # All calls to CloseAsync are protected by synchronizing too.
        try:
            last = events[-1]
            if last is not None:
                self.partition_context.set_offset_and_sequence_number(last)
            await self.processor.process_events_async(
                self.partition_context, events)
        except Exception as err:  # pylint: disable=broad-except
            await self.process_error_async(err)
python
{ "resource": "" }
q18765
EventHubPartitionPump.on_open_async
train
async def on_open_async(self):
    """
    Eventhub Override for on_open_async.
    """
    _opened_ok = False
    _retry_count = 0
    while (not _opened_ok) and (_retry_count < 5):
        try:
            await self.open_clients_async()
            _opened_ok = True
        except Exception as err:  # pylint: disable=broad-except
            _logger.warning(
                "%r,%r PartitionPumpWarning: Failure creating client or "
                "receiver, retrying: %r",
                self.host.guid, self.partition_context.partition_id, err)
            last_exception = err
            _retry_count += 1

    if not _opened_ok:
        await self.processor.process_error_async(
            self.partition_context, last_exception)
        self.set_pump_status("OpenFailed")

    if self.pump_status == "Opening":
        loop = asyncio.get_event_loop()
        self.set_pump_status("Running")
        await self.eh_client.run_async()
        self.running = loop.create_task(self.partition_receiver.run())

    if self.pump_status in ["OpenFailed", "Errored"]:
        self.set_pump_status("Closing")
        await self.clean_up_clients_async()
        self.set_pump_status("Closed")
python
{ "resource": "" }
q18766
EventHubPartitionPump.open_clients_async
train
async def open_clients_async(self):
    """
    Responsible for establishing a connection to the event hub client.
    Throws EventHubsException, IOException, InterruptedException,
    ExecutionException.
    """
    await self.partition_context.get_initial_offset_async()
    # Create event hub client and receive handler and set options
    self.eh_client = EventHubClientAsync(
        self.host.eh_config.client_address,
        debug=self.host.eph_options.debug_trace,
        http_proxy=self.host.eph_options.http_proxy)
    self.partition_receive_handler = self.eh_client.add_async_receiver(
        self.partition_context.consumer_group_name,
        self.partition_context.partition_id,
        Offset(self.partition_context.offset),
        prefetch=self.host.eph_options.prefetch_count,
        keep_alive=self.host.eph_options.keep_alive_interval,
        auto_reconnect=self.host.eph_options.auto_reconnect_on_error,
        loop=self.loop)
    self.partition_receiver = PartitionReceiver(self)
python
{ "resource": "" }
q18767
EventHubPartitionPump.clean_up_clients_async
train
async def clean_up_clients_async(self):
    """
    Resets the pump; swallows all exceptions.
    """
    if self.partition_receiver:
        if self.eh_client:
            await self.eh_client.stop_async()
            self.partition_receiver = None
            self.partition_receive_handler = None
            self.eh_client = None
python
{ "resource": "" }
q18768
EventHubPartitionPump.on_closing_async
train
async def on_closing_async(self, reason):
    """
    Overrides partition pump on closing.

    :param reason: The reason for the shutdown.
    :type reason: str
    """
    self.partition_receiver.eh_partition_pump.set_pump_status("Errored")
    try:
        await self.running
    except TypeError:
        _logger.debug("No partition pump running.")
    except Exception as err:  # pylint: disable=broad-except
        _logger.info("Error on closing partition pump: %r", err)
    await self.clean_up_clients_async()
python
{ "resource": "" }
q18769
PartitionReceiver.run
train
async def run(self):
    """
    Runs the async partition receiver event loop to retrieve messages
    from the event queue.
    """
    # Implement pull max batch from queue instead of one message at a time
    while (self.eh_partition_pump.pump_status != "Errored"
           and not self.eh_partition_pump.is_closing()):
        if self.eh_partition_pump.partition_receive_handler:
            try:
                msgs = await self.eh_partition_pump.partition_receive_handler.receive(
                    max_batch_size=self.max_batch_size,
                    timeout=self.recieve_timeout)
            except Exception as e:  # pylint: disable=broad-except
                _logger.info(
                    "Error raised while attempting to receive messages: %r", e)
                await self.process_error_async(e)
            else:
                if not msgs:
                    _logger.info(
                        "No events received, queue size %r, release %r",
                        self.eh_partition_pump.partition_receive_handler.queue_size,
                        self.eh_partition_pump.host.eph_options.release_pump_on_timeout)
                    if self.eh_partition_pump.host.eph_options.release_pump_on_timeout:
                        await self.process_error_async(
                            TimeoutError("No events received"))
                else:
                    await self.process_events_async(msgs)
python
{ "resource": "" }
q18770
Lease.with_partition_id
train
def with_partition_id(self, partition_id):
    """
    Init with partition Id.

    :param partition_id: ID of a given partition.
    :type partition_id: str
    """
    self.partition_id = partition_id
    self.owner = None
    self.token = None
    self.epoch = 0
    self.event_processor_context = None
python
{ "resource": "" }
q18771
Lease.with_source
train
def with_source(self, lease):
    """
    Init with existing lease.

    :param lease: An existing Lease.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    """
    self.partition_id = lease.partition_id
    self.epoch = lease.epoch
    self.owner = lease.owner
    self.token = lease.token
    self.event_processor_context = lease.event_processor_context
python
{ "resource": "" }
q18772
EventProcessorHost.open_async
train
async def open_async(self):
    """
    Starts the host.
    """
    if not self.loop:
        self.loop = asyncio.get_event_loop()
    await self.partition_manager.start_async()
python
{ "resource": "" }
q18773
PartitionContext.set_offset_and_sequence_number
train
def set_offset_and_sequence_number(self, event_data):
    """
    Updates offset based on event.

    :param event_data: A received EventData with valid offset and
     sequenceNumber.
    :type event_data: ~azure.eventhub.common.EventData
    """
    if not event_data:
        raise Exception(event_data)
    self.offset = event_data.offset.value
    self.sequence_number = event_data.sequence_number
python
{ "resource": "" }
q18774
PartitionContext.get_initial_offset_async
train
async def get_initial_offset_async(self):
    # throws InterruptedException, ExecutionException
    """
    Gets the initial offset for processing the partition.

    :rtype: str
    """
    _logger.info("Calling user-provided initial offset provider %r %r",
                 self.host.guid, self.partition_id)
    starting_checkpoint = await self.host.storage_manager.get_checkpoint_async(
        self.partition_id)
    if not starting_checkpoint:
        # No checkpoint was ever stored. Use the initialOffsetProvider
        # instead; defaults to "-1".
        self.offset = self.host.eph_options.initial_offset_provider
        self.sequence_number = -1
    else:
        self.offset = starting_checkpoint.offset
        self.sequence_number = starting_checkpoint.sequence_number
    _logger.info("%r %r Initial offset/sequenceNumber provided %r/%r",
                 self.host.guid, self.partition_id,
                 self.offset, self.sequence_number)
    return self.offset
python
{ "resource": "" }
q18775
PartitionContext.checkpoint_async
train
async def checkpoint_async(self, event_processor_context=None):
    """
    Generates a checkpoint for the partition using the current offset and
    sequence number, and persists it via the checkpoint manager.

    :param event_processor_context: An optional custom state value for the Event Processor.
     This data must be in a JSON serializable format.
    :type event_processor_context: str or dict
    """
    captured_checkpoint = Checkpoint(self.partition_id, self.offset, self.sequence_number)
    await self.persist_checkpoint_async(captured_checkpoint, event_processor_context)
    self.event_processor_context = event_processor_context
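# A minimal usage sketch (illustrative, not part of the original module): the
# checkpoint is typically taken from a custom event processor after a batch has
# been handled. MyEventProcessor is an assumed name; the other required
# AbstractEventProcessor methods are omitted for brevity.
from azure.eventprocessorhost import AbstractEventProcessor

class MyEventProcessor(AbstractEventProcessor):
    async def process_events_async(self, context, messages):
        for message in messages:
            pass  # handle each EventData here
        # Persist the current offset/sequence number for this partition.
        await context.checkpoint_async()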
python
{ "resource": "" }
q18776
PartitionContext.checkpoint_async_event_data
train
async def checkpoint_async_event_data(self, event_data, event_processor_context=None):
    """
    Stores the offset and sequence number from the provided received EventData
    instance, then writes those values to the checkpoint store via the checkpoint
    manager. Optionally stores the state of the Event Processor along with the
    checkpoint.

    :param event_data: A received EventData with valid offset and sequenceNumber.
    :type event_data: ~azure.eventhub.common.EventData
    :param event_processor_context: An optional custom state value for the Event Processor.
     This data must be in a JSON serializable format.
    :type event_processor_context: str or dict
    :raises: ValueError if the supplied event_data is None.
    :raises: ValueError if the sequence number of the supplied event_data is greater
     than the highest sequence number this context has processed so far.
    """
    if not event_data:
        raise ValueError("event_data")
    if event_data.sequence_number > self.sequence_number:
        # We have never seen this sequence number yet
        raise ValueError("Argument Out Of Range event_data x-opt-sequence-number")
    await self.persist_checkpoint_async(
        Checkpoint(self.partition_id, event_data.offset.value, event_data.sequence_number),
        event_processor_context)
    self.event_processor_context = event_processor_context
python
{ "resource": "" }
q18777
PartitionContext.persist_checkpoint_async
train
async def persist_checkpoint_async(self, checkpoint, event_processor_context=None):
    """
    Persists the checkpoint, and - optionally - the state of the Event Processor.

    :param checkpoint: The checkpoint to persist.
    :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint
    :param event_processor_context: An optional custom state value for the Event Processor.
     This data must be in a JSON serializable format.
    :type event_processor_context: str or dict
    """
    _logger.debug("PartitionPumpCheckpointStart %r %r %r %r",
                  self.host.guid, checkpoint.partition_id,
                  checkpoint.offset, checkpoint.sequence_number)
    try:
        in_store_checkpoint = await self.host.storage_manager.get_checkpoint_async(
            checkpoint.partition_id)
        if not in_store_checkpoint or checkpoint.sequence_number >= in_store_checkpoint.sequence_number:
            if not in_store_checkpoint:
                _logger.info("persisting checkpoint %r", checkpoint.__dict__)
                await self.host.storage_manager.create_checkpoint_if_not_exists_async(
                    checkpoint.partition_id)
            self.lease.event_processor_context = event_processor_context
            if not await self.host.storage_manager.update_checkpoint_async(self.lease, checkpoint):
                _logger.error("Failed to persist checkpoint for partition: %r", self.partition_id)
                raise Exception("failed to persist checkpoint")
            self.lease.offset = checkpoint.offset
            self.lease.sequence_number = checkpoint.sequence_number
        else:
            _logger.error(  # pylint: disable=logging-not-lazy
                "Ignoring out of date checkpoint with offset %r/sequence number %r because " +
                "current persisted checkpoint has higher offset %r/sequence number %r",
                checkpoint.offset, checkpoint.sequence_number,
                in_store_checkpoint.offset, in_store_checkpoint.sequence_number)
            raise Exception("offset/sequenceNumber invalid")
    except Exception as err:
        _logger.error("PartitionPumpCheckpointError %r %r %r",
                      self.host.guid, checkpoint.partition_id, err)
        raise
    finally:
        _logger.debug("PartitionPumpCheckpointStop %r %r",
                      self.host.guid, checkpoint.partition_id)
python
{ "resource": "" }
q18778
Receiver.receive
train
def receive(self, max_batch_size=None, timeout=None):
    """
    Receive events from the EventHub.

    :param max_batch_size: Receive a batch of events. Batch size will be up to
     the maximum specified, but will return as soon as the service returns no
     new events. If combined with a timeout and no events are retrieved before
     the time, the result will be empty. If no batch size is supplied, the
     prefetch size will be the maximum.
    :type max_batch_size: int
    :rtype: list[~azure.eventhub.common.EventData]
    """
    if self.error:
        raise self.error
    if not self.running:
        raise ValueError("Unable to receive until client has been started.")
    data_batch = []
    try:
        timeout_ms = 1000 * timeout if timeout else 0
        message_batch = self._handler.receive_message_batch(
            max_batch_size=max_batch_size,
            timeout=timeout_ms)
        for message in message_batch:
            event_data = EventData(message=message)
            self.offset = event_data.offset
            data_batch.append(event_data)
        return data_batch
    except (errors.TokenExpired, errors.AuthenticationException):
        log.info("Receiver disconnected due to token error. Attempting reconnect.")
        self.reconnect()
        return data_batch
    except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
        if shutdown.action.retry and self.auto_reconnect:
            log.info("Receiver detached. Attempting reconnect.")
            self.reconnect()
            return data_batch
        log.info("Receiver detached. Shutting down.")
        error = EventHubError(str(shutdown), shutdown)
        self.close(exception=error)
        raise error
    except errors.MessageHandlerError as shutdown:
        if self.auto_reconnect:
            log.info("Receiver detached. Attempting reconnect.")
            self.reconnect()
            return data_batch
        log.info("Receiver detached. Shutting down.")
        error = EventHubError(str(shutdown), shutdown)
        self.close(exception=error)
        raise error
    except Exception as e:
        log.info("Unexpected error occurred (%r). Shutting down.", e)
        error = EventHubError("Receive failed: {}".format(e))
        self.close(exception=error)
        raise error
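# A minimal usage sketch (illustrative; the connection string, hub name and
# partition values are placeholders) showing a typical synchronous receive loop:
from azure.eventhub import EventHubClient, Offset

client = EventHubClient.from_connection_string("<connection-string>", eventhub="<hub-name>")
receiver = client.add_receiver("$default", "0", offset=Offset("-1"))
client.run()
try:
    batch = receiver.receive(max_batch_size=100, timeout=5)
    for event in batch:
        print(event.sequence_number, event.body_as_str())
finally:
    client.stop()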
python
{ "resource": "" }
q18779
PartitionManager.get_partition_ids_async
train
async def get_partition_ids_async(self):
    """
    Returns a list of all the event hub partition IDs.

    :rtype: list[str]
    """
    if not self.partition_ids:
        eh_client = EventHubClientAsync(
            self.host.eh_config.client_address,
            debug=self.host.eph_options.debug_trace,
            http_proxy=self.host.eph_options.http_proxy)
        try:
            eh_info = await eh_client.get_eventhub_info_async()
            self.partition_ids = eh_info['partition_ids']
        except Exception as err:  # pylint: disable=broad-except
            raise Exception("Failed to get partition ids", repr(err))
        finally:
            await eh_client.stop_async()
    return self.partition_ids
python
{ "resource": "" }
q18780
PartitionManager.start_async
train
async def start_async(self):
    """
    Initializes the partition checkpoint and lease store, then calls run_async.
    """
    if self.run_task:
        raise Exception("A PartitionManager cannot be started multiple times.")
    partition_count = await self.initialize_stores_async()
    _logger.info("%r PartitionCount: %r", self.host.guid, partition_count)
    self.run_task = asyncio.ensure_future(self.run_async())
python
{ "resource": "" }
q18781
PartitionManager.stop_async
train
async def stop_async(self):
    """
    Terminates the partition manager.
    """
    self.cancellation_token.cancel()
    if self.run_task and not self.run_task.done():
        await self.run_task
python
{ "resource": "" }
q18782
PartitionManager.run_async
train
async def run_async(self):
    """
    Starts the run loop and manages exceptions and cleanup.
    """
    try:
        await self.run_loop_async()
    except Exception as err:  # pylint: disable=broad-except
        _logger.error("Run loop failed %r", err)
    try:
        _logger.info("Shutting down all pumps %r", self.host.guid)
        await self.remove_all_pumps_async("Shutdown")
    except Exception as err:  # pylint: disable=broad-except
        raise Exception("Failed to remove all pumps {!r}".format(err))
python
{ "resource": "" }
q18783
PartitionManager.initialize_stores_async
train
async def initialize_stores_async(self):
    """
    Initializes the partition checkpoint and lease store and ensures that a
    checkpoint exists for all partitions. Note that in this case the checkpoint
    and lease stores are the same storage manager construct.

    :return: Returns the number of partitions.
    :rtype: int
    """
    await self.host.storage_manager.create_checkpoint_store_if_not_exists_async()
    partition_ids = await self.get_partition_ids_async()
    retry_tasks = []
    for partition_id in partition_ids:
        retry_tasks.append(
            self.retry_async(
                self.host.storage_manager.create_checkpoint_if_not_exists_async,
                partition_id=partition_id,
                retry_message="Failure creating checkpoint for partition, retrying",
                final_failure_message="Out of retries creating checkpoint blob for partition",
                max_retries=5,
                host_id=self.host.host_name))
    await asyncio.gather(*retry_tasks)
    return len(partition_ids)
python
{ "resource": "" }
q18784
PartitionManager.retry_async
train
async def retry_async(self, func, partition_id, retry_message,
                      final_failure_message, max_retries, host_id):
    """
    Throws if it runs out of retries. If it returns, the action succeeded.
    """
    created_okay = False
    retry_count = 0
    while not created_okay and retry_count <= max_retries:
        try:
            await func(partition_id)
            created_okay = True
        except Exception as err:  # pylint: disable=broad-except
            _logger.error("%r %r %r %r", retry_message, host_id, partition_id, err)
            retry_count += 1
    if not created_okay:
        raise Exception(host_id, final_failure_message)
python
{ "resource": "" }
q18785
PartitionManager.run_loop_async
train
async def run_loop_async(self):
    """
    This is the main execution loop for allocating and managing pumps.
    """
    while not self.cancellation_token.is_cancelled:
        lease_manager = self.host.storage_manager
        # Inspect all leases.
        # Acquire any expired leases.
        # Renew any leases that currently belong to us.
        getting_all_leases = await lease_manager.get_all_leases()
        leases_owned_by_others_q = Queue()
        renew_tasks = [
            self.attempt_renew_lease_async(
                get_lease_task,
                owned_by_others_q=leases_owned_by_others_q,
                lease_manager=lease_manager)
            for get_lease_task in getting_all_leases]
        await asyncio.gather(*renew_tasks)

        # Extract all_leases, leases_owned_by_others and our_lease_count from the queue.
        all_leases = {}
        leases_owned_by_others = []
        our_lease_count = 0
        while not leases_owned_by_others_q.empty():
            lease_owned_by_other = leases_owned_by_others_q.get()
            # Check if the lease is owned by another host and append it if so.
            if lease_owned_by_other[0]:
                leases_owned_by_others.append(lease_owned_by_other[1])
            else:
                our_lease_count += 1
            all_leases[lease_owned_by_other[1].partition_id] = lease_owned_by_other[1]

        # Grab more leases if available and needed for load balancing
        leases_owned_by_others_count = len(leases_owned_by_others)
        if leases_owned_by_others_count > 0:
            steal_this_lease = self.which_lease_to_steal(
                leases_owned_by_others, our_lease_count)
            if steal_this_lease:
                try:
                    _logger.info("Lease to steal %r", steal_this_lease.serializable())
                    if await lease_manager.acquire_lease_async(steal_this_lease):
                        _logger.info("Stole lease successfully %r %r",
                                     self.host.guid, steal_this_lease.partition_id)
                    else:
                        _logger.info("Failed to steal lease for partition %r %r",
                                     self.host.guid, steal_this_lease.partition_id)
                except Exception as err:  # pylint: disable=broad-except
                    _logger.error("Failed to steal lease %r", err)

        for partition_id in all_leases:
            try:
                updated_lease = all_leases[partition_id]
                if updated_lease.owner == self.host.host_name:
                    _logger.debug("Attempting to renew lease %r %r",
                                  self.host.guid, partition_id)
                    await self.check_and_add_pump_async(partition_id, updated_lease)
                else:
                    _logger.debug("Removing pump due to lost lease.")
                    await self.remove_pump_async(partition_id, "LeaseLost")
            except Exception as err:  # pylint: disable=broad-except
                _logger.error("Failed to update lease %r", err)
        await asyncio.sleep(lease_manager.lease_renew_interval)
python
{ "resource": "" }
q18786
PartitionManager.check_and_add_pump_async
train
async def check_and_add_pump_async(self, partition_id, lease):
    """
    Updates the lease on an existing pump.

    :param partition_id: The partition ID.
    :type partition_id: str
    :param lease: The lease to be used.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    """
    if partition_id in self.partition_pumps:
        # There already is a pump. Make sure the pump is working and replace the lease.
        captured_pump = self.partition_pumps[partition_id]
        if captured_pump.pump_status == "Errored" or captured_pump.is_closing():
            # The existing pump is bad. Remove it.
            await self.remove_pump_async(partition_id, "Shutdown")
        else:
            # Pump is working, should just replace the lease.
            # This is causing a race condition since if the checkpoint is being updated
            # when the lease changes then the pump will error and shut down
            captured_pump.set_lease(lease)
    else:
        _logger.info("Starting pump %r %r", self.host.guid, partition_id)
        await self.create_new_pump_async(partition_id, lease)
python
{ "resource": "" }
q18787
PartitionManager.create_new_pump_async
train
async def create_new_pump_async(self, partition_id, lease):
    """
    Create a new pump thread with a given lease.

    :param partition_id: The partition ID.
    :type partition_id: str
    :param lease: The lease to be used.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    """
    loop = asyncio.get_event_loop()
    partition_pump = EventHubPartitionPump(self.host, lease)
    # Do the put after start, if the start fails then put doesn't happen
    loop.create_task(partition_pump.open_async())
    self.partition_pumps[partition_id] = partition_pump
    _logger.info("Created new partition pump %r %r", self.host.guid, partition_id)
python
{ "resource": "" }
q18788
PartitionManager.remove_pump_async
train
async def remove_pump_async(self, partition_id, reason):
    """
    Stops a single partition pump.

    :param partition_id: The partition ID.
    :type partition_id: str
    :param reason: A reason for closing.
    :type reason: str
    """
    if partition_id in self.partition_pumps:
        captured_pump = self.partition_pumps[partition_id]
        if not captured_pump.is_closing():
            await captured_pump.close_async(reason)
        # else, pump is already closing/closed, don't need to try to shut it down again
        del self.partition_pumps[partition_id]  # remove pump
        _logger.debug("Removed pump %r %r", self.host.guid, partition_id)
        _logger.debug("%r pumps still running", len(self.partition_pumps))
    else:
        # PartitionManager main loop tries to remove a pump for every partition that the
        # host does not own, just to be sure. Not finding a pump for a partition is normal
        # and expected most of the time.
        _logger.debug("No pump found to remove for this partition %r %r",
                      self.host.guid, partition_id)
python
{ "resource": "" }
q18789
PartitionManager.which_lease_to_steal
train
def which_lease_to_steal(self, stealable_leases, have_lease_count):
    """
    Determines and returns which lease to steal.

    If the number of leases is a multiple of the number of hosts, then the desired
    configuration is that all hosts own the same number of leases, and the
    difference between the "biggest" owner and any other is 0.

    If the number of leases is not a multiple of the number of hosts, then the most
    even configuration possible is for some hosts to have (leases/hosts) leases and
    others to have ((leases/hosts) + 1). For example, for 16 partitions distributed
    over five hosts, the distribution would be 4, 3, 3, 3, 3, or any of the possible
    reorderings.

    In either case, if the difference between this host and the biggest owner is 2
    or more, then the system is not in the most evenly-distributed configuration,
    so steal one lease from the biggest. If there is a tie for biggest, we pick
    whichever appears first in the list because it doesn't really matter which
    "biggest" is trimmed down.

    Stealing one at a time prevents flapping because it reduces the difference
    between the biggest and this host by two at a time. If the starting difference
    is two or greater, then the difference cannot end up below 0. This host may
    become tied for biggest, but it cannot become larger than the host that it is
    stealing from.

    :param stealable_leases: List of leases to determine which can be stolen.
    :type stealable_leases: list[~azure.eventprocessorhost.lease.Lease]
    :param have_lease_count: Lease count.
    :type have_lease_count: int
    :rtype: ~azure.eventprocessorhost.lease.Lease
    """
    counts_by_owner = self.count_leases_by_owner(stealable_leases)
    biggest_owner = (sorted(counts_by_owner.items(), key=lambda kv: kv[1])).pop()
    steal_this_lease = None
    if (biggest_owner[1] - have_lease_count) >= 2:
        steal_this_lease = [l for l in stealable_leases if l.owner == biggest_owner[0]][0]
    return steal_this_lease
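# A small worked sketch (illustrative host names and counts) mirroring the
# decision above: this host owns 2 leases, and the stealable leases are owned
# 4/3/3/3 by four other hosts.
from collections import Counter

counts_by_owner = Counter({"host-a": 4, "host-b": 3, "host-c": 3, "host-d": 3})
have_lease_count = 2
biggest_owner = sorted(counts_by_owner.items(), key=lambda kv: kv[1]).pop()
if biggest_owner[1] - have_lease_count >= 2:  # 4 - 2 >= 2, so steal
    print("steal one lease from", biggest_owner[0])  # -> host-a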
python
{ "resource": "" }
q18790
PartitionManager.count_leases_by_owner
train
def count_leases_by_owner(self, leases):  # pylint: disable=no-self-use
    """
    Returns a dictionary of leases by current owner.
    """
    owners = [l.owner for l in leases]
    return dict(Counter(owners))
python
{ "resource": "" }
q18791
PartitionManager.attempt_renew_lease_async
train
async def attempt_renew_lease_async(self, lease_task, owned_by_others_q, lease_manager):
    """
    Attempts to acquire an expired lease, or to renew a lease this host already
    owns, and records the outcome by adding an (owned_by_other, lease) tuple to
    the queue.
    """
    try:
        possible_lease = await lease_task
        if await possible_lease.is_expired():
            _logger.info("Trying to acquire lease %r %r",
                         self.host.guid, possible_lease.partition_id)
            if await lease_manager.acquire_lease_async(possible_lease):
                owned_by_others_q.put((False, possible_lease))
            else:
                owned_by_others_q.put((True, possible_lease))
        elif possible_lease.owner == self.host.host_name:
            try:
                _logger.debug("Trying to renew lease %r %r",
                              self.host.guid, possible_lease.partition_id)
                if await lease_manager.renew_lease_async(possible_lease):
                    owned_by_others_q.put((False, possible_lease))
                else:
                    owned_by_others_q.put((True, possible_lease))
            except Exception as err:  # pylint: disable=broad-except
                # Update to 'Lease Lost' exception.
                _logger.error("Lease lost exception %r %r %r",
                              err, self.host.guid, possible_lease.partition_id)
                owned_by_others_q.put((True, possible_lease))
        else:
            owned_by_others_q.put((True, possible_lease))
    except Exception as err:  # pylint: disable=broad-except
        _logger.error(
            "Failure during getting/acquiring/renewing lease, skipping %r", err)
python
{ "resource": "" }
q18792
PymataSocket.start
train
async def start(self):
    """
    This method opens an IP connection on the IP device.

    :return: None
    """
    try:
        self.reader, self.writer = await asyncio.open_connection(
            self.ip_address, self.port, loop=self.loop)
    except OSError:
        print("Can't open connection to " + self.ip_address)
        sys.exit(0)
python
{ "resource": "" }
q18793
PyMata3.digital_read
train
def digital_read(self, pin):
    """
    Retrieve the last data update for the specified digital pin.
    It is intended for a polling application.

    :param pin: Digital pin number
    :returns: Last value reported for the digital pin
    """
    task = asyncio.ensure_future(self.core.digital_read(pin))
    value = self.loop.run_until_complete(task)
    return value
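# A minimal polling sketch (illustrative; the pin number and poll interval are
# assumptions) using the PyMata3 wrapper:
from pymata_aio.pymata3 import PyMata3
from pymata_aio.constants import Constants

board = PyMata3()
board.set_pin_mode(12, Constants.INPUT)  # configure pin 12 as a digital input
for _ in range(10):
    print(board.digital_read(12))  # last value reported for the pin
    board.sleep(1)
board.shutdown()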
python
{ "resource": "" }
q18794
PyMata3.encoder_read
train
def encoder_read(self, pin):
    """
    This method retrieves the latest encoder data value. It is a FirmataPlus feature.

    :param pin: Encoder pin
    :returns: Encoder data value
    """
    try:
        task = asyncio.ensure_future(self.core.encoder_read(pin))
        value = self.loop.run_until_complete(task)
        return value
    except RuntimeError:
        self.shutdown()
python
{ "resource": "" }
q18795
PyMata3.enable_digital_reporting
train
def enable_digital_reporting(self, pin):
    """
    Enables digital reporting by turning reporting on for all 8 bits in the
    "port" to which the pin belongs. This is part of Firmata's protocol
    specification.

    :param pin: Pin and all pins for this port
    :returns: No return value
    """
    task = asyncio.ensure_future(self.core.enable_digital_reporting(pin))
    self.loop.run_until_complete(task)
python
{ "resource": "" }
q18796
PyMata3.extended_analog
train
def extended_analog(self, pin, data):
    """
    This method will send an extended-data analog write command to the selected pin.

    :param pin: 0 - 127
    :param data: 0 - 0x4000 (14 bits)
    :returns: No return value
    """
    task = asyncio.ensure_future(self.core.extended_analog(pin, data))
    self.loop.run_until_complete(task)
python
{ "resource": "" }
q18797
PyMata3.get_analog_map
train
def get_analog_map(self, cb=None):
    """
    This method requests and returns an analog map.

    :param cb: Optional callback reference
    :returns: An analog map response, or None if a timeout occurs
    """
    task = asyncio.ensure_future(self.core.get_analog_map())
    report = self.loop.run_until_complete(task)
    if cb:
        cb(report)
    else:
        return report
python
{ "resource": "" }
q18798
PyMata3.get_capability_report
train
def get_capability_report(self, raw=True, cb=None):
    """
    This method retrieves the Firmata capability report.

    :param raw: If True, either returns the report as a list or provides it to
     the callback. If False, prints a formatted report to the console.
    :param cb: Optional callback reference to receive a raw report
    :returns: Capability report
    """
    task = asyncio.ensure_future(self.core.get_capability_report())
    report = self.loop.run_until_complete(task)
    if raw:
        if cb:
            cb(report)
        else:
            return report
    else:
        # noinspection PyProtectedMember
        self.core._format_capability_report(report)
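# A short usage sketch (illustrative) of the three retrieval styles; assumes a
# board connected via PyMata3:
from pymata_aio.pymata3 import PyMata3

board = PyMata3()

def print_report(data):
    print("capability report:", data)

report = board.get_capability_report()        # raw report returned as a list
board.get_capability_report(cb=print_report)  # raw report delivered to a callback
board.get_capability_report(raw=False)        # formatted report printed to the console
board.shutdown()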
python
{ "resource": "" }
q18799
PyMata3.get_pymata_version
train
def get_pymata_version(self):
    """
    This method retrieves the PyMata version number.

    :returns: PyMata version number.
    """
    task = asyncio.ensure_future(self.core.get_pymata_version())
    value = self.loop.run_until_complete(task)
    return value
python
{ "resource": "" }