blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c56630841c8602bde83bb5f0a0ab2c6ffcd7ceb2 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa1/sample/def_func_nonlocal-67.py | 097609d7e0d61bbab6660375189cf84a37495645 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py |
def foo(x:int) -> bool:
a:int = 0
b:int = 1
def bar(y: int) -> int:
nonlocal a
a = 2
return y
return bar($Parameters) > a
foo(1)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
75f658448722da512c74e14e4e1266419cd6b885 | b2b9be47ce8c2cd0985eb39fa4489e149c78a085 | /nintendo/common/http.py | 3562997ac828fce35d724486d11d1eb836251c34 | [
"MIT"
] | permissive | SmmServer/NintendoClients | 9e15801f73d35877b5e3a47830c2a5f2778547ad | e1d2fec34e460cd87330a8cc886e54479d701469 | refs/heads/master | 2022-09-10T13:06:35.800799 | 2019-08-29T17:23:46 | 2019-08-29T17:23:46 | 218,143,700 | 0 | 3 | null | 2019-10-28T20:56:14 | 2019-10-28T20:56:13 | null | UTF-8 | Python | false | false | 5,291 | py |
from . import socket, signal, util, scheduler
import logging
logger = logging.getLogger(__name__)
class HTTPFormData:
    """Key/value pairs parsed from a query string or form-encoded body.

    NOTE(review): values are stored verbatim — no percent-decoding is
    applied; confirm callers expect raw (still-encoded) values.
    """

    def __init__(self):
        self.fields = {}

    def parse(self, data):
        """Parse "key=value&key=value" data; return False on malformed input."""
        for pair in data.split("&"):
            key, sep, value = pair.partition("=")
            if not sep:
                logger.warning("Malformed form parameter")
                return False
            self.fields[key] = value
        return True

    def __contains__(self, item):
        return item in self.fields

    def __getitem__(self, item):
        return self.fields[item]
class HTTPRequest:
    """A single HTTP request received from a client connection."""

    def __init__(self, client):
        self.client = client  # socket the request arrived on
        self.method = None
        self.path = None
        self.version = None
        self.headers = util.CaseInsensitiveDict()
        self.body = ""
        self.params = HTTPFormData()  # query-string parameters
        self.form = HTTPFormData()    # urlencoded body fields

    def process(self):
        """Split query parameters off the path and parse a form body.

        Returns False when either part fails to parse.
        """
        path, sep, query = self.path.partition("?")
        if sep:
            self.path = path
            if not self.params.parse(query):
                return False
        if self.headers.get("Content-Type") == "application/x-www-form-urlencoded":
            return self.form.parse(self.body)
        return True
RESPONSE_TEMPLATE = "%s %i %s\r\n%s\r\n"


class HTTPResponse:
    """An outgoing HTTP response.

    Fill in ``headers`` and ``body``, then call :meth:`encode` to obtain
    the raw bytes to send on the socket.
    """

    # Reason phrases for the status codes this server commonly emits.
    status_names = {
        200: "OK",
        400: "Bad Request",
        401: "Unauthorized",
        403: "Forbidden",
        404: "Not Found",
        405: "Method Not Allowed"
    }

    def __init__(self, status):
        self.version = "HTTP/1.1"
        self.status = status
        self.headers = util.CaseInsensitiveDict()
        self.body = ""

    def encode(self):
        """Serialize status line, headers and body to ASCII bytes."""
        self.headers["Content-Length"] = len(self.body)
        headers = ""
        for key, value in self.headers.items():
            headers += "%s: %s\r\n" %(key, value)
        header = RESPONSE_TEMPLATE %(
            self.version, self.status,
            # Fall back to a generic reason phrase instead of raising
            # KeyError when a handler returns a status code not listed in
            # status_names (e.g. 500).  The reason phrase is informational
            # only, so any text is acceptable to clients.
            self.status_names.get(self.status, "Unknown"),
            headers
        )
        return (header + self.body).encode("ascii")
class HTTPState:
    """Per-connection parser state machine for incoming HTTP requests.

    Bytes arriving on the socket are buffered and fed through a two-step
    machine (header, then optional body).  Each completely parsed request
    is delivered to subscribers of ``message_event``; on a parse error a
    400 response is sent and the connection is closed.
    """

    # Results returned by each parse step.
    RESULT_OK = 0          # step consumed data; another step may run
    RESULT_INCOMPLETE = 1  # need more bytes before progressing
    RESULT_ERROR = 2       # malformed request; abort the connection

    def __init__(self, socket):
        self.socket = socket
        self.buffer = b""  # unconsumed bytes received so far
        self.state = self.state_header  # current parse step
        # Register so handle_recv is invoked whenever data arrives.
        self.event = scheduler.add_socket(self.handle_recv, socket)
        self.request = HTTPRequest(socket)  # request currently being parsed
        self.message_event = signal.Signal()  # fired with each parsed request

    def handle_recv(self, data):
        """Feed received bytes into the parser; empty data means disconnect."""
        if not data:
            scheduler.remove(self.event)
            return
        self.buffer += data
        # Keep running parse steps while they make progress on the buffer;
        # this handles pipelined requests arriving in a single chunk.
        result = self.state()
        while self.buffer and result == self.RESULT_OK:
            result = self.state()
        if result == self.RESULT_ERROR:
            logger.warning("Failed to parse HTTP request")
            response = HTTPResponse(400)
            self.socket.send(response.encode())
            scheduler.remove(self.event)
            self.socket.close()

    def finish(self):
        """Deliver the completed request and reset for the next one."""
        if not self.request.process():
            return self.RESULT_ERROR
        self.message_event(self.request)
        self.request = HTTPRequest(self.socket)
        self.state = self.state_header
        return self.RESULT_OK

    def handle_header(self, data):
        """Parse the request line and header fields from ``data``."""
        try:
            lines = data.decode("ascii").splitlines()
        except UnicodeDecodeError:
            logger.warning("Failed to decode HTTP request")
            return self.RESULT_ERROR
        # Request line: "<method> <path> <version>".
        fields = lines[0].split()
        if len(fields) != 3:
            logger.warning("Invalid HTTP request")
            return self.RESULT_ERROR
        self.request.method = fields[0]
        self.request.path = fields[1]
        self.request.version = fields[2]
        for header in lines[1:]:
            if not ": " in header:
                logger.warning("Invalid HTTP request header")
                return self.RESULT_ERROR
            key, value = header.split(": ", 1)
            # headers is a CaseInsensitiveDict; .lower() is redundant but
            # harmless — kept for byte-identical behavior.
            self.request.headers[key.lower()] = value
        if "Content-Length" in self.request.headers:
            if not util.is_numeric(self.request.headers["Content-Length"]):
                logger.warning("Invalid Content-Length header")
                return self.RESULT_ERROR
            # A body is expected; switch to the body state.
            self.state = self.state_body
        else:
            # No body: the request is complete as-is.
            return self.finish()
        return self.RESULT_OK

    def state_header(self):
        """Wait for the blank line that terminates the header section."""
        if b"\r\n\r\n" in self.buffer:
            header, self.buffer = self.buffer.split(b"\r\n\r\n", 1)
            return self.handle_header(header)
        return self.RESULT_INCOMPLETE

    def state_body(self):
        """Wait for Content-Length bytes of body, then finish the request."""
        length = int(self.request.headers["Content-Length"])
        if len(self.buffer) < length:
            return self.RESULT_INCOMPLETE
        try:
            self.request.body = self.buffer[:length].decode("ascii")
        except UnicodeDecodeError:
            logger.warning("Failed to decode HTTP request body")
            return self.RESULT_ERROR
        self.buffer = self.buffer[length:]
        return self.finish()
class HTTPServer:
    """Minimal HTTP server; subclass and override handle() to serve requests."""

    def __init__(self, ssl, server=None):
        self.ssl = ssl
        if server:
            self.server = server
        else:
            # Build a TLS or plain TCP listener depending on the ssl flag.
            sock_type = socket.TYPE_SSL if ssl else socket.TYPE_TCP
            self.server = socket.SocketServer(sock_type)

    def set_certificate(self, cert, key):
        self.server.set_certificate(cert, key)

    def start(self, host, port):
        """Bind the listener and register it with the scheduler."""
        logger.info("Starting HTTP server at %s:%i", host, port)
        self.server.start(host, port)
        scheduler.add_server(self.handle_conn, self.server)

    def handle_conn(self, socket):
        """Attach a request parser to each newly accepted connection."""
        address = socket.remote_address()
        logger.debug("New HTTP connection: %s:%i", address[0], address[1])
        state = HTTPState(socket)
        state.message_event.add(self.handle_req)

    def handle_req(self, request):
        """Dispatch a parsed request to handle() and send back the response."""
        logger.debug("Received HTTP request: %s %s", request.method, request.path)
        response = self.handle(request)
        logger.debug("Sending HTTP response (%i)", response.status)
        request.client.send(response.encode())

    def handle(self, request):
        """Override in subclasses; must return an HTTPResponse."""
        pass
| [
"ymarchand@me.com"
] | ymarchand@me.com |
6da151bf49ca7542bce2b587f7aa9ae674492dd1 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/databox/azure-mgmt-databox/azure/mgmt/databox/v2020_11_01/models/_models.py | 4da1571b91cc41132abda557cbb0c4d423f413d3 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 160,973 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AccountCredentialDetails(msrest.serialization.Model):
    """Credential details of the account.

    All attributes are populated by the server and ignored on requests:
    ``account_name`` (str), ``data_account_type`` ("StorageAccount" or
    "ManagedDisk"), ``account_connection_string`` (str, endpoint connection
    string to use the account as a storage endpoint on the device) and
    ``share_credential_details`` (list of per-share unencrypted credentials).
    """

    _validation = {
        'account_name': {'readonly': True},
        'data_account_type': {'readonly': True},
        'account_connection_string': {'readonly': True},
        'share_credential_details': {'readonly': True},
    }

    _attribute_map = {
        'account_name': {'key': 'accountName', 'type': 'str'},
        'data_account_type': {'key': 'dataAccountType', 'type': 'str'},
        'account_connection_string': {'key': 'accountConnectionString', 'type': 'str'},
        'share_credential_details': {'key': 'shareCredentialDetails', 'type': '[ShareCredentialDetails]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field is server-populated; start them all as None.
        for attr in self._attribute_map:
            setattr(self, attr, None)
class AdditionalErrorInfo(msrest.serialization.Model):
    """Additional error info.

    :param type: Additional error type.
    :type type: str
    :param info: Additional error info.
    :type info: object
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'info': {'key': 'info', 'type': 'object'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.type = kwargs.get('type')
        self.info = kwargs.get('info')
class AddressValidationOutput(msrest.serialization.Model):
    """Output of the address validation api.

    Server-populated attributes: ``validation_type`` (response discriminator),
    ``error`` (CloudError), ``validation_status`` ("Valid", "Invalid" or
    "Ambiguous") and ``alternate_addresses`` (list of ShippingAddress).
    """

    _validation = {
        'error': {'readonly': True},
        'validation_status': {'readonly': True},
        'alternate_addresses': {'readonly': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'properties.validationType', 'type': 'str'},
        'error': {'key': 'properties.error', 'type': 'CloudError'},
        'validation_status': {'key': 'properties.validationStatus', 'type': 'str'},
        'alternate_addresses': {'key': 'properties.alternateAddresses', 'type': '[ShippingAddress]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All fields are filled in by the service; start them as None.
        for attr in self._attribute_map:
            setattr(self, attr, None)
class ValidationInputResponse(msrest.serialization.Model):
    """Minimum properties present in each individual validation response.

    Polymorphic base keyed on ``validation_type``; deserialization picks the
    matching sub-class from ``_subtype_map``.  ``error`` is populated by the
    server.
    """

    _validation = {
        'validation_type': {'required': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'error': {'key': 'error', 'type': 'CloudError'},
    }

    _subtype_map = {
        'validation_type': {'ValidateAddress': 'AddressValidationProperties', 'ValidateCreateOrderLimit': 'CreateOrderLimitForSubscriptionValidationResponseProperties', 'ValidateDataTransferDetails': 'DataTransferDetailsValidationResponseProperties', 'ValidatePreferences': 'PreferencesValidationResponseProperties', 'ValidateSkuAvailability': 'SkuAvailabilityValidationResponseProperties', 'ValidateSubscriptionIsAllowedToCreateJob': 'SubscriptionIsAllowedToCreateJobValidationResponseProperties'}
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.validation_type = None  # type: Optional[str]
        self.error = None
class AddressValidationProperties(ValidationInputResponse):
    """The address validation output.

    ``validation_type`` is fixed to "ValidateAddress".  Server-populated
    attributes: ``error`` (CloudError), ``validation_status`` ("Valid",
    "Invalid" or "Ambiguous") and ``alternate_addresses`` (list of
    ShippingAddress).
    """

    _validation = {
        'validation_type': {'required': True},
        'error': {'readonly': True},
        'validation_status': {'readonly': True},
        'alternate_addresses': {'readonly': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'error': {'key': 'error', 'type': 'CloudError'},
        'validation_status': {'key': 'validationStatus', 'type': 'str'},
        'alternate_addresses': {'key': 'alternateAddresses', 'type': '[ShippingAddress]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.validation_type = 'ValidateAddress'  # type: str
        self.validation_status = None
        self.alternate_addresses = None
class ApiError(msrest.serialization.Model):
    """Error envelope returned by the service.

    :param error: Required. The error detail.
    :type error: ~azure.mgmt.databox.models.ErrorDetail
    """

    _validation = {
        'error': {'required': True},
    }

    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorDetail'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.error = kwargs['error']
class ApplianceNetworkConfiguration(msrest.serialization.Model):
    """The Network Adapter configuration of a DataBox.

    Server-populated attributes: ``name`` (network name, str) and
    ``mac_address`` (str).
    """

    _validation = {
        'name': {'readonly': True},
        'mac_address': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'mac_address': {'key': 'macAddress', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        for attr in self._attribute_map:
            setattr(self, attr, None)
class ArmBaseObject(msrest.serialization.Model):
    """Base class for all objects under resource.

    Server-populated attributes: ``name``, ``id`` and ``type`` (all str).
    """

    _validation = {
        'name': {'readonly': True},
        'id': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        for attr in self._attribute_map:
            setattr(self, attr, None)
class AvailableSkuRequest(msrest.serialization.Model):
    """The filters for showing the available skus.

    :param transfer_type: Required. "ImportToAzure" or "ExportFromAzure".
    :type transfer_type: str or ~azure.mgmt.databox.models.TransferType
    :param country: Required. ISO 3166-1 alpha-2 country code for hardware
     shipment.
    :type country: str
    :param location: Required. Azure location for the data transfer.
    :type location: str
    :param sku_names: Optional sku names to filter the result by.
    :type sku_names: list[str or ~azure.mgmt.databox.models.SkuName]
    """

    _validation = {
        'transfer_type': {'required': True},
        'country': {'required': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'transfer_type': {'key': 'transferType', 'type': 'str'},
        'country': {'key': 'country', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'sku_names': {'key': 'skuNames', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.transfer_type = kwargs['transfer_type']
        self.country = kwargs['country']
        self.location = kwargs['location']
        self.sku_names = kwargs.get('sku_names')
class AvailableSkusResult(msrest.serialization.Model):
    """The available skus operation response.

    :param next_link: Link for the next set of skus.
    :type next_link: str

    ``value`` (list of SkuInformation) is populated by the server.
    """

    _validation = {
        'value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[SkuInformation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = None  # server-populated
        self.next_link = kwargs.get('next_link')
class AzureFileFilterDetails(msrest.serialization.Model):
    """Filter details to transfer Azure files.

    :param file_prefix_list: Prefixes of the Azure files to be transferred.
    :type file_prefix_list: list[str]
    :param file_path_list: Full paths of the files to be transferred.
    :type file_path_list: list[str]
    :param file_share_list: File shares to be transferred.
    :type file_share_list: list[str]
    """

    _attribute_map = {
        'file_prefix_list': {'key': 'filePrefixList', 'type': '[str]'},
        'file_path_list': {'key': 'filePathList', 'type': '[str]'},
        'file_share_list': {'key': 'fileShareList', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.file_prefix_list = kwargs.get('file_prefix_list')
        self.file_path_list = kwargs.get('file_path_list')
        self.file_share_list = kwargs.get('file_share_list')
class BlobFilterDetails(msrest.serialization.Model):
    """Filter details to transfer Azure Blobs.

    :param blob_prefix_list: Prefixes of the Azure blobs to be transferred.
    :type blob_prefix_list: list[str]
    :param blob_path_list: Full paths of the blobs to be transferred.
    :type blob_path_list: list[str]
    :param container_list: Blob containers to be transferred.
    :type container_list: list[str]
    """

    _attribute_map = {
        'blob_prefix_list': {'key': 'blobPrefixList', 'type': '[str]'},
        'blob_path_list': {'key': 'blobPathList', 'type': '[str]'},
        'container_list': {'key': 'containerList', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.blob_prefix_list = kwargs.get('blob_prefix_list')
        self.blob_path_list = kwargs.get('blob_path_list')
        self.container_list = kwargs.get('container_list')
class CancellationReason(msrest.serialization.Model):
    """Reason for cancellation.

    :param reason: Required. Reason for cancellation.
    :type reason: str
    """

    _validation = {
        'reason': {'required': True},
    }

    _attribute_map = {
        'reason': {'key': 'reason', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.reason = kwargs['reason']
class CloudError(msrest.serialization.Model):
    """Cloud error.

    :param code: Cloud error code.
    :type code: str
    :param message: Cloud error message.
    :type message: str
    :param target: Cloud error target.
    :type target: str

    ``details`` (list of CloudError) and ``additional_info`` (list of
    AdditionalErrorInfo) are populated by the server.
    """

    _validation = {
        'details': {'readonly': True},
        'additional_info': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[CloudError]'},
        'additional_info': {'key': 'additionalInfo', 'type': '[AdditionalErrorInfo]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.code = kwargs.get('code')
        self.message = kwargs.get('message')
        self.target = kwargs.get('target')
        self.details = None          # server-populated
        self.additional_info = None  # server-populated
class ContactDetails(msrest.serialization.Model):
    """Contact Details.

    :param contact_name: Required. Contact name of the person.
    :type contact_name: str
    :param phone: Required. Phone number of the contact person.
    :type phone: str
    :param phone_extension: Phone extension number of the contact person.
    :type phone_extension: str
    :param mobile: Mobile number of the contact person.
    :type mobile: str
    :param email_list: Required. Email ids to be notified about job progress.
    :type email_list: list[str]
    :param notification_preference: Notification preference for a job stage.
    :type notification_preference: list[~azure.mgmt.databox.models.NotificationPreference]
    """

    _validation = {
        'contact_name': {'required': True},
        'phone': {'required': True},
        'email_list': {'required': True},
    }

    _attribute_map = {
        'contact_name': {'key': 'contactName', 'type': 'str'},
        'phone': {'key': 'phone', 'type': 'str'},
        'phone_extension': {'key': 'phoneExtension', 'type': 'str'},
        'mobile': {'key': 'mobile', 'type': 'str'},
        'email_list': {'key': 'emailList', 'type': '[str]'},
        'notification_preference': {'key': 'notificationPreference', 'type': '[NotificationPreference]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.contact_name = kwargs['contact_name']
        self.phone = kwargs['phone']
        self.phone_extension = kwargs.get('phone_extension')
        self.mobile = kwargs.get('mobile')
        self.email_list = kwargs['email_list']
        self.notification_preference = kwargs.get('notification_preference')
class CopyLogDetails(msrest.serialization.Model):
    """Details for log generated during copy.

    Polymorphic base keyed on ``copy_log_details_type``; deserialization
    picks the matching sub-class (DataBoxAccountCopyLogDetails,
    DataBoxDiskCopyLogDetails or DataBoxHeavyAccountCopyLogDetails) from
    ``_subtype_map``.
    """

    _validation = {
        'copy_log_details_type': {'required': True},
    }

    _attribute_map = {
        'copy_log_details_type': {'key': 'copyLogDetailsType', 'type': 'str'},
    }

    _subtype_map = {
        'copy_log_details_type': {'DataBox': 'DataBoxAccountCopyLogDetails', 'DataBoxDisk': 'DataBoxDiskCopyLogDetails', 'DataBoxHeavy': 'DataBoxHeavyAccountCopyLogDetails'}
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.copy_log_details_type = None  # type: Optional[str]
class CopyProgress(msrest.serialization.Model):
    """Copy progress.

    All attributes are populated by the server: ``storage_account_name``
    (empty for non storage-account data accounts), ``transfer_type``
    ("ImportToAzure"/"ExportFromAzure"), ``data_account_type``
    ("StorageAccount"/"ManagedDisk"), ``account_id``, byte/file counters
    (``bytes_processed``, ``total_bytes_to_process``, ``files_processed``,
    ``total_files_to_process``), automatic-rename counters
    (``invalid_files_processed``, ``invalid_file_bytes_uploaded``,
    ``renamed_container_count``, ``invalid_directories_processed``), error
    counters (``files_errored_out``, ``directories_errored_out``) and
    ``is_enumeration_in_progress`` (while True, ``total_bytes_to_process``
    may not be valid yet).
    """

    _validation = {
        'storage_account_name': {'readonly': True},
        'transfer_type': {'readonly': True},
        'data_account_type': {'readonly': True},
        'account_id': {'readonly': True},
        'bytes_processed': {'readonly': True},
        'total_bytes_to_process': {'readonly': True},
        'files_processed': {'readonly': True},
        'total_files_to_process': {'readonly': True},
        'invalid_files_processed': {'readonly': True},
        'invalid_file_bytes_uploaded': {'readonly': True},
        'renamed_container_count': {'readonly': True},
        'files_errored_out': {'readonly': True},
        'directories_errored_out': {'readonly': True},
        'invalid_directories_processed': {'readonly': True},
        'is_enumeration_in_progress': {'readonly': True},
    }

    _attribute_map = {
        'storage_account_name': {'key': 'storageAccountName', 'type': 'str'},
        'transfer_type': {'key': 'transferType', 'type': 'str'},
        'data_account_type': {'key': 'dataAccountType', 'type': 'str'},
        'account_id': {'key': 'accountId', 'type': 'str'},
        'bytes_processed': {'key': 'bytesProcessed', 'type': 'long'},
        'total_bytes_to_process': {'key': 'totalBytesToProcess', 'type': 'long'},
        'files_processed': {'key': 'filesProcessed', 'type': 'long'},
        'total_files_to_process': {'key': 'totalFilesToProcess', 'type': 'long'},
        'invalid_files_processed': {'key': 'invalidFilesProcessed', 'type': 'long'},
        'invalid_file_bytes_uploaded': {'key': 'invalidFileBytesUploaded', 'type': 'long'},
        'renamed_container_count': {'key': 'renamedContainerCount', 'type': 'long'},
        'files_errored_out': {'key': 'filesErroredOut', 'type': 'long'},
        'directories_errored_out': {'key': 'directoriesErroredOut', 'type': 'long'},
        'invalid_directories_processed': {'key': 'invalidDirectoriesProcessed', 'type': 'long'},
        'is_enumeration_in_progress': {'key': 'isEnumerationInProgress', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field is server-populated; start them all as None.
        for attr in self._attribute_map:
            setattr(self, attr, None)
class ValidationRequest(msrest.serialization.Model):
    """Minimum request requirement of any validation category.

    Polymorphic base keyed on ``validation_category`` (see ``_subtype_map``;
    currently only CreateJobValidations).

    :param individual_request_details: Required. List of request details,
     each carrying its validationType and request payload.
    :type individual_request_details: list[~azure.mgmt.databox.models.ValidationInputRequest]
    """

    _validation = {
        'validation_category': {'required': True},
        'individual_request_details': {'required': True},
    }

    _attribute_map = {
        'validation_category': {'key': 'validationCategory', 'type': 'str'},
        'individual_request_details': {'key': 'individualRequestDetails', 'type': '[ValidationInputRequest]'},
    }

    _subtype_map = {
        'validation_category': {'JobCreationValidation': 'CreateJobValidations'}
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.validation_category = None  # type: Optional[str]
        self.individual_request_details = kwargs['individual_request_details']
class CreateJobValidations(ValidationRequest):
    """Runs all pre-job-creation validations.

    All required parameters must be populated in order to send to Azure.

    :param validation_category: Required. Discriminator identifying the nature
     of validation; always "JobCreationValidation" here. Constant filled by
     server.
    :type validation_category: str
    :param individual_request_details: Required. List of request details, one
     entry per validation type with its request payload.
    :type individual_request_details: list[~azure.mgmt.databox.models.ValidationInputRequest]
    """

    _validation = {
        'validation_category': {'required': True},
        'individual_request_details': {'required': True},
    }

    _attribute_map = {
        'validation_category': {'key': 'validationCategory', 'type': 'str'},
        'individual_request_details': {'key': 'individualRequestDetails', 'type': '[ValidationInputRequest]'},
    }

    def __init__(self, **kwargs):
        super(CreateJobValidations, self).__init__(**kwargs)
        # Fixed discriminator value for this leaf type.
        self.validation_category = 'JobCreationValidation'  # type: str
class ValidationInputRequest(msrest.serialization.Model):
    """Minimum fields that must be present in any type of validation request.

    Discriminated base class: instantiate one of the known sub-classes
    (ValidateAddress, CreateOrderLimitForSubscriptionValidationRequest,
    DataTransferDetailsValidationRequest, PreferencesValidationRequest,
    SkuAvailabilityValidationRequest,
    SubscriptionIsAllowedToCreateJobValidationRequest) rather than this class
    directly.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Discriminator identifying the type of
     validation request. Constant filled by server. Possible values include:
     "ValidateAddress", "ValidateSubscriptionIsAllowedToCreateJob",
     "ValidatePreferences", "ValidateCreateOrderLimit",
     "ValidateSkuAvailability", "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    """

    _validation = {
        'validation_type': {'required': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
    }

    _subtype_map = {
        'validation_type': {'ValidateAddress': 'ValidateAddress', 'ValidateCreateOrderLimit': 'CreateOrderLimitForSubscriptionValidationRequest', 'ValidateDataTransferDetails': 'DataTransferDetailsValidationRequest', 'ValidatePreferences': 'PreferencesValidationRequest', 'ValidateSkuAvailability': 'SkuAvailabilityValidationRequest', 'ValidateSubscriptionIsAllowedToCreateJob': 'SubscriptionIsAllowedToCreateJobValidationRequest'}
    }

    def __init__(self, **kwargs):
        super(ValidationInputRequest, self).__init__(**kwargs)
        # Leaf sub-classes overwrite the discriminator with their constant.
        self.validation_type = None  # type: Optional[str]
class CreateOrderLimitForSubscriptionValidationRequest(ValidationInputRequest):
    """Request to validate the create-order limit for the current subscription.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Discriminator identifying the type of
     validation request; always "ValidateCreateOrderLimit" here. Constant
     filled by server. Possible values include: "ValidateAddress",
     "ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences",
     "ValidateCreateOrderLimit", "ValidateSkuAvailability",
     "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    :param device_type: Required. Device type to be used for the job. Possible
     values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type device_type: str or ~azure.mgmt.databox.models.SkuName
    """

    _validation = {
        'validation_type': {'required': True},
        'device_type': {'required': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'device_type': {'key': 'deviceType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CreateOrderLimitForSubscriptionValidationRequest, self).__init__(**kwargs)
        # Fixed discriminator value for this leaf type.
        self.validation_type = 'ValidateCreateOrderLimit'  # type: str
        # Required argument: a missing key raises KeyError at construction.
        self.device_type = kwargs['device_type']
class CreateOrderLimitForSubscriptionValidationResponseProperties(ValidationInputResponse):
    """Properties of the create-order-limit-for-subscription validation response.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Discriminator identifying the type of
     validation response; always "ValidateCreateOrderLimit" here. Constant
     filled by server. Possible values include: "ValidateAddress",
     "ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences",
     "ValidateCreateOrderLimit", "ValidateSkuAvailability",
     "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    :ivar error: Error code and message of validation response.
    :vartype error: ~azure.mgmt.databox.models.CloudError
    :ivar status: Create order limit validation status. Possible values
     include: "Valid", "Invalid", "Skipped".
    :vartype status: str or ~azure.mgmt.databox.models.ValidationStatus
    """

    _validation = {
        'validation_type': {'required': True},
        'error': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'error': {'key': 'error', 'type': 'CloudError'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CreateOrderLimitForSubscriptionValidationResponseProperties, self).__init__(**kwargs)
        # Fixed discriminator value for this leaf type.
        self.validation_type = 'ValidateCreateOrderLimit'  # type: str
        # Server-populated (readonly) field.
        self.status = None
class DataAccountDetails(msrest.serialization.Model):
    """Account details of the data to be transferred.

    Discriminated base class: instantiate one of the known sub-classes
    (ManagedDiskDetails, StorageAccountDetails) rather than this class
    directly.

    All required parameters must be populated in order to send to Azure.

    :param data_account_type: Required. Discriminator for the account type of
     the data to be transferred. Constant filled by server. Possible values
     include: "StorageAccount", "ManagedDisk".
    :type data_account_type: str or ~azure.mgmt.databox.models.DataAccountType
    :param share_password: Password for all the shares to be created on the
     device. Should not be passed for TransferType:ExportFromAzure jobs. If
     this is not passed, the service will generate password itself. This will
     not be returned in Get Call. Password Requirements : Password must be
     minimum of 12 and maximum of 64 characters. Password must have at least
     one uppercase alphabet, one number and one special character. Password
     cannot have the following characters : IilLoO0 Password can have only
     alphabets, numbers and these characters : @#-$%^!+=;:_()]+.
    :type share_password: str
    """

    _validation = {
        'data_account_type': {'required': True},
    }

    _attribute_map = {
        'data_account_type': {'key': 'dataAccountType', 'type': 'str'},
        'share_password': {'key': 'sharePassword', 'type': 'str'},
    }

    _subtype_map = {
        'data_account_type': {'ManagedDisk': 'ManagedDiskDetails', 'StorageAccount': 'StorageAccountDetails'}
    }

    def __init__(self, **kwargs):
        super(DataAccountDetails, self).__init__(**kwargs)
        # Leaf sub-classes overwrite the discriminator with their constant.
        self.data_account_type = None  # type: Optional[str]
        # Optional; the service generates a password when omitted.
        self.share_password = kwargs.get('share_password')
class DataBoxAccountCopyLogDetails(CopyLogDetails):
    """Copy log details for one storage account of a DataBox job.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param copy_log_details_type: Required. Discriminator indicating the type
     of job details; always "DataBox" here. Constant filled by server.
     Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type copy_log_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :ivar account_name: Account name.
    :vartype account_name: str
    :ivar copy_log_link: Link for copy logs.
    :vartype copy_log_link: str
    :ivar copy_verbose_log_link: Link for copy verbose logs. Set only when
     LogCollectionLevel is Verbose.
    :vartype copy_verbose_log_link: str
    """

    _validation = {
        'copy_log_details_type': {'required': True},
        'account_name': {'readonly': True},
        'copy_log_link': {'readonly': True},
        'copy_verbose_log_link': {'readonly': True},
    }

    _attribute_map = {
        'copy_log_details_type': {'key': 'copyLogDetailsType', 'type': 'str'},
        'account_name': {'key': 'accountName', 'type': 'str'},
        'copy_log_link': {'key': 'copyLogLink', 'type': 'str'},
        'copy_verbose_log_link': {'key': 'copyVerboseLogLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DataBoxAccountCopyLogDetails, self).__init__(**kwargs)
        # Fixed discriminator value for this leaf type.
        self.copy_log_details_type = 'DataBox'  # type: str
        # Server-populated (readonly) fields.
        self.account_name = None
        self.copy_log_link = None
        self.copy_verbose_log_link = None
class DataBoxDiskCopyLogDetails(CopyLogDetails):
    """Copy log details for one disk of a DataBox Disk job.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param copy_log_details_type: Required. Discriminator indicating the type
     of job details; always "DataBoxDisk" here. Constant filled by server.
     Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type copy_log_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :ivar disk_serial_number: Disk Serial Number.
    :vartype disk_serial_number: str
    :ivar error_log_link: Link for copy error logs.
    :vartype error_log_link: str
    :ivar verbose_log_link: Link for copy verbose logs.
    :vartype verbose_log_link: str
    """

    _validation = {
        'copy_log_details_type': {'required': True},
        'disk_serial_number': {'readonly': True},
        'error_log_link': {'readonly': True},
        'verbose_log_link': {'readonly': True},
    }

    _attribute_map = {
        'copy_log_details_type': {'key': 'copyLogDetailsType', 'type': 'str'},
        'disk_serial_number': {'key': 'diskSerialNumber', 'type': 'str'},
        'error_log_link': {'key': 'errorLogLink', 'type': 'str'},
        'verbose_log_link': {'key': 'verboseLogLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DataBoxDiskCopyLogDetails, self).__init__(**kwargs)
        # Fixed discriminator value for this leaf type.
        self.copy_log_details_type = 'DataBoxDisk'  # type: str
        # Server-populated (readonly) fields.
        self.disk_serial_number = None
        self.error_log_link = None
        self.verbose_log_link = None
class DataBoxDiskCopyProgress(msrest.serialization.Model):
    """Per-disk copy progress for a DataBox Disk job.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar serial_number: The serial number of the disk.
    :vartype serial_number: str
    :ivar bytes_copied: Bytes copied during the copy of disk.
    :vartype bytes_copied: long
    :ivar percent_complete: Percentage completed for the copy of the disk.
    :vartype percent_complete: int
    :ivar status: The status of the copy. Possible values include:
     "NotStarted", "InProgress", "Completed", "CompletedWithErrors", "Failed",
     "NotReturned", "HardwareError", "DeviceFormatted",
     "DeviceMetadataModified", "StorageAccountNotAccessible",
     "UnsupportedData".
    :vartype status: str or ~azure.mgmt.databox.models.CopyStatus
    """

    _validation = {
        'serial_number': {'readonly': True},
        'bytes_copied': {'readonly': True},
        'percent_complete': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'serial_number': {'key': 'serialNumber', 'type': 'str'},
        'bytes_copied': {'key': 'bytesCopied', 'type': 'long'},
        'percent_complete': {'key': 'percentComplete', 'type': 'int'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DataBoxDiskCopyProgress, self).__init__(**kwargs)
        # All fields are server-populated (readonly); start as None.
        self.serial_number = None
        self.bytes_copied = None
        self.percent_complete = None
        self.status = None
class JobDetails(msrest.serialization.Model):
    """Job details.

    Discriminated base class: instantiate one of the known sub-classes
    (DataBoxJobDetails, DataBoxDiskJobDetails, DataBoxHeavyJobDetails) rather
    than this class directly.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar job_stages: List of stages that run in the job.
    :vartype job_stages: list[~azure.mgmt.databox.models.JobStages]
    :param contact_details: Required. Contact details for notification and
     shipping.
    :type contact_details: ~azure.mgmt.databox.models.ContactDetails
    :param shipping_address: Shipping address of the customer.
    :type shipping_address: ~azure.mgmt.databox.models.ShippingAddress
    :ivar delivery_package: Delivery package shipping details.
    :vartype delivery_package: ~azure.mgmt.databox.models.PackageShippingDetails
    :ivar return_package: Return package shipping details.
    :vartype return_package: ~azure.mgmt.databox.models.PackageShippingDetails
    :param data_import_details: Details of the data to be imported into azure.
    :type data_import_details: list[~azure.mgmt.databox.models.DataImportDetails]
    :param data_export_details: Details of the data to be exported from azure.
    :type data_export_details: list[~azure.mgmt.databox.models.DataExportDetails]
    :param job_details_type: Required. Discriminator indicating the type of
     job details. Constant filled by server. Possible values include:
     "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type job_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :param preferences: Preferences for the order.
    :type preferences: ~azure.mgmt.databox.models.Preferences
    :ivar copy_log_details: List of copy log details.
    :vartype copy_log_details: list[~azure.mgmt.databox.models.CopyLogDetails]
    :ivar reverse_shipment_label_sas_key: Shared access key to download the
     return shipment label.
    :vartype reverse_shipment_label_sas_key: str
    :ivar chain_of_custody_sas_key: Shared access key to download the chain of
     custody logs.
    :vartype chain_of_custody_sas_key: str
    :ivar key_encryption_key: Details about which key encryption type is being
     used.
    :vartype key_encryption_key: ~azure.mgmt.databox.models.KeyEncryptionKey
    :param expected_data_size_in_terabytes: The expected size of the data to
     be transferred in this job, in terabytes.
    :type expected_data_size_in_terabytes: int
    """

    _validation = {
        'job_stages': {'readonly': True},
        'contact_details': {'required': True},
        'delivery_package': {'readonly': True},
        'return_package': {'readonly': True},
        'job_details_type': {'required': True},
        'copy_log_details': {'readonly': True},
        'reverse_shipment_label_sas_key': {'readonly': True},
        'chain_of_custody_sas_key': {'readonly': True},
        'key_encryption_key': {'readonly': True},
    }

    _attribute_map = {
        'job_stages': {'key': 'jobStages', 'type': '[JobStages]'},
        'contact_details': {'key': 'contactDetails', 'type': 'ContactDetails'},
        'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
        'delivery_package': {'key': 'deliveryPackage', 'type': 'PackageShippingDetails'},
        'return_package': {'key': 'returnPackage', 'type': 'PackageShippingDetails'},
        'data_import_details': {'key': 'dataImportDetails', 'type': '[DataImportDetails]'},
        'data_export_details': {'key': 'dataExportDetails', 'type': '[DataExportDetails]'},
        'job_details_type': {'key': 'jobDetailsType', 'type': 'str'},
        'preferences': {'key': 'preferences', 'type': 'Preferences'},
        'copy_log_details': {'key': 'copyLogDetails', 'type': '[CopyLogDetails]'},
        'reverse_shipment_label_sas_key': {'key': 'reverseShipmentLabelSasKey', 'type': 'str'},
        'chain_of_custody_sas_key': {'key': 'chainOfCustodySasKey', 'type': 'str'},
        'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyEncryptionKey'},
        'expected_data_size_in_terabytes': {'key': 'expectedDataSizeInTerabytes', 'type': 'int'},
    }

    _subtype_map = {
        'job_details_type': {'DataBox': 'DataBoxJobDetails', 'DataBoxDisk': 'DataBoxDiskJobDetails', 'DataBoxHeavy': 'DataBoxHeavyJobDetails'}
    }

    def __init__(self, **kwargs):
        super(JobDetails, self).__init__(**kwargs)
        # Caller-supplied fields; contact_details is required and raises
        # KeyError when omitted.
        self.contact_details = kwargs['contact_details']
        self.shipping_address = kwargs.get('shipping_address')
        self.data_import_details = kwargs.get('data_import_details')
        self.data_export_details = kwargs.get('data_export_details')
        self.preferences = kwargs.get('preferences')
        self.expected_data_size_in_terabytes = kwargs.get('expected_data_size_in_terabytes')
        # Server-populated (readonly) fields start as None.
        self.job_stages = None
        self.delivery_package = None
        self.return_package = None
        self.copy_log_details = None
        self.reverse_shipment_label_sas_key = None
        self.chain_of_custody_sas_key = None
        self.key_encryption_key = None
        # Leaf sub-classes overwrite the discriminator with their constant.
        self.job_details_type = None  # type: Optional[str]
class DataBoxDiskJobDetails(JobDetails):
    """DataBox Disk job details.

    Extends :class:`JobDetails`; see the base class for the inherited
    contact, shipping, transfer, and server-populated fields.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param contact_details: Required. Contact details for notification and
     shipping (inherited).
    :type contact_details: ~azure.mgmt.databox.models.ContactDetails
    :param job_details_type: Required. Discriminator indicating the type of
     job details; always "DataBoxDisk" here. Constant filled by server.
     Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type job_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :param preferred_disks: User preference on what size disks are needed for
     the job. The map is from the disk size in TB to the count. Eg. {2,5}
     means 5 disks of 2 TB size. Key is string but will be checked against an
     int.
    :type preferred_disks: dict[str, int]
    :ivar copy_progress: Copy progress per disk.
    :vartype copy_progress: list[~azure.mgmt.databox.models.DataBoxDiskCopyProgress]
    :ivar disks_and_size_details: Map of disk serial number to the disk size
     being used for the job. Returned only after the disks are shipped to the
     customer.
    :vartype disks_and_size_details: dict[str, int]
    :param passkey: User entered passkey for DataBox Disk job.
    :type passkey: str
    """

    _validation = {
        'job_stages': {'readonly': True},
        'contact_details': {'required': True},
        'delivery_package': {'readonly': True},
        'return_package': {'readonly': True},
        'job_details_type': {'required': True},
        'copy_log_details': {'readonly': True},
        'reverse_shipment_label_sas_key': {'readonly': True},
        'chain_of_custody_sas_key': {'readonly': True},
        'key_encryption_key': {'readonly': True},
        'copy_progress': {'readonly': True},
        'disks_and_size_details': {'readonly': True},
    }

    _attribute_map = {
        'job_stages': {'key': 'jobStages', 'type': '[JobStages]'},
        'contact_details': {'key': 'contactDetails', 'type': 'ContactDetails'},
        'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
        'delivery_package': {'key': 'deliveryPackage', 'type': 'PackageShippingDetails'},
        'return_package': {'key': 'returnPackage', 'type': 'PackageShippingDetails'},
        'data_import_details': {'key': 'dataImportDetails', 'type': '[DataImportDetails]'},
        'data_export_details': {'key': 'dataExportDetails', 'type': '[DataExportDetails]'},
        'job_details_type': {'key': 'jobDetailsType', 'type': 'str'},
        'preferences': {'key': 'preferences', 'type': 'Preferences'},
        'copy_log_details': {'key': 'copyLogDetails', 'type': '[CopyLogDetails]'},
        'reverse_shipment_label_sas_key': {'key': 'reverseShipmentLabelSasKey', 'type': 'str'},
        'chain_of_custody_sas_key': {'key': 'chainOfCustodySasKey', 'type': 'str'},
        'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyEncryptionKey'},
        'expected_data_size_in_terabytes': {'key': 'expectedDataSizeInTerabytes', 'type': 'int'},
        'preferred_disks': {'key': 'preferredDisks', 'type': '{int}'},
        'copy_progress': {'key': 'copyProgress', 'type': '[DataBoxDiskCopyProgress]'},
        'disks_and_size_details': {'key': 'disksAndSizeDetails', 'type': '{int}'},
        'passkey': {'key': 'passkey', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DataBoxDiskJobDetails, self).__init__(**kwargs)
        # Fixed discriminator value for this leaf type.
        self.job_details_type = 'DataBoxDisk'  # type: str
        # Caller-supplied fields.
        self.preferred_disks = kwargs.get('preferred_disks')
        self.passkey = kwargs.get('passkey')
        # Server-populated (readonly) fields.
        self.copy_progress = None
        self.disks_and_size_details = None
class JobSecrets(msrest.serialization.Model):
    """The base class for the secrets.

    Discriminated base class: instantiate one of the known sub-classes
    (DataboxJobSecrets, DataBoxDiskJobSecrets, DataBoxHeavyJobSecrets) rather
    than this class directly.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param job_secrets_type: Required. Discriminator indicating what type of
     job secrets object this is. Constant filled by server. Possible values
     include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type job_secrets_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :ivar dc_access_security_code: Dc Access Security Code for Customer
     Managed Shipping.
    :vartype dc_access_security_code: ~azure.mgmt.databox.models.DcAccessSecurityCode
    :ivar error: Error while fetching the secrets.
    :vartype error: ~azure.mgmt.databox.models.CloudError
    """

    _validation = {
        'job_secrets_type': {'required': True},
        'dc_access_security_code': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'job_secrets_type': {'key': 'jobSecretsType', 'type': 'str'},
        'dc_access_security_code': {'key': 'dcAccessSecurityCode', 'type': 'DcAccessSecurityCode'},
        'error': {'key': 'error', 'type': 'CloudError'},
    }

    _subtype_map = {
        'job_secrets_type': {'DataBox': 'DataboxJobSecrets', 'DataBoxDisk': 'DataBoxDiskJobSecrets', 'DataBoxHeavy': 'DataBoxHeavyJobSecrets'}
    }

    def __init__(self, **kwargs):
        super(JobSecrets, self).__init__(**kwargs)
        # Leaf sub-classes overwrite the discriminator with their constant.
        self.job_secrets_type = None  # type: Optional[str]
        # Server-populated (readonly) fields.
        self.dc_access_security_code = None
        self.error = None
class DataBoxDiskJobSecrets(JobSecrets):
    """The secrets related to a DataBox Disk job.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param job_secrets_type: Required. Discriminator indicating what type of
     job secrets object this is; always "DataBoxDisk" here. Constant filled by
     server. Possible values include: "DataBox", "DataBoxDisk",
     "DataBoxHeavy".
    :type job_secrets_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :ivar dc_access_security_code: Dc Access Security Code for Customer
     Managed Shipping.
    :vartype dc_access_security_code: ~azure.mgmt.databox.models.DcAccessSecurityCode
    :ivar error: Error while fetching the secrets.
    :vartype error: ~azure.mgmt.databox.models.CloudError
    :ivar disk_secrets: List of secrets objects for the device.
    :vartype disk_secrets: list[~azure.mgmt.databox.models.DiskSecret]
    :ivar pass_key: PassKey for the disk Job.
    :vartype pass_key: str
    :ivar is_passkey_user_defined: Whether passkey was provided by user.
    :vartype is_passkey_user_defined: bool
    """

    _validation = {
        'job_secrets_type': {'required': True},
        'dc_access_security_code': {'readonly': True},
        'error': {'readonly': True},
        'disk_secrets': {'readonly': True},
        'pass_key': {'readonly': True},
        'is_passkey_user_defined': {'readonly': True},
    }

    _attribute_map = {
        'job_secrets_type': {'key': 'jobSecretsType', 'type': 'str'},
        'dc_access_security_code': {'key': 'dcAccessSecurityCode', 'type': 'DcAccessSecurityCode'},
        'error': {'key': 'error', 'type': 'CloudError'},
        'disk_secrets': {'key': 'diskSecrets', 'type': '[DiskSecret]'},
        'pass_key': {'key': 'passKey', 'type': 'str'},
        'is_passkey_user_defined': {'key': 'isPasskeyUserDefined', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(DataBoxDiskJobSecrets, self).__init__(**kwargs)
        # Fixed discriminator value for this leaf type.
        self.job_secrets_type = 'DataBoxDisk'  # type: str
        # Server-populated (readonly) fields.
        self.disk_secrets = None
        self.pass_key = None
        self.is_passkey_user_defined = None
class DataBoxHeavyAccountCopyLogDetails(CopyLogDetails):
    """Copy log details for one storage account of a DataBox Heavy job.

    Unlike the DataBox variant, the log links here are lists of strings.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param copy_log_details_type: Required. Discriminator indicating the type
     of job details; always "DataBoxHeavy" here. Constant filled by server.
     Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type copy_log_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :ivar account_name: Account name.
    :vartype account_name: str
    :ivar copy_log_link: Link for copy logs.
    :vartype copy_log_link: list[str]
    :ivar copy_verbose_log_link: Link for copy verbose logs. Set only when the
     LogCollectionLevel is verbose.
    :vartype copy_verbose_log_link: list[str]
    """

    _validation = {
        'copy_log_details_type': {'required': True},
        'account_name': {'readonly': True},
        'copy_log_link': {'readonly': True},
        'copy_verbose_log_link': {'readonly': True},
    }

    _attribute_map = {
        'copy_log_details_type': {'key': 'copyLogDetailsType', 'type': 'str'},
        'account_name': {'key': 'accountName', 'type': 'str'},
        'copy_log_link': {'key': 'copyLogLink', 'type': '[str]'},
        'copy_verbose_log_link': {'key': 'copyVerboseLogLink', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(DataBoxHeavyAccountCopyLogDetails, self).__init__(**kwargs)
        # Fixed discriminator value for this leaf type.
        self.copy_log_details_type = 'DataBoxHeavy'  # type: str
        # Server-populated (readonly) fields.
        self.account_name = None
        self.copy_log_link = None
        self.copy_verbose_log_link = None
class DataBoxHeavyJobDetails(JobDetails):
    """DataBox Heavy device job details.

    Extends :class:`JobDetails`; see the base class for the inherited
    contact, shipping, transfer, and server-populated fields.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param contact_details: Required. Contact details for notification and
     shipping (inherited).
    :type contact_details: ~azure.mgmt.databox.models.ContactDetails
    :param job_details_type: Required. Discriminator indicating the type of
     job details; always "DataBoxHeavy" here. Constant filled by server.
     Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type job_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :ivar copy_progress: Copy progress per account.
    :vartype copy_progress: list[~azure.mgmt.databox.models.CopyProgress]
    :param device_password: Set Device password for unlocking Databox Heavy.
     Should not be passed for TransferType:ExportFromAzure jobs. If this is
     not passed, the service will generate password itself. This will not be
     returned in Get Call. Password Requirements : Password must be minimum of
     12 and maximum of 64 characters. Password must have at least one
     uppercase alphabet, one number and one special character. Password cannot
     have the following characters : IilLoO0 Password can have only alphabets,
     numbers and these characters : @#-$%^!+=;:_()]+.
    :type device_password: str
    """

    _validation = {
        'job_stages': {'readonly': True},
        'contact_details': {'required': True},
        'delivery_package': {'readonly': True},
        'return_package': {'readonly': True},
        'job_details_type': {'required': True},
        'copy_log_details': {'readonly': True},
        'reverse_shipment_label_sas_key': {'readonly': True},
        'chain_of_custody_sas_key': {'readonly': True},
        'key_encryption_key': {'readonly': True},
        'copy_progress': {'readonly': True},
    }

    _attribute_map = {
        'job_stages': {'key': 'jobStages', 'type': '[JobStages]'},
        'contact_details': {'key': 'contactDetails', 'type': 'ContactDetails'},
        'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
        'delivery_package': {'key': 'deliveryPackage', 'type': 'PackageShippingDetails'},
        'return_package': {'key': 'returnPackage', 'type': 'PackageShippingDetails'},
        'data_import_details': {'key': 'dataImportDetails', 'type': '[DataImportDetails]'},
        'data_export_details': {'key': 'dataExportDetails', 'type': '[DataExportDetails]'},
        'job_details_type': {'key': 'jobDetailsType', 'type': 'str'},
        'preferences': {'key': 'preferences', 'type': 'Preferences'},
        'copy_log_details': {'key': 'copyLogDetails', 'type': '[CopyLogDetails]'},
        'reverse_shipment_label_sas_key': {'key': 'reverseShipmentLabelSasKey', 'type': 'str'},
        'chain_of_custody_sas_key': {'key': 'chainOfCustodySasKey', 'type': 'str'},
        'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyEncryptionKey'},
        'expected_data_size_in_terabytes': {'key': 'expectedDataSizeInTerabytes', 'type': 'int'},
        'copy_progress': {'key': 'copyProgress', 'type': '[CopyProgress]'},
        'device_password': {'key': 'devicePassword', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DataBoxHeavyJobDetails, self).__init__(**kwargs)
        # Fixed discriminator value for this leaf type.
        self.job_details_type = 'DataBoxHeavy'  # type: str
        # Optional; the service generates a password when omitted.
        self.device_password = kwargs.get('device_password')
        # Server-populated (readonly) field.
        self.copy_progress = None
class DataBoxHeavyJobSecrets(JobSecrets):
    """Secrets associated with a DataBox Heavy job.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param job_secrets_type: Required. Discriminator identifying the kind of
     job secrets object. Constant filled by server. Possible values include:
     "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type job_secrets_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :ivar dc_access_security_code: Dc Access Security Code for Customer Managed
     Shipping.
    :vartype dc_access_security_code: ~azure.mgmt.databox.models.DcAccessSecurityCode
    :ivar error: Error raised while fetching the secrets, if any.
    :vartype error: ~azure.mgmt.databox.models.CloudError
    :ivar cabinet_pod_secrets: List of per-cabinet secret objects for the
     DataBox Heavy job.
    :vartype cabinet_pod_secrets: list[~azure.mgmt.databox.models.DataBoxHeavySecret]
    """

    _validation = {
        'job_secrets_type': {'required': True},
        'dc_access_security_code': {'readonly': True},
        'error': {'readonly': True},
        'cabinet_pod_secrets': {'readonly': True},
    }

    _attribute_map = {
        'job_secrets_type': {'key': 'jobSecretsType', 'type': 'str'},
        'dc_access_security_code': {'key': 'dcAccessSecurityCode', 'type': 'DcAccessSecurityCode'},
        'error': {'key': 'error', 'type': 'CloudError'},
        'cabinet_pod_secrets': {'key': 'cabinetPodSecrets', 'type': '[DataBoxHeavySecret]'},
    }

    def __init__(self, **kwargs):
        """Initialise the secrets container; see class docstring for kwargs."""
        super(DataBoxHeavyJobSecrets, self).__init__(**kwargs)
        # Discriminator constant for this subtype.
        self.job_secrets_type = 'DataBoxHeavy'  # type: str
        # Read-only on the service side; never sent by the client.
        self.cabinet_pod_secrets = None
class DataBoxHeavySecret(msrest.serialization.Model):
    """Secrets for a single DataBox Heavy device.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar device_serial_number: Serial number of the assigned device.
    :vartype device_serial_number: str
    :ivar device_password: Password for the out-of-the-box experience on the
     device.
    :vartype device_password: str
    :ivar network_configurations: Network configuration of the appliance.
    :vartype network_configurations: list[~azure.mgmt.databox.models.ApplianceNetworkConfiguration]
    :ivar encoded_validation_cert_pub_key: Base-64 encoded public key used to
     authenticate with the device.
    :vartype encoded_validation_cert_pub_key: str
    :ivar account_credential_details: Access credentials, one entry per
     storage account.
    :vartype account_credential_details: list[~azure.mgmt.databox.models.AccountCredentialDetails]
    """

    _validation = {
        'device_serial_number': {'readonly': True},
        'device_password': {'readonly': True},
        'network_configurations': {'readonly': True},
        'encoded_validation_cert_pub_key': {'readonly': True},
        'account_credential_details': {'readonly': True},
    }

    _attribute_map = {
        'device_serial_number': {'key': 'deviceSerialNumber', 'type': 'str'},
        'device_password': {'key': 'devicePassword', 'type': 'str'},
        'network_configurations': {'key': 'networkConfigurations', 'type': '[ApplianceNetworkConfiguration]'},
        'encoded_validation_cert_pub_key': {'key': 'encodedValidationCertPubKey', 'type': 'str'},
        'account_credential_details': {'key': 'accountCredentialDetails', 'type': '[AccountCredentialDetails]'},
    }

    def __init__(self, **kwargs):
        """Initialise the model; every field is server-populated."""
        super(DataBoxHeavySecret, self).__init__(**kwargs)
        # All attributes are read-only and therefore start as None.
        self.device_serial_number = None
        self.device_password = None
        self.network_configurations = None
        self.encoded_validation_cert_pub_key = None
        self.account_credential_details = None
class DataBoxJobDetails(JobDetails):
    """Job details for a DataBox order.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar job_stages: Stages that the job runs through.
    :vartype job_stages: list[~azure.mgmt.databox.models.JobStages]
    :param contact_details: Required. Contact details used for notification
     and shipping.
    :type contact_details: ~azure.mgmt.databox.models.ContactDetails
    :param shipping_address: Customer shipping address.
    :type shipping_address: ~azure.mgmt.databox.models.ShippingAddress
    :ivar delivery_package: Shipping details of the delivery package.
    :vartype delivery_package: ~azure.mgmt.databox.models.PackageShippingDetails
    :ivar return_package: Shipping details of the return package.
    :vartype return_package: ~azure.mgmt.databox.models.PackageShippingDetails
    :param data_import_details: Details of data being imported into Azure.
    :type data_import_details: list[~azure.mgmt.databox.models.DataImportDetails]
    :param data_export_details: Details of data being exported from Azure.
    :type data_export_details: list[~azure.mgmt.databox.models.DataExportDetails]
    :param job_details_type: Required. Discriminator for the job details
     type. Constant filled by server. Possible values include: "DataBox",
     "DataBoxDisk", "DataBoxHeavy".
    :type job_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :param preferences: Order preferences.
    :type preferences: ~azure.mgmt.databox.models.Preferences
    :ivar copy_log_details: Copy log details, one entry per copy.
    :vartype copy_log_details: list[~azure.mgmt.databox.models.CopyLogDetails]
    :ivar reverse_shipment_label_sas_key: Shared access key to download the
     return shipment label.
    :vartype reverse_shipment_label_sas_key: str
    :ivar chain_of_custody_sas_key: Shared access key to download the chain
     of custody logs.
    :vartype chain_of_custody_sas_key: str
    :ivar key_encryption_key: Details about which key encryption type is
     being used.
    :vartype key_encryption_key: ~azure.mgmt.databox.models.KeyEncryptionKey
    :param expected_data_size_in_terabytes: Expected size of the data to be
     transferred in this job, in terabytes.
    :type expected_data_size_in_terabytes: int
    :ivar copy_progress: Copy progress, one entry per storage account.
    :vartype copy_progress: list[~azure.mgmt.databox.models.CopyProgress]
    :param device_password: Set Device password for unlocking Databox. Should not be passed for
     TransferType:ExportFromAzure jobs. If this is not passed, the service will generate password
     itself. This will not be returned in Get Call. Password Requirements : Password must be
     minimum of 12 and maximum of 64 characters. Password must have at least one uppercase alphabet,
     one number and one special character. Password cannot have the following characters : IilLoO0
     Password can have only alphabets, numbers and these characters : @#-$%^!+=;:_()]+.
    :type device_password: str
    """

    _validation = {
        'job_stages': {'readonly': True},
        'contact_details': {'required': True},
        'delivery_package': {'readonly': True},
        'return_package': {'readonly': True},
        'job_details_type': {'required': True},
        'copy_log_details': {'readonly': True},
        'reverse_shipment_label_sas_key': {'readonly': True},
        'chain_of_custody_sas_key': {'readonly': True},
        'key_encryption_key': {'readonly': True},
        'copy_progress': {'readonly': True},
    }

    _attribute_map = {
        'job_stages': {'key': 'jobStages', 'type': '[JobStages]'},
        'contact_details': {'key': 'contactDetails', 'type': 'ContactDetails'},
        'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
        'delivery_package': {'key': 'deliveryPackage', 'type': 'PackageShippingDetails'},
        'return_package': {'key': 'returnPackage', 'type': 'PackageShippingDetails'},
        'data_import_details': {'key': 'dataImportDetails', 'type': '[DataImportDetails]'},
        'data_export_details': {'key': 'dataExportDetails', 'type': '[DataExportDetails]'},
        'job_details_type': {'key': 'jobDetailsType', 'type': 'str'},
        'preferences': {'key': 'preferences', 'type': 'Preferences'},
        'copy_log_details': {'key': 'copyLogDetails', 'type': '[CopyLogDetails]'},
        'reverse_shipment_label_sas_key': {'key': 'reverseShipmentLabelSasKey', 'type': 'str'},
        'chain_of_custody_sas_key': {'key': 'chainOfCustodySasKey', 'type': 'str'},
        'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyEncryptionKey'},
        'expected_data_size_in_terabytes': {'key': 'expectedDataSizeInTerabytes', 'type': 'int'},
        'copy_progress': {'key': 'copyProgress', 'type': '[CopyProgress]'},
        'device_password': {'key': 'devicePassword', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise DataBox job details; see class docstring for kwargs."""
        super(DataBoxJobDetails, self).__init__(**kwargs)
        # Discriminator constant for this subtype.
        self.job_details_type = 'DataBox'  # type: str
        # Server-populated; always starts out empty on the client side.
        self.copy_progress = None
        self.device_password = kwargs.get('device_password')
class DataboxJobSecrets(JobSecrets):
    """Secrets associated with a DataBox job.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param job_secrets_type: Required. Discriminator identifying the kind of
     job secrets object. Constant filled by server. Possible values include:
     "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type job_secrets_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
    :ivar dc_access_security_code: Dc Access Security Code for Customer Managed
     Shipping.
    :vartype dc_access_security_code: ~azure.mgmt.databox.models.DcAccessSecurityCode
    :ivar error: Error raised while fetching the secrets, if any.
    :vartype error: ~azure.mgmt.databox.models.CloudError
    :param pod_secrets: List of secret objects for the job.
    :type pod_secrets: list[~azure.mgmt.databox.models.DataBoxSecret]
    """

    _validation = {
        'job_secrets_type': {'required': True},
        'dc_access_security_code': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'job_secrets_type': {'key': 'jobSecretsType', 'type': 'str'},
        'dc_access_security_code': {'key': 'dcAccessSecurityCode', 'type': 'DcAccessSecurityCode'},
        'error': {'key': 'error', 'type': 'CloudError'},
        'pod_secrets': {'key': 'podSecrets', 'type': '[DataBoxSecret]'},
    }

    def __init__(self, **kwargs):
        """Initialise the secrets container; see class docstring for kwargs."""
        super(DataboxJobSecrets, self).__init__(**kwargs)
        # Discriminator constant for this subtype.
        self.job_secrets_type = 'DataBox'  # type: str
        self.pod_secrets = kwargs.get('pod_secrets')
class ScheduleAvailabilityRequest(msrest.serialization.Model):
    """Request body used to query order-scheduling availability.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: DataBoxScheduleAvailabilityRequest,
    DiskScheduleAvailabilityRequest, HeavyScheduleAvailabilityRequest.

    All required parameters must be populated in order to send to Azure.

    :param storage_location: Required. Location for data transfer. For locations check:
     https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-version=2018-01-01.
    :type storage_location: str
    :param sku_name: Required. Sku name the order is to be scheduled for.
     Constant filled by server. Possible values include: "DataBox",
     "DataBoxDisk", "DataBoxHeavy".
    :type sku_name: str or ~azure.mgmt.databox.models.SkuName
    :param country: Country in which the storage location should be supported.
    :type country: str
    """

    _validation = {
        'storage_location': {'required': True},
        'sku_name': {'required': True},
    }

    _attribute_map = {
        'storage_location': {'key': 'storageLocation', 'type': 'str'},
        'sku_name': {'key': 'skuName', 'type': 'str'},
        'country': {'key': 'country', 'type': 'str'},
    }

    # Maps the 'sku_name' discriminator to the concrete subclass msrest
    # should deserialize into.
    _subtype_map = {
        'sku_name': {'DataBox': 'DataBoxScheduleAvailabilityRequest', 'DataBoxDisk': 'DiskScheduleAvailabilityRequest', 'DataBoxHeavy': 'HeavyScheduleAvailabilityRequest'}
    }

    def __init__(self, **kwargs):
        """Initialise the request; see class docstring for kwargs."""
        super(ScheduleAvailabilityRequest, self).__init__(**kwargs)
        # Required — raises KeyError if the caller omits it.
        self.storage_location = kwargs['storage_location']
        # Filled in by the concrete subclass's constructor.
        self.sku_name = None  # type: Optional[str]
        self.country = kwargs.get('country')
class DataBoxScheduleAvailabilityRequest(ScheduleAvailabilityRequest):
    """Request body used to query scheduling availability for DataBox orders.

    All required parameters must be populated in order to send to Azure.

    :param storage_location: Required. Location for data transfer. For locations check:
     https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-version=2018-01-01.
    :type storage_location: str
    :param sku_name: Required. Sku name the order is to be scheduled for.
     Constant filled by server. Possible values include: "DataBox",
     "DataBoxDisk", "DataBoxHeavy".
    :type sku_name: str or ~azure.mgmt.databox.models.SkuName
    :param country: Country in which the storage location should be supported.
    :type country: str
    """

    _validation = {
        'storage_location': {'required': True},
        'sku_name': {'required': True},
    }

    _attribute_map = {
        'storage_location': {'key': 'storageLocation', 'type': 'str'},
        'sku_name': {'key': 'skuName', 'type': 'str'},
        'country': {'key': 'country', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the request; see class docstring for kwargs."""
        super(DataBoxScheduleAvailabilityRequest, self).__init__(**kwargs)
        # Discriminator constant for this subtype.
        self.sku_name = 'DataBox'  # type: str
class DataBoxSecret(msrest.serialization.Model):
    """Secrets for a single DataBox device.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar device_serial_number: Serial number of the assigned device.
    :vartype device_serial_number: str
    :ivar device_password: Password for the out-of-the-box experience on the
     device.
    :vartype device_password: str
    :ivar network_configurations: Network configuration of the appliance.
    :vartype network_configurations: list[~azure.mgmt.databox.models.ApplianceNetworkConfiguration]
    :ivar encoded_validation_cert_pub_key: Base-64 encoded public key used to
     authenticate with the device.
    :vartype encoded_validation_cert_pub_key: str
    :ivar account_credential_details: Access credentials, one entry per
     storage account.
    :vartype account_credential_details: list[~azure.mgmt.databox.models.AccountCredentialDetails]
    """

    _validation = {
        'device_serial_number': {'readonly': True},
        'device_password': {'readonly': True},
        'network_configurations': {'readonly': True},
        'encoded_validation_cert_pub_key': {'readonly': True},
        'account_credential_details': {'readonly': True},
    }

    _attribute_map = {
        'device_serial_number': {'key': 'deviceSerialNumber', 'type': 'str'},
        'device_password': {'key': 'devicePassword', 'type': 'str'},
        'network_configurations': {'key': 'networkConfigurations', 'type': '[ApplianceNetworkConfiguration]'},
        'encoded_validation_cert_pub_key': {'key': 'encodedValidationCertPubKey', 'type': 'str'},
        'account_credential_details': {'key': 'accountCredentialDetails', 'type': '[AccountCredentialDetails]'},
    }

    def __init__(self, **kwargs):
        """Initialise the model; every field is server-populated."""
        super(DataBoxSecret, self).__init__(**kwargs)
        # All attributes are read-only and therefore start as None.
        self.device_serial_number = None
        self.device_password = None
        self.network_configurations = None
        self.encoded_validation_cert_pub_key = None
        self.account_credential_details = None
class DataExportDetails(msrest.serialization.Model):
    """Describes data that will be exported out of Azure.

    All required parameters must be populated in order to send to Azure.

    :param transfer_configuration: Required. Configuration for the data
     transfer.
    :type transfer_configuration: ~azure.mgmt.databox.models.TransferConfiguration
    :param log_collection_level: Level of the logs to be collected. Possible
     values include: "Error", "Verbose".
    :type log_collection_level: str or ~azure.mgmt.databox.models.LogCollectionLevel
    :param account_details: Required. Account details of the data to be
     transferred.
    :type account_details: ~azure.mgmt.databox.models.DataAccountDetails
    """

    _validation = {
        'transfer_configuration': {'required': True},
        'account_details': {'required': True},
    }

    _attribute_map = {
        'transfer_configuration': {'key': 'transferConfiguration', 'type': 'TransferConfiguration'},
        'log_collection_level': {'key': 'logCollectionLevel', 'type': 'str'},
        'account_details': {'key': 'accountDetails', 'type': 'DataAccountDetails'},
    }

    def __init__(self, **kwargs):
        """Initialise export details; see class docstring for kwargs."""
        super(DataExportDetails, self).__init__(**kwargs)
        # Required — raise KeyError early if the caller omits them.
        self.transfer_configuration = kwargs['transfer_configuration']
        self.log_collection_level = kwargs.get('log_collection_level')
        self.account_details = kwargs['account_details']
class DataImportDetails(msrest.serialization.Model):
    """Describes data that will be imported into Azure.

    All required parameters must be populated in order to send to Azure.

    :param account_details: Required. Account details of the data to be
     transferred.
    :type account_details: ~azure.mgmt.databox.models.DataAccountDetails
    """

    _validation = {
        'account_details': {'required': True},
    }

    _attribute_map = {
        'account_details': {'key': 'accountDetails', 'type': 'DataAccountDetails'},
    }

    def __init__(self, **kwargs):
        """Initialise import details; see class docstring for kwargs."""
        super(DataImportDetails, self).__init__(**kwargs)
        # Required — raises KeyError if the caller omits it.
        self.account_details = kwargs['account_details']
class DataLocationToServiceLocationMap(msrest.serialization.Model):
    """Mapping from a data location to the service location that serves it.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar data_location: Location of the data.
    :vartype data_location: str
    :ivar service_location: Location of the service.
    :vartype service_location: str
    """

    _validation = {
        'data_location': {'readonly': True},
        'service_location': {'readonly': True},
    }

    _attribute_map = {
        'data_location': {'key': 'dataLocation', 'type': 'str'},
        'service_location': {'key': 'serviceLocation', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the model; both fields are server-populated."""
        super(DataLocationToServiceLocationMap, self).__init__(**kwargs)
        self.data_location = None
        self.service_location = None
class DataTransferDetailsValidationRequest(ValidationInputRequest):
    """Request to validate export and import data details.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Discriminator identifying the kind of
     validation request. Constant filled by server. Possible values include:
     "ValidateAddress", "ValidateSubscriptionIsAllowedToCreateJob",
     "ValidatePreferences", "ValidateCreateOrderLimit",
     "ValidateSkuAvailability", "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    :param data_export_details: DataTransfer details used to export data from
     Azure.
    :type data_export_details: list[~azure.mgmt.databox.models.DataExportDetails]
    :param data_import_details: DataTransfer details used to import data to
     Azure.
    :type data_import_details: list[~azure.mgmt.databox.models.DataImportDetails]
    :param device_type: Required. Device type. Possible values include:
     "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type device_type: str or ~azure.mgmt.databox.models.SkuName
    :param transfer_type: Required. Type of the transfer. Possible values
     include: "ImportToAzure", "ExportFromAzure".
    :type transfer_type: str or ~azure.mgmt.databox.models.TransferType
    """

    _validation = {
        'validation_type': {'required': True},
        'device_type': {'required': True},
        'transfer_type': {'required': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'data_export_details': {'key': 'dataExportDetails', 'type': '[DataExportDetails]'},
        'data_import_details': {'key': 'dataImportDetails', 'type': '[DataImportDetails]'},
        'device_type': {'key': 'deviceType', 'type': 'str'},
        'transfer_type': {'key': 'transferType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the validation request; see class docstring for kwargs."""
        super(DataTransferDetailsValidationRequest, self).__init__(**kwargs)
        # Discriminator constant for this subtype.
        self.validation_type = 'ValidateDataTransferDetails'  # type: str
        self.data_export_details = kwargs.get('data_export_details')
        self.data_import_details = kwargs.get('data_import_details')
        # Required — raise KeyError early if the caller omits them.
        self.device_type = kwargs['device_type']
        self.transfer_type = kwargs['transfer_type']
class DataTransferDetailsValidationResponseProperties(ValidationInputResponse):
    """Validation response for data transfer details.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Discriminator identifying the kind of
     validation response. Constant filled by server. Possible values include:
     "ValidateAddress", "ValidateSubscriptionIsAllowedToCreateJob",
     "ValidatePreferences", "ValidateCreateOrderLimit",
     "ValidateSkuAvailability", "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    :ivar error: Error code and message of the validation response.
    :vartype error: ~azure.mgmt.databox.models.CloudError
    :ivar status: Data transfer details validation status. Possible values
     include: "Valid", "Invalid", "Skipped".
    :vartype status: str or ~azure.mgmt.databox.models.ValidationStatus
    """

    _validation = {
        'validation_type': {'required': True},
        'error': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'error': {'key': 'error', 'type': 'CloudError'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the validation response; see class docstring for kwargs."""
        super(DataTransferDetailsValidationResponseProperties, self).__init__(**kwargs)
        # Discriminator constant for this subtype.
        self.validation_type = 'ValidateDataTransferDetails'  # type: str
        # Server-populated; always starts out empty on the client side.
        self.status = None
class DcAccessSecurityCode(msrest.serialization.Model):
    """Datacenter access security codes.

    :param reverse_dc_access_code: Reverse Dc access security code.
    :type reverse_dc_access_code: str
    :param forward_dc_access_code: Forward Dc access security code.
    :type forward_dc_access_code: str
    """

    _attribute_map = {
        'reverse_dc_access_code': {'key': 'reverseDcAccessCode', 'type': 'str'},
        'forward_dc_access_code': {'key': 'forwardDcAccessCode', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the model; both codes are optional."""
        super(DcAccessSecurityCode, self).__init__(**kwargs)
        self.reverse_dc_access_code = kwargs.get('reverse_dc_access_code')
        self.forward_dc_access_code = kwargs.get('forward_dc_access_code')
class Details(msrest.serialization.Model):
    """A single error detail entry (code plus message).

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code.
    :type code: str
    :param message: Required. Error message.
    :type message: str
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the detail; both fields are required."""
        super(Details, self).__init__(**kwargs)
        # Required — raise KeyError early if the caller omits them.
        self.code = kwargs['code']
        self.message = kwargs['message']
class DiskScheduleAvailabilityRequest(ScheduleAvailabilityRequest):
    """Request body used to query scheduling availability for disk orders.

    All required parameters must be populated in order to send to Azure.

    :param storage_location: Required. Location for data transfer. For locations check:
     https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-version=2018-01-01.
    :type storage_location: str
    :param sku_name: Required. Sku name the order is to be scheduled for.
     Constant filled by server. Possible values include: "DataBox",
     "DataBoxDisk", "DataBoxHeavy".
    :type sku_name: str or ~azure.mgmt.databox.models.SkuName
    :param country: Country in which the storage location should be supported.
    :type country: str
    :param expected_data_size_in_terabytes: Required. Expected size of the
     data to be transferred in this job, in terabytes.
    :type expected_data_size_in_terabytes: int
    """

    _validation = {
        'storage_location': {'required': True},
        'sku_name': {'required': True},
        'expected_data_size_in_terabytes': {'required': True},
    }

    _attribute_map = {
        'storage_location': {'key': 'storageLocation', 'type': 'str'},
        'sku_name': {'key': 'skuName', 'type': 'str'},
        'country': {'key': 'country', 'type': 'str'},
        'expected_data_size_in_terabytes': {'key': 'expectedDataSizeInTerabytes', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        """Initialise the request; see class docstring for kwargs."""
        super(DiskScheduleAvailabilityRequest, self).__init__(**kwargs)
        # Discriminator constant for this subtype.
        self.sku_name = 'DataBoxDisk'  # type: str
        # Required — raises KeyError if the caller omits it.
        self.expected_data_size_in_terabytes = kwargs['expected_data_size_in_terabytes']
class DiskSecret(msrest.serialization.Model):
    """All the secrets of a single disk.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar disk_serial_number: Serial number of the assigned disk.
    :vartype disk_serial_number: str
    :ivar bit_locker_key: BitLocker key that unlocks the disk for copying
     data.
    :vartype bit_locker_key: str
    """

    _validation = {
        'disk_serial_number': {'readonly': True},
        'bit_locker_key': {'readonly': True},
    }

    _attribute_map = {
        'disk_serial_number': {'key': 'diskSerialNumber', 'type': 'str'},
        'bit_locker_key': {'key': 'bitLockerKey', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the model; both fields are server-populated."""
        super(DiskSecret, self).__init__(**kwargs)
        self.disk_serial_number = None
        self.bit_locker_key = None
class EncryptionPreferences(msrest.serialization.Model):
    """Encryption-related order preferences.

    :param double_encryption: Defines secondary layer of software-based
     encryption enablement. Possible values include: "Enabled", "Disabled".
    :type double_encryption: str or ~azure.mgmt.databox.models.DoubleEncryption
    """

    _attribute_map = {
        'double_encryption': {'key': 'doubleEncryption', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the preferences; double encryption is optional."""
        super(EncryptionPreferences, self).__init__(**kwargs)
        self.double_encryption = kwargs.get('double_encryption')
class ErrorDetail(msrest.serialization.Model):
    """Error detail with optional nested details and target.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code.
    :type code: str
    :param message: Required. Error message.
    :type message: str
    :param details: Nested error details.
    :type details: list[~azure.mgmt.databox.models.Details]
    :param target: Target of the error.
    :type target: str
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[Details]'},
        'target': {'key': 'target', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the error detail; code and message are required."""
        super(ErrorDetail, self).__init__(**kwargs)
        # Required — raise KeyError early if the caller omits them.
        self.code = kwargs['code']
        self.message = kwargs['message']
        self.details = kwargs.get('details')
        self.target = kwargs.get('target')
class FilterFileDetails(msrest.serialization.Model):
    """Filter file used to select the items to transfer.

    All required parameters must be populated in order to send to Azure.

    :param filter_file_type: Required. Type of the filter file. Possible
     values include: "AzureBlob", "AzureFile".
    :type filter_file_type: str or ~azure.mgmt.databox.models.FilterFileType
    :param filter_file_path: Required. Path of the file that contains the
     details of all items to transfer.
    :type filter_file_path: str
    """

    _validation = {
        'filter_file_type': {'required': True},
        'filter_file_path': {'required': True},
    }

    _attribute_map = {
        'filter_file_type': {'key': 'filterFileType', 'type': 'str'},
        'filter_file_path': {'key': 'filterFilePath', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the filter file details; both fields are required."""
        super(FilterFileDetails, self).__init__(**kwargs)
        # Required — raise KeyError early if the caller omits them.
        self.filter_file_type = kwargs['filter_file_type']
        self.filter_file_path = kwargs['filter_file_path']
class HeavyScheduleAvailabilityRequest(ScheduleAvailabilityRequest):
    """Request body used to query scheduling availability for heavy orders.

    All required parameters must be populated in order to send to Azure.

    :param storage_location: Required. Location for data transfer. For locations check:
     https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-version=2018-01-01.
    :type storage_location: str
    :param sku_name: Required. Sku name the order is to be scheduled for.
     Constant filled by server. Possible values include: "DataBox",
     "DataBoxDisk", "DataBoxHeavy".
    :type sku_name: str or ~azure.mgmt.databox.models.SkuName
    :param country: Country in which the storage location should be supported.
    :type country: str
    """

    _validation = {
        'storage_location': {'required': True},
        'sku_name': {'required': True},
    }

    _attribute_map = {
        'storage_location': {'key': 'storageLocation', 'type': 'str'},
        'sku_name': {'key': 'skuName', 'type': 'str'},
        'country': {'key': 'country', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialise the request; see class docstring for kwargs."""
        super(HeavyScheduleAvailabilityRequest, self).__init__(**kwargs)
        # Discriminator constant for this subtype.
        self.sku_name = 'DataBoxHeavy'  # type: str
class IdentityProperties(msrest.serialization.Model):
    """Managed identity properties.

    :param type: Managed service identity type.
    :type type: str
    :param user_assigned: User assigned identity properties.
    :type user_assigned: ~azure.mgmt.databox.models.UserAssignedProperties
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'user_assigned': {'key': 'userAssigned', 'type': 'UserAssignedProperties'},
    }

    def __init__(self, **kwargs):
        """Initialise the identity properties; both fields are optional."""
        super(IdentityProperties, self).__init__(**kwargs)
        self.type = kwargs.get('type')
        self.user_assigned = kwargs.get('user_assigned')
class JobDeliveryInfo(msrest.serialization.Model):
    """Additional delivery information for a job.

    :param scheduled_date_time: Scheduled date time.
    :type scheduled_date_time: ~datetime.datetime
    """

    _attribute_map = {
        'scheduled_date_time': {'key': 'scheduledDateTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        """Initialise the delivery info; the scheduled time is optional."""
        super(JobDeliveryInfo, self).__init__(**kwargs)
        self.scheduled_date_time = kwargs.get('scheduled_date_time')
class Resource(msrest.serialization.Model):
    """Base model for an ARM-tracked resource.

    All required parameters must be populated in order to send to Azure.

    :param location: Required. The location of the resource. This will be one of the supported and
     registered Azure Regions (e.g. West US, East US, Southeast Asia, etc.). The region of a
     resource cannot be changed once it is created, but if an identical region is specified on
     update the request will succeed.
    :type location: str
    :param tags: A set of tags. Key/value pairs that describe the resource;
     usable for viewing and grouping across resource groups.
    :type tags: dict[str, str]
    :param sku: Required. The sku type.
    :type sku: ~azure.mgmt.databox.models.Sku
    :param identity: Msi identity of the resource.
    :type identity: ~azure.mgmt.databox.models.ResourceIdentity
    """

    _validation = {
        'location': {'required': True},
        'sku': {'required': True},
    }

    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
    }

    def __init__(self, **kwargs):
        """Initialise the resource; location and sku are required."""
        super(Resource, self).__init__(**kwargs)
        # Required — raise KeyError early if the caller omits them.
        self.location = kwargs['location']
        self.tags = kwargs.get('tags')
        self.sku = kwargs['sku']
        self.identity = kwargs.get('identity')
class JobResource(Resource):
    """A single Data Box job resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param location: Required. The location of the resource. This will be one of the supported and
     registered Azure Regions (e.g. West US, East US, Southeast Asia, etc.). The region of a
     resource cannot be changed once it is created, but if an identical region is specified on
     update the request will succeed.
    :type location: str
    :param tags: A set of tags. The list of key value pairs that describe the resource. These tags
     can be used in viewing and grouping this resource (across resource groups).
    :type tags: dict[str, str]
    :param sku: Required. The sku type.
    :type sku: ~azure.mgmt.databox.models.Sku
    :param identity: Msi identity of the resource.
    :type identity: ~azure.mgmt.databox.models.ResourceIdentity
    :ivar name: Name of the object.
    :vartype name: str
    :ivar id: Id of the object.
    :vartype id: str
    :ivar type: Type of the object.
    :vartype type: str
    :param transfer_type: Required. Type of the data transfer. Possible values include:
     "ImportToAzure", "ExportFromAzure".
    :type transfer_type: str or ~azure.mgmt.databox.models.TransferType
    :ivar is_cancellable: Describes whether the job is cancellable or not.
    :vartype is_cancellable: bool
    :ivar is_deletable: Describes whether the job is deletable or not.
    :vartype is_deletable: bool
    :ivar is_shipping_address_editable: Describes whether the shipping address is editable or not.
    :vartype is_shipping_address_editable: bool
    :ivar is_prepare_to_ship_enabled: Is Prepare To Ship Enabled on this job.
    :vartype is_prepare_to_ship_enabled: bool
    :ivar status: Name of the stage which is in progress. Possible values include: "DeviceOrdered",
     "DevicePrepared", "Dispatched", "Delivered", "PickedUp", "AtAzureDC", "DataCopy", "Completed",
     "CompletedWithErrors", "Cancelled", "Failed_IssueReportedAtCustomer",
     "Failed_IssueDetectedAtAzureDC", "Aborted", "CompletedWithWarnings",
     "ReadyToDispatchFromAzureDC", "ReadyToReceiveAtAzureDC".
    :vartype status: str or ~azure.mgmt.databox.models.StageName
    :ivar start_time: Time at which the job was started in UTC ISO 8601 format.
    :vartype start_time: ~datetime.datetime
    :ivar error: Top level error for the job.
    :vartype error: ~azure.mgmt.databox.models.CloudError
    :param details: Details of a job run. This field will only be sent for expand details filter.
    :type details: ~azure.mgmt.databox.models.JobDetails
    :ivar cancellation_reason: Reason for cancellation.
    :vartype cancellation_reason: str
    :param delivery_type: Delivery type of Job. Possible values include: "NonScheduled",
     "Scheduled".
    :type delivery_type: str or ~azure.mgmt.databox.models.JobDeliveryType
    :param delivery_info: Delivery Info of Job.
    :type delivery_info: ~azure.mgmt.databox.models.JobDeliveryInfo
    :ivar is_cancellable_without_fee: Flag to indicate cancellation of scheduled job.
    :vartype is_cancellable_without_fee: bool
    """

    _validation = {
        'location': {'required': True},
        'sku': {'required': True},
        'name': {'readonly': True},
        'id': {'readonly': True},
        'type': {'readonly': True},
        'transfer_type': {'required': True},
        'is_cancellable': {'readonly': True},
        'is_deletable': {'readonly': True},
        'is_shipping_address_editable': {'readonly': True},
        'is_prepare_to_ship_enabled': {'readonly': True},
        'status': {'readonly': True},
        'start_time': {'readonly': True},
        'error': {'readonly': True},
        'cancellation_reason': {'readonly': True},
        'is_cancellable_without_fee': {'readonly': True},
    }

    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'transfer_type': {'key': 'properties.transferType', 'type': 'str'},
        'is_cancellable': {'key': 'properties.isCancellable', 'type': 'bool'},
        'is_deletable': {'key': 'properties.isDeletable', 'type': 'bool'},
        'is_shipping_address_editable': {'key': 'properties.isShippingAddressEditable', 'type': 'bool'},
        'is_prepare_to_ship_enabled': {'key': 'properties.isPrepareToShipEnabled', 'type': 'bool'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
        'error': {'key': 'properties.error', 'type': 'CloudError'},
        'details': {'key': 'properties.details', 'type': 'JobDetails'},
        'cancellation_reason': {'key': 'properties.cancellationReason', 'type': 'str'},
        'delivery_type': {'key': 'properties.deliveryType', 'type': 'str'},
        'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'JobDeliveryInfo'},
        'is_cancellable_without_fee': {'key': 'properties.isCancellableWithoutFee', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        # Parent consumes location/tags/sku/identity from kwargs.
        super(JobResource, self).__init__(**kwargs)
        # Client-supplied properties.
        self.transfer_type = kwargs['transfer_type']  # required
        self.details = kwargs.get('details')
        self.delivery_type = kwargs.get('delivery_type')
        self.delivery_info = kwargs.get('delivery_info')
        # Server-populated (read-only) properties start out unset.
        self.name = None
        self.id = None
        self.type = None
        self.is_cancellable = None
        self.is_deletable = None
        self.is_shipping_address_editable = None
        self.is_prepare_to_ship_enabled = None
        self.status = None
        self.start_time = None
        self.error = None
        self.cancellation_reason = None
        self.is_cancellable_without_fee = None
class JobResourceList(msrest.serialization.Model):
    """A page of job resources, with a link to the next page.

    :param value: List of job resources.
    :type value: list[~azure.mgmt.databox.models.JobResource]
    :param next_link: Link for the next set of job resources.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[JobResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(JobResourceList, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class JobResourceUpdateParameter(msrest.serialization.Model):
    """Parameters accepted when updating an existing job resource.

    :param tags: A set of tags. The list of key value pairs that describe the resource. These tags
     can be used in viewing and grouping this resource (across resource groups).
    :type tags: dict[str, str]
    :param identity: Msi identity of the resource.
    :type identity: ~azure.mgmt.databox.models.ResourceIdentity
    :param details: Details of a job to be updated.
    :type details: ~azure.mgmt.databox.models.UpdateJobDetails
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
        'details': {'key': 'properties.details', 'type': 'UpdateJobDetails'},
    }

    def __init__(self, **kwargs):
        super(JobResourceUpdateParameter, self).__init__(**kwargs)
        # Every field is optional; omitted fields are left untouched on update.
        self.tags = kwargs.get('tags')
        self.identity = kwargs.get('identity')
        self.details = kwargs.get('details')
class JobStages(msrest.serialization.Model):
    """A single stage in the lifecycle of a job.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar stage_name: Name of the job stage. Possible values include: "DeviceOrdered",
     "DevicePrepared", "Dispatched", "Delivered", "PickedUp", "AtAzureDC", "DataCopy", "Completed",
     "CompletedWithErrors", "Cancelled", "Failed_IssueReportedAtCustomer",
     "Failed_IssueDetectedAtAzureDC", "Aborted", "CompletedWithWarnings",
     "ReadyToDispatchFromAzureDC", "ReadyToReceiveAtAzureDC".
    :vartype stage_name: str or ~azure.mgmt.databox.models.StageName
    :ivar display_name: Display name of the job stage.
    :vartype display_name: str
    :ivar stage_status: Status of the job stage. Possible values include: "None", "InProgress",
     "Succeeded", "Failed", "Cancelled", "Cancelling", "SucceededWithErrors",
     "WaitingForCustomerAction", "SucceededWithWarnings".
    :vartype stage_status: str or ~azure.mgmt.databox.models.StageStatus
    :ivar stage_time: Time for the job stage in UTC ISO 8601 format.
    :vartype stage_time: ~datetime.datetime
    :ivar job_stage_details: Job Stage Details.
    :vartype job_stage_details: object
    """

    _validation = {
        'stage_name': {'readonly': True},
        'display_name': {'readonly': True},
        'stage_status': {'readonly': True},
        'stage_time': {'readonly': True},
        'job_stage_details': {'readonly': True},
    }

    _attribute_map = {
        'stage_name': {'key': 'stageName', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'stage_status': {'key': 'stageStatus', 'type': 'str'},
        'stage_time': {'key': 'stageTime', 'type': 'iso-8601'},
        'job_stage_details': {'key': 'jobStageDetails', 'type': 'object'},
    }

    def __init__(self, **kwargs):
        super(JobStages, self).__init__(**kwargs)
        # All attributes are read-only and filled in by the service.
        self.stage_name = None
        self.display_name = None
        self.stage_status = None
        self.stage_time = None
        self.job_stage_details = None
class KeyEncryptionKey(msrest.serialization.Model):
    """Describes the key used to encrypt other keys.

    All required parameters must be populated in order to send to Azure.

    :param kek_type: Required. Type of encryption key used for key encryption. Possible values
     include: "MicrosoftManaged", "CustomerManaged".
    :type kek_type: str or ~azure.mgmt.databox.models.KekType
    :param identity_properties: Managed identity properties used for key encryption.
    :type identity_properties: ~azure.mgmt.databox.models.IdentityProperties
    :param kek_url: Key encryption key. It is required in case of Customer managed KekType.
    :type kek_url: str
    :param kek_vault_resource_id: Kek vault resource id. It is required in case of Customer managed
     KekType.
    :type kek_vault_resource_id: str
    """

    _validation = {
        'kek_type': {'required': True},
    }

    _attribute_map = {
        'kek_type': {'key': 'kekType', 'type': 'str'},
        'identity_properties': {'key': 'identityProperties', 'type': 'IdentityProperties'},
        'kek_url': {'key': 'kekUrl', 'type': 'str'},
        'kek_vault_resource_id': {'key': 'kekVaultResourceID', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(KeyEncryptionKey, self).__init__(**kwargs)
        self.kek_type = kwargs['kek_type']  # required
        # kek_url / kek_vault_resource_id are mandatory only for CustomerManaged keys;
        # the service enforces that, not this client model.
        self.identity_properties = kwargs.get('identity_properties')
        self.kek_url = kwargs.get('kek_url')
        self.kek_vault_resource_id = kwargs.get('kek_vault_resource_id')
class ManagedDiskDetails(DataAccountDetails):
    """Data-account details specific to managed disks.

    All required parameters must be populated in order to send to Azure.

    :param data_account_type: Required. Account Type of the data to be transferred.Constant filled
     by server. Possible values include: "StorageAccount", "ManagedDisk".
    :type data_account_type: str or ~azure.mgmt.databox.models.DataAccountType
    :param share_password: Password for all the shares to be created on the device. Should not be
     passed for TransferType:ExportFromAzure jobs. If this is not passed, the service will generate
     password itself. This will not be returned in Get Call. Password Requirements : Password must
     be minimum of 12 and maximum of 64 characters. Password must have at least one uppercase
     alphabet, one number and one special character. Password cannot have the following characters :
     IilLoO0 Password can have only alphabets, numbers and these characters : @#-$%^!+=;:_()]+.
    :type share_password: str
    :param resource_group_id: Required. Resource Group Id of the compute disks.
    :type resource_group_id: str
    :param staging_storage_account_id: Required. Resource Id of the storage account that can be
     used to copy the vhd for staging.
    :type staging_storage_account_id: str
    """

    _validation = {
        'data_account_type': {'required': True},
        'resource_group_id': {'required': True},
        'staging_storage_account_id': {'required': True},
    }

    _attribute_map = {
        'data_account_type': {'key': 'dataAccountType', 'type': 'str'},
        'share_password': {'key': 'sharePassword', 'type': 'str'},
        'resource_group_id': {'key': 'resourceGroupId', 'type': 'str'},
        'staging_storage_account_id': {'key': 'stagingStorageAccountId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Parent handles share_password.
        super(ManagedDiskDetails, self).__init__(**kwargs)
        # Polymorphic discriminator is fixed for this subclass.
        self.data_account_type = 'ManagedDisk'  # type: str
        self.resource_group_id = kwargs['resource_group_id']
        self.staging_storage_account_id = kwargs['staging_storage_account_id']
class NotificationPreference(msrest.serialization.Model):
    """Whether a notification should be sent for a given job stage.

    All required parameters must be populated in order to send to Azure.

    :param stage_name: Required. Name of the stage. Possible values include: "DevicePrepared",
     "Dispatched", "Delivered", "PickedUp", "AtAzureDC", "DataCopy".
    :type stage_name: str or ~azure.mgmt.databox.models.NotificationStageName
    :param send_notification: Required. Notification is required or not.
    :type send_notification: bool
    """

    _validation = {
        'stage_name': {'required': True},
        'send_notification': {'required': True},
    }

    _attribute_map = {
        'stage_name': {'key': 'stageName', 'type': 'str'},
        'send_notification': {'key': 'sendNotification', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(NotificationPreference, self).__init__(**kwargs)
        # Both fields are mandatory; indexing raises KeyError if missing.
        self.stage_name = kwargs['stage_name']
        self.send_notification = kwargs['send_notification']
class Operation(msrest.serialization.Model):
    """A single REST operation exposed by the resource provider.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: Name of the operation. Format:
     {resourceProviderNamespace}/{resourceType}/{read|write|delete|action}.
    :vartype name: str
    :ivar display: Operation display values.
    :vartype display: ~azure.mgmt.databox.models.OperationDisplay
    :ivar properties: Operation properties.
    :vartype properties: object
    :ivar origin: Origin of the operation. Can be : user|system|user,system.
    :vartype origin: str
    :param is_data_action: Indicates whether the operation is a data action.
    :type is_data_action: bool
    """

    _validation = {
        'name': {'readonly': True},
        'display': {'readonly': True},
        'properties': {'readonly': True},
        'origin': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'properties': {'key': 'properties', 'type': 'object'},
        'origin': {'key': 'origin', 'type': 'str'},
        'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(Operation, self).__init__(**kwargs)
        # is_data_action is the only client-settable field.
        self.is_data_action = kwargs.get('is_data_action')
        # Remaining attributes are populated by the service.
        self.name = None
        self.display = None
        self.properties = None
        self.origin = None
class OperationDisplay(msrest.serialization.Model):
    """Localized display strings for an operation.

    :param provider: Provider name.
    :type provider: str
    :param resource: Resource name.
    :type resource: str
    :param operation: Localized name of the operation for display purpose.
    :type operation: str
    :param description: Localized description of the operation for display purpose.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(OperationDisplay, self).__init__(**kwargs)
        # All fields are optional display strings.
        self.provider = kwargs.get('provider')
        self.resource = kwargs.get('resource')
        self.operation = kwargs.get('operation')
        self.description = kwargs.get('description')
class OperationList(msrest.serialization.Model):
    """A page of operations, with a link to the next page.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: List of operations.
    :vartype value: list[~azure.mgmt.databox.models.Operation]
    :param next_link: Link for the next set of operations.
    :type next_link: str
    """

    _validation = {
        'value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(OperationList, self).__init__(**kwargs)
        # The operation list itself is server-populated.
        self.value = None
        self.next_link = kwargs.get('next_link')
class PackageShippingDetails(msrest.serialization.Model):
    """Carrier and tracking information for a shipped package.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar carrier_name: Name of the carrier.
    :vartype carrier_name: str
    :ivar tracking_id: Tracking Id of shipment.
    :vartype tracking_id: str
    :ivar tracking_url: Url where shipment can be tracked.
    :vartype tracking_url: str
    """

    _validation = {
        'carrier_name': {'readonly': True},
        'tracking_id': {'readonly': True},
        'tracking_url': {'readonly': True},
    }

    _attribute_map = {
        'carrier_name': {'key': 'carrierName', 'type': 'str'},
        'tracking_id': {'key': 'trackingId', 'type': 'str'},
        'tracking_url': {'key': 'trackingUrl', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PackageShippingDetails, self).__init__(**kwargs)
        # All attributes are read-only and filled in by the service.
        self.carrier_name = None
        self.tracking_id = None
        self.tracking_url = None
class Preferences(msrest.serialization.Model):
    """Customer preferences for an order.

    :param preferred_data_center_region: Preferred data center region.
    :type preferred_data_center_region: list[str]
    :param transport_preferences: Preferences related to the shipment logistics of the sku.
    :type transport_preferences: ~azure.mgmt.databox.models.TransportPreferences
    :param encryption_preferences: Preferences related to the Encryption.
    :type encryption_preferences: ~azure.mgmt.databox.models.EncryptionPreferences
    """

    _attribute_map = {
        'preferred_data_center_region': {'key': 'preferredDataCenterRegion', 'type': '[str]'},
        'transport_preferences': {'key': 'transportPreferences', 'type': 'TransportPreferences'},
        'encryption_preferences': {'key': 'encryptionPreferences', 'type': 'EncryptionPreferences'},
    }

    def __init__(self, **kwargs):
        super(Preferences, self).__init__(**kwargs)
        # Every preference is optional.
        self.preferred_data_center_region = kwargs.get('preferred_data_center_region')
        self.transport_preferences = kwargs.get('transport_preferences')
        self.encryption_preferences = kwargs.get('encryption_preferences')
class PreferencesValidationRequest(ValidationInputRequest):
    """Validation request for transport and data center preferences.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Identifies the type of validation request.Constant filled by
     server. Possible values include: "ValidateAddress",
     "ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
     "ValidateSkuAvailability", "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    :param preference: Preference of transport and data center.
    :type preference: ~azure.mgmt.databox.models.Preferences
    :param device_type: Required. Device type to be used for the job. Possible values include:
     "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type device_type: str or ~azure.mgmt.databox.models.SkuName
    """

    _validation = {
        'validation_type': {'required': True},
        'device_type': {'required': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'preference': {'key': 'preference', 'type': 'Preferences'},
        'device_type': {'key': 'deviceType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PreferencesValidationRequest, self).__init__(**kwargs)
        # Polymorphic discriminator is fixed for this subclass.
        self.validation_type = 'ValidatePreferences'  # type: str
        self.preference = kwargs.get('preference')
        self.device_type = kwargs['device_type']  # required
class PreferencesValidationResponseProperties(ValidationInputResponse):
    """Validation response for transport and data center preferences.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Identifies the type of validation response.Constant filled by
     server. Possible values include: "ValidateAddress",
     "ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
     "ValidateSkuAvailability", "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    :ivar error: Error code and message of validation response.
    :vartype error: ~azure.mgmt.databox.models.CloudError
    :ivar status: Validation status of requested data center and transport. Possible values
     include: "Valid", "Invalid", "Skipped".
    :vartype status: str or ~azure.mgmt.databox.models.ValidationStatus
    """

    _validation = {
        'validation_type': {'required': True},
        'error': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'error': {'key': 'error', 'type': 'CloudError'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Parent initializes the read-only ``error`` attribute.
        super(PreferencesValidationResponseProperties, self).__init__(**kwargs)
        # Polymorphic discriminator is fixed for this subclass.
        self.validation_type = 'ValidatePreferences'  # type: str
        self.status = None  # server-populated
class RegionConfigurationRequest(msrest.serialization.Model):
    """Request body used to query the configuration for a region.

    :param schedule_availability_request: Request body to get the availability for scheduling
     orders.
    :type schedule_availability_request: ~azure.mgmt.databox.models.ScheduleAvailabilityRequest
    :param transport_availability_request: Request body to get the transport availability for given
     sku.
    :type transport_availability_request: ~azure.mgmt.databox.models.TransportAvailabilityRequest
    """

    _attribute_map = {
        'schedule_availability_request': {'key': 'scheduleAvailabilityRequest', 'type': 'ScheduleAvailabilityRequest'},
        'transport_availability_request': {'key': 'transportAvailabilityRequest', 'type': 'TransportAvailabilityRequest'},
    }

    def __init__(self, **kwargs):
        super(RegionConfigurationRequest, self).__init__(**kwargs)
        # Either sub-request may be omitted to skip that part of the query.
        self.schedule_availability_request = kwargs.get('schedule_availability_request')
        self.transport_availability_request = kwargs.get('transport_availability_request')
class RegionConfigurationResponse(msrest.serialization.Model):
    """Configuration information returned for a specific region.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar schedule_availability_response: Schedule availability for given sku in a region.
    :vartype schedule_availability_response:
     ~azure.mgmt.databox.models.ScheduleAvailabilityResponse
    :ivar transport_availability_response: Transport options available for given sku in a region.
    :vartype transport_availability_response:
     ~azure.mgmt.databox.models.TransportAvailabilityResponse
    """

    _validation = {
        'schedule_availability_response': {'readonly': True},
        'transport_availability_response': {'readonly': True},
    }

    _attribute_map = {
        'schedule_availability_response': {'key': 'scheduleAvailabilityResponse', 'type': 'ScheduleAvailabilityResponse'},
        'transport_availability_response': {'key': 'transportAvailabilityResponse', 'type': 'TransportAvailabilityResponse'},
    }

    def __init__(self, **kwargs):
        super(RegionConfigurationResponse, self).__init__(**kwargs)
        # Both attributes are read-only and filled in by the service.
        self.schedule_availability_response = None
        self.transport_availability_response = None
class ResourceIdentity(msrest.serialization.Model):
    """Managed service identity details for the resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param type: Identity type.
    :type type: str
    :ivar principal_id: Service Principal Id backing the Msi.
    :vartype principal_id: str
    :ivar tenant_id: Home Tenant Id.
    :vartype tenant_id: str
    :param user_assigned_identities: User Assigned Identities.
    :type user_assigned_identities: dict[str, ~azure.mgmt.databox.models.UserAssignedIdentity]
    """

    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentity}'},
    }

    def __init__(self, **kwargs):
        super(ResourceIdentity, self).__init__(**kwargs)
        # Client-settable fields.
        self.type = kwargs.get('type')
        self.user_assigned_identities = kwargs.get('user_assigned_identities')
        # Server-populated fields.
        self.principal_id = None
        self.tenant_id = None
class ScheduleAvailabilityResponse(msrest.serialization.Model):
    """Dates on which an order can be scheduled for a given sku in a region.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar available_dates: List of dates available to schedule.
    :vartype available_dates: list[~datetime.datetime]
    """

    _validation = {
        'available_dates': {'readonly': True},
    }

    _attribute_map = {
        'available_dates': {'key': 'availableDates', 'type': '[iso-8601]'},
    }

    def __init__(self, **kwargs):
        super(ScheduleAvailabilityResponse, self).__init__(**kwargs)
        # Read-only: populated by the service.
        self.available_dates = None
class ShareCredentialDetails(msrest.serialization.Model):
    """Credentials for a share created on the device.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar share_name: Name of the share.
    :vartype share_name: str
    :ivar share_type: Type of the share. Possible values include: "UnknownType", "HCS",
     "BlockBlob", "PageBlob", "AzureFile", "ManagedDisk", "AzurePremiumFiles".
    :vartype share_type: str or ~azure.mgmt.databox.models.ShareDestinationFormatType
    :ivar user_name: User name for the share.
    :vartype user_name: str
    :ivar password: Password for the share.
    :vartype password: str
    :ivar supported_access_protocols: Access protocols supported on the device.
    :vartype supported_access_protocols: list[str or ~azure.mgmt.databox.models.AccessProtocol]
    """

    _validation = {
        'share_name': {'readonly': True},
        'share_type': {'readonly': True},
        'user_name': {'readonly': True},
        'password': {'readonly': True},
        'supported_access_protocols': {'readonly': True},
    }

    _attribute_map = {
        'share_name': {'key': 'shareName', 'type': 'str'},
        'share_type': {'key': 'shareType', 'type': 'str'},
        'user_name': {'key': 'userName', 'type': 'str'},
        'password': {'key': 'password', 'type': 'str'},
        'supported_access_protocols': {'key': 'supportedAccessProtocols', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(ShareCredentialDetails, self).__init__(**kwargs)
        # All attributes are read-only and filled in by the service.
        self.share_name = None
        self.share_type = None
        self.user_name = None
        self.password = None
        self.supported_access_protocols = None
class ShipmentPickUpRequest(msrest.serialization.Model):
    """Details for requesting a shipment pick up.

    All required parameters must be populated in order to send to Azure.

    :param start_time: Required. Minimum date after which the pick up should commence, this must be
     in local time of pick up area.
    :type start_time: ~datetime.datetime
    :param end_time: Required. Maximum date before which the pick up should commence, this must be
     in local time of pick up area.
    :type end_time: ~datetime.datetime
    :param shipment_location: Required. Shipment Location in the pickup place. Eg.front desk.
    :type shipment_location: str
    """

    _validation = {
        'start_time': {'required': True},
        'end_time': {'required': True},
        'shipment_location': {'required': True},
    }

    _attribute_map = {
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'shipment_location': {'key': 'shipmentLocation', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ShipmentPickUpRequest, self).__init__(**kwargs)
        # All three fields are mandatory; indexing raises KeyError if missing.
        self.start_time = kwargs['start_time']
        self.end_time = kwargs['end_time']
        self.shipment_location = kwargs['shipment_location']
class ShipmentPickUpResponse(msrest.serialization.Model):
    """Service response to a shipment pick up request.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar confirmation_number: Confirmation number for the pick up request.
    :vartype confirmation_number: str
    :ivar ready_by_time: Time by which shipment should be ready for pick up, this is in local time
     of pick up area.
    :vartype ready_by_time: ~datetime.datetime
    """

    _validation = {
        'confirmation_number': {'readonly': True},
        'ready_by_time': {'readonly': True},
    }

    _attribute_map = {
        'confirmation_number': {'key': 'confirmationNumber', 'type': 'str'},
        'ready_by_time': {'key': 'readyByTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super(ShipmentPickUpResponse, self).__init__(**kwargs)
        # Both attributes are read-only and filled in by the service.
        self.confirmation_number = None
        self.ready_by_time = None
class ShippingAddress(msrest.serialization.Model):
    """Address to which the customer wants the device shipped.

    All required parameters must be populated in order to send to Azure.

    :param street_address1: Required. Street Address line 1.
    :type street_address1: str
    :param street_address2: Street Address line 2.
    :type street_address2: str
    :param street_address3: Street Address line 3.
    :type street_address3: str
    :param city: Name of the City.
    :type city: str
    :param state_or_province: Name of the State or Province.
    :type state_or_province: str
    :param country: Required. Name of the Country.
    :type country: str
    :param postal_code: Postal code.
    :type postal_code: str
    :param zip_extended_code: Extended Zip Code.
    :type zip_extended_code: str
    :param company_name: Name of the company.
    :type company_name: str
    :param address_type: Type of address. Possible values include: "None", "Residential",
     "Commercial".
    :type address_type: str or ~azure.mgmt.databox.models.AddressType
    """

    _validation = {
        'street_address1': {'required': True},
        'country': {'required': True},
    }

    _attribute_map = {
        'street_address1': {'key': 'streetAddress1', 'type': 'str'},
        'street_address2': {'key': 'streetAddress2', 'type': 'str'},
        'street_address3': {'key': 'streetAddress3', 'type': 'str'},
        'city': {'key': 'city', 'type': 'str'},
        'state_or_province': {'key': 'stateOrProvince', 'type': 'str'},
        'country': {'key': 'country', 'type': 'str'},
        'postal_code': {'key': 'postalCode', 'type': 'str'},
        'zip_extended_code': {'key': 'zipExtendedCode', 'type': 'str'},
        'company_name': {'key': 'companyName', 'type': 'str'},
        'address_type': {'key': 'addressType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ShippingAddress, self).__init__(**kwargs)
        # Required fields.
        self.street_address1 = kwargs['street_address1']
        self.country = kwargs['country']
        # Optional fields.
        self.street_address2 = kwargs.get('street_address2')
        self.street_address3 = kwargs.get('street_address3')
        self.city = kwargs.get('city')
        self.state_or_province = kwargs.get('state_or_province')
        self.postal_code = kwargs.get('postal_code')
        self.zip_extended_code = kwargs.get('zip_extended_code')
        self.company_name = kwargs.get('company_name')
        self.address_type = kwargs.get('address_type')
class Sku(msrest.serialization.Model):
    """Describes the sku of a Data Box device.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The sku name. Possible values include: "DataBox", "DataBoxDisk",
     "DataBoxHeavy".
    :type name: str or ~azure.mgmt.databox.models.SkuName
    :param display_name: The display name of the sku.
    :type display_name: str
    :param family: The sku family.
    :type family: str
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'family': {'key': 'family', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Sku, self).__init__(**kwargs)
        self.name = kwargs['name']  # required
        self.display_name = kwargs.get('display_name')
        self.family = kwargs.get('family')
class SkuAvailabilityValidationRequest(ValidationInputRequest):
    """Request to validate sku availability.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Identifies the type of validation request.Constant filled by
     server. Possible values include: "ValidateAddress",
     "ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
     "ValidateSkuAvailability", "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    :param device_type: Required. Device type to be used for the job. Possible values include:
     "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type device_type: str or ~azure.mgmt.databox.models.SkuName
    :param transfer_type: Required. Type of the transfer. Possible values include: "ImportToAzure",
     "ExportFromAzure".
    :type transfer_type: str or ~azure.mgmt.databox.models.TransferType
    :param country: Required. ISO country code. Country for hardware shipment. For codes check:
     https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Officially_assigned_code_elements.
    :type country: str
    :param location: Required. Location for data transfer. For locations check:
     https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-version=2018-01-01.
    :type location: str
    """

    _validation = {
        'validation_type': {'required': True},
        'device_type': {'required': True},
        'transfer_type': {'required': True},
        'country': {'required': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'device_type': {'key': 'deviceType', 'type': 'str'},
        'transfer_type': {'key': 'transferType', 'type': 'str'},
        'country': {'key': 'country', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Polymorphic discriminator is fixed for this subtype.
        self.validation_type = 'ValidateSkuAvailability'  # type: str
        self.device_type = kwargs['device_type']  # required
        self.transfer_type = kwargs['transfer_type']  # required
        self.country = kwargs['country']  # required
        self.location = kwargs['location']  # required
class SkuAvailabilityValidationResponseProperties(ValidationInputResponse):
    """Properties of sku availability validation response.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Identifies the type of validation response.Constant filled by
     server. Possible values include: "ValidateAddress",
     "ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
     "ValidateSkuAvailability", "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    :ivar error: Error code and message of validation response.
    :vartype error: ~azure.mgmt.databox.models.CloudError
    :ivar status: Sku availability validation status. Possible values include: "Valid", "Invalid",
     "Skipped".
    :vartype status: str or ~azure.mgmt.databox.models.ValidationStatus
    """

    _validation = {
        'validation_type': {'required': True},
        'error': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'error': {'key': 'error', 'type': 'CloudError'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Polymorphic discriminator is fixed for this subtype.
        self.validation_type = 'ValidateSkuAvailability'  # type: str
        self.status = None  # populated by the server
class SkuCapacity(msrest.serialization.Model):
    """Capacity of the sku.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar usable: Usable capacity in TB.
    :vartype usable: str
    :ivar maximum: Maximum capacity in TB.
    :vartype maximum: str
    """

    _validation = {
        'usable': {'readonly': True},
        'maximum': {'readonly': True},
    }

    _attribute_map = {
        'usable': {'key': 'usable', 'type': 'str'},
        'maximum': {'key': 'maximum', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are read-only and populated by the server.
        self.usable = None
        self.maximum = None
class SkuCost(msrest.serialization.Model):
    """Describes metadata for retrieving price info.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar meter_id: Meter id of the Sku.
    :vartype meter_id: str
    :ivar meter_type: The type of the meter.
    :vartype meter_type: str
    :ivar multiplier: Multiplier specifies the region specific value to be multiplied with 1$ guid.
     Eg: Our new regions will be using 1$ shipping guid with appropriate multiplier specific to
     region.
    :vartype multiplier: float
    """

    _validation = {
        'meter_id': {'readonly': True},
        'meter_type': {'readonly': True},
        'multiplier': {'readonly': True},
    }

    _attribute_map = {
        'meter_id': {'key': 'meterId', 'type': 'str'},
        'meter_type': {'key': 'meterType', 'type': 'str'},
        'multiplier': {'key': 'multiplier', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All fields are read-only and populated by the server.
        self.meter_id = None
        self.meter_type = None
        self.multiplier = None
class SkuInformation(msrest.serialization.Model):
    """Information of the sku.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar sku: The Sku.
    :vartype sku: ~azure.mgmt.databox.models.Sku
    :ivar enabled: The sku is enabled or not.
    :vartype enabled: bool
    :ivar data_location_to_service_location_map: The map of data location to service location.
    :vartype data_location_to_service_location_map:
     list[~azure.mgmt.databox.models.DataLocationToServiceLocationMap]
    :ivar capacity: Capacity of the Sku.
    :vartype capacity: ~azure.mgmt.databox.models.SkuCapacity
    :ivar costs: Cost of the Sku.
    :vartype costs: list[~azure.mgmt.databox.models.SkuCost]
    :ivar api_versions: Api versions that support this Sku.
    :vartype api_versions: list[str]
    :ivar disabled_reason: Reason why the Sku is disabled. Possible values include: "None",
     "Country", "Region", "Feature", "OfferType", "NoSubscriptionInfo".
    :vartype disabled_reason: str or ~azure.mgmt.databox.models.SkuDisabledReason
    :ivar disabled_reason_message: Message for why the Sku is disabled.
    :vartype disabled_reason_message: str
    :ivar required_feature: Required feature to access the sku.
    :vartype required_feature: str
    """

    _validation = {
        'sku': {'readonly': True},
        'enabled': {'readonly': True},
        'data_location_to_service_location_map': {'readonly': True},
        'capacity': {'readonly': True},
        'costs': {'readonly': True},
        'api_versions': {'readonly': True},
        'disabled_reason': {'readonly': True},
        'disabled_reason_message': {'readonly': True},
        'required_feature': {'readonly': True},
    }

    _attribute_map = {
        'sku': {'key': 'sku', 'type': 'Sku'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'data_location_to_service_location_map': {'key': 'properties.dataLocationToServiceLocationMap', 'type': '[DataLocationToServiceLocationMap]'},
        'capacity': {'key': 'properties.capacity', 'type': 'SkuCapacity'},
        'costs': {'key': 'properties.costs', 'type': '[SkuCost]'},
        'api_versions': {'key': 'properties.apiVersions', 'type': '[str]'},
        'disabled_reason': {'key': 'properties.disabledReason', 'type': 'str'},
        'disabled_reason_message': {'key': 'properties.disabledReasonMessage', 'type': 'str'},
        'required_feature': {'key': 'properties.requiredFeature', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All fields are read-only and populated by the server.
        self.sku = None
        self.enabled = None
        self.data_location_to_service_location_map = None
        self.capacity = None
        self.costs = None
        self.api_versions = None
        self.disabled_reason = None
        self.disabled_reason_message = None
        self.required_feature = None
class StorageAccountDetails(DataAccountDetails):
    """Details for the storage account.

    All required parameters must be populated in order to send to Azure.

    :param data_account_type: Required. Account Type of the data to be transferred.Constant filled
     by server. Possible values include: "StorageAccount", "ManagedDisk".
    :type data_account_type: str or ~azure.mgmt.databox.models.DataAccountType
    :param share_password: Password for all the shares to be created on the device. Should not be
     passed for TransferType:ExportFromAzure jobs. If this is not passed, the service will generate
     password itself. This will not be returned in Get Call. Password Requirements : Password must
     be minimum of 12 and maximum of 64 characters. Password must have at least one uppercase
     alphabet, one number and one special character. Password cannot have the following characters :
     IilLoO0 Password can have only alphabets, numbers and these characters : @#-$%^!+=;:_()]+.
    :type share_password: str
    :param storage_account_id: Required. Storage Account Resource Id.
    :type storage_account_id: str
    """

    _validation = {
        'data_account_type': {'required': True},
        'storage_account_id': {'required': True},
    }

    _attribute_map = {
        'data_account_type': {'key': 'dataAccountType', 'type': 'str'},
        'share_password': {'key': 'sharePassword', 'type': 'str'},
        'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # ``share_password`` is assigned by the DataAccountDetails base class.
        super().__init__(**kwargs)
        # Polymorphic discriminator is fixed for this subtype.
        self.data_account_type = 'StorageAccount'  # type: str
        self.storage_account_id = kwargs['storage_account_id']  # required
class SubscriptionIsAllowedToCreateJobValidationRequest(ValidationInputRequest):
    """Request to validate subscription permission to create jobs.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Identifies the type of validation request.Constant filled by
     server. Possible values include: "ValidateAddress",
     "ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
     "ValidateSkuAvailability", "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    """

    _validation = {
        'validation_type': {'required': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Polymorphic discriminator is fixed for this subtype.
        self.validation_type = 'ValidateSubscriptionIsAllowedToCreateJob'  # type: str
class SubscriptionIsAllowedToCreateJobValidationResponseProperties(ValidationInputResponse):
    """Properties of subscription permission to create job validation response.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Identifies the type of validation response.Constant filled by
     server. Possible values include: "ValidateAddress",
     "ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
     "ValidateSkuAvailability", "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    :ivar error: Error code and message of validation response.
    :vartype error: ~azure.mgmt.databox.models.CloudError
    :ivar status: Validation status of subscription permission to create job. Possible values
     include: "Valid", "Invalid", "Skipped".
    :vartype status: str or ~azure.mgmt.databox.models.ValidationStatus
    """

    _validation = {
        'validation_type': {'required': True},
        'error': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'error': {'key': 'error', 'type': 'CloudError'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Polymorphic discriminator is fixed for this subtype.
        self.validation_type = 'ValidateSubscriptionIsAllowedToCreateJob'  # type: str
        self.status = None  # populated by the server
class TransferAllDetails(msrest.serialization.Model):
    """Details to transfer all data.

    All required parameters must be populated in order to send to Azure.

    :param data_account_type: Required. Type of the account of data. Possible values include:
     "StorageAccount", "ManagedDisk".
    :type data_account_type: str or ~azure.mgmt.databox.models.DataAccountType
    :param transfer_all_blobs: To indicate if all Azure blobs have to be transferred.
    :type transfer_all_blobs: bool
    :param transfer_all_files: To indicate if all Azure Files have to be transferred.
    :type transfer_all_files: bool
    """

    _validation = {
        'data_account_type': {'required': True},
    }

    _attribute_map = {
        'data_account_type': {'key': 'dataAccountType', 'type': 'str'},
        'transfer_all_blobs': {'key': 'transferAllBlobs', 'type': 'bool'},
        'transfer_all_files': {'key': 'transferAllFiles', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.data_account_type = kwargs['data_account_type']  # required
        self.transfer_all_blobs = kwargs.get('transfer_all_blobs')
        self.transfer_all_files = kwargs.get('transfer_all_files')
class TransferConfiguration(msrest.serialization.Model):
    """Configuration for defining the transfer of data.

    All required parameters must be populated in order to send to Azure.

    :param transfer_configuration_type: Required. Type of the configuration for transfer. Possible
     values include: "TransferAll", "TransferUsingFilter".
    :type transfer_configuration_type: str or ~azure.mgmt.databox.models.TransferConfigurationType
    :param transfer_filter_details: Map of filter type and the details to filter. This field is
     required only if the TransferConfigurationType is given as TransferUsingFilter.
    :type transfer_filter_details:
     ~azure.mgmt.databox.models.TransferConfigurationTransferFilterDetails
    :param transfer_all_details: Map of filter type and the details to transfer all data. This
     field is required only if the TransferConfigurationType is given as TransferAll.
    :type transfer_all_details: ~azure.mgmt.databox.models.TransferConfigurationTransferAllDetails
    """

    _validation = {
        'transfer_configuration_type': {'required': True},
    }

    _attribute_map = {
        'transfer_configuration_type': {'key': 'transferConfigurationType', 'type': 'str'},
        'transfer_filter_details': {'key': 'transferFilterDetails', 'type': 'TransferConfigurationTransferFilterDetails'},
        'transfer_all_details': {'key': 'transferAllDetails', 'type': 'TransferConfigurationTransferAllDetails'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.transfer_configuration_type = kwargs['transfer_configuration_type']  # required
        self.transfer_filter_details = kwargs.get('transfer_filter_details')
        self.transfer_all_details = kwargs.get('transfer_all_details')
class TransferConfigurationTransferAllDetails(msrest.serialization.Model):
    """Map of filter type and the details to transfer all data. This field is required only if the TransferConfigurationType is given as TransferAll.

    :param include: Details to transfer all data.
    :type include: ~azure.mgmt.databox.models.TransferAllDetails
    """

    _attribute_map = {
        'include': {'key': 'include', 'type': 'TransferAllDetails'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.include = kwargs.get('include')
class TransferConfigurationTransferFilterDetails(msrest.serialization.Model):
    """Map of filter type and the details to filter. This field is required only if the TransferConfigurationType is given as TransferUsingFilter.

    :param include: Details of the filtering the transfer of data.
    :type include: ~azure.mgmt.databox.models.TransferFilterDetails
    """

    _attribute_map = {
        'include': {'key': 'include', 'type': 'TransferFilterDetails'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.include = kwargs.get('include')
class TransferFilterDetails(msrest.serialization.Model):
    """Details of the filtering the transfer of data.

    All required parameters must be populated in order to send to Azure.

    :param data_account_type: Required. Type of the account of data. Possible values include:
     "StorageAccount", "ManagedDisk".
    :type data_account_type: str or ~azure.mgmt.databox.models.DataAccountType
    :param blob_filter_details: Filter details to transfer blobs.
    :type blob_filter_details: ~azure.mgmt.databox.models.BlobFilterDetails
    :param azure_file_filter_details: Filter details to transfer Azure files.
    :type azure_file_filter_details: ~azure.mgmt.databox.models.AzureFileFilterDetails
    :param filter_file_details: Details of the filter files to be used for data transfer.
    :type filter_file_details: list[~azure.mgmt.databox.models.FilterFileDetails]
    """

    _validation = {
        'data_account_type': {'required': True},
    }

    _attribute_map = {
        'data_account_type': {'key': 'dataAccountType', 'type': 'str'},
        'blob_filter_details': {'key': 'blobFilterDetails', 'type': 'BlobFilterDetails'},
        'azure_file_filter_details': {'key': 'azureFileFilterDetails', 'type': 'AzureFileFilterDetails'},
        'filter_file_details': {'key': 'filterFileDetails', 'type': '[FilterFileDetails]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.data_account_type = kwargs['data_account_type']  # required
        self.blob_filter_details = kwargs.get('blob_filter_details')
        self.azure_file_filter_details = kwargs.get('azure_file_filter_details')
        self.filter_file_details = kwargs.get('filter_file_details')
class TransportAvailabilityDetails(msrest.serialization.Model):
    """Transport options availability details for given region.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar shipment_type: Transport Shipment Type supported for given region. Possible values
     include: "CustomerManaged", "MicrosoftManaged".
    :vartype shipment_type: str or ~azure.mgmt.databox.models.TransportShipmentTypes
    """

    _validation = {
        'shipment_type': {'readonly': True},
    }

    _attribute_map = {
        'shipment_type': {'key': 'shipmentType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.shipment_type = None  # populated by the server
class TransportAvailabilityRequest(msrest.serialization.Model):
    """Request body to get the transport availability for given sku.

    :param sku_name: Type of the device. Possible values include: "DataBox", "DataBoxDisk",
     "DataBoxHeavy".
    :type sku_name: str or ~azure.mgmt.databox.models.SkuName
    """

    _attribute_map = {
        'sku_name': {'key': 'skuName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.sku_name = kwargs.get('sku_name')
class TransportAvailabilityResponse(msrest.serialization.Model):
    """Transport options available for given sku in a region.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar transport_availability_details: List of transport availability details for given region.
    :vartype transport_availability_details:
     list[~azure.mgmt.databox.models.TransportAvailabilityDetails]
    """

    _validation = {
        'transport_availability_details': {'readonly': True},
    }

    _attribute_map = {
        'transport_availability_details': {'key': 'transportAvailabilityDetails', 'type': '[TransportAvailabilityDetails]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.transport_availability_details = None  # populated by the server
class TransportPreferences(msrest.serialization.Model):
    """Preferences related to the shipment logistics of the sku.

    All required parameters must be populated in order to send to Azure.

    :param preferred_shipment_type: Required. Indicates Shipment Logistics type that the customer
     preferred. Possible values include: "CustomerManaged", "MicrosoftManaged".
    :type preferred_shipment_type: str or ~azure.mgmt.databox.models.TransportShipmentTypes
    """

    _validation = {
        'preferred_shipment_type': {'required': True},
    }

    _attribute_map = {
        'preferred_shipment_type': {'key': 'preferredShipmentType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.preferred_shipment_type = kwargs['preferred_shipment_type']  # required
class UnencryptedCredentials(msrest.serialization.Model):
    """Unencrypted credentials for accessing device.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar job_name: Name of the job.
    :vartype job_name: str
    :ivar job_secrets: Secrets related to this job.
    :vartype job_secrets: ~azure.mgmt.databox.models.JobSecrets
    """

    _validation = {
        'job_name': {'readonly': True},
        'job_secrets': {'readonly': True},
    }

    _attribute_map = {
        'job_name': {'key': 'jobName', 'type': 'str'},
        'job_secrets': {'key': 'jobSecrets', 'type': 'JobSecrets'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are read-only and populated by the server.
        self.job_name = None
        self.job_secrets = None
class UnencryptedCredentialsList(msrest.serialization.Model):
    """List of unencrypted credentials for accessing device.

    :param value: List of unencrypted credentials.
    :type value: list[~azure.mgmt.databox.models.UnencryptedCredentials]
    :param next_link: Link for the next set of unencrypted credentials.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[UnencryptedCredentials]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class UpdateJobDetails(msrest.serialization.Model):
    """Job details for update.

    :param contact_details: Contact details for notification and shipping.
    :type contact_details: ~azure.mgmt.databox.models.ContactDetails
    :param shipping_address: Shipping address of the customer.
    :type shipping_address: ~azure.mgmt.databox.models.ShippingAddress
    :param key_encryption_key: Key encryption key for the job.
    :type key_encryption_key: ~azure.mgmt.databox.models.KeyEncryptionKey
    """

    _attribute_map = {
        'contact_details': {'key': 'contactDetails', 'type': 'ContactDetails'},
        'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
        'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyEncryptionKey'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.contact_details = kwargs.get('contact_details')
        self.shipping_address = kwargs.get('shipping_address')
        self.key_encryption_key = kwargs.get('key_encryption_key')
class UserAssignedIdentity(msrest.serialization.Model):
    """Class defining User assigned identity details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: The principal id of user assigned identity.
    :vartype principal_id: str
    :ivar client_id: The client id of user assigned identity.
    :vartype client_id: str
    """

    _validation = {
        'principal_id': {'readonly': True},
        'client_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'client_id': {'key': 'clientId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are read-only and populated by the server.
        self.principal_id = None
        self.client_id = None
class UserAssignedProperties(msrest.serialization.Model):
    """User assigned identity properties.

    :param resource_id: Arm resource id for user assigned identity to be used to fetch MSI token.
    :type resource_id: str
    """

    _attribute_map = {
        'resource_id': {'key': 'resourceId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.resource_id = kwargs.get('resource_id')
class ValidateAddress(ValidationInputRequest):
    """The requirements to validate customer address where the device needs to be shipped.

    All required parameters must be populated in order to send to Azure.

    :param validation_type: Required. Identifies the type of validation request.Constant filled by
     server. Possible values include: "ValidateAddress",
     "ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
     "ValidateSkuAvailability", "ValidateDataTransferDetails".
    :type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
    :param shipping_address: Required. Shipping address of the customer.
    :type shipping_address: ~azure.mgmt.databox.models.ShippingAddress
    :param device_type: Required. Device type to be used for the job. Possible values include:
     "DataBox", "DataBoxDisk", "DataBoxHeavy".
    :type device_type: str or ~azure.mgmt.databox.models.SkuName
    :param transport_preferences: Preferences related to the shipment logistics of the sku.
    :type transport_preferences: ~azure.mgmt.databox.models.TransportPreferences
    """

    _validation = {
        'validation_type': {'required': True},
        'shipping_address': {'required': True},
        'device_type': {'required': True},
    }

    _attribute_map = {
        'validation_type': {'key': 'validationType', 'type': 'str'},
        'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
        'device_type': {'key': 'deviceType', 'type': 'str'},
        'transport_preferences': {'key': 'transportPreferences', 'type': 'TransportPreferences'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Polymorphic discriminator is fixed for this subtype.
        self.validation_type = 'ValidateAddress'  # type: str
        self.shipping_address = kwargs['shipping_address']  # required
        self.device_type = kwargs['device_type']  # required
        self.transport_preferences = kwargs.get('transport_preferences')
class ValidationResponse(msrest.serialization.Model):
    """Response of pre job creation validations.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar status: Overall validation status. Possible values include: "AllValidToProceed",
     "InputsRevisitRequired", "CertainInputValidationsSkipped".
    :vartype status: str or ~azure.mgmt.databox.models.OverallValidationStatus
    :ivar individual_response_details: List of response details contain validationType and its
     response as key and value respectively.
    :vartype individual_response_details: list[~azure.mgmt.databox.models.ValidationInputResponse]
    """

    _validation = {
        'status': {'readonly': True},
        'individual_response_details': {'readonly': True},
    }

    _attribute_map = {
        'status': {'key': 'properties.status', 'type': 'str'},
        'individual_response_details': {'key': 'properties.individualResponseDetails', 'type': '[ValidationInputResponse]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are read-only and populated by the server.
        self.status = None
        self.individual_response_details = None
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
3f0d5dd795c63efece0b18782e3ac5fed79b6dfd | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=9/params.py | c43dddd0986e94512fb896453546bc28545453ac | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | {'cpus': 4,
'duration': 30,
'final_util': '2.516595',
'max_util': '2.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 9,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
0afd96d692bfc94c7f5f01bf86770bbc30d98414 | 3e82d54eda4a3dffd350dfbacc3d6486541211fe | /manage.py | 304e50896c1c893186e36e0542a71457742d63ee | [] | no_license | crowdbotics-apps/testapp-dev-1747 | c29cae77cf75a8ced4804e2093aa7dd7800872c5 | 485adfd2b57bd59185c0d2ea9714884e00130750 | refs/heads/master | 2022-04-08T14:08:53.030919 | 2020-02-28T19:19:18 | 2020-02-28T19:19:18 | 243,837,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line utility for administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testapp_dev_1747.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint, keeping the original as the cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
d05fc8d82d239460c8948bcb2604441baf39c5b5 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/surface/iot/devices/credentials/describe.py | c6e806c9fe1370cae03c45a6ce6d6cc152c4def6 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 2,028 | py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud iot devices credentials describe` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudiot import devices
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iot import flags
from googlecloudsdk.command_lib.iot import resource_args
from googlecloudsdk.command_lib.iot import util
class Describe(base.DescribeCommand):
  """Show details about a specific device credential."""

  detailed_help = {
      'EXAMPLES':
          """\
    To describe the first credential of a device in region 'us-central1', run:
    $ {command} --region=us-central1 --registry=my-registry --device=my-device 0
    """,
  }

  @staticmethod
  def Args(parser):
    # The device owning the credential, plus the positional credential index.
    resource_args.AddDeviceResourceArg(
        parser, 'to which the credential belongs', positional=False)
    flags.GetIndexFlag('credential', 'to describe').AddToParser(parser)

  def Run(self, args):
    client = devices.DevicesClient()
    device_ref = args.CONCEPTS.device.Parse()
    credentials = client.Get(device_ref).credentials
    if 0 <= args.index < len(credentials):
      return credentials[args.index]
    raise util.BadCredentialIndexError(
        device_ref.Name(), credentials, args.index)
| [
"jonathang132298@gmail.com"
] | jonathang132298@gmail.com |
f086c3c60ee2807d697de9ada4ec168df5ca413c | 9eaa2c64a777bd24a3cccd0230da5f81231ef612 | /study/1905/month01/code/Stage1/day15/exercise02.py | 5a783276705d8406f1385d3bdd97764fd8574235 | [
"MIT"
] | permissive | Dython-sky/AID1908 | 4528932f2ca66b844d8a3fcab5ed8bf84d20eb0c | 46cd54a7b36b5f009974f2bbb7005a4ad440ca1a | refs/heads/master | 2022-04-14T12:23:30.426270 | 2020-04-01T18:05:19 | 2020-04-01T18:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | """
练习2:根据生日(年月日),计算活了多少天
思路
年月日 --> 时间
当前时间 --> 出生时间
计算天
"""
import time
def life_days(year, month, day):
    """Return the number of whole days lived since the given birth date.

    :param year: birth year
    :param month: birth month (1-12)
    :param day: birth day of month
    :return: count of complete days between the birth date and now
    :raises ValueError: if the components do not form a valid date
    """
    birth_struct = time.strptime("{}-{}-{}".format(year, month, day), "%Y-%m-%d")
    seconds_alive = time.time() - time.mktime(birth_struct)
    # seconds -> hours -> days, truncated to whole days
    return int(seconds_alive / 60 / 60 // 24)


result = life_days(1998, 4, 4)
print(result)
| [
"dong_1998_dream@163.com"
] | dong_1998_dream@163.com |
578fcf3f4a42677b26d57fc4c57d9470a1df953a | a37c93503ecb205b16f860664629a41b7c48250e | /initiation/migrations/0003_auto_20200913_0148.py | baad8b906b1e677c71d249c5286e339f7fb44344 | [] | no_license | samuelitwaru/PMS | f86681eaec2e34142447c3e66ab8d0939f4e0dd0 | 7bf7c9c511dd727479020540eef2a86ef561369e | refs/heads/master | 2023-01-10T04:35:16.852447 | 2020-10-22T09:03:48 | 2020-10-22T09:03:48 | 290,486,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | # Generated by Django 2.2.3 on 2020-09-13 01:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2, 2020-09-13).

    Moves file attachments off Requisition, introduces the Item model,
    and re-points Specification at Item instead of Requisition.
    """

    dependencies = [
        ('initiation', '0002_auto_20200912_0001'),
    ]

    operations = [
        # Attachments now live on individual items, not the requisition.
        migrations.RemoveField(
            model_name='requisition',
            name='file_attachment',
        ),
        migrations.RemoveField(
            model_name='specification',
            name='requisition',
        ),
        # New line-item model; each item belongs to one requisition.
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('quantity', models.IntegerField()),
                ('unit_of_measure', models.CharField(max_length=64)),
                ('unit_cost', models.IntegerField()),
                ('description', models.CharField(max_length=512)),
                ('file_attachment', models.FileField(null=True, upload_to='')),
                ('requisition_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='initiation.Requisition')),
            ],
        ),
        # Specifications now reference an Item; default=1 backfills
        # existing rows and is dropped immediately afterwards.
        migrations.AddField(
            model_name='specification',
            name='item_id',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='initiation.Item'),
            preserve_default=False,
        ),
    ]
| [
"samuelitwaru@gmail.com"
] | samuelitwaru@gmail.com |
86df83c6ac010a214be5f8c2906b4e742ea73a87 | 5a17226264d9abe7ff99c7da76512551b3df86a5 | /leetcode_algorithm/hacker_rank_easy_grid_challenge.py | 3e9122d701492f26548736c95604398bd5809d3f | [] | no_license | ccubc/DS_self_learning | cfddf6a02926884fae5a0b2798a20a4470674101 | 40725ee6d699c19c25dfbd81363564e15707c448 | refs/heads/small_potato | 2021-06-30T18:35:04.463732 | 2021-02-12T05:59:01 | 2021-02-12T05:59:01 | 219,366,357 | 3 | 2 | null | 2020-11-12T04:34:16 | 2019-11-03T21:13:49 | Jupyter Notebook | UTF-8 | Python | false | false | 1,656 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 12:50:41 2020
gridChallenge
Given a square grid of characters in the range ascii[a-z], rearrange elements of each row alphabetically, ascending. Determine if the columns are also in ascending alphabetical order, top to bottom. Return YES if they are or NO if they are not.
For example, given:
a b c
a d e
e f g
The rows are already in alphabetical order. The columns a a e, b d f and c e g are also in alphabetical order, so the answer would be YES. Only elements within the same row can be rearranged. They cannot be moved to a different row.
@author: chengchen
"""
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the gridChallenge function below.
def gridChallenge(grid):
    """Sort each row alphabetically, then verify every column is
    non-decreasing from top to bottom.

    :param grid: list of equal-length lowercase strings
    :return: 'YES' if all columns are sorted after row-sorting, else 'NO'
    """
    # Fix: the original left several debug print() calls in place, which
    # corrupt the expected stdout on the judge; they are removed here.
    # sorted() on a string compares characters directly - no need to map
    # through ord() first.
    sorted_rows = [sorted(row) for row in grid]
    for upper, lower in zip(sorted_rows, sorted_rows[1:]):
        for a, b in zip(upper, lower):
            if a > b:
                return 'NO'
    return 'YES'
return res
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
grid = []
for _ in range(n):
grid_item = input()
grid.append(grid_item)
result = gridChallenge(grid)
fptr.write(result + '\n')
fptr.close()
| [
"chengchen1215@gmail.com"
] | chengchen1215@gmail.com |
6c60eb36eeb0a4af34726a6095e3e87304254470 | e7300321d37036463fabb4e959d4b22389d3bca8 | /snmpresponder/macro.py | 7c793889037ca6b6e33d4d54f07f1dcf729e02c0 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | etingof/snmpresponder | e9b3d3ad4ff06be599f6984a280692dda389fa99 | 6e69f6168b0993cbc36e83cb44245c8776f7427a | refs/heads/master | 2023-03-27T16:56:02.021452 | 2019-12-15T18:23:27 | 2019-12-15T18:23:27 | 164,206,184 | 7 | 2 | BSD-2-Clause | 2019-01-30T22:49:07 | 2019-01-05T10:50:45 | Python | UTF-8 | Python | false | false | 544 | py | #
# This file is part of snmpresponder software.
#
# Copyright (c) 2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/snmpresponder/license.html
#
def expandMacro(option, context):
    """Substitute every ${key} placeholder in *option* with the string
    form of the corresponding value from *context*.

    A falsy *option* (e.g. None or '') is returned unchanged, as is any
    option containing no '${' marker.
    """
    for key, value in context.items():
        if not option or '${' not in option:
            continue
        option = option.replace('${%s}' % key, str(value))
    return option


def expandMacros(options, context):
    """Expand macros in every element of *options*.

    Returns a new list; the input sequence is not modified.
    """
    return [expandMacro(option, context) for option in options]
| [
"etingof@gmail.com"
] | etingof@gmail.com |
9befa19f962e8ba4171f6cb4701bc4fcf00e3c6f | fc27e1e21ad4891b1d4e769170671da1a4d32ed2 | /aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/CreateCenRequest.py | fb1574c13c4f51a6a4ae5ea1896c4783115ff5c7 | [
"Apache-2.0"
] | permissive | yonzhan2/aliyun-openapi-python-sdk | 3d05f7e83aeb286ad553a6a36c42ce932a1ece3e | e64873f9b528e1a83e3ea27d583f3f7998e7650b | refs/heads/master | 2020-04-11T10:22:48.511973 | 2018-12-13T09:29:21 | 2018-12-13T09:29:21 | 161,712,443 | 1 | 0 | null | 2018-12-14T00:52:39 | 2018-12-14T00:52:39 | null | UTF-8 | Python | false | false | 2,226 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateCenRequest(RpcRequest):
    """RPC request wrapper for the CBN 'CreateCen' API (product 'Cbn',
    version 2017-09-12).

    Each get_*/set_* pair simply reads or writes the query parameter of
    the same name on the underlying RpcRequest.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateCen', 'cbn')

    def get_ResourceOwnerId(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_ResourceOwnerAccount(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_ClientToken(self):
        params = self.get_query_params()
        return params.get('ClientToken')

    def set_ClientToken(self, ClientToken):
        self.add_query_param('ClientToken', ClientToken)

    def get_OwnerAccount(self):
        params = self.get_query_params()
        return params.get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_Name(self):
        params = self.get_query_params()
        return params.get('Name')

    def set_Name(self, Name):
        self.add_query_param('Name', Name)

    def get_Description(self):
        params = self.get_query_params()
        return params.get('Description')

    def set_Description(self, Description):
        self.add_query_param('Description', Description)

    def get_OwnerId(self):
        params = self.get_query_params()
        return params.get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
ee77ce284052c960131e82d10421401c2060f43e | 594f60b6a536b831d0df38abea7f0ffc0a2fd3cb | /utils/caller_var_name.py | 3ef1f66a6d0fe81c11dad3400acbaf9a1f8b60a5 | [] | no_license | mh70cz/py | 1478439fe939076cca3a30be2f2d29fb4e8a3cd9 | 7fc23f2133624c787e1dd4856322d48251cc6f0e | refs/heads/master | 2022-08-12T06:08:30.720164 | 2022-08-08T23:16:19 | 2022-08-08T23:16:19 | 95,386,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | """ get the original variable name of a variable passed to a function """
# https://stackoverflow.com/questions/2749796/how-to-get-the-original-variable-name-of-variable-passed-to-a-function
# %%
import traceback
# %%
def get_arg_var_name(var):
    """Best-effort: return the source-level name the caller passed as *var*.

    Inspects the caller's source line through the traceback module, so it
    only works when source text is available and the call sits on a single
    line with a simple (non-nested) argument expression.

    :param var: any object (only the caller's spelling of it matters)
    :return: the argument text as written by the caller, or None when it
             cannot be recovered
    """
    import re
    stack = traceback.extract_stack()
    # stack[-1] is this frame; stack[-2] is the caller's frame.
    filename, lineno, function_name, code = stack[-2]
    if not code:
        # Source unavailable (interactive shell, zipped app, etc.).
        return None
    # Bug fix: the original searched for the literal "func(" copied from
    # the Stack Overflow answer, which never matches a call to
    # get_arg_var_name() itself.  Match this function's own call site.
    match = re.search(r"get_arg_var_name\s*\(\s*([^)]*)", code)
    return match.group(1).strip() if match else None
# Demo: recover the caller-side variable name of the argument.
foo = "myfoo"
arg_var_name = get_arg_var_name(foo)
print (arg_var_name)
# %%
| [
"mh70@mh70.cz"
] | mh70@mh70.cz |
9caf7213f35a574893e014c7ef43aa54969c1b44 | 299426a473b725df6c58f4b838be65eb7f6b46d2 | /BootloaderCorePkg/Tools/GenCfgDataDsc.py | ae65b633e258b3f55a0b0965810a3f34a21863bb | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause-Patent"
] | permissive | donnayoyo/slimbootloader | ae494e7f2e4e07652fc8c84e57f6f7a24f590310 | 7942790d76a66ef386660e72c61022696d36273e | refs/heads/master | 2023-03-05T03:04:12.822419 | 2023-02-22T15:22:45 | 2023-02-22T22:19:56 | 153,537,980 | 0 | 0 | BSD-2-Clause | 2018-10-17T23:51:58 | 2018-10-17T23:51:57 | null | UTF-8 | Python | false | false | 84,578 | py | ## @ GenCfgData.py
#
# Copyright (c) 2014 - 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import struct
import marshal
from functools import reduce
from datetime import date
# Generated file copyright header
# Template for the generated-file banner.  Placeholders filled by
# GetCopyrightHeader(): %s = file-type description, %4d = current year.
__copyright_tmp__ = """/** @file
Platform Configuration %s File.
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
This file is automatically generated. Please do NOT modify !!!
**/
"""
def Bytes2Val (Bytes):
    """Convert a little-endian sequence of byte values to an integer.

    :param Bytes: iterable of ints in range 0..255, least significant first
    :return: the combined integer (0 for an empty sequence)
    """
    # int.from_bytes replaces the reduce()/lambda form and, unlike
    # reduce() without an initializer, does not raise on empty input.
    return int.from_bytes(bytes(Bytes), 'little')
def Bytes2Str (Bytes):
    """Format byte values as a C-style initializer, e.g. '{ 0x01, 0xFF }'."""
    hex_items = ['0x%02X' % byte for byte in Bytes]
    return '{ ' + ', '.join(hex_items) + ' }'
def Str2Bytes (Value, Blen):
    """Convert a quoted string literal to a NUL-padded bytearray.

    :param Value: string including its surrounding quote characters
    :param Blen:  minimum length; shorter results are padded with b'\\x00'
                  (longer results are returned unmodified, not truncated)
    """
    result = bytearray(Value[1:-1], 'utf-8')  # drop the surrounding quotes
    padding = Blen - len(result)
    if padding > 0:
        result.extend(b'\x00' * padding)
    return result
def Val2Bytes (Value, Blen):
    """Split an integer into Blen little-endian byte values (list of ints)."""
    result = []
    for _ in range(Blen):
        result.append(Value & 0xff)
        Value >>= 8
    return result
def Array2Val (ValStr):
    """Parse a byte-array string such as '{ 0x12, 0x34 }' into a single
    little-endian integer (the first element is the least significant byte).

    Surrounding '{...}' braces and single quotes are stripped before
    parsing; elements may be decimal or 0x-prefixed hexadecimal.
    """
    text = ValStr.strip()
    for prefix, suffix in (('{', '}'), ("'", "'")):
        if text.startswith(prefix):
            text = text[1:]
        if text.endswith(suffix):
            text = text[:-1]
    value = 0
    for token in reversed(text.split(',')):
        token = token.strip()
        base = 16 if token.startswith('0x') else 10
        value = (value << 8) | int(token, base)
    return value
def GetCopyrightHeader (FileType, AllowModify = False):
    """Build the generated-file copyright banner for a given file type.

    :param FileType: one of 'bsf', 'dsc', 'dlt', 'inc', 'h'
    :param AllowModify: drop the 'do NOT modify' notice when True
    :return: banner text with the description and current year filled in
    """
    descriptions = {
        'bsf' : 'Boot Setting',
        'dsc' : 'Definition',
        'dlt' : 'Delta',
        'inc' : 'C Binary Blob',
        'h'   : 'C Struct Header'
    }
    # Script-style outputs need '#' comment prefixes; C-style outputs rely
    # on the template's own /** ... **/ block.
    comment_char = '#' if FileType in ['bsf', 'dsc', 'dlt'] else ''
    lines = __copyright_tmp__.split('\n')
    if AllowModify:
        lines = [line for line in lines if 'Please do NOT modify' not in line]
    header = '\n'.join(comment_char + line for line in lines)
    header = header[:-1] + '\n'
    return header % (descriptions[FileType], date.today().year)
class CLogicalExpression:
    """Recursive-descent evaluator for the integer expressions used in
    DSC '!if' directives and value fields.

    Supports decimal/hex/binary literals, named variables (resolved via
    dictVariable), unary '~', binary * / & | + -, and '(...)' grouping.
    """
    def __init__(self):
        # Parser cursor position, input string and variable table.
        self.index = 0
        self.string = ''
        self.dictVariable = {}
        # Only '(' / ')' are active; index 0 is the sole bracket type.
        self.parenthesisOpenSet = '('
        self.parenthesisCloseSet = ')'

    def errExit(self, err = ''):
        """Print a caret pointing at the parse position, then raise."""
        print ("ERROR: Express parsing for:")
        print (" %s" % self.string)
        print (" %s^" % (' ' * self.index))
        if err:
            print ("INFO : %s" % err)
        raise Exception ("Logical expression parsing error!")

    def getNonNumber (self, n1, n2):
        """Return whichever argument is not a pure decimal string, else None."""
        if not n1.isdigit():
            return n1
        if not n2.isdigit():
            return n2
        return None

    def getCurr(self, lens = 1):
        """Peek at the next `lens` characters (all remaining when lens == -1)."""
        try:
            if lens == -1:
                return self.string[self.index :]
            else:
                if self.index + lens > len(self.string):
                    lens = len(self.string) - self.index
                return self.string[self.index : self.index + lens]
        except Exception:
            return ''

    def isLast(self):
        """True when the cursor has consumed the whole input."""
        return self.index == len(self.string)

    def moveNext(self, len = 1):
        """Advance the cursor by `len` characters."""
        self.index += len

    def skipSpace(self):
        """Advance the cursor past spaces and tabs."""
        while not self.isLast():
            if self.getCurr() in ' \t':
                self.moveNext()
            else:
                return

    def getNumber(self, var):
        """Parse a 0x…/0b…/decimal literal; errExit() on anything else."""
        var = var.strip()
        if re.match('^0x[a-fA-F0-9]+$', var):
            value = int(var, 16)
        elif re.match('^0b[01]+$', var):
            value = int(var, 2)
        elif re.match('^[+-]?\d+$', var):
            value = int(var, 10)
        else:
            self.errExit("Invalid value '%s'" % var)
        return value

    def getVariable(self, var):
        """Look up a named variable; errExit() when undefined."""
        value = self.dictVariable.get(var, None)
        if value == None:
            self.errExit("Unrecognized variable '%s'" % var)
        return value

    def parseValue(self):
        """Consume one literal or variable token and return it as int."""
        self.skipSpace()
        var = ''
        while not self.isLast():
            char = self.getCurr()
            if re.match('^[\w.]', char):
                var += char
                self.moveNext()
            else:
                break
        if len(var):
            if var[0].isdigit():
                value = self.getNumber(var)
            else:
                value = self.getVariable(var)
        else:
            self.errExit('Invalid number or variable found !')
        return int(value)

    def parseSingleOp(self):
        """Handle unary '~' (bitwise NOT); otherwise parse a plain value."""
        self.skipSpace()
        char = self.getCurr()
        if char == '~':
            self.moveNext()
            return ~self.parseBrace()
        else:
            return self.parseValue()

    def parseBrace(self):
        """Parse a parenthesized sub-expression or fall through to a value."""
        self.skipSpace()
        char = self.getCurr()
        parenthesisType = self.parenthesisOpenSet.find(char)
        if parenthesisType >= 0:
            self.moveNext()
            value = self.parseExpr()
            self.skipSpace()
            if self.getCurr() != self.parenthesisCloseSet[parenthesisType]:
                self.errExit ("No closing brace !")
            self.moveNext()
            # NOTE(review): parenthesisOpenSet only contains '(' (type 0),
            # so the branches below are currently unreachable; getContent /
            # toAddress / toOffset are not defined on this class.
            if parenthesisType == 1: # [ : Get content
                value = self.getContent(value)
            elif parenthesisType == 2: # { : To address
                value = self.toAddress(value)
            elif parenthesisType == 3: # < : To offset
                value = self.toOffset(value)
            return value
        else:
            return self.parseSingleOp()

    def parseMul(self):
        """Parse a chain of '*' and '/' operations (left-associative)."""
        values = [self.parseBrace()]
        ops = ['*']
        while True:
            self.skipSpace()
            char = self.getCurr()
            if char == '*':
                self.moveNext()
                values.append(self.parseBrace())
                ops.append(char)
            elif char == '/':
                self.moveNext()
                values.append(self.parseBrace())
                ops.append(char)
            else:
                break
        value = 1
        for idx, each in enumerate(values):
            if ops[idx] == '*':
                value *= each
            else:
                value //= each
        return value

    def parseAndOr(self):
        """Parse a chain of bitwise '&' and '|' operations."""
        value = self.parseMul()
        op = None
        while True:
            self.skipSpace()
            char = self.getCurr()
            if char == '&':
                self.moveNext()
                value &= self.parseMul()
            elif char == '|':
                # NOTE(review): div_index is assigned but never used.
                div_index = self.index
                self.moveNext()
                value |= self.parseMul()
            else:
                break
        return value

    def parseAddMinus(self):
        """Parse a chain of '+' and '-' operations (lowest precedence)."""
        values = [self.parseAndOr()]
        while True:
            self.skipSpace()
            char = self.getCurr()
            if char == '+':
                self.moveNext()
                values.append(self.parseAndOr())
            elif char == '-':
                self.moveNext()
                values.append(-1 * self.parseAndOr())
            else:
                break
        return sum(values)

    def parseExpr(self):
        """Entry point of the precedence-climbing grammar."""
        return self.parseAddMinus()

    def evaluateExpress (self, Expr, VarDict = {}):
        """Evaluate Expr to an int, resolving variables through VarDict."""
        self.index = 0
        self.string = Expr
        self.dictVariable = VarDict
        Result = self.parseExpr()
        return Result
class CGenCfgData:
def __init__(self):
    # Verbose tracing and global error state.
    self.Debug = False
    self.Error = ''
    self.ReleaseMode = True
    # BSF global-data section template.
    self._GlobalDataDef = """
GlobalDataDef
SKUID = 0, "DEFAULT"
EndGlobalData
"""
    # Built-in BSF option list for simple enable/disable items.
    self._BuidinOptionTxt = """
List &EN_DIS
Selection 0x1 , "Enabled"
Selection 0x0 , "Disabled"
EndList
"""
    # Recognized scalar struct types and DSC annotation keywords.
    self._StructType = ['UINT8','UINT16','UINT32','UINT64']
    self._BsfKeyList = ['FIND','NAME','HELP','TYPE','PAGE', 'PAGES', 'BLOCK', 'OPTION','CONDITION','ORDER', 'MARKER', 'SUBT']
    self._HdrKeyList = ['HEADER','STRUCT', 'EMBED', 'COMMENT']
    self._BuidinOption = {'$EN_DIS' : 'EN_DIS'}
    # Parse results populated by ParseDscFile() and helper methods.
    self._MacroDict = {}
    self._VarDict = {}
    self._PcdsDict = {}
    self._CfgBlkDict = {}
    self._CfgPageDict = {}
    self._CfgOptsDict = {}
    self._BsfTempDict = {}
    self._CfgItemList = []
    self._DscLines = []
    self._DscFile = ''
    self._CfgPageTree = {}
    self._MapVer = 0
    self._MinCfgTagId = 0x100
def ParseMacros (self, MacroDefStr):
    """Populate self._MacroDict from '-D' command-line style definitions.

    Accepts both fused ('-DABC=1') and split ('-D', 'ABC=1') forms; a
    macro without '=' is stored with an empty value.

    :param MacroDefStr: list of argument strings
    :return: 1 when no macro was parsed, 0 otherwise
    """
    # ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build']
    self._MacroDict = {}
    IsExpression = False
    for Macro in MacroDefStr:
        if Macro.startswith('-D'):
            IsExpression = True
            if len(Macro) > 2:
                Macro = Macro[2:]
            else :
                continue
        if IsExpression:
            IsExpression = False
            Match = re.match("(\w+)=(.+)", Macro)
            if Match:
                self._MacroDict[Match.group(1)] = Match.group(2)
            else:
                Match = re.match("(\w+)", Macro)
                if Match:
                    self._MacroDict[Match.group(1)] = ''
    if len(self._MacroDict) == 0:
        Error = 1
    else:
        Error = 0
        if self.Debug:
            print ("INFO : Macro dictionary:")
            for Each in self._MacroDict:
                print (" $(%s) = [ %s ]" % (Each , self._MacroDict[Each]))
    return Error
def EvaulateIfdef (self, Macro):
Result = Macro in self._MacroDict
if self.Debug:
print ("INFO : Eval Ifdef [%s] : %s" % (Macro, Result))
return Result
def ExpandMacros (self, Input, Preserve = False):
Line = Input
Match = re.findall("\$\(\w+\)", Input)
if Match:
for Each in Match:
Variable = Each[2:-1]
if Variable in self._MacroDict:
Line = Line.replace(Each, self._MacroDict[Variable])
else:
if self.Debug:
print ("WARN : %s is not defined" % Each)
if not Preserve:
Line = Line.replace(Each, Each[2:-1])
return Line
def ExpandPcds (self, Input):
Line = Input
Match = re.findall("(\w+\.\w+)", Input)
if Match:
for PcdName in Match:
if PcdName in self._PcdsDict:
Line = Line.replace(PcdName, self._PcdsDict[PcdName])
else:
if self.Debug:
print ("WARN : %s is not defined" % PcdName)
return Line
def EvaluateExpress (self, Expr):
    """Expand PCDs then macros in Expr and evaluate it to an integer."""
    expanded = self.ExpandPcds(Expr)
    expanded = self.ExpandMacros(expanded)
    evaluator = CLogicalExpression()
    result = evaluator.evaluateExpress (expanded, self._VarDict)
    if self.Debug:
        print ("INFO : Eval Express [%s] : %s" % (Expr, result))
    return result
def ValueToByteArray (self, ValueStr, Length):
    """Convert a DSC value string into a bytearray of exactly Length bytes.

    '{ FILE:path[,path...] }' values are expanded by concatenating the
    referenced binary files (paths relative to the DSC file); everything
    else goes through ValueToList().  Short results are NUL-padded;
    oversized results raise an Exception.
    """
    Match = re.match("\{\s*FILE:(.+)\}", ValueStr)
    if Match:
        FileList = Match.group(1).split(',')
        Result = bytearray()
        for File in FileList:
            File = File.strip()
            BinPath = os.path.join(os.path.dirname(self._DscFile), File)
            Result.extend(bytearray(open(BinPath, 'rb').read()))
    else:
        try:
            Result = bytearray(self.ValueToList(ValueStr, Length))
        except ValueError as e:
            raise Exception ("Bytes in '%s' must be in range 0~255 !" % ValueStr)
    if len(Result) < Length:
        Result.extend(b'\x00' * (Length - len(Result)))
    elif len(Result) > Length:
        raise Exception ("Value '%s' is too big to fit into %d bytes !" % (ValueStr, Length))
    return Result[:Length]
def ValueToList (self, ValueStr, Length):
    """Convert a DSC value string into a list of byte values.

    Supported forms: '{a, b, ...}' arrays (elements may be expressions,
    quoted strings, or 'expr:Nb' bit-field segments that are packed into
    whole bytes), quoted strings (NUL-padded to Length), or a single
    expression expanded into Length little-endian bytes.
    """
    if ValueStr[0] == '{':
        Result = []
        BinList = ValueStr[1:-1].split(',')
        InBitField = False
        LastInBitField = False
        Value = 0
        BitLen = 0
        for Element in BinList:
            InBitField = False
            Each = Element.strip()
            if len(Each) == 0:
                pass
            else:
                if Each[0] in ['"', "'"]:
                    Result.extend(list(bytearray(Each[1:-1], 'utf-8')))
                elif ':' in Each:
                    # Bit-field segment: 'expr:Nb' contributes N bits.
                    Match = re.match("(.+):(\d+)b", Each)
                    if Match is None:
                        raise Exception("Invald value list format '%s' !" % Each)
                    InBitField = True
                    CurrentBitLen = int(Match.group(2))
                    CurrentValue = ((self.EvaluateExpress(Match.group(1)) & (1<<CurrentBitLen) - 1)) << BitLen
                else:
                    Result.append(self.EvaluateExpress(Each.strip()))
            if InBitField:
                Value += CurrentValue
                BitLen += CurrentBitLen
            # Flush accumulated bit fields once the run ends (or at the
            # last element); the run must amount to whole bytes.
            if LastInBitField and ((not InBitField) or (Element == BinList[-1])):
                if BitLen % 8 != 0:
                    raise Exception("Invald bit field length!")
                Result.extend(Val2Bytes(Value, BitLen // 8))
                Value = 0
                BitLen = 0
            LastInBitField = InBitField
    elif ValueStr.startswith("'") and ValueStr.endswith("'"):
        Result = Str2Bytes (ValueStr, Length)
    elif ValueStr.startswith('"') and ValueStr.endswith('"'):
        Result = Str2Bytes (ValueStr, Length)
    else:
        Result = Val2Bytes (self.EvaluateExpress(ValueStr), Length)
    return Result
def FormatDeltaValue(self, ConfigDict):
    """Return the item's value formatted for a delta (.dlt) file.

    Strips NUL padding from quoted string values; for scalar-typed
    arrays (UINT8/16/32/64) the value is re-rendered as '{ 0x.., .. }'
    with one element per struct unit.
    """
    ValStr = ConfigDict['value']
    if ValStr[0] == "'":
        # Remove padding \x00 in the value string
        ValStr = "'%s'" % ValStr[1:-1].rstrip('\x00')
    Struct = ConfigDict['struct']
    if Struct in self._StructType:
        # Format the array using its struct type
        Unit = int(Struct[4:]) // 8
        Value = Array2Val(ConfigDict['value'])
        Loop = ConfigDict['length'] // Unit
        Values = []
        for Each in range(Loop):
            Values.append (Value & ((1 << (Unit * 8)) - 1))
            Value = Value >> (Unit * 8)
        ValStr = '{ ' + ', '.join ([('0x%%0%dX' % (Unit * 2)) % x for x in Values]) + ' }'
    return ValStr
def FormatListValue(self, ConfigDict):
    """Normalize a scalar-typed array value to a '{0x..,0x..}' byte list.

    Mutates ConfigDict['value'] in place; items whose 'struct' is not a
    recognized scalar type are left untouched.  Raises when the element
    count does not match the declared byte length.
    """
    Struct = ConfigDict['struct']
    if Struct not in self._StructType:
        return
    DataList = self.ValueToList(ConfigDict['value'], ConfigDict['length'])
    Unit = int(Struct[4:]) // 8
    if int(ConfigDict['length']) != Unit * len(DataList):
        # Fallback to byte array
        Unit = 1
        if int(ConfigDict['length']) != len(DataList):
            raise Exception("Array size is not proper for '%s' !" % ConfigDict['cname'])
    ByteArray = []
    for Value in DataList:
        for Loop in range(Unit):
            ByteArray.append("0x%02X" % (Value & 0xFF))
            Value = Value >> 8
    NewValue = '{' + ','.join(ByteArray) + '}'
    ConfigDict['value'] = NewValue
    return ""
def GetOrderNumber (self, Offset, Order, BitOff = 0):
if isinstance(Order, int):
if Order == -1:
Order = Offset << 16
else:
(Major, Minor) = Order.split('.')
Order = (int (Major, 16) << 16) + ((int (Minor, 16) & 0xFF) << 8)
return Order + (BitOff & 0xFF)
def SubtituteLine (self, Line, Args):
Args = Args.strip()
Vars = Args.split(':')
Line = self.ExpandMacros(Line, True)
for Idx in range(len(Vars)-1, 0, -1):
Line = Line.replace('$(%d)' % Idx, Vars[Idx].strip())
Remaining = re.findall ('\$\(\d+\)', Line)
if len(Remaining) > 0:
raise Exception ("ERROR: Unknown argument '%s' for template '%s' !" % (Remaining[0], Vars[0]))
return Line
def CfgDuplicationCheck (self, CfgDict, Name):
if not self.Debug:
return
if Name == 'Dummy':
return
if Name not in CfgDict:
CfgDict[Name] = 1
else:
print ("WARNING: Duplicated item found '%s' !" % ConfigDict['cname'])
def AddBsfChildPage (self, Child, Parent = 'root'):
def AddBsfChildPageRecursive (PageTree, Parent, Child):
Key = next(iter(PageTree))
if Parent == Key:
PageTree[Key].append({Child : []})
return True
else:
Result = False
for Each in PageTree[Key]:
if AddBsfChildPageRecursive (Each, Parent, Child):
Result = True
break
return Result
return AddBsfChildPageRecursive (self._CfgPageTree, Parent, Child)
def ParseDscFile (self, DscFile):
    """Parse a DSC configuration file into self._CfgItemList and the
    page/block/template dictionaries.

    Handles the [Defines] / [PcdsFeatureFlag] / [PcdsDynamicVpd.Tmp] /
    [PcdsDynamicVpd.Upd] sections, the !if/!ifdef/!else/!endif/!include
    directives, '# !BSF' / '# !HDR' annotations, BSF templates (DEFT /
    SUBT) and 'gCfgData.Name | Offset | Length | Value' item lines.

    :param DscFile: path to the DSC file to parse
    :return: 0 on success (structural problems raise Exception)
    """
    self._DscLines = []
    self._CfgItemList = []
    self._CfgPageDict = {}
    self._CfgBlkDict = {}
    self._BsfTempDict = {}
    self._CfgPageTree = {'root' : []}
    self._DscFile = DscFile
    CfgDict = {}
    SectionNameList = ["Defines".lower(), "PcdsFeatureFlag".lower(),
                       "PcdsDynamicVpd.Tmp".lower(), "PcdsDynamicVpd.Upd".lower()]
    IsDefSect = False
    IsPcdSect = False
    IsUpdSect = False
    IsTmpSect = False
    TemplateName = ''
    IfStack = []
    ElifStack = []
    Error = 0
    ConfigDict = {}
    DscFd = open(DscFile, "r")
    DscLines = DscFd.readlines()
    DscFd.close()
    BsfRegExp = re.compile("(%s):{(.+?)}(?:$|\s+)" % '|'.join(self._BsfKeyList))
    HdrRegExp = re.compile("(%s):{(.+?)}" % '|'.join(self._HdrKeyList))
    CfgRegExp = re.compile("^([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+|\*)\s*\|\s*(\d+|0x[0-9a-fA-F]+)\s*\|\s*(.+)")
    TksRegExp = re.compile("^(g[_a-zA-Z0-9]+\.)(.+)")
    SkipLines = 0
    # DscLines is consumed as a queue so that !include and SUBT template
    # expansion can push lines back onto the front.
    while len(DscLines):
        DscLine = DscLines.pop(0).strip()
        if SkipLines == 0:
            self._DscLines.append (DscLine)
        else:
            SkipLines = SkipLines - 1
        if len(DscLine) == 0:
            continue
        Handle = False
        Match = re.match("^\[(.+)\]", DscLine)
        if Match is not None:
            # Section header: reset section flags and select the new one.
            IsDefSect = False
            IsPcdSect = False
            IsUpdSect = False
            IsTmpSect = False
            SectionName = Match.group(1).lower()
            if SectionName == SectionNameList[0]:
                IsDefSect = True
            if SectionName == SectionNameList[1]:
                IsPcdSect = True
            elif SectionName == SectionNameList[2]:
                IsTmpSect = True
            elif SectionName == SectionNameList[3]:
                ConfigDict = {
                    'header' : 'ON',
                    'page' : '',
                    'name' : '',
                    'find' : '',
                    'struct' : '',
                    'embed' : '',
                    'marker' : '',
                    'option' : '',
                    'comment' : '',
                    'condition' : '',
                    'order' : -1,
                    'subreg' : []
                }
                IsUpdSect = True
                Offset = 0
        else:
            if IsDefSect or IsPcdSect or IsUpdSect or IsTmpSect:
                # Conditional-compilation directive handling.
                Match = False if DscLine[0] != '!' else True
                if Match:
                    Match = re.match("^!(else|endif|ifdef|ifndef|if|elseif|include)\s*(.+)?$", DscLine)
                Keyword = Match.group(1) if Match else ''
                Remaining = Match.group(2) if Match else ''
                Remaining = '' if Remaining is None else Remaining.strip()
                if Keyword in ['if', 'elseif', 'ifdef', 'ifndef', 'include'] and not Remaining:
                    raise Exception ("ERROR: Expression is expected after '!if' or !elseif' for line '%s'" % DscLine)
                if Keyword == 'else':
                    if IfStack:
                        IfStack[-1] = not IfStack[-1]
                    else:
                        raise Exception ("ERROR: No paired '!if' found for '!else' for line '%s'" % DscLine)
                elif Keyword == 'endif':
                    if IfStack:
                        IfStack.pop()
                        Level = ElifStack.pop()
                        if Level > 0:
                            del IfStack[-Level:]
                    else:
                        raise Exception ("ERROR: No paired '!if' found for '!endif' for line '%s'" % DscLine)
                elif Keyword == 'ifdef' or Keyword == 'ifndef':
                    Result = self.EvaulateIfdef (Remaining)
                    if Keyword == 'ifndef':
                        Result = not Result
                    IfStack.append(Result)
                    ElifStack.append(0)
                elif Keyword == 'if' or Keyword == 'elseif':
                    Result = self.EvaluateExpress(Remaining)
                    if Keyword == "if":
                        ElifStack.append(0)
                        IfStack.append(Result)
                    else: #elseif
                        if IfStack:
                            IfStack[-1] = not IfStack[-1]
                            IfStack.append(Result)
                            ElifStack[-1] = ElifStack[-1] + 1
                        else:
                            raise Exception ("ERROR: No paired '!if' found for '!elif' for line '%s'" % DscLine)
                else:
                    if IfStack:
                        Handle = reduce(lambda x,y: x and y, IfStack)
                    else:
                        Handle = True
                    if Handle:
                        if Keyword == 'include':
                            Remaining = self.ExpandMacros(Remaining)
                            # Relative to DSC filepath
                            IncludeFilePath = os.path.join(os.path.dirname(DscFile), Remaining)
                            if not os.path.exists(IncludeFilePath):
                                # Relative to repository to find dsc in common platform
                                IncludeFilePath = os.path.abspath(os.path.join(os.path.dirname(DscFile), "../../..", Remaining))
                            try:
                                IncludeDsc = open(IncludeFilePath, "r")
                            except:
                                raise Exception ("ERROR: Cannot open file '%s'." % IncludeFilePath)
                            NewDscLines = IncludeDsc.readlines()
                            IncludeDsc.close()
                            StartTag = ['# !< include %s\n' % Remaining]
                            EndTag = ['# !> include %s\n' % Remaining]
                            DscLines = StartTag + NewDscLines + EndTag + DscLines
                            del self._DscLines[-1]
                        else:
                            if DscLine.startswith('!'):
                                raise Exception ("ERROR: Unrecoginized directive for line '%s'" % DscLine)
        if not Handle:
            continue
        if IsDefSect:
            Match = re.match("^\s*(?:DEFINE\s+)*(\w+)\s*=\s*(.+)", DscLine)
            if Match:
                self._MacroDict[Match.group(1)] = Match.group(2)
                if self.Debug:
                    print ("INFO : DEFINE %s = [ %s ]" % (Match.group(1), Match.group(2)))
        elif IsPcdSect:
            Match = re.match("^\s*([\w\.]+)\s*\|\s*(\w+)", DscLine)
            if Match:
                self._PcdsDict[Match.group(1)] = Match.group(2)
                if self.Debug:
                    print ("INFO : PCD %s = [ %s ]" % (Match.group(1), Match.group(2)))
        elif IsTmpSect:
            # !BSF DEFT:{GPIO_TMPL:START}
            Match = re.match("^\s*#\s+(!BSF)\s+DEFT:{(.+?):(START|END)}", DscLine)
            if Match:
                if Match.group(3) == 'START' and not TemplateName:
                    TemplateName = Match.group(2).strip()
                    self._BsfTempDict[TemplateName] = []
                if Match.group(3) == 'END' and (TemplateName == Match.group(2).strip()) and TemplateName:
                    TemplateName = ''
            else:
                if TemplateName:
                    Match = re.match("^!include\s*(.+)?$", DscLine)
                    if Match:
                        continue
                    self._BsfTempDict[TemplateName].append(DscLine)
        else:
            Match = re.match("^\s*#\s+(!BSF|!HDR)\s+(.+)", DscLine)
            if Match:
                Remaining = Match.group(2)
                if Match.group(1) == '!BSF':
                    Result = BsfRegExp.findall (Remaining)
                    if Result:
                        for Each in Result:
                            Key = Each[0]
                            Remaining = Each[1]
                            if Key == 'BLOCK':
                                Match = re.match("NAME:\"(.+)\"\s*,\s*VER:\"(.+)\"\s*", Remaining)
                                if Match:
                                    self._CfgBlkDict['name'] = Match.group(1)
                                    self._CfgBlkDict['ver'] = Match.group(2)
                            elif Key == 'SUBT':
                                #GPIO_TMPL:1:2:3
                                # Expand a template in place: push the
                                # substituted lines back onto the queue.
                                Remaining = Remaining.strip()
                                Match = re.match("(\w+)\s*:", Remaining)
                                if Match:
                                    TemplateName = Match.group(1)
                                    for Line in self._BsfTempDict[TemplateName][::-1]:
                                        NewLine = self.SubtituteLine (Line, Remaining)
                                        DscLines.insert(0, NewLine)
                                        SkipLines += 1
                            elif Key == 'PAGES':
                                # !BSF PAGES:{HSW:"Haswell System Agent", LPT:"Lynx Point PCH"}
                                PageList = Remaining.split(',')
                                for Page in PageList:
                                    Page = Page.strip()
                                    Match = re.match('(\w+):(\w*:)?\"(.+)\"', Page)
                                    if Match:
                                        PageName = Match.group(1)
                                        ParentName = Match.group(2)
                                        if not ParentName or ParentName == ':' :
                                            ParentName = 'root'
                                        else:
                                            ParentName = ParentName[:-1]
                                        if not self.AddBsfChildPage (PageName, ParentName):
                                            raise Exception("Cannot find parent page '%s'!" % ParentName)
                                        self._CfgPageDict[PageName] = Match.group(3)
                                    else:
                                        raise Exception("Invalid page definitions '%s'!" % Page)
                            elif Key in ['NAME', 'HELP', 'OPTION'] and Remaining.startswith('+'):
                                # Allow certain options to be extended to multiple lines
                                ConfigDict[Key.lower()] += Remaining[1:]
                            else:
                                if Key == 'NAME':
                                    Remaining = Remaining.strip()
                                elif Key == 'CONDITION':
                                    Remaining = self.ExpandMacros(Remaining.strip())
                                ConfigDict[Key.lower()] = Remaining
                else:
                    Match = HdrRegExp.match(Remaining)
                    if Match:
                        Key = Match.group(1)
                        Remaining = Match.group(2)
                        if Key == 'EMBED':
                            # EMBED markers create a zero-length 'Dummy'
                            # placeholder item at the current offset.
                            Parts = Remaining.split(':')
                            Names = Parts[0].split(',')
                            DummyDict = ConfigDict.copy()
                            if len(Names) > 1:
                                Remaining = Names[0] + ':' + ':'.join(Parts[1:])
                                DummyDict['struct'] = Names[1]
                            else:
                                DummyDict['struct'] = Names[0]
                            DummyDict['cname'] = 'Dummy'
                            DummyDict['name'] = ''
                            DummyDict['embed'] = Remaining
                            DummyDict['offset'] = Offset
                            DummyDict['length'] = 0
                            DummyDict['value'] = '0'
                            DummyDict['type'] = 'Reserved'
                            DummyDict['help'] = ''
                            DummyDict['subreg'] = []
                            self._CfgItemList.append(DummyDict)
                        else:
                            ConfigDict[Key.lower()] = Remaining
            # Check CFG line
            # gCfgData.VariableName | * | 0x01 | 0x1
            Clear = False
            Match = TksRegExp.match (DscLine)
            if Match:
                DscLine = 'gCfgData.%s' % Match.group(2)
            if DscLine.startswith('gCfgData.'):
                Match = CfgRegExp.match(DscLine[9:])
            else:
                Match = None
            if Match:
                ConfigDict['space'] = 'gCfgData'
                ConfigDict['cname'] = Match.group(1)
                if Match.group(2) != '*':
                    Offset = int (Match.group(2), 16)
                ConfigDict['offset'] = Offset
                ConfigDict['order'] = self.GetOrderNumber (ConfigDict['offset'], ConfigDict['order'])
                Value = Match.group(4).strip()
                if Match.group(3).startswith("0x"):
                    Length = int (Match.group(3), 16)
                else :
                    Length = int (Match.group(3))
                Offset += Length
                ConfigDict['length'] = Length
                Match = re.match("\$\((\w+)\)", Value)
                if Match:
                    if Match.group(1) in self._MacroDict:
                        Value = self._MacroDict[Match.group(1)]
                ConfigDict['value'] = Value
                if re.match("\{\s*FILE:(.+)\}", Value):
                    # Expand embedded binary file
                    ValArray = self.ValueToByteArray (ConfigDict['value'], ConfigDict['length'])
                    NewValue = Bytes2Str(ValArray)
                    self._DscLines[-1] = re.sub(r'(.*)(\{\s*FILE:.+\})' , r'\1 %s' % NewValue, self._DscLines[-1])
                    ConfigDict['value'] = NewValue
                if ConfigDict['name'] == '':
                    # Clear BSF specific items
                    ConfigDict['bsfname'] = ''
                    ConfigDict['help'] = ''
                    ConfigDict['type'] = ''
                    ConfigDict['option'] = ''
                self.CfgDuplicationCheck (CfgDict, ConfigDict['cname'])
                self._CfgItemList.append(ConfigDict.copy())
                Clear = True
            else:
                # It could be a virtual item as below
                # !BSF FIELD:{SerialDebugPortAddress0:1}
                # or
                # @Bsf FIELD:{SerialDebugPortAddress0:1b}
                Match = re.match("^\s*#\s+(!BSF)\s+FIELD:{(.+)}", DscLine)
                if Match:
                    BitFieldTxt = Match.group(2)
                    Match = re.match("(.+):(\d+)b([BWDQ])?", BitFieldTxt)
                    if not Match:
                        raise Exception ("Incorrect bit field format '%s' !" % BitFieldTxt)
                    UnitBitLen = 1
                    SubCfgDict = ConfigDict.copy()
                    SubCfgDict['cname'] = Match.group(1)
                    SubCfgDict['bitlength'] = int (Match.group(2)) * UnitBitLen
                    if SubCfgDict['bitlength'] > 0:
                        # Bit fields attach to the most recent full item.
                        LastItem = self._CfgItemList[-1]
                        if len(LastItem['subreg']) == 0:
                            SubOffset = 0
                        else:
                            SubOffset = LastItem['subreg'][-1]['bitoffset'] + LastItem['subreg'][-1]['bitlength']
                        if Match.group(3) == 'B':
                            SubCfgDict['bitunit'] = 1
                        elif Match.group(3) == 'W':
                            SubCfgDict['bitunit'] = 2
                        elif Match.group(3) == 'Q':
                            SubCfgDict['bitunit'] = 8
                        else:
                            SubCfgDict['bitunit'] = 4
                        SubCfgDict['bitoffset'] = SubOffset
                        SubCfgDict['order'] = self.GetOrderNumber (SubCfgDict['offset'], SubCfgDict['order'], SubOffset)
                        SubCfgDict['value'] = ''
                        SubCfgDict['cname'] = '%s_%s' % (LastItem['cname'], Match.group(1))
                        self.CfgDuplicationCheck (CfgDict, SubCfgDict['cname'])
                        LastItem['subreg'].append (SubCfgDict.copy())
                    Clear = True
            if Clear:
                # Reset the per-item annotation fields for the next item.
                ConfigDict['name'] = ''
                ConfigDict['find'] = ''
                ConfigDict['struct'] = ''
                ConfigDict['embed'] = ''
                ConfigDict['marker'] = ''
                ConfigDict['comment'] = ''
                ConfigDict['order'] = -1
                ConfigDict['subreg'] = []
                ConfigDict['option'] = ''
                ConfigDict['condition'] = ''
    return Error
def GetBsfBitFields (self, subitem, bytes):
start = subitem['bitoffset']
end = start + subitem['bitlength']
bitsvalue = ''.join('{0:08b}'.format(i) for i in bytes[::-1])
bitsvalue = bitsvalue[::-1]
bitslen = len(bitsvalue)
if start > bitslen or end > bitslen:
raise Exception ("Invalid bits offset [%d,%d] %d for %s" % (start, end, bitslen, subitem['name']))
return '0x%X' % (int(bitsvalue[start:end][::-1], 2))
def UpdateBsfBitFields (self, SubItem, NewValue, ValueArray):
    """Write NewValue into SubItem's bit range inside ValueArray in place.

    ValueArray is a little-endian bytearray; bits outside the field are
    preserved.  Raises when the bit range exceeds the array size.
    """
    Start = SubItem['bitoffset']
    End = Start + SubItem['bitlength']
    Blen = len (ValueArray)
    # Build an LSB-first bit string of the whole array, splice in the new
    # field bits, then convert back to little-endian bytes.
    BitsValue = ''.join('{0:08b}'.format(i) for i in ValueArray[::-1])
    BitsValue = BitsValue[::-1]
    BitsLen = len(BitsValue)
    if Start > BitsLen or End > BitsLen:
        raise Exception ("Invalid bits offset [%d,%d] %d for %s" % (Start, End, BitsLen, SubItem['name']))
    BitsValue = BitsValue[:Start] + '{0:0{1}b}'.format(NewValue, SubItem['bitlength'])[::-1] + BitsValue[End:]
    ValueArray[:] = bytearray.fromhex('{0:0{1}x}'.format(int(BitsValue[::-1], 2), Blen * 2))[::-1]
    def CreateVarDict (self):
        """Build self._VarDict, the variable table used by expression evaluation.

        Records:
          '_LENGTH_'            -- total config blob size (end of last item)
          '_START_<Struct>_'    -- offset where an embedded struct begins
          '_END_<Struct>_'      -- offset just past the embedded struct
          '_LENGTH_<Struct>_'   -- struct size (END - START)
          '_TAG_<Struct>_'      -- 12-bit tag value for TAG_xxx structs
          '_OFFSET_<marker>_'   -- offset of items carrying a marker
        Returns 0; structural problems are raised as exceptions.
        """
        Error = 0
        self._VarDict = {}
        if len(self._CfgItemList) > 0:
            # Total length is the end offset of the last configuration item.
            Item = self._CfgItemList[-1]
            self._VarDict['_LENGTH_'] = '%d' % (Item['offset'] + Item['length'])
        for Item in self._CfgItemList:
            Embed = Item['embed']
            # embed field format: "StructName:TagOrVar:START|END"
            Match = re.match("^(\w+):(\w+):(START|END)", Embed)
            if Match:
                StructName = Match.group(1)
                VarName = '_%s_%s_' % (Match.group(3), StructName)
                if Match.group(3) == 'END':
                    self._VarDict[VarName] = Item['offset'] + Item['length']
                    self._VarDict['_LENGTH_%s_' % StructName] = \
                        self._VarDict['_END_%s_' % StructName] - self._VarDict['_START_%s_' % StructName]
                    if Match.group(2).startswith('TAG_'):
                        # Tagged structures must be DWORD aligned.
                        if self._VarDict['_LENGTH_%s_' % StructName] % 4:
                            raise Exception("Size of structure '%s' is %d, not DWORD aligned !" % (StructName, self._VarDict['_LENGTH_%s_' % StructName]))
                        # Tag value is restricted to the low 12 bits.
                        self._VarDict['_TAG_%s_' % StructName] = int (Match.group(2)[4:], 16) & 0xFFF
                else:
                    self._VarDict[VarName] = Item['offset']
            if Item['marker']:
                self._VarDict['_OFFSET_%s_' % Item['marker'].strip()] = Item['offset']
        return Error
    def UpdateBsfBitUnit (self, Item):
        """Assign a common storage unit to each group of consecutive bit fields.

        Walks Item['subreg'] and accumulates bit lengths until a group exactly
        fills one storage unit (BYTE/WORD/DWORD/QWORD); every field in that
        group is then stamped with the same 'bitunit'.  Raises when fields do
        not pack exactly into units, or when the total bit count does not
        match the item's byte length.
        """
        BitTotal = 0
        BitOffset = 0
        StartIdx = 0
        Unit = None
        UnitDec = {1:'BYTE', 2:'WORD', 4:'DWORD', 8:'QWORD'}
        for Idx, SubItem in enumerate(Item['subreg']):
            if Unit is None:
                # The first field of a new group decides the candidate unit.
                Unit = SubItem['bitunit']
            BitLength = SubItem['bitlength']
            BitTotal += BitLength
            BitOffset += BitLength
            if BitOffset > 64 or BitOffset > Unit * 8:
                # Overflowed the unit -- the error is raised after the loop.
                break
            if BitOffset == Unit * 8:
                # Group exactly fills the unit; stamp every member with it.
                for SubIdx in range (StartIdx, Idx + 1):
                    Item['subreg'][SubIdx]['bitunit'] = Unit
                BitOffset = 0
                StartIdx = Idx + 1
                Unit = None
        if BitOffset > 0:
            raise Exception ("Bit fields cannot fit into %s for '%s.%s' !" % (UnitDec[Unit], Item['cname'], SubItem['cname']))
        ExpectedTotal = Item['length'] * 8
        if Item['length'] * 8 != BitTotal:
            raise Exception ("Bit fields total length (%d) does not match length (%d) of '%s' !" % (BitTotal, ExpectedTotal, Item['cname']))
    def UpdateDefaultValue (self):
        """Normalize default values for every configuration item.

        Items without bit fields: list/string values ({...}, '...', "...")
        are expanded via FormatListValue; non-numeric values are evaluated
        as expressions and stored as hex.  Items with bit fields: the parent
        value is distributed into each subfield and the bit unit grouping is
        finalized.  Returns 0.
        """
        Error = 0
        for Idx, Item in enumerate(self._CfgItemList):
            if len(Item['subreg']) == 0:
                Value = Item['value']
                if (len(Value) > 0) and (Value[0] == '{' or Value[0] == "'" or Value[0] == '"'):
                    # {XXX} or 'XXX' strings
                    self.FormatListValue(self._CfgItemList[Idx])
                else:
                    Match = re.match("(0x[0-9a-fA-F]+|[0-9]+)", Value)
                    if not Match:
                        # Not a plain number -- evaluate it as an expression.
                        NumValue = self.EvaluateExpress (Value)
                        Item['value'] = '0x%X' % NumValue
            else:
                # Distribute the parent value into each bit field.
                ValArray = self.ValueToByteArray (Item['value'], Item['length'])
                for SubItem in Item['subreg']:
                    SubItem['value'] = self.GetBsfBitFields(SubItem, ValArray)
                self.UpdateBsfBitUnit (Item)
        return Error
@staticmethod
def ExpandIncludeFiles (FilePath, CurDir = ''):
if CurDir == '':
CurDir = os.path.dirname(FilePath)
FilePath = os.path.basename(FilePath)
InputFilePath = os.path.join(CurDir, FilePath)
File = open(InputFilePath, "r")
Lines = File.readlines()
File.close()
NewLines = []
for LineNum, Line in enumerate(Lines):
Match = re.match("^!include\s*(.+)?$", Line.strip())
if Match:
IncPath = Match.group(1)
TmpPath = os.path.join(CurDir, IncPath)
OrgPath = TmpPath
if not os.path.exists(TmpPath):
CurDir = os.path.join(os.path.dirname (os.path.realpath(__file__)), "..", "..")
TmpPath = os.path.join(CurDir, IncPath)
if not os.path.exists(TmpPath):
raise Exception ("ERROR: Cannot open include file '%s'." % OrgPath)
else:
NewLines.append (('# Included from file: %s\n' % IncPath, TmpPath, 0))
NewLines.append (('# %s\n' % ('=' * 80), TmpPath, 0))
NewLines.extend (CGenCfgData.ExpandIncludeFiles (IncPath, CurDir))
else:
NewLines.append ((Line, InputFilePath, LineNum))
return NewLines
    def OverrideDefaultValue (self, DltFile):
        """Apply a delta (.dlt) file's overrides onto the parsed config items.

        Each non-comment line has the form 'Struct.Item[.BitField] | Value'.
        The item is located inside the named structure's START/END scope,
        and either its whole value or a single bit field is replaced.
        Requires a 'PLATFORMID_CFG_DATA.PlatformId' override to be present.
        Returns 0; all problems are raised as exceptions.
        """
        Error = 0
        DltLines = CGenCfgData.ExpandIncludeFiles (DltFile);
        PlatformId = None
        for Line, FilePath, LineNum in DltLines:
            Line = Line.strip()
            # Skip blank lines and comments.
            if not Line or Line.startswith('#'):
                continue
            # Struct.Item[.BitField] | Value
            Match = re.match("\s*(\w+)\.(\w+)(\.\w+)?\s*\|\s*(.+)", Line)
            if not Match:
                raise Exception("Unrecognized line '%s' (File:'%s' Line:%d) !" % (Line, FilePath, LineNum + 1))

            Found = False
            InScope = False
            # Scan for the item, but only between the structure's
            # START and END embed markers.
            for Idx, Item in enumerate(self._CfgItemList):
                if not InScope:
                    if not (Item['embed'].endswith(':START') and Item['embed'].startswith(Match.group(1))):
                        continue
                    InScope = True
                if Item['cname'] == Match.group(2):
                    Found = True
                    break
                if Item['embed'].endswith(':END') and Item['embed'].startswith(Match.group(1)):
                    break
            Name = '%s.%s' % (Match.group(1),Match.group(2))
            if not Found:
                # Report the structure name when it was never entered,
                # otherwise the missing item name.
                ErrItem = Match.group(2) if InScope else Match.group(1)
                raise Exception("Invalid configuration '%s' in '%s' (File:'%s' Line:%d) !" %
                                (ErrItem, Name, FilePath, LineNum + 1))

            ValueStr = Match.group(4).strip()

            if Match.group(3) is not None:
                # This is a subregion item
                BitField = Match.group(3)[1:]
                Found = False
                if len(Item['subreg']) > 0:
                    for SubItem in Item['subreg']:
                        if SubItem['cname'] == '%s_%s' % (Item['cname'], BitField):
                            Found = True
                            break
                if not Found:
                    raise Exception("Invalid configuration bit field '%s' in '%s.%s' (File:'%s' Line:%d) !" %
                                    (BitField, Name, BitField, FilePath, LineNum + 1))

                try:
                    Value = int(ValueStr, 16) if ValueStr.startswith('0x') else int(ValueStr, 10)
                except:
                    raise Exception("Invalid value '%s' for bit field '%s.%s' (File:'%s' Line:%d) !" %
                                    (ValueStr, Name, BitField, FilePath, LineNum + 1))

                # The value must fit in the target bit field.
                if Value >= 2 ** SubItem['bitlength']:
                    raise Exception("Invalid configuration bit field value '%s' for '%s.%s' (File:'%s' Line:%d) !" %
                                    (Value, Name, BitField, FilePath, LineNum + 1))

                ValArray = self.ValueToByteArray (Item['value'], Item['length'])
                self.UpdateBsfBitFields (SubItem, Value, ValArray)

                # Re-render the parent value in its original format
                # (byte array vs. plain hex number).
                if Item['value'].startswith('{'):
                    Item['value'] = '{' + ', '.join('0x%02X' % i for i in ValArray) + '}'
                else:
                    BitsValue = ''.join('{0:08b}'.format(i) for i in ValArray[::-1])
                    Item['value'] = '0x%X' % (int(BitsValue, 2))
            else:
                # Whole-item override; array items require an array value.
                if Item['value'].startswith('{') and not ValueStr.startswith('{'):
                    raise Exception("Data array required for '%s' (File:'%s' Line:%d) !" % (Name, FilePath, LineNum + 1))
                Item['value'] = ValueStr

            if Name == 'PLATFORMID_CFG_DATA.PlatformId':
                PlatformId = ValueStr

        if PlatformId is None:
            raise Exception("PLATFORMID_CFG_DATA.PlatformId is missing in file '%s' !" % (DltFile))
        return Error
    def ProcessMultilines (self, String, MaxCharLength):
        """Wrap String into indented comment lines of at most MaxCharLength.

        Literal '\\n' sequences in the string force a break; longer strings
        are additionally broken at spaces once a line reaches the limit.
        Returns the wrapped text, one indented line per row, each ending
        with a newline.
        """
        Multilines = ''
        StringLength = len(String)
        CurrentStringStart = 0
        StringOffset = 0
        BreakLineDict = []
        if len(String) <= MaxCharLength:
            # Short string: only break at explicit '\n' escapes.
            while (StringOffset < StringLength):
                if StringOffset >= 1:
                    if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n':
                        BreakLineDict.append (StringOffset + 1)
                StringOffset += 1
            if BreakLineDict != []:
                for Each in BreakLineDict:
                    Multilines += " %s\n" % String[CurrentStringStart:Each].lstrip()
                    CurrentStringStart = Each
                if StringLength - CurrentStringStart > 0:
                    Multilines += " %s\n" % String[CurrentStringStart:].lstrip()
            else:
                Multilines = " %s\n" % String
        else:
            # Long string: break at spaces near the limit, and also at
            # explicit '\n' escapes which reset the line counter.
            NewLineStart = 0
            NewLineCount = 0
            FoundSpaceChar = False
            while (StringOffset < StringLength):
                if StringOffset >= 1:
                    if NewLineCount >= MaxCharLength - 1:
                        # Only break at a space, and only when enough text
                        # remains to be worth wrapping.
                        if String[StringOffset] == ' ' and StringLength - StringOffset > 10:
                            BreakLineDict.append (NewLineStart + NewLineCount)
                            NewLineStart = NewLineStart + NewLineCount
                            NewLineCount = 0
                            FoundSpaceChar = True
                        elif StringOffset == StringLength - 1 and FoundSpaceChar == False:
                            # No space was ever found; emit as one line.
                            BreakLineDict.append (0)
                    if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n':
                        BreakLineDict.append (StringOffset + 1)
                        NewLineStart = StringOffset + 1
                        NewLineCount = 0
                StringOffset += 1
                NewLineCount += 1
            if BreakLineDict != []:
                BreakLineDict.sort ()
                for Each in BreakLineDict:
                    if Each > 0:
                        Multilines += " %s\n" % String[CurrentStringStart:Each].lstrip()
                    CurrentStringStart = Each
                if StringLength - CurrentStringStart > 0:
                    Multilines += " %s\n" % String[CurrentStringStart:].lstrip()
        return Multilines
def CreateField (self, Item, Name, Length, Offset, Struct, BsfName, Help, Option, BitsLength = None):
PosName = 28
PosComment = 30
NameLine=''
HelpLine=''
OptionLine=''
if Length == 0 and Name == 'Dummy':
return '\n'
IsArray = False
if Length in [1,2,4,8]:
Type = "UINT%d" % (Length * 8)
else:
IsArray = True
Type = "UINT8"
if Item and Item['value'].startswith('{'):
Type = "UINT8"
IsArray = True
if Struct != '':
Type = Struct
if Struct in ['UINT8','UINT16','UINT32','UINT64']:
IsArray = True
Unit = int(Type[4:]) // 8
Length = Length / Unit
else:
IsArray = False
if IsArray:
Name = Name + '[%d]' % Length
if len(Type) < PosName:
Space1 = PosName - len(Type)
else:
Space1 = 1
if BsfName != '':
NameLine=" %s\n" % BsfName
else:
NameLine="\n"
if Help != '':
HelpLine = self.ProcessMultilines (Help, 80)
if Option != '':
OptionLine = self.ProcessMultilines (Option, 80)
if Offset is None:
OffsetStr = '????'
else:
OffsetStr = '0x%04X' % Offset
if BitsLength is None:
BitsLength = ''
else:
BitsLength = ' : %d' % BitsLength
return "\n/** %s%s%s**/\n %s%s%s%s;\n" % (NameLine, HelpLine, OptionLine, Type, ' ' * Space1, Name, BitsLength)
def SplitTextBody (self, TextBody):
Marker1 = '{ /* _COMMON_STRUCT_START_ */'
Marker2 = '; /* _COMMON_STRUCT_END_ */'
ComBody = []
TxtBody = []
IsCommon = False
for Line in TextBody:
if Line.strip().endswith(Marker1):
Line = Line.replace(Marker1[1:], '')
IsCommon = True
if Line.strip().endswith(Marker2):
Line = Line.replace(Marker2[1:], '')
if IsCommon:
ComBody.append(Line)
IsCommon = False
continue
if IsCommon:
ComBody.append(Line)
else:
TxtBody.append(Line)
return ComBody, TxtBody
def GetStructArrayInfo (self, Input):
ArrayStr = Input.split('[')
Name = ArrayStr[0]
if len(ArrayStr) > 1:
NumStr = ''.join(c for c in ArrayStr[-1] if c.isdigit())
NumStr = '1000' if len(NumStr) == 0 else NumStr
ArrayNum = int(NumStr)
else:
ArrayNum = 0
return Name, ArrayNum
    def PostProcessBody (self, TextBody, IncludeEmbedOnly = True):
        """Lift EMBED_STRUCT-marked regions out of the flat field list.

        Scans the generated struct body for '/* EMBED_STRUCT:...:START|END */'
        markers, collects each top-level embedded region into its own
        'typedef struct { ... } Name;' definition, and replaces the region in
        the outer body with a single member (or member array) declaration.
        '#define' lines are hoisted to the front.  Recurses once more when
        nested embeds were found.  Returns the rewritten line list.
        """
        NewTextBody = []
        OldTextBody = []
        IncTextBody = []
        StructBody = []
        IncludeLine = False
        LineIsDef = False
        EmbedFound = False
        StructName = ''
        ArrayVarName = ''
        VariableName = ''
        Count = 0
        Level = 0
        BaseOffset = 0
        IsCommonStruct = False

        # groups: 1=StructName 2=StructType 3=VarOrTag 4=START|END
        #         5=array marker 6=rest of line
        EmbedStructRe = re.compile("^/\*\sEMBED_STRUCT:([\w\[\]\*]+):([\w\[\]\*]+):(\w+):(START|END)([\s\d]+)\*/([\s\S]*)")

        for Line in TextBody:
            if Line.startswith('#define '):
                # Hoist #define lines to the top of the output.
                IncTextBody.append(Line)
                continue

            if not Line.startswith ('/* EMBED_STRUCT:'):
                Match = False
            else:
                Match = EmbedStructRe.match(Line)

            if Match:
                ArrayMarker = Match.group(5)
                if Match.group(4) == 'END':
                    Level -= 1
                    if Level == 0:
                        # Strip the marker; the END handler below closes
                        # the extracted struct.
                        Line = Match.group(6)
                else: # 'START'
                    Level += 1
                    if Level == 1:
                        Line = Match.group(6)
                    else:
                        # Nested embed: keep its marker and remember that a
                        # further recursion pass is needed.
                        EmbedFound = True
                        TagStr = Match.group(3)
                        if TagStr.startswith('TAG_'):
                            try:
                                TagVal = int(TagStr[4:], 16)
                            except:
                                TagVal = -1
                            # Tags below the platform minimum are "common"
                            # structures shared across platforms.
                            if (TagVal >= 0) and (TagVal < self._MinCfgTagId):
                                IsCommonStruct = True
                    if Level == 1:
                        # Entering a top-level embed: start capturing its body
                        # and emit a member declaration in the outer struct.
                        if IsCommonStruct:
                            Suffix = ' /* _COMMON_STRUCT_START_ */'
                        else:
                            Suffix = ''
                        StructBody = ['typedef struct {%s' % Suffix]
                        StructName = Match.group(1)
                        StructType = Match.group(2)
                        VariableName = Match.group(3)
                        MatchOffset = re.search('/\*\*\sOffset\s0x([a-fA-F0-9]+)', Line)
                        if MatchOffset:
                            Offset = int(MatchOffset.group(1), 16)
                        else:
                            Offset = None
                        IncludeLine = True
                        BaseOffset = Offset
                        ModifiedStructType = StructType.rstrip()
                        if ModifiedStructType.endswith(']'):
                            Idx = ModifiedStructType.index('[')
                            if ArrayMarker != ' ':
                                # Auto array size
                                OldTextBody.append('')
                                ArrayVarName = VariableName
                                # Count encodes state: <1000 auto-counts
                                # elements, >=1000 is a fixed size + 1000.
                                if int(ArrayMarker) == 1000:
                                    Count = 1
                                else:
                                    Count = int(ArrayMarker) + 1000
                            else:
                                if Count < 1000:
                                    Count += 1
                                VariableTemp = ArrayVarName + '[%d]' % (Count if Count < 1000 else Count - 1000)
                                OldTextBody[-1] = self.CreateField (None, VariableTemp, 0, Offset, ModifiedStructType[:Idx], '', 'Structure Array', '')
                        else:
                            ArrayVarName = ''
                            OldTextBody.append (self.CreateField (None, VariableName, 0, Offset, ModifiedStructType, '', '', ''))

            if IncludeLine:
                StructBody.append (Line)
            else:
                OldTextBody.append (Line)

            if Match and Match.group(4) == 'END':
                if Level == 0:
                    if (StructType != Match.group(2)) or (VariableName != Match.group(3)):
                        print ("Unmatched struct name '%s' and '%s' !" % (StructName, Match.group(2)))
                    else:
                        if IsCommonStruct:
                            Suffix = ' /* _COMMON_STRUCT_END_ */'
                        else:
                            Suffix = ''
                        Line = '} %s;%s\n\n\n' % (StructName, Suffix)
                        StructBody.append (Line)
                        # Only emit the struct definition once.
                        if (Line not in NewTextBody) and (Line not in OldTextBody):
                            NewTextBody.extend (StructBody)
                    IncludeLine = False
                    BaseOffset = 0
                    IsCommonStruct = False

        if not IncludeEmbedOnly:
            NewTextBody.extend(OldTextBody)

        if EmbedFound:
            # Process the nested embeds collected during this pass.
            NewTextBody = self.PostProcessBody (NewTextBody, False)

        NewTextBody = IncTextBody + NewTextBody
        return NewTextBody
def WriteHeaderFile (self, TxtBody, FileName, Type = 'h'):
FileNameDef = os.path.basename(FileName).replace ('.', '_')
FileNameDef = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', FileNameDef)
FileNameDef = re.sub('([a-z0-9])([A-Z])', r'\1_\2', FileNameDef).upper()
Lines = []
Lines.append ("%s\n" % GetCopyrightHeader(Type))
Lines.append ("#ifndef __%s__\n" % FileNameDef)
Lines.append ("#define __%s__\n\n" % FileNameDef)
if Type == 'h':
Lines.append ("#pragma pack(1)\n\n")
Lines.extend (TxtBody)
if Type == 'h':
Lines.append ("#pragma pack()\n\n")
Lines.append ("#endif\n")
# Don't rewrite if the contents are the same
Create = True
if os.path.exists(FileName):
HdrFile = open(FileName, "r")
OrgTxt = HdrFile.read()
HdrFile.close()
NewTxt = ''.join(Lines)
if OrgTxt == NewTxt:
Create = False
if Create:
HdrFile = open(FileName, "w")
HdrFile.write (''.join(Lines))
HdrFile.close()
    def CreateHeaderFile (self, HdrFileName, ComHdrFileName = ''):
        """Generate the C header(s) describing the configuration layout.

        Walks self._CfgItemList, emits one struct member per visible item
        (inserting ReservedUpdSpaceN / UnusedUpdSpaceN fillers for hidden
        items and gaps), expands embedded structures via PostProcessBody,
        splits common vs. platform content, and writes the result plus
        CDATA_*_TAG defines through WriteHeaderFile.
        Returns 0 on success, 4 on a malformed embed marker.
        """
        CommentLine = ''
        LastStruct = ''
        NextOffset = 0
        SpaceIdx = 0
        Offset = 0
        FieldIdx = 0
        LastFieldIdx = 0
        ResvOffset = 0
        ResvIdx = 0
        TxtBody = []
        LineBuffer = []
        CfgTags = []
        InRange = True
        LastVisible = True

        TxtBody.append("typedef struct {\n")
        for Item in self._CfgItemList:

            # Search for CFGDATA tags
            Embed = Item["embed"].upper()
            if Embed.endswith(':START'):
                Match = re.match (r'(\w+)_CFG_DATA:TAG_([0-9A-F]+):START', Embed)
                if Match:
                    TagName = Match.group(1)
                    TagId = int(Match.group(2), 16)
                    CfgTags.append ((TagId, TagName))

            # Only process visible items
            NextVisible = LastVisible

            if LastVisible and (Item['header'] == 'OFF'):
                # Entering a hidden region: remember where it starts.
                NextVisible = False
                ResvOffset = Item['offset']
            elif (not LastVisible) and Item['header'] == 'ON':
                # Leaving a hidden region: emit one reserved filler field.
                NextVisible = True
                Name = "ReservedUpdSpace%d" % ResvIdx
                ResvIdx = ResvIdx + 1
                TxtBody.append(self.CreateField (Item, Name, Item["offset"] - ResvOffset, ResvOffset, '', '', '', ''))
                FieldIdx += 1

            if Offset < Item["offset"]:
                if LastVisible:
                    # Fill the gap between consecutive items.
                    Name = "UnusedUpdSpace%d" % SpaceIdx
                    LineBuffer.append(self.CreateField (Item, Name, Item["offset"] - Offset, Offset, '', '', '', ''))
                    FieldIdx += 1
                    SpaceIdx = SpaceIdx + 1
                Offset = Item["offset"]

            LastVisible = NextVisible

            Offset = Offset + Item["length"]
            if LastVisible:
                # Flush any pending gap fillers first.
                for Each in LineBuffer:
                    TxtBody.append (Each)
                LineBuffer = []
                Comment = Item["comment"]
                Embed = Item["embed"].upper()
                if Embed.endswith(':START') or Embed.endswith(':END'):
                    # EMBED_STRUCT: StructName : ItemName : VariableName : START|END
                    Name, ArrayNum = self.GetStructArrayInfo (Item["struct"])
                    Remaining = Item["embed"]
                    # ' ' marks a continuation of the same struct array.
                    if (LastFieldIdx + 1 == FieldIdx) and (LastStruct == Name):
                        ArrayMarker = ' '
                    else:
                        ArrayMarker = '%d' % ArrayNum
                    LastFieldIdx = FieldIdx
                    LastStruct = Name
                    Marker = '/* EMBED_STRUCT:%s:%s%s*/ ' % (Name, Remaining, ArrayMarker)
                    if Embed.endswith(':START') and Comment != '':
                        Marker = '/* COMMENT:%s */ \n' % Item["comment"] + Marker
                else:
                    if Embed == '':
                        Marker = ''
                    else:
                        self.Error = "Invalid embedded structure format '%s'!\n" % Item["embed"]
                        return 4

                # Generate bit fields for structure
                if len(Item['subreg']) > 0 and Item["struct"]:
                    StructType = Item["struct"]
                    StructName, ArrayNum = self.GetStructArrayInfo (StructType)
                    if (LastFieldIdx + 1 == FieldIdx) and (LastStruct == Item["struct"]):
                        ArrayMarker = ' '
                    else:
                        ArrayMarker = '%d' % ArrayNum
                    # Wrap the bit fields in their own embed markers so
                    # PostProcessBody lifts them into a named struct.
                    TxtBody.append('/* EMBED_STRUCT:%s:%s:%s:START%s*/\n' % (StructName, StructType, Item["cname"], ArrayMarker))
                    for SubItem in Item['subreg']:
                        Name = SubItem["cname"]
                        if Name.startswith(Item["cname"]):
                            # Drop the parent prefix and the '_' separator.
                            Name = Name[len(Item["cname"]) + 1:]
                        Line = self.CreateField (SubItem, Name, SubItem["bitunit"], SubItem["offset"], SubItem['struct'], SubItem['name'], SubItem['help'], SubItem['option'], SubItem['bitlength'])
                        TxtBody.append(Line)
                    TxtBody.append('/* EMBED_STRUCT:%s:%s:%s:END%s*/\n' % (StructName, StructType, Item["cname"], ArrayMarker))
                    LastFieldIdx = FieldIdx
                    LastStruct = Item["struct"]
                    FieldIdx += 1
                else:
                    FieldIdx += 1
                    Line = Marker + self.CreateField (Item, Item["cname"], Item["length"], Item["offset"], Item['struct'], Item['name'], Item['help'], Item['option'])
                    TxtBody.append(Line)

        TxtBody.append("}\n\n")

        # Handle the embedded data structure
        TxtBody = self.PostProcessBody (TxtBody)
        ComBody, TxtBody = self.SplitTextBody (TxtBody)

        # Prepare TAG defines
        PltTagDefTxt = ['\n']
        ComTagDefTxt = ['\n']
        for TagId, TagName in sorted(CfgTags):
            TagLine = '#define %-30s 0x%03X\n' % ('CDATA_%s_TAG' % TagName, TagId)
            if TagId < self._MinCfgTagId:
                # TAG ID < 0x100, it is a generic TAG
                ComTagDefTxt.append (TagLine)
            else:
                PltTagDefTxt.append (TagLine)
        PltTagDefTxt.append ('\n\n')
        ComTagDefTxt.append ('\n\n')

        # Write file back
        self.WriteHeaderFile (PltTagDefTxt + TxtBody, HdrFileName)
        if ComHdrFileName:
            self.WriteHeaderFile (ComTagDefTxt + ComBody, ComHdrFileName)

        return 0
def UpdateConfigItemValue (self, Item, ValueStr):
IsArray = True if Item['value'].startswith('{') else False
IsString = True if Item['value'].startswith("'") else False
Bytes = self.ValueToByteArray(ValueStr, Item['length'])
if IsString:
NewValue = "'%s'" % Bytes.decode("utf-8")
elif IsArray:
NewValue = Bytes2Str(Bytes)
else:
Fmt = '0x%X' if Item['value'].startswith('0x') else '%d'
NewValue = Fmt % Bytes2Val(Bytes)
Item['value'] = NewValue
def LoadDefaultFromBinaryArray (self, BinDat):
BaseOff = 0
for Item in self._CfgItemList:
if Item['length'] == 0:
continue
if Item['find']:
Offset = BinDat.find (Item['find'].encode())
if Offset >= 0:
BaseOff = Offset
else:
raise Exception ('Could not find "%s" !' % Item['find'])
if Item['offset'] + Item['length'] > len(BinDat):
raise Exception ('Mismatching format between DSC and BIN files !')
ValStr = Bytes2Str(BinDat[BaseOff + Item['offset']:BaseOff + Item['offset']+Item['length']])
self.UpdateConfigItemValue (Item, ValStr)
self.UpdateDefaultValue()
def GenerateBinaryArray (self):
BinDat = bytearray()
Offset = 0
for Item in self._CfgItemList:
if Item['offset'] > Offset:
Gap = Item['offset'] - Offset
BinDat.extend(b'\x00' * Gap)
BinDat.extend(self.ValueToByteArray(Item['value'], Item['length']))
Offset = Item['offset'] + Item['length']
return BinDat
def GenerateBinary (self, BinFileName):
BinFile = open(BinFileName, "wb")
BinFile.write (self.GenerateBinaryArray ())
BinFile.close()
return 0
def GenerateDataIncFile (self, DatIncFileName, BinFile = None):
# Put a prefix GUID before CFGDATA so that it can be located later on
Prefix = b'\xa7\xbd\x7f\x73\x20\x1e\x46\xd6\xbe\x8f\x64\x12\x05\x8d\x0a\xa8'
if BinFile:
Fin = open (BinFile, 'rb')
BinDat = Prefix + bytearray(Fin.read())
Fin.close()
else:
BinDat = Prefix + self.GenerateBinaryArray ()
FileName = os.path.basename(DatIncFileName).upper()
FileName = FileName.replace('.', '_')
TxtLines = []
TxtLines.append ("UINT8 mConfigDataBlob[%d] = {\n" % len(BinDat))
Count = 0
Line = [' ']
for Each in BinDat:
Line.append('0x%02X, ' % Each)
Count = Count + 1
if (Count & 0x0F) == 0:
Line.append('\n')
TxtLines.append (''.join(Line))
Line = [' ']
if len(Line) > 1:
TxtLines.append (''.join(Line) + '\n')
TxtLines.append ("};\n\n")
self.WriteHeaderFile (TxtLines, DatIncFileName, 'inc')
return 0
def CheckCfgData (self):
# Check if CfgData contains any duplicated name
def AddItem (Item, ChkList):
Name = Item['cname']
if Name in ChkList:
return Item
if Name not in ['Dummy', 'Reserved', 'CfgHeader', 'CondValue']:
ChkList.append(Name)
return None
Duplicate = None
ChkList = []
for Item in self._CfgItemList:
Duplicate = AddItem (Item, ChkList)
if not Duplicate:
for SubItem in Item['subreg']:
Duplicate = AddItem (SubItem, ChkList)
if Duplicate:
break
if Duplicate:
break
if Duplicate:
self.Error = "Duplicated CFGDATA '%s' found !\n" % Duplicate['cname']
return -1
return 0
def PrintData (self):
for Item in self._CfgItemList:
if not Item['length']:
continue
print ("%-10s @Offset:0x%04X Len:%3d Val:%s" % (Item['cname'], Item['offset'], Item['length'], Item['value']))
for SubItem in Item['subreg']:
print (" %-20s BitOff:0x%04X BitLen:%-3d Val:%s" % (SubItem['cname'], SubItem['bitoffset'], SubItem['bitlength'], SubItem['value']))
def FormatArrayValue (self, Input, Length):
Dat = self.ValueToByteArray(Input, Length)
return ','.join('0x%02X' % Each for Each in Dat)
def GetItemOptionList (self, Item):
TmpList = []
if Item['type'] == "Combo":
if not Item['option'] in self._BuidinOption:
OptList = Item['option'].split(',')
for Option in OptList:
Option = Option.strip()
try:
(OpVal, OpStr) = Option.split(':')
except:
raise Exception("Invalid option format '%s' !" % Option)
TmpList.append((OpVal, OpStr))
return TmpList
    def WriteBsfStruct (self, BsfFd, Item):
        """Write one StructDef entry for Item into the BSF file.

        Emits '$<space>_<cname>  <N> bits|bytes $_DEFAULT_ = <value>' and
        returns the item's (value, text) option list so the caller can emit
        the matching List blocks later.
        """
        if Item['type'] == "None":
            # Items with no type fall back to the FSP token space.
            Space = "gPlatformFspPkgTokenSpaceGuid"
        else:
            Space = Item['space']
        Line = " $%s_%s" % (Space, Item['cname'])
        Match = re.match("\s*(\{.+\})\s*", Item['value'])
        if Match:
            # Array default values are rendered as comma separated bytes.
            DefaultValue = self.FormatArrayValue (Match.group(1).strip(), Item['length'])
        else:
            DefaultValue = Item['value'].strip()
        # Bit-field sub-items carry 'bitlength'; whole items use 'length'.
        if 'bitlength' in Item:
            if Item['bitlength']:
                BsfFd.write(" %s%s%4d bits $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['bitlength'], DefaultValue))
        else:
            if Item['length']:
                BsfFd.write(" %s%s%4d bytes $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['length'], DefaultValue))
        return self.GetItemOptionList (Item)
def GetBsfOption (self, OptionName):
if OptionName in self._CfgOptsDict:
return self._CfgOptsDict[OptionName]
else:
return OptionName
    def WriteBsfOption (self, BsfFd, Item):
        """Write one BSF page option (Combo/EditNum/EditText/Table) for Item.

        Builds the option lines in BsfLines first so that condition
        directives from Item['condition'] can be inserted before ('#...')
        or appended after ('@#...') the option, then writes everything to
        BsfFd.
        """
        PcdName = Item['space'] + '_' + Item['cname']
        WriteHelp = 0
        BsfLines = []
        if Item['type'] == "Combo":
            if Item['option'] in self._BuidinOption:
                Options = self._BuidinOption[Item['option']]
            else:
                # Custom option lists are referenced by their shared name.
                Options = self.GetBsfOption (PcdName)
            BsfLines.append (' %s $%s, "%s", &%s,\n' % (Item['type'], PcdName, Item['name'], Options))
            WriteHelp = 1
        elif Item['type'].startswith("EditNum"):
            # EditNum, HEX|DEC, (min, max)
            Match = re.match("EditNum\s*,\s*(HEX|DEC)\s*,\s*\((\d+|0x[0-9A-Fa-f]+)\s*,\s*(\d+|0x[0-9A-Fa-f]+)\)", Item['type'])
            if Match:
                BsfLines.append (' EditNum $%s, "%s", %s,\n' % (PcdName, Item['name'], Match.group(1)))
                WriteHelp = 2
        elif Item['type'].startswith("EditText"):
            BsfLines.append (' %s $%s, "%s",\n' % (Item['type'], PcdName, Item['name']))
            WriteHelp = 1
        elif Item['type'] == "Table":
            # Option string holds 'Name:Size:Format' column descriptors.
            Columns = Item['option'].split(',')
            if len(Columns) != 0:
                BsfLines.append(' %s $%s "%s",' % (Item['type'], PcdName, Item['name']))
                for Col in Columns:
                    Fmt = Col.split(':')
                    if len(Fmt) != 3:
                        raise Exception("Column format '%s' is invalid !" % Fmt)
                    try:
                        Dtype = int(Fmt[1].strip())
                    except:
                        raise Exception("Column size '%s' is invalid !" % Fmt[1])
                    BsfLines.append('\n Column "%s", %d bytes, %s' % (Fmt[0].strip(), Dtype, Fmt[2].strip()))
                BsfLines.append(',\n')
                WriteHelp = 1

        if WriteHelp > 0:
            # '\n\r' escapes in the help text start a new Help line.
            HelpLines = Item['help'].split('\\n\\r')
            FirstLine = True
            for HelpLine in HelpLines:
                if FirstLine:
                    FirstLine = False
                    BsfLines.append(' Help "%s"\n' % (HelpLine))
                else:
                    BsfLines.append(' "%s"\n' % (HelpLine))
            if WriteHelp == 2:
                # EditNum options also document their valid range.
                BsfLines.append(' "Valid range: %s ~ %s"\n' % (Match.group(2), Match.group(3)))

        if len(Item['condition']) > 4:
            CondList = Item['condition'].split(',')
            Idx = 0
            for Cond in CondList:
                Cond = Cond.strip()
                if Cond.startswith('#'):
                    # '#...' conditions go before the option lines.
                    BsfLines.insert(Idx, Cond + '\n')
                    Idx += 1
                elif Cond.startswith('@#'):
                    # '@#...' conditions go after them (leading '@' dropped).
                    BsfLines.append(Cond[1:] + '\n')

        for Line in BsfLines:
            BsfFd.write (Line)
    def WriteBsfPages (self, PageTree, BsfFd):
        """Recursively emit BSF Page/EndPage blocks from the page tree.

        PageTree is a single-key dict mapping a parent page to a list of
        child page dicts.  For each page, child pages are emitted first,
        then all items assigned to the page, sorted by their 'order'.
        """
        BsfFd.write('\n')
        Key = next(iter(PageTree))
        for Page in PageTree[Key]:
            PageName = next(iter(Page))
            BsfFd.write('Page "%s"\n' % self._CfgPageDict[PageName])
            # NOTE(review): this tests the parent's child list, which is
            # non-empty inside this loop; the recursion terminates because
            # leaf pages carry empty child lists -- confirm intent.
            if len(PageTree[Key]):
                self.WriteBsfPages (Page, BsfFd)

            BsfItems = []
            for Item in self._CfgItemList:
                if Item['name'] != '':
                    if Item['page'] != PageName:
                        continue
                    if len(Item['subreg']) > 0:
                        # Named bit fields are listed instead of the parent.
                        for SubItem in Item['subreg']:
                            if SubItem['name'] != '':
                                BsfItems.append(SubItem)
                    else:
                        BsfItems.append(Item)

            # Present items in their declared BSF order.
            BsfItems.sort(key=lambda x: x['order'])

            for Item in BsfItems:
                self.WriteBsfOption (BsfFd, Item)

            BsfFd.write("EndPage\n\n")
    def GenerateBsfFile (self, BsfFile):
        """Generate the complete BSF file for the parsed configuration.

        Writes the copyright header, global data definitions, the StructDef
        section (with Find/Skip directives derived from item offsets and bit
        fields), deduplicated option List blocks, the InfoBlock, and finally
        the page tree.  Returns 0 on success, 1 for an empty file name.
        """
        if BsfFile == '':
            self.Error = "BSF output file '%s' is invalid" % BsfFile
            return 1

        Error = 0
        OptionDict = {}
        BsfFd = open(BsfFile, "w")
        BsfFd.write("%s\n" % GetCopyrightHeader('bsf'))
        BsfFd.write("%s\n" % self._GlobalDataDef)
        BsfFd.write("StructDef\n")
        NextOffset = -1
        for Item in self._CfgItemList:
            if Item['find'] != '':
                # A Find directive re-anchors subsequent offsets.
                BsfFd.write('\n Find "%s"\n' % Item['find'])
                NextOffset = Item['offset'] + Item['length']
            if Item['name'] != '':
                if NextOffset != Item['offset']:
                    # Skip over the gap between the previous item and this one.
                    BsfFd.write(" Skip %d bytes\n" % (Item['offset'] - NextOffset))
                if len(Item['subreg']) > 0:
                    NextOffset = Item['offset']
                    BitsOffset = NextOffset * 8
                    for SubItem in Item['subreg']:
                        BitsOffset += SubItem['bitlength']
                        if SubItem['name'] == '':
                            # Unnamed fields become Skip directives.
                            if 'bitlength' in SubItem:
                                BsfFd.write(" Skip %d bits\n" % (SubItem['bitlength']))
                            else:
                                BsfFd.write(" Skip %d bytes\n" % (SubItem['length']))
                        else:
                            Options = self.WriteBsfStruct(BsfFd, SubItem)
                            if len(Options) > 0:
                                OptionDict[SubItem['space']+'_'+SubItem['cname']] = Options

                    # Pad out any remaining bits/bytes of the parent item.
                    NextBitsOffset = (Item['offset'] + Item['length']) * 8
                    if NextBitsOffset > BitsOffset:
                        BitsGap = NextBitsOffset - BitsOffset
                        BitsRemain = BitsGap % 8
                        if BitsRemain:
                            BsfFd.write(" Skip %d bits\n" % BitsRemain)
                            BitsGap -= BitsRemain
                        BytesRemain = BitsGap // 8
                        if BytesRemain:
                            BsfFd.write(" Skip %d bytes\n" % BytesRemain)
                    NextOffset = Item['offset'] + Item['length']
                else:
                    NextOffset = Item['offset'] + Item['length']
                    Options = self.WriteBsfStruct(BsfFd, Item)
                    if len(Options) > 0:
                        OptionDict[Item['space']+'_'+Item['cname']] = Options
        BsfFd.write("\nEndStruct\n\n")

        BsfFd.write("%s" % self._BuidinOptionTxt)

        # Emit each distinct option list once; identical lists are aliased.
        NameList = []
        OptionList = []
        for Each in sorted(OptionDict):
            if OptionDict[Each] not in OptionList:
                NameList.append(Each)
                OptionList.append (OptionDict[Each])
                BsfFd.write("List &%s\n" % Each)
                for Item in OptionDict[Each]:
                    BsfFd.write(' Selection %s , "%s"\n' % (self.EvaluateExpress(Item[0]), Item[1]))
                BsfFd.write("EndList\n\n")
            else:
                # Item has idential options as other item
                # Try to reuse the previous options instead
                Idx = OptionList.index (OptionDict[Each])
                self._CfgOptsDict[Each] = NameList[Idx]

        BsfFd.write("BeginInfoBlock\n")
        BsfFd.write(' PPVer "%s"\n' % (self._CfgBlkDict['ver']))
        BsfFd.write(' Description "%s"\n' % (self._CfgBlkDict['name']))
        BsfFd.write("EndInfoBlock\n\n")

        self.WriteBsfPages (self._CfgPageTree, BsfFd)

        BsfFd.close()
        return Error
def WriteDeltaLine (self, OutLines, Name, ValStr, IsArray):
if IsArray:
Output = '%s | { %s }' % (Name, ValStr)
else:
Output = '%s | 0x%X' % (Name, Array2Val(ValStr))
OutLines.append (Output)
def WriteDeltaFile (self, OutFile, PlatformId, OutLines):
DltFd = open (OutFile, "w")
DltFd.write ("%s\n" % GetCopyrightHeader('dlt', True))
DltFd.write ('#\n')
DltFd.write ('# Delta configuration values for platform ID 0x%04X\n' % PlatformId)
DltFd.write ('#\n\n')
for Line in OutLines:
DltFd.write ('%s\n' % Line)
DltFd.close()
def GenerateDeltaFile(self, DeltaFile, BinFile, Full=False):
Fd = open (BinFile, 'rb')
NewData = bytearray(Fd.read())
Fd.close()
OldData = self.GenerateBinaryArray()
return self.GenerateDeltaFileFromBin (DeltaFile, OldData, NewData, Full)
    def GenerateDeltaFileFromBin (self, DeltaFile, OldData, NewData, Full=False):
        """Diff two config blobs and write the differences as a delta file.

        DeltaFile -- output .dlt path
        OldData   -- baseline (default) blob bytes
        NewData   -- modified blob bytes; also loaded back into the items
        Full      -- when True, also emit unchanged named items
        The PlatformId line is always inserted first; a warning is printed
        when it matches the default.  Returns 0.
        """
        self.LoadDefaultFromBinaryArray (NewData)
        Lines = []
        TagName = ''
        Level = 0
        PlatformId = None
        DefPlatformId = 0
        for Item in self._CfgItemList:
            # Track which structure tag the current item belongs to.
            if Level == 0 and Item['embed'].endswith(':START'):
                TagName = Item['embed'].split(':')[0]
                Level += 1
            Start = Item['offset']
            End = Start + Item['length']
            FullName = '%s.%s' % (TagName, Item['cname'])
            if 'PLATFORMID_CFG_DATA.PlatformId' == FullName:
                DefPlatformId = Bytes2Val(OldData[Start:End])
            if NewData[Start:End] != OldData[Start:End] or (
                    Full and Item['name'] and (Item['cname'] != 'Dummy')):
                # Items outside any tagged structure are not addressable.
                if TagName == '':
                    continue
                ValStr = self.FormatDeltaValue (Item)
                if not Item['subreg']:
                    Text = '%-40s | %s' % (FullName, ValStr)
                    if 'PLATFORMID_CFG_DATA.PlatformId' == FullName:
                        # PlatformId is handled separately and inserted first.
                        PlatformId = Array2Val(Item['value'])
                    else:
                        Lines.append(Text)
                else:
                    if Full:
                        # Parent value emitted as a comment for reference.
                        Text = '## %-40s | %s' % (FullName, ValStr)
                        Lines.append(Text)
                    # Compare each bit field individually.
                    OldArray = OldData[Start:End]
                    NewArray = NewData[Start:End]
                    for SubItem in Item['subreg']:
                        NewBitValue = self.GetBsfBitFields(SubItem, NewArray)
                        OldBitValue = self.GetBsfBitFields(SubItem, OldArray)
                        if OldBitValue != NewBitValue or (
                                Full and Item['name'] and
                                (Item['cname'] != 'Dummy')):
                            if SubItem['cname'].startswith(Item['cname']):
                                # Drop the parent prefix and '_' separator.
                                Offset = len(Item['cname']) + 1
                                FieldName = '%s.%s' % (
                                    FullName, SubItem['cname'][Offset:])
                            ValStr = self.FormatDeltaValue (SubItem)
                            Text = '%-40s | %s' % (FieldName, ValStr)
                            Lines.append(Text)
            if Item['embed'].endswith(':END'):
                EndTagName = Item['embed'].split(':')[0]
                if EndTagName == TagName:
                    Level -= 1
        if PlatformId is None or DefPlatformId == PlatformId:
            PlatformId = DefPlatformId
            print("WARNING: 'PlatformId' configuration is same as default %d!"
                  % PlatformId)
        # The PlatformId override must come first in the delta file.
        Lines.insert(0, '%-40s | %s\n\n' %
                     ('PLATFORMID_CFG_DATA.PlatformId', '0x%04X' % PlatformId))
        self.WriteDeltaFile (DeltaFile, PlatformId, Lines)
        return 0
def GenerateDscFile (self, OutFile):
DscFd = open(OutFile, "w")
for Line in self._DscLines:
DscFd.write (Line + '\n')
DscFd.close ()
return 0
def Usage():
    """Print command line usage for the GenCfgData tool."""
    usage_lines = [
        "GenCfgData Version 0.01",
        "Usage:",
        " GenCfgData GENINC BinFile IncOutFile [-D Macros]",
        " GenCfgData GENPKL DscFile PklOutFile [-D Macros]",
        " GenCfgData GENINC DscFile[;DltFile] IncOutFile [-D Macros]",
        " GenCfgData GENBIN DscFile[;DltFile] BinOutFile [-D Macros]",
        " GenCfgData GENBSF DscFile[;DltFile] BsfOutFile [-D Macros]",
        " GenCfgData GENDLT DscFile[;BinFile] DltOutFile [-D Macros]",
        " GenCfgData GENDSC DscFile DscOutFile [-D Macros]",
        " GenCfgData GENHDR DscFile[;DltFile] HdrOutFile[;ComHdrOutFile] [-D Macros]"
    ]
    print ('\n'.join(usage_lines))
def Main():
    """Command line entry point: dispatch on the GENxxx sub-command.

    argv layout: <command> <input[;secondary]> <output> [-D macros...].
    Returns 0 on success, 1 when arguments are missing; all other errors
    are raised as exceptions.
    """
    #
    # Parse the options and args
    #
    argc = len(sys.argv)
    if argc < 4:
        Usage()
        return 1

    GenCfgData = CGenCfgData()
    Command = sys.argv[1].upper()
    OutFile = sys.argv[3]

    if argc > 5 and GenCfgData.ParseMacros(sys.argv[4:]) != 0:
        raise Exception ("ERROR: Macro parsing failed !")

    # The second argument may carry an optional delta file after ';'.
    FileList = sys.argv[2].split(';')
    if len(FileList) == 2:
        DscFile = FileList[0]
        DltFile = FileList[1]
    elif len(FileList) == 1:
        DscFile = FileList[0]
        DltFile = ''
    else:
        raise Exception ("ERROR: Invalid parameter '%s' !" % sys.argv[2])

    if Command == "GENDLT" and DscFile.endswith('.dlt'):
        # It needs to expand an existing DLT file
        DltFile = DscFile
        Lines = CGenCfgData.ExpandIncludeFiles (DltFile)
        OutTxt = ''.join ([x[0] for x in Lines])
        OutFile = open(OutFile, "w")
        OutFile.write (OutTxt)
        OutFile.close ()
        return 0;

    if not os.path.exists(DscFile):
        raise Exception ("ERROR: Cannot open file '%s' !" % DscFile)

    CfgBinFile = ''
    if DltFile:
        if not os.path.exists(DltFile):
            raise Exception ("ERROR: Cannot open file '%s' !" % DltFile)
        if Command == "GENDLT":
            # For GENDLT the secondary file is a binary, not a delta.
            CfgBinFile = DltFile
            DltFile = ''

    BinFile = ''
    if (DscFile.lower().endswith('.bin')) and (Command == "GENINC"):
        # It is binary file
        BinFile = DscFile
        DscFile = ''

    if BinFile:
        # Binary input short-circuits straight to the .inc generator.
        if GenCfgData.GenerateDataIncFile(OutFile, BinFile) != 0:
            raise Exception (GenCfgData.Error)
        return 0

    if DscFile.lower().endswith('.pkl'):
        # Load a previously pickled (marshal'ed) parse state.
        with open(DscFile, "rb") as PklFile:
            GenCfgData.__dict__ = marshal.load(PklFile)
    else:
        if GenCfgData.ParseDscFile(DscFile) != 0:
            raise Exception (GenCfgData.Error)

        if GenCfgData.CheckCfgData() != 0:
            raise Exception (GenCfgData.Error)

        if GenCfgData.CreateVarDict() != 0:
            raise Exception (GenCfgData.Error)

    if Command == 'GENPKL':
        # Save the parse state for faster subsequent runs.
        with open(OutFile, "wb") as PklFile:
            marshal.dump(GenCfgData.__dict__, PklFile)
        return 0

    if DltFile and Command in ['GENHDR','GENBIN','GENINC','GENBSF']:
        if GenCfgData.OverrideDefaultValue(DltFile) != 0:
            raise Exception (GenCfgData.Error)

    if GenCfgData.UpdateDefaultValue() != 0:
        raise Exception (GenCfgData.Error)

    #GenCfgData.PrintData ()

    if sys.argv[1] == "GENBIN":
        if GenCfgData.GenerateBinary(OutFile) != 0:
            raise Exception (GenCfgData.Error)

    elif sys.argv[1] == "GENHDR":
        # Output may name both a platform and a common header via ';'.
        OutFiles = OutFile.split(';')
        BrdOutFile = OutFiles[0].strip()
        if len(OutFiles) > 1:
            ComOutFile = OutFiles[1].strip()
        else:
            ComOutFile = ''
        if GenCfgData.CreateHeaderFile(BrdOutFile, ComOutFile) != 0:
            raise Exception (GenCfgData.Error)

    elif sys.argv[1] == "GENBSF":
        if GenCfgData.GenerateBsfFile(OutFile) != 0:
            raise Exception (GenCfgData.Error)

    elif sys.argv[1] == "GENDLT":
        if GenCfgData.GenerateDeltaFile(OutFile, CfgBinFile) != 0:
            raise Exception (GenCfgData.Error)

    elif sys.argv[1] == "GENDSC":
        if GenCfgData.GenerateDscFile(OutFile) != 0:
            raise Exception (GenCfgData.Error)

    else:
        raise Exception ("Unsuported command '%s' !" % Command)

    return 0
if __name__ == '__main__':
    # Propagate Main()'s integer status code to the shell.
    sys.exit(Main())
| [
"maurice.ma@intel.com"
] | maurice.ma@intel.com |
2ee24922d25333601d8c0ae29ee08155574c5471 | f9a2e67dd2f40b37d8ff81bf6cdce47c38d2dee4 | /.c9/metadata/environment/ib_miniprojects_backend/project_management_portal/views/list_of_tasks/tests/test_case_01.py | eaad758981ca57f8218412898bfb5a503d8ccf16 | [] | no_license | mohan277/backend_repo | 4eae065cf0fffa29866a2b549028cb8df4c97643 | 25dbb4d0f1c174b6da95f4c73737e49db9978429 | refs/heads/master | 2022-11-13T00:08:37.600743 | 2020-07-09T04:36:44 | 2020-07-09T04:36:44 | 278,259,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | {"filter":false,"title":"test_case_01.py","tooltip":"/ib_miniprojects_backend/project_management_portal/views/list_of_tasks/tests/test_case_01.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":8,"column":0},"end":{"row":8,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":{"row":7,"state":"qqstring3","mode":"ace/mode/python"}},"timestamp":1593184319725,"hash":"c8e01f69f3e83a1b79d4668a9f4a616916b96d22"} | [
"senammohanakrishna@gmail.com"
] | senammohanakrishna@gmail.com |
c58a77cf8ea4664d61a385917b75010f625b5d7e | 0f1c6902dcd6c067a059b4d155d21a885da07a94 | /pre_commit/languages/docker_image.py | 980c6ef3376f6398379306c27f11b5ffa1688a16 | [
"MIT"
] | permissive | andrewhare/pre-commit | 618985e86df8c2db1dde912ca98d9d20137bddea | 1c641b1c28ecc1005f46fdc76db4bbb0f67c82ac | refs/heads/master | 2021-01-06T23:30:11.927884 | 2020-02-18T18:53:53 | 2020-02-18T18:53:53 | 241,513,355 | 0 | 0 | MIT | 2020-02-19T02:20:45 | 2020-02-19T02:20:44 | null | UTF-8 | Python | false | false | 667 | py | from typing import Sequence
from typing import Tuple
from pre_commit.hook import Hook
from pre_commit.languages import helpers
from pre_commit.languages.docker import assert_docker_available
from pre_commit.languages.docker import docker_cmd
ENVIRONMENT_DIR = None
get_default_version = helpers.basic_get_default_version
healthy = helpers.basic_healthy
install_environment = helpers.no_install
def run_hook(
hook: Hook,
file_args: Sequence[str],
color: bool,
) -> Tuple[int, bytes]: # pragma: windows no cover
assert_docker_available()
cmd = docker_cmd() + hook.cmd
return helpers.run_xargs(hook, cmd, file_args, color=color)
| [
"asottile@umich.edu"
] | asottile@umich.edu |
4149bd0580c1c224bd8b0ae991d8f5da06923ea1 | 3123d95637dd6628e7cc58ec1711c965b8aa911c | /dataset_clustering/dataset_clustering/sample_map.py | bd65f8ddc77fe62f2d7ba26b1827233ef81e24f3 | [] | no_license | ConorFWild/pandda_2_tools | d9e27e33d231982dfaf09644a81a0259df332762 | 9b95c3005153a3ae8ba2bcffc699a07a139ca6df | refs/heads/master | 2021-05-24T11:52:30.107356 | 2020-04-06T16:01:28 | 2020-04-06T16:01:28 | 253,547,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | import numpy as np
import mdc3
from mdc3.types.real_space import interpolate_uniform_grid
class Sampler:
def __init__(self, xmap,
alignment_moving_to_ref,
res_centre_coords,
grid_params=[20, 20, 20],
offset=[10, 10, 10],
):
self.xmap = xmap
self.alignment_moving_to_ref = alignment_moving_to_ref
self.res_centre_coords = res_centre_coords
self.grid_params = grid_params
self.offset = offset
def __call__(self):
return sample_map(self.xmap,
self.alignment_moving_to_ref,
self.res_centre_coords,
self.grid_params,
self.offset,
)
def sample_map(xmap,
               alignment_moving_to_ref,
               res_centre_coords,
               grid_params=None,
               offset=None,
               ):
    """Interpolate ``xmap`` onto a uniform grid centred on a residue.

    :param xmap: crystallographic map to interpolate from.
    :param alignment_moving_to_ref: alignment whose ``rotran`` attribute
        holds the (rotation, translation) pair of the moving->reference
        transform.
    :param res_centre_coords: residue centre coordinates in the moving frame.
    :param grid_params: grid points per axis; defaults to [20, 20, 20].
    :param offset: half-extent subtracted (after rotation) from the origin
        so the residue sits mid-box; defaults to [10, 10, 10].
    :return: the sampled grid exported as a numpy array.
    """
    # Bug fix: the original used mutable default arguments, which are
    # shared across calls; create fresh defaults per call instead.
    if grid_params is None:
        grid_params = [20, 20, 20]
    if offset is None:
        offset = [10, 10, 10]

    # Build the translation that places the sampling origin in the moving
    # protein frame: alignment translation minus the residue centre, minus
    # the rotated half-extent so the residue ends up box-centred.
    moving_to_ref_translation = alignment_moving_to_ref.rotran[1]
    rotation = alignment_moving_to_ref.rotran[0]
    rotated_offset = np.matmul(rotation, offset)
    translation = moving_to_ref_translation - res_centre_coords - rotated_offset

    # Interpolate an NX map in the moving protein frame (inverse rotation
    # maps reference-frame grid axes back onto the moving frame).
    nxmap = interpolate_uniform_grid(xmap,
                                     translation,
                                     np.transpose(rotation),
                                     grid_params=grid_params,
                                     )
    return nxmap.export_numpy()
| [
"conor.wild@sky.com"
] | conor.wild@sky.com |
673dd580d8d3ff7e098704d6b2d67bf18eb392a0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03128/s516473124.py | 57bfe64ffea6df0a31f4b06a5dc26e6f1fbefec3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # ABC 118 D
def resolve():
N, M = map(int, input().split())
A = list(map(int, input().split()))
A.sort(reverse=True)
costs = [0, 2, 5, 5, 4, 5, 6, 3, 7, 6]
dp = [-1] * (N+1)
dp[0] = 0
for i in range(1, N+1):
for a in A:
if i - costs[a] < 0:
continue
dp[i] = max(dp[i-costs[a]] + 1, dp[i])
ans = ""
remain = dp[N]
match = N
minCosts = 10
for a in A:
minCosts = min(minCosts, costs[a])
while match > 0:
for a in A:
if match - costs[a] < 0 or 1<= match - costs[a] < minCosts:
continue
if dp[match-costs[a]] == remain-1:
ans += str(a)
match -= costs[a]
remain -= 1
break
print(ans)
if __name__ == "__main__":
    # Script entry point: read stdin, print the answer.
    resolve()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0c4046bc68016ff8a5b2fbb659c8fef091ab302e | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/healthcareapis/azure-mgmt-healthcareapis/azure/mgmt/healthcareapis/aio/operations/_private_endpoint_connections_operations.py | 5f74a513e84b21f5ac0ebd375fbf235578069a0f | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 23,283 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Callback signature accepted by every operation's `cls` keyword argument:
# it receives the pipeline response, the deserialized body and the headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
    """PrivateEndpointConnectionsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.healthcareapis.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client and (de)serializers shared by every operation below.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list_by_service(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs
    ) -> AsyncIterable["_models.PrivateEndpointConnectionListResult"]:
        """Lists all private endpoint connections for a service.
        :param resource_group_name: The name of the resource group that contains the service instance.
        :type resource_group_name: str
        :param resource_name: The name of the service instance.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.healthcareapis.models.PrivateEndpointConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnectionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-30"
        accept = "application/json"
        # Builds the GET request for the first page (no next_link) or a
        # follow-up page (next_link carries the full continuation URL).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_service.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=24, min_length=3),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorDetails, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/services/{resourceName}/privateEndpointConnections'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        **kwargs
    ) -> "_models.PrivateEndpointConnection":
        """Gets the specified private endpoint connection associated with the service.
        :param resource_group_name: The name of the resource group that contains the service instance.
        :type resource_group_name: str
        :param resource_name: The name of the service instance.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection associated
         with the Azure resource.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.healthcareapis.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-30"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=24, min_length=3),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/services/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        properties: "_models.PrivateEndpointConnection",
        **kwargs
    ) -> "_models.PrivateEndpointConnection":
        """Issue the initial PUT of the create_or_update long-running operation.

        Called once by :meth:`begin_create_or_update`; the poller drives the
        rest of the LRO from this response.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-30"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=24, min_length=3),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(properties, 'PrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/services/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        properties: "_models.PrivateEndpointConnection",
        **kwargs
    ) -> AsyncLROPoller["_models.PrivateEndpointConnection"]:
        """Update the state of the specified private endpoint connection associated with the service.
        :param resource_group_name: The name of the resource group that contains the service instance.
        :type resource_group_name: str
        :param resource_name: The name of the service instance.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection associated
         with the Azure resource.
        :type private_endpoint_connection_name: str
        :param properties: The private endpoint connection properties.
        :type properties: ~azure.mgmt.healthcareapis.models.PrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.healthcareapis.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                properties=properties,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=24, min_length=3),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        # Resolve the polling strategy: default ARM LRO polling, disabled, or
        # a caller-supplied polling method object.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/services/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        **kwargs
    ) -> None:
        """Issue the initial DELETE of the delete long-running operation.

        Called once by :meth:`begin_delete`; the poller drives the rest of
        the LRO from this response.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-30"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=24, min_length=3),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/services/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes a private endpoint connection.
        :param resource_group_name: The name of the resource group that contains the service instance.
        :type resource_group_name: str
        :param resource_name: The name of the service instance.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection associated
         with the Azure resource.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=24, min_length=3),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/services/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
"noreply@github.com"
] | yunhaoling.noreply@github.com |
c1eb625c0c3ac788a3bdba3802974dba9f5a8fec | 149baa65329d0e13ae3189b8127d2eff5f5fdf77 | /bot_ws/devel/lib/python2.7/dist-packages/rbt_baxter_msgs/msg/_AssemblyStates.py | a9e882da171b4221de92892bbf8f8e1754454571 | [] | no_license | mtbthebest/imitation_learning | 20b990aa7396fecbe5433c7703f353bf99fa5f2c | 4c08192e31062f69056cc36efffb7a2ce0264244 | refs/heads/master | 2020-05-21T17:05:26.567273 | 2019-09-16T12:20:19 | 2019-09-16T12:20:19 | 186,111,790 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,949 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rbt_baxter_msgs/AssemblyStates.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import rbt_baxter_msgs.msg
class AssemblyStates(genpy.Message):
_md5sum = "63427318d41dbd2077c105027ad82a2b"
_type = "rbt_baxter_msgs/AssemblyStates"
_has_header = False #flag to mark the presence of a Header object
_full_text = """string[] names
AssemblyState[] states
================================================================================
MSG: rbt_baxter_msgs/AssemblyState
bool ready # true if enabled and ready to operate, e.g., not homing
bool enabled # true if enabled
bool stopped # true if stopped -- e-stop asserted
bool error # true if a component of the assembly has an error
#
# The following are specific to the robot top-level assembly:
uint8 estop_button # One of the following:
uint8 ESTOP_BUTTON_UNPRESSED = 0 # Robot is not stopped and button is not pressed
uint8 ESTOP_BUTTON_PRESSED = 1
uint8 ESTOP_BUTTON_UNKNOWN = 2 # STATE_UNKNOWN when estop was asserted by a non-user source
uint8 ESTOP_BUTTON_RELEASED = 3 # Was pressed, is now known to be released, but robot is still stopped.
#
uint8 estop_source # If stopped is true, the source of the e-stop. One of the following:
uint8 ESTOP_SOURCE_NONE = 0 # e-stop is not asserted
uint8 ESTOP_SOURCE_USER = 1 # e-stop source is user input (the red button)
uint8 ESTOP_SOURCE_UNKNOWN = 2 # e-stop source is unknown
uint8 ESTOP_SOURCE_FAULT = 3 # MotorController asserted e-stop in response to a joint fault
uint8 ESTOP_SOURCE_BRAIN = 4 # MotorController asserted e-stop in response to a lapse of the brain heartbeat
"""
__slots__ = ['names','states']
_slot_types = ['string[]','rbt_baxter_msgs/AssemblyState[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
names,states
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AssemblyStates, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.names is None:
self.names = []
if self.states is None:
self.states = []
else:
self.names = []
self.states = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.names)
buff.write(_struct_I.pack(length))
for val1 in self.names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *val1))
else:
buff.write(struct.pack('<I%ss'%length, length, val1))
length = len(self.states)
buff.write(_struct_I.pack(length))
for val1 in self.states:
_x = val1
buff.write(_struct_6B.pack(_x.ready, _x.enabled, _x.stopped, _x.error, _x.estop_button, _x.estop_source))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.states is None:
self.states = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.states = []
for i in range(0, length):
val1 = rbt_baxter_msgs.msg.AssemblyState()
_x = val1
start = end
end += 6
(_x.ready, _x.enabled, _x.stopped, _x.error, _x.estop_button, _x.estop_source,) = _struct_6B.unpack(str[start:end])
val1.ready = bool(val1.ready)
val1.enabled = bool(val1.enabled)
val1.stopped = bool(val1.stopped)
val1.error = bool(val1.error)
self.states.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # NOTE: `numpy` is not referenced in this body -- this message has no
    # primitive-array fields to vectorize, so the logic matches serialize().
    try:
      # names: uint32 count, then uint32 length + UTF-8 bytes per string.
      length = len(self.names)
      buff.write(_struct_I.pack(length))
      for val1 in self.names:
        length = len(val1)
        if python3 or type(val1) == unicode:
          val1 = val1.encode('utf-8')
          length = len(val1)
        if python3:
          buff.write(struct.pack('<I%sB'%length, length, *val1))
        else:
          buff.write(struct.pack('<I%ss'%length, length, val1))
      # states: uint32 count, then six uint8 fields per entry.
      length = len(self.states)
      buff.write(_struct_I.pack(length))
      for val1 in self.states:
        _x = val1
        buff.write(_struct_6B.pack(_x.ready, _x.enabled, _x.stopped, _x.error, _x.estop_button, _x.estop_source))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # NOTE: `numpy` is not referenced in this body -- no primitive-array
    # fields exist in this message, so the logic matches deserialize().
    try:
      # Generated placeholder from the message generator.
      if self.states is None:
        self.states = None
      end = 0
      # names: uint32 count followed by length-prefixed UTF-8 strings.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.names = []
      for i in range(0, length):
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          val1 = str[start:end].decode('utf-8')
        else:
          val1 = str[start:end]
        self.names.append(val1)
      # states: uint32 count followed by six uint8 fields per entry.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.states = []
      for i in range(0, length):
        val1 = rbt_baxter_msgs.msg.AssemblyState()
        _x = val1
        start = end
        end += 6
        (_x.ready, _x.enabled, _x.stopped, _x.error, _x.estop_button, _x.estop_source,) = _struct_6B.unpack(str[start:end])
        # Coerce the four boolean flags back from uint8.
        val1.ready = bool(val1.ready)
        val1.enabled = bool(val1.enabled)
        val1.stopped = bool(val1.stopped)
        val1.error = bool(val1.error)
        self.states.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled Struct objects shared by the (de)serialization methods above:
# a uint32 struct supplied by genpy and "<6B" (little-endian, six uint8s)
# for the packed AssemblyState fields.
_struct_I = genpy.struct_I
_struct_6B = struct.Struct("<6B")
| [
"mtbthebest11@gmail.com"
] | mtbthebest11@gmail.com |
63d50ed88e8feba8f6c649ad99ca81e355a046ac | 6ddcb131e5f2806acde46a525ff8d46bfbe0990e | /enaml/backends/qt/qt_widget_component.py | 822390d1671b5cb169b41e7f6b48f38c0d128bb9 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agrawalprash/enaml | 5ce1823188eb51e5b83117ebee6c3655f53e5157 | 96828b254ac9fdfa2e5b6b31eff93a4933cbc0aa | refs/heads/master | 2021-01-15T23:35:21.351626 | 2012-09-05T03:40:07 | 2012-09-05T03:40:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,833 | py | #------------------------------------------------------------------------------
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from .qt.QtCore import QRect
from .qt.QtGui import QFrame, QWidgetItem, QApplication
from .qt_base_widget_component import QtBaseWidgetComponent
from .styling import q_color_from_color, q_font_from_font
from ...components.widget_component import AbstractTkWidgetComponent
from ...layout.geometry import Rect, Size, Pos
class QtWidgetComponent(QtBaseWidgetComponent, AbstractTkWidgetComponent):
    """ A Qt4 implementation of WidgetComponent.

    """
    #: A class attribute which indicates whether or not to use a
    #: QWidget item to compute the layout geometry. Subclasses
    #: should override as necessary to change the behavior. The
    #: default is True.
    use_widget_item_for_layout = True

    @property
    def _widget_item(self):
        """ A readonly cached property which returns the QWidgetItem
        for the underlying Qt widget.

        """
        try:
            res = self.__widget_item
        except AttributeError:
            # Lazily create and cache the QWidgetItem on first access.
            res = self.__widget_item = QWidgetItem(self.widget)
        return res

    def create(self, parent):
        """ Creates the underlying Qt widget. As necessary, subclasses
        should reimplement this method to create different types of
        widgets.

        """
        self.widget = QFrame(parent)

    def initialize(self):
        """ Initializes the attributes of the the Qt widget.

        """
        super(QtWidgetComponent, self).initialize()
        shell = self.shell_obj
        self.set_enabled(shell.enabled)
        # Only push colors/font down when the shell actually defines
        # them; otherwise the Qt defaults are kept.
        if shell.bgcolor:
            self.set_bgcolor(shell.bgcolor)
        if shell.fgcolor:
            self.set_fgcolor(shell.fgcolor)
        if shell.font:
            self.set_font(shell.font)

    def enable_updates(self):
        """ Enable rendering updates for the underlying Qt widget.

        """
        # Freezing updates on a top-level window seems to cause
        # flicker on OSX when the updates are reenabled. In that
        # case disable_updates() froze the children instead of the
        # window, so unfreeze the children here symmetrically. The
        # window widget itself was never frozen, so it needs no
        # re-enable.
        if self.widget.isWindow():
            for child in self.shell_obj.children:
                child.enable_updates()
        else:
            self.widget.setUpdatesEnabled(True)

    def disable_updates(self):
        """ Disable rendering updates for the underlying Qt widget.

        """
        # Freezing updates on a top-level window seems to cause
        # flicker on OSX when the updates are reenabled. In this
        # case, just freeze the children instead.
        if self.widget.isWindow():
            for child in self.shell_obj.children:
                child.disable_updates()
        else:
            self.widget.setUpdatesEnabled(False)

    def set_visible(self, visible):
        """ Show or hide the widget.

        """
        self.widget.setVisible(visible)

    def size_hint(self):
        """ Returns a (width, height) tuple of integers which represent
        the suggested size of the widget for its current state, ignoring
        any windowing decorations. This value is used by the layout
        manager to determine how much space to allocate the widget.

        """
        if self.use_widget_item_for_layout:
            size_hint = self._widget_item.sizeHint()
        else:
            size_hint = self.widget.sizeHint()
        return Size(size_hint.width(), size_hint.height())

    def layout_geometry(self):
        """ Returns the (x, y, width, height) to of layout geometry
        info for the internal toolkit widget. This should ignore any
        windowing decorations, and may be different than the value
        returned by geometry() if the widget's effective layout rect
        is different from its paintable rect.

        """
        if self.use_widget_item_for_layout:
            geo = self._widget_item.geometry()
        else:
            geo = self.widget.geometry()
        return Rect(geo.x(), geo.y(), geo.width(), geo.height())

    def set_layout_geometry(self, rect):
        """ Sets the layout geometry of the internal widget to the
        given x, y, width, and height values. The parameters passed
        are equivalent semantics to layout_geometry().

        """
        rect = QRect(*rect)
        if self.use_widget_item_for_layout:
            self._widget_item.setGeometry(rect)
        else:
            self.widget.setGeometry(rect)

    def geometry(self):
        """ Returns an (x, y, width, height) tuple of geometry info
        for the internal toolkit widget, ignoring any windowing
        decorations.

        """
        geom = self.widget.geometry()
        return Rect(geom.x(), geom.y(), geom.width(), geom.height())

    def set_geometry(self, rect):
        """ Sets the geometry of the internal widget to the given
        x, y, width, and height values, ignoring any windowing
        decorations.

        """
        self.widget.setGeometry(*rect)

    def min_size(self):
        """ Returns the hard minimum (width, height) of the widget,
        ignoring any windowing decorations. A widget will not be able
        to be resized smaller than this value

        """
        min_size = self.widget.minimumSize()
        return Size(min_size.width(), min_size.height())

    def set_min_size(self, size):
        """ Set the hard minimum width and height of the widget, ignoring
        any windowing decorations. A widget will not be able to be resized
        smaller than this value.

        """
        self.widget.setMinimumSize(*size)

    def max_size(self):
        """ Returns the hard maximum (width, height) of the widget,
        ignoring any windowing decorations. A widget will not be able
        to be resized larger than this value

        """
        max_size = self.widget.maximumSize()
        return Size(max_size.width(), max_size.height())

    def set_max_size(self, size):
        """ Set the hard maximum width and height of the widget, ignoring
        any windowing decorations. A widget will not be able to be resized
        larger than this value.

        """
        # The hard Qt limit is 16777215 (which is 2**24 - 1) and will
        # print warnings to the shell if we attempt to set a max size
        # over that amount. This can be attempted when a QtMainWindow
        # has a central widget size equal to max size, and it also has
        # a menu bar and other components. Clipping the max size like
        # this will not have an effect on layout computation and thus
        # is relatively safe.
        max_width, max_height = size
        max_width = min(max_width, 16777215)
        max_height = min(max_height, 16777215)
        self.widget.setMaximumSize(max_width, max_height)

    def size(self):
        """ Returns the size of the internal toolkit widget, ignoring any
        windowing decorations, as a (width, height) tuple of integers.

        """
        size = self.widget.size()
        return Size(size.width(), size.height())

    def resize(self, size):
        """ Resizes the internal toolkit widget according the given
        width and height integers, ignoring any windowing decorations.

        """
        self.widget.resize(*size)

    def pos(self):
        """ Returns the position of the internal toolkit widget as an
        (x, y) tuple of integers, including any windowing decorations.
        The coordinates should be relative to the origin of the widget's
        parent, or to the screen if the widget is toplevel.

        """
        pos = self.widget.pos()
        return Pos(pos.x(), pos.y())

    def move(self, pos):
        """ Moves the internal toolkit widget according to the given
        x and y integers which are relative to the origin of the
        widget's parent and includes any windowing decorations.

        """
        self.widget.move(*pos)

    def shell_enabled_changed(self, enabled):
        """ The change handler for the 'enabled' attribute on the shell
        object.

        """
        self.set_enabled(enabled)

    def shell_bgcolor_changed(self, color):
        """ The change handler for the 'bgcolor' attribute on the shell
        object. Sets the background color of the internal widget to the
        given color.

        """
        self.set_bgcolor(color)

    def shell_fgcolor_changed(self, color):
        """ The change handler for the 'fgcolor' attribute on the shell
        object. Sets the foreground color of the internal widget to the
        given color.

        """
        self.set_fgcolor(color)

    def shell_font_changed(self, font):
        """ The change handler for the 'font' attribute on the shell
        object. Sets the font of the internal widget to the given font.

        """
        self.set_font(font)

    def set_enabled(self, enabled):
        """ Enable or disable the widget.

        """
        self.widget.setEnabled(enabled)

    def set_bgcolor(self, color):
        """ Sets the background color of the widget to an appropriate
        QColor given the provided Enaml Color object.

        """
        widget = self.widget
        role = widget.backgroundRole()
        if not color:
            # Falsy color: restore the application default for this role.
            palette = QApplication.instance().palette(widget)
            qcolor = palette.color(role)
            # On OSX, the default color is rendered *slightly* off
            # so a simple workaround is to tell the widget not to
            # auto fill the background.
            widget.setAutoFillBackground(False)
        else:
            qcolor = q_color_from_color(color)
            # When not using qt style sheets to set the background
            # color, we need to tell the widget to auto fill the
            # background or the bgcolor won't render at all.
            widget.setAutoFillBackground(True)
        palette = widget.palette()
        palette.setColor(role, qcolor)
        widget.setPalette(palette)

    def set_fgcolor(self, color):
        """ Sets the foreground color of the widget to an appropriate
        QColor given the provided Enaml Color object.

        """
        widget = self.widget
        role = widget.foregroundRole()
        if not color:
            # Falsy color: restore the application default for this role.
            palette = QApplication.instance().palette(widget)
            qcolor = palette.color(role)
        else:
            qcolor = q_color_from_color(color)
        palette = widget.palette()
        palette.setColor(role, qcolor)
        widget.setPalette(palette)

    def set_font(self, font):
        """ Sets the font of the widget to an appropriate QFont given
        the provided Enaml Font object.

        """
        q_font = q_font_from_font(font)
        self.widget.setFont(q_font)
| [
"sccolbert@gmail.com"
] | sccolbert@gmail.com |
befdbf9d49ecbacc576d8f26b2fa4fffff3aa151 | 82f36fc7f70a63499c9c29d031dc7b07a4e4ab88 | /vi/mappings.py | 9ba29b7af38016b203d195ebaede821278469584 | [
"MIT"
] | permissive | theicfire/Vintageous | f89ca1463b452c6a53eb1bd26595149a23359197 | 43cf37ac2cc2494f6f73102e4157442b5ae56925 | refs/heads/master | 2021-01-17T22:54:34.991987 | 2015-01-23T18:45:44 | 2015-01-23T18:45:44 | 18,224,457 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,431 | py | from Vintageous.vi import utils
from Vintageous.vi.keys import mappings
from Vintageous.vi.keys import seq_to_command
from Vintageous.vi.keys import to_bare_command_name
from Vintageous.vi.keys import KeySequenceTokenizer
from Vintageous.vi.utils import modes
from Vintageous.vi.cmd_base import cmd_types
from Vintageous.vi import variables
# Global registry of user-defined key mappings, keyed first by mode and
# then by key sequence. Values are {'name': target, 'type': cmd_types.USER}
# records (populated via Mappings.add()).
_mappings = {
    modes.INSERT: {},
    modes.NORMAL: {},
    modes.VISUAL: {},
    modes.VISUAL_LINE: {},
    modes.OPERATOR_PENDING: {},
    modes.VISUAL_BLOCK: {},
    modes.SELECT: {},
}
class mapping_status:
    """Enumeration of mapping lookup outcomes."""

    # A prefix of some user mapping matched; more keys are required.
    INCOMPLETE = 1
    # The key sequence resolved to a full user mapping.
    COMPLETE = 2
class Mapping(object):
    """The result of looking up a key sequence in the user mappings.

    Attributes:
        head: The portion of the sequence consumed by the mapping.
        mapping: The name of the command the sequence maps to.
        tail: The unconsumed remainder of the sequence.
        status: One of the ``mapping_status`` constants.
    """

    def __init__(self, head, mapping, tail, status):
        # Public attribute names are part of the interface; keep them.
        self.head = head
        self.mapping = mapping
        self.tail = tail
        self.status = status

    @property
    def sequence(self):
        """The complete key sequence (head + tail).

        Raises ValueError when head/tail cannot be concatenated (i.e.
        no mapping was actually found).
        """
        try:
            combined = self.head + self.tail
        except TypeError:
            raise ValueError('no mapping found')
        return combined
class Mappings(object):
    """ Queries and mutates the global user-defined key mappings stored
    in the module-level `_mappings` registry, on behalf of a `state`
    object that supplies the current mode, partial key sequence and a
    logger.

    """
    def __init__(self, state):
        self.state = state

    def _get_mapped_seqs(self, mode):
        # All key sequences that have a user mapping in `mode`, sorted.
        return sorted(_mappings[mode].keys())

    def _find_partial_match(self, mode, seq):
        # Sequences in `mode` beginning with `seq` (includes exact matches).
        return list(x for x in self._get_mapped_seqs(mode)
                    if x.startswith(seq))

    def _find_full_match(self, mode, seq):
        # Return (sequence, mapping_record) for an exact match of `seq`
        # in `mode`, or (None, None) when no exact match exists.
        partials = self._find_partial_match(mode, seq)
        try:
            self.state.logger.info("[Mappings] checking partials {0} for {1}".format(partials, seq))
            name = list(x for x in partials if x == seq)[0]
            # FIXME: Possibly related to #613. We're not returning the view's
            # current mode.
            return (name, _mappings[mode][name])
        except IndexError:
            # No element of `partials` equals `seq` exactly.
            return (None, None)

    def expand(self, seq):
        # TODO: unimplemented placeholder.
        pass

    def expand_first(self, seq):
        # Resolve `seq` (or its shortest mapped prefix) to a Mapping.
        # Returns a COMPLETE Mapping on an exact/prefix hit, an
        # INCOMPLETE Mapping when `seq` is a proper prefix of some
        # mapping, or None when nothing matches.
        head = ''

        keys, mapped_to = self._find_full_match(self.state.mode, seq)
        if keys:
            self.state.logger.info("[Mappings] found full command: {0} -> {1}".format(keys, mapped_to))
            return Mapping(seq, mapped_to['name'], seq[len(keys):],
                           mapping_status.COMPLETE)

        # NOTE(review): this loop breaks after the first token that does
        # not fully match, so only a single-token head is ever tried --
        # confirm that is the intended behavior.
        for key in KeySequenceTokenizer(seq).iter_tokenize():
            head += key
            keys, mapped_to = self._find_full_match(self.state.mode, head)
            if keys:
                self.state.logger.info("[Mappings] found full command: {0} -> {1}".format(keys, mapped_to))
                return Mapping(head, mapped_to['name'], seq[len(head):],
                               mapping_status.COMPLETE)
            else:
                break

        if self._find_partial_match(self.state.mode, seq):
            self.state.logger.info("[Mappings] found partial command: {0}".format(seq))
            return Mapping(seq, '', '', mapping_status.INCOMPLETE)

        return None

    # XXX: Provisional. Get rid of this as soon as possible.
    def can_be_long_user_mapping(self, key):
        # Returns (True, <exact-match sequence or None>) when `key` is a
        # prefix of at least one user mapping; (False, True) otherwise.
        full_match = self._find_full_match(self.state.mode, key)
        partial_matches = self._find_partial_match(self.state.mode, key)
        if partial_matches:
            self.state.logger.info("[Mappings] user mapping found: {0} -> {1}".format(key, partial_matches))
            return (True, full_match[0])
        self.state.logger.info("[Mappings] user mapping not found: {0} -> {1}".format(key, partial_matches))
        return (False, True)

    # XXX: Provisional. Get rid of this as soon as possible.
    def incomplete_user_mapping(self):
        # Returns True when the current partial sequence is a strict
        # prefix of some user mapping; implicitly returns None otherwise.
        (maybe_mapping, complete) = \
            self.can_be_long_user_mapping(self.state.partial_sequence)
        if maybe_mapping and not complete:
            self.state.logger.info("[Mappings] incomplete user mapping {0}".format(self.state.partial_sequence))
            return True

    def resolve(self, sequence=None, mode=None, check_user_mappings=True):
        """
        Looks at the current global state and returns the command mapped to
        the available sequence. It may be a 'missing' command.

        @sequence
            If a @sequence is passed, it is used instead of the global state's.
            This is necessary for some commands that aren't name spaces but act
            as them (for example, ys from the surround plugin).
        @mode
            If different than `None`, it will be used instead of the global
            state's. This is necessary when we are in operator pending mode
            and we receive a new action. By combining the existing action's
            name with name of the action just received we could find a new
            action.

            For example, this is the case of g~~.
        """
        # we usually need to look at the partial sequence, but some commands do weird things,
        # like ys, which isn't a namespace but behaves as such sometimes.
        seq = sequence or self.state.partial_sequence
        seq = to_bare_command_name(seq)

        # TODO: Use same structure as in mappings (nested dicst).
        command = None
        if check_user_mappings:
            self.state.logger.info('[Mappings] checking user mappings')
            # TODO: We should be able to force a mode here too as, below.
            command = self.expand_first(seq)

        if command:
            self.state.logger.info('[Mappings] {0} equals command: {1}'.format(seq, command))
            return command
            # return {'name': command.mapping, 'type': cmd_types.USER}
        else:
            self.state.logger.info('[Mappings] looking up >{0}<'.format(seq))
            command = seq_to_command(self.state, seq, mode=mode)
            self.state.logger.info('[Mappings] got {0}'.format(command))
            return command

    def add(self, mode, new, target):
        # Register a user mapping for `mode`, expanding key variables
        # (e.g. leader keys) in the new sequence first.
        new = variables.expand_keys(new)
        _mappings[mode][new] = {'name': target, 'type': cmd_types.USER}

    def remove(self, mode, new):
        # Re-raise with a friendlier message when the mapping is absent.
        try:
            del _mappings[mode][new]
        except KeyError:
            raise KeyError('mapping not found')

    def clear(self):
        # NOTE(review): INSERT and SELECT mode mappings are not cleared
        # here -- confirm whether that is intentional.
        _mappings[modes.NORMAL] = {}
        _mappings[modes.VISUAL] = {}
        _mappings[modes.VISUAL_LINE] = {}
        _mappings[modes.VISUAL_BLOCK] = {}
        _mappings[modes.OPERATOR_PENDING] = {}
| [
"guillermo.lopez@outlook.com"
] | guillermo.lopez@outlook.com |
f3d28c67ce3c204281fba9eb03139289dcd94557 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02699/s566880500.py | 34fe2afaf3ca8701fbe528eff12accbc101faa8a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | n_sheep, n_wolves = map(int, input().split())
if n_sheep <= n_wolves:
print('unsafe')
else:
print('safe')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
67f6d7067c52342313b9cecb20a8d9484514d1d9 | 3ffeeae8a9a3245d8998d94aa08f680f00056cad | /669.修剪二叉搜索树.py | 1ea81092b2bc054b50373284e38fc7407cf9e7ad | [] | no_license | Ezi4Zy/leetcode | 6e293e5c07a7d8c3e38f9445ff24330134ef6c48 | 9d394cd2862703cfb7a7b505b35deda7450a692e | refs/heads/master | 2022-04-09T14:11:36.957861 | 2022-03-09T10:30:30 | 2022-03-09T10:30:30 | 57,290,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | #
# @lc app=leetcode.cn id=669 lang=python
#
# [669] 修剪二叉搜索树
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def trimBST(self, root, low, high):
        """Trim a BST so every node value lies within [low, high].

        :type root: TreeNode
        :type low: int
        :type high: int
        :rtype: TreeNode
        """
        if not root:
            return root
        # A node below the range can only have valid values in its
        # right subtree; above the range, only in its left subtree.
        if root.val < low:
            return self.trimBST(root.right, low, high)
        if root.val > high:
            return self.trimBST(root.left, low, high)
        # Node is in range: trim both subtrees and keep it.
        root.left = self.trimBST(root.left, low, high)
        root.right = self.trimBST(root.right, low, high)
        return root
# @lc code=end
| [
"Ezi4zy@163.com"
] | Ezi4zy@163.com |
6666429918f78cf1993fab4ef0d3566b40ccef19 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_059/ch72_2020_05_01_19_49_54_675529.py | 0dc9d236a9b58259317b460efad32d9f49830dd5 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | def lista_caracteres(x):
l = []
for i in range(len(x)):
l.append(x[i])
return l
| [
"you@example.com"
] | you@example.com |
5fbe7e26275eff558ad4458bfb01001b4d60fc28 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_33338.py | 0aa8b984c1e2deefa8ec5567f59cb26c119abed1 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,838 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((436.77, 574.993, 325.582), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((442.874, 557.277, 391.601), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((439.506, 528.639, 468.334), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((356.695, 472.979, 371.552), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((476.527, 486.406, 654.436), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((435.195, 568.021, 371.411), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((434.657, 569.04, 369.395), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((444.794, 594.407, 362.677), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((444.742, 608.58, 387.067), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((416.957, 608.782, 383.038), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((389.328, 613.44, 381.783), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((366.606, 630.007, 380.458), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((451.03, 573.008, 348.232), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((286.505, 691.499, 414.13), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((372.665, 622.88, 583.602), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((372.665, 622.88, 583.602), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((396.439, 622.861, 567.107), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((417.064, 621.772, 546.866), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((432.217, 618.413, 522.5), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((440.634, 611.344, 495.597), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((442.826, 601.092, 468.338), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((441.672, 589.394, 441.451), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((224.476, 665.76, 558.5), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((645.894, 504.689, 304.302), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((464.666, 567.646, 460.962), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((464.666, 567.646, 460.962), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((468.977, 539.173, 456.255), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((459.209, 512.049, 450.084), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((431.794, 502.397, 447.594), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((417.346, 519.931, 324.559), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((435.472, 480.868, 570.529), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((431.645, 536.209, 373.513), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((431.645, 536.208, 373.513), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((411.869, 547.026, 391.91), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((416.063, 571.281, 407.156), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((415.213, 597.412, 418.188), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((409.535, 623.51, 427.71), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((399.179, 645.117, 412.889), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((394.572, 647.213, 385.171), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((474.461, 622.31, 403.91), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((311.643, 667.017, 365.676), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((497.929, 582.805, 408.483), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((480.56, 562.927, 409.889), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((442.819, 519.859, 415.983), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((405.132, 476.789, 421.973), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((427.826, 460.216, 346.011), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((336.75, 431.692, 486.602), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((483.371, 573.045, 348.924), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((477.626, 552.486, 367.687), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((466.428, 533.098, 385.598), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((478.686, 520.211, 408.83), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((487.059, 506.019, 433.136), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((487.803, 495.401, 460.85), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((469.158, 542.387, 399.506), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((503.712, 448.813, 527.996), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
daa21c4b97b6966cef4a6950ad18f755e47dd0be | 8f6c9203b260d832860aa6e434ab668e170cfb1d | /test/conftest.py | e855b32ee851a344a6029c086ad23319e6cab159 | [
"Apache-2.0",
"FSFAP",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | igalic/mod_md | 66ad0938d4ca71f995f61a7d487ce5ade0d8c5cc | 105fca1a8a87c46c105cb9b38d13f998318c1db2 | refs/heads/master | 2023-08-15T11:01:24.290650 | 2021-10-19T12:27:17 | 2021-10-19T12:27:17 | 419,234,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,118 | py | import logging
import os
import time
from datetime import timedelta
import pytest
from md_certs import CertificateSpec, MDTestCA
from md_conf import HttpdConf
from md_env import MDTestEnv
from md_acme import MDPebbleRunner, MDBoulderRunner
def pytest_report_header(config, startdir):
    """Add a one-line mod_md environment summary to the pytest header."""
    env = MDTestEnv()
    return (
        f"mod_md: {env.md_version} "
        f"[apache: {env.get_httpd_version()}({env.prefix}), "
        f"mod_{env.get_ssl_type()}, ACME server: {env.acme_server}]"
    )
@pytest.fixture(scope="session")
def env(pytestconfig) -> MDTestEnv:
    """Session-wide test environment fixture.

    Sets up console logging, clears the Apache error log, creates a
    root test CA and issues the certificates the suite needs, then
    yields the environment. On teardown, reinstalls a plain config
    and stops Apache.
    """
    # Route INFO-level log records to the console for the whole session.
    level = logging.INFO
    console = logging.StreamHandler()
    console.setLevel(level)
    console.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    logging.getLogger('').addHandler(console)
    logging.getLogger('').setLevel(level=level)

    env = MDTestEnv(pytestconfig=pytestconfig)
    env.apache_error_log_clear()
    # One cert for localhost, one covering all test domains, and one
    # that is already expired (valid_to in the past) for expiry tests.
    cert_specs = [
        CertificateSpec(domains=['localhost'], key_type='rsa2048'),
        CertificateSpec(domains=env.domains, key_type='rsa4096'),
        CertificateSpec(domains=env.expired_domains, key_type='rsa2048',
                        valid_from=timedelta(days=-91),
                        valid_to=timedelta(days=-1)),
    ]
    ca = MDTestCA.create_root(name=env.http_tld,
                              store_dir=os.path.join(env.server_dir, 'ca'), key_type="rsa4096")
    ca.issue_certs(cert_specs)
    env.set_ca(ca)
    yield env
    # Teardown: restore a default config and make sure Apache stops.
    HttpdConf(env).install()
    assert env.apache_stop() == 0
    #env.apache_errors_check()
@pytest.fixture(scope="session")
def acme(env):
    """Provide the ACME test server selected by ``env.acme_server``.

    Yields a Pebble or Boulder runner (or None for an unknown server name)
    and stops the runner again when the session ends.
    """
    runner = None
    if env.acme_server == 'pebble':
        configs = {
            'default': os.path.join(env.server_dir, 'conf/pebble.json'),
            'eab': os.path.join(env.server_dir, 'conf/pebble-eab.json'),
        }
        runner = MDPebbleRunner(env, configs=configs)
    elif env.acme_server == 'boulder':
        runner = MDBoulderRunner(env)
    yield runner
    if runner is not None:
        runner.stop()
| [
"stefan.eissing@greenbytes.de"
] | stefan.eissing@greenbytes.de |
db2aa6414dc0fb9d7936579b117bce3f7587c9cc | ef4749b76a1dbb79e664c8fe1c3a13c6197c1557 | /seleniumbase/fixtures/constants.py | 37a036def8261360cebd61de644111f9964dfc17 | [
"MIT"
] | permissive | PrabhuLoganathan/Python-SeleniumBase | 78f493ad4433c33b9e3171036b9edf1f1c2fd336 | 86fc61ad083b6dd5c7cce18b7ecfb87f73f16f56 | refs/heads/master | 2021-09-02T03:27:20.327020 | 2017-12-24T23:40:42 | 2017-12-24T23:40:42 | 115,760,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | """
This class containts some frequently-used constants
"""
class Environment:
    """String labels for the deployment environments a test run can target."""
    QA = "qa"
    STAGING = "staging"
    PRODUCTION = "production"
    MASTER = "master"
    LOCAL = "local"
    TEST = "test"
class Files:
    """Folder names used for files downloaded (and archived) by tests."""
    DOWNLOADS_FOLDER = "downloaded_files"
    ARCHIVED_DOWNLOADS_FOLDER = "archived_files"
class ValidBrowsers:
    """Browser identifiers accepted by the framework.

    NOTE(review): "htmlunit" appears in Browser.VERSION/LATEST below but is
    deliberately absent here — presumably not selectable from the CLI.
    """
    valid_browsers = ["firefox", "ie", "edge", "safari", "chrome", "phantomjs"]
class Browser:
    """Canonical browser-name strings plus version-tracking maps.

    VERSION and LATEST map each browser name to the detected / latest
    version; both start out with every entry set to None.
    """
    FIREFOX = "firefox"
    INTERNET_EXPLORER = "ie"
    EDGE = "edge"
    SAFARI = "safari"
    GOOGLE_CHROME = "chrome"
    PHANTOM_JS = "phantomjs"
    HTML_UNIT = "htmlunit"
    # Build the two identical None-initialized maps with comprehensions
    # instead of spelling out every key twice.
    VERSION = {name: None for name in (
        "firefox", "ie", "edge", "safari", "chrome", "phantomjs", "htmlunit")}
    LATEST = {name: None for name in (
        "firefox", "ie", "edge", "safari", "chrome", "phantomjs", "htmlunit")}
class State:
    """Possible result states for a test case."""
    NOTRUN = "NotRun"
    ERROR = "Error"
    FAILURE = "Fail"
    PASS = "Pass"
    SKIP = "Skip"
    BLOCKED = "Blocked"
    DEPRECATED = "Deprecated"
"mdmintz@gmail.com"
] | mdmintz@gmail.com |
b2d7423066c6259571568c9d3141fefe06959f0d | cf06fdf4fada6a39c661b890c184d2a3dea9d23f | /programs/pgm0A_12.py | 88945b331e21e4f0f5437ce6c44e593feda16b48 | [
"Apache-2.0"
] | permissive | danielsunzhongyuan/python_practice | f11d582c2147ad3d07ace1b31e6d7ace1da31e7c | 79bc88db1c52ee2f5607f6f9fec1bbacea2804ff | refs/heads/master | 2021-01-25T13:12:07.950283 | 2019-06-12T09:36:02 | 2019-06-12T09:36:02 | 123,542,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | #
# This file contains the Python code from Program A.12 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by Bruno R. Preiss.
#
# Copyright (c) 2003 by Bruno R. Preiss, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm0A_12.txt
#
class Rectangle(GraphicalObject):
    """A rectangle graphical object defined by its center point, height and
    width (book example; GraphicalObject is defined elsewhere)."""
    def __init__(self, center, height, width):
        """Initialize with *center* (handled by GraphicalObject) and store
        the rectangle's dimensions."""
        super(Rectangle, self).__init__(center)
        self._height = height
        self._width = width
    def draw(self):
        """Placeholder; the drawing routine is intentionally unimplemented."""
        pass
| [
"sunzhongyuan@lvwan.com"
] | sunzhongyuan@lvwan.com |
27024f2ab6b8e5c801b2575bf0e11ff404c9d1a7 | 8457291a531a08e6249c674f6b4943388d110f77 | /程序员面试经典/面试题 16.01. 交换数字 .py | a2f484604c3ee24053fd93149faa8f6601c014d8 | [
"Apache-2.0"
] | permissive | oscarhscc/algorithm-with-python | e190511844aaeacff14381000e371945f74aba14 | 2d67e2da11045199b9ab6c0b2f01e239255196ef | refs/heads/master | 2020-12-03T12:31:37.764545 | 2020-11-13T03:01:04 | 2020-11-13T03:01:04 | 231,317,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | '''
编写一个函数,不用临时变量,直接交换numbers = [a, b]中a与b的值。
示例:
输入: numbers = [1,2]
输出: [2,1]
提示:
numbers.length == 2
'''
class Solution(object):
    def swapNumbers(self, numbers):
        """Swap the first two entries of *numbers* in place and return it.

        :type numbers: List[int]
        :rtype: List[int]
        """
        # Slice assignment swaps without a named temporary variable,
        # matching the tuple-swap of the original (IndexError for len < 2).
        numbers[:2] = [numbers[1], numbers[0]]
        return numbers
"noreply@github.com"
] | oscarhscc.noreply@github.com |
5c0f0d1e84727690497ad03f096246c4ec7d966e | 12fd778ef6756a3fcb6dc5fc2b59a0ba4a4cb911 | /aosd/downloader/update.py | 9eb2992674871673ee079c11be67eff8590bcf47 | [] | no_license | jevinskie/AOS-Downloader | ce66b024485d3a3eb05e89da8f7077cc96b49f2f | d956c8652f0a3caab3a56f441218b766b08e9130 | refs/heads/master | 2021-01-18T14:49:20.887713 | 2015-12-19T23:37:27 | 2015-12-19T23:37:27 | 57,457,504 | 1 | 0 | null | 2016-04-30T19:06:41 | 2016-04-30T19:06:41 | null | UTF-8 | Python | false | false | 1,151 | py | from ..helpers.logging_helper import logging_helper
import os
from .utilities import utilities
from .releases import releases
from .config import config
from .manager import manager
class update(object):
    """Refreshes the locally cached release-metadata plists."""
    @classmethod
    def fetch(cls):
        """Download hashes.plist and releases.plist, then one lookup plist
        per known release type (only if the release list was saved)."""
        logging_helper.getLogger().info('Updating package data...')
        hashes_plist_url = os.path.join(config.getUpdateURL(), 'hashes.plist')
        hashes_plist_path = utilities.getlookupplistpath('hashes')
        manager.DownloadFileFromURLToPath(hashes_plist_url, hashes_plist_path)
        release_plist_url = os.path.join(config.getUpdateURL(), 'releases.plist')
        release_plist_path = utilities.getreleaseplistpath()
        manager.DownloadFileFromURLToPath(release_plist_url, release_plist_path)
        # Fetch per-type plists only once the master release list exists on
        # disk (the download above may have failed silently).
        # Fix: idiomatic truth test instead of comparing to True with ==.
        if os.path.exists(release_plist_path):
            for release_type in releases.get():
                release_type_plist_url = os.path.join(config.getUpdateURL(), release_type + '.plist')
                release_type_plist_path = utilities.getlookupplistpath(release_type)
                manager.DownloadFileFromURLToPath(release_type_plist_url, release_type_plist_path)
| [
"me@samdmarshall.com"
] | me@samdmarshall.com |
e7a8ebdffb9c5478b1fc4030ede408f87828012e | 09fc5379e5ecafc66eee7eac5ba8cf51244c7aa9 | /min_max.py | a95e8b684a7360eda42ca80a1a91de96fbacd8cb | [] | no_license | aquibjamal/hackerrank_solutions | 8a9317afc3fc8c6b2f4d0542e5c979ad2c93f849 | bc27ea787743893c60d00c517ce133b6cea84942 | refs/heads/master | 2020-07-24T07:40:23.877863 | 2020-02-29T02:03:55 | 2020-02-29T02:03:55 | 207,850,323 | 0 | 0 | null | 2020-02-29T02:03:56 | 2019-09-11T15:50:47 | Python | UTF-8 | Python | false | false | 251 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 02:21:03 2019
@author: aquib
"""
import numpy

# HackerRank "Min and Max": read an N x M integer grid from stdin and print
# the maximum over the per-row minima.
rows, _cols = map(int, input().split())
grid = numpy.array([input().split() for _ in range(rows)], int)
print(grid.min(axis=1).max())
| [
"noreply@github.com"
] | aquibjamal.noreply@github.com |
ab0980d94ceabd9eb0d539c1426b2ae2af479c47 | 7610169c08da66ddb69084a01569e688c294b0a6 | /economic_indicators_spider.py | 74abcb32f31885089204969fd0517ab39e8f90c8 | [] | no_license | radoslawkrolikowski/financial-market-data-analysis | 87c96bb7202c0c13cbdc11e92d7115c9592f1c01 | 97ccee60c79608285b3865f806844ce81a257862 | refs/heads/master | 2023-08-23T12:39:09.065633 | 2022-03-17T07:55:36 | 2022-03-17T07:55:36 | 243,983,526 | 59 | 26 | null | 2023-07-23T07:13:45 | 2020-02-29T14:30:52 | Python | UTF-8 | Python | false | false | 9,145 | py | from billiard import Process
from scrapy import Spider
from scrapy import signals as scrapy_signals
from scrapy.crawler import Crawler
from twisted.internet import reactor
from kafka import KafkaProducer
from config import user_agent, time_zone, empty_ind_dict
from datetime import datetime
from collections import defaultdict
import logging
import re
import json
import pickle
# Set logger level
logging.basicConfig(level=logging.DEBUG)
class IndicatorCollectorPipeline:
    """Implementation of the Scrapy Pipeline that sends scraped and filtered indicator values
    through Kafka producer.

    Filtering encompasses removing scraped items that already have been sent to Kafka.

    Parameters
    ----------
    server: list
        List of Kafka brokers addresses.
    topic: str
        Specify Kafka topic to which the stream of data records will be published.
    current_dt: datetime.datetime()
        Timestamp of real-time data (EST).
    """
    def __init__(self, server, topic, current_dt):
        self.server = server
        self.topic = topic
        self.current_dt = current_dt
        # items_dict: items collected during this crawl.
        # prev_items: items published by earlier crawls, persisted in
        # items.pickle so duplicates are never re-sent across runs.
        self.items_dict = defaultdict()
        self.prev_items = defaultdict()
        # Read the dictionary of previously sent items; on the very first run
        # the pickle file does not exist, so create it with an empty dict.
        try:
            with open(r"items.pickle", "rb") as output_file:
                self.prev_items = pickle.load(output_file)
        except (OSError, IOError):
            with open(r"items.pickle", "wb") as output_file:
                pickle.dump(defaultdict(), output_file)
        # Instantiate Kafka producer; values are serialized as UTF-8 JSON.
        self.producer = KafkaProducer(bootstrap_servers=server,
                                      value_serializer=lambda x:
                                      json.dumps(x).encode('utf-8'))
    def process_item(self, item, spider):
        self.item = item
        # Create dictionary of current items (keyed by release time and event
        # name); setdefault keeps the first occurrence of a duplicate key.
        self.items_dict.setdefault((item['Schedule_datetime'], item['Event']), item)
    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy factory hook: constructor arguments come from the spider.
        return cls(server=crawler.spider.server,
                   topic=crawler.spider.topic,
                   current_dt=crawler.spider.current_dt)
    def close_spider(self, spider):
        # Extract only the new items by performing set difference operation
        new_items = [self.items_dict[k] for k in set(self.items_dict) - set(self.prev_items)]
        # Load economic indicators message template
        items_to_send = empty_ind_dict
        # Set template Timestamp to contain current datetime
        items_to_send["Timestamp"] = datetime.strftime(self.current_dt, "%Y-%m-%d %H:%M:%S")
        if new_items:
            # Remove "Schedule_datetime" and "Event" fields from new items,
            # and then insert them into message template (replace 0 values)
            for item in new_items:
                self.prev_items.setdefault((item['Schedule_datetime'], item['Event']), item)
                del item['Schedule_datetime']
                del item['Event']
                items_to_send.update(item)
            # Send new items through Kafka producer
            self.producer.send(topic=self.topic, value=items_to_send)
        self.producer.flush()
        self.producer.close()
        # Save sent items to file so the next run can skip them.
        with open(r"items.pickle", "wb") as output_file:
            pickle.dump(self.prev_items, output_file)
class EconomicIndicatorsSpiderSpider(Spider):
    """Implementation of the Scrapy Spider that extracts economic indicators from
    Investing.com Economic Calendar.

    Parameters
    ----------
    countries: list
        List of country names of which indicators will be scraped.
    importance: list
        List of indicator importance levels to use (possible values (1,2,3)).
    event_list: list
        List of economic indicators to scrap.
    current_dt: datetime.datetime()
        Timestamp of real-time data (EST).
    server: list
        List of Kafka brokers addresses.
    topic: str
        Specify Kafka topic to which the stream of data records will be published.

    Yields
    ------
    dict
        Dictionary that represents scraped item.
    """
    name = 'economic_indicators_spider'
    allowed_domains = ['www.investing.com']
    start_urls = ['https://www.investing.com/economic-calendar/']
    # Route every scraped item through the Kafka pipeline defined above.
    custom_settings = {
        'ITEM_PIPELINES': {
            'economic_indicators_spider.IndicatorCollectorPipeline': 100
        }
    }
    def __init__(self, countries, importance, event_list, current_dt, server, topic):
        super(EconomicIndicatorsSpiderSpider, self).__init__()
        self.countries = countries
        # The calendar page encodes importance as 'bull1'/'bull2'/'bull3'.
        self.importance = ['bull' + x for x in importance]
        self.event_list = event_list
        self.current_dt = current_dt
        self.server = server
        self.topic = topic
    def parse(self, response):
        events = response.xpath("//tr[contains(@id, 'eventRowId')]")
        for event in events:
            # Extract event datetime in format: '2019/11/26 16:30:00' (EST)
            datetime_str = event.xpath(".//@data-event-datetime").extract_first()
            if not datetime_str:
                continue
            event_datetime = datetime.strptime(datetime_str, "%Y/%m/%d %H:%M:%S")
            event_datetime = event_datetime.replace(tzinfo=time_zone['EST'])
            current_dt_str = datetime.strftime(self.current_dt, "%Y-%m-%d %H:%M:%S")
            # Return only events that passed (already released).
            if not self.current_dt >= event_datetime:
                continue
            country = event.xpath(".//td/span/@title").extract_first()
            importance_label = event.xpath(".//td[@class='left textNum sentiment noWrap']/@data-img_key")\
                .extract_first()
            # Filter on target countries and importance levels.
            if country not in self.countries or importance_label not in self.importance:
                continue
            # NOTE(review): an empty/None importance_label can never be in
            # self.importance, so the membership test above already skips it
            # and this warning branch looks unreachable — verify intent.
            if not importance_label:
                logging.warning("Empty importance label for: {} {}".format(country, datetime_str))
                continue
            event_name = event.xpath(".//td[@class='left event']/a/text()").extract_first()
            event_name = event_name.strip(' \r\n\t ')
            # Drop a trailing "(XXX)" period qualifier, e.g. "CPI (MoM)".
            event_name_regex = re.findall(r"(.*?)(?=.\([a-zA-Z]{3}\))", event_name)
            if event_name_regex:
                event_name = event_name_regex[0].strip()
            if event_name not in self.event_list:
                continue
            # Strip percent/magnitude suffixes ('%', 'M', 'B', 'K') from the
            # numeric cells before conversion.
            actual = event.xpath(".//td[contains(@id, 'eventActual')]/text()").extract_first().strip('%M BK')
            previous = event.xpath(".//td[contains(@id, 'eventPrevious')]/span/text()").extract_first().strip('%M BK')
            forecast = event.xpath(".//td[contains(@id, 'eventForecast')]/text()").extract_first().strip('%M BK')
            # '\xa0' (non-breaking space) marks an empty "actual" cell.
            if actual == '\xa0':
                continue
            previous_actual_diff = float(previous) - float(actual)
            # forecast may be empty; only compute the diff when present.
            if forecast != '\xa0':
                forecast_actual_diff = float(forecast) - float(actual)
            yield {'Timestamp': current_dt_str,
                   'Schedule_datetime': datetime_str,
                   'Event': event_name.replace(" ", "_"),
                   '{}'.format(event_name.replace(" ", "_")): {
                       'Actual': float(actual),
                       'Prev_actual_diff': previous_actual_diff,
                       'Forc_actual_diff': forecast_actual_diff if forecast != '\xa0' else None
                   }
                   }
class CrawlerScript(Process):
    """Runs Spider multiple times within one script by utilizing billiard package
    (tackle the ReactorNotRestartable error).

    Each crawl runs in its own child process so the Twisted reactor can be
    started fresh every time.

    Parameters
    ----------
    countries: list
        List of country names of which indicators will be scraped.
    importance: list
        List of indicator importance levels to use (possible values (1,2,3)).
    event_list: list
        List of economic indicators to scrap.
    current_dt: datetime.datetime()
        Timestamp of real-time data (EST).
    server: list
        List of Kafka brokers addresses.
    topic: str
        Specify Kafka topic to which the stream of data records will be published.
    """
    def __init__(self, countries, importance, event_list, current_dt, server, topic):
        Process.__init__(self)
        self.countries = countries
        self.importance = importance
        self.event_list = event_list
        self.current_dt = current_dt
        self.server = server
        self.topic = topic
        self.crawler = Crawler(
            EconomicIndicatorsSpiderSpider,
            settings={
                'USER_AGENT': user_agent
            }
        )
        # Stop the reactor as soon as the spider closes so run() returns.
        self.crawler.signals.connect(reactor.stop, signal=scrapy_signals.spider_closed)
    def run(self):
        # Executed in the child process: start the crawl, then block in the
        # reactor loop until the spider_closed signal stops it.
        self.crawler.crawl(self.countries, self.importance, self.event_list, self.current_dt,
                           self.server, self.topic)
        reactor.run()
def run_indicator_spider(countries, importance, event_list, current_dt, server, topic):
    """Run the economic-indicator crawl in a child process and block until
    the crawling is finished."""
    crawl_process = CrawlerScript(countries, importance, event_list, current_dt, server, topic)
    crawl_process.start()
    crawl_process.join()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
70a088e26aa79a50b83df760efc130cc5271b932 | 449175eb373ebc622221552e43b46c9378adb618 | /week_6/6210545611_lab6.py | 88a233dc9603dac335e2b915df12bb4ec5e7fc8a | [] | no_license | NutthanichN/grading-helper | 911d39211e070eafc9ffee6978f9270a0be38016 | 971c605effb4f59e9e22a32503337b3e671f120c | refs/heads/master | 2022-12-12T22:46:15.062462 | 2020-09-08T06:50:12 | 2020-09-08T06:50:12 | 293,724,530 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,290 | py |
# 1. Write a function called ll_sum that takes a list of lists of integers and adds up the elements from all of the
# nested lists. For example:
# >>> t = [[1, 2], [3], [4, 5, 6]]
# >>> ll_sum(t)
# 21
def ll_sum(t):
    """Add up every number in *t*, descending recursively into nested lists.

    >>> ll_sum([[1, 2], [3], [4, 5, 6]])
    21
    >>> ll_sum([2, 3, 4, [2, 4]])
    15
    """
    return sum(ll_sum(element) if type(element) == list else element
               for element in t)
# 2. Write a function called cumulative_sum that takes a list of numbers and returns the cumulative sum; that is, a
# new list where the ith element is the sum of the first i + 1 elements from the original list. For example:
# >>> t = [1, 2, 3]
# >>> cumulative_sum(t)
# [1, 3, 6]
def cumulative_sum(t):
    """Return the running totals of *t*; a nested list contributes the sum
    of all numbers inside it (at any depth).

    >>> cumulative_sum([1, 2, 3])
    [1, 3, 6]
    >>> cumulative_sum([2, [3], [5], 6])
    [2, 5, 10, 16]
    """
    def _flat_total(value):
        # Flattened sum of a (possibly nested) list; numbers pass through.
        if type(value) == list:
            return sum(_flat_total(v) for v in value)
        return value

    running = 0
    totals = []
    for element in t:
        running += _flat_total(element)
        totals.append(running)
    return totals
# 3. Write a function called middle that takes a list and returns a new list that contains all but the first and last
# elements. For example:
# >>> t = [1, 2, 3, 4]
# >>> middle(t)
# [2, 3]
def middle(t):
    """Return a new list holding every element of *t* except the first and
    the last; lists shorter than three elements yield [].

    >>> middle([1, 2, 3, 4])
    [2, 3]
    >>> middle([1, 2])
    []
    """
    return [t[index] for index in range(1, len(t) - 1)]
# 4. Write a function called chop that takes a list, modifies it by removing the first and last elements, and returns
# None. For example:
# >>> t = [1, 2, 3, 4]
# >>> chop(t)
# >>> t
# [2, 3]
def chop(t):
    """Remove the first and last elements of *t* in place; return None.

    >>> t = [1, 2, 3, 4]
    >>> chop(t)
    >>> t
    [2, 3]
    """
    # pop() raises IndexError for lists that are too short, just like the
    # original del-based version.
    t.pop(0)
    t.pop()
# 5. Write a function called is_sorted that takes a list as a parameter and returns True if the list is sorted in
# ascending order and False otherwise. For example:
# >>> is_sorted([1, 2, 2])
# True
# >>> is_sorted(['b', 'a'])
# False
def is_sorted(list):
    """Return True if *list* is already in ascending order, else False.

    >>> is_sorted([1, 2, 2])
    True
    >>> is_sorted(['b', 'a'])
    False
    """
    # The comparison itself is a bool — no need for the previous
    # if/else returning True/False explicitly.
    return list == sorted(list)
# 6. Given a list of strings, write a function front_x that returns a list with the strings in sorted order, except
# group all the strings that begin with 'x' first. For example:
# >>> l = ['mix', 'xyz', 'apple', 'xanadu', ‘aardvark']
# >>> front_x(l)
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
def front_x(list):
    """Return the strings of *list* sorted, except that every string
    beginning with 'x' comes first (itself in sorted order).

    Fix: the previous version called ``list.remove`` on its own argument,
    so the caller's list silently lost all of its x-words. The input is
    now left unmodified.

    >>> front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark'])
    ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
    >>> front_x(['x', 'xa', 'xz'])
    ['x', 'xa', 'xz']
    """
    x_words = [word for word in list if word.startswith('x')]
    others = [word for word in list if not word.startswith('x')]
    return sorted(x_words) + sorted(others)
# 7. Create a function even_only(list) that will take a list of integers, and return a new list with only even
# numbers.
# >>> even_only([3,1,4,1,5,9,2,6,5])
# [4, 2, 6]
def even_only(list):
    """Return a new list containing only the even integers of *list*.

    >>> even_only([3, 1, 4, 1, 5, 9, 2, 6, 5])
    [4, 2, 6]
    """
    return [number for number in list if number % 2 == 0]
# 8. Create a function love(text) that will change the second last word to “love”.
#
# >>> love("I like Python”)
# "I love Python”
# >>> love("I really like Python”)
# "I really love Python"
def love(text):
    """Return *text* with its second-to-last word replaced by "love".

    >>> love("I like Python")
    'I love Python'
    >>> love("I really like Python")
    'I really love Python'
    """
    # Straight replacement plus join — no need for the original's trick of
    # assigning a one-element list and re-concatenating slices.
    words = text.split(' ')
    words[-2] = 'love'
    return ' '.join(words)
# print(love('s ss'))
# 9. Two words are anagrams if you can rearrange the letters from one to spell the other. Write a function
# called is_anagram that takes two strings and returns True if they are anagrams.
# >>> is_anagram('arrange', 'Rear Nag’)
# >>> True
def is_anagram(x, y):
    """Return True when *x* and *y* are anagrams of each other, ignoring
    letter case and spaces.

    >>> is_anagram('arrange', 'Rear Nag')
    True
    >>> is_anagram('cat', 'rat')
    False
    """
    left = sorted(x.lower().replace(' ', ''))
    right = sorted(y.lower().replace(' ', ''))
    return left == right
# 10. Write a function called has_duplicates that takes a list and returns True if there is any element that
# appears more than once. It should not modify the original list.
# >>> has_duplicates([1, 2, 3, 4, 5])
# False
# >>> has_duplicates([1, 2, 3, 4, 5, 2])
# True
def has_duplicates(t):
    """Return True if any element of *t* appears more than once, else False.

    The input list is not modified.

    Fix: the previous version returned from the very first loop iteration,
    so after sorting it only ever compared the two smallest elements —
    e.g. [1, 2, 2] was wrongly reported duplicate-free — and lists with
    fewer than two elements fell through and returned None.

    >>> has_duplicates([1, 2, 3, 4, 5])
    False
    >>> has_duplicates([1, 2, 3, 4, 5, 2])
    True
    >>> has_duplicates([1, 2, 2])
    True
    >>> has_duplicates([2])
    False
    """
    seen = set()
    for element in t:
        if element in seen:
            return True
        seen.add(element)
    return False
# 11. Create a function average(nums) that returns the mean average of a list of numbers.
#
# >>> average([1, 1, 5, 5, 10, 8, 7])
# 5.285714285714286
def average(nums):
    """Return the arithmetic mean of *nums* (ZeroDivisionError if empty).

    >>> average([1, 1, 5, 5, 10, 8, 7])
    5.285714285714286
    """
    total = sum(nums)
    return total / len(nums)
# 12. Create a function centered_average(nums) that returns a "centered" average of a list of numbers,
# which is the mean average of the values that ignores the largest and smallest values in the list. If
# there are multiple copies of the smallest/largest value, pick just one copy.
#
# >>> centered_average([1, 1, 5, 5, 10, 8, 7])
# 5.2
def centered_average(nums):
    """Return the mean of *nums* after dropping one copy of the largest and
    one copy of the smallest value; 0 when fewer than three values remain.

    >>> centered_average([1, 1, 5, 5, 10, 8, 7])
    5.2
    >>> centered_average([1, 1, 1, 1])
    1.0
    >>> centered_average([4])
    0
    """
    # Evaluate max()/min() first so an empty list raises ValueError, exactly
    # like the original index-based implementation.
    highest = max(nums)
    lowest = min(nums)
    remaining = len(nums) - 2
    if remaining <= 0:
        return 0
    return (sum(nums) - highest - lowest) / remaining
# 13. Two sentences are a “reverse pair” if each is the reverse of the other. Write a function reverse_pair
# that returns the reverse pair of the input sentence.
# >>> reverse_pair("May the fourth be with you")
# "you with be fourth the May"
def reverse_pair(text):
    """Return the sentence *text* with its word order reversed.

    >>> reverse_pair("May the fourth be with you")
    'you with be fourth the May'
    """
    return ' '.join(text.split(' ')[::-1])
# 14.Given a list of strings, write a function match_ends that returns the count of the
# number of strings where the string length is 2 or more and the first and last
# chars of the string are the same.
# >>> match_ends(["Gingering", “hello","wow"]
# 2
def match_ends(string):
    """Count the strings of length >= 2 whose first and last characters are
    the same, ignoring case.

    >>> match_ends(["Gingering", "hello", "wow"])
    2
    """
    return sum(1 for word in string
               if len(word) >= 2 and word[0].upper() == word[-1].upper())
# 15.Given a list of numbers, write a function remove_adjacent that returns a list
# where all adjacent elements have been reduced to a single element.
# >>> remove_adjacent([1, 2, 2, 3])
# [1, 2, 3]
def remove_adjacent(list):
    """Return a copy of *list* with runs of equal adjacent values collapsed
    to a single element.

    >>> remove_adjacent([1, 2, 2, 3])
    [1, 2, 3]
    >>> remove_adjacent([3, 3, 4, 4, 5])
    [3, 4, 5]
    """
    # Keep an element whenever it differs from its predecessor.
    return [value for index, value in enumerate(list)
            if index == 0 or list[index - 1] != value]
"monaprom134@gmail.com"
] | monaprom134@gmail.com |
35607a345ddf69b06bdf279af5cf89bc391593ae | 46096a52c39cb6b32923e6a968a5538579e6d73b | /18nov20_tae.py | 9fad529a54004e123a2e73d140a7772b2d699b3d | [] | no_license | JoinNova/practice_py | 16671c4b91af77a492a089b1d7a48d029da91cf8 | 5fbd8ac0b5c46c68d2a84b7e35454982ae6d637a | refs/heads/master | 2020-04-16T18:19:02.709408 | 2019-01-15T08:35:23 | 2019-01-15T08:35:23 | 165,814,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | from selenium import webdriver
from bs4 import BeautifulSoup

driver = webdriver.Chrome()
driver.implicitly_wait(3)
driver.get('http://www.google.com')
# Fix: '.click' without parentheses only referenced the bound method and
# never performed the click — call it.
driver.find_element_by_xpath('//*[@id="tsf"]/div[2]/div[1]/div[1]/div/div[1]/input').click()
driver.find_element_by_name('q').send_keys('제주 여행지')
driver.find_element_by_xpath('//*[@id="tsf"]/div[2]/div/div[3]/center/input[1]').click()
driver.get('https://www.google.com/destination/map/topsights?q=%EC%A0%9C%EC%A3%BC+%EC%97%AC%ED%96%89%EC%A7%80&site=search&output=search&dest_mid=/m/01rffr&sa=X&ved=2ahUKEwjYuNLUhOLeAhUDzbwKHQA9CQsQzTooATAjegQIChAv')
driver.implicitly_wait(4)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
# Fix: parse the headings once instead of re-running findAll on every loop
# iteration, and slice so that fewer than 10 results no longer raises
# IndexError.
top_sights = soup.findAll("h2", {"class": "NbdpWc"})
for namelist in top_sights[:10]:
    print(namelist)
| [
"noreply@github.com"
] | JoinNova.noreply@github.com |
1ac32f1684de191dbb46b89dec66aee789ae73d8 | b7492b70e345e248f5562393ce4fe98e043ea1b9 | /data/config.py | c4c2e33bd25591173e8d4b7ae12282cf4b748bcd | [] | no_license | wobjtushisui/S3FD_pytorch | 3037176716d0b29d1f86f43da133614d451f619b | d5616dc1a08d11a670497b68a9a26cbd97adaed1 | refs/heads/master | 2020-12-29T05:04:08.873873 | 2019-11-18T09:13:56 | 2019-11-18T09:13:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,085 | py | #-*- coding:utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
from easydict import EasyDict
import numpy as np
_C = EasyDict()
cfg = _C
# data augument config
_C.expand_prob = 0.5
_C.expand_max_ratio = 4
_C.hue_prob = 0.5
_C.hue_delta = 18
_C.contrast_prob = 0.5
_C.contrast_delta = 0.5
_C.saturation_prob = 0.5
_C.saturation_delta = 0.5
_C.brightness_prob = 0.5
_C.brightness_delta = 0.125
_C.data_anchor_sampling_prob = 0.5
_C.min_face_size = 6.0
_C.apply_distort = True
_C.apply_expand = False
_C.img_mean = np.array([104., 117., 123.])[:, np.newaxis, np.newaxis].astype(
'float32')
_C.resize_width = 640
_C.resize_height = 640
_C.scale = 1 / 127.0
_C.anchor_sampling = True
_C.filter_min_face = True
# train config
#_C.LR_STEPS = (120, 198, 250)
_C.MAX_STEPS = 200000
_C.LR_STEPS = (80000,100000,120000)
_C.EPOCHES = 300
# anchor config
_C.FEATURE_MAPS = [160, 80, 40, 20, 10, 5]
_C.INPUT_SIZE = 640
_C.STEPS = [4, 8, 16, 32, 64, 128]
_C.ANCHOR_SIZES = [16, 32, 64, 128, 256, 512]
_C.CLIP = False
_C.VARIANCE = [0.1, 0.2]
# detection config
_C.NMS_THRESH = 0.3
_C.NMS_TOP_K = 5000
_C.TOP_K = 750
_C.CONF_THRESH = 0.05
# loss config
_C.NEG_POS_RATIOS = 3
_C.NUM_CLASSES = 2
_C.USE_NMS = True
# dataset config
_C.HOME = '/home/robin/datasets/widerface'
# hand config
_C.HAND = EasyDict()
_C.HAND.TRAIN_FILE = './data/hand_train.txt'
_C.HAND.VAL_FILE = './data/hand_val.txt'
_C.HAND.DIR = '/home/data/lj/egohands/'
_C.HAND.OVERLAP_THRESH = 0.35
# face config
_C.FACE = EasyDict()
_C.FACE.TRAIN_FILE = './data/face_train.txt'
_C.FACE.VAL_FILE = './data/face_val.txt'
_C.FACE.FDDB_DIR = '/home/data/lj/FDDB'
_C.FACE.WIDER_DIR = '/home/robin/datasets/widerface'
_C.FACE.AFW_DIR = '/home/data/lj/AFW'
_C.FACE.PASCAL_DIR = '/home/data/lj/PASCAL_FACE'
_C.FACE.OVERLAP_THRESH = [0.1, 0.35, 0.5]
# head config
_C.HEAD = EasyDict()
_C.HEAD.DIR = '/home/data/lj/VOCHead/'
_C.HEAD.OVERLAP_THRESH = [0.1, 0.35, 0.5]
| [
"jianzhnie@126.com"
] | jianzhnie@126.com |
4fe30dd6b39027cb6a61d01ba798443b17b266b1 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /tests/components/sensibo/conftest.py | 48c9317a5cb1150d6d6a59e6b99082a44cd6f666 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 2,342 | py | """Fixtures for the Sensibo integration."""
from __future__ import annotations
import json
from typing import Any
from unittest.mock import patch
from pysensibo import SensiboClient
from pysensibo.model import SensiboData
import pytest
from homeassistant.components.sensibo.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.core import HomeAssistant
from . import ENTRY_CONFIG
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
@pytest.fixture
async def load_int(hass: HomeAssistant, get_data: SensiboData) -> MockConfigEntry:
    """Set up the Sensibo integration in Home Assistant.

    All Sensibo API calls are patched so the setup runs fully offline; the
    device data comes from the `get_data` fixture.
    """
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        source=SOURCE_USER,
        data=ENTRY_CONFIG,
        entry_id="1",
        unique_id="username",
        version=2,
    )
    config_entry.add_to_hass(hass)
    # Patch the coordinator's data fetch plus the two util lookups used
    # during entry validation.
    with patch(
        "homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
        return_value=get_data,
    ), patch(
        "homeassistant.components.sensibo.util.SensiboClient.async_get_devices",
        return_value={"result": [{"id": "xyzxyz"}, {"id": "abcabc"}]},
    ), patch(
        "homeassistant.components.sensibo.util.SensiboClient.async_get_me",
        return_value={"result": {"username": "username"}},
    ):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    return config_entry
@pytest.fixture(name="get_data")
async def get_data_from_library(
    hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, load_json: dict[str, Any]
) -> SensiboData:
    """Retrieve data from upstream Sensibo library.

    Runs the fixture JSON through pysensibo's real parsing code so tests
    get a genuine SensiboData object without any network access.
    """
    client = SensiboClient("123467890", aioclient_mock.create_session(hass.loop))
    with patch("pysensibo.SensiboClient.async_get_devices", return_value=load_json):
        output = await client.async_get_devices_data()
    # Close the mocked aiohttp session explicitly to avoid a leak warning.
    await client._session.close()  # pylint: disable=protected-access
    return output
@pytest.fixture(name="load_json", scope="session")
def load_json_from_fixture() -> SensiboData:
    """Load the Sensibo JSON fixture file and return its parsed contents."""
    raw = load_fixture("data.json", "sensibo")
    return json.loads(raw)
| [
"noreply@github.com"
] | Adminiuga.noreply@github.com |
e7e9f173951e4d55eb410c0db31bc9387a456169 | 01733042e84a768b77f64ec24118d0242b2f13b8 | /uhd_restpy/testplatform/sessions/ixnetwork/topology/requestid_4bb823de2302ea46c48b53652c8059b5.py | 9127928006cf5c5a3d16daeb18104885ad73ebf9 | [
"MIT"
] | permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 1,793 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class RequestId(Base):
    """Required requestId resource.

    The resource is re-fetched from the server every time the property is
    accessed.
    """

    __slots__ = ()
    _SDM_NAME = 'requestId'
    _SDM_ATT_MAP = {
        'Count': 'count',
    }

    def __init__(self, parent):
        super(RequestId, self).__init__(parent)

    @property
    def Count(self):
        """
        Returns
        -------
        - number: total number of values
        """
        attribute_key = self._SDM_ATT_MAP['Count']
        return self._get_attribute(attribute_key)
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
ecc0b19eed4f3e7af2208b9c8c88f72640608edb | d0e083ce351b9680f1fe46a5216f605aefc47018 | /Everything/bp/bpp_pisces.py | 03e9ad09c8743b034684ec8c75f062b7fe84c4b5 | [] | no_license | bhishanpdl/Backups | ebfad64cc4f0a0999064f5f4dccf2320c7db8329 | e041a6c3f4a15c9e1f3eee88b6ea745fd0ea9add | refs/heads/master | 2020-04-27T09:18:32.444634 | 2019-03-06T19:51:29 | 2019-03-06T19:51:29 | 174,209,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,136 | py | #!/usr/bin/env python3
#!/usr/bin/env python3
# -*- coding: utf-8 -*-#
#
# Author : Bhishan Poudel; Physics PhD Student, Ohio University
# Date : Jun 19, 2017 Mon
# Last update : Jun 19, 2017 Mon
#
# Note: Put this file in /usr/local/bin/bpp
# sudo -H cp MYFILE.py /usr/local/bin/bpp
# sudo -H chmod a+rwx /usr/local/bin/bpp
#
# Then edit the ~/.bash_profile
# function bp () { bpp "$1" | pygmentize -f terminal256 -O style=autumn -g ;}
#
# Other options:
# function bp () { bpp "$1" | pygmentize -l python -f terminal256 -O style=autumn -g ;}
#
# Usage: (in the terminal) bp all we can also use bpp all without syntaxHigl.
# Usage: (in the terminal) bp hello
#
# Imports
import sys
import time
import subprocess
import os
# Global variables
today = time.strftime("%b %d, %Y %a") # Jun 22, 2017 Thu
today_var = time.strftime("%b").lower()+str(time.strftime("%d")) # jun22
# Utility functions
##=======================================================================
def copy_folder(frm, to):
    """Mirror directory ``frm`` onto ``to``, replacing any existing copy.

    No-op when ``frm`` is not an existing directory (same guard as before).
    ``to`` is removed first because shutil.copytree requires a non-existent
    destination.
    """
    import shutil  # not in this module's top-level imports; keep it local
    if os.path.isdir(frm):
        # Bug fix: rmtree raises FileNotFoundError on a missing path, so only
        # remove the destination when it actually exists.
        if os.path.exists(to):
            shutil.rmtree(to)
        shutil.copytree(frm, to)
##=======================================================================
def all():
v = r"""
Possible arguments are:
""".strip()
print(v)
def arange():
v = r"""
np.arange(,,step =)
""".strip()
print(v)
def arange2():
v = r"""
np.arange(1000,2000,step =1)
y = np.arange(1e-5,1e-5*1000+1e-5,step =1e-5)
""".strip()
print(v)
def bkp_diary():
    # Mirror the local Dropbox research diary onto the external backup volume.
    # Silently does nothing if the Dropbox path is missing (copy_folder only
    # acts when the source is an existing directory).
    frm = '/Users/poudel/Dropbox/Research_Diary'
    to = '/Volumes/bhishan/Research_Diary'
    copy_folder(frm,to)
def call():
v = r"""
import subprocess
commands = '''
echo a
echo b
'''
subprocess.call(commands,shell=True)
""".strip()
print(v)
def chunks():
v = r"""
chunks = [LST[i:i + SIZE] for i in range(0, len(LST), SIZE)]
""".strip()
print(v)
def chunks2():
v = r"""
import pandas as pd
import numpy as np
lst_ =
arr1 = np.array_split(lst_, 7)
df1 = pd.DataFrame(arr1).T
df1.to_csv('tmp.txt',sep='\t',index=None,header=None,float_format='%d')
!cat tmp.txt
""".strip()
print(v)
def class_1():
v = r"""
class Celsius:
def __init__(self, temp = 0):
self._temp = temp
def to_fahrenheit(self):
return (self.temp * 1.8) + 32
@property
def temp(self):
print("Getting value")
return self._temp
@temp.setter
def temp(self, value):
if value < -273:
raise ValueError("Temperature below -273 Celsius is not possible")
print("Setting value")
self._temp = value
c = Celsius()
c.temp = 37
print(c.to_fahrenheit())
""".strip()
print(v)
def color():
v = r"""
# <span style="color:blue"> July 2017 </span>
""".strip()
print(v)
def def2():
v = r'''
def ():
""" .""""
return None
'''.strip()
print(v)
def delete_files():
v = pathlib.Path('/Users/poudel/bin/delete_files.py').read_text().strip()
print(v)
def ds9_open_galaxies():
v = pathlib.Path('/Users/poudel/bin/ds9_open_galaxies.py').read_text().strip()
print(v)
def emoji():
v = r"""
http://apps.timwhitlock.info/emoji/tables/unicode
http://www.unicode.org/emoji/charts/full-emoji-list.html
RED APPLE (🍎): 🍎
GREEN APPLE (🍏): 🍏
BLUE HEART (💙): 💙
GREEN HEART (💚): 💚
YELLOW HEART (💛): 💛
PURPLE HEART (💜): 💜
GREEN BOOK (📗): 📗
BLUE BOOK (📘): 📘
ORANGE BOOK (📙): 📙
LARGE RED CIRCLE (🔴): 🔴
LARGE BLUE CIRCLE (🔵): 🔵
LARGE ORANGE DIAMOND (🔶): 🔶
LARGE BLUE DIAMOND (🔷): 🔷
SMALL ORANGE DIAMOND (🔸): 🔸
SMALL BLUE DIAMOND (🔹): 🔹
UP-POINTING RED TRIANGLE (🔺): 🔺
DOWN-POINTING RED TRIANGLE (🔻): 🔻
UP-POINTING SMALL RED TRIANGLE (🔼): 🔼
DOWN-POINTING SMALL RED TRIANGLE (🔽): 🔽
""".strip()
print(v)
def fileread():
v = r"""
infile = ''
x = np.genfromtxt(infile,delimiter=None,usecols=(0),dtype=float,unpack=True)
y = np.genfromtxt(infile,delimiter=None,usecols=(1),dtype=str,unpack=True)
print('{} {} {} {}'.format('\nFirst row : ', x[0], y[0],'\n ' ))
""".strip()
print(v)
def fileread2():
v = r"""
infile = ''
print('{} {} {} {}'.format('\nreading file : ', infile, ' ',' ' ))
x = np.genfromtxt(infile,delimiter=None,usecols=(0),dtype=str,unpack=True)
y,z = np.genfromtxt(infile,delimiter=None,usecols=(1,2),dtype=float,unpack=True)
print('{} {} {} {}'.format('First row : ', x[0], ' ','\n ' ))
""".strip()
print(v)
def fileread3():
v = r"""
# Read in a file
with open('','r')
k=0
col0=[]
col1=[]
for line in f:
if not line.startswith("#"):
row=line.split()
col0.append(float(row[0]))
col1.append(float(row[1]))
k = k+1
""".strip()
print(v)
def filereadpd():
v = r"""
infile = ''
colnames = ['c0', 'c1']
df = pd.read_csv(infile,sep='\s+', header = None,skiprows = 0,
comment='#',names=colnames,usecols=(0,1))
""".strip()
print(v)
def filewrite():
v = r"""
np.savetxt('.txt', np.array([x,y]).T,
delimiter=' ', comments='',
fmt=['%-7d', '%.7f'],
header='%-7s %+4s'%('x','y'))
""".strip()
print(v)
def filewrite2():
v = r"""
outfile = '.csv'
print('Creating : ', outfile)
with open(outfile,'w') as f:
# write header
header = '# x y '
print(header,file=f)
# write data
for i in range(len(x)):
print(x[i],y[i],sep=' ', file=f)
""".strip()
print(v)
def filewrite3():
v = r"""
mydata = '\n'.join('\t'.join(map(str,row)) for row in zip(x,y))
with open('.csv', 'w') as fo:
print(mydata, file=fo)
""".strip()
print(v)
def hello():
    """Print the canonical greeting to stdout."""
    greeting = 'Hello World!'
    print(greeting)
def h():
v = r'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Author : Bhishan Poudel, Physics PhD Student, Ohio University
# Date : Jul 04, 2017 Tue
# Last update :
def main():
"""Main Module."""
# Imports
if __name__ == '__main__':
main()
'''.strip()
print(v)
def ih():
v = today_var + " = '''" + """
*******************************************************************************
# =============================================================================""" + "\n# Date : %s\n"%today +\
"""
# Summary:
# =============================================================================
1.
2.
''';
""".strip()
print(v)
def interpolate():
v = r"""
# interpolation
from scipy import interpolate
print('Interpolating ...
')
xnew = np.linspace(,,num=)
ynew = interpolate.interp1d(x, y, kind='cubic')(xnew)
""".strip()
print(v)
def ipyh():
v = r"""
%matplotlib notebook
from IPython.display import display,HTML
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas import DataFrame as DF
""".strip()
print(v)
def linspace():
v = r"""
np.linspace(,,num=,endpoint=True)
""".strip()
print(v)
def lprc():
v = r"""
!vim -me -c ":syntax on" -c ":hardcopy" -c ":q"
""".strip()
print(v)
def maint():
v = r"""
##==============================================================================
## Main program
##==============================================================================
if __name__ == '__main__':
# Beginning time
begin_time,begin_ctime = time.time(), time.ctime()
# Run main program
main()
# Print the time taken
end_time,end_ctime = time.time(), time.ctime()
seconds = end_time - begin_time
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
print('\nBegin time: ', begin_ctime,'\nEnd time: ', end_ctime,'\n' )
print("Time taken: {0:.0f} days, {1:.0f} hours, \
{2:.0f} minutes, {3:f} seconds.".format(d, h, m, s))
""".strip()
print(v)
def markdown():
v = r"""
HORIZONTAL LINE
================
Three or more... ___ *** ---
SOURCE CODE HIGHLIGHT
=======================
```python
s = "Python syntax highlighting"
print s
```
BLOCKQUOTES
============
> Blockquotes are very handy in email to emulate reply text.
> This line is part of the same quote.
LINKS
======
[I'm an inline-style link](https://www.google.com)
[I'm a reference-style link][Arbitrary case-insensitive reference text]
[You can use numbers for reference-style link definitions][1]
Or leave it empty and use the [link text itself]
URLs and URLs in angle brackets will automatically get turned into links.
http://www.example.com or <http://www.example.com> and sometimes
example.com (but not on Github, for example).
Some text to show that the reference links can follow later.
[arbitrary case-insensitive reference text]: https://www.mozilla.org
[1]: http://slashdot.org
[link text itself]: http://www.reddit.com
IMAGES
=======
Here's our logo (hover to see the title text):
Inline-style:

Reference-style:
![alt text][logo]
[logo]: https://github.com/adam-p/markdown-here/raw/master/src/common/images/icon48.png "Logo Title Text 2"
YOUTUBE VIDEOS
==============
<a href="http://www.youtube.com/watch?feature=player_embedded&v=YOUTUBE_VIDEO_ID_HERE
" target="_blank"><img src="http://img.youtube.com/vi/YOUTUBE_VIDEO_ID_HERE/0.jpg"
alt="IMAGE ALT TEXT HERE" width="240" height="180" border="10" /></a>
TABLE
=====
Colons can be used to align columns.
| Tables | Are | Cool |
| ------------- |:-------------:| -----:|
| col 3 is | right-aligned | $1600 |
| col 2 is | centered | $12 |
| zebra stripes | are neat | $1 |
There must be at least 3 dashes separating each header cell.
The outer pipes (|) are optional, and you don't need to make the
raw Markdown line up prettily.
LISTS
======
1. First ordered list item
2. Another item
⋅⋅* Unordered sub-list.
1. Actual numbers don't matter, just that it's a number
⋅⋅1. Ordered sub-list
4. And another item.
⋅⋅⋅You can have properly indented paragraphs within list items. Notice the blank line above, and the leading spaces (at least one, but we'll use three here to also align the raw Markdown).
⋅⋅⋅To have a line break without a paragraph, you will need to use two trailing spaces.⋅⋅
⋅⋅⋅Note that this line is separate, but within the same paragraph.⋅⋅
⋅⋅⋅(This is contrary to the typical GFM line break behaviour, where trailing spaces are not required.)
* Unordered list can use asterisks
- Or minuses
+ Or pluses
TEXT FORMATTING
================
Emphasis, aka italics, with *asterisks* or _underscores_.
Strong emphasis, aka bold, with **asterisks** or __underscores__.
Combined emphasis with **asterisks and _underscores_**.
Strikethrough uses two tildes. ~~Scratch this.~~
""".strip()
print(v)
def notify():
v = r"""
osascript -e 'display notification "The program finished." with title "Nofitication"'
""".strip()
print(v)
def now():
v = r"""
print('\nCurrent time: ', time.ctime())
""".strip()
print(v)
def pandas():
v = r"""
gals = np.arange(302)
diff = gals * 2
df = DF(np.matrix([gals,diff]).T,
columns=['gal','diff'])
df = DF(list(zip(gals, diff)),
columns=['gal','diff'])
df.gal = df.gal.astype(int)
print(df.head())
print('\ndf.shape = ', df.shape)
""".strip()
print(v)
def plotall():
v = r"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Bhishan Poudel
# Date : Jun 19, 2017 Mon
# Imports
import matplotlib.pyplot as plt
import numpy as np
# data
x = np.arange(0,10,1)
y = np.exp(x)
# subplots
fig, ax = plt.subplots()
plt.plot(x,y,color='k',linestyle="--")
# title and axes labels
plt.title('title')
plt.xlabel('xlabel', fontsize=10)
plt.ylabel('ylabel', fontsize=10)
# axes limit
plt.xlim(0,6)
plt.ylim(0,1000)
# text marker
txt = r'$\mu=100,\ \sigma=15$'
plt.text(4, 500, txt)
# major ticks
plt.xticks(np.arange(min(x), max(x)+1, 2))
plt.yticks(np.arange(0, 1000+0.001, 200))
# minor ticks
x_minor_ticks = np.arange(min(x), max(x)+1, 1 )
y_minor_ticks = np.arange(0, 1000+0.001, 100)
ax.set_xticks(x_minor_ticks, minor=True)
ax.set_yticks(y_minor_ticks, minor=True)
# grid
plt.grid(False)
# save and show plot
#plt.savefig("fig1.png",dpi = 200, height = 14, width = 14)
plt.show()
""".strip()
print(v)
def plotfileall():
v = r"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Bhishan Poudel
# Date : May 23, 2016
# Imports
import numpy as np
import matplotlib.pyplot as plt
infile = 'bhishan.txt'
col0,col1 = np.loadtxt(infile, comments="#", skiprows=0, usecols=(0,1), unpack=True)
plt.plot(col0, col1)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Title')
plt.show()
""".strip()
print(v)
def replace_comma():
v = r"""
tr ',' '\t' < in.txt > out.txt
tr is a unix translate program.
examples:
'{}' '()' changes braces to parentheses.
echo "number 234" | tr -cd [:digit:] gives the digits and -d gives letters.
""".strip()
print(v)
def sphinx():
v = r"""
sphinx-quickstart -q -p My Project -a Bhishan Poudel -v 1 --ext-autodoc --ext-doctest --ext-viewcode
cd Project
cp ~/Applications/edit_sphinx_conf.py edit_sphinx_conf.py
mkdir html rst rst/_static
make html
sphinx-apidoc -o rst ../scripts
python3 edit_sphinx_conf.py
cp conf.py rst/
cp rst/modules.rst rst/index.rst
sphinx-build -b html rst html
cp -r html ../html
cd ../
rm -rf Project scripts/__pycache__ rst
open html/index.html
""".strip()
print(v)
def sphinx_edit_conf():
v = r"""
import time
import os
import subprocess
if os.path.isfile('conf.py'):
subprocess.call('mv conf.py conf2.py', shell=True)
with open('conf2.py', 'r') as f, open('conf.py','w') as fo:
for line in f.readlines():
olds = [r'# import os',
r'# import sys',
r"# sys.path.insert(0, os.path.abspath('.'))",
r"html_theme = 'alabaster'"]
news = [r'import os',
r'import sys',
r"sys.path.append('../scripts/')",
r"html_theme = 'default'"]
if olds[0] in line:
print(line.replace(olds[0], news[0]), file=fo, end='')
elif olds[1] in line:
print(line.replace(olds[1], news[1]), file=fo, end='')
elif olds[2] in line:
print(line.lstrip('#').lstrip(' '), file=fo, end='')
print(news[2], file=fo, end='')
elif olds[3] in line:
print(line.replace(olds[3], news[3]), file=fo, end='')
else:
print(line, file=fo, end='')
os.remove('conf2.py')
""".strip()
print(v)
# Note subplots is after plotallfile, do not insert other programs inbetwix
def subplots():
v = r"""
plt.subplot(2, 1, 1) # rows, columns, and plot number
plt.plot(x1, y1, 'yo-')
plt.title('')
plt.ylabel('plot 1')
plt.subplot(2, 1, 2)
plt.plot(x2, y2, 'r.-')
plt.xlabel('')
plt.ylabel('plot 2')
plt.show()
""".strip()
print(v)
def subplotsall():
v = r"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Bhishan Poudel
# Date : Jun 19, 2017 Mon
# Imports
import numpy as np
import matplotlib.pyplot as plt
x1 = np.linspace(0.0, 5.0)
x2 = np.linspace(0.0, 2.0)
y1 = np.cos(2 * np.pi * x1) * np.exp(-x1)
y2 = np.cos(2 * np.pi * x2)
plt.subplot(2, 1, 1) # rows, columns, and plot number
plt.plot(x1, y1, 'yo-')
plt.title('one plot 2 subplots')
plt.ylabel('plot 1')
plt.subplot(2, 1, 2)
plt.plot(x2, y2, 'r.-')
plt.xlabel('time (s)')
plt.ylabel('plot 2')
plt.show()
""".strip()
print(v)
def parallel():
v = r"""
from multiprocessing import Process
def func1():
def func2():
def func3():
def func4():
def runInParallel(*fns):
proc = []
for fn in fns:
p = Process(target=fn)
p.start()
proc.append(p)
for p in proc:
p.join()
# Running parallel
runInParallel(func1, func2,func3,func4)
""".strip()
print(v)
def parallel2():
v = r"""
from joblib import Parallel, delayed
import multiprocessing as mp
# function
def my_func(i):
return i * i
# run in parallel
num_cores = mp.cpu_count()
args = range(10)
results = Parallel(n_jobs=num_cores)(delayed(my_func)(i) for i in args)
# print
print(list(range(10)))
print(results)
""".strip()
print(v)
def replace():
v = r"""
##==============================================================================
## replace_outdir
##==============================================================================
def replace_outdir(outdir):
# imports
import shutil,os
if os.path.exists(outdir):
print('Replacing folder: ', outdir)
shutil.rmtree(outdir)
os.makedirs(outdir)
else:
print('Making new folder: ', outdir)
os.makedirs(outdir)
outdir = ''
replace_outdir(outdir)
""".strip()
print(v)
def roundoff():
v = r"""
float(str(round(value, 1)))
""".strip()
print(v)
def run_process():
v = r'''
def run_process(name, args,):
""" Run another program from this program.
Example:
run_process("Running example.py", ["python3", 'example.py', 'arg1' ])
"""
process = subprocess.Popen(args)
process.communicate()
if process.returncode != 0:
print("Error: %s did not terminate correctly. \
Return code: %i."%(name, process.returncode))
sys.exit(1)
'''.strip()
print(v)
def run_process2():
v = r"""
import subprocess,time,sys
##==============================================================================
# Usage :Run a process using subprocess.Popen
# Command : run_process("Running example.py", ["python3", 'example.py', 'arg1' ])
##==============================================================================
def run_process(name, args,):
print("-------------------------------------------------")
print("Running: %s\nCommand:"%name)
for arg in args:
print(arg, end=' ')
print("")
print("---------------------------------------------------")
subprogram_start_time = time.time()
process = subprocess.Popen(args)
process.communicate()
if process.returncode != 0:
print("Error: %s did not terminate correctly. \
Return code: %i."%(name, process.returncode))
sys.exit(1)
# Print time
subprogram_end_time = time.time()
sec = subprogram_end_time - subprogram_start_time
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
print("\nTime for'{}' ==> {:2.0f} days, {:2.0f} hr,\
{:2.0f} min, {:f} sec.".format( name, d, h, m, s))
print("End of command: %s\nCommand:"%name)
print("------------------------------------------------")
""".strip()
print(v)
def subprocess():
v = r"""
commands = " " +\
" ; " +\
" ; " +\
" "
print("\nRunning commands :\n", commands, "\n")
subprocess.call(commands,shell=True)
""".strip()
print(v)
def yes_no():
v = r"""
question = lambda q: input(q).lower().strip()[0] == "y" or question(q)
question("Enter yes or no: ")
""".strip()
print(v)
if __name__ == '__main__':
    # CLI dispatch: the first argument names one of the snippet functions
    # defined above (e.g. `bpp hello`); look it up by name and call it.
    method_name = sys.argv[1]
    possibles = globals().copy()
    possibles.update(locals())
    method = possibles.get(method_name)
    if not method:
        raise NotImplementedError("Method %s not implemented" % method_name)
    # Run the given function in the argument
    method()
    # Print all arguments
    # Build the list of module-level names that are NOT snippet commands
    # (dunders, imports, bookkeeping names) so `all` can list the rest.
    exclude = [ a for a in dir(sys.modules[__name__] ) if '__' in a]
    exclude.append('method')
    exclude.append('method_name')
    exclude.append('sys')
    exclude.append('exclude')
    exclude.append('possibles')
    exclude.append('os')
    exclude.append('time')
    exclude.append('today')
    exclude.append('today_var')
    # NOTE(review): `subprocess` is both an import and a snippet function name
    # in this file (the def shadows the module) and is not excluded, so it
    # shows up in the `all` listing. Renaming would break name-based dispatch.
    if method_name == 'all':
        print(" ".join([ a for a in dir(sys.modules[__name__] ) if a not in exclude]))
| [
"bhishantryphysics@gmail.com"
] | bhishantryphysics@gmail.com |
b70ba83ab08390530f1a3a3b414fe878234a746f | e86e65380c81e2459c803a51719124faaf1ad660 | /theano_workspace/function.py | edc95a835248d2a70b12e6fda55ad8f5a54ee096 | [] | no_license | jaberg/theano_workspace | 83e79a45216a1a3e98521c823eef8cd698c714dd | 913e2d443cd23b2a4c746e7cd12d3a97947fee63 | refs/heads/master | 2021-01-20T11:59:51.899982 | 2013-05-03T05:49:48 | 2013-05-03T05:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | # XXX COMPLETELY UNTESTED, DEFINITELY NOT FUNCTIONAL
class Function(object):
    """
    Special case of Workspace for implementing a single callable expression.

    Binds a workspace, the input variables, the output expressions, the
    destination variables that receive the results, and the name of the
    compiled update that computes them.

    TODO: Provides support for structuring outputs as nested list, dict, etc.
    """
    def __init__(self, ws, inputs, outputs, dests, fn_name):
        # ws      -- workspace mapping variables to values
        # inputs  -- variables assigned from positional call arguments
        # outputs -- symbolic expressions being computed (kept for reference)
        # dests   -- variables the compiled update writes results into
        # fn_name -- key of the compiled update in ws.compiled_updates
        self._ws = ws
        self._inputs = inputs
        self._outputs = outputs
        self._dests = dests
        self._fn_name = fn_name
    def __call__(self, *args):
        """Assign ``args`` to the input variables, run the compiled update,
        and return the destination variables' values as a list."""
        assert len(self._inputs) == len(args)
        for var, val in zip(self._inputs, args):
            self._ws[var] = val
        self._ws.compiled_updates[self._fn_name]()
        # TODO: unflatten dictionaries, singles, nested stuff, etc.
        # Bug fix: results live in the workspace, not on this object
        # (Function defines no __getitem__), so read them from self._ws.
        return [self._ws[var] for var in self._dests]
def function(inputs, outputs, ws_cls=Workspace):
    # Build a throwaway workspace, register one compiled update that evaluates
    # `outputs` into fresh destination variables, and wrap it as a callable.
    # NOTE(review): `Workspace` is not defined/imported in this part of the
    # file -- confirm it exists at module level, otherwise this def raises
    # NameError when the module is imported.
    ws = ws_cls()
    dests = [o.type() for o in outputs]
    for var in inputs + dests:
        ws[var] = None
    ws.add_compiled_update('__call__', zip(dests, outputs))
    return Function(ws, inputs, outputs, dests, '__call__')
| [
"james.bergstra@gmail.com"
] | james.bergstra@gmail.com |
c70dbc1ca64f2452a61d661e32d7cec9f0a63094 | facf0d5a66ba0b762f4d74bb99e568a948a76bc3 | /chat/consumers.py | 416465a4185b0c805d5e81ad97db4b384f057649 | [] | no_license | KennyDaktyl/chat_ws | bddd9bb042a31a2a54d5ac0a916a13c87649e634 | 0dfd5c2dd3a3252635adff18b54a70d7e6c77b50 | refs/heads/main | 2023-03-06T20:35:18.035734 | 2021-02-19T12:57:59 | 2021-02-19T12:57:59 | 340,362,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,216 | py | # import json
# from asgiref.sync import async_to_sync
# from channels.generic.websocket import AsyncWebsocketConsumer
# from datetime import datetime
# from django.utils import timezone
# from .models import Message
# from django.conf import settings
# from account.models import Profile
# from django.contrib.auth import get_user_model
# User = get_user_model()
# class ChatConsumer(AsyncWebsocketConsumer):
# async def fetch_messages(self, data):
# messages = Message.last_30_messages()
# content = {
# 'messages': self.messages_to_json(messages)
# }
# self.send_chat_message(content)
# def new_message(self, data):
# author = User.objects.get(username=data['from'])
# author_user = Profile.objects.get(user__username=author.username)
# message = Message.objects.create(timestamp=datetime.now(), author=author_user,
# content=data['message'])
# content = {
# 'command': 'new_message',
# 'message': self.message_to_json(message)
# }
# return self.send_chat_message(content)
# commands = {
# 'fetch_messages': fetch_messages,
# 'new_message': new_message
# }
# def messages_to_json(self, messages):
# result = []
# for message in messages:
# result.append(self.message_to_json(message))
# return result
# def message_to_json(self, message):
# return {
# 'author': message.author.user.username,
# 'content': message.content,
# 'timestamp': str(message.timestamp.strftime("%Y-%m-%d, %H:%M"))
# }
# async def connect(self):
# self.room_name = self.scope['url_route']['kwargs']['room_name']
# self.room_group_name = 'chat_%s' % self.room_name
# await self.channel_layer.group_add(
# self.room_group_name,
# self.channel_name
# )
# await self.accept()
# async def disconnect(self, close_code):
# await self.channel_layer.group_discard(
# self.room_group_name,
# self.channel_name
# )
# # Receive message from WebSocket
# async def receive(self, text_data):
# data = json.loads(text_data)
# await self.commands[data['command']](self, data)
# async def send_chat_message(self, message):
# await self.channel_layer.group_send(
# self.room_group_name,
# {
# 'type': 'chat_message',
# 'message': message,
# }
# )
# async def send_message(self, message):
# await self.send(text_data=json.dumps(message))
# # Receive message from room group
# async def chat_message(self, event):
# message = event['message']
# await self.send(text_data=json.dumps(message))
# chat/consumers.py
import json
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
    """Relay chat messages between all sockets joined to the same room group."""

    async def connect(self):
        """Join the group named after the URL's room, then accept the socket."""
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name,
        )
        await self.accept()

    async def disconnect(self, close_code):
        """Leave the room group when the socket closes."""
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name,
        )

    async def receive(self, text_data):
        """Broadcast an incoming client message to the whole room group."""
        payload = json.loads(text_data)
        outgoing = {
            'type': 'chat_message',  # dispatched to chat_message() below
            'message': payload['message'],
        }
        await self.channel_layer.group_send(self.room_group_name, outgoing)

    async def chat_message(self, event):
        """Group-event handler: forward a 'chat_message' to this socket."""
        await self.send(text_data=json.dumps({'message': event['message']}))
"krakow@miktelgsm.pl"
] | krakow@miktelgsm.pl |
779879dc262374b487ad7cf2413495b8e0a213e6 | 63ce62eb08e54c0e1fc93787d23d30ed38a80d19 | /giveaways/urls.py | 5f529a455d759445a26dc5a21a77515ac9ca1487 | [] | no_license | prettyirrelevant/giveawayy | 597581fee7e4e8ab209014594c670660827661e4 | 81e345466ffe9887a262caa0b5fd23d95d620f53 | refs/heads/main | 2023-08-24T11:40:17.916940 | 2021-10-17T23:47:01 | 2021-10-17T23:47:01 | 408,027,551 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from django.urls import path
from . import views
# URL namespace: reverse these routes as "giveaways:<name>".
app_name = "giveaways"
urlpatterns = [
    # Form to create a new giveaway.
    path("giveaways/new/", views.CreateGiveawayView.as_view(), name="create-giveaway"),
    # Search existing giveaways.
    path("giveaways/search/", views.SearchGiveawayView.as_view(), name="search-giveaway"),
    # Public detail page for one giveaway, looked up by slug.
    path("giveaways/<slug>/", views.DisplayGiveawayView.as_view(), name="view-giveaway"),
    # Entry point for a participant to join a giveaway.
    path("giveaways/<slug>/join/", views.JoinGiveawayView.as_view(), name="join-giveaway"),
]
| [
"ienioladewumi@gmail.com"
] | ienioladewumi@gmail.com |
70a516f5a592ce86deea9bbd83cf59612cedb837 | 0869d7edac80e8aebe951682a2cc311a083eade3 | /Python/tdw/proc_gen/arrangements/side_table.py | 0b2ade721bc8dd0ff79ebba56f2188ac280ad961 | [
"BSD-2-Clause"
] | permissive | threedworld-mit/tdw | 7d5b4453832647733ff91ad7a7ce7ec2320454c1 | 9df96fba455b327bb360d8dd5886d8754046c690 | refs/heads/master | 2023-09-01T11:45:28.132298 | 2023-08-31T16:13:30 | 2023-08-31T16:13:30 | 245,492,977 | 427 | 75 | BSD-2-Clause | 2023-09-14T17:36:12 | 2020-03-06T18:42:09 | Python | UTF-8 | Python | false | false | 3,372 | py | from typing import List
from tdw.tdw_utils import TDWUtils
from tdw.cardinal_direction import CardinalDirection
from tdw.proc_gen.arrangements.arrangement_along_wall import ArrangementAlongWall
class SideTable(ArrangementAlongWall):
    """
    A small side table with objects on it.
    - The side table model is chosen randomly; see `SideTable.MODEL_CATEGORIES["side_table"]`.
    - The side table is placed next to a wall.
    - The side table's position is automatically adjusted to set it flush to the wall.
    - The side table is automatically rotated so that it faces away from the wall.
    - The side table's (x, z) positional coordinates are offset by a factor; see `SideTable.DEPTH_FACTOR` and `SIDE_TABLE.LENGTH_FACTOR`.
    - The side table will have a rectangular arrangement of objects on top of it.
    - The objects are chosen randomly; see `SideTable.ON_TOP_OF["side_table"]`.
    - The objects are positioned in a rectangular grid on the table with random rotations and positional perturbations; see: `SideTable.CELL_SIZE`, `SideTable.CELL_DENSITY`, `SideTable.WIDTH_SCALE`, and `SideTable.DEPTH_SCALE`.
    - The side table is non-kinematic.
    """
    """:class_var
    Offset the distance from the wall by this factor.
    """
    DEPTH_FACTOR: float = 1.05
    """:class_var
    Offset the distance along the wall by this factor.
    """
    LENGTH_FACTOR: float = 1.25
    """:class_var
    The size of each cell in the side table rectangular arrangement. This controls the minimum size of objects and the density of the arrangement.
    """
    CELL_SIZE: float = 0.05
    """:class_var
    The probability from 0 to 1 of a "cell" in the side table rectangular arrangement being empty. Lower value = a higher density of small objects.
    """
    CELL_DENSITY: float = 0.4
    """:class
    When adding objects, the width of the side table is assumed to be `actual_width * WIDTH_SCALE`. This prevents objects from being too close to the edges of the side table.
    """
    WIDTH_SCALE: float = 0.8
    """:class
    When adding objects, the depth of the side table is assumed to be `actual_depth * DEPTH_SCALE`. This prevents objects from being too close to the edges of the side table.
    """
    DEPTH_SCALE: float = 0.8

    def get_commands(self) -> List[dict]:
        """Return commands adding the table plus a grid of small objects on top."""
        arrangement = {"kinematic": False,
                       "cell_size": SideTable.CELL_SIZE,
                       "density": SideTable.CELL_DENSITY,
                       "x_scale": SideTable.WIDTH_SCALE,
                       "z_scale": SideTable.DEPTH_SCALE}
        return self._add_object_with_other_objects_on_top(**arrangement)

    def get_length(self) -> float:
        """Length along the wall: the model's z-extent scaled by LENGTH_FACTOR."""
        extents = TDWUtils.get_bounds_extents(bounds=self._record.bounds)
        return extents[2] * SideTable.LENGTH_FACTOR

    def _get_depth(self) -> float:
        """Depth away from the wall: the model's x-extent scaled by DEPTH_FACTOR."""
        extents = TDWUtils.get_bounds_extents(bounds=self._record.bounds)
        return extents[0] * SideTable.DEPTH_FACTOR

    def _get_rotation(self) -> float:
        """Yaw (degrees) that faces the table away from its wall."""
        rotation_by_wall = {CardinalDirection.north: 0,
                            CardinalDirection.east: 90,
                            CardinalDirection.south: 180}
        # Any other wall is west, matching the original `else` branch (270).
        return rotation_by_wall.get(self._wall, 270)

    def _get_category(self) -> str:
        """Model-library category used to pick the table record."""
        return "side_table"
"alters@mit.edu"
] | alters@mit.edu |
f7866615db4088601b520ceb0509c4eb8ed8a28e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_120/713.py | 24cfd7c29cd4cf82e619addf3c3a2ef74f627af3 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | import math
T = int(raw_input())
for i in xrange(T):
r, t = raw_input().split()
r = int(r)
t = int(t)
n = ((1.0 - 2*r) + math.sqrt( (2*r - 1.0)**2 + 8*t ))/4.0
n = int(n)
total = (2*r + 1) * n + n*(n-1)*2
if total <= t:
print 'Case #%s: %s' % (i+1, n)
else:
print 'Case #%s: %s' % (i+1, n-1)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
f77ef9ebe4b1bb015901d003bfa80073a0be69f0 | 5c1531b47fb4dc4d7e5998d44f7200bf1786b12b | /__UNSORTED/139_word_break/word_break.py | 2de3f3f3df7205cae92ad800b5382c18f49e4c3a | [] | no_license | Web-Dev-Collaborative/Leetcode-JS-PY-MD | d1f560051aad1896a80eccdd4b4fbb389e7033e3 | 675b94fa5da8d40f0ea79efe6d3ef1393221425f | refs/heads/master | 2023-09-01T22:30:32.313793 | 2021-10-26T02:17:03 | 2021-10-26T02:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | class Solution:
# @param s, a string
# @param wordDict, a set<string>
# @return a boolean
    def wordBreak(self, s, wordDict):
        """Return True if s can be segmented into a sequence of words from wordDict.

        Builds a character trie (self.tree) from the dictionary, then runs a
        memoized traversal (self.memo caches suffix results) over s.
        """
        self.tree = {}
        self.memo = {}
        for word in wordDict:
            self.buildNode(word, self.tree)
        return self.traverse(s)
    def traverse(self, s):
        """Return True if suffix s can be segmented using the trie in self.tree.

        Results per suffix string are memoized in self.memo. The key -1 in a
        trie node marks the end of a dictionary word (see buildNode).
        """
        if s in self.memo:
            return self.memo[s]
        # Empty suffix: the whole string was consumed by dictionary words.
        if not s:
            return True
        ret = False
        root = self.tree
        # Walk s from the trie root; i == len(s) is included so a word
        # ending exactly at the end of s is still detected.
        for i in range(len(s) + 1):
            if -1 in root:
                # A dictionary word ends here; try segmenting the rest.
                if self.traverse(s[i:]):
                    ret = True
                    break
            if i < len(s):
                c = s[i]
                if c in root:
                    root = root[c]
                else:
                    # No trie edge for this character: stop this path.
                    break
        self.memo[s] = ret
        return ret
def buildNode(self, word, tree):
if not word:
tree[-1] = True
else:
c = word[0]
if c not in tree:
tree[c] = {}
self.buildNode(word[1:], tree[c])
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
03dd76a38abe323de5bb3342d57232631262aac6 | d7e4be3752a3b659d198893ebc4347c77f56a3b8 | /flaskr/repair/forms.py | 0e67749642c7c5607e0314eb0d443010e35c3527 | [] | no_license | maria-j-k/library_v2 | c76aa0b77f3103aab43e5339d5e468e28c9e485c | fdc292d152aec1ea596733b79126caecca8b93ac | refs/heads/main | 2023-03-19T20:13:06.104296 | 2021-03-11T12:16:37 | 2021-03-11T12:16:37 | 322,952,448 | 0 | 0 | null | 2021-01-19T13:13:38 | 2020-12-19T22:52:37 | Python | UTF-8 | Python | false | false | 4,328 | py | from flask_wtf import FlaskForm
from wtforms_sqlalchemy.fields import QuerySelectField
from wtforms import BooleanField, FieldList, FormField, HiddenField, IntegerField, TextAreaField, SelectField, StringField, SubmitField
from wtforms.widgets import HiddenInput
from wtforms.validators import DataRequired, AnyOf, Optional
from flaskr.models import Publisher, Room
class SearchForm(FlaskForm):
name = StringField('Name')
class PublisherForm(FlaskForm):
name = StringField('Name')
name_id = IntegerField('Id', widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
incorrect = BooleanField('Incorrect')
approuved = BooleanField('Approuved')
submit = SubmitField('Sumbit')
def all_publishers():
return Publisher.query.all()
class SerieForm(FlaskForm):
name = StringField('Name')
name_id = HiddenField(validators=[Optional(strip_whitespace=True)])
publisher = QuerySelectField(query_factory=all_publishers, allow_blank=False)
incorrect = BooleanField('Incorrect')
approuved = BooleanField('Approuved')
submit = SubmitField('Sumbit')
class CityForm(FlaskForm):
name = StringField('Name')
name_id = IntegerField('Id', widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
incorrect = BooleanField('Incorrect')
approuved = BooleanField('Approuved')
submit = SubmitField('Sumbit')
class CollectionForm(FlaskForm):
name = StringField('Name')
incorrect = BooleanField('Incorrect')
approuved = BooleanField('Approuved')
submit = SubmitField('Sumbit')
class RoomForm(FlaskForm):
name = StringField('Name')
incorrect = BooleanField('Incorrect')
approuved = BooleanField('Approuved')
submit = SubmitField('Sumbit')
def all_rooms():
return Room.query.all()
class ShelfForm(FlaskForm):
name = StringField('Name')
room = QuerySelectField(query_factory=all_rooms, allow_blank=False)
incorrect = BooleanField('Incorrect')
approuved = BooleanField('Approuved')
submit = SubmitField('Sumbit')
class BookForm(FlaskForm):
title = StringField('Title')
isbn = StringField('ISBN')
# authors = StringField('Authors')
# translation = StringField('Translation')
# redaction = StringField('Redaction')
# introduction = StringField('Introduction')
publisher = StringField('Publisher')
publisher_id = IntegerField('Id', widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
serie = StringField('Serie')
serie_id = IntegerField('Id', widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
city = StringField('Publication place')
city_id = IntegerField('Id', widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
pub_year = StringField('Publication year')
origin_language = StringField('Origin language')
fiction = SelectField('Fiction', choices=[
('', '---'),
(1, 'fiction'),
(0, 'non-fiction')
],
coerce=bool)
literary_form = SelectField('Literary form', choices=[
('', '---'),
('PO', 'Poetry'),
('PR', 'Prose'),
('DR', 'Drama')
])
genre = StringField('Genre')
precision = TextAreaField('Precision')
nukat = TextAreaField('NUKAT themes')
incorrect = BooleanField('Incorrect')
approuved = BooleanField('Approuved')
submit = SubmitField('Sumbit')
class PersonForm(FlaskForm):
name = StringField('Name')
name_id = HiddenField(validators=[Optional(strip_whitespace=True)])
incorrect = BooleanField('Incorrect')
approuved = BooleanField('Approuved')
submit = SubmitField('Submit')
class Person2Form(FlaskForm):
name = StringField('Name')
# person_id = IntegerField(widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
name_id = HiddenField(validators=[Optional(strip_whitespace=True)])
role = HiddenField(validators=[AnyOf(values=['A', 'T', 'R', 'I'])])
incorrect = BooleanField('Incorrect')
approuved = BooleanField('Approuved')
class Meta:
csrf = False
class CreatorForm(FlaskForm):
creators = FieldList(FormField(Person2Form, default={'role': 'A'}), max_entries=3)
submit = SubmitField('Sumbit')
| [
"maria77julia@gmail.com"
] | maria77julia@gmail.com |
9231b8accb452303eaa0880663dfeca5728f031a | 5ba97bd2f02e4a608b70ad2c4997079f6147dab4 | /Other_Market_Places/Etsy/product_urls_collectors/weddings_products_url_collector.py | 58869e52b56225611d3ecf282d7b6bc2f2c90ef7 | [] | no_license | akhilreddyyeredla/Web_Crawlers | d4131b622a378787c3bcdec4e64ac5be60ad78b0 | d86a2ca19a2ed2b3275e4cd506a8df67d71686ec | refs/heads/master | 2020-03-22T00:12:23.214849 | 2018-07-31T09:40:37 | 2018-07-31T09:40:37 | 139,230,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,468 | py | from Common.Etsy_common_imports import *
class Workers:
    """Multi-threaded collector of Etsy WEDDINGS product URLs.

    On construction it spawns NUMBER_OF_THREADS daemon worker threads, feeds
    them category-page URLs from the queue file, and drives the crawl until
    the queue file is empty. Progress is persisted to three files (queue /
    completed / skipped) so a crawl can be resumed.

    Queue entries are pipe-delimited: 'cat1|cat2|...|lastPage|pageUrl'.
    """
    PRODUCTS_PAGE_URLS_QUEUE_FILE = ''
    categories_queue = Queue()
    NUMBER_OF_THREADS = DataCollectors_Configuration.NO_OF_THEARDS
    starting_time = 0
    ending_time = 0
    products_page_url_queue_file = path_CONSTANTS.WEDDINGS_QUEUE_PATH
    products_page_url_completed_file = path_CONSTANTS.WEDDINGS_COMPLETED_PATH
    products_page_url_skipped_file = path_CONSTANTS.WEDDINGS_SKIPPED_PATH
    # these sets are used to store links and avoid duplicate links
    products_page_url_queue = set()
    products_page_url_completed = set()
    products_page_url_skipped = set()

    def __init__(self):
        print("starting WEDDINGS product url collection")
        self.starting_time = time.time()
        # BUGFIX: was ACCESSORIES_QUEUE_PATH (copy-paste from the accessories
        # collector). crawl()/create_jobs() read this attribute, so the
        # weddings collector was crawling the accessories queue file.
        self.PRODUCTS_PAGE_URLS_QUEUE_FILE = path_CONSTANTS.WEDDINGS_QUEUE_PATH
        self.create_workers()
        self.crawl()
        # Reload the final on-disk state once the crawl has finished.
        self.products_page_url_queue = file_to_set(self.products_page_url_queue_file)
        # BUGFIX: the completed set was loaded from the *queue* file.
        self.products_page_url_completed = file_to_set(self.products_page_url_completed_file)
        self.products_page_url_skipped = file_to_set(self.products_page_url_skipped_file)

    # Create worker threads (will die when main exits)
    def create_workers(self):
        """Start NUMBER_OF_THREADS daemon workers, one second apart."""
        for _ in range(self.NUMBER_OF_THREADS):
            t = threading.Thread(target=self.work)
            t.daemon = True
            t.start()
            time.sleep(1)

    def work(self):
        """Worker loop: pull queue entries forever and visit each page.

        Runs until the process exits (the threads are daemonic).
        """
        while True:
            url = self.categories_queue.get()
            # Everything before the last '|' is the category hierarchy;
            # the final component is the product-listing page URL.
            url_split = url.split('|')
            hierarchy = '|'.join(url_split[0:-1])
            product_page_url = url_split[-1]
            self.visit_page(threading.current_thread().name, hierarchy, product_page_url)
            self.categories_queue.task_done()

    def create_jobs(self):
        """Enqueue every link from the queue file, wait, then re-crawl."""
        for link in file_to_set(self.PRODUCTS_PAGE_URLS_QUEUE_FILE):
            self.categories_queue.put(link)
        self.categories_queue.join()
        self.crawl()

    def crawl(self):
        """Dispatch jobs while the queue file is non-empty; else report timing."""
        queued_links = file_to_set(self.PRODUCTS_PAGE_URLS_QUEUE_FILE)
        if len(queued_links) > 0:
            self.create_jobs()
        else:
            self.ending_time = time.time()
            total_time = self.ending_time - self.starting_time
            print('Total_time taken to collect WEDDINGS category urls: ' + "|Start Time:" + str(self.starting_time)
                  + "| End Time:" + str(self.ending_time) + "|Total Time:" + str(total_time) + 'sec')

    def get_product_links(self, hierarchy, url):
        """Scrape one listing page and return 'hierarchy|productUrl' strings.

        :param hierarchy: pipe-delimited category hierarchy of the page
        :param url: product-listing page URL to fetch and parse
        :return: list of 'hierarchy|productUrl' strings on success,
                 0 when the page could not be fetched,
                 None when the page had none of the known listing layouts
                 (the error is logged via print_exception).
        """
        response_obj = response_getter.Response()
        response = response_obj.get_content(url)
        if not response:
            return 0
        # Etsy renders listings under one of three card CSS classes;
        # check each layout in order and use the first that matches.
        listing_classes = [
            "js-merch-stash-check-listing block-grid-item v2-listing-card position-relative flex-xs-none ",
            "js-merch-stash-check-listing block-grid-item v2-listing-card position-relative pb-xs-0 ",
            "js-merch-stash-check-listing block-grid-item v2-listing-card position-relative flex-xs-none pb-xs-0 ",
        ]
        for css_class in listing_classes:
            required_data = response.findAll("div", {"class": css_class})
            if len(required_data) == 0:
                continue
            gather_product_link = []
            for links in required_data:
                try:
                    if links is not None:
                        products_urls = links.find("a")['href']
                        hierarchy_products_urls = '{}|{}'.format(hierarchy, products_urls)
                        gather_product_link.append(hierarchy_products_urls)
                except Exception as e:
                    print_exception('error', 'ETSY', hierarchy, url, e)
                    continue
            return gather_product_link
        # None of the known layouts matched: log and fall through (None).
        try:
            raise ValueError
        except ValueError:
            print_exception('error', 'ETSY', hierarchy, url, 'ValueError')

    def save_links(self, thread_name, hierarchy, product_page_url):
        """Walk every page of one listing and persist the product URLs.

        The last component of `hierarchy` is the listing's last page number.
        Returns (completed_urls, skipped_urls): page URLs that were scraped
        successfully vs. those whose fetch failed.
        """
        last_page = hierarchy.split('|')[-1]
        completed_urls = []  # to keep track of urls which are completed
        skipped_urls = []  # to keep track of urls which are missed
        for current_page in range(1, int(last_page) + 1):
            if DataCollectors_Configuration.PRODUCT_URL_FLAG == DataCollectors_Configuration.URL_FLAG:
                # Sample mode: only collect the configured number of pages.
                if current_page <= DataCollectors_Configuration.NO_OF_PRODUCT_URL_TO_COLLECT:
                    url = '{}&page={}'.format(product_page_url, current_page)
                    product_urls = self.get_product_links(hierarchy, url)
                else:
                    break
            else:
                url = '{}&page={}'.format(product_page_url, current_page)
                product_urls = self.get_product_links(hierarchy, url)
            # 0 means the fetch itself failed: remember the page for retry.
            if product_urls == 0:
                skipped_url = '{}|{}'.format(hierarchy, url)
                skipped_urls.append(skipped_url)
                continue
            else:
                hierarchy_path = hierarchy.split('|')
                completed_url = '{}|{}'.format(hierarchy, url)
                completed_urls.append(completed_url)
                # Mirror the hierarchy (minus the leading markers and the
                # trailing page count) into the on-disk folder layout.
                full_path_queue = '{}{}{}{}{}'.format(DataCollectors_Configuration.ETSY_URL_ROOT_FOLDER,
                                                      DataCollectors_Configuration.PATH_STYLE,
                                                      DataCollectors_Configuration.PATH_STYLE.join(hierarchy_path[2:-1]),
                                                      DataCollectors_Configuration.PATH_STYLE,
                                                      DataCollectors_Configuration.PODUCTS_PAGE)
                full_path_completed = '{}{}{}{}{}'.format(
                    DataCollectors_Configuration.ETSY_URL_ROOT_FOLDER,
                    DataCollectors_Configuration.PATH_STYLE,
                    DataCollectors_Configuration.PATH_STYLE.join(hierarchy_path[2:-1]),
                    DataCollectors_Configuration.PATH_STYLE,
                    DataCollectors_Configuration.COMPLETED_PAGE
                )
                # Write the product urls (hierarchy format) and touch the
                # completed marker file.
                if product_urls:
                    list_to_file(full_path_queue, product_urls)
                write_file(full_path_completed, '')
        return completed_urls, skipped_urls

    def visit_page(self, thread_name, hierarchy, product_page_url):
        """Process a page unless it was already completed or skipped."""
        if product_page_url not in self.products_page_url_completed:
            if product_page_url not in self.products_page_url_skipped:
                self.update_links(thread_name, hierarchy, product_page_url)

    def update_links(self, thread_name, hierarchy, product_page_url):
        """Scrape one listing and move it out of the queue bookkeeping."""
        completed_links, skipped_links = self.save_links(thread_name, hierarchy, product_page_url)
        if completed_links:
            # Work was done: drop the entry from the queue set and merge the
            # per-page results into the completed/skipped sets.
            remove_url = '{}|{}'.format(hierarchy, product_page_url)
            self.products_page_url_queue.remove(remove_url)
            self.products_page_url_completed |= set(completed_links)
            self.products_page_url_skipped |= set(skipped_links)
            self.update_files(self.products_page_url_queue,
                              self.products_page_url_queue_file,
                              self.products_page_url_completed,
                              self.products_page_url_completed_file)
            if skipped_links:
                list_to_file(self.products_page_url_skipped_file, skipped_links)
        elif skipped_links:
            # Every page of the entry failed: record the skips and drop the
            # entry from the queue so it is not retried this run.
            remove_url = '{}|{}'.format(hierarchy, product_page_url)
            list_to_file(self.products_page_url_skipped_file, skipped_links)
            self.products_page_url_queue.remove(remove_url)
            set_to_file(self.products_page_url_queue_file, self.products_page_url_queue)

    def update_files(self, queu, queue_file, completed, completed_file):
        """Persist the queue and completed sets to their files.

        :param queu: set of urls still queued
        :param queue_file: path of the queue file to overwrite
        :param completed: set of urls already completed
        :param completed_file: path of the completed file to overwrite
        :return: None
        """
        set_to_file(queue_file, queu)
        set_to_file(completed_file, completed)
set_to_file(completed_file, completed)
def start_collection():
    """Entry point: construct Workers, which runs the whole crawl in __init__."""
    Workers()
| [
"akhil@eunimart.com"
] | akhil@eunimart.com |
54821036ba2b8e70d4b99ed197052849563ad187 | ba1e183a41bdfc203acb41c96a8e9d9b6e94dcbb | /feature_build/extend_info_feature_g2_l0.py | 9285a60fb7313aed65864dc0105ff7ab68016f3b | [] | no_license | tianjiansmile/model_build | bf80abfc55ff5452470dc0388bb08fa952c14d80 | 5d4b4a3cf924a5841e61259ef8ab6838e889747a | refs/heads/master | 2020-11-30T13:44:59.419957 | 2020-01-10T01:34:02 | 2020-01-10T01:34:02 | 230,409,430 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,091 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# lh copy from SloopScoreCard and modify it to use online
__author__ = 'Dan'
# todo add log
from feature_build.extend_info_data import ExtendInfoData
from feature_build.utils import *
import pandas as pd
import itertools
import collections
import time
class RiskFeatureGroupExtendInfoG2(ExtendInfoData):
    """Feature group G2: flattens several third-party risk payloads
    (tongdun, nifa, scorpion/jxl/qh360 operator reports, score lists,
    tongdun guard, youmeng) from `extend_data` into one flat feature dict.

    Missing or unparseable values are generally represented by the
    module-level `missingValue` (from utils). All parsing assumes the
    vendors' JSON schemas — field names below mirror those payloads and
    are not validated here.
    """
    def __init__(self,is_live=False):
        super(RiskFeatureGroupExtendInfoG2, self).__init__(is_live)
    @staticmethod
    def __tongdundata_rulesDetail(dic):
        """Flatten tongdun 'rulesDetail' rules: per-rule score, condition
        results, and hit counts (overall and per dimension)."""
        feat = dict()
        ls = dic.get('rules')
        if ls is None:
            return feat
        prefix = 'EI_tongdundata_rulesDetail'
        for l in ls:
            rule_id = l.get('rule_id')
            feat['{p}_ruleId{rule_id}_score'.format(p=prefix, rule_id=rule_id)] = wrapper_float(l.get('score'))
            cc = l.get('conditions')
            if cc is None or len(cc) == 0:
                continue
            for ci in cc:
                # NOTE: if a rule has several conditions, later ones overwrite
                # this key (one '_result' feature per rule).
                feat['{p}_ruleId{rule_id}_result'.format(p=prefix, rule_id=rule_id)] = wrapper_float(ci.get('result'))
                if 'hits' in ci.keys():
                    val = ci.get('hits')
                    for val_i in val:
                        feat['{p}_ruleId{rule_id}_hits_{idn}'.format(p=prefix, rule_id=rule_id,
                                                                     idn=val_i.get('industry_display_name'))] \
                            = wrapper_float(val_i.get('count'))
                if 'results_for_dim' in ci.keys():
                    val = ci.get('results_for_dim')
                    for val_i in val:
                        feat['{p}_ruleId{rule_id}_resultsForDim_{dim_type}'.format(p=prefix, rule_id=rule_id,
                                                                                   dim_type=val_i.get('dim_type'))] \
                            = wrapper_float(val_i.get('count'))
                if 'hits_for_dim' in ci.keys():
                    val = ci.get('hits_for_dim')
                    for val_i in val:
                        feat['{p}_ruleId{rule_id}_hitsForDim_{dim_type}_{idn}'.format(p=prefix, rule_id=rule_id,
                                                                                      dim_type=val_i.get('dim_type'),
                                                                                      idn=val_i.get('industry_display_name'))] = \
                            wrapper_float(val_i.get('count'))
        return feat
    @staticmethod
    def __tongdundata_policy_set(ls):
        """Flatten tongdun 'policy_set': per-policy score/mode/decision and
        per-hit-rule scores."""
        feat = dict()
        if ls is None or len(ls) == 0:
            return feat
        prefix = 'EI_tongdundata_policySet'
        for val in ls:
            policy_name = val.get('policy_name')
            policy_score = wrapper_float(val.get('policy_score'))
            policy_mode = val.get('policy_mode')
            risk_type = val.get('risk_type')
            policy_decision = val.get('policy_decision')
            hit_rules = val.get('hit_rules')
            feat['{p}_pname{pname}_policyScore'.format(p=prefix, pname=policy_name)] = policy_score
            feat['{p}_pname{pname}_riskType'.format(p=prefix, pname=policy_name)] = risk_type
            feat['{p}_pname{pname}_policyMode'.format(p=prefix, pname=policy_name)] = policy_mode
            feat['{p}_pname{pname}_policyDecision'.format(p=prefix, pname=policy_name)] = policy_decision
            if hit_rules is not None:
                for hr in hit_rules:
                    feat['{p}_pname{pname}_hitRules_{name}'.format(p=prefix, pname=policy_name, name=hr.get('name'))] \
                        = wrapper_float(hr.get('score'))
        return feat
    @staticmethod
    def __tongdundata_hit_rules(ls):
        """Flatten top-level tongdun 'hit_rules' into name -> score features."""
        feat = dict()
        if ls is None or len(ls) == 0:
            return feat
        prefix = 'EI_tongdundata_hitRules'
        for l in ls:
            name = l.get('name')
            score = wrapper_float(l.get('score'))
            feat['{p}_hitRules_{name}_score'.format(p=prefix, name=name)] = score
        return feat
    def _feat_ei_tongdundata(self,extend_data):
        """Top-level tongdun features: final score/decision/risk type plus the
        three flattened sub-sections above."""
        feat = dict()
        j = self.get_parsed_ei_column('tongdundata',extend_data)
        if j is None:
            return feat
        prefix = 'EI_tongdundata'
        k = 'final_score'
        feat['{p}_{k}'.format(p=prefix, k=k)] = wrapper_float(j.get(k))
        k = 'risk_type'
        feat['{p}_{k}'.format(p=prefix, k=k)] = j.get(k)
        k = 'final_decision'
        feat['{p}_{k}'.format(p=prefix, k=k)] = j.get(k)
        feat.update(self.__tongdundata_rulesDetail(j.get('rulesDetail')))
        feat.update(self.__tongdundata_policy_set(j.get('policy_set')))
        feat.update(self.__tongdundata_hit_rules(j.get('hit_rules')))
        return feat
    @staticmethod
    def __tanzhidata_feat0(j_data):
        """Flatten tanzhi credit_info list sections (refInfos/platform_Infos/
        eveSums) keyed by each entry's slice_name."""
        feat = dict()
        credit_info_keys = ['refInfos', 'platform_Infos', 'eveSums']
        for cik in credit_info_keys:
            # Only the first mb_infos entry is used — TODO confirm payload
            # never carries more than one.
            ls = j_data.get('mb_infos')[0].get('credit_info').get(cik)
            for l in ls:
                slice_name = l.get('slice_name')
                ls_keys = l.keys()
                ls_keys = [vv for vv in ls_keys if vv != 'slice_name']
                for key in ls_keys:
                    feat['EI_tanzhidata_{cik}_{slice_name}_{key}'.format(slice_name=slice_name, key=key,
                                                                         cik=cik)] = l.get(key)
        return feat
    @staticmethod
    def __tanzhidata_feat1(j_data):
        """Flatten tanzhi weekly 'sections': keep the 50 most recent weekly
        slices (sorted by section_name descending) as lastweek0..lastweek49
        features per column."""
        feat = dict()
        ls = j_data.get('mb_infos')[0].get('credit_info').get('sections')
        sections_columns = ['section_name', 'apply_request_count', 'repay_fail_count', 'overdue_repay_maxdelay_level',
                            'verif_count', 'overdue_repay_average_level',
                            'overdue_average_level', 'repay_remind_average_level',
                            'overdue_count', 'register_count', 'overdue_repay_count',
                            'apply_reject_count', 'apply_request_average_level',
                            'repay_fail_average_level', 'overdue_maxdelay_level',
                            'repay_remind_count', 'loan_offer_count',
                            'loan_offer_average_level']
        df_section_name = pd.DataFrame(columns=sections_columns)
        for l in ls:
            # todo section name to a abstract time window
            ls_vals = []
            for col in sections_columns:
                ls_vals.append(l.get(col))
            df_section_name.loc[len(df_section_name)] = ls_vals
        # replace '' with 0
        df_section_name = df_section_name.replace('', None)
        df_section_name = df_section_name.sort_values(by=['section_name'], ascending=0)
        # NOTE(review): assumes at least 50 weekly sections exist; fewer rows
        # would raise on the reindex/iloc below — TODO confirm.
        df_section_name = df_section_name.iloc[0:50]
        df_section_name.index = range(50)
        del sections_columns[0]
        for col in sections_columns:
            for k in range(50):
                feat['EI_tanzhidata_mb_infos_lastweek{k}_{col}'.format(k=k, col=col)] = df_section_name[col].iloc[k]
        return feat
    def _feat_ei_tanzhidata(self, extend_data):
        """Tanzhi features: combine feat0/feat1 and map missings.
        Note that *_level features are categorical ('A' to 'E')."""
        feat = dict()
        j_data = self.get_parsed_ei_column( 'tanzhidata',extend_data)
        if j_data is None:
            return feat
        feat.update(self.__tanzhidata_feat0(j_data))
        feat.update(self.__tanzhidata_feat1(j_data))
        feat = wrapper_missing_value(feat)
        # Note that *_level feature is category: ['A' to 'E']
        return feat
    def _feat_ei_nifadata(self,extend_data):
        """NIFA (internet finance association) features: copy the listed
        scalar keys straight through."""
        feat = dict()
        j = self.get_parsed_ei_column( 'nifadata',extend_data)
        if j is None:
            return feat
        ls_keys = ['overduemoreamt',
                   'loancount',
                   'loanbal',
                   'queryatotalorg',
                   'outstandcount',
                   'overdueamt',
                   'loanamt',
                   'generationcount',
                   'generationamount',
                   'overduemorecount',
                   'totalorg']
        for key in ls_keys:
            feat['EI_nifadata_{key}'.format(key=key)] = j.get(key)
        # print(np.unique(feat.values()))
        return feat
    @staticmethod
    def __scorpionaccessreport_behavior(ls):
        """Monthly cell-behavior features: keep the 6 most recent months
        (sorted by cell_mth descending) as lastmth0..lastmth5 per column."""
        feat = dict()
        keys = ['cell_operator_zh', 'net_flow', 'call_out_time', 'cell_operator', 'call_in_cnt', 'cell_phone_num',
                'sms_cnt', 'cell_loc', 'call_cnt', 'total_amount', 'call_out_cnt', 'call_in_time']
        df0 = pd.DataFrame(columns=keys + ['cell_mth'])
        for l in ls:
            ls_val = []
            for k in keys:
                ls_val.append(l.get(k))
            ls_val.append(l.get('cell_mth'))
            df0.loc[len(df0)] = ls_val
        df0 = df0.sort_values(by=['cell_mth'], ascending=0)
        df0 = df0.iloc[0:6]
        df0.index = range(len(df0))
        for col in keys:
            for k in range(len(df0)):
                feat['EI_sar_JSON_INFO_lastmth{k}_{col}'.format(k=k, col=col)] = df0[col].iloc[k]
        return feat
    @staticmethod
    def __scorpionaccessreport_contact_region(ls):
        """Per-region call statistics keyed by region_loc. (Currently not
        called from _feat_ei_scorpionaccessreport.)"""
        feat = dict()
        keys = ['region_avg_call_out_time', 'region_call_in_time_pct', 'region_call_out_cnt_pct',
                'region_call_out_time_pct', 'region_call_in_time', 'region_avg_call_in_time', 'region_call_in_cnt_pct',
                'region_call_out_time', 'region_call_out_cnt', 'region_call_in_cnt', 'region_uniq_num_cnt']
        for l in ls:
            region_loc = l.get('region_loc')
            for k in keys:
                feat['EI_sar_region{region_loc}_{k}'.format(region_loc=region_loc, k=k)] = l.get(k)
        return feat
    @staticmethod
    def __scorpionaccessreport_appcheck(ls, cutoff_time):
        """Application-check features: blacklist flags, website/identity
        checks, account age (years) relative to cutoff_time, and the
        operator's registered address.

        NOTE(review): assumes `ls` has at least 4 entries with this exact
        layout (indices 1-3) — TODO confirm against the vendor payload.
        """
        prefix = 'EI_sar_appcheck'
        # todo clean data, dummy or text model; this function only returns raw data
        feat = dict()
        ls1 = ls[1].get('check_points')
        feat['{p}_ls1_court_blacklist'.format(p=prefix)] = ls1.get('court_blacklist').get('arised')
        feat['{p}_ls1_financial_blacklist'.format(p=prefix)] = ls1.get('financial_blacklist').get('arised')
        ls2 = ls[2].get('check_points')
        feat['{p}_ls2_financial_blacklist'.format(p=prefix)] = ls2.get('financial_blacklist').get('arised')
        feat['{p}_ls2_website'.format(p=prefix)] = ls2.get('website')
        feat['{p}_ls2_check_idcard'.format(p=prefix)] = ls2.get('check_idcard')
        feat['{p}_ls2_reliability'.format(p=prefix)] = ls2.get('reliability')
        reg_time = wrapper_strptime(ls2.get('reg_time'))
        # Account age in years at cutoff_time; missingValue when unparseable.
        diff_time = (cutoff_time - reg_time).days/365 if reg_time is not None else missingValue
        feat['{p}_ls2_diff_reg_time'.format(p=prefix)] = diff_time
        feat['{p}_ls2_check_name'.format(p=prefix)] = ls2.get('check_name')
        feat['{p}_ls2_webcheck_ebusinesssite'.format(p=prefix)] = ls2.get('webcheck_ebusinesssite')
        # 运营商通讯地址 (operator's contact address)
        ls3 = ls[3]
        feat['{p}_ls3_operator_addr'.format(p=prefix)] = ls3.get('check_points').get('key_value')
        return feat
    @staticmethod
    def __scorpionaccessreport_trip_info(ls):
        """Trip features: counts/ratios by trip type, unique origins and
        destinations, and basic stats over trip durations (hours)."""
        feat = dict()
        df0 = pd.DataFrame(columns=['trip_type', 'trip_dest', 'trip_leave', 'trip_start_time', 'trip_end_time'])
        for l in ls:
            df0.loc[len(df0)] = [l.get('trip_type'), l.get('trip_dest'), l.get('trip_leave'), l.get('trip_start_time'),
                                 l.get('trip_end_time')]
        df0.trip_start_time = [wrapper_strptime(k) for k in df0.trip_start_time]
        df0.trip_end_time = [wrapper_strptime(k) for k in df0.trip_end_time]
        df0 = df0.assign(trip_diff_day=wrapper_diff_time(df0, 'trip_start_time', 'trip_end_time', 'hour'))
        # todo gps `trip_dest`, `trip_leave`
        prefix = 'EI_sar_tripInfo'
        feat['{p}_cnt'.format(p=prefix)] = len(df0)
        # Trip types (Chinese): 双休日 = weekend, 节假日 = public holiday,
        # 工作日 = workday. Do not change the literals — they match the payload.
        uni_trip_types = [u'双休日', u'节假日', u'工作日']
        for trip_type in uni_trip_types:
            feat['{p}_cnt_{trip_type}'.format(p=prefix, trip_type=trip_type)] = sum(
                df0['trip_type'] == trip_type)
            feat['{p}_Ratio_{trip_type}'.format(p=prefix, trip_type=trip_type)] = \
                wrapper_div(feat.get('{p}_cnt_{trip_type}'.format(p=prefix, trip_type=trip_type)), len(df0))
        feat['{p}_cnt_unique_trip_dest'.format(p=prefix)] = len(np.unique(df0['trip_dest']))
        feat['{p}_cnt_unique_trip_leave'.format(p=prefix)] = len(np.unique(df0['trip_leave']))
        # metrics for df0.trip_diff_day
        feat.update(apply_basic_metrics(list(df0.trip_diff_day), '{p}_trip_diff_time_'.format(p=prefix)))
        return feat
    @staticmethod
    def __scorpionaccessreport_main_service(ls):
        """Main-service features: stats over total_service_cnt and company
        type counts/ratios."""
        feat = dict()
        prefix = 'EI_sar_main_service'
        # convert ls to df
        # todo call convert_ls_to_df
        df = pd.DataFrame(columns=['total_service_cnt', 'company_type', 'company_name'])
        for l in ls:
            df.loc[len(df)] = [l.get('total_service_cnt'), l.get('company_type'), l.get('company_name')]
        feat.update(apply_basic_metrics(list(df.total_service_cnt), '{p}_totalServiveCnt'.format(p=prefix)))
        feat['{p}_cnt_unique_company_type'.format(p=prefix)] = len(np.unique(df.company_type))
        uni_company_types = np.unique(df.company_type)
        for ctype in uni_company_types:
            feat['{p}_cnt_company_type_{ctype}'.format(p=prefix, ctype=ctype)] = sum(df.company_type == ctype)
            feat['{p}_ratio_company_type_{ctype}'.format(p=prefix, ctype=ctype)] = \
                wrapper_div(sum(df.company_type == ctype), len(df))
        return feat
    @staticmethod
    def __scorpionaccessreport_contact_list(ls):
        """Contact-list features: category counts/ratios (needs_type,
        contact_all_day, phone location), per-column basic metrics, and
        pairwise EMD between related column groups."""
        feat = dict()
        prefix = 'EI_sar_contact_list'
        cols = ['contact_name', 'needs_type',
                'contact_all_day', 'contact_early_morning', 'contact_morning', 'contact_noon',
                'contact_afternoon', 'contact_night',
                'call_in_len', 'call_out_len', 'call_len',
                'call_in_cnt', 'call_out_cnt', 'call_cnt',
                'contact_1w', 'contact_1m', 'contact_3m', 'contact_3m_plus',
                'phone_num', 'phone_num_loc', 'p_relation',
                'contact_weekday', 'contact_weekend', 'contact_holiday']
        # df = convert_ls_to_df(ls, cols)
        df = convert_ls_to_df_better(ls, cols)
        # df = pd.concat([cols, df], join='inner')
        # basic info `contact_name`
        # TODO TODO apply a sophisticated phone classifier or topic model to `contact_name`
        c = collections.Counter(df.needs_type)
        for k, v in c.items():
            feat['{p}_cnt_needsType_{k}'.format(p=prefix, k=k)] = v
            feat['{p}_ratioOfCnt_needsType_{k}'.format(p=prefix, k=k)] = wrapper_div(v, len(df))
        c = collections.Counter(df.contact_all_day)
        for k, v in c.items():
            feat['{p}_cnt_contactAllDay_{k}'.format(p=prefix, k=str(k))] = v
            feat['{p}_ratioOfCnt_contactAllDay_{k}'.format(p=prefix, k=str(k))] = wrapper_div(v, len(df))
        c = collections.Counter(df.phone_num_loc)
        for k, v in c.items():
            feat['{p}_cnt_phoneNumLoc_{k}'.format(p=prefix, k=str(k))] = v
            feat['{p}_ratioOfCnt_phoneNumLoc_{k}'.format(p=prefix, k=str(k))] = wrapper_div(v, len(df))
        # Column groups: time-of-day contacts, call volumes, recency windows,
        # day-type contacts.
        cols0 = ['contact_early_morning', 'contact_morning', 'contact_noon',
                 'contact_afternoon', 'contact_night']
        cols1 = ['call_in_len', 'call_out_len', 'call_len', 'call_in_cnt', 'call_out_cnt', 'call_cnt']
        cols2 = ['contact_1w', 'contact_1m', 'contact_3m', 'contact_3m_plus']
        cols3 = ['contact_weekday', 'contact_weekend', 'contact_holiday']
        for col in cols0 + cols1 + cols2 + cols3:
            feat.update(apply_basic_metrics(list(df[col]), '{p}_{col}'.format(p=prefix, col=col)))
        # apply emd metric, df-not-filtered
        for cols in [cols0, cols1, cols2, cols3]:
            for win_tup in itertools.combinations(cols, 2):
                feat['{p}_EMD_{c1}_{c2}'.format(p=prefix, c1=win_tup[0], c2=win_tup[1])] = \
                    wrapper_emd(df[win_tup[0]], df[win_tup[1]])
        # TODO TODO apply time window as filters
        return feat
    @staticmethod
    def __scorpionaccessreport_behavior_check(ls):
        """Behavior-check features: check_point name -> score."""
        feat = dict()
        prefix = 'EI_sar_behaviorCheck'
        for l in ls:
            k = l.get('check_point')
            v = l.get('score')
            feat['{p}_{k}_score'.format(p=prefix, k=k)] = v
        return feat
    @staticmethod
    def __scorpionaccessreport_user_info_check(ls):
        """User-info-check features from check_search_info (list values are
        reduced to their length) and check_black_info (None -> 0)."""
        feat = dict()
        prefix = 'EI_sar_userInfoCheck'
        ls0 = ls.get('check_search_info')
        ls1 = ls.get('check_black_info')
        ls0_keys = ['phone_with_other_idcards', 'phone_with_other_names', 'register_org_cnt', 'arised_open_web',
                    'searched_org_cnt', 'idcard_with_other_phones', 'searched_org_type', 'register_org_type',
                    'idcard_with_other_names']
        for k in ls0_keys:
            v = ls0.get(k)
            v = len(v) if isinstance(v, list) else v
            feat['{p}_{k}'.format(p=prefix, k=k)] = v
        ls1_keys = ['contacts_class1_cnt', 'contacts_class1_blacklist_cnt', 'contacts_class2_blacklist_cnt',
                    'contacts_router_cnt', 'contacts_router_ratio', 'phone_gray_score']
        for k in ls1_keys:
            v = ls1.get(k)
            v = 0 if v is None else v
            feat['{p}_{k}'.format(p=prefix, k=k)] = v
        return feat
    def _feat_ei_scorpionaccessreport(self,extend_data):
        """Operator-report features. Falls back through three vendors with
        the same report structure: scorpion -> jxl -> qh360."""
        feat = dict()
        j = self.get_parsed_ei_column( 'scorpionaccessreport',extend_data)
        # similar structure as scorpion access report, so map to scorpion features
        if j is None:
            j = self.get_parsed_ei_column('jxlaccessreport',extend_data)
        if j is None:
            j = self.get_parsed_ei_column( 'qh360accessreport',extend_data)
        if j is None:
            return feat
        # print(j.get('deliver_address')) # empty
        # print(j.get('collection_contact')) # empty
        # j.get('ebusiness_expense') # empty
        feat.update(self.__scorpionaccessreport_behavior(ls=j.get('cell_behavior')[0].get('behavior')))
        # feat.update(self.__scorpionaccessreport_contact_region(ls=j.get('contact_region')))
        feat.update(self.__scorpionaccessreport_appcheck(ls=j.get('application_check'), cutoff_time=self.cutoff_time))
        feat.update(self.__scorpionaccessreport_trip_info(ls=j.get('trip_info')))
        feat.update(self.__scorpionaccessreport_main_service(ls=j.get('main_service')))
        feat.update(self.__scorpionaccessreport_contact_list(ls=j.get('contact_list'))) # Todo speedup
        feat.update(self.__scorpionaccessreport_behavior_check(ls=j.get('behavior_check')))
        feat.update(self.__scorpionaccessreport_user_info_check(ls=j.get('user_info_check')))
        return feat
    def _feat_ei_score(self,extend_data):
        """Score-list features: one feature per named score; empty strings
        become missingValue."""
        feat = dict()
        prefix = 'EI_score'
        j = self.get_parsed_ei_column( 'score',extend_data)
        if j is None:
            return feat
        for l in j:
            score_val = l.get('score')
            name = l.get('name')
            if isinstance(score_val, str):
                score_val = missingValue if len(score_val) == 0 else score_val
            feat['{p}_{name}'.format(p=prefix, name=name)] = score_val
        return feat
    def _feat_ei_tongdunguarddata(self, extend_data):
        """Tongdun-guard features: each listed key holds an interval string
        like '[a,b)' which is split into two numeric features (val0, val1)."""
        feat = dict()
        prefix = 'EI_tongdunguarddata'
        j = self.get_parsed_ei_column( 'tongdunguarddata',extend_data)
        if j is None:
            return feat
        ks = ['rEazz31000011', 'rBazz01132010', 'rGcaz11000030',
              'rEbzz39000011', 'rChzz03038030', 'rAbzz03030010', 'isHistory',
              'rAcbz03009011', 'rEbzz25000020', 'rAbzz03101041', 'rEbzz21000011']
        for k in ks:
            v = self.__process_tongdungarddata(j.get(k))
            feat['{p}_{k}_val0'.format(p=prefix, k=k)] = v[0]
            feat['{p}_{k}_val1'.format(p=prefix, k=k)] = v[1]
        return feat
    @staticmethod
    def __process_tongdungarddata(val):
        """Parse an interval string '[a,b)' into [a, b] floats; either side
        defaults to missingValue when absent or malformed."""
        v1 = missingValue
        v2 = missingValue
        if val:
            val = val.replace('[', '').replace(')', '')
            val = val.split(',')
            if len(val) == 2:
                v1 = wrapper_float(val[0])
                v2 = wrapper_float(val[1])
        return [v1, v2]
    def _feat_ei_youmengdata(self, extend_data):
        """Youmeng (app-usage) features: every category prefix crossed with
        the 180d/90d/7d time windows, copied straight from the payload."""
        feat = dict()
        j = self.get_parsed_ei_column( 'youmengdata',extend_data)
        if j is None:
            return feat
        prefix = 'EI_youmengdata'
        pre_keys = ['car', 'sns', 'finance', 'top', 'tail', 'appStability', 'property', 'travel', 'entertainment',
                    'service', 'education', 'woman', 'reading', 'tools', 'shopping', 'game', 'loan', 'health']
        for d in ['180d', '90d', '7d']:
            for pk in pre_keys:
                key_val = pk+d
                feat['{p}_{k}'.format(p=prefix, k=key_val)] = j.get(key_val)
        return feat
    def calc_the_group(self, extend_data):
        """Compute and merge all enabled feature sub-groups for this record.

        tanzhi and youmeng extraction are currently disabled (commented out).
        Prints elapsed wall-clock time for the whole group.
        """
        # join with feature json and if not calculated, replace by missing value.
        starttime = time.time()
        all_feat_dict = dict()
        all_feat_dict.update(self._feat_ei_tongdundata(extend_data))
        # all_feat_dict.update(self._feat_ei_tanzhidata(extend_data))
        all_feat_dict.update(self._feat_ei_nifadata(extend_data))
        all_feat_dict.update(self._feat_ei_scorpionaccessreport(extend_data))
        all_feat_dict.update(self._feat_ei_score(extend_data))
        all_feat_dict.update(self._feat_ei_tongdunguarddata(extend_data))
        # all_feat_dict.update(self._feat_ei_youmengdata(extend_data))
        endtime = time.time()
        print(' cost time: ', endtime - starttime)
        return all_feat_dict
| [
"1635374633@qq.com"
] | 1635374633@qq.com |
3d234d7165a31b1417beb07a6b0b397816453f99 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/facebook-real-time-proxy/allPythonContent.py | 170a2d11fb6f6b255ad362bc171f9c2f5fc07313 | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,277 | py | __FILENAME__ = apps
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" A container for app-specific data and functionality."""
import threading
import logging
class App(object):
    """ Manages Facebook Application-specific settings and policies

    This class serves two purposes. First, it serves as a repository of
    information about an application (such as the users we have seen for it
    and the configuration settings for it). Second, it exposes two methods
    which are used in making cache-eligibility decisions in
    ProxyRequestHandler: check_user and check_request. check_user adds the
    requestor to the app's list of seen users, then reports whether the user
    whose data is being requested has been seen before (only users who are
    known to have added an app receive realtime updates, so only their data
    is safe to cache). check_request ensures that the request is only for
    data which is part of the app's realtime update subscription and is not
    blacklisted.
    """
    def __init__(self, config):
        """ Build an App from a config dict; only 'app_id' is required.

        Optional keys: 'app_cred', 'app_secret', 'blacklist_fields',
        'blacklist_connections', 'whitelist_fields',
        'whitelist_connections'.
        """
        self.id = config['app_id']
        self.bad_fields = set(config.get('blacklist_fields', []))
        self.bad_conns = set(config.get('blacklist_connections', []))
        self.good_fields = set(config.get('whitelist_fields', []))
        self.good_conns = set(config.get('whitelist_connections', []))
        self.users = set()
        self.lock = threading.Lock()
        self.cred = config.get('app_cred')
        self.secret = config.get('app_secret')
        # An entry on both lists is treated as blacklisted.
        self.good_fields -= self.bad_fields
        self.good_conns -= self.bad_conns

    def check_user(self, requestor, requestee, default=None):
        """ Check a request's users.

        Adds the requestor to the known users for the app, and returns
        whether the requestee is a known user of the app. Also adds the
        user to the default app, since updates for them reach it too.
        """
        # 'with' guarantees the lock is released even if an exception is
        # raised (the previous acquire()/release() pair could leak the lock).
        with self.lock:
            self.users.add(requestor)
            ok = requestee in self.users
        # If this isn't the default app, also record the user there.
        if default is not None and default is not self:
            default.check_user(requestor, requestee)
        return ok

    def check_request(self, pathparts, fields=None):
        """ Returns whether a request is cacheable."""
        if not fields:
            fields = []
        if len(pathparts) == 1:  # request for direct profile fields
            # Cacheable only when every requested field is whitelisted.
            if set(fields) <= self.good_fields:
                return True
            logging.info('got fields ' + repr(fields) + ' but only '
                         + repr(self.good_fields) + ' is ok')
        elif len(pathparts) == 2:  # request for a connection
            return pathparts[1] in self.good_conns
        return False  # safety: if we're not certain about it, fall back to
                      # passthrough behavior
def init(configapps):
    """ Initializes the mapping of app ids to the App objects from config.

    If no 'default' app is configured and at least one app exists, a
    synthetic default is created whose allowed fields/connections are the
    intersection of every configured app's, so cached data is safe to
    serve regardless of which app requested it.  Returns an empty dict for
    an empty config (the original crashed here: reduce() over an empty
    sequence raises TypeError).
    """
    apps = dict((str(x['app_id']), App(x)) for x in configapps)
    if 'default' not in apps and apps:
        default_app = App({'app_id': 'default'})
        # set.intersection(*sets) replaces the Python-2-only
        # reduce(...)/itervalues() combination with identical semantics.
        default_app.good_fields = set.intersection(
            *[x.good_fields for x in apps.values()])
        default_app.good_conns = set.intersection(
            *[x.good_conns for x in apps.values()])
        apps['default'] = default_app
    return apps
def get_app(app_id, app_set):
    """Look up the given app in the app_set, using the default if needed."""
    # Try the requested id first, then fall back to the default entry.
    for candidate in (app_id, 'default'):
        if candidate in app_set:
            return app_set[candidate]
    return None
########NEW FILE########
__FILENAME__ = cache
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" This module simply contains the ProxyLruCache class."""
import urllib
import json
import threading
import logging
from fbproxy.lru import LRU
from fbproxy.requesthandler import ProxyRequestHandler
from fbproxy.hashdict import HashedDictionary
# Table-type discriminators.
# NOTE(review): neither constant appears to be referenced in this module.
SCALAR_TABLE = 1
VECTOR_TABLE = 2
class ProxyLruCache(object):
    """Implement a cache for Facebook Graph API Requests.

    This cache stores entries in a multi-tiered fashion. First, requests are
    indexed by app and path (the part of the URL before the '?'). At most
    'size' such entries are maintained in an LRU cache. Underneath each
    entry, a HashedDictionary maps access-token-less query strings (plus the
    requesting uid) to responses, deduplicating identical response bodies.

    This implementation can be replaced. The relevant functions to implement
    are handle_request and invalidate.
    """
    def __init__(self, size):
        # LRU of path+app -> HashedDictionary; guarded by self.lock.
        self.cache = LRU(size)
        self.lock = threading.Lock()

    def handle_request(self, query, path, querystring, app, server):
        """ handle a cacheable request. returns (status, headers, data) tuple.

        If it is found in the cache, just return the result directly from the
        cache. Otherwise make a request to the graph api server and return the
        result. If it is a 200 OK response, it gets saved in the cache, also.

        'query' is the parsed query dict (parse_qs style: values are lists)
        and is mutated here (access_token/fields are stripped before it is
        used as part of the cache subkey).
        """
        accesstoken_parts = None
        accesstoken = None
        if 'access_token' in query:
            accesstoken = query['access_token'][0]
            accesstoken_parts = ProxyRequestHandler.parse_access_token(
                query['access_token'][0])
            # The token must not be part of the cache key: responses are
            # shared across viewers of the same app.
            del query['access_token']
        appid = accesstoken_parts[0] if accesstoken_parts else '0'
        uid = accesstoken_parts[2] if accesstoken_parts else '0'
        usetable = '/' not in path # use table for user directly
        # usetable = False
        fields = None
        if 'fields' in query and usetable:
            # In table mode we fetch ALL whitelisted fields and project the
            # requested subset locally, so 'fields' leaves the subkey too.
            fields = query['fields'][0]
            del query['fields']
        key = path + "__" + appid
        subkey = uid + "__" + urllib.urlencode(query)
        value = None
        hashdict = None
        logging.debug('cache handling request with key ' + key +
                ', and subkey ' + subkey + ' for user ' + uid)
        # Only the LRU bookkeeping is done under the lock; the network fetch
        # below deliberately happens outside it.
        self.lock.acquire()
        if key in self.cache:
            # step 1. acquire the dictionary
            hashdict = self.cache[key]
            if subkey in hashdict: # step 2: grab the relevant data if there
                value = hashdict[subkey]
        else:
            hashdict = HashedDictionary()
            self.cache[key] = hashdict
        self.lock.release()
        if value: # step 3: return the data if available
            if usetable:
                (statusline, headers, table) = value
                return (statusline, headers, get_response(table, fields))
            else:
                return value
        # at this point, we have a cache miss
        # step 4: fetch data
        if usetable:
            (statusline, headers, table, status) = _fetchtable(query,
                    path, accesstoken, app, hashdict, subkey, server)
            # step 4.5: form a response body from the table
            if status != 200:
                # fetchtable returns body instead of table on error
                body = table
            else:
                # The projected body length differs from the upstream one,
                # so drop the stale Content-Length header.
                for header in headers:
                    if header[0].upper() == 'CONTENT-LENGTH':
                        headers.remove(header)
                        break
                body = get_response(table, fields)
        else:
            (statusline, headers, body, status) = fetch_tuple(path,
                    querystring, server)
        if status == 200:
            # Only successful responses are cached; _fetchtable already
            # stored table-mode entries itself.
            hashdict[subkey] = ((statusline, headers, body), body)
        return (statusline, headers, body)

    def invalidate(self, appid, url):
        """ Invalidate a URL in an application's context.

        This removes all cache entries for the given application and path,
        and also for the "null" (token-less) app id '0'.
        """
        key = url + "__" + appid
        # NOTE(review): the log message lacks a space after 'invalidating'.
        logging.debug('invalidating' + key)
        self.lock.acquire()
        if key in self.cache:
            del self.cache[key]
        # also invalidate the URL for the null app
        key = url + "__0"
        if key in self.cache:
            del self.cache[key]
        self.lock.release()
def _response_to_table(body):
""" Takes a JSON response body and converts into a key-value store."""
table = {}
try:
bodyjson = json.loads(body)
for (key, value) in bodyjson.iteritems():
table[key] = value
except ValueError:
pass
return table
def get_response(table, fields):
    """ Fetches the given fields from the table and returns it as JSON.

    'fields' is a comma-separated string; when non-empty, only those
    fields (that exist in the table) are returned. Otherwise every field
    is returned except private entries whose keys start with an
    underscore.
    """
    ret = {}
    if fields:
        for field in fields.split(','):
            if field in table:
                ret[field] = table[field]
    else:
        # .items() instead of the Python-2-only .iteritems(): identical
        # iteration behavior on both Python 2 and 3.
        for key, value in table.items():
            if key[0] != '_':
                ret[key] = value
    return json.dumps(ret)
def _fetchtable(query, path, accesstoken, app, hashdict, key, server):
    """ Fetches the requested object, returning it as a field-value table.

    Requests ALL of the app's whitelisted fields (so one cached table can
    serve any projection), and uses the hash dict to avoid re-parsing a
    body whose hash is already stored.  Returns
    (statusline, headers, table, 200) on success; on a non-200 response
    the third element is the raw body instead of a table.
    """
    fields = ','.join(app.good_fields)
    query['fields'] = fields
    query['access_token'] = accesstoken
    (statusline, headers, data, statuscode) = fetch_tuple(path, \
            urllib.urlencode(query), server)
    # error = send the raw response instead of a table
    if statuscode != 200:
        return (statusline, headers, data, statuscode)
    # hash miss = have to parse the file
    elif not hashdict.contains_hash(data):
        hashdict[key] = ((statusline, headers, _response_to_table(data)), data)
    else: # statuscode == 200 and hashdict has the hash of the data
        hashdict[key] = (None, data) # the stored data arg is ignored
        # since the hash is in the dict, the existing parsed table is reused
    (statusline, headers, table) = hashdict[key]
    return (statusline, headers, table, 200)
def fetch_tuple(path, querystring, server):
    """ Fetches the requested object as (status, headers, body, status num)"""
    resp = ProxyRequestHandler.fetchurl('GET', path, querystring, server)
    status_line = "{0} {1}".format(resp.status, resp.reason)
    header_list = resp.getheaders()
    payload = resp.read()
    resp.close()
    return (status_line, header_list, payload, resp.status)
########NEW FILE########
__FILENAME__ = config
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Central configuration location for the proxy.
The load function must be called before attempting to use this module."""
import imp
def load(cfgfile):
    """ Loads the specified configuration into this module.

    Executes the given file as a Python module and copies every name it
    defines into this module's namespace, so other modules can read the
    settings as attributes of this config module.
    """
    local_config = imp.load_source('local_config', cfgfile)
    globals().update(local_config.__dict__)
########NEW FILE########
__FILENAME__ = hashdict
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" This module contains the HashedDictionary class.
This is a smart dictionary which stores values that have identical hashes only
once, to save space.
"""
import hashlib
class HashedDictionary(object):
    """ A smarter dictionary. Stores responses with identical body only once.

    Values are supplied as (payload, hashable_part) tuples. Each key maps
    to the SHA-1 digest of the hashable part; the digest then maps to the
    stored payload. Keys whose hashable parts are identical therefore
    share a single stored payload (the first one inserted wins).
    """

    def __init__(self):
        self.content = {}   # digest -> stored payload
        self.keymap = {}    # key -> digest

    def __getitem__(self, key):
        """ Fetch the stored payload for the given key (None if absent)."""
        digest = self.keymap.get(key)
        if digest is None:
            return None
        return self.content[digest]

    def __setitem__(self, key, data):
        """ Store the given (payload, hashable_part) under the key.

        If another entry already has the same hash, the key simply points
        at the existing payload and the new one is discarded.
        """
        payload, hashable = data
        digest = hashlib.sha1(hashable).digest()
        self.keymap[key] = digest
        # Keep only the first payload seen for each digest.
        self.content.setdefault(digest, payload)

    def __contains__(self, key):
        return key in self.keymap

    def contains_hash(self, valhashdata):
        """ Determines if the data has a matching hash already in the dict."""
        return hashlib.sha1(valhashdata).digest() in self.content
########NEW FILE########
__FILENAME__ = launcher
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" main driver for the Facebook Graph API Proxy with Real-time Update support
All configuration is done by editing config.py. This file simply launches two
web servers. one for the realtime update endpoint, and one for the proxy
itself. The realtime endpoint needs to be accessible publically, while the
proxy endpoint should be accessible only from a small set of machines
(ideally the web servers that would otherwise be making direct Facebook Graph
API calls).
"""
import threading
import time
from cherrypy import wsgiserver
from fbproxy import config, apps
from fbproxy.requesthandler import ProxyRequestHandlerFactory
from fbproxy.cache import ProxyLruCache
from fbproxy.rtendpoint import RealtimeUpdateHandlerFactory
# Upstream Graph API host every proxied request is sent to.
GRAPH_SERVER = "graph.facebook.com"


def launch(config_file):
    """ Launch the Graph Proxy with the specified config_file.

    Starts two CherryPy WSGI servers: the realtime-update endpoint (on a
    daemon thread, so it dies with the process) and the proxy endpoint
    (on this thread; blocks until KeyboardInterrupt). Subscription
    registration happens only after the realtime server is listening,
    because Facebook calls the endpoint back during registration.
    """
    config.load(config_file)
    cache = ProxyLruCache(config.cache_entries)
    appdict = apps.init(config.apps)
    request_handler_factory = ProxyRequestHandlerFactory(None,
            cache, appdict, GRAPH_SERVER)
    realtime_handler_factory = RealtimeUpdateHandlerFactory(cache, None,
            appdict)
    endpoint = "http://" + config.public_hostname + ":" + str(
            config.realtime_port) + "/"
    proxyserver = wsgiserver.CherryPyWSGIServer((config.proxy_interface,
        config.proxy_port), request_handler_factory)
    rtuserver = wsgiserver.CherryPyWSGIServer((config.realtime_interface,
        config.realtime_port), realtime_handler_factory)
    realtime_port_thread = threading.Thread(target=rtuserver.start)
    realtime_port_thread.daemon = True
    realtime_port_thread.start()
    time.sleep(2) # give the server time to come up
    realtime_handler_factory.register_apps(endpoint, GRAPH_SERVER)
    try:
        proxyserver.start()
    except KeyboardInterrupt:
        proxyserver.stop()
        rtuserver.stop()
########NEW FILE########
__FILENAME__ = lru
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" This module contains a simple LRU cache."""
class Node(object):
    """ A doubly-linked-list node holding one key-value pair for the LRU."""

    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.prev = None
        self.successor = None

    def remove(self):
        """ Unlink this node from its neighbors and clear its own links."""
        predecessor, follower = self.prev, self.successor
        if predecessor is not None:
            predecessor.successor = follower
        if follower is not None:
            follower.prev = predecessor
        self.prev = None
        self.successor = None

    def setnext(self, next):
        """ Insert this node immediately before 'next' in the list.

        Passing None leaves this node with no neighbors.
        """
        self.successor = next
        if next is not None:
            self.prev = next.prev
            next.prev = self
            if self.prev is not None:
                self.prev.successor = self
        else:
            self.prev = None

    def __repr__(self):
        return "(%r,%r)" % (self.key, self.value)
class LRU(object):
    """ A simple Least-recently-used cache.

    A doubly linked list of Nodes (head = most recently used, tail = least
    recently used) plus a dictionary index into it. Changes to the size
    field take effect the next time the list's size changes.

    Fixes over the original implementation:
    * __getitem__ moved the accessed node but never updated self.head
      (and never fixed self.tail when the tail was accessed), leaving the
      head/tail pointers stale and corrupting eviction order.
    * __setitem__'s update path had the same stale-tail problem.
    * checksize no longer emits a debug print on every mutation (which was
      also Python-2-only print-statement syntax).
    """
    def __init__(self, size=10000):
        self.count = 0
        self.size = size
        self.head = None   # most recently used
        self.tail = None   # least recently used
        self.index = {}

    def _move_to_front(self, node):
        """ Detach node and reinsert it at the head of the list."""
        if self.head is node:
            return
        if self.tail is node:
            self.tail = node.prev
        node.remove()
        node.setnext(self.head)
        self.head = node

    def __getitem__(self, key):
        """ Fetch an item from the cache and update its access time."""
        if key in self.index:
            node = self.index[key]
            self._move_to_front(node)
            return node.value
        return None

    def __setitem__(self, key, value):
        """ Update a value or insert a new one; evicts when over size."""
        if key in self.index:
            node = self.index[key]
            node.value = value
            self._move_to_front(node)
        else:
            node = Node(key, value)
            self.index[key] = node
            node.setnext(self.head)
            self.head = node
            if not self.tail:
                self.tail = node
            self.count += 1
            self.checksize()

    def __contains__(self, key):
        """ Existence check. This does NOT update the access time."""
        return key in self.index

    def __delitem__(self, key):
        """ Remove the item from the cache; does nothing if not found."""
        if key in self.index:
            node = self.index[key]
            if node is self.tail:
                self.tail = node.prev
            if node is self.head:
                self.head = node.successor
            del self.index[key]
            self.count -= 1
            node.remove()
            self.checksize()

    def checksize(self):
        """ Evicts least-recently-used entries until count <= size."""
        while self.count > self.size:
            node = self.tail
            del self.index[node.key]
            self.tail = node.prev
            if self.head is node:
                self.head = None
            node.remove()
            self.count -= 1
########NEW FILE########
__FILENAME__ = requesthandler
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" WSGI application for the proxy endpoint."""
import httplib
import urlparse
import logging
# Default set of user profile fields assumed when a request carries no
# explicit ?fields= list.
USER_FIELDS = ['first_name', 'last_name', 'name', 'hometown', 'location',
               'about', 'bio', 'relationship_status', 'significant_other',
               'work', 'education', 'gender']
# Maps a connection that received a non-GET to the connections whose cached
# entries must be invalidated as a result.
INVALIDATE_MAP = {'feed': ['statuses', 'feed', 'links'],
                  'links': ['feed', 'links']}
class ProxyRequestHandler(object):
    """ WSGI application for handling a graph API request

    This takes requests, and either passes them through to config.graph_server
    or fulfills them from a cache. There are multiple reasons that a request
    might not be eligible to be cached. Specifically, these are:
    1. The request contains a field which is not enabled for realtime updates.
    2. The request is not a GET request
    3. The application has not seen a request from the targeted user before
    (based on access_token). Note that this will never prevent caching
    of a request for information about the current user. (see App.check_user
    in AppStateHandler for details)
    4. The request fails the application's check_request() verification.
    5. The request is not for a user or a direct connection of user
    6. A validator is present and the request fails its validation
    For requests which are not GET requests, we also proactively invalidate
    cache entries which are likely to be affected by such requests. See
    ProxyLruCache for details about the caching strategy.
    """
    def __init__(self, environ, start_response, validator, cache, appdict,
            server):
        self.start = start_response
        self.env = environ
        self.cache = cache
        self.apps = appdict
        self.server = server
        # the following fields will be set in __iter__
        self.uriparts = None
        self.acctoken_pieces = None
        self.query_parms = None
        # Bound only when supplied, so __iter__'s hasattr check can detect
        # the absence of a validator.
        if validator:
            self.validate = validator
    def __iter__(self):
        """ fulfills a graph API request.

        Returns a WSGI response iterable; the cache/passthrough decision
        logic lives here.
        """
        # parse the request
        self.uriparts = self.env['PATH_INFO'].strip('/').split('/')
        self.query_parms = urlparse.parse_qs(self.env['QUERY_STRING'])
        app = None
        if hasattr(self, 'validate'):
            if not self.validate(self.env):
                return self.forbidden()
        # determine the viewer context and application, if access token exists
        if 'access_token' in self.query_parms:
            self.acctoken_pieces = self.parse_access_token(
                self.query_parms['access_token'][0])
            if self.acctoken_pieces:
                app = self.apps[self.acctoken_pieces[0]] \
                    if self.acctoken_pieces[0] in self.apps \
                    else None
            else:
                app = self.apps['default'] if 'default' in self.apps \
                    else None
        else:
            # No token: empty placeholder so index accesses below are safe.
            self.acctoken_pieces = ['', '', '', '']
        self.fixurl() # replace /me with the actual UID, to enable sane caching
        self.env['PATH_INFO'] = '/'.join(self.uriparts)
        # last chance to load an app to handle this
        if not app and 'default' in self.apps:
            app = self.apps['default']
        if not app:
            logging.info('bypassing cache due to missing application settings')
            return self.pass_through() # app is missing from config, so don't
                                       # cache
        # non-GETs typically change the results of subsequent GETs. Thus we
        # invalidate opportunistically.
        if self.env['REQUEST_METHOD'] != 'GET':
            self.invalidate_for_post(app)
            return self.pass_through()
        fields = USER_FIELDS # default fields if not specified
        if 'fields' in self.query_parms:
            fields = self.query_parms['fields'][0].split(',')
        if not app.check_user(self.acctoken_pieces[2], self.uriparts[0],
                self.apps.get('default')):
            logging.info('bypassing cache since user not known to be app user')
            return self.pass_through()
        if self.cannotcache():
            logging.info('bypassing cache because the URI is not cacheable')
            return self.pass_through()
        if not app.check_request(self.uriparts, fields):
            logging.info('bypassing cache since the app rejected the request')
            return self.pass_through()
        if self.cache:
            return self.do_cache(app, self.server)
        else:
            logging.warning('cache does not exist. passing request through')
            return self.pass_through()
    @staticmethod
    def parse_access_token(acctok):
        """ Split up an access_token into 4 parts.

        Returns the 4-element list [app_id, ?, uid, ?] on success, or
        False on failure (non-user access tokens do not split this way).
        """
        try:
            acctoken_firstsplit = acctok.split('-', 1)
            acctoken_all = acctoken_firstsplit[0].split('|')
            acctoken_all.extend(acctoken_firstsplit[1].split('|'))
            if len(acctoken_all) != 4:
                return False
            return acctoken_all
        except IndexError:
            # No '-' in the token: acctoken_firstsplit[1] does not exist.
            return False
    @staticmethod
    def fetchurl(reqtype, path, querystring, server):
        """ fetch the requested object from the Facebook Graph API server.

        Returns the open HTTPResponse; the caller must read and close it.
        """
        conn = httplib.HTTPSConnection(server)
        conn.request(reqtype, path + "?" + querystring)
        response = conn.getresponse()
        return response
    # connections which are known not to work with the Graph API realtime
    # updates. See http://developers.facebook.com/docs/api/realtime
    connections_blacklist = ['home', 'tagged', 'posts', 'likes', 'photos', \
        'albums', 'videos', 'groups', 'notes', 'events', 'inbox', 'outbox',
        'updates']
    def cannotcache(self):
        """ A set of simple rules for ruling out some requests from caching."""
        # rule 0: Only GET requests can be fetched.
        # All others are assumed to have side effects
        if self.env['REQUEST_METHOD'] != 'GET':
            return True
        # rule 1: Reject if the request is not realtime-enabled.
        # Specifically, it must either be a request for an item directly, or
        # for an object which is not a blacklisted connection of users
        if len(self.uriparts) > 2:
            return True
        if len(self.uriparts) == 2:
            if self.uriparts[1] in ProxyRequestHandler.connections_blacklist:
                return True
        return False
    def fixurl(self):
        """ Replace "me" with the user's actual UID (from the token)."""
        if self.uriparts[0].upper() == "ME":
            if self.acctoken_pieces[2] != '':
                self.uriparts[0] = self.acctoken_pieces[2]
    def pass_through(self):
        """ Satisfy a request by just proxying it to the Graph API server."""
        response = self.fetchurl(self.env['REQUEST_METHOD'],
            self.env['PATH_INFO'], self.env['QUERY_STRING'], self.server)
        # WSGI: start_response must be called before the body is yielded.
        self.start(str(response.status) + " " +
            response.reason, response.getheaders())
        data = response.read()
        response.close()
        yield data
    def do_cache(self, app, server):
        """ Satisfy a request by passing it to the Cache."""
        cached_response = self.cache.handle_request(self.query_parms,
            self.env['PATH_INFO'], self.env['QUERY_STRING'], app, server)
        self.start(cached_response[0], cached_response[1])
        yield cached_response[2]
    def forbidden(self):
        """ 403 response used when the validator rejects a request."""
        self.start('403 Forbidden', [('Content-type', 'text/plain')])
        yield "Failed to validate request\n"
    def internal_error(self):
        """ Generic 500 response."""
        self.start('500 Internal Server Error',
            [('Content-type', 'text/plain')])
        yield "An internal error occurred\n"
    def invalidate_for_post(self, app):
        """ Invalidates possibly affected URLs after a non-GET.

        The behavior of this is controlled by INVALIDATE_MAP above.
        """
        if len(self.uriparts) != 2:
            return
        if not self.uriparts[1] in INVALIDATE_MAP:
            return
        for field in INVALIDATE_MAP[self.uriparts[1]]:
            logging.debug('invalidating ' + self.uriparts[0] + '/' + field)
            self.cache.invalidate(app.id, "/" + self.uriparts[0] + "/" + field)
class ProxyRequestHandlerFactory(object):
    """ Factory producing one ProxyRequestHandler per WSGI request.

    WSGI invokes this object for every request, so this (and anything it
    calls) may run on multiple threads at once.
    """

    def __init__(self, validator, cache, apps, server):
        self.validator = validator
        self.cache = cache
        self.apps = apps
        self.server = server

    def __call__(self, environ, start_response):
        handler = ProxyRequestHandler(environ, start_response,
                                      self.validator, self.cache,
                                      self.apps, self.server)
        return handler
########NEW FILE########
__FILENAME__ = rtendpoint
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" WSGI application for realtime update handler endpoint."""
import json
import urlparse
import hmac
import hashlib
import logging
from fbproxy import rturegister
class RealtimeUpdateHandler(object):
    """ WSGI application for handling a realtime update.

    This responds to two types of requests: validation requests (GET), and
    realtime updates (POST). For each user change entry in the update, if
    at least one change is for a field directly on user, that user's entry is
    invalidated. Any connections are invalidated one by one.
    """
    def __init__(self, environ, start_response, validator, cache, apps):
        self.start = start_response
        self.env = environ
        self.cache = cache
        self.apps = apps
        # Bound only when a validator was supplied.
        if validator:
            self.validate = validator

    def __iter__(self):
        # GET = subscription validation handshake; POST = an update payload.
        if self.env['REQUEST_METHOD'] == 'GET':
            return self.handle_validate()
        elif self.env['REQUEST_METHOD'] == 'POST':
            return self.handle_update()
        else:
            return self.forbidden()

    def bad_request(self, message=None):
        """ 400 response with an optional explanatory message."""
        self.start('400 Bad Request', [('Content-type', 'text/plain')])
        if not message:
            yield "This is not a valid update"
        else:
            yield message

    def forbidden(self):
        """ 403 response for failed validation/signature checks."""
        self.start('403 Forbidden', [('Content-type', 'text/plain')])
        yield "Request validation failed"

    def not_found(self):
        """ 404 response for updates addressed to an unknown app."""
        self.start('404 Not Found', [('Content-type', 'text/plain')])
        yield "The requested application was not found on this server"

    def handle_validate(self):
        """ Performs Realtime Update endpoint validation.

        Echoes hub.challenge back when hub.mode is 'subscribe' and the
        verify token matches the one generated at registration time.
        See http://developers.facebook.com/docs/api/realtime for details.
        """
        req_data = urlparse.parse_qs(self.env['QUERY_STRING'])
        logging.info('Validating subscription')
        if not 'hub.mode' in req_data or req_data['hub.mode'][0] != 'subscribe':
            return self.bad_request('expecting hub.mode')
        # BUG FIX: the comparison was inverted ('==' instead of '!='),
        # which rejected the correct verify token and accepted any other.
        if not 'hub.verify_token' in req_data or \
                req_data['hub.verify_token'][0] != rturegister.randtoken:
            return self.forbidden()
        if not 'hub.challenge' in req_data:
            return self.bad_request('Missing challenge')
        return self.success(req_data['hub.challenge'][0])

    def handle_update(self):
        """ Respond to a Realtime Update POST.

        The APPID for which the update is performed is the path portion of
        the URL. After verifying the X-Hub-Signature HMAC-SHA1 over the raw
        body, this loops over every 'entry' in the update JSON and
        invalidates the affected cache entries.
        """
        app_id = self.env['PATH_INFO'][1:]
        app = self.apps.get(app_id)
        if not app:
            return self.not_found()
        if not 'CONTENT_LENGTH' in self.env:
            return self.bad_request('Missing content length')
        data = self.env['wsgi.input'].read(int(self.env['CONTENT_LENGTH']))
        sig = self.env.get('HTTP_X_HUB_SIGNATURE')
        if not sig:
            logging.info('received request with missing signature')
            return self.forbidden()
        if sig.startswith('sha1='):
            sig = sig[5:]
        if app.secret is not None:
            # 'mac' instead of 'hash': don't shadow the builtin.
            mac = hmac.new(app.secret, data, hashlib.sha1)
            expected_sig = mac.hexdigest()
            # NOTE: hmac.compare_digest would resist timing attacks here.
            if sig != expected_sig:
                # SECURITY FIX: the original also logged the app secret on
                # mismatch, leaking it into the logs.
                logging.warn('Received request with invalid signature')
                logging.warn('sig is ' + sig)
                logging.warn('expected ' + expected_sig)
                logging.warn('data is ' + data)
                return self.bad_request('Invalid signature.')
        try:
            updates = json.loads(data)
        except ValueError:
            return self.bad_request('Expected JSON.')
        logging.info('received a realtime update')
        try: # loop over all entries in the update message
            for entry in updates['entry']:
                uid = entry['uid']
                # Any changed whitelisted field invalidates the user entry.
                if len(app.good_fields.intersection(
                        entry['changed_fields'])) > 0:
                    self.cache.invalidate(app_id, uid)
                # Changed connections are invalidated individually.
                conns = app.good_conns.intersection(entry['changed_fields'])
                for conn in conns:
                    self.cache.invalidate(app_id, uid + "/" + conn)
        except KeyError:
            return self.bad_request('Missing fields caused key error')
        return self.success('Updates successfully handled')

    def success(self, message):
        """ 200 response carrying the given message body."""
        self.start('200 OK', [('Content-type', 'text/plain')])
        yield message
class RealtimeUpdateHandlerFactory:
    """Factory producing RealtimeUpdateHandler instances.

    Bundles a cache, a validator and the application dictionary so a fresh
    handler can be built for every incoming WSGI request.
    """
    def __init__(self, cache, validator, appdict):
        self.cache = cache
        self.validator = validator
        self.appdict = appdict
    def register_apps(self, endpoint, server):
        """Register every known application for realtime updates.

        Must be called AFTER the realtime update endpoint is ready to accept
        connections, so the endpoint should probably run on another thread.
        """
        for application in self.appdict.itervalues():
            rturegister.register(application, endpoint + application.id, server)
    def __call__(self, environ, start_response):
        # WSGI entry point: build a new handler per request.
        return RealtimeUpdateHandler(environ, start_response,
                                     self.validator, self.cache, self.appdict)
########NEW FILE########
__FILENAME__ = rturegister
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Module enabling registration for realtime updates.
The most commonly used method is register, which takes the endpoint URL
and the app object, and registers for realtime updates if either the app's
cred or secret is available and valid.
"""
import httplib
import urllib
import random
randtoken = 0  # hub.verify_token sentinel; 0 means "not yet generated" (set lazily in register_with_token)
def register_with_secret(appid, secret, fields, callback, server):
    """Register the given application for realtime updates.

    Builds a client-credential access token of the form "<appid>|<secret>"
    from the application secret and delegates to register_with_token().
    Only one of register_with_secret and register_with_token needs to be
    called; in most cases this is taken care of by register().
    """
    access_token = '|'.join((appid, secret))
    return register_with_token(appid, access_token, fields, callback, server)
def register_with_token(appid, token, fields, callback, server):
""" Register the given application for realtime updates.
Creates a subscription for user fields for the given application
at the specified callback URL. This method takes an application's client
credential access token as the second argument. Only one of
register_with_secret and register_with_token needs to be called. In most
cases, this should be taken care of by register().
"""
fieldstr = ",".join(fields)
headers = {'Content-type': 'applocation/x-www-form-urlencoded'}
# use a random number as our verification token
global randtoken
if not randtoken:
randtoken = random.randint(1, 1000000000)
# make a POST to the graph API to register the endpoint
postfields = {'object': 'user',
'fields': fieldstr,
'callback_url': callback,
'verify_token': randtoken}
conn = httplib.HTTPSConnection(server)
conn.request('POST', appid + '/subscriptions?access_token=' + token,
urllib.urlencode(postfields), headers)
response = conn.getresponse()
if response.status == 200:
return True
else:
print 'Error subscribing: graph server\'s response follows'
print str(response.status) + " " + response.reason
data = response.read()
print data
return False
def register(app, callback, server):
    """Register *app* for realtime updates, if credentials are available.

    Prefers the app's client-credential access token (app.cred) and falls
    back to the application secret (app.secret). If neither is set, this is
    a silent no-op.
    """
    wanted = app.good_fields | app.good_conns
    if app.cred:
        register_with_token(app.id, app.cred, wanted, callback, server)
    elif app.secret:
        register_with_secret(app.id, app.secret, wanted, callback, server)
########NEW FILE########
| [
"dyangUCI@github.com"
] | dyangUCI@github.com |
baa23174ec0a53364cec169399118e44e0da551e | 5c668379197a236d3a961dbba32aba606d661111 | /chapter13/brother1.py | 5ddb4a359161e1d5221298537cb7ab4a76b44356 | [] | no_license | RobbiNespu/wargames.unix-heaven.org | a7cf1afbc7f5983a85638c0a63dfd5764fd74b4e | 6ad914d020f696bf6148bf33d66de72aaf001589 | refs/heads/master | 2021-04-05T23:27:17.027998 | 2015-06-26T14:38:28 | 2015-06-26T14:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | #!/usr/bin/env python
# code taken from the examples from the Python documentation on SocketServer
# daemon1 returns base64'd message, which is xxd'ed
import SocketServer
class MyTCPHandler(SocketServer.BaseRequestHandler):
    """
    The RequestHandler class for our server.
    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.
    """
    def handle(self):
        # Reply to every connection with one fixed payload: an xxd-style hex
        # dump of a base64-encoded message. The connecting player is expected
        # to reverse the xxd and base64 encodings to read the hint.
        data = """
0000000: 5157 6773 4947 4675 6233 526f 5a58 4967 QWgsIGFub3RoZXIg
0000010: 6333 5279 5957 356e 5a58 4967 6347 467a c3RyYW5nZXIgcGFz
0000020: 6332 6c75 5a79 4269 6553 454b 436c 6c76 c2luZyBieSEKCllv
0000030: 6453 4268 636d 5567 6247 3976 6132 6c75 dSBhcmUgbG9va2lu
0000040: 5a79 426d 6233 4967 6447 686c 0a49 484e ZyBmb3IgdGhl.IHN
0000050: 6c59 334a 6c64 4342 725a 586b 7349 4746 lY3JldCBrZXksIGF
0000060: 795a 5734 6e64 4342 3562 3355 6763 3352 yZW4ndCB5b3Ugc3R
0000070: 7959 5735 6e5a 5849 2f43 6770 4a4a 3230 yYW5nZXI/CgpJJ20
0000080: 6763 3239 7963 6e6b 7349 474a 3164 4342 gc29ycnksIGJ1dCB
0000090: 4a49 474e 6862 6d35 760a 6443 426f 5a57 JIGNhbm5v.dCBoZW
00000a0: 7877 4948 6c76 6453 3475 4c67 6f4b 5432 xwIHlvdS4uLgoKT2
00000b0: 3573 6553 4276 626d 5567 6232 5967 5957 5seSBvbmUgb2YgYW
00000c0: 7873 4948 567a 4948 526f 636d 566c 4947 xsIHVzIHRocmVlIG
00000d0: 6876 6247 527a 4948 526f 5a53 4272 5a58 hvbGRzIHRoZSBrZX
00000e0: 6b75 4c69 344b 0a43 6c6c 7664 5342 755a kuLi4K.CllvdSBuZ
00000f0: 5756 6b49 4852 7649 475a 7062 6d51 6762 WVkIHRvIGZpbmQgb
0000100: 586b 6759 6e4a 7664 4768 6c63 6977 6761 XkgYnJvdGhlciwga
0000110: 4755 6764 326c 7362 4342 6f5a 5778 7749 GUgd2lsbCBoZWxwI
0000120: 486c 7664 5345 4b43 6b35 7664 7942 4a49 HlvdSEKCk5vdyBJI
0000130: 4735 6c0a 5a57 5167 6447 3867 636d 567a G5l.ZWQgdG8gcmVz
0000140: 6443 7767 5a32 3976 5a47 4a35 5a53 427a dCwgZ29vZGJ5ZSBz
0000150: 6448 4a68 626d 646c 6369 454b 4367 3d3d dHJhbmdlciEKCg==
0000160: 0a .
        """
        self.request.sendall(data)
# Script entry point: serve the fixed hint payload on localhost:31123.
if __name__ == "__main__":
    HOST, PORT = "localhost", 31123
    # Create the server, binding to localhost on port 31123
    server = SocketServer.TCPServer((HOST, PORT), MyTCPHandler)
    # Activate the server; this will keep running until you
    # interrupt the program with Ctrl-C
    server.serve_forever()
| [
"dnaeon@gmail.com"
] | dnaeon@gmail.com |
bab2a8a108c7ebf93bc6405b7b31741a934d2815 | 54147931d6e9eecb797c4d3665337a6c2b27160c | /chapter2_string_text/2_2.py | c5d96d4ab2ab814774508068c66f69d48702b497 | [] | no_license | miniyk2012/python_cookbook | 1fefe857f22d1145e7335c15c45e8b12356d7a49 | c01daee2d5349ea018a0a8768be7b011df70b566 | refs/heads/master | 2020-06-10T19:53:45.749970 | 2017-01-16T15:23:51 | 2017-01-16T15:23:51 | 75,891,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # -*- coding:utf-8 -*-
import logging
_logger = logging.getLogger(__name__)  # NOTE(review): created but never used below
# str.startswith with a single prefix string.
url = 'http://www.python.org'
print(url.startswith('http:'))
import os
# Lists the parent directory, so output depends on where the script is run.
filenames = os.listdir('../')
print(filenames)
# str.endswith (and startswith) accept a tuple of alternatives.
print([name for name in filenames if name.endswith(('algorithm', '.md'))])
print(any(name.endswith('.idea') for name in filenames))
# A list of prefixes must be converted to a tuple for startswith.
choices = ['http:', 'ftp:']
url = 'http://www.python.org'
print(url.startswith(tuple(choices)))
import re
# Same check via a regex alternation, versus the simpler tuple idiom below.
url = 'http://www.python.org'
print(bool(re.match('http:|https:|ftp:', url)))
print(url.startswith(('http:', 'https:', 'ftp:')))
| [
"yangk@ersoft.cn"
] | yangk@ersoft.cn |
0b14592d0ba3665b7175831dfac9b89160af3f0e | 414239752b2bfc4cb3a947474f2662af7588b6eb | / protobufeditor/Tests/MarathonTests/TestCases/ProtoSearch/ProtoSearch6.py | 53ffee71fcdf81ff114b1db15c02e1c24c9934ad | [] | no_license | dtracers/protobufeditor | 8c7f9671c3b3a7d1cd3094321d030f6e6afcc7e8 | b65d06bce93165eebf9798c533e2447a5992d384 | refs/heads/master | 2020-12-24T19:28:22.486207 | 2016-04-19T23:41:57 | 2016-04-19T23:41:57 | 56,340,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | useFixture(default)
def test():
    # Marathon GUI test: drives the Protocol Buffer Editor's "Proto Search"
    # dialog and asserts the resulting widget contents.
    # NOTE(review): window/select/click/assert_p/close (and useFixture above)
    # are presumably injected globally by the Marathon runner — they are not
    # imported in this file; verify against the runner's docs.
    from Modules import commonBits
    java_recorded_version = '1.6.0_22'  # JRE version the script was recorded under; unused below
    if window('Protocol Buffer Editor'):
        select('FileChooser', commonBits.sampleDir() + 'Ams_LocDownload_20041228.bin')
        click('Proto Search')
        # Expect both the .proto source and its compiled .protocomp to be found.
        assert_p('Table1', 'Content', r'[[' + commonBits.stdCopybookDir() + 'Ams_Location.proto, Ams_Location.proto, Locations], [' + commonBits.stdCopybookDir() + 'Ams_Location.protocomp, Ams_Location.proto, Locations]]')
        click('BasicInternalFrameTitlePane$NoFocusButton2')
        assert_p('FileChooser1', 'Text', commonBits.stdCopybookDir() + 'Ams_Location.protocomp')
        assert_p('ComboBox2', 'Text', 'Ams_Location.proto')
        assert_p('ComboBox3', 'Content', '[[Locations]]')
        assert_p('ComboBox3', 'Text', 'Locations')
        assert_p('ComboBox1', 'Text', 'Compiled Proto')
        assert_p('ComboBox', 'Text', 'Delimited Messages')
    close()
| [
"bm_tas@yahoo.com.au"
] | bm_tas@yahoo.com.au |
56066d9682621cee113110a61f8dfb2d3fa90623 | a6e13bbb722b9eacf4e8d25f9a44cf01a41eb99b | /arelle/plugin/validate/EFM/__init__.py | df968023841965dd2aefe8453f89e4d1a83cc428 | [
"Apache-2.0"
] | permissive | michalczubek/Arelle | b13771c23c6d90ca79be646057c944b91629b2b3 | be0dca986e5451902092b6dfd43a1079ebecb22a | refs/heads/master | 2022-04-23T17:09:13.075773 | 2020-04-23T00:43:23 | 2020-04-23T00:43:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,223 | py | '''
Created on Dec 12, 2013
@author: Mark V Systems Limited
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
Input file parameters may be in JSON (without newlines for pretty printing as below):
[ {# current fields in JSON structure from Arelle Wrapper, per instance
"file": "file path to instance or html",
"cik": "1234567890",
"cikNameList": { "cik1": "name1", "cik2":"name2", "cik3":"name3"...},
"submissionType" : "SDR-A",
"exhibitType": "EX-99.K",
"itemsList": [] # array of items, e.g. ["5.03"] (either array of strings blank-separated items in string)
"accessionNumber":"0001125840-15-000159" ,
# new fields
"periodOfReport": "mm-dd-yyyy",
"entityRegistration.fyEnd": "mm/dd", # the FY End value from entity (CIK) registration
"entity.repFileNum": file number from entity (CIK) registration
"submissionHeader.fyEnd": "mm/dd", # the FY End value from submission header
"voluntaryFilerFlag": true/false, # JSON Boolean, string Yes/No, yes/no, Y/N, y/n or absent
"wellKnownSeasonedIssuerFlag": true/false, # JSON Boolean, string Yes/No, yes/no, Y/N, y/n or absent
"shellCompanyFlag": true/false, true/false, # JSON Boolean, string Yes/No, yes/no, Y/N, y/n or absent
"acceleratedFilerStatus": true/false, # JSON Boolean, string Yes/No, yes/no, Y/N, y/n or absent
"smallBusinessFlag": true/false, # JSON Boolean, string Yes/No, yes/no, Y/N, y/n or absent
"emergingGrowthCompanyFlag": true/false, # JSON Boolean, string Yes/No, yes/no, Y/N, y/n or absent
"exTransitionPeriodFlag": true/false, # JSON Boolean, string Yes/No, yes/no, Y/N, y/n or absent
# filer - use "cik" above
"invCompanyType": "N-1A" # from table of investment company types
"rptIncludeAllSeriesFlag": true/false, # JSON Boolean, string Yes/No, yes/no, Y/N, y/n or absent
"rptSeriesClassInfo.seriesIds": ["S0000990666", ...] # list of EDGAR seriesId values
"newClass2.seriesIds": [] # //seriesId xpath result on submission headers
},
{"file": "file 2"...
]
For test case operation, the above fields accepted from testcase variation:
<data>
<conf:parameter name="cikName" datatype="xs:string" value="cik1:name1" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="cikName" datatype="xs:string" value="cik2:name2" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="cikName" datatype="xs:string" value="cik3:name3" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="submissionType" datatype="xs:string" value="8-K" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="periodOfReport" datatype="xs:string" value="12-31-2017" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="voluntaryFilerFlag" datatype="xs:boolean" value="true" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="coregCikFileNumber" datatype="xs:string" value="cik1:fileNbr1" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="coregCikFileNumber" datatype="xs:string" value="cik2:fileNbr2" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="coregCikFileNumber" datatype="xs:string" value="cik3:fileNbr3" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="sroId" datatype="xs:string" value="NASD" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="sroId" datatype="xs:string" value="NYSE" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
...
<instance readMeFirst="true">e9999999ng-20081231.xml</instance>
<data>
(Accession number is only needed for those EdgarRenderer output transformations of
FilingSummary.xml which require it as a parameter (such as EDGAR's internal workstations,
which have a database that requires accession number as part of the query string to retrieve
a file of a submission.)
On Windows, the input file argument must be specially quoted if passed in via Java
due to a Java bug on Windows shell interface (without the newlines for pretty printing below):
"[{\"file\":\"z:\\Documents\\dir\\gpc_gd1-20130930.htm\",
\"cik\": \"0000350001\",
\"cikNameList\": {\"0000350001\":\"BIG FUND TRUST CO\"},
\"submissionType\":\"SDR-A\", \"exhibitType\":\"EX-99.K SDR.INS\"}]"
To build cached deprecated concepts files (requires internet access):
First delete any resources/*deprecated-concept.json which you want to rebuild
arelleCmdLine --plugin validate/EFM --build-deprecated-concepts-file
In GUI mode please use formula parameters dialog to emulate the above. The parameters are named as above (with no prefix), and
an additional EdgarRenderer parameters:
noLogsInSummary or includeLogsInSummary (default) (this parameter does not need a value, just presence)
summaryXslt (use EdgarWorkstationSummarize.xslt to emulate EDGAR workstation)
reportXslt (use EdgarWorkstationInstanceReport.xslt to emulate EDGAR workstation)
The parameters with array values are entered to the GUI as blank-separated strings (no quotes):
itemsList could be 5.03 6.99
rptSeriesClassInfo.seriesIds could be S0000990666 S0000990777 S0000990888
'''
import os, io, json, zipfile, logging
jsonIndent = 1 # None for most compact, 0 for left aligned
from decimal import Decimal
from lxml.etree import XML, XMLSyntaxError
from arelle import ModelDocument, ModelValue, XmlUtil, FileSource
from arelle.ModelDocument import Type
from arelle.ModelValue import qname
from arelle.PluginManager import pluginClassMethods # , pluginMethodsForClasses, modulePluginInfos
from arelle.PythonUtil import flattenSequence
from arelle.UrlUtil import authority, relativeUri
from arelle.ValidateFilingText import referencedFiles
from .Document import checkDTSdocument
from .Filing import validateFiling
try:
import regex as re
except ImportError:
import re
from collections import defaultdict
def dislosureSystemTypes(disclosureSystem, *args, **kwargs):
    """Plugin hook: declare the disclosure systems this plugin provides.

    Returns ((disclosure system name, validation attribute name), ...).
    NOTE: the misspelled name ("dislosure...") is the registered hook name
    and must not be corrected here.
    """
    return (("EFM", "EFMplugin"),)
def disclosureSystemConfigURL(disclosureSystem, *args, **kwargs):
    """Plugin hook: return the path of this plugin's disclosure-system config.xml."""
    plugin_dir = os.path.dirname(__file__)
    return os.path.join(plugin_dir, "config.xml")
def validateXbrlStart(val, parameters=None, *args, **kwargs):
    """Plugin hook run before XBRL validation: collect EFM filing parameters.

    Sets val.validateEFMplugin and builds val.params from (in order) any EIS
    file parameters, caller-provided formula/cmd-line parameters (including a
    JSON-valued ELOparams), and the current report's entryPoint.  Later
    sources override earlier ones for the same key.
    """
    val.validateEFMplugin = val.validateDisclosureSystem and getattr(val.disclosureSystem, "EFMplugin", False)
    if not (val.validateEFMplugin):
        return
    val.params = {}
    parameterNames = ("CIK", "cik", "cikList", "cikNameList", "submissionType", "exhibitType", # CIK or cik both allowed
                      "itemsList", "accessionNumber", "entity.repFileNum",
                      "periodOfReport", "entityRegistration.fyEnd", "submissionHeader.fyEnd", "voluntaryFilerFlag",
                      "wellKnownSeasonedIssuerFlag", "shellCompanyFlag", "acceleratedFilerStatus", "smallBusinessFlag",
                      "emergingGrowthCompanyFlag", "exTransitionPeriodFlag", "invCompanyType",
                      "rptIncludeAllSeriesFlag", "rptSeriesClassInfo.seriesIds", "newClass2.seriesIds")
    # Maps parameter names to the element tag(s) that carry them inside an
    # EIS document; commented-out keys have no known EIS equivalent.
    parameterEisFileTags = {
        "cik":["depositorId", "cik", "filerId"],
        "submissionType": "submissionType",
        "itemsList": "item",
        "periodOfReport": "periodOfReport",
        #"headerFyEnd": ?,
        #"voluntaryFilerFlag": ?,
        "wellKnownSeasonedIssuerFlag": "wellKnownSeasonedIssuerFlag",
        #"shellCompanyFlag": ?,
        "acceleratedFilerStatus": "acceleratedFilerStatus",
        "smallBusinessFlag": "smallBusinessFlag",
        "emergingGrowthCompanyFlag": "emergingGrowthCompanyFlag",
        "exTransitionPeriodFlag": "exTransitionPeriodFlag",
        "invCompanyType": "invCompany",
        #"rptIncludeAllSeriesFlag": ?,
        #"rptSeriesClassInfo.seriesIds": ?,
        #"newClass2.seriesIds": ?
        }
    # retrieve any EIS file parameters first
    if val.modelXbrl.fileSource and val.modelXbrl.fileSource.isEis and hasattr(val.modelXbrl.fileSource, "eisDocument"):
        eisDoc = val.modelXbrl.fileSource.eisDocument
        for paramName, eisEltNames in parameterEisFileTags.items():
            paramQName = ModelValue.qname(paramName,noPrefixIsNoNamespace=True)
            # "{*}" matches the element in any namespace
            for eisElt in eisDoc.iter(*("{*}"+e for e in flattenSequence(eisEltNames))):
                if paramName in ("itemsList",):
                    parameters.setdefault(paramQName, []).append(eisElt.text)
                else:
                    parameters[paramQName] = eisElt.text
    if parameters: # parameter-provided CIKs and registrant names
        for paramName in parameterNames:
            p = parameters.get(ModelValue.qname(paramName,noPrefixIsNoNamespace=True))
            if p and len(p) == 2 and p[1] not in ("null", "None", None):
                v = p[1] # formula dialog and cmd line formula parameters may need type conversion
                if isinstance(v, str):
                    # boolean-flag parameters arrive as "true"/"false" strings;
                    # list parameters arrive blank-separated
                    if paramName in {"voluntaryFilerFlag", "wellKnownSeasonedIssuerFlag", "shellCompanyFlag", "acceleratedFilerStatus",
                                     "smallBusinessFlag", "emergingGrowthCompanyFlag", "exTransitionPeriodFlag", "rptIncludeAllSeriesFlag"}:
                        v = {"true":True, "false":False}.get(v)
                    elif paramName in {"itemsList", "rptSeriesClassInfo.seriesIds", "newClass2.seriesIds"}:
                        v = v.split()
                val.params[paramName] = v
        if "CIK" in val.params: # change to lower case key
            val.params["cik"] = val.params["CIK"]
            del val.params["CIK"]
        for paramName, p in parameters.items(): # allow ELOparams to be in any namespace (no xmlns="" required)
            if paramName and paramName.localName == "ELOparams" and len(p) == 2 and p[1] not in ("null", "None", None):
                try:
                    for key, value in json.loads(p[1]).items():
                        val.params[{"CIK":"cik"}.get(key,key)] = value # change upper case CIK to lower case
                except (ValueError, AttributeError, TypeError):
                    val.modelXbrl.error("arelle.testcaseVariationParameters",
                        _("parameter ELOparams has malformed JSON %(json)s object"),
                        modelXbrl=val.modelXbrl, json=p[1][:100])
                break
    # parameters may also come from report entryPoint (such as exhibitType for SDR)
    if hasattr(val.modelXbrl.modelManager, "efmFiling"):
        efmFiling = val.modelXbrl.modelManager.efmFiling
        if efmFiling.reports: # possible that there are no reports
            entryPoint = efmFiling.reports[-1].entryPoint
            for paramName in parameterNames: # cik is lower case here
                if paramName in entryPoint and entryPoint[paramName] not in (None, ""):
                    val.params[paramName] = entryPoint[paramName] # if not set uses prior value
    # exhibitType may be an attachmentType, if so remove ".INS"
    if val.params.get("exhibitType", "").endswith(".INS"):
        val.params["exhibitType"] = val.params["exhibitType"][:-4]
    if isinstance(val.params.get("cikNameList", None), str):
        # cik1, cik2, cik3 in cikList and name1|Edgar|name2|Edgar|name3 in cikNameList strings
        _filerIdentifiers = val.params["cikList"].split(",") if "cikList" in val.params else []
        _filerNames = val.params["cikNameList"].split("|Edgar|") if "cikNameList" in val.params else []
        if _filerIdentifiers:
            if len(_filerNames) not in (0, len(_filerIdentifiers)):
                val.modelXbrl.error(("EFM.6.05.24.parameters", "GFM.3.02.02"),
                    _("parameters for cikList and cikNameList different list entry counts: %(cikList)s, %(cikNameList)s"),
                    modelXbrl=val.modelXbrl, cikList=_filerIdentifiers, cikNameList=_filerNames)
            if _filerNames:
                # rebuild cikNameList as {cik: name}, padding missing names with None
                val.params["cikNameList"]=dict((_cik,_filerNames[i] if i < len(_filerNames) else None)
                                               for i, _cik in enumerate(_filerIdentifiers))
            else:
                val.params["cikNameList"]=dict((_cik,None) for _cik in _filerIdentifiers)
            del val.params["cikList"]
        elif _filerNames:
            val.modelXbrl.error(("EFM.6.05.24.parameters", "GFM.3.02.02"),
                _("parameters for cikNameList provided but missing corresponding cikList: %(cikNameList)s"),
                modelXbrl=val.modelXbrl, cikNameList=_filerNames)
            del val.params["cikNameList"] # can't process without cik's as keys
    if val.params.get("exhibitType", "") == "EX-2.01": # only applicable for edgar production and parameterized testcases
        val.EFM60303 = "EFM.6.23.01"
    else:
        val.EFM60303 = "EFM.6.03.03"
    # enable UTR checking only when a standard-taxonomy UTR concept is in the DTS
    if any((concept.qname.namespaceURI in val.disclosureSystem.standardTaxonomiesDict and concept.modelDocument.inDTS)
           for concept in val.modelXbrl.nameConcepts.get("UTR",())):
        val.validateUTR = True
    modelManager = val.modelXbrl.modelManager
    if hasattr(modelManager, "efmFiling"):
        efmFiling = modelManager.efmFiling
        efmFiling.submissionType = val.params.get("submissionType")
def validateXbrlFinally(val, *args, **kwargs):
    """Plugin hook run after core XBRL validation: apply the EFM filing rules.

    No-op unless validateXbrlStart enabled val.validateEFMplugin.
    """
    if not (val.validateEFMplugin):
        return
    modelXbrl = val.modelXbrl
    _statusMsg = _("validating {0} filing rules").format(val.disclosureSystem.name)
    modelXbrl.profileActivity()
    modelXbrl.modelManager.showStatus(_statusMsg)
    # Filing.validateFiling performs the actual EFM rule checks
    validateFiling(val, modelXbrl, isEFM=True)
    modelXbrl.profileActivity(_statusMsg, minTimeToShow=0.0)
    modelXbrl.modelManager.showStatus(None)
def validateXbrlDtsDocument(val, modelDocument, isFilingDocument, *args, **kwargs):
    """Plugin hook: run the EFM DTS-document checks when EFM validation is active."""
    if val.validateEFMplugin:
        checkDTSdocument(val, modelDocument, isFilingDocument)
def filingStart(cntlr, options, filesource, entrypointFiles, sourceZipStream=None, responseZipStream=None, *args, **kwargs):
    """Plugin hook at the start of a filing: create modelManager.efmFiling.

    Only active when the EFM disclosure system is selected.  Also notifies
    EdgarRenderer and any Security.Crypt plugins that a filing has started.
    """
    modelManager = cntlr.modelManager
    # cntlr.addToLog("TRACE EFM filing start val={} plugin={}".format(modelManager.validateDisclosureSystem, getattr(modelManager.disclosureSystem, "EFMplugin", False)))
    if modelManager.validateDisclosureSystem and getattr(modelManager.disclosureSystem, "EFMplugin", False):
        # cntlr.addToLog("TRACE EFM filing start 2 classes={} moduleInfos={}".format(pluginMethodsForClasses, modulePluginInfos))
        modelManager.efmFiling = Filing(cntlr, options, filesource, entrypointFiles, sourceZipStream, responseZipStream)
        # this event is called for filings (of instances) as well as test cases, for test case it just keeps options accessible
        for pluginXbrlMethod in pluginClassMethods("EdgarRenderer.Filing.Start"):
            pluginXbrlMethod(cntlr, options, entrypointFiles, modelManager.efmFiling)
        # check if any entrypointFiles have an encryption is specified
        if isinstance(entrypointFiles, list):
            for pluginXbrlMethod in pluginClassMethods("Security.Crypt.Filing.Start"):
                pluginXbrlMethod(modelManager.efmFiling, options, filesource, entrypointFiles, sourceZipStream)
def guiTestcasesStart(cntlr, modelXbrl, *args, **kwargs):
    """Plugin hook: in GUI mode, start an EFM Filing when a testcase is opened.

    Only applies when the loaded document is a testcase and the EFM
    disclosure system is selected.
    """
    modelManager = cntlr.modelManager
    if (cntlr.hasGui and modelXbrl.modelDocument.type in Type.TESTCASETYPES and
        modelManager.validateDisclosureSystem and getattr(modelManager.disclosureSystem, "EFMplugin", False)):
        # GUI testcases get a minimal Filing (no options/filesource yet)
        modelManager.efmFiling = Filing(cntlr)
def testcasesStart(cntlr, options, modelXbrl, *args, **kwargs):
    """Plugin hook at the start of a testcase/RSS run.

    Discards any filing-level efmFiling so each testcase variation gets its
    own, and stashes the run options on the testcase modelXbrl.
    """
    # a test or RSS cases run is starting, in which case testcaseVariation... events have unique efmFilings
    modelManager = cntlr.modelManager
    if (hasattr(modelManager, "efmFiling") and
        modelXbrl.modelDocument.type in Type.TESTCASETYPES):
        efmFiling = modelManager.efmFiling
        efmFiling.close() # not needed, dereference
        del modelManager.efmFiling
    if not hasattr(modelXbrl, "efmOptions") and options: # may have already been set by EdgarRenderer in gui startup
        modelXbrl.efmOptions = options # save options in testcase's modelXbrl
def xbrlLoaded(cntlr, options, modelXbrl, entryPoint, *args, **kwargs):
    """Plugin hook after an XBRL document loads: register it with the filing.

    Instance/inline documents become reports on the current efmFiling; an
    RSS feed restarts testcase handling via testcasesStart.
    """
    # cntlr.addToLog("TRACE EFM xbrl loaded")
    modelManager = cntlr.modelManager
    if hasattr(modelManager, "efmFiling"):
        if modelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET):
            efmFiling = modelManager.efmFiling
            efmFiling.addReport(modelXbrl)
            _report = efmFiling.reports[-1]
            _report.entryPoint = entryPoint
            # first report's accessionNumber/exhibitType win; later ones don't overwrite
            if "accessionNumber" in entryPoint and not hasattr(efmFiling, "accessionNumber"):
                efmFiling.accessionNumber = entryPoint["accessionNumber"]
            if "exhibitType" in entryPoint and not hasattr(_report, "exhibitType"):
                _report.exhibitType = entryPoint["exhibitType"]
            efmFiling.arelleUnitTests = modelXbrl.arelleUnitTests.copy() # allow unit tests to be used after instance processing finished
        elif modelXbrl.modelDocument.type == Type.RSSFEED:
            testcasesStart(cntlr, options, modelXbrl)
def xbrlRun(cntlr, options, modelXbrl, *args, **kwargs):
    """Plugin hook after validation of an instance: hand it to EdgarRenderer."""
    # cntlr.addToLog("TRACE EFM xbrl run")
    modelManager = cntlr.modelManager
    if (hasattr(modelManager, "efmFiling") and
        modelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET)):
        efmFiling = modelManager.efmFiling
        _report = efmFiling.reports[-1]
        # abort-on-major-error gating is disabled (always runs the renderer)
        if True: # HF TESTING: not (options.abortOnMajorError and len(modelXbrl.errors) > 0):
            for pluginXbrlMethod in pluginClassMethods("EdgarRenderer.Xbrl.Run"):
                pluginXbrlMethod(cntlr, options, modelXbrl, modelManager.efmFiling, _report)
def filingValidate(cntlr, options, filesource, entrypointFiles, sourceZipStream=None, responseZipStream=None, *args, **kwargs):
    """Plugin hook: cross-report (filing-level) validation.

    Performs SDR-specific checks (exactly one K SDR, one L SDR per entity,
    required extension files, US-GAAP schema, SDR exhibit types) and
    exhibit-type consistency checks across all reports of the filing.
    """
    # cntlr.addToLog("TRACE EFM xbrl validate")
    modelManager = cntlr.modelManager
    if hasattr(modelManager, "efmFiling"):
        efmFiling = modelManager.efmFiling
        reports = efmFiling.reports
        # check for dup inline and regular instances
        # SDR checks
        if any(report.documentType and report.documentType.endswith(" SDR")
               for report in reports):
            _kSdrs = [r for r in reports if r.documentType == "K SDR"]
            if not _kSdrs and efmFiling.submissionType in ("SDR", "SDR-A"):
                efmFiling.error("EFM.6.03.08.sdrHasNoKreports",
                                _("SDR filing has no K SDR reports"))
            elif len(_kSdrs) > 1:
                efmFiling.error("EFM.6.03.08.sdrHasMultipleKreports",
                                _("SDR filing has multiple K SDR reports for %(entities)s"),
                                {"entities": ", ".join(r.entityRegistrantName for r in _kSdrs),
                                 "edgarCode": "cp-0308-Sdr-Has-Multiple-K-Reports"},
                                (r.url for r in _kSdrs))
            # group L SDR reports per entity, keyed by CIK unless it is the
            # all-zero placeholder, in which case the registrant name is used
            _lSdrEntityReports = defaultdict(list)
            for r in reports:
                if r.documentType == "L SDR":
                    _lSdrEntityReports[r.entityCentralIndexKey if r.entityCentralIndexKey != "0000000000"
                                       else r.entityRegistrantName].append(r)
            for lSdrEntity, lSdrEntityReports in _lSdrEntityReports.items():
                if len(lSdrEntityReports) > 1:
                    efmFiling.error("EFM.6.05.24.multipleLSdrReportsForEntity",
                                    _("Filing entity has multiple L SDR reports: %(entity)s"),
                                    {"entity": lSdrEntity},
                                    (r.url for r in lSdrEntityReports))
            # check for required extension files (schema, pre, lbl)
            for r in reports:
                hasSch = hasPre = hasCal = hasLbl = False
                for f in r.reportedFiles:
                    if f.endswith(".xsd"): hasSch = True
                    elif f.endswith("_pre.xml"): hasPre = True
                    elif f.endswith("_cal.xml"): hasCal = True
                    elif f.endswith("_lab.xml"): hasLbl = True
                # calculation linkbase (hasCal) is tracked but not required below
                missingFiles = ""
                if not hasSch: missingFiles += ", schema"
                if not hasPre: missingFiles += ", presentation linkbase"
                if not hasLbl: missingFiles += ", label linkbase"
                if missingFiles:
                    efmFiling.error("EFM.6.03.02.sdrMissingFiles",
                                    _("%(docType)s report missing files: %(missingFiles)s"),
                                    {"docType": r.documentType, "missingFiles": missingFiles[2:],
                                     "edgarCode": "cp-0302-Sdr-Missing-Files"},
                                    r.url)
                if not r.hasUsGaapTaxonomy:
                    efmFiling.error("EFM.6.03.02.sdrMissingStandardSchema",
                                    _("%(documentType)s submission must use a US GAAP standard schema"),
                                    {"documentType": r.documentType,
                                     "edgarCode": "cp-0302-Sdr-Missing-Standard-Schema"},
                                    r.url)
                if hasattr(r, "exhibitType") and r.exhibitType not in ("EX-99.K SDR", "EX-99.L SDR", "EX-99.K SDR.INS", "EX-99.L SDR.INS"):
                    efmFiling.error("EFM.6.03.02.sdrHasNonSdrExhibit",
                                    _("An SDR filing contains non-SDR exhibit type %(exhibitType)s document type %(documentType)s"),
                                    {"documentType": r.documentType, "exhibitType": r.exhibitType,
                                     "edgarCode": "cp-0302-Sdr-Has-Non-Sdr-Exhibit"},
                                    r.url)
        # group reports by base exhibit type (part before the first ".")
        _exhibitTypeReports = defaultdict(list)
        for r in reports:
            if hasattr(r, "exhibitType") and r.exhibitType:
                _exhibitTypeReports[r.exhibitType.partition(".")[0]].append(r)
        if len(_exhibitTypeReports) > 1:
            efmFiling.error("EFM.6.03.08",
                            _("A filling contains multiple exhibit types %(exhibitTypes)s."),
                            {"exhibitTypes": ", ".join(_exhibitTypeReports.keys())},
                            [r.url for r in reports])
        for _exhibitType, _exhibitReports in _exhibitTypeReports.items():
            if _exhibitType not in ("EX-99",) and len(_exhibitReports) > 1:
                efmFiling.error("EFM.6.03.08.moreThanOneIns",
                                _("A filing contains more than one instance for exhibit type %(exhibitType)s."),
                                {"exhibitType": _exhibitType},
                                [r.url for r in _exhibitReports])
def roleTypeName(modelXbrl, roleURI, *args, **kwargs):
    """Plugin hook: derive a display name for a role URI during an EFM filing.

    Returns None when no EFM filing is in progress (letting default behavior
    apply).  Otherwise returns the text after the last '-' of the role's
    definition with any {transposed}/{unlabeled}/{elements} markers removed,
    falling back to the role URI itself when no definition exists.
    """
    if not hasattr(modelXbrl.modelManager, "efmFiling"):
        return None
    roleTypes = modelXbrl.roleTypes.get(roleURI, ())
    if not (roleTypes and roleTypes[0].definition):
        return roleURI
    trailing = roleTypes[0].definition.rpartition('-')[2]
    cleaned = re.sub(r"\{\s*(transposed|unlabeled|elements)\s*\}", "", trailing, flags=re.I)
    return cleaned.strip()
def filingEnd(cntlr, options, filesource, entrypointFiles, sourceZipStream=None, responseZipStream=None, *args, **kwargs):
    """Plugin hook at the end of a filing: finish rendering and tear down.

    Notifies EdgarRenderer, then closes and removes modelManager.efmFiling.
    """
    #cntlr.addToLog("TRACE EFM filing end")
    modelManager = cntlr.modelManager
    if hasattr(modelManager, "efmFiling"):
        for pluginXbrlMethod in pluginClassMethods("EdgarRenderer.Filing.End"):
            pluginXbrlMethod(cntlr, options, filesource, modelManager.efmFiling, sourceZipStream=sourceZipStream)
        #cntlr.addToLog("TRACE EdgarRenderer end")
        # save JSON file of instances and referenced documents
        # NOTE(review): filingReferences is built but never written or used here
        filingReferences = dict((report.url, report)
                                for report in modelManager.efmFiling.reports)
        modelManager.efmFiling.close()
        del modelManager.efmFiling
        #cntlr.addToLog("TRACE EFN filing end complete")
def rssItemXbrlLoaded(modelXbrl, rssWatchOptions, rssItem, *args, **kwargs):
    """Plugin hook for an RSS feed item load: reuse the testcase-variation path."""
    # Validate of RSS feed item (simulates filing & cmd line load events
    if hasattr(rssItem.modelXbrl, "efmOptions"):
        testcaseVariationXbrlLoaded(rssItem.modelXbrl, modelXbrl)
def rssItemValidated(val, modelXbrl, rssItem, *args, **kwargs):
    """Plugin hook after an RSS feed item validates: reuse the testcase-variation path."""
    # After validate of RSS feed item (simulates report and end of filing events)
    if hasattr(rssItem.modelXbrl, "efmOptions"):
        testcaseVariationValidated(rssItem.modelXbrl, modelXbrl)
def testcaseVariationXbrlLoaded(testcaseModelXbrl, instanceModelXbrl, modelTestcaseVariation, *args, **kwargs):
    """Plugin hook when a testcase variation's instance loads.

    Simulates the filingStart/xbrlLoaded sequence for a testcase run:
    lazily creates an efmFiling, registers the instance as a report, and
    picks up an exhibitType attribute from the variation's <instance> element.
    """
    # Validate of RSS feed item or testcase variation (simulates filing & cmd line load events
    modelManager = instanceModelXbrl.modelManager
    if (hasattr(testcaseModelXbrl, "efmOptions") and
        modelManager.validateDisclosureSystem and getattr(modelManager.disclosureSystem, "EFMplugin", False) and
        instanceModelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET)):
        cntlr = modelManager.cntlr
        options = testcaseModelXbrl.efmOptions
        entrypointFiles = [{"file":instanceModelXbrl.modelDocument.uri}]
        if not hasattr(modelManager, "efmFiling"): # first instance of filing
            modelManager.efmFiling = Filing(cntlr, options, instanceModelXbrl.fileSource, entrypointFiles, None, None, instanceModelXbrl.errorCaptureLevel)
            # this event is called for filings (of instances) as well as test cases, for test case it just keeps options accessible
            for pluginXbrlMethod in pluginClassMethods("EdgarRenderer.Filing.Start"):
                pluginXbrlMethod(cntlr, options, entrypointFiles, modelManager.efmFiling)
        modelManager.efmFiling.addReport(instanceModelXbrl)
        _report = modelManager.efmFiling.reports[-1]
        _report.entryPoint = entrypointFiles[0]
        modelManager.efmFiling.arelleUnitTests = instanceModelXbrl.arelleUnitTests.copy() # allow unit tests to be used after instance processing finished
        # check for parameters on instance
        # find the readMeFirst <instance> element matching this document's uri
        for _instanceElt in XmlUtil.descendants(modelTestcaseVariation, "*", "instance", "readMeFirst", "true", False):
            if instanceModelXbrl.modelDocument.uri.endswith(_instanceElt.text):
                if _instanceElt.get("exhibitType"):
                    _report.entryPoint["exhibitType"] = _report.exhibitType = _instanceElt.get("exhibitType")
                break
def testcaseVariationXbrlValidated(testcaseModelXbrl, instanceModelXbrl, *args, **kwargs):
modelManager = instanceModelXbrl.modelManager
if (hasattr(modelManager, "efmFiling") and
instanceModelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET)):
efmFiling = modelManager.efmFiling
_report = modelManager.efmFiling.reports[-1]
for pluginXbrlMethod in pluginClassMethods("EdgarRenderer.Xbrl.Run"):
pluginXbrlMethod(modelManager.cntlr, efmFiling.options, instanceModelXbrl, efmFiling, _report)
def testcaseVariationValidated(testcaseModelXbrl, instanceModelXbrl, errors=None, *args, **kwargs):
modelManager = instanceModelXbrl.modelManager
if (hasattr(modelManager, "efmFiling") and
instanceModelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET)):
efmFiling = modelManager.efmFiling
if isinstance(errors, list):
del efmFiling.errors[:]
# validate report types
filingValidate(efmFiling.cntlr, efmFiling.options, efmFiling.filesource, efmFiling.entrypointfiles, efmFiling.sourceZipStream, efmFiling.responseZipStream) # validate each report
if isinstance(errors, list):
errors.extend(efmFiling.errors)
# simulate filingEnd
filingEnd(modelManager.cntlr, efmFiling.options, modelManager.filesource, [])
# flush logfile (assumed to be buffered, empty the buffer for next filing)
testcaseModelXbrl.modelManager.cntlr.logHandler.flush()
def fileSourceFile(cntlr, filepath, binary, stripDeclaration):
modelManager = cntlr.modelManager
if hasattr(modelManager, "efmFiling"):
for pluginXbrlMethod in pluginClassMethods("Security.Crypt.FileSource.File"):
_file = pluginXbrlMethod(cntlr, modelManager.efmFiling, filepath, binary, stripDeclaration)
if _file is not None:
return _file
return None
def fileSourceExists(cntlr, filepath):
modelManager = cntlr.modelManager
if hasattr(modelManager, "efmFiling"):
for pluginXbrlMethod in pluginClassMethods("Security.Crypt.FileSource.Exists"):
_existence = pluginXbrlMethod(modelManager.efmFiling, filepath)
if _existence is not None:
return _existence
return None
def commandLineOptionExtender(parser, *args, **kwargs):
# extend command line options to store to database
parser.add_option("--build-deprecated-concepts-file",
action="store_true",
dest="buildDeprecatedConceptsFile",
help=_("Build EFM Validation deprecated concepts file (pre-cache before use)"))
def utilityRun(self, options, *args, **kwargs):
if options.buildDeprecatedConceptsFile:
from .Util import buildDeprecatedConceptDatesFiles
buildDeprecatedConceptDatesFiles(self)
class Filing:
def __init__(self, cntlr, options=None, filesource=None, entrypointfiles=None, sourceZipStream=None, responseZipStream=None, errorCaptureLevel=None):
self.cntlr = cntlr
self.options = options
self.filesource = filesource
self.entrypointfiles = entrypointfiles
self.sourceZipStream = sourceZipStream
self.responseZipStream = responseZipStream
self.submissionType = None
self.reports = []
self.renderedFiles = set() # filing-level rendered files
self.reportZip = None
if responseZipStream:
self.setReportZipStreamMode('w')
else:
try: #zipOutputFile only present with EdgarRenderer plugin options
if options and options.zipOutputFile:
if not os.path.isabs(options.zipOutputFile):
zipOutDir = os.path.dirname(filesource.basefile)
zipOutFile = os.path.join(zipOutDir,options.zipOutputFile)
else:
zipOutFile = options.zipOutputFile
self.reportZip = zipfile.ZipFile(zipOutFile, 'w', zipfile.ZIP_DEFLATED, True)
except AttributeError:
self.reportZip = None
self.errorCaptureLevel = errorCaptureLevel or logging._checkLevel("INCONSISTENCY")
self.errors = []
self.arelleUnitTests = {} # copied from each instance loaded
for pluginXbrlMethod in pluginClassMethods("Security.Crypt.Init"):
pluginXbrlMethod(self, options, filesource, entrypointfiles, sourceZipStream)
def setReportZipStreamMode(self, mode): # mode is 'w', 'r', 'a'
# required to switch in-memory zip stream between write, read, and append modes
if self.responseZipStream:
if self.reportZip: # already open, close and reseek underlying stream
self.reportZip.close()
self.responseZipStream.seek(0)
self.reportZip = zipfile.ZipFile(self.responseZipStream, mode, zipfile.ZIP_DEFLATED, True)
def close(self):
''' MetaFiling.json (not needed?) list of all files written out
_reports = dict((report.basename, report.json) for report in self.reports)
_reports["filing"] = {"renderedFiles": sorted(self.renderedFiles)}
if self.options.logFile:
_reports["filing"]["logFile"] = self.options.logFile
if self.reportZip:
self.reportZip.writestr("MetaFiling.json", json.dumps(_reports, sort_keys=True, indent=jsonIndent))
else:
try:
if self.options.reportsFolder:
with open(os.path.join(self.options.reportsFolder, "MetaFiling.json"), mode='w') as f:
json.dump(_reports, f, sort_keys=True, indent=jsonIndent)
except AttributeError: # no reportsFolder attribute
pass
'''
if self.options and self.options.logFile:
if self.reportZip and self.reportZip.fp is not None: # open zipfile
_logFile = self.options.logFile
_logFileExt = os.path.splitext(_logFile)[1]
if _logFileExt == ".xml":
_logStr = self.cntlr.logHandler.getXml(clearLogBuffer=False) # may be saved to file later or flushed in web interface
elif _logFileExt == ".json":
_logStr = self.cntlr.logHandler.getJson(clearLogBuffer=False)
else: # no ext or _logFileExt == ".txt":
_logStr = self.cntlr.logHandler.getText(clearLogBuffer=False)
self.reportZip.writestr(_logFile, _logStr)
#else:
# with open(_logFile, "wt", encoding="utf-8") as fh:
# fh.write(_logStr)
if self.reportZip: # ok to close if already closed
self.reportZip.close()
self.__dict__.clear() # dereference all contents
def addReport(self, modelXbrl):
_report = Report(modelXbrl)
self.reports.append(_report)
def error(self, messageCode, message, messageArgs=None, file=None):
if file and len(self.entrypointfiles) > 0:
# relativize file(s)
if isinstance(file, _STR_BASE):
file = (file,)
if isinstance(self.entrypointfiles[0], dict):
_baseFile = self.entrypointfiles[0].get("file", ".")
else:
_baseFile = self.entrypointfiles[0]
relFiles = [relativeUri(_baseFile, f) for f in file]
else:
relFiles = None
self.cntlr.addToLog(message, messageCode=messageCode, messageArgs=messageArgs, file=relFiles, level=logging.ERROR)
self.errors.append(messageCode)
@property
def hasInlineReport(self):
return any(getattr(report, "isInline", False) for report in self.reports)
def writeFile(self, filepath, data):
# write the data (string or binary)
for pluginXbrlMethod in pluginClassMethods("Security.Crypt.Write"):
if pluginXbrlMethod(self, filepath, data):
return
with io.open(filepath, "wt" if isinstance(data, str) else "wb") as fh:
fh.write(data)
class Report:
REPORT_ATTRS = {"DocumentType", "DocumentPeriodEndDate", "EntityRegistrantName",
"EntityCentralIndexKey", "CurrentFiscalYearEndDate", "DocumentFiscalYearFocus"}
def lc(self, name):
return name[0].lower() + name[1:]
def __init__(self, modelXbrl):
self.isInline = modelXbrl.modelDocument.type in (Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET)
self.url = modelXbrl.modelDocument.uri
self.reportedFiles = set()
if modelXbrl.modelDocument.type == Type.INLINEXBRLDOCUMENTSET:
self.basenames = []
self.filepaths = []
for ixDoc in sorted(modelXbrl.modelDocument.referencesDocument.keys(), key=lambda d: d.objectIndex): # preserve order
if ixDoc.type == Type.INLINEXBRL:
self.basenames.append(ixDoc.basename)
self.filepaths.append(ixDoc.filepath)
self.reportedFiles.add(ixDoc.basename)
else:
self.basenames = [modelXbrl.modelDocument.basename]
self.filepaths = [modelXbrl.modelDocument.filepath]
self.reportedFiles.add(modelXbrl.modelDocument.basename)
for attrName in Report.REPORT_ATTRS:
setattr(self, self.lc(attrName), None)
self.instanceName = self.basenames[0]
for f in modelXbrl.facts:
cntx = f.context
if cntx is not None and cntx.isStartEndPeriod and not cntx.hasSegment:
if f.qname is not None and f.qname.localName in Report.REPORT_ATTRS and f.xValue:
setattr(self, self.lc(f.qname.localName), f.xValue)
self.reportedFiles |= referencedFiles(modelXbrl)
self.renderedFiles = set()
self.hasUsGaapTaxonomy = False
sourceDir = os.path.dirname(modelXbrl.modelDocument.filepath)
# add referenced files that are xbrl-referenced local documents
refDocUris = set()
def addRefDocs(doc):
if doc.type == Type.INLINEXBRLDOCUMENTSET:
for ixDoc in doc.referencesDocument.keys():
if ixDoc.type == Type.INLINEXBRL:
addRefDocs(ixDoc)
return
for refDoc in doc.referencesDocument.keys():
_file = refDoc.filepath
if refDoc.uri not in refDocUris:
refDocUris.add(refDoc.uri)
if refDoc.filepath and refDoc.filepath.startswith(sourceDir):
self.reportedFiles.add(refDoc.filepath[len(sourceDir)+1:]) # add file name within source directory
addRefDocs(refDoc)
if refDoc.type == Type.SCHEMA and refDoc.targetNamespace:
nsAuthority = authority(refDoc.targetNamespace, includeScheme=False)
nsPath = refDoc.targetNamespace.split('/')
if len(nsPath) > 2:
if nsAuthority in ("fasb.org", "xbrl.us") and nsPath[-2] == "us-gaap":
self.hasUsGaapTaxonomy = True
addRefDocs(modelXbrl.modelDocument)
def close(self):
self.__dict__.clear() # dereference all contents
@property
def json(self): # stringify un-jsonable attributes
return dict((name, value if isinstance(value,(str,int,float,Decimal,list,dict))
else sorted(value) if isinstance(value, set)
else str(value))
for name, value in self.__dict__.items())
__pluginInfo__ = {
# Do not use _( ) in pluginInfo itself (it is applied later, after loading
'name': 'Validate EFM',
'version': '1.20.2', # SEC EDGAR release 20.2
'description': '''EFM Validation.''',
'license': 'Apache-2',
'import': ('transforms/SEC',), # SEC inline can use SEC transformations
'author': 'Mark V Systems',
'copyright': '(c) Copyright 2013-15 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'DisclosureSystem.Types': dislosureSystemTypes,
'DisclosureSystem.ConfigURL': disclosureSystemConfigURL,
'Validate.XBRL.Start': validateXbrlStart,
'Validate.XBRL.Finally': validateXbrlFinally,
'Validate.XBRL.DTS.document': validateXbrlDtsDocument,
'ModelXbrl.RoleTypeName': roleTypeName,
'CntlrCmdLine.Filing.Start': filingStart,
'CntlrWinMain.Xbrl.Loaded': guiTestcasesStart,
'Testcases.Start': testcasesStart,
'CntlrCmdLine.Options': commandLineOptionExtender,
'CntlrCmdLine.Utility.Run': utilityRun,
'CntlrCmdLine.Xbrl.Loaded': xbrlLoaded,
'CntlrCmdLine.Xbrl.Run': xbrlRun,
'CntlrCmdLine.Filing.Validate': filingValidate,
'CntlrCmdLine.Filing.End': filingEnd,
'RssItem.Xbrl.Loaded': rssItemXbrlLoaded,
'Validate.RssItem': rssItemValidated,
'TestcaseVariation.Xbrl.Loaded': testcaseVariationXbrlLoaded,
'TestcaseVariation.Xbrl.Validated': testcaseVariationXbrlValidated,
'TestcaseVariation.Validated': testcaseVariationValidated,
'FileSource.File': fileSourceFile,
'FileSource.Exists': fileSourceExists
}
| [
"fischer@markv.com"
] | fischer@markv.com |
a804d5d5ce3df83393ae6bff5898fb1f6cc6e43b | e780a5bd72f98ca2513c993d64a85b08578166a6 | /buildout-cache/eggs/plone.autoform-1.7.5-py2.7.egg/plone/autoform/form.py | 5f38b7215aa23d0fecdc261c29372fd0c2f0f2dc | [] | no_license | vedantc98/Plone-test | 023246597ffe848e2a49b9f65742ff49127b190b | 9fd520fc78481e2c0b9b7ec427821e7f961c777e | refs/heads/master | 2021-03-30T22:14:33.368739 | 2018-03-11T19:22:58 | 2018-03-11T19:22:58 | 124,671,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | # -*- coding: utf-8 -*-
from plone.autoform.base import AutoFields
from plone.autoform.interfaces import IAutoExtensibleForm
from plone.autoform.interfaces import IAutoObjectSubForm
from plone.z3cform.fieldsets.extensible import ExtensibleForm
from zope.interface import implementer
_marker = object()
@implementer(IAutoExtensibleForm)
class AutoExtensibleForm(AutoFields, ExtensibleForm):
"""Mixin class for z3c.form forms that support fields extracted from
a schema
"""
showEmptyGroups = False
@property
def schema(self):
raise NotImplementedError(
'The class deriving from AutoExtensibleForm must have a '
'\'schema\' property'
)
@property
def additionalSchemata(self):
"""Default to there being no additional schemata
"""
return ()
def updateFields(self):
self.updateFieldsFromSchemata()
super(AutoExtensibleForm, self).updateFields()
@implementer(IAutoObjectSubForm)
class AutoObjectSubForm(AutoFields):
"""A Mixin class for z3c.form.object.ObjectSubForm forms that supports
fields being updated from hints in a schema.
"""
@property
def schema(self):
return self.__parent__.field.schema
def setupFields(self):
self.updateFieldsFromSchemata()
| [
"vedantc98@gmail.com"
] | vedantc98@gmail.com |
95135697228d557cb3d8a41bb0ecbf01bf2709f0 | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/tensorflow/python/util/compat.py | a24a52eea9710e98bd56025457e6fda5449a5197 | [
"Apache-2.0"
] | permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 4,224 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for Python 2 vs. 3 compatibility.
## Conversion routines
In addition to the functions below, `as_str` converts an object to a `str`.
## Types
The compatibility module also provides the following types:
* `bytes_or_text_types`
* `complex_types`
* `integral_types`
* `real_types`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers as _numbers
import numpy as _np
import six as _six
from tensorflow.python.util.tf_export import tf_export
def as_bytes(bytes_or_text, encoding='utf-8'):
"""Converts either bytes or unicode to `bytes`, using utf-8 encoding for text.
Args:
bytes_or_text: A `bytes`, `str`, or `unicode` object.
encoding: A string indicating the charset for encoding unicode.
Returns:
A `bytes` object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string.
"""
if isinstance(bytes_or_text, _six.text_type):
return bytes_or_text.encode(encoding)
elif isinstance(bytes_or_text, bytes):
return bytes_or_text
else:
raise TypeError('Expected binary or unicode string, got %r' %
(bytes_or_text,))
def as_text(bytes_or_text, encoding='utf-8'):
"""Returns the given argument as a unicode string.
Args:
bytes_or_text: A `bytes`, `str`, or `unicode` object.
encoding: A string indicating the charset for decoding unicode.
Returns:
A `unicode` (Python 2) or `str` (Python 3) object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string.
"""
if isinstance(bytes_or_text, _six.text_type):
return bytes_or_text
elif isinstance(bytes_or_text, bytes):
return bytes_or_text.decode(encoding)
else:
raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text)
# Convert an object to a `str` in both Python 2 and 3.
if _six.PY2:
as_str = as_bytes
tf_export('compat.as_bytes', 'compat.as_str')(as_bytes)
tf_export('compat.as_text')(as_text)
else:
as_str = as_text
tf_export('compat.as_bytes')(as_bytes)
tf_export('compat.as_text', 'compat.as_str')(as_text)
@tf_export('compat.as_str_any')
def as_str_any(value):
"""Converts to `str` as `str(value)`, but use `as_str` for `bytes`.
Args:
value: A object that can be converted to `str`.
Returns:
A `str` object.
"""
if isinstance(value, bytes):
return as_str(value)
else:
return str(value)
@tf_export('compat.path_to_str')
def path_to_str(path):
"""Returns the file system path representation of a `PathLike` object, else as it is.
Args:
path: An object that can be converted to path representation.
Returns:
A `str` object.
"""
if hasattr(path, '__fspath__'):
path = as_str_any(path.__fspath__())
return path
# Numpy 1.8 scalars don't inherit from numbers.Integral in Python 3, so we
# need to check them specifically. The same goes from Real and Complex.
integral_types = (_numbers.Integral, _np.integer)
tf_export('compat.integral_types').export_constant(__name__, 'integral_types')
real_types = (_numbers.Real, _np.integer, _np.floating)
tf_export('compat.real_types').export_constant(__name__, 'real_types')
complex_types = (_numbers.Complex, _np.number)
tf_export('compat.complex_types').export_constant(__name__, 'complex_types')
# Either bytes or text.
bytes_or_text_types = (bytes, _six.text_type)
tf_export('compat.bytes_or_text_types').export_constant(__name__,
'bytes_or_text_types')
| [
"thomas.warfel@pnnl.gov"
] | thomas.warfel@pnnl.gov |
5bace3fe8c8b3966f2c6c49d67a01a79ff42c1a1 | 4bc6028ed8ba403b69adfd6f5cbd139baece0f4d | /basic/hello_world.py | 2bc192cf9dc891b27de6eabd3ffdaa95d2fe90f4 | [] | no_license | xrw560/learn-pyspark | 0ef9ed427ff887ceed1c5e5773bf97ed25ecae04 | 618d16dafd73165e714111670119d9cdecc0bf1f | refs/heads/master | 2020-03-07T00:12:36.885000 | 2019-01-04T09:51:32 | 2019-01-04T09:51:32 | 127,152,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/usr/bin/python
# -*- encoding:utf-8 -*-
if __name__ == '__main__':
"""
Hello World 程序
"""
print "hello world!!!!!!!!!!!!!!!"
| [
"ncutits@163.com"
] | ncutits@163.com |
c1a1eebde481557cb5e8673730e08fa7eff20636 | d191a04a3ded41175ea84ae88ebddb4f262b7fb1 | /Dynamic_program/33_palindromic_substring.py | d7e049406dc272a2923b0bce1e168fc05b3c3c98 | [] | no_license | YLyeliang/now_leet_code_practice | ae4aea945bae72ec08b11e57a8f8a3e81e704a54 | 204d770e095aec43800a9771fe88dd553463d2f7 | refs/heads/master | 2022-06-13T20:22:51.266813 | 2022-05-24T05:29:32 | 2022-05-24T05:29:32 | 205,753,056 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | # Given a string, your task is to count how many palindromic substrings in this string.
#
# The substrings with different start indexes or end indexes are counted as different substrings even they consist of same characters.
#
# Example 1:
#
# Input: "abc"
# Output: 3
# Explanation: Three palindromic strings: "a", "b", "c".
#
#
# Example 2:
#
# Input: "aaa"
# Output: 6
# Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa".
#
#
# Note:
#
# The input string length won't exceed 1000.
# 问题:给定字符串s,求字符串中的回文子串的数目。相同子串,不同起始点算作不同子串。
# 分析;DP方法。考虑回文子串的索引序列 i, i+1, ..., j-1, j.如果s[i]==s[j]且s[i+1:j-1]为回文,那么这一段则为回文。
# 如果回文子串的长度小于3,只需要判断s[i]==s[j]即可。
# 根据上述表述。遍历字符串s.构建二维dp数组,dp[i][j]表示索引段是否为回文。则遍历时如果是回文,result+=1.
class Solution:
def countSubstrings(self, s: str) -> int:
n = len(s)
dp = [[0] * n for _ in range(n)]
res = 0
for i in range(n - 1, -1, -1):
for j in range(i, n):
dp[i][j] = s[i] == s[j] and ((j - i + 1) < 3 or dp[i + 1][j - 1])
res += dp[i][j]
return res
| [
"k87974@163.com"
] | k87974@163.com |
e1a70e8fa8ccf663700a94f2d16d3b20110080f5 | 03d07de94fc22d1583c45ca84c711a06df8a40ff | /lc/graph/lc_207_course-schedule.py | f5045d8a606e37fc0a05f3d15c5182ff10bcd859 | [] | no_license | gaopenghigh/algorithm | 94e04293c69a2ad6903495e1cf6e1b75556535bb | f5d78c98c7201c56f9d4c3a9c0c76e9447a17985 | refs/heads/master | 2022-03-11T18:46:38.712923 | 2022-02-20T14:20:54 | 2022-02-20T14:20:54 | 54,484,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | # 207. 课程表
# 你这个学期必须选修 numCourses 门课程,记为 0 到 numCourses - 1 。
# 在选修某些课程之前需要一些先修课程。 先修课程按数组 prerequisites 给出,其中 prerequisites[i] = [ai, bi] ,表示如果要学习课程 ai 则 必须 先学习课程 bi 。
# 例如,先修课程对 [0, 1] 表示:想要学习课程 0 ,你需要先完成课程 1 。
# 请你判断是否可能完成所有课程的学习?如果可以,返回 true ;否则,返回 false 。
# 抽象为判断一幅有向图是否有环
# 抽象为判断一幅有向图是否有环
# 使用 DFS 遍历
# 不能简单地记录哪些顶点已经遍历过了,遍历碰到一个曾经遍历过的顶点,并不代表有环
# 比如下图:
# A -> B -> C <--+
# |______________|
# 从 B 到 C 的时候发现 C 已经被遍历过了,因为有直接从 A 到 C 的边,但并没有环
# 所以需要记录“当前遍历路径”上的顶点,也就是在递归栈上的顶点,这些顶点是“正在被搜索”的状态
#
# 使用邻接表来表示图
# 使用 2 个集合,一个存放“正在被搜索”的顶点,一个存放“还没被遍历到”的顶点
class Solution:
def __init__(self) -> None:
self.graph = []
self.n = 0
def _build_graph(self, n, edges):
self.n = n
self.graph = [[] for _ in range(n)]
for edge in edges:
src, dst = edge[0], edge[1]
self.graph[src].append(dst)
def has_cycle_dfs(self, u, unsearched, searching):
if u in searching:
return True
unsearched.remove(u)
searching.add(u)
for v in self.graph[u]:
if v in searching:
return True
if v in unsearched:
if self.has_cycle_dfs(v, unsearched, searching):
return True
searching.remove(u)
return False
def has_cycle(self):
unsearched = set()
searching = set()
for i in range(self.n):
unsearched.add(i)
# 这个图不一定是全连通的,所以需要每个节点都尝试一下
for i in range(self.n):
if i in unsearched:
if self.has_cycle_dfs(i, unsearched, searching):
return True
return False
def canFinish(self, numCourses: int, prerequisites: list[list[int]]) -> bool:
self._build_graph(numCourses, prerequisites)
# print(self.graph)
return not self.has_cycle()
if __name__ == '__main__':
s = Solution()
print(s.canFinish(20, [[0,10],[3,18],[5,5],[6,11],[11,14],[13,1],[15,1],[17,4]])) | [
"jh.gao@ucloud.cn"
] | jh.gao@ucloud.cn |
c55a5ab14f9f3d22bf01db0767cfa494e6709ed6 | 8cb0ace888d6dfe0b781e21906cab598cfb3783d | /apps/wop/wop/widgets/level_widget.py | c5349c5c427f2b346893af999b365c1f790900e3 | [] | no_license | DerThorsten/appdev | fefe1929af15f33d7848f03664b345df0f2aeaaf | 5c24328b8ba08ad57aa6b77296d68ece00b4154b | refs/heads/master | 2021-01-17T05:10:48.033350 | 2016-08-06T21:01:49 | 2016-08-06T21:01:49 | 33,121,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,725 | py | from kivy.logger import Logger
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.graphics import *
from kivy.properties import NumericProperty, ReferenceListProperty,\
ObjectProperty
import numpy
import wop.level
Builder.load_string("""
<LevelWidget>:
size_hint: (1, 1)
levelCanvasWidget: levelCanvasWidget
zoom_outButton: zoom_outButton
zoom_inButton: zoom_inButton
createAndSelectWidget: createAndSelectWidget
orientation: "vertical"
LevelCanvasWidget:
text: "foo"
size_hint: (0.1,0.9)
id: levelCanvasWidget
BoxLayout:
size_hint: (1, 0.1)
orientation: "horizontal"
spacing: self.width/10.0
CreateAndSelectWidget:
id: createAndSelectWidget
size_hint: (1, 1)
BoxLayout:
size_hint: (0.25, 1)
orientation: "horizontal"
spacing: self.width/10.0
BoxLayout:
size_hint: (0.5, 1)
orientation: 'vertical'
spacing: self.height/10.0
Button:
id: zoom_outButton
text: "-"
color: (0.3,1,0.3,1)
font_size: 50
font_name: "CBlocks"
background_color: (0,0,0,0)
on_release: root.zoom_out()
Button:
id: zoom_inButton
text: "+"
color: (1,0.3,0.3,1)
font_size: 50
font_name: "CBlocks"
background_color: (0,0,0,0)
on_release: root.zoom_in()
Button:
size_hint: (1, 1)
id: menuButton
color: (0.2,0.2,0.6,1)
font_size: 30
font_name: "CBlocks"
text: "menu"
background_color: (0,0,0,0)
on_press: root.screen_manager.current = 'main_menu_screen'
""")
class LevelWidget(BoxLayout):
levelCanvasWidget = ObjectProperty(None)
createAndSelectWidget = ObjectProperty(None)
screen_manager = ObjectProperty(None)
def __init__(self,*arg,**kwarg):
super(LevelWidget,self).__init__(*arg, **kwarg)
self.level = None
def on_global_pause(self):
if self.level is not None:
self.level.on_global_pause()
def on_global_resume(self):
if self.level is not None:
self.level.on_global_resume()
def on_pre_leave(self):
self.levelCanvasWidget.on_pre_leave()
self._kill_level()
def on_leave(self):
self.levelCanvasWidget.on_leave()
def on_pre_enter(self):
self.levelCanvasWidget.on_pre_enter()
self._init_level()
def on_enter(self):
self.levelCanvasWidget.on_enter()
#self._init_level()
def zoom_in(self):
s = self.get_scale()
self.set_scale(s*1.25)
def zoom_out(self):
s = self.get_scale()
ns = s/1.25
if ns > 1.0:
self.set_scale(ns)
def set_scale(self, scale):
self.levelCanvasWidget.set_scale(scale)
def get_scale(self):
print self.pos,self.size
return self.levelCanvasWidget.get_scale()
def get_offset(self):
return self.levelCanvasWidget.get_offset()
def set_offset(self, offset):
return self.levelCanvasWidget.set_offset(offset)
def render(self):
self.levelCanvasWidget.render()
def add_render_item(self, renderItem, z):
self.levelCanvasWidget.add_render_item(renderItem,z)
def set_level(self, level):
assert self.level is None
self.level = level
def _init_level(self):
# load level
#self.level = wop.level.SimpleLevel(gameRender=self.levelCanvasWidget)
assert self.level is not None
self.level.initPhysics()
# pass the level to the levelCanvasWidget
self.levelCanvasWidget.set_level(self.level)
wmManager = self.createAndSelectWidget.wmManager
#
self.level.set_wm_manager(wmManager)
wmManager.setLevel(level = self.level)
# start the level (start physic simulation)
# will schedule level.updateCaller
self.level.level_widget = self
self.level.start_level()
def _kill_level(self):
self.level.stop_level()
self.level = None
def level_finished(self):
self.screen_manager.current = 'main_menu_screen'
| [
"thorsten.beier@iwr.uni-heidelberg.de"
] | thorsten.beier@iwr.uni-heidelberg.de |
8fdb45f49bc6c12d0a768ded289224f400e0a7cc | d37b1eb2dd6a0e917c4a01b55771e45c5f3374a8 | /web/regression/runtests.py | 8d2a886ac2427bf8a75e091fae4f319f8afc3747 | [
"PostgreSQL"
] | permissive | dmrub/pgadmin4 | 33715dc0e9637bb311922f857c80148602ef2930 | 8765db918b566a6984b7218640a599507f60942c | refs/heads/master | 2021-01-20T04:29:05.860660 | 2017-01-11T15:03:06 | 2017-01-11T15:03:06 | 78,627,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,677 | py | #############################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##############################################################
""" This file collect all modules/files present in tests directory and add
them to TestSuite. """
from __future__ import print_function
import argparse
import os
import sys
import signal
import atexit
import logging
import traceback
# On Python < 2.7 use the unittest2 backport under the stdlib name so the
# rest of the file can refer to a single `unittest` module.
if sys.version_info < (2, 7):
    import unittest2 as unittest
else:
    import unittest
logger = logging.getLogger(__name__)
file_name = os.path.basename(__file__)
from testscenarios.scenarios import generate_scenarios
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
# Set sys path to current directory so that we can import pgadmin package
root = os.path.dirname(CURRENT_PATH)
if sys.path[0] != root:
    sys.path.insert(0, root)
# Run from the project root so relative paths (e.g. "setup.py" below) resolve.
os.chdir(root)
from pgadmin import create_app
import config
from regression import test_setup
# Delete the SQLite config DB file if it exists, so every run starts from a
# freshly-initialized settings database.
if os.path.isfile(config.TEST_SQLITE_PATH):
    os.remove(config.TEST_SQLITE_PATH)
config.TESTING_MODE = True
pgadmin_credentials = test_setup.config_data
# Default the setup credentials to empty; overridden below when the test
# config file supplies login details.
os.environ['PGADMIN_SETUP_EMAIL'] = ''
os.environ['PGADMIN_SETUP_PASSWORD'] = ''
# Export the configured login credentials via environment variables so the
# setup script (exec'd below) can create the initial pgAdmin user with them.
if pgadmin_credentials:
    if 'pgAdmin4_login_credentials' in pgadmin_credentials:
        if all(item in pgadmin_credentials['pgAdmin4_login_credentials']
               for item in ['login_username', 'login_password']):
            pgadmin_credentials = pgadmin_credentials[
                'pgAdmin4_login_credentials']
            os.environ['PGADMIN_SETUP_EMAIL'] = pgadmin_credentials[
                'login_username']
            os.environ['PGADMIN_SETUP_PASSWORD'] = pgadmin_credentials[
                'login_password']
# Execute the setup file to initialize the configuration database.
exec (open("setup.py").read())
# Get the config database schema version. We store this in pgadmin.model
# as it turns out that putting it in the config files isn't a great idea
from pgadmin.model import SCHEMA_VERSION
# Delay the import of test_utils as it needs the updated config.SQLITE_PATH
from regression import test_utils
config.SETTINGS_SCHEMA_VERSION = SCHEMA_VERSION
# Override some other defaults (quieter console logging during test runs)
from logging import WARNING
config.CONSOLE_LOG_LEVEL = WARNING
# Create the Flask app and a test client; CSRF is disabled so the test
# client can POST forms without fetching tokens first.
app = create_app()
app.config['WTF_CSRF_ENABLED'] = False
test_client = app.test_client()
# Handler that removes server-side test objects on exit / signal.
drop_objects = test_utils.get_cleanup_handler(test_client)
def get_suite(module_list, test_server, test_app_client):
    """
    Build a unittest TestSuite from the registered test generator classes.

    Each generator is instantiated, wired up with the Flask app, the test
    client and the server configuration, and then expanded into concrete
    scenarios which are added to the suite.

    :param module_list: list of (module name, generator class) tuples
    :type module_list: list
    :param test_server: server details
    :type test_server: dict
    :param test_app_client: test client
    :type test_app_client: pgadmin app object
    :return: test suite populated with the generated scenarios
    :rtype: TestSuite
    """
    suite = unittest.TestSuite()
    # Only the classes matter here; the registry keys were used for sorting.
    generator_classes = [klass for _, klass in module_list]
    for generator_class in generator_classes:
        generator = generator_class()
        generator.setApp(app)
        generator.setTestClient(test_app_client)
        generator.setTestServer(test_server)
        suite.addTests(generate_scenarios(generator))
    return suite
def get_test_modules(arguments):
    """
    Load all test generator modules from the tests directories into the
    testing environment and return them in a deterministic order.

    :param arguments: command line arguments; ``arguments['pkg']`` selects
        a single package's test suite, or ``None``/``"all"`` for everything
    :type arguments: dict
    :return: sorted list of (module name, generator class) tuples
    :rtype: list
    """
    from pgadmin.utils.route import TestsGeneratorRegistry

    pkg = arguments['pkg']
    # Load either every generator under pgadmin, or just the requested
    # package's tests sub-package.
    if pkg is None or pkg == "all":
        TestsGeneratorRegistry.load_generators('pgadmin')
    else:
        TestsGeneratorRegistry.load_generators('pgadmin.%s.tests' % pkg)

    # Sort by module name so the suite executes test cases sequentially in
    # a stable order.
    return sorted(TestsGeneratorRegistry.registry.items(),
                  key=lambda entry: entry[0])
def add_arguments():
    """
    Parse the command line arguments for the regression runner (currently
    just the optional project package name, e.g. ``browser``).

    :return: parsed command line arguments
    :rtype: argparse namespace
    """
    arg_parser = argparse.ArgumentParser(
        description='Test suite for pgAdmin4')
    arg_parser.add_argument(
        '--pkg',
        help='Executes the test cases of particular package')
    return arg_parser.parse_args()
def sig_handler(signo, frame):
    """Signal handler (registered in __main__ for SIGTERM/SIGABRT/SIGQUIT/
    SIGINT) that drops any server-side test objects before the run dies."""
    drop_objects()
def get_tests_result(test_suite):
    """
    Summarize a finished unittest result object.

    :param test_suite: result object returned by ``TextTestRunner.run``
    :return: tuple of (number of tests ran, list of failed/errored test
        class names, list of skipped test class names). On any unexpected
        error a ``(0, [], [])`` summary is returned (after printing the
        traceback) so the caller's 3-way unpacking never crashes.
    """
    try:
        total_ran = test_suite.testsRun
        failed_cases_result = []
        skipped_cases_result = []
        if total_ran:
            if test_suite.failures:
                for failed_case in test_suite.failures:
                    failed_cases_result.append(
                        _extract_class_name(failed_case))
            if test_suite.errors:
                for error_case in test_suite.errors:
                    class_name = _extract_class_name(error_case)
                    # Avoid listing a class twice when it both failed and
                    # errored.
                    if class_name not in failed_cases_result:
                        failed_cases_result.append(class_name)
            if test_suite.skipped:
                for skip_test in test_suite.skipped:
                    class_name = _extract_class_name(skip_test)
                    # De-duplicate within the skipped list itself (the
                    # previous code checked failed_cases_result here, which
                    # both allowed duplicates and hid skips of failed
                    # classes).
                    if class_name not in skipped_cases_result:
                        skipped_cases_result.append(class_name)
        return total_ran, failed_cases_result, skipped_cases_result
    except Exception:
        traceback.print_exc(file=sys.stderr)
        # Previously this fell through and implicitly returned None, which
        # crashed the caller's tuple unpacking; return an empty summary.
        return 0, [], []


def _extract_class_name(result_entry):
    """Return the bare test class name from a (test, message) result tuple.

    ``str(test)`` looks like ``test_method (pkg.module.ClassName)``: take
    the text after the last dot, then strip the trailing parenthesis.
    """
    return str(result_entry[0]).split('.')[-1].split()[0].strip(')')
class StreamToLogger(object):
    """File-like object that duplicates everything written to it: the text
    goes to the real stderr (console) and, line by line, to the given
    logger. Used in __main__ to replace ``sys.stderr`` so test output lands
    both on the console and in the regression log file.
    """

    def __init__(self, logger, log_level=logging.INFO):
        # Keep a handle on the real console stream so output stays visible.
        self.terminal = sys.stderr
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def write(self, buf):
        """
        This function writes the log in the logger file as well as on console

        :param buf: log message
        :type buf: str
        :return: None
        """
        self.terminal.write(buf)
        # Log each line separately so multi-line writes produce one log
        # record per line.
        for line in buf.rstrip().splitlines():
            self.logger.log(self.log_level, line.rstrip())

    def flush(self):
        # Forward flush to the underlying console stream (previously a
        # no-op, which could leave console output sitting in the buffer).
        self.terminal.flush()
if __name__ == '__main__':
    # Script entry point: set up cleanup hooks and logging, run the suite
    # once per configured server, then print a per-server summary.
    test_result = dict()
    # Register cleanup function to cleanup on exit
    atexit.register(drop_objects)
    # Set signal handler for cleanup
    signal_list = dir(signal)
    required_signal_list = ['SIGTERM', 'SIGABRT', 'SIGQUIT', 'SIGINT']
    # Get the OS wise supported signals
    supported_signal_list = [sig for sig in required_signal_list if
                             sig in signal_list]
    for sig in supported_signal_list:
        signal.signal(getattr(signal, sig), sig_handler)
    # Set basic logging configuration for log file
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s:%(levelname)s:%(name)s:%(message)s'
                        ,
                        filename=CURRENT_PATH + "/" + "regression.log",
                        filemode='w'
                        )
    # Create logger to write log in the logger file as well as on console
    stderr_logger = logging.getLogger('STDERr')
    sys.stderr = StreamToLogger(stderr_logger, logging.ERROR)
    args = vars(add_arguments())
    # Get test module list
    test_module_list = get_test_modules(args)
    # Login the test client
    test_utils.login_tester_account(test_client)
    servers_info = test_utils.get_config_data()
    node_name = "all"
    if args['pkg'] is not None:
        # Only the last dotted component names the browser tree node.
        node_name = args['pkg'].split('.')[-1]
    try:
        # Run the whole suite once against every configured server.
        for server in servers_info:
            print("\n=============Running the test cases for '%s'============="
                  % server['name'], file=sys.stderr)
            # Create test server
            test_utils.create_parent_server_node(server, node_name)
            suite = get_suite(test_module_list, server, test_client)
            tests = unittest.TextTestRunner(stream=sys.stderr,
                                            descriptions=True,
                                            verbosity=2).run(suite)
            ran_tests, failed_cases, skipped_cases = \
                get_tests_result(tests)
            # Per-server results: [total ran, failed names, skipped names].
            test_result[server['name']] = [ran_tests, failed_cases,
                                           skipped_cases]
            # Delete test server
            # test_utils.delete_test_server(test_client)
    except SystemExit:
        drop_objects()
    print("\n==============================================================="
          "=======", file=sys.stderr)
    print("Test Result Summary", file=sys.stderr)
    print(
        "==================================================================="
        "===\n", file=sys.stderr)
    for server_res in test_result:
        failed_cases = "\n\t\t".join(test_result[server_res][1])
        skipped_cases = "\n\t\t".join(test_result[server_res][2])
        total_failed = len(test_result[server_res][1])
        total_skipped = len(test_result[server_res][2])
        total_passed_cases = int(
            test_result[server_res][0]) - total_failed - total_skipped
        print(
            "%s:\n\n\t%s test%s passed\n\t%s test%s failed%s%s"
            "\n\t%s test%s skipped%s%s\n" %
            (server_res, total_passed_cases,
             (total_passed_cases != 1 and "s" or ""),
             total_failed, (total_failed != 1 and "s" or ""),
             (total_failed != 0 and ":\n\t\t" or ""), failed_cases,
             total_skipped, (total_skipped != 1 and "s" or ""),
             (total_skipped != 0 and ":\n\t\t" or ""), skipped_cases),
            file=sys.stderr)
    print(
        "==================================================================="
        "===\n", file=sys.stderr)
    print("Please check output in file: %s/regression.log\n" % CURRENT_PATH)
| [
"dpage@pgadmin.org"
] | dpage@pgadmin.org |
b2f8c3bb75ab6abe1aa0d310e6488a8a92ac25e9 | 12c18d9d0b210698f61f70f458a46647fc99356c | /bsp53.py | 91b6673439d0382aa616430a2978e32aa0cc3d8a | [] | no_license | ddgvv/dd | 503a64b08e12de79b5812fd39a76c826aadbd06c | 2e07d92123960e9625198a454ad3973671761fda | refs/heads/master | 2021-07-04T16:40:01.904529 | 2019-05-17T08:10:30 | 2019-05-17T08:10:30 | 110,428,294 | 1 | 10 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | #53 problem
# Read a number interactively and split it into its individual digit characters.
l=list(input("Enter Number"))
# Convert each digit character back to int and print the digit sum.
print(sum(map(int,l)))
| [
"noreply@github.com"
] | ddgvv.noreply@github.com |
d45809a86a61969897ea0f9adf6b683a1732e968 | 79839ab9d00afabdaeb9e4ea0ffdb50db8fdde4e | /LudoGame/controller.py | 4b1be36912f187cbdb11bff8a34564d2e27f4406 | [
"Apache-2.0"
] | permissive | surajsinghbisht054/LudoGame | 85968d57d6432e9d56edab1b71f9eb967ef5d5be | 20352646230bc541208e93dfbf0818e42eb0c6b3 | refs/heads/master | 2021-01-11T13:54:12.932791 | 2018-01-09T10:18:32 | 2018-01-09T10:18:32 | 94,885,322 | 1 | 5 | null | null | null | null | UTF-8 | Python | false | false | 6,260 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Written By:
# S.S.B
# surajsinghbisht054@gmail.com
# bitforestinfo.blogspot.com
#
#
#
#
##################################################
######## Please Don't Remove Author Name #########
############### Thanks ###########################
##################################################
#
#
# Import Module
try:
import Tkinter
except:
import tkinter as Tkinter
import random
from views import Board
from models import TRACK, OVALS, TEAM, F_TRACK
# Frame Of Dice Function
class Dice(Tkinter.Frame):
    """Dice widget: shows the last roll and a button that rolls once per turn."""
    def __init__(self, root, s):
        Tkinter.Frame.__init__(self, root)
        self.string = s      # shared IntVar holding the current roll value
        self.string.set(6)   # face shown before the first roll
        self.create_widget()

    def round(self):
        """Roll the dice and lock the button until the turn is handed over."""
        self.string.set(random.randint(1, 6))
        self.button.config(state="disable")
        return

    def create_widget(self):
        """Build the roll display label and the roll button."""
        display = Tkinter.Label(self, textvariable=self.string, width=20)
        display.pack(fill='both')
        self.button = Tkinter.Button(self, text="Team A", command=self.round)
        self.button.pack(fill='both')
        return
# Frame Of ScoreBoard
class ScoreBoard(Tkinter.LabelFrame):
    """Score panel showing each team's cleared-coin count."""
    def __init__(self, *args, **kwargs):
        Tkinter.LabelFrame.__init__(self, *args, **kwargs)
        self['padx'] = 20
        self['pady'] = 20
        self.create_label()

    def create_label(self):
        """Lay out the two team name labels and their score counters."""
        for row, (team, colour) in enumerate(
                [("Team A", "RoyalBlue1"), ("Team B", "yellow2")], start=1):
            Tkinter.Label(self, text=team, bg=colour).grid(row=row, column=1)
        self.team_a = Tkinter.Label(self, text="0")
        self.team_a.grid(row=1, column=2)
        self.team_b = Tkinter.Label(self, text="0")
        self.team_b.grid(row=2, column=2)
        return
# Creating Main Engine
class Engine:
def __init__(self, canvas):
self.canvas = canvas
#self.ovals=[]
self.create_ovals()
self.turn = "A"
self.number = Tkinter.IntVar()
self.add_dice()
self.score_board()
# Add Dice Frame
def add_dice(self):
self.dice=Dice(self.canvas.master, self.number)
self.dice.pack(side='left')
return
#Add Score Board
def score_board(self):
self.score=ScoreBoard(self.canvas.master, text="Score")
self.score.pack(side='right')
return
# Creating Ovals
def create_ovals(self):
self.oval_identity=[]
for a,b,c,d in OVALS:
for i in b:
s=self.canvas.create_oval(*self.getcoordinates(i), fill=c, tag="C{}".format(i), activewidth=3)
self.oval_identity.append("C{}".format(i))
self.canvas.tag_bind(s, "<Button-1>", self.oval_triggers)
return
# Oval Binding Handler
def oval_triggers(self, event):
tag = self.selected_oval(event)
if tag and (self.number.get()!=0):
# Team A
if self.turn =="A":
if tag in TEAM[0]:
# TEAM A PLAYERS
self.team_a_moves(tag)
# Team B
else:
if tag in TEAM[1]:
# TEAM B PLAYERS
self.team_b_moves(tag)
return
# Uplifting Ovals
def uplifting(self, team):
for a,b,c,d in OVALS:
# a = Track
# b = Station
# c = Color
# d = Team
for s in b:
tag=str("C"+s)
if (d==team) and tag:
# uplift here
self.canvas.lift(tag)
return
# Team A Moves
def team_a_moves(self, tag):
for a,b,c,d in OVALS:
# a = Track
# b = Station
# c = Color
# d = Team
for s in b:
if str("C"+s)==tag:
step=self.number.get()
# Open
if (step==1 or step==6) and not self.gettrackbox(tag):
self.change_place(tag,a[0])
print "Change Place to Start"
else:
print "Check"
# In Track
t = self.gettrackbox(tag)
if t:
present_address = a.index(t)
print t, a[-2]
if t==a[-2]:
self.score.team_a.config(text=str(int(self.score.team_a.cget("text"))+1))
self.canvas.delete(tag)
try:
self.change_place(tag,a[present_address+step])
#self.check_turns()
except:
pass
t = self.gettrackbox(tag)
if t==a[-2]:
print "One Coin Clear"
# One Coin Clear
self.canvas.delete(tag)
else:
self.check_turns()
return
return
# Team B Moves
def team_b_moves(self, tag):
for a,b,c,d in OVALS:
# a = Track
# b = Station
# c = Color
# d = Team
for s in b:
if str("C"+s)==tag:
step=self.number.get()
# Open
if (step==1 or step==6) and not self.gettrackbox(tag):
self.change_place(tag,a[0])
print "Change Place to Start"
else:
print "Check"
# In Track
t = self.gettrackbox(tag)
if t:
present_address = a.index(t)
print t, a[-2]
if t==a[-2]:
self.score.team_b.config(text=str(int(self.score.team_a.cget("text"))+1))
self.canvas.delete(tag)
try:
self.change_place(tag,a[present_address+step])
#self.check_turns()
except:
pass
t = self.gettrackbox(tag)
if t==a[-2]:
print "One Coin Clear"
# One Coin Clear
self.canvas.delete(tag)
else:
self.check_turns()
return
else:
print "not selected"
return
# Shape Movement Handler
def change_place(self, tag, track):
a,b,c,d=self.getcoordinates(tag)
e,f,g,h=self.getcoordinates(track)
self.canvas.move(tag, g-c, h-d)
self.check_turns()
return
# Get Square Shape Tag on Which Coin Shape Is Lying
def gettrackbox(self, tag):
for i in TRACK:
if self.getcoordinates(i)==self.getcoordinates(tag):
return i
for l in F_TRACK:
for i in l:
if self.getcoordinates(i)==self.getcoordinates(tag):
return i
return
# Selected Oval Tag Return
def selected_oval(self, event=None):
x , y = event.x, event.y
for i in self.oval_identity:
x1,y1,x2,y2 = self.getcoordinates(i)
if (x1<=x) and (x<=x2) and (y1<=y) and (y<=y2):
return i
# Team Turn handlers
def check_turns(self):
self.dice.button.config(state="normal")
self.number.set(0)
if self.turn == "A":
self.turn = "B"
self.dice.button.config(text="Team B")
self.uplifting("B")
return
else:
self.turn = "A"
self.dice.button.config(text="Team A")
self.uplifting("A")
return
# Get Tag Coordinates In Canvas
def getcoordinates(self, tags):
return self.canvas.coords(tags)
# Main Trigger
if __name__ == "__main__":
    # Build the Tk window, draw the board and start the game engine.
    root = Tkinter.Tk()
    board = Board(root)
    board.pack()
    engine = Engine(board)
    root.mainloop()
| [
"surajsinghbisht054@gmail.com"
] | surajsinghbisht054@gmail.com |
95f27ad073deba4419d29e314fd828507e60ea62 | 0cb46d3c14ed82627ca7ffb4de2a80e9c3013c50 | /model/base.py | 21accadb14e32119bdef0da1932da4ef01752672 | [] | no_license | five3/zyw | 40533cd6eb1d17f98c055893d9cdfba30fa69696 | 7fcd4d7a4a877c907d59df153e42360fc616f9a5 | refs/heads/master | 2020-12-24T08:55:11.207216 | 2017-08-18T13:12:03 | 2017-08-18T13:12:03 | 38,229,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#coding=utf-8
from model.db.database import *
class base:
    """Minimal ActiveRecord-style base class.

    The concrete subclass's name doubles as the database table name, and
    every CRUD helper simply forwards to a fresh ``database()`` handle.
    """
    def getTable(self):
        # The table is named after the concrete subclass.
        return self.__class__.__name__

    def getDb(self):
        # A new connection wrapper per call; no handle is cached.
        return database()

    def insert(self, data):
        db = self.getDb()
        return db.insert(self.getTable(), data)

    def delete(self, condition):
        db = self.getDb()
        return db.delete(self.getTable(), condition)

    def getList(self, colums, condition, orders='', limits=''):
        db = self.getDb()
        return db.getList(self.getTable(), colums, condition, orders, limits)

    def getOne(self, colums, condition, orders='', limits=''):
        db = self.getDb()
        return db.getOne(self.getTable(), colums, condition, orders, limits)

    def update(self, data, condition):
        db = self.getDb()
        return db.update(self.getTable(), data, condition)

    def execute(self, sql):
        return self.getDb().execute(sql)

    def executeInsert(self, sql):
        return self.getDb().executeInsert(sql)

    def fetchOne(self, sql):
        return self.getDb().fetchOne(sql)

    def fetchAll(self, sql):
        return self.getDb().fetchAll(sql)
| [
"five3@163.com"
] | five3@163.com |
1afd17c2c1370a54520917228db7e4bd9cc00e5c | 1a114943c92a5db40034470ff31a79bcf8ddfc37 | /stdlib_exam/strop-example-1.py | d9e343e3aa7690e690e6a82f444da61173972665 | [] | no_license | renwl/mylinux | 1924918599efd6766c266231d66b2a7ed6f6cdd1 | 0602fc6d2b0d254a8503e57310f848fc3e1a73b4 | refs/heads/master | 2020-07-10T22:12:03.259349 | 2017-01-02T12:32:04 | 2017-01-02T12:32:04 | 66,467,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import strop
import sys
# assuming we have an executable named ".../executable", add a
# directory named ".../executable-extra" to the path
# NOTE: `strop` is a long-removed Python 2 internal module; str.lower() is
# the modern equivalent of strop.lower().
if strop.lower(sys.executable)[-4:] == ".exe":
    extra = sys.executable[:-4] # windows
else:
    extra = sys.executable
# Prepend ".../executable-extra" so modules placed there (e.g. mymodule)
# shadow any installed copies.
sys.path.insert(0, extra + "-extra")
import mymodule
| [
"wenliang.ren@quanray.com"
] | wenliang.ren@quanray.com |
4513fb0f5b0c0dacd59247201f0afd9c4f9b4c5b | 05e0429c617209530b212987f169640e6b75c8f0 | /Chapter 8/powerballLottery.py | 7fdd170731091cdea9226f01a1bb878352f84a87 | [] | no_license | nishantchaudhary12/Starting-with-Python | ac33baf01e3cf869cc1cf7f97991ecda4ee893bd | 9031fa64b19698c060d134cb0416812db01f1f7b | refs/heads/master | 2020-04-24T00:06:15.177647 | 2019-05-28T23:12:16 | 2019-05-28T23:12:16 | 171,555,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,186 | py | #powerball lottery
def records(file):
    """Tabulate powerball drawing history from an open text file.

    Each line holds six space-separated numbers: five white balls followed
    by the powerball.  Reading stops at the first empty line.

    Returns a 3-tuple:
      * dict mapping white-ball number (int) -> draw count
      * dict mapping powerball number (int) -> draw count
      * list of number strings ordered least- to most-recently drawn
    """
    white_counts = dict()
    power_counts = dict()
    recency = list()
    raw = file.readline().rstrip('\n')
    while raw != '':
        tokens = raw.split(' ')
        # Move every drawn number to the back of the recency list so the
        # front always holds the most overdue numbers.
        for token in tokens:
            if token in recency:
                recency.remove(token)
            recency.append(token)
        # Convert the six drawn numbers in place (recency keeps the strings).
        for pos in range(6):
            tokens[pos] = int(tokens[pos])
        for white in tokens[:5]:
            white_counts[white] = white_counts.get(white, 0) + 1
        power = tokens[-1]
        power_counts[power] = power_counts.get(power, 0) + 1
        raw = file.readline().rstrip('\n')
    return white_counts, power_counts, recency
def sort_dict(number_dict):
    """Return (number, count) pairs sorted by ascending draw count."""
    pairs = list(number_dict.items())
    pairs.sort(key=lambda pair: pair[1])
    return pairs
def most_common(new_sorted_list):
    """Print the ten most frequently drawn numbers with their counts.

    ``new_sorted_list`` holds (number, frequency) tuples sorted ascending by
    frequency, so the most common entries sit at the tail; iterate backwards
    from the end.
    """
    print('Most Common Numbers with frequencies: ')
    # Fixed off-by-one: range(-1, -10, -1) printed only NINE entries, while
    # the sibling reports (least_common, overdue) print ten.
    for i in range(-1, -11, -1):
        print(new_sorted_list[i])
def least_common(new_sorted_list):
    """Print the ten least frequently drawn numbers with their counts.

    ``new_sorted_list`` is sorted ascending by frequency, so the least
    common entries are at the front.
    """
    print('\n')
    print('Least Common Numbers with frequencies: ')
    for position in range(10):
        print(new_sorted_list[position])
def overdue(overdue_num_list):
    """Print the ten most overdue numbers (least recently drawn first)."""
    print('\n')
    print('Most overdue numbers(ordered from most to least):')
    for position in range(10):
        print(overdue_num_list[position])
def frequency(number_dict, pow_num_dict):
    """Print every ball's draw count: numbers 1-69, then powerballs 1-26.

    Both dicts must contain an entry for every possible number; a missing
    key raises KeyError (matching the historical behaviour).
    """
    print('\n')
    print('Number frequencies:')
    for num in range(1, 70):
        print(num, '=', number_dict[num])
    print('Powerball numbers:')
    for num in range(1, 27):
        print(num, '=', pow_num_dict[num])
def main():
    # Entry point: read the drawing history from pbnumbers.txt (expected in
    # the current working directory) and print all four reports.
    file = open('pbnumbers.txt', 'r')
    number_dict, pow_num_dict, overdue_num_list = records(file)
    file.close()
    # Sort (number, count) pairs ascending by count for the common reports.
    new_sorted_list = sort_dict(number_dict)
    most_common(new_sorted_list)
    least_common(new_sorted_list)
    overdue(overdue_num_list)
    frequency(number_dict, pow_num_dict)
main() | [
"chaudharynishant025@gmail.com"
] | chaudharynishant025@gmail.com |
a62a9c5b492057a1f8eaf18d3667012243bed748 | ad080bd1612b980490ef2d1b61647cbc6beddf5d | /my_game/administrator/asteroid_generation.py | bface5faa562653e2b5a47965d18e18a78871d13 | [] | no_license | rokealva83/my_game | 8f915076986144234950aa4443e8bc51ad019664 | 76ecc1dbf60c7f93621ddca66d62d5fea2826d0e | refs/heads/master | 2020-12-24T17:54:59.491881 | 2016-05-10T20:06:53 | 2016-05-10T20:06:53 | 29,264,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,035 | py | # -*- coding: utf-8 -*-
import random
from django.shortcuts import render
from my_game.models import System, AsteroidField
# функция генерации астероидных полей
def asteroid_generation(request):
if request.method == "POST" and request.POST.get('add_button') is not None:
asteroid = int(request.POST.get('asteroid', None))
for i in range(asteroid):
#генерация кооржинат поля
system = System.objects.filter().order_by('x').first()
x_min = system.x - 10
system = System.objects.filter().order_by('x').last()
x_max = system.x + 10
system = System.objects.filter().order_by('y').first()
y_min = system.y - 10
system = System.objects.filter().order_by('y').last()
y_max = system.y + 10
x = round(random.uniform(x_min, x_max), 3)
y = round(random.uniform(y_min, y_max), 3)
z = round(random.uniform(-30, 30), 3)
# размер поля
k = random.random()
if 0.07 > k:
size = random.randint(3000000, 5000000)
else:
if 0.07 <= k <= 0.2:
size = random.randint(1000000, 3000000)
else:
if 0.2 < k < 0.8:
size = random.randint(500000, 1000000)
else:
size = random.randint(100000, 500000)
#количество артефактов в поле
k = random.random()
if 0.02 > k:
artifact = 5
elif 0.02 <= k <= 0.1:
artifact = 4
elif 0.1 < k <= 0.2:
artifact = 3
elif 0.2 < k <= 0.4:
artifact = 2
elif 0.4 < k <= 0.7:
artifact = 1
else:
artifact = 0
# уровень обогащения поля
k = random.random()
if 0.05 > k:
ore = round(random.uniform(0.8, 0.95), 3)
elif 0.05 <= k <= 0.35:
ore = round(random.uniform(0.799, 0.8), 3)
else:
ore = round(random.uniform(0.6, 0.799), 3)
mineral_koef = round(random.uniform(0.07, 0.2), 3) * ore
resource_koef = ore - mineral_koef
koef_res_1 = round(round(random.uniform(0.2, 0.3), 3) * resource_koef, 3)
koef_res_2 = round(round(random.uniform(0.2, 0.3), 3) * resource_koef, 3)
koef_res_3 = round(round(random.uniform(0.2, 0.3), 3) * resource_koef, 3)
koef_res_4 = round(resource_koef - (koef_res_1 + koef_res_2 + koef_res_3), 3)
koef_min_1 = round(round(random.uniform(0.2, 0.3), 3) * mineral_koef, 3)
koef_min_2 = round(round(random.uniform(0.2, 0.3), 3) * mineral_koef, 3)
koef_min_3 = round(round(random.uniform(0.2, 0.3), 3) * mineral_koef, 3)
koef_min_4 = round(mineral_koef - (koef_min_1 + koef_min_2 + koef_min_3), 3)
asteroid_test = AsteroidField.objects.filter(x=x, y=y, z=z).first()
if asteroid_test:
size = asteroid_test.size
koef_res_1 = asteroid_test.koef_res_1
koef_res_2 = asteroid_test.koef_res_2
koef_res_3 = asteroid_test.koef_res_3
koef_res_4 = asteroid_test.koef_res_4
koef_min_1 = asteroid_test.koef_min_1
koef_min_2 = asteroid_test.koef_min_2
koef_min_3 = asteroid_test.koef_min_3
koef_min_4 = asteroid_test.koef_min_4
asteroid = AsteroidField(
x=x,
y=y,
z=z,
size=size,
koef_res_1=koef_res_1,
koef_res_2=koef_res_2,
koef_res_3=koef_res_3,
koef_res_4=koef_res_4,
koef_min_1=koef_min_1,
koef_min_2=koef_min_2,
koef_min_3=koef_min_3,
koef_min_4=koef_min_4,
artifact=artifact
)
asteroid.save()
else:
asteroid = AsteroidField(
x=x,
y=y,
z=z,
size=size,
koef_res_1=koef_res_1,
koef_res_2=koef_res_2,
koef_res_3=koef_res_3,
koef_res_4=koef_res_4,
koef_min_1=koef_min_1,
koef_min_2=koef_min_2,
koef_min_3=koef_min_3,
koef_min_4=koef_min_4,
artifact=artifact
)
asteroid.save()
message = 'Поля сгенерированы'
output = {'message': message}
return render(request, "admin/generation.html", output)
| [
"tolik20002@bigmir.net"
] | tolik20002@bigmir.net |
a1edb59f7e7e2fce8ac3afcefcb050ea596661c7 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/blueprint/v20181101preview/outputs.py | 1acabbfdd2ea65bb9cf93d72b6073ee60cd85ea5 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 26,621 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AssignmentLockSettingsResponse',
'AssignmentStatusResponse',
'BlueprintStatusResponse',
'KeyVaultReferenceResponse',
'ManagedServiceIdentityResponse',
'ParameterDefinitionResponse',
'ParameterValueResponse',
'ResourceGroupDefinitionResponse',
'ResourceGroupValueResponse',
'SecretValueReferenceResponse',
'UserAssignedIdentityResponse',
]
# NOTE: auto-generated by the Pulumi SDK generator (see file header);
# behavioural changes belong in the generator, not in this file.
@pulumi.output_type
class AssignmentLockSettingsResponse(dict):
    """
    Defines how resources deployed by a blueprint assignment are locked.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when callers use the camelCase wire key instead of the
        # snake_case property getter.
        suggest = None
        if key == "excludedActions":
            suggest = "excluded_actions"
        elif key == "excludedPrincipals":
            suggest = "excluded_principals"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AssignmentLockSettingsResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AssignmentLockSettingsResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AssignmentLockSettingsResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 excluded_actions: Optional[Sequence[str]] = None,
                 excluded_principals: Optional[Sequence[str]] = None,
                 mode: Optional[str] = None):
        """
        Defines how resources deployed by a blueprint assignment are locked.
        :param Sequence[str] excluded_actions: List of management operations that are excluded from blueprint locks. Up to 200 actions are permitted. If the lock mode is set to 'AllResourcesReadOnly', then the following actions are automatically appended to 'excludedActions': '*/read', 'Microsoft.Network/virtualNetworks/subnets/join/action' and 'Microsoft.Authorization/locks/delete'. If the lock mode is set to 'AllResourcesDoNotDelete', then the following actions are automatically appended to 'excludedActions': 'Microsoft.Authorization/locks/delete'. Duplicate actions will get removed.
        :param Sequence[str] excluded_principals: List of AAD principals excluded from blueprint locks. Up to 5 principals are permitted.
        :param str mode: Lock mode.
        """
        if excluded_actions is not None:
            pulumi.set(__self__, "excluded_actions", excluded_actions)
        if excluded_principals is not None:
            pulumi.set(__self__, "excluded_principals", excluded_principals)
        if mode is not None:
            pulumi.set(__self__, "mode", mode)
    @property
    @pulumi.getter(name="excludedActions")
    def excluded_actions(self) -> Optional[Sequence[str]]:
        """
        List of management operations that are excluded from blueprint locks. Up to 200 actions are permitted. If the lock mode is set to 'AllResourcesReadOnly', then the following actions are automatically appended to 'excludedActions': '*/read', 'Microsoft.Network/virtualNetworks/subnets/join/action' and 'Microsoft.Authorization/locks/delete'. If the lock mode is set to 'AllResourcesDoNotDelete', then the following actions are automatically appended to 'excludedActions': 'Microsoft.Authorization/locks/delete'. Duplicate actions will get removed.
        """
        return pulumi.get(self, "excluded_actions")
    @property
    @pulumi.getter(name="excludedPrincipals")
    def excluded_principals(self) -> Optional[Sequence[str]]:
        """
        List of AAD principals excluded from blueprint locks. Up to 5 principals are permitted.
        """
        return pulumi.get(self, "excluded_principals")
    @property
    @pulumi.getter
    def mode(self) -> Optional[str]:
        """
        Lock mode.
        """
        return pulumi.get(self, "mode")
# NOTE: auto-generated by the Pulumi SDK generator (see file header);
# behavioural changes belong in the generator, not in this file.
@pulumi.output_type
class AssignmentStatusResponse(dict):
    """
    The status of a blueprint assignment. This field is readonly.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when callers use the camelCase wire key instead of the
        # snake_case property getter.
        suggest = None
        if key == "lastModified":
            suggest = "last_modified"
        elif key == "managedResources":
            suggest = "managed_resources"
        elif key == "timeCreated":
            suggest = "time_created"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AssignmentStatusResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AssignmentStatusResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AssignmentStatusResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 last_modified: str,
                 managed_resources: Sequence[str],
                 time_created: str):
        """
        The status of a blueprint assignment. This field is readonly.
        :param str last_modified: Last modified time of this blueprint definition.
        :param Sequence[str] managed_resources: List of resources that were created by the blueprint assignment.
        :param str time_created: Creation time of this blueprint definition.
        """
        pulumi.set(__self__, "last_modified", last_modified)
        pulumi.set(__self__, "managed_resources", managed_resources)
        pulumi.set(__self__, "time_created", time_created)
    @property
    @pulumi.getter(name="lastModified")
    def last_modified(self) -> str:
        """
        Last modified time of this blueprint definition.
        """
        return pulumi.get(self, "last_modified")
    @property
    @pulumi.getter(name="managedResources")
    def managed_resources(self) -> Sequence[str]:
        """
        List of resources that were created by the blueprint assignment.
        """
        return pulumi.get(self, "managed_resources")
    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """
        Creation time of this blueprint definition.
        """
        return pulumi.get(self, "time_created")
# NOTE: auto-generated by the Pulumi SDK generator (see file header);
# behavioural changes belong in the generator, not in this file.
@pulumi.output_type
class BlueprintStatusResponse(dict):
    """
    The status of the blueprint. This field is readonly.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when callers use the camelCase wire key instead of the
        # snake_case property getter.
        suggest = None
        if key == "lastModified":
            suggest = "last_modified"
        elif key == "timeCreated":
            suggest = "time_created"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BlueprintStatusResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        BlueprintStatusResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        BlueprintStatusResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 last_modified: str,
                 time_created: str):
        """
        The status of the blueprint. This field is readonly.
        :param str last_modified: Last modified time of this blueprint definition.
        :param str time_created: Creation time of this blueprint definition.
        """
        pulumi.set(__self__, "last_modified", last_modified)
        pulumi.set(__self__, "time_created", time_created)
    @property
    @pulumi.getter(name="lastModified")
    def last_modified(self) -> str:
        """
        Last modified time of this blueprint definition.
        """
        return pulumi.get(self, "last_modified")
    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """
        Creation time of this blueprint definition.
        """
        return pulumi.get(self, "time_created")
# NOTE: auto-generated by the Pulumi SDK generator (see file header);
# behavioural changes belong in the generator, not in this file.
@pulumi.output_type
class KeyVaultReferenceResponse(dict):
    """
    Specifies the link to a Key Vault.
    """
    def __init__(__self__, *,
                 id: str):
        """
        Specifies the link to a Key Vault.
        :param str id: Azure resource ID of the Key Vault.
        """
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Azure resource ID of the Key Vault.
        """
        return pulumi.get(self, "id")
# NOTE: auto-generated by the Pulumi SDK generator (see file header);
# behavioural changes belong in the generator, not in this file.
@pulumi.output_type
class ManagedServiceIdentityResponse(dict):
    """
    Managed identity generic object.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when callers use the camelCase wire key instead of the
        # snake_case property getter.
        suggest = None
        if key == "principalId":
            suggest = "principal_id"
        elif key == "tenantId":
            suggest = "tenant_id"
        elif key == "userAssignedIdentities":
            suggest = "user_assigned_identities"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ManagedServiceIdentityResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ManagedServiceIdentityResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ManagedServiceIdentityResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 type: str,
                 principal_id: Optional[str] = None,
                 tenant_id: Optional[str] = None,
                 user_assigned_identities: Optional[Mapping[str, 'outputs.UserAssignedIdentityResponse']] = None):
        """
        Managed identity generic object.
        :param str type: Type of the managed identity.
        :param str principal_id: Azure Active Directory principal ID associated with this Identity.
        :param str tenant_id: ID of the Azure Active Directory.
        :param Mapping[str, 'UserAssignedIdentityResponse'] user_assigned_identities: The list of user-assigned managed identities associated with the resource. Key is the Azure resource Id of the managed identity.
        """
        pulumi.set(__self__, "type", type)
        if principal_id is not None:
            pulumi.set(__self__, "principal_id", principal_id)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)
        if user_assigned_identities is not None:
            pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of the managed identity.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> Optional[str]:
        """
        Azure Active Directory principal ID associated with this Identity.
        """
        return pulumi.get(self, "principal_id")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[str]:
        """
        ID of the Azure Active Directory.
        """
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter(name="userAssignedIdentities")
    def user_assigned_identities(self) -> Optional[Mapping[str, 'outputs.UserAssignedIdentityResponse']]:
        """
        The list of user-assigned managed identities associated with the resource. Key is the Azure resource Id of the managed identity.
        """
        return pulumi.get(self, "user_assigned_identities")
# NOTE: auto-generated by the Pulumi SDK generator (see file header);
# behavioural changes belong in the generator, not in this file.
@pulumi.output_type
class ParameterDefinitionResponse(dict):
    """
    Represent a parameter with constrains and metadata.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when callers use the camelCase wire key instead of the
        # snake_case property getter.
        suggest = None
        if key == "allowedValues":
            suggest = "allowed_values"
        elif key == "defaultValue":
            suggest = "default_value"
        elif key == "displayName":
            suggest = "display_name"
        elif key == "strongType":
            suggest = "strong_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ParameterDefinitionResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ParameterDefinitionResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ParameterDefinitionResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 type: str,
                 allowed_values: Optional[Sequence[Any]] = None,
                 default_value: Optional[Any] = None,
                 description: Optional[str] = None,
                 display_name: Optional[str] = None,
                 strong_type: Optional[str] = None):
        """
        Represent a parameter with constrains and metadata.
        :param str type: Allowed data types for Resource Manager template parameters.
        :param Sequence[Any] allowed_values: Array of allowed values for this parameter.
        :param Any default_value: Default Value for this parameter.
        :param str description: Description of this parameter/resourceGroup.
        :param str display_name: DisplayName of this parameter/resourceGroup.
        :param str strong_type: StrongType for UI to render rich experience during blueprint assignment. Supported strong types are resourceType, principalId and location.
        """
        pulumi.set(__self__, "type", type)
        if allowed_values is not None:
            pulumi.set(__self__, "allowed_values", allowed_values)
        if default_value is not None:
            pulumi.set(__self__, "default_value", default_value)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if strong_type is not None:
            pulumi.set(__self__, "strong_type", strong_type)
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Allowed data types for Resource Manager template parameters.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="allowedValues")
    def allowed_values(self) -> Optional[Sequence[Any]]:
        """
        Array of allowed values for this parameter.
        """
        return pulumi.get(self, "allowed_values")
    @property
    @pulumi.getter(name="defaultValue")
    def default_value(self) -> Optional[Any]:
        """
        Default Value for this parameter.
        """
        return pulumi.get(self, "default_value")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        Description of this parameter/resourceGroup.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        DisplayName of this parameter/resourceGroup.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter(name="strongType")
    def strong_type(self) -> Optional[str]:
        """
        StrongType for UI to render rich experience during blueprint assignment. Supported strong types are resourceType, principalId and location.
        """
        return pulumi.get(self, "strong_type")
# NOTE: auto-generated by the Pulumi SDK generator (see file header);
# behavioural changes belong in the generator, not in this file.
@pulumi.output_type
class ParameterValueResponse(dict):
    """
    Value for the specified parameter. Can be either 'value' or 'reference' but not both.
    """
    def __init__(__self__, *,
                 reference: Optional['outputs.SecretValueReferenceResponse'] = None,
                 value: Optional[Any] = None):
        """
        Value for the specified parameter. Can be either 'value' or 'reference' but not both.
        :param 'SecretValueReferenceResponse' reference: Parameter value as reference type.
        :param Any value: Parameter value. Any valid JSON value is allowed including objects, arrays, strings, numbers and booleans.
        """
        if reference is not None:
            pulumi.set(__self__, "reference", reference)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def reference(self) -> Optional['outputs.SecretValueReferenceResponse']:
        """
        Parameter value as reference type.
        """
        return pulumi.get(self, "reference")
    @property
    @pulumi.getter
    def value(self) -> Optional[Any]:
        """
        Parameter value. Any valid JSON value is allowed including objects, arrays, strings, numbers and booleans.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class ResourceGroupDefinitionResponse(dict):
    """
    Represents an Azure resource group in a blueprint definition.
    """
    # NOTE(review): looks like Pulumi codegen output — keep edits mechanical.
    @staticmethod
    def __key_warning(key: str):
        # Maps camelCase wire keys to their snake_case property names and warns
        # when callers use raw dict access instead of the typed getters.
        suggest = None
        if key == "dependsOn":
            suggest = "depends_on"
        elif key == "displayName":
            suggest = "display_name"
        elif key == "strongType":
            suggest = "strong_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ResourceGroupDefinitionResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Dict access is routed through the key warning above.
        ResourceGroupDefinitionResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ResourceGroupDefinitionResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 depends_on: Optional[Sequence[str]] = None,
                 description: Optional[str] = None,
                 display_name: Optional[str] = None,
                 location: Optional[str] = None,
                 name: Optional[str] = None,
                 strong_type: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        Represents an Azure resource group in a blueprint definition.
        :param Sequence[str] depends_on: Artifacts which need to be deployed before this resource group.
        :param str description: Description of this parameter/resourceGroup.
        :param str display_name: DisplayName of this parameter/resourceGroup.
        :param str location: Location of this resourceGroup. Leave empty if the resource group location will be specified during the blueprint assignment.
        :param str name: Name of this resourceGroup. Leave empty if the resource group name will be specified during the blueprint assignment.
        :param str strong_type: StrongType for UI to render rich experience during blueprint assignment. Supported strong types are resourceType, principalId and location.
        :param Mapping[str, str] tags: Tags to be assigned to this resource group.
        """
        # All fields are optional; only non-None values are stored.
        if depends_on is not None:
            pulumi.set(__self__, "depends_on", depends_on)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if strong_type is not None:
            pulumi.set(__self__, "strong_type", strong_type)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="dependsOn")
    def depends_on(self) -> Optional[Sequence[str]]:
        """
        Artifacts which need to be deployed before this resource group.
        """
        return pulumi.get(self, "depends_on")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        Description of this parameter/resourceGroup.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        DisplayName of this parameter/resourceGroup.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Location of this resourceGroup. Leave empty if the resource group location will be specified during the blueprint assignment.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of this resourceGroup. Leave empty if the resource group name will be specified during the blueprint assignment.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="strongType")
    def strong_type(self) -> Optional[str]:
        """
        StrongType for UI to render rich experience during blueprint assignment. Supported strong types are resourceType, principalId and location.
        """
        return pulumi.get(self, "strong_type")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Tags to be assigned to this resource group.
        """
        return pulumi.get(self, "tags")
@pulumi.output_type
class ResourceGroupValueResponse(dict):
    """
    Represents an Azure resource group.
    """
    # NOTE(review): looks like Pulumi codegen output — keep edits mechanical.
    def __init__(__self__, *,
                 location: Optional[str] = None,
                 name: Optional[str] = None):
        """
        Represents an Azure resource group.
        :param str location: Location of the resource group.
        :param str name: Name of the resource group.
        """
        # Both fields are optional; only non-None values are stored.
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Location of the resource group.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the resource group.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class SecretValueReferenceResponse(dict):
    """
    Reference to a Key Vault secret.
    """
    # NOTE(review): looks like Pulumi codegen output — keep edits mechanical.
    @staticmethod
    def __key_warning(key: str):
        # Maps camelCase wire keys to their snake_case property names and warns
        # when callers use raw dict access instead of the typed getters.
        suggest = None
        if key == "keyVault":
            suggest = "key_vault"
        elif key == "secretName":
            suggest = "secret_name"
        elif key == "secretVersion":
            suggest = "secret_version"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretValueReferenceResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SecretValueReferenceResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SecretValueReferenceResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 key_vault: 'outputs.KeyVaultReferenceResponse',
                 secret_name: str,
                 secret_version: Optional[str] = None):
        """
        Reference to a Key Vault secret.
        :param 'KeyVaultReferenceResponse' key_vault: Specifies the reference to a given Azure Key Vault.
        :param str secret_name: Name of the secret.
        :param str secret_version: The version of the secret to use. If left blank, the latest version of the secret is used.
        """
        # key_vault and secret_name are required; secret_version is optional.
        pulumi.set(__self__, "key_vault", key_vault)
        pulumi.set(__self__, "secret_name", secret_name)
        if secret_version is not None:
            pulumi.set(__self__, "secret_version", secret_version)
    @property
    @pulumi.getter(name="keyVault")
    def key_vault(self) -> 'outputs.KeyVaultReferenceResponse':
        """
        Specifies the reference to a given Azure Key Vault.
        """
        return pulumi.get(self, "key_vault")
    @property
    @pulumi.getter(name="secretName")
    def secret_name(self) -> str:
        """
        Name of the secret.
        """
        return pulumi.get(self, "secret_name")
    @property
    @pulumi.getter(name="secretVersion")
    def secret_version(self) -> Optional[str]:
        """
        The version of the secret to use. If left blank, the latest version of the secret is used.
        """
        return pulumi.get(self, "secret_version")
@pulumi.output_type
class UserAssignedIdentityResponse(dict):
    """
    User-assigned managed identity.
    """
    # NOTE(review): looks like Pulumi codegen output — keep edits mechanical.
    @staticmethod
    def __key_warning(key: str):
        # Maps camelCase wire keys to their snake_case property names and warns
        # when callers use raw dict access instead of the typed getters.
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "principalId":
            suggest = "principal_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in UserAssignedIdentityResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        UserAssignedIdentityResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        UserAssignedIdentityResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 client_id: Optional[str] = None,
                 principal_id: Optional[str] = None):
        """
        User-assigned managed identity.
        :param str client_id: Client App Id associated with this identity.
        :param str principal_id: Azure Active Directory principal ID associated with this Identity.
        """
        # Both fields are optional; only non-None values are stored.
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if principal_id is not None:
            pulumi.set(__self__, "principal_id", principal_id)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[str]:
        """
        Client App Id associated with this identity.
        """
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> Optional[str]:
        """
        Azure Active Directory principal ID associated with this Identity.
        """
        return pulumi.get(self, "principal_id")
| [
"noreply@github.com"
] | morrell.noreply@github.com |
6df00727a21728a05316441890050263c914ffd5 | f47626fd3b236dd42f335952a3f3edf55f7e6075 | /region-app/app/tools/constants.py | 1b3a6661296e13c1420a27b4a2ce3b33edcd6574 | [] | no_license | reritom/region-app-example | da541afee5faf04bac65cceda15b6d2c265b7e79 | d0f4c2507c62755f8bbdc1400fb2fc538646ee76 | refs/heads/master | 2022-12-02T20:04:32.028450 | 2020-08-15T08:19:46 | 2020-08-15T08:19:46 | 287,619,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | # Constants related to the application should be located here
# Currently we only have one constant used by the RegionController, but this allows us to scale.
COUNTRY_FRANCE = "France"
| [
"reikudjinn@gmail.com"
] | reikudjinn@gmail.com |
74a66ecda7c167ce393132bd9d092d8ca7413ac3 | 1cb97b0fe8b275efd540716cb6e742fc44e927bf | /setup.py | 3b851f54849494044ef8725faa00a353b62ae451 | [
"MIT"
] | permissive | khushjammu/rljax | 31e4d0f9c6aa57a0a07a35f7f8854cc78360ae5a | f2d5e81240d99187fcb625d2caa630c3c7deecfc | refs/heads/master | 2023-06-27T17:15:43.437065 | 2021-07-30T16:55:47 | 2021-07-30T16:55:47 | 391,125,669 | 0 | 0 | MIT | 2021-07-30T16:18:23 | 2021-07-30T16:18:22 | null | UTF-8 | Python | false | false | 2,890 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import Command, find_packages, setup
# Package metadata. Bump VERSION here for releases; `python setup.py upload`
# (see UploadCommand below) tags the release with this value.
NAME = "rljax"
DESCRIPTION = "A collection of RL algorithms written in JAX."
URL = "https://github.com/ku2482/rljax"
EMAIL = "watanabe.toshiki@outlook.jp"
AUTHOR = "Toshiki Watanabe"
REQUIRES_PYTHON = ">=3.6.0"
VERSION = "0.0.4"
here = os.path.abspath(os.path.dirname(__file__))
# FIX: read requirements with a context manager — the original left the file
# handle open (`open(...).read()`), leaking it until garbage collection.
with open(os.path.join(here, "requirements.txt")) as req_file:
    REQUIRED = req_file.read().splitlines()
EXTRAS = {}
# Use the README as the long description, falling back to DESCRIPTION when the
# file is absent (e.g. in some sdist layouts).
try:
    with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
        long_description = "\n" + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION
about = {}
# VERSION is set above, so the fallback that reads __version__.py is dead in
# practice; kept for parity with the upstream setup.py template.
if not VERSION:
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    with open(os.path.join(here, project_slug, "__version__.py")) as f:
        exec(f.read(), about)
else:
    about["__version__"] = VERSION
class UploadCommand(Command):
    """Support setup.py upload."""
    description = "Build and publish the package."
    user_options = []
    @staticmethod
    def status(s):
        """Prints things in bold."""
        # \033[1m ... \033[0m is the ANSI bold escape sequence.
        print("\033[1m{0}\033[0m".format(s))
    def initialize_options(self):
        # Required by the distutils Command interface; no options to set up.
        pass
    def finalize_options(self):
        pass
    def run(self):
        """Build sdist+wheel, upload via twine, then tag and push the release."""
        try:
            self.status("Removing previous builds…")
            rmtree(os.path.join(here, "dist"))
        except OSError:
            # No previous dist/ directory — nothing to clean.
            pass
        self.status("Building Source and Wheel (universal) distribution…")
        os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
        self.status("Uploading the package to PyPI via Twine…")
        # NOTE: os.system return codes are ignored, so a failed build still
        # proceeds to upload/tagging.
        os.system("twine upload dist/*")
        self.status("Pushing git tags…")
        os.system("git tag v{0}".format(about["__version__"]))
        os.system("git push --tags")
        sys.exit()
# Register the package with setuptools using the metadata defined above.
setup(
    name=NAME,
    version=about["__version__"],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type="text/markdown",
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    # Only ship the rljax package tree, not tests or examples.
    packages=[package for package in find_packages() if package.startswith("rljax")],
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    # Enables `python setup.py upload` (see UploadCommand above).
    cmdclass={
        "upload": UploadCommand,
    },
)
| [
"kuboy2482@gmail.com"
] | kuboy2482@gmail.com |
ccc0c4a0baf39caf1e00f362fe8192eb9b77a0ee | 0635da394505415471efd89d542f225cae3e668b | /networkapi/api_environment_vip/facade.py | 38fb69529f25b6633b6b2dfd9aa6273ac730bec2 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | enterstudio/GloboNetworkAPI | 5e2fbe7ef3f4a70aaa0ae474a0c5493e5568cb24 | ea8eebc0337636f9250e628cc392514934db8edd | refs/heads/master | 2023-07-25T20:36:40.717818 | 2017-01-27T20:22:40 | 2017-01-27T20:22:40 | 82,249,922 | 0 | 0 | Apache-2.0 | 2023-09-04T21:56:15 | 2017-02-17T02:48:54 | Python | UTF-8 | Python | false | false | 2,827 | py | # -*- coding: utf-8 -*-
import logging
from networkapi.ambiente.models import EnvironmentVip
from networkapi.requisicaovips.models import OptionVip
from networkapi.requisicaovips.models import OptionVipEnvironmentVip
log = logging.getLogger(__name__)
def get_option_vip_by_environment_vip_ids(environment_vip_ids):
    """Build one ordered OptionVipEnvironmentVip queryset per environment vip.

    :param environment_vip_ids: iterable of environment vip ids
    :return: list of querysets (one per id), ordered by option type then name
    """
    def _options_for(env_id):
        # Lazy Django queryset; evaluation is deferred to the caller.
        return OptionVipEnvironmentVip.objects.filter(
            environment=env_id
        ).order_by(
            'option__tipo_opcao',
            'option__nome_opcao_txt'
        )

    return [_options_for(env_id) for env_id in environment_vip_ids]
def get_option_vip_by_environment_vip_type(search_list):
    """Build one OptionVip queryset per (environment vip id, option type) pair.

    :param search_list: list of dicts with keys ``environment_vip_id`` and
        ``type_option``
    :return: list of querysets, one per search entry
    """
    return [
        OptionVip.objects.filter(
            optionvipenvironmentvip__environment__id=entry['environment_vip_id'],
            tipo_opcao=entry['type_option'])
        for entry in search_list
    ]
def get_type_option_vip_by_environment_vip_ids(environment_vip_ids):
    """Collect the distinct option-vip types available per environment vip.

    :param environment_vip_ids: iterable of environment vip ids
    :return: list of lists of ``tipo_opcao`` strings, one list per id
    """
    result = []
    for env_id in environment_vip_ids:
        # Distinct option types linked to this environment vip.
        distinct_rows = OptionVip.objects.filter(
            optionvipenvironmentvip__environment__id=env_id
        ).values('tipo_opcao').distinct()
        result.append([row['tipo_opcao'] for row in distinct_rows])
    return result
def get_environmentvip_by_ids(environment_vip_ids):
    """Fetch a queryset of EnvironmentVip rows for the given ids.

    Each id is first resolved individually through ``get_environmentvip_by_id``
    (preserving that lookup's behavior for unknown ids), then the validated ids
    are fetched in a single bulk query.
    """
    validated_ids = [
        get_environmentvip_by_id(env_id).id for env_id in environment_vip_ids
    ]
    return EnvironmentVip.objects.filter(id__in=validated_ids)
def get_environmentvip_by_id(environment_vip_id):
    """Fetch a single EnvironmentVip by primary key (delegates to the model)."""
    return EnvironmentVip.get_by_pk(environment_vip_id)
def update_environment_vip(environment_vip):
    """Persist a new ``conf`` value on an existing EnvironmentVip.

    :param environment_vip: dict-like with keys ``id`` and ``conf``
    :return: the updated EnvironmentVip instance
    """
    record = get_environmentvip_by_id(environment_vip.get('id'))
    record.conf = environment_vip.get('conf')
    record.save()
    return record
| [
"ederson.brilhante@corp.globo.com"
] | ederson.brilhante@corp.globo.com |
69231c247f485ff9007070af890fa3965a1f4c62 | 8447b5f83be675c5de085d8824783ec0739690b0 | /tests/test_sampler.py | 9b68ced7cf583e92366ac91fd00cf2d8f0e2aa27 | [] | no_license | jie311/mmdetection_lite | bca7070ad88a04e4f6650292642f98f24f7ebeba | 1c151a7c44759d022fea1d85fb036a5b39409449 | refs/heads/master | 2022-04-19T06:18:42.282754 | 2020-04-13T07:41:10 | 2020-04-13T07:41:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,320 | py | import torch
from mmdet.core import MaxIoUAssigner
from mmdet.core.bbox.samplers import OHEMSampler, RandomSampler
def test_random_sampler():
    """RandomSampler on a normal assign result keeps boxes/indices aligned."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    # Sampled boxes and their index tensors must stay the same length.
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sampler_empty_gt():
    """RandomSampler must handle the zero-ground-truth edge case."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    # Empty (0, 4) gt tensor: no boxes, matching labels tensor below.
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.empty(0, ).long()
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sampler_empty_pred():
    """RandomSampler must handle the zero-proposal edge case."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    # No predicted boxes at all.
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def _context_for_ohem():
    """Build a Faster R-CNN detector to serve as the OHEMSampler context."""
    try:
        from test_forward import _get_detector_cfg
    except ImportError:
        # Hack: grab testing utils from test_forward to make a context for ohem
        import sys
        from os.path import dirname
        sys.path.insert(0, dirname(__file__))
        from test_forward import _get_detector_cfg
    model, train_cfg, test_cfg = _get_detector_cfg(
        'faster_rcnn_ohem_r18_fpn_1x.py')
    # Avoid downloading pretrained weights in tests.
    model['pretrained'] = None
    # torchvision roi align supports CPU
    model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True
    from mmdet.models import build_detector
    context = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
    return context
def test_ohem_sampler():
    """OHEMSampler on a normal assign result keeps boxes/indices aligned."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    # Fake FPN feature maps at five scales (64 down to 4).
    feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_ohem_sampler_empty_gt():
    """OHEMSampler must handle the zero-ground-truth edge case."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    # Empty gt boxes/labels/ignore tensors.
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.LongTensor([])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_ohem_sampler_empty_pred():
    """OHEMSampler must handle the zero-proposal edge case."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    # No predicted boxes at all.
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_labels = torch.LongTensor([1, 2, 2, 3])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sample_result():
    """Smoke-test SamplingResult.random across gt/pred size combinations."""
    from mmdet.core.bbox.samplers.sampling_result import SamplingResult
    # No assertions: each call must simply construct without raising.
    SamplingResult.random(num_gts=0, num_preds=0)
    SamplingResult.random(num_gts=0, num_preds=3)
    SamplingResult.random(num_gts=3, num_preds=3)
    SamplingResult.random(num_gts=0, num_preds=3)
    SamplingResult.random(num_gts=7, num_preds=7)
    SamplingResult.random(num_gts=7, num_preds=64)
    SamplingResult.random(num_gts=24, num_preds=3)
    for i in range(3):
        SamplingResult.random(rng=i)
| [
"760997646@qq.com"
] | 760997646@qq.com |
60417ad7a9157ae72a147b6ef908a2afe7e87952 | b523a7d337ce60e1e1ca779be396eeeaab786b7d | /Python/leetcode_075_sort_colors_2nd.py | 41d2d71cf4a12c7dd6fea12fced690b2be118682 | [] | no_license | bakker4444/Algorithms | 3a65f83fde6a22a82646f6ee463a487f889291d0 | 453e92109494c962c36280cd0d32fb28aa771615 | refs/heads/master | 2021-05-25T11:56:18.424622 | 2019-05-08T22:07:15 | 2019-05-08T22:07:15 | 127,337,782 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py | ## 75. Sort Colors
#
# Given an array with n objects colored red, white or blue, sort them in-place so that objects of the same color are adjacent, with the colors in the order red, white and blue.
# Here, we will use the integers 0, 1, and 2 to represent the color red, white, and blue respectively.
# Note: You are not suppose to use the library's sort function for this problem.
#
# Example:
# Input: [2,0,2,1,1,0]
# Output: [0,0,1,1,2,2]
#
# Follow up:
# - A rather straight forward solution is a two-pass algorithm using counting sort. First, iterate the array counting number of 0's, 1's, and 2's, then overwrite array with total number of 0's, then 1's and followed by 2's.
# - Could you come up with a one-pass algorithm using only constant space?
##
### counting solution
## time complexity : O(2n)
## space complexity : O(1)
class Solution1(object):
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int] (the same list, sorted in place)

        Counting sort: tally the 0s/1s/2s, then rewrite the list from the
        tallies. Two passes, O(n) time, O(1) extra space.
        """
        counts = [0, 0, 0]
        for value in nums:
            counts[value] += 1
        # Slice assignment keeps the mutation in place on the caller's list.
        nums[:] = [0] * counts[0] + [1] * counts[1] + [2] * counts[2]
        return nums
### swap solution
## time complexity : O(n)
## space complexity : O(1)
class Solution2(object):
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int] (the same list, sorted in place)

        Dutch national flag partition: everything left of `left` is 0,
        everything right of `right` is 2, and `cursor` scans the unknown
        middle region. Single pass, O(n) time, O(1) extra space.
        """
        left = 0
        cursor = 0
        right = len(nums) - 1
        while cursor <= right:
            current = nums[cursor]
            if current == 0:
                nums[cursor], nums[left] = nums[left], nums[cursor]
                left += 1
                cursor += 1
            elif current == 1:
                cursor += 1
            else:  # current == 2
                nums[cursor], nums[right] = nums[right], nums[cursor]
                # Do not advance cursor: the swapped-in value is unexamined.
                right -= 1
        return nums
import unittest
class Test(unittest.TestCase):
    def test_sortColors(self):
        """Both solutions must sort every fixture to the expected order."""
        cases = [
            ([2, 0, 2, 1, 1, 0], [0, 0, 1, 1, 2, 2]),
            ([2, 0, 1], [0, 1, 2]),
            ([1, 0, 2], [0, 1, 2]),
            ([1, 0], [0, 1]),
            ([1, 0, 1], [0, 1, 1]),
            ([1, 2, 1], [1, 1, 2]),
        ]
        counting = Solution1()
        swapping = Solution2()
        for given, expected in cases:
            # Counting sort runs first (mutating `given`), then the swap-based
            # solution re-sorts the already-sorted list — same order as before.
            self.assertEqual(counting.sortColors(given), expected)
            self.assertEqual(swapping.sortColors(given), expected)
# Run the unit tests when executed as a script.
if __name__ == "__main__":
    unittest.main()
| [
"bakker4444@gmail.com"
] | bakker4444@gmail.com |
635ff42ad41b2d8a6c0bda55e9fabefb122925cf | ba59629d53d9500ff620387d8bcbadd639796719 | /code_examples/pytorch/mnist/test_mnist.py | d860a89a1604f92dba036e6cf333c20f03eb83fd | [
"MIT"
] | permissive | Joejiong/examples-2 | 7d423e9bd4e68b29613903158a50d391ffc72ef8 | 553b90b57d2ed8c996c74cbe5d48bb2b7dba5a88 | refs/heads/master | 2023-03-08T02:54:32.583139 | 2021-01-22T18:29:13 | 2021-01-22T18:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import inspect
import os
import subprocess
import unittest
import torch
from mnist_poptorch import accuracy
def run_poptorch_mnist(**kwargs):
    """Run mnist_poptorch.py as a subprocess and return its captured stdout.

    NOTE(review): **kwargs is accepted but never used when building the
    command line — confirm whether extra options were meant to be forwarded.
    """
    # Run from this test file's own directory so the script path resolves.
    cwd = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
    cmd = ["python3", 'mnist_poptorch.py']
    out = subprocess.check_output(cmd, cwd=cwd).decode("utf-8")
    return out
class TestPoptorchMNIST(unittest.TestCase):
    def test_accuracy_calculation(self):
        """accuracy() must score 3 of 4 correct argmax predictions as 75%."""
        pred = torch.tensor([[0.9, 0.05, 0.05],
                             [0.1, 0.5, 0.4],
                             [0.6, 0.01, 0.49],
                             [0.09, 0.11, 0.8]])
        label = torch.tensor([0, 1, 2, 2])
        acc = accuracy(pred, label)
        self.assertEqual(acc, 75)
    def test_test_final_training_accuracy(self):
        """Full training run must report a plausible (90, 99.9) test accuracy."""
        out = run_poptorch_mnist()
        final_acc = 0.0
        for line in out.split('\n'):
            if line.find('Accuracy on test set:') != -1:
                # Parse e.g. "Accuracy on test set: 97.21%" -> 97.21
                final_acc = float(line.split(": ")[-1].strip()[:-1])
                break
        self.assertGreater(final_acc, 90)
        self.assertLess(final_acc, 99.9)
| [
"philb@graphcore.ai"
] | philb@graphcore.ai |
ecd1837e2da77b2be6d2f8094e2424221ab60afb | 7a7a818f482b4781e15948bb64ea6ae79a631175 | /deephyper/benchmark/nas/candleP1B3/data_utils.py | 3e360a5f557773f482433a7ca850a8b4ea64eb0c | [
"BSD-2-Clause"
] | permissive | BethanyL/deephyper | 85157a856b8a35a180d5b56e2b00321ea384ebcc | 42cbb846f2815223c6843e92e234c3b0a765aeb6 | refs/heads/master | 2020-04-11T00:36:49.519366 | 2019-01-13T16:35:11 | 2019-01-13T16:35:11 | 161,391,202 | 1 | 1 | null | 2018-12-11T20:41:41 | 2018-12-11T20:41:41 | null | UTF-8 | Python | false | false | 4,665 | py | from __future__ import absolute_import
from __future__ import print_function
import tarfile
import os
import sys
import shutil
import hashlib
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError, HTTPError
from deephyper.benchmark.candleP1B3Nas.generic_utils import Progbar
# Under Python 2, 'urlretrieve' relies on FancyURLopener from legacy
# urllib module, known to have issues with proxy management
if sys.version_info[0] == 2:
    def urlretrieve(url, filename, reporthook=None, data=None):
        """Python 2 replacement for urlretrieve that streams via urlopen."""
        def chunk_read(response, chunk_size=8192, reporthook=None):
            # Generator yielding the response body chunk by chunk, invoking
            # reporthook(count, block_size, total_size) for progress display.
            total_size = response.info().get('Content-Length').strip()
            total_size = int(total_size)
            count = 0
            while 1:
                chunk = response.read(chunk_size)
                count += 1
                if not chunk:
                    # Final callback marks the download complete.
                    reporthook(count, total_size, total_size)
                    break
                if reporthook:
                    reporthook(count, chunk_size, total_size)
                yield chunk
        response = urlopen(url, data)
        with open(filename, 'wb') as fd:
            for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
else:
    from six.moves.urllib.request import urlretrieve
def get_file(fname, origin, untar=False,
             md5_hash=None, cache_subdir='common'):
    '''Downloads a file from a URL if it not already in the cache.
    Passing the MD5 hash will verify the file after download as well as if it is already present in the cache.
    # Arguments
        fname: name of the file
        origin: original URL of the file
        untar: boolean, whether the file should be decompressed
        md5_hash: MD5 hash of the file for verification
        cache_subdir: directory being used as the cache
    # Returns
        Path to the downloaded file
    '''
    # Cache lives next to this package: <package>/../Data/<cache_subdir>
    file_path = os.path.dirname(os.path.realpath(__file__))
    datadir_base = os.path.expanduser(os.path.join(file_path, '..', 'Data'))
    datadir = os.path.join(datadir_base, cache_subdir)
    if not os.path.exists(datadir):
        os.makedirs(datadir)
    if untar:
        untar_fpath = os.path.join(datadir, fname)
        fpath = untar_fpath + '.tar.gz'
    else:
        fpath = os.path.join(datadir, fname)
    download = False
    if os.path.exists(fpath):
        # file found; verify integrity if a hash was provided
        if md5_hash is not None:
            if not validate_file(fpath, md5_hash):
                print('A local file was found, but it seems to be '
                      'incomplete or outdated.')
                download = True
    else:
        download = True
    if download:
        print('Downloading data from', origin)
        # `progbar` is module-global so dl_progress can lazily create it on
        # the first callback and update it on subsequent ones.
        global progbar
        progbar = None
        def dl_progress(count, block_size, total_size):
            global progbar
            if progbar is None:
                progbar = Progbar(total_size)
            else:
                progbar.update(count * block_size)
        error_msg = 'URL fetch failure on {}: {} -- {}'
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
        except (Exception, KeyboardInterrupt) as e:
            # Remove a partially downloaded file before re-raising.
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        progbar = None
        print()
    if untar:
        if not os.path.exists(untar_fpath):
            print('Untarring file...')
            tfile = tarfile.open(fpath, 'r:gz')
            try:
                tfile.extractall(path=datadir)
            except (Exception, KeyboardInterrupt) as e:
                # Remove a partially extracted tree before re-raising.
                if os.path.exists(untar_fpath):
                    if os.path.isfile(untar_fpath):
                        os.remove(untar_fpath)
                    else:
                        shutil.rmtree(untar_fpath)
                raise
            tfile.close()
        return untar_fpath
    print()
    return fpath
def validate_file(fpath, md5_hash):
    '''Validates a file against a MD5 hash
    # Arguments
        fpath: path to the file being validated
        md5_hash: the MD5 hash being validated against
    # Returns
        Whether the file is valid
    '''
    hasher = hashlib.md5()
    # FIX: hash in fixed-size chunks instead of f.read() of the whole file —
    # this function is used on downloaded tarballs that may not fit in memory.
    with open(fpath, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            hasher.update(chunk)
    # Return the comparison directly (both sides coerced to str as before).
    return str(hasher.hexdigest()) == str(md5_hash)
| [
"romainegele@gmail.com"
] | romainegele@gmail.com |
6376c69e303c4688ca205d4b3661e35929db601a | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Yfm3h3nT3apARd4gC_13.py | fdf5b9abdb03a54e6ec2254ddc78f755dd346d28 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | """
Create a function that takes a list consisting of dice rolls from 1-6. Return
the sum of your rolls with the following conditions:
1. If a 1 is rolled, that is bad luck. The next roll counts as 0.
2. If a 6 is rolled, that is good luck. The next roll is multiplied by 2.
3. The list length will always be 3 or higher.
### Examples
rolls([1, 2, 3]) ➞ 4
# The second roll, 2, counts as 0 as a result of rolling 1.
rolls([2, 6, 2, 5]) ➞ 17
# The 2 following the 6 was multiplied by 2.
rolls([6, 1, 1]) ➞ 8
# The first roll makes the second roll worth 2, but the
# second roll was still 1 so the third roll doesn't count.
### Notes
Even if a 6 is rolled after a 1, 6 isn't summed but the 6's "effect" still
takes place.
"""
def rolls(lst):
    """Sum a sequence of dice rolls with luck rules.

    A roll immediately after a 6 counts double; a roll immediately
    after a 1 counts as zero.  The first roll always counts as-is.
    """
    total = lst[0]
    for prev, cur in zip(lst, lst[1:]):
        if prev == 6:
            total += cur * 2
        elif prev == 1:
            total += 0  # bad luck: this roll is void
        else:
            total += cur
    return total
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
e7e4db5403fa269998ca5914755f189c589de9c1 | 3c847175b995991414bda789eabda8c9b150af4a | /raspberry_pi_unit/opencv_ball_tracking.py | 1eb8096cb52c8b781bf8e6e7eb78f2df29b566d3 | [] | no_license | DamoM73/10-Digital-Technologies | 4ed0149b1c94eecf31e4f6060d79219ad8690832 | 5bf20cacf2b323bee8fcf1ee2260808e86d8f7c2 | refs/heads/master | 2023-05-01T07:02:29.146224 | 2021-05-20T03:17:42 | 2021-05-20T03:17:42 | 294,894,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,869 | py | # import necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time

# Track a red ball in a webcam stream or video file, drawing its motion trail.

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
                help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "red"
# ball in the HSV colour space, then initialize the
# list of tracked points
redLower = (0,100,80)
redUpper = (10,255,255)
pts = deque(maxlen=args["buffer"])

# if video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    # BUG FIX: the keyword is `src`, not `scr` -- the typo passed an
    # unknown keyword instead of selecting camera 0.
    vs = VideoStream(src=0).start()
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up.
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()
    # handle the frame from VideoCapture vs VideoStream
    # (VideoCapture.read() returns a (grabbed, frame) tuple)
    frame = frame[1] if args.get("video", False) else frame
    # if we are viewing a video and we did not grab a frame
    # then we have reached the end of the video
    if frame is None:
        break
    # resize the frame, blur it, and convert it to the HSV
    # colour space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # construct a mask for the colour "red", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, redLower, redUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find the contours in the mask and initialize the current
    # (x,y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # BUG FIX: `in(...)` was a typo for `int(...)`.
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                       (0, 255, 255), 2)
            # BUG FIX: the variable is `center`; `centre` was undefined.
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
    # update the points queue (may be None when nothing was detected;
    # the drawing loop below skips None entries)
    pts.appendleft(center)
    # loop over the set of tracked points
    for i in range(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # otherwise, compute the thickness of the line and
        # draw the connecting lines (older points get thinner lines)
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()
# otherwise, release the camera
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()
| [
"damomurtagh@gmail.com"
] | damomurtagh@gmail.com |
6c15f08b036a6a77f7b91f708de56490fb8b681c | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc070/C/answers/504659_wi.py | d7d7a14e475b780741e10f7ddca3d9fa61389074 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | import sys
stdin = sys.stdin
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline()
def gcd(a, b):
    """Greatest common divisor via Euclid's algorithm."""
    # Repeatedly replace (a, b) with (b, a mod b) until b hits zero.
    while b > 0:
        a, b = b, a % b
    return a
n = ni()
g = 0
for i in range(n):
v = ni()
if g == 0:
g = v
else:
g = g//gcd(g,v)*v
print(g) | [
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
509671c52ef3aab37220d37a55e97f13b9680c63 | 41f7085fffd12bb53222fdba00d033a43b9d7081 | /users/apps.py | b776938c9d4d9c5d420b841f16abfb29baea989c | [] | no_license | arifgafizov/online_store | b852e1bd32149268bbed9159f1037561a3d7e9a0 | 25c32f0ae65469e904509772d414a79a743ae31b | refs/heads/master | 2023-08-01T06:28:47.302377 | 2021-09-18T18:30:28 | 2021-09-18T18:30:28 | 345,300,892 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from django.apps import AppConfig
class UsersConfig(AppConfig):
    """App configuration for the `users` app; wires up signal handlers."""
    name = 'users'
    def ready(self):
        # Importing the module registers the post-save signup handler as a
        # side effect; ready() is Django's hook for exactly this purpose.
        from .signals import post_save_signup
| [
"agafizov@gmail.com"
] | agafizov@gmail.com |
10c4c8bcbc5ddcf5716aa35f9122b26965c9d62e | 2d80791a21a049243dd2bf7dd95a46c4d4b2510b | /domains/algorithms/warmup/TimeConversion.py | a7612996332e029de98c463eec1bd8e7fe9209aa | [] | no_license | jayrav13/jr-hackerrank | 909346d101fdf08a54ff75ec2ee39c90e661b251 | b7e0130fdd1c2eb4436871be3255200eac8ca3d9 | refs/heads/master | 2021-01-15T15:36:16.772814 | 2016-11-21T18:59:15 | 2016-11-21T18:59:15 | 48,657,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | #!/bin/python
import sys
# Convert a 12-hour clock string "hh:mm:ssAM"/"hh:mm:ssPM" (read from
# stdin) to 24-hour format.  Python 2 script: raw_input / print statement.
time = raw_input().strip().split(':')
# PM with an hour below 12 shifts forward by 12 (e.g. 07 PM -> 19).
if time[2][-2:] == 'PM' and int(time[0]) < 12:
    time[0] = str(int(time[0]) + 12)
# Midnight special case: 12 AM becomes 00.
if time[2][-2:] == 'AM':
    if time[0] == '12':
        time[0] = '00'
# NOTE(review): this branch is unreachable -- the PM shift only applies to
# hours < 12, so the hour can never reach 24 here.
if int(time[0]) == 24:
    time[0] = "00"
# Strip the trailing AM/PM marker from the seconds field.
time[2] = time[2][:-2]
print ":".join(time)
| [
"jayrav13@gmail.com"
] | jayrav13@gmail.com |
0742aaed6d2a00c0265fa9c84921f8017affaa93 | 6630694f401f6f475dd81bb01ff9368db844ccff | /configs/_base_/models/mobilevit/mobilevit_s.py | f6a4e05d2c8f1fc4f7b6a6b5953ff52cdfc7a2c6 | [
"Apache-2.0"
] | permissive | open-mmlab/mmpretrain | 98a4d6b3bb747efc3d50decebf84fc3ffa41076a | d2ccc44a2c8e5d49bb26187aff42f2abc90aee28 | refs/heads/main | 2023-08-30T19:11:24.771498 | 2023-08-23T02:45:18 | 2023-08-23T02:45:18 | 278,415,292 | 652 | 186 | Apache-2.0 | 2023-09-08T08:01:40 | 2020-07-09T16:25:04 | Python | UTF-8 | Python | false | false | 339 | py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(type='MobileViT', arch='small'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=640,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
dd98d05f369e5f312af1cbd5ef5826092fa4e837 | 24a9f32ae09cb545caf9984cedfad3ff89c0aad0 | /supportsystem/admin.py | c6edd541c0ab347389ef7d8782acc547463ce7be | [] | no_license | Jordonguy/TechCPRSupportSystem | 8bf81708ee3873795a76ad9ff5f79422c9a64d82 | e035fc0cd7502a726d8946f17e4d025ce3a83988 | refs/heads/master | 2020-04-27T13:43:39.318886 | 2019-10-24T23:48:15 | 2019-10-24T23:48:15 | 174,381,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from django.contrib import admin
# Register your models here.
from .models import Role, Company, Post, Comment, ExtendedUserProfile
# Expose each support-system model in the Django admin with the default
# ModelAdmin, so staff can manage records through the auto-generated UI.
admin.site.register(ExtendedUserProfile)
admin.site.register(Role)
admin.site.register(Company)
admin.site.register(Post)
admin.site.register(Comment)
| [
"you@example.com"
] | you@example.com |
fa75cf84f7c515ac5987b1a3387c4d6f759455b9 | 34745a8d54fa7e3d9e4237415eb52e507508ad79 | /Python Fundamentals/03 Lists Basics/More exercises/03_Josephus_Permutation.py | be8fe4e63778f129a03973856fc846ee279e0ba4 | [] | no_license | DilyanTsenkov/SoftUni-Software-Engineering | 50476af0dc88b267d72c56fa87eeb88d841164b2 | fe446e3a50a00bb2e48d71ab8f783e0a4a406094 | refs/heads/main | 2023-08-12T18:18:42.144210 | 2021-09-25T11:10:38 | 2021-09-25T11:10:38 | 317,235,419 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | elements = input().split(" ")
number = int(input())  # eliminate every `number`-th element
new_elements_list = []  # elements in the order they were eliminated
counter = 0  # running count that carries over between passes
# Josephus permutation: keep circling through the remaining elements,
# removing every `number`-th one, until the circle is empty.
while len(elements) != 0:
    for i in range(len(elements)):
        counter += 1
        if counter % number == 0:
            # Mark the eliminated slot with None first so the indices of
            # the current pass stay stable.
            new_elements_list.append(elements[i])
            elements[i] = None
    # Compact the circle: drop the None (eliminated) entries.
    elements = [i for i in elements if i]
print("[" + ",".join(new_elements_list) + "]") | [
"noreply@github.com"
] | DilyanTsenkov.noreply@github.com |
2885153df53235cefe140f7a4cd0be1e421959a6 | 5cd740c36bff792dec540f02ee95336b12808f36 | /account/views.py | eef1484898abccb1e4ecaa69fd7b5cbc5853f694 | [] | no_license | Pyxic/swipe | b4362a9e17d23b4b9f7d9cfcb3a63900119eb9e8 | 584062dfd5c8a5328c22bfcd2e194bb2f94a078c | refs/heads/master | 2023-09-03T21:38:34.577685 | 2021-11-17T12:23:06 | 2021-11-17T12:23:06 | 422,495,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,428 | py | from datetime import date
from django.conf import settings
from django.shortcuts import render, get_object_or_404
from django.utils.decorators import method_decorator
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets, permissions
from rest_framework.permissions import AllowAny
from account.models import User, UserFilter
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics
from account.models import Role
from account.permissions import IsAdmin, IsOwner, IsAdminOrOwner, IsDeveloper
from account.serializers import RoleListSerializer, UserListSerializer, ClientUpdateSerializer, \
NotaryDetailSerializer, ClientSerializer, UserFilterSerializer, UserRoleSerializer
# Tag the generated GET operation under "admin" in the Swagger schema.
@method_decorator(name='get', decorator=swagger_auto_schema(tags=['admin']))
class RoleListView(generics.ListAPIView):
    """List all user roles."""
    queryset = Role.objects.all()
    serializer_class = RoleListSerializer
class UserRoleView(generics.RetrieveAPIView):
    """Retrieve the role of a single user (authenticated callers only)."""
    queryset = User.objects.all()
    serializer_class = UserRoleSerializer
    permission_classes = [permissions.IsAuthenticated]
class AdminViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve of administrator accounts (superusers)."""
    view_tags = ['admin']  # grouping tag for the API schema
    serializer_class = UserListSerializer
    permission_classes = [IsAdmin]
    queryset = User.objects.filter(is_superuser=True)
class ClientViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for client accounts (users with the client role)."""

    view_tags = ['client']

    def get_queryset(self):
        # Only users whose role name is the (Russian) "client" label.
        return User.objects.filter(role__name='клиент')

    def get_serializer_class(self):
        if self.action == 'list':
            return UserListSerializer
        # BUG FIX: fall back to the detail serializer for every other
        # action ('create' included) -- the original implicitly returned
        # None for unlisted actions, which crashes inside DRF.
        return ClientSerializer

    def get_permissions(self):
        if self.action == 'list':
            permission_classes = [AllowAny]
        elif self.action in ['update', 'partial_update', 'retrieve', 'destroy']:
            # BUG FIX: 'destroy' (and 'create') previously fell through
            # with permission_classes unbound -> UnboundLocalError.
            # 'destroy' gets the same owner-or-admin rule as the other
            # per-object actions (mirrors DeveloperViewSet).
            permission_classes = [IsAdminOrOwner]
        else:
            # Remaining actions (e.g. 'create') are admin-only by default.
            permission_classes = [IsAdmin]
        return [permission() for permission in permission_classes]
class NotaryViewSet(viewsets.ViewSet):
    """Admin-only read endpoints for notary accounts."""

    permission_classes = [IsAdmin]

    @swagger_auto_schema(tags=['notary'])
    def list(self, request):
        """Return the list of notaries."""
        queryset = User.objects.filter(role__name='нотариус')
        serializer = UserListSerializer(queryset, many=True)
        return Response(serializer.data)

    @swagger_auto_schema(tags=['notary'])
    def retrieve(self, request, pk=None):
        """Return full details for one notary (404 if the pk is not a notary)."""
        queryset = User.objects.filter(role__name='нотариус')
        notary = get_object_or_404(queryset, pk=pk)
        serializer = NotaryDetailSerializer(notary)
        return Response(serializer.data)
# Tag both update verbs (PUT and PATCH) under "notary" in the API schema.
@method_decorator(name='put', decorator=swagger_auto_schema(tags=['notary']))
@method_decorator(name='patch', decorator=swagger_auto_schema(tags=['notary']))
class NotaryUpdateView(generics.UpdateAPIView):
    """Edit a notary account (admin only)."""
    permission_classes = [IsAdmin]
    queryset = User.objects.filter(role__name='нотариус')
    serializer_class = NotaryDetailSerializer
# Tag the DELETE operation under "notary" in the API schema.
@method_decorator(name='delete', decorator=swagger_auto_schema(tags=['notary']))
class NotaryDestroyView(generics.DestroyAPIView):
    """Delete a notary account (admin only)."""
    permission_classes = [IsAdmin]
    queryset = User.objects.filter(role__name='нотариус')
    serializer_class = NotaryDetailSerializer
class DeveloperViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for developer accounts (users with the developer role)."""

    view_tags = ['developer']

    def get_permissions(self):
        if self.action == 'list':
            permission_classes = [IsAdmin]
        elif self.action in ['update', 'partial_update', 'retrieve', 'destroy']:
            permission_classes = [IsAdminOrOwner]
        else:
            # BUG FIX: actions outside the two branches above (e.g.
            # 'create') previously raised UnboundLocalError because
            # permission_classes was never assigned.  Default to admin-only.
            permission_classes = [IsAdmin]
        return [permission() for permission in permission_classes]

    def get_queryset(self):
        # Only users whose role name is the (Russian) "developer" label.
        return User.objects.filter(role__name='застройщик')

    def get_serializer_class(self):
        if self.action == 'list':
            return UserListSerializer
        # BUG FIX: fall back to the detail serializer instead of implicitly
        # returning None for unlisted actions such as 'create'.
        return ClientSerializer
class ClientUpdateSubscriptionView(generics.GenericAPIView):
    """Extend the requesting client's subscription by one month (owner only)."""

    permission_classes = (permissions.IsAuthenticated, IsOwner)
    serializer_class = ClientUpdateSerializer
    queryset = User.objects.filter(role__name='клиент')
    view_tags = ['client']

    @staticmethod
    def _one_month_from(today):
        """Return *today* shifted one month forward.

        Fixes two bugs in the original expression:
        * December rolled over to January of the SAME year;
        * day-of-month overflow (e.g. Jan 31 -> "Feb 31") raised ValueError.
        The day is clamped to the last day of the target month.
        """
        import calendar  # local import keeps the module's import block untouched
        year = today.year + 1 if today.month == 12 else today.year
        month = 1 if today.month == 12 else today.month + 1
        day = min(today.day, calendar.monthrange(year, month)[1])
        return date(year, month, day)

    def patch(self, request, *args, **kwargs):
        user = self.get_object()
        # (The original also built an unused serializer instance here.)
        user.end_date = self._one_month_from(date.today())
        user.subscribed = True
        user.save()
        return Response({'pk': user.pk, 'subscribed': user.subscribed,
                         'end_date': user.end_date.strftime('%Y-%m-%d')})
class ChangeBanStatus(APIView):
    """Toggle the ban flag of any user by pk (admin only)."""
    permission_classes = (permissions.IsAuthenticated, IsAdmin)
    view_tags = ['admin']
    def patch(self, request, pk):
        # Flip the flag: banned users become unbanned and vice versa.
        user = get_object_or_404(User, pk=pk)
        user.banned = not user.banned
        user.save()
        return Response({'pk': user.pk,
                         'ban': user.banned}, status=status.HTTP_200_OK)
class UserFilterViewSet(viewsets.ModelViewSet):
    """
    CRUD for per-user saved 'Announcement' search filters.

    Users only ever see and manage their own filters; creating a new
    filter requires an active subscription.
    """
    permission_classes = (IsOwner,)
    serializer_class = UserFilterSerializer
    queryset = UserFilter.objects.all().order_by('-id')
    view_tags = ['user']
    def get_queryset(self):
        # Restrict the base queryset to the requesting user's own filters.
        return self.queryset.filter(user=self.request.user)
    def perform_create(self, serializer):
        # Attach the authenticated user as the owner of the new filter.
        serializer.save(user=self.request.user)
    def create(self, request, *args, **kwargs):
        """
        Create a saved filter.

        Subscribed users have no restrictions; users whose subscription
        has lapsed receive a 400 response instead.
        """
        if request.user.subscribed:
            return super().create(request, *args, **kwargs)
        return Response({'Error': 'Your subscribe are finished. Please, extend your subscribe'},
                        status=status.HTTP_400_BAD_REQUEST)
| [
"mishakalita3@gmail.com"
] | mishakalita3@gmail.com |
478ebf23b298e716de7e6cb64c4e04d287263c94 | 9994709e43d108ca49af5f5961f56a6492c84eb1 | /src/RegistrationWithConfirmation/settings.py | c1340257f4d192671f4dacebc9156788531fa0de | [] | no_license | achiengcindy/RegistrationWithConfirmation | f115bc8d5dc0ac6f3e832d4d159bc24c41e80915 | a47cca9bd519894684a8b2dbcb1a909c23fed40a | refs/heads/master | 2020-04-11T11:34:57.184708 | 2018-12-14T08:14:42 | 2018-12-14T08:14:42 | 161,752,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,459 | py | """
Django settings for RegistrationWithConfirmation project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')=6=e8t!183j_69bzxhq(-n4w3g_51vow8o)30cu_@c9ys4ko@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
EMAIL_HOST=''
EMAIL_HOST_USER=''
EMAIL_HOST_PASSWORD=''
EMAIL_PORT=587
EMAIL_USE_TLS=True
DEFAULT_FROM_EMAIL='your email'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'RegistrationWithConfirmation.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'RegistrationWithConfirmation.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
| [
"achiengcindy36@gmail.com"
] | achiengcindy36@gmail.com |
89070d14e64f9f1511324e3516874a9b389fcbeb | a87810b7abad09b642399e6b83ceabea17a6d9f2 | /lock.py | c6b4ce6650ff9655be2b4a325fb0ebb652ac12e1 | [] | no_license | Sendhub/sh_util | 452fb85386b669657c9c8cadffdfb3520c0ab7e4 | 739c14cdcfeebac2fab9a2e5f76f98ecd524d7bd | refs/heads/master | 2023-07-20T06:43:46.212443 | 2019-06-24T23:24:26 | 2019-06-24T23:24:26 | 10,512,240 | 0 | 0 | null | 2021-06-29T13:45:29 | 2013-06-05T20:35:28 | Python | UTF-8 | Python | false | false | 615 | py | __author__ = 'brock'
import settings
_redis = settings.REDIS
def acquireLock(lockId, timeout=60):
    """Try to acquire the distributed Redis lock *lockId*.

    Returns truthy when the lock was acquired, falsy otherwise.  The lock
    always carries a TTL of *timeout* seconds so a crashed holder cannot
    deadlock other workers.
    """
    # make sure these redis locks always have a valid timeout
    assert timeout > 0
    # SET with nx/ex is a single atomic command: the key and its expiry are
    # created together, closing the race in the old SETNX-then-EXPIRE
    # sequence where a crash in between left a lock with no TTL.
    acquired = bool(_redis.set(lockId, "true", nx=True, ex=timeout))
    if not acquired:
        # Defensive repair for legacy locks created without a TTL by the
        # old code path.
        # NOTE(review): with redis-py >= 3, ttl() returns -1 (truthy) for a
        # key without expiry -- confirm the client version if this branch
        # is expected to fire.
        if not _redis.ttl(lockId):
            _redis.expire(lockId, timeout)
    return acquired
def releaseLock(lockId):
    """Release the lock by deleting its key (deleting a missing key is a no-op)."""
    _redis.delete(lockId)
| [
"outtatime@gmail.com"
] | outtatime@gmail.com |
17e2629a616b6de3e7c6b1f78079b754c52ef6ea | 862588320887ec451870fb35856d4315bd2b9685 | /simple_operations/generate_backrub_ensemble.py | 8aae652d80bb8410c64e9cb270f782ef186394d3 | [] | no_license | xingjiepan/pyrosetta_scripts | 6522aa8fef43b89adac6fba1bc6072f0df2425d2 | 640ea455319d55a0cb167c50f2722778dbdde1f1 | refs/heads/master | 2021-03-16T05:35:52.560752 | 2020-05-12T22:58:54 | 2020-05-12T22:58:54 | 111,063,960 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | #!/usr/bin/env python3
'''Generate a backbrub ensemble for a given protein.
Usage:
./generate_backrub_ensemble.py input_pdb
'''
import sys
import pyrosetta
from pyrosetta import rosetta
if __name__ == '__main__':
    pyrosetta.init()
    input_pdb = sys.argv[1]  # path to the input PDB structure
    pose = rosetta.core.pose.Pose()
    rosetta.core.import_pose.pose_from_file(pose, input_pdb)
    br_mover = rosetta.protocols.backrub.BackrubMover()
    # Save the unperturbed structure for reference.
    pose.dump_pdb('before_br.pdb')
    # Dump 20 structures
    # Each ensemble member perturbs a fresh clone of the ORIGINAL pose, so
    # backrub moves do not accumulate from one member to the next.
    for i in range(20):
        tmp_pose = pose.clone()
        br_mover.apply(tmp_pose)
        tmp_pose.dump_pdb('after_br_{0}.pdb'.format(i))
| [
"xingjiepan@gmail.com"
] | xingjiepan@gmail.com |
7e6cd1bfda829bc0090b0db86435f83cc639abda | d1aa6e7d5631d7806531660febbd1f856eaeece7 | /python/paddle/distributed/ps/utils/ps_factory.py | ddf5c1e3ec0315397d52c93cfb4eb2b01c3ccb4e | [
"Apache-2.0"
] | permissive | gongweibao/Paddle | 510cd4bc0ef89bc6ccee7b6b8eca52c00e014b77 | 60f9c60cd8196c66c391d79c35d341e9072f8838 | refs/heads/develop | 2023-03-13T17:43:35.675875 | 2022-09-20T08:46:15 | 2022-09-20T08:46:15 | 82,279,237 | 3 | 2 | Apache-2.0 | 2021-05-26T06:17:43 | 2017-02-17T09:16:16 | Python | UTF-8 | Python | false | false | 1,880 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from .ps_program_builder import *
from .public import *
__all__ = [
'PsProgramBuilder', 'GeoPsProgramBuilder', 'CpuSyncPsProgramBuilder',
'CpuAsyncPsProgramBuilder', 'GpuPsProgramBuilder',
'HeterAsyncPsProgramBuilder', 'FlPsProgramBuilder', 'NuPsProgramBuilder'
]
class PsProgramBuilderFactory(object):
    """Factory selecting the parameter-server program builder for a run."""
    def __init__(self):
        pass
    def _create_ps_program_builder(self, pass_ctx):
        """Pick a builder from the mode flags carried by *pass_ctx*._attrs.

        Builders are looked up by name in this module's globals(), so every
        candidate class must be importable at module level.
        """
        attrs = pass_ctx._attrs
        if attrs['ps_mode'] == DistributedMode.GEO:
            # GEO mode with locally-kept sparse tables uses the NU builder.
            if len(attrs['local_sparse']) != 0:
                return globals()['NuPsProgramBuilder'](pass_ctx)
            else:
                return globals()['GeoPsProgramBuilder'](pass_ctx)
        elif attrs['use_ps_gpu']:
            return globals()['GpuPsProgramBuilder'](pass_ctx)
        elif attrs['is_heter_ps_mode'] and not attrs['is_fl_ps_mode']:
            return globals()['HeterAsyncPsProgramBuilder'](pass_ctx)
        elif 'is_fl_ps_mode' in attrs and attrs['is_fl_ps_mode']:
            return globals()['FlPsProgramBuilder'](pass_ctx)
        elif attrs['ps_mode'] == DistributedMode.SYNC:
            return globals()['CpuSyncPsProgramBuilder'](pass_ctx)
        else:
            # Default: CPU asynchronous training.
            return globals()['CpuAsyncPsProgramBuilder'](pass_ctx)
| [
"noreply@github.com"
] | gongweibao.noreply@github.com |
654ea14d4f78784922eedf0686a4f756d0dd078a | cbe264842df4eae3569b28ed4aae9489014ed23c | /books/PythonCleanCode/ch7_generator/generators_coroutines_1.py | 65fbfdd9702000177e66d098a1a4ad1ae29048ae | [
"MIT"
] | permissive | zeroam/TIL | 31e176c2f4c3e1ef72b1155353690cc2f7160f96 | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | refs/heads/master | 2021-07-23T01:43:34.135033 | 2021-07-10T06:47:17 | 2021-07-10T06:47:17 | 167,952,375 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | """Clean Code in Python - Chapter 7: Using Generators
> Methods of the Generators Interface.
"""
import time
from log import logger
class DBHandler:
"""Simulate reading from the database by pages."""
def __init__(self, db):
self.db = db
self.is_closed = False
def read_n_records(self, limit):
return [(i, f"row {i}") for i in range(limit)]
def close(self):
logger.debug("closing connection to database %r", self.db)
self.is_closed = True
def stream_db_records(db_handler):
"""Example of .close()
>>> streamer = stream_db_records(DBHandler("testdb")) # doctest: +ELLIPSIS
>>> len(next(streamer))
10
>>> len(next(streamer))
10
"""
try:
while True:
yield db_handler.read_n_records(10)
time.sleep(.1)
except GeneratorExit:
db_handler.close()
class CustomException(Exception):
"""An exception of the domain model."""
def stream_data(db_handler):
"""Test the ``.throw()`` method.
>>> streamer = stream_data(DBHandler("testdb"))
>>> len(next(streamer))
10
"""
while True:
try:
yield db_handler.read_n_records(10)
except CustomException as e:
logger.info("controlled error %r, continuing", e)
except Exception as e:
logger.info("unhandled error %r, stopping", e)
db_handler.close()
break
| [
"imdff0803@gmail.com"
] | imdff0803@gmail.com |
eac8016636cfd014537c66a16982d00c21173836 | ade45967ee95ba61217658b479604bb97e86770e | /isint.py | b13dbef9269d8217ec1be98c8f2ae0469f7a8094 | [] | no_license | parkseohui/git | fb0c3b41e4efd8b7a5220864c935fff7a32523db | 238580378df8772bc47045843db52baac49e658c | refs/heads/master | 2020-04-17T10:45:07.983727 | 2019-02-27T14:27:13 | 2019-02-27T14:27:13 | 166,512,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #문자열을 제거한뒤 숫자만반환
'''난이도:(쉬움) 현우는 축구를보다가 우리나라선수들의몸값을 알고싶었다
그래서 검색을해서 메모장에 적는데 키보드가 조그만하고 안좋은지라
자꾸 숫자가아닌 문자를 같이입력해버린다
ex: xxx : 1627000000 > xxx : 1w627r00o00p00 만 (특수문자제외)
현우는 왜인지모르지만 뜻대로안되는것에
너무화가나서 자신이수량을입력하면 문자열만 딱빼서 숫자만 반환하는 코드를 만들고싶어한다
화가난 현우를위해 코드를 만들어보자!
'''
print(''.join(i for i in input('') if i.isdigit()))
| [
"skfhddl003@gmail.com"
] | skfhddl003@gmail.com |
4de909fdf690d158215c7a5f55c16f8c14efc0df | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/3-OO-Python/5-encapsulaton_20200415212038.py | 120d237cce96f90aac71a89b9a9b9197e28bc4e1 | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | class PlayerCharacter:
def __init__(self, name, age):
self.name = name
self.age = age
def run (self):
print('run')
def speak(self):
print(f'my name is {') | [
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
753ab077a5840cea3bc5b736b02cdb6ba8ab9c6a | 8fe833d3751486cf03130bfdfafffeaf60c01ff8 | /hwilib/devices/trezorlib/btc.py | f9c56cf0a74ad3b604cc460f66e6170a3fc5193f | [
"MIT"
] | permissive | fmr-llc/HWI | 50726924292c92e857c1ad13458af92a2ca23037 | 4c13daed3b62635320e2411c1dd6a8ee307012e5 | refs/heads/master | 2020-12-11T14:18:51.610966 | 2020-01-13T14:57:30 | 2020-01-13T14:57:40 | 233,870,798 | 1 | 1 | MIT | 2020-01-14T15:20:33 | 2020-01-14T15:20:32 | null | UTF-8 | Python | false | false | 5,700 | py | # This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
from . import messages
from .tools import CallException, expect, normalize_nfc, session
@expect(messages.PublicKey)
def get_public_node(
    client,
    n,
    ecdsa_curve_name=None,
    show_display=False,
    coin_name=None,
    script_type=messages.InputScriptType.SPENDADDRESS,
):
    """Ask the device for the public node at BIP-32 path *n*.

    Decorated with @expect(messages.PublicKey): the response is expected
    to be a PublicKey message.  *show_display* asks the device to also
    render the result on its own screen.
    """
    return client.call(
        messages.GetPublicKey(
            address_n=n,
            ecdsa_curve_name=ecdsa_curve_name,
            show_display=show_display,
            coin_name=coin_name,
            script_type=script_type,
        )
    )
@expect(messages.Address, field="address")
def get_address(
    client,
    coin_name,
    n,
    show_display=False,
    multisig=None,
    script_type=messages.InputScriptType.SPENDADDRESS,
):
    """Derive the *coin_name* address at BIP-32 path *n* on the device.

    Decorated with @expect(..., field="address"), i.e. the caller is
    handed the `address` field of the Address response.  *multisig*
    optionally describes a multisig setup; *script_type* selects
    legacy / p2sh-segwit / native-segwit derivation.
    """
    return client.call(
        messages.GetAddress(
            address_n=n,
            coin_name=coin_name,
            show_display=show_display,
            multisig=multisig,
            script_type=script_type,
        )
    )
@expect(messages.MessageSignature)
def sign_message(
    client, coin_name, n, message, script_type=messages.InputScriptType.SPENDADDRESS
):
    """Sign *message* with the key at BIP-32 path *n* for *coin_name*.

    The message is NFC-normalized before being sent; the response is
    expected to be a MessageSignature message.
    """
    message = normalize_nfc(message)
    return client.call(
        messages.SignMessage(
            coin_name=coin_name, address_n=n, message=message, script_type=script_type
        )
    )
@session
def sign_tx(client, coin_name, inputs, outputs, details=None, prev_txes=None):
    """Sign a Bitcoin-family transaction on the device.

    Drives the TxRequest/TxAck workflow: the device repeatedly asks for
    pieces of the new transaction (and of previous transactions for
    non-segwit inputs, fetched from *prev_txes*) while streaming back
    per-input signatures and the serialized transaction.

    Returns a ``(signatures, serialized_tx)`` tuple.  Raises ValueError
    on bad *prev_txes*, CallException on device failure, and RuntimeError
    if any input ended up unsigned.
    """
    # set up a transactions dict
    # Key None holds the transaction being signed; prev-tx hashes map to
    # their full previous transactions.
    txes = {None: messages.TransactionType(inputs=inputs, outputs=outputs)}
    # preload all relevant transactions ahead of time
    # (segwit and external inputs don't need their previous transactions)
    for inp in inputs:
        if inp.script_type not in (
            messages.InputScriptType.SPENDP2SHWITNESS,
            messages.InputScriptType.SPENDWITNESS,
            messages.InputScriptType.EXTERNAL,
        ):
            try:
                prev_tx = prev_txes[inp.prev_hash]
            except Exception as e:
                raise ValueError("Could not retrieve prev_tx") from e
            if not isinstance(prev_tx, messages.TransactionType):
                raise ValueError("Invalid value for prev_tx") from None
            txes[inp.prev_hash] = prev_tx
    if details is None:
        signtx = messages.SignTx()
    else:
        signtx = details
    signtx.coin_name = coin_name
    signtx.inputs_count = len(inputs)
    signtx.outputs_count = len(outputs)
    res = client.call(signtx)
    # Prepare structure for signatures
    signatures = [None] * len(inputs)
    serialized_tx = b""
    def copy_tx_meta(tx):
        # Copy only the metadata of *tx*: counts/lengths are kept, the
        # actual inputs/outputs/extra data are stripped (the device asks
        # for those separately).
        tx_copy = messages.TransactionType()
        tx_copy.CopyFrom(tx)
        # clear fields
        tx_copy.inputs_cnt = len(tx.inputs)
        tx_copy.inputs = []
        tx_copy.outputs_cnt = len(tx.bin_outputs or tx.outputs)
        tx_copy.outputs = []
        tx_copy.bin_outputs = []
        tx_copy.extra_data_len = len(tx.extra_data or b"")
        tx_copy.extra_data = None
        return tx_copy
    R = messages.RequestType
    while isinstance(res, messages.TxRequest):
        # If there's some part of signed transaction, let's add it
        if res.serialized:
            if res.serialized.serialized_tx:
                serialized_tx += res.serialized.serialized_tx
            if res.serialized.signature_index is not None:
                idx = res.serialized.signature_index
                sig = res.serialized.signature
                if signatures[idx] is not None:
                    raise ValueError("Signature for index %d already filled" % idx)
                signatures[idx] = sig
        if res.request_type == R.TXFINISHED:
            break
        # Device asked for one more information, let's process it.
        # tx_hash is None for the transaction being signed, otherwise it
        # names one of the preloaded previous transactions.
        current_tx = txes[res.details.tx_hash]
        if res.request_type == R.TXMETA:
            msg = copy_tx_meta(current_tx)
            res = client.call(messages.TxAck(tx=msg))
        elif res.request_type == R.TXINPUT:
            msg = messages.TransactionType()
            msg.inputs = [current_tx.inputs[res.details.request_index]]
            res = client.call(messages.TxAck(tx=msg))
        elif res.request_type == R.TXOUTPUT:
            msg = messages.TransactionType()
            # Previous transactions carry already-serialized bin_outputs.
            if res.details.tx_hash:
                msg.bin_outputs = [current_tx.bin_outputs[res.details.request_index]]
            else:
                msg.outputs = [current_tx.outputs[res.details.request_index]]
            res = client.call(messages.TxAck(tx=msg))
        elif res.request_type == R.TXEXTRADATA:
            o, l = res.details.extra_data_offset, res.details.extra_data_len
            msg = messages.TransactionType()
            msg.extra_data = current_tx.extra_data[o : o + l]
            res = client.call(messages.TxAck(tx=msg))
    if isinstance(res, messages.Failure):
        raise CallException("Signing failed")
    if not isinstance(res, messages.TxRequest):
        raise CallException("Unexpected message")
    if None in signatures:
        raise RuntimeError("Some signatures are missing!")
    return signatures, serialized_tx
| [
"achow101-github@achow101.com"
] | achow101-github@achow101.com |
a2a8f7739a9aee7ce46c3440d4a2914bb62cb20f | 1c4a19c0d1953280f79193f30ad8c4759e3aff58 | /ansys/dpf/core/operators/math/cos_fc.py | 877e5d8eb4f4e277d7a79f318f9792673e6c3de6 | [
"MIT"
] | permissive | hoangxuyenle/DPF-Core | d02c843b678560f12715ea90dc8c9764b3bffc99 | a404dd290c7b3ee75463b2487cafb8bf48468691 | refs/heads/master | 2023-06-15T15:27:02.597938 | 2021-06-22T15:19:04 | 2021-06-22T15:19:04 | 381,611,135 | 0 | 0 | MIT | 2021-06-30T07:18:30 | 2021-06-30T07:18:30 | null | UTF-8 | Python | false | false | 4,828 | py | """
cos_fc
======
"""
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs, _modify_output_spec_with_one_type
from ansys.dpf.core.operators.specification import PinSpecification, Specification
"""Operators from Ans.Dpf.Native plugin, from "math" category
"""
class cos_fc(Operator):
    """DPF operator computing element-wise cos(field[i]) over a fields container.

    available inputs:
      - fields_container (FieldsContainer)

    available outputs:
      - fields_container (FieldsContainer)

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.cos_fc()
    >>> my_fields_container = dpf.FieldsContainer()
    >>> op.inputs.fields_container.connect(my_fields_container)
    >>> # or connect at construction time:
    >>> op = dpf.operators.math.cos_fc(fields_container=my_fields_container)
    >>> result_fields_container = op.outputs.fields_container()
    """
    def __init__(self, fields_container=None, config=None, server=None):
        super().__init__(name="cos_fc", config=config, server=server)
        self._inputs = InputsCosFc(self)
        self._outputs = OutputsCosFc(self)
        # Fixed PEP 8 E711: compare to None with `is not`, not `!=`
        # (avoids invoking a custom __ne__ on the container object).
        if fields_container is not None:
            self.inputs.fields_container.connect(fields_container)

    @staticmethod
    def _spec():
        """Build the operator specification: one input pin, one output pin."""
        spec = Specification(description="""Computes element-wise cos(field[i]).""",
                             map_input_pin_spec={
                                 0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""field or fields container with only one field is expected""")},
                             map_output_pin_spec={
                                 0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""""")})
        return spec

    @staticmethod
    def default_config():
        """Return the default configuration for the "cos_fc" operator."""
        return Operator.default_config(name="cos_fc")

    @property
    def inputs(self):
        """InputsCosFc: helper used to connect inputs to the operator."""
        return super().inputs

    @property
    def outputs(self):
        """OutputsCosFc: helper used to evaluate the operator and fetch outputs."""
        return super().outputs

# internal name: cos_fc
# scripting name: cos_fc
class InputsCosFc(_Inputs):
    """Collects the input pins of the cos_fc operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.cos_fc()
    >>> my_fields_container = dpf.FieldsContainer()
    >>> op.inputs.fields_container.connect(my_fields_container)
    """
    def __init__(self, op: Operator):
        super().__init__(cos_fc._spec().inputs, op)
        pin0_spec = cos_fc._spec().input_pin(0)
        self._fields_container = Input(pin0_spec, 0, op, -1)
        self._inputs.append(self._fields_container)

    @property
    def fields_container(self):
        """Input pin 0 of the operator.

        A field, or a fields container holding a single field, is expected.
        Connect with ``op.inputs.fields_container.connect(value)`` or the
        shorthand ``op.inputs.fields_container(value)``.
        """
        return self._fields_container
class OutputsCosFc(_Outputs):
    """Collects the output pins of the cos_fc operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.cos_fc()
    >>> # Connect inputs : op.inputs. ...
    >>> result_fields_container = op.outputs.fields_container()
    """
    def __init__(self, op: Operator):
        super().__init__(cos_fc._spec().outputs, op)
        pin0_spec = cos_fc._spec().output_pin(0)
        self._fields_container = Output(pin0_spec, 0, op)
        self._outputs.append(self._fields_container)

    @property
    def fields_container(self):
        """Output pin 0 of the operator: the resulting fields container.

        Evaluates the operator on access via ``op.outputs.fields_container()``.
        """
        return self._fields_container
| [
"lea.paradis@ansys.com"
] | lea.paradis@ansys.com |
acda52f070fb154262908ba31dbace2d0a3c92c6 | 93b8a4be20a0a6b56bc7709b3ab4690135257ebe | /BrowserSession.py | 51cf187550b12d8774319d4032fed37519c2acfc | [] | no_license | kordless/crawlapart | 1c551b300b91d7da245a76dc0e13cde63d7bea00 | da308154be03a08bd752e37a3c6088a356f48208 | refs/heads/master | 2021-04-22T23:16:32.699117 | 2020-03-26T20:49:33 | 2020-03-26T20:49:33 | 249,879,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | #!/usr/bin/env python3
import webdriver
import json
import base64
import sys
import requests
import traceback
import logging
import time
# TODO Setup Interaction with DB rather than with flags and config files
# ideally we just want something like -> LookUpWord -> Provider -> Google
# ideally we just want something like -> go_to_url -> Cached/NotCached -> nytimes
# Explorer Mode
# Use Whois, Dig, nc, etc...
# Think of this like the Selenium but a true browser
class BrowserSession:
    """Drives a Firefox instance through a raw W3C WebDriver (geckodriver) session.

    Configuration is currently read from ``config.json``; per the original
    notes the intent is to move it to MongoDB / local storage eventually.
    """

    def __init__(self, url=None, persistent=False, debug=False):
        # `persistent` is accepted for interface compatibility but is
        # currently unused — TODO confirm intended semantics.
        self.debug = debug
        # Navigation state
        self.url = url
        self.stayopen = False
        # Browser-specific options
        self.headless = False
        self.fullscreen = True
        # Crawl-specific options
        self.local_db = None     # stick to local socket
        self.local_index = None  # some local index store
        self.save_text = False

    def setup_session(self):
        """Load ``config.json`` and open a new WebDriver session.

        Bug fix: the config file handle is now closed via a context manager
        (the original leaked it with ``open(...).read()``).
        """
        with open('config.json', 'r') as config_file:
            self.config = json.load(config_file)
        if self.headless:
            ff_args = self.config['capabilities']['alwaysMatch']['moz:firefoxOptions']['args']
            ff_args.insert(1, '--headless')
            ff_args.insert(1, '--height=1080')
            ff_args.insert(1, '--width=1920')
        print(self.config['capabilities'])
        self.session = webdriver.Session(self.config['webdriverip'],
                                         self.config['webdriverport'],
                                         capabilities=self.config['capabilities'])
        return

    def go_to_url(self, url=None, fullscreen=True):
        """Navigate to `url` (defaults to ``self.url``), optionally fullscreen."""
        if url is None:
            url = self.url
        self.session.url = url
        if fullscreen:
            self.fullscreen = True
            self.session.window.fullscreen()
        if self.debug:
            print("WebDriver to sessionID -------> {}".format(self.session.session_id))
        return

    def save_screenshot(self, filename=None):
        """Save a PNG screenshot under ``Screenshots/``.

        Uses the Firefox-only full-page endpoint when ``self.fullscreen`` is
        set; otherwise the standard viewport screenshot endpoint.
        """
        if filename is None:
            filename = "Screenshots/ss_{:.0f}.png".format(time.time())
        print("Full Filename to use:\n\n")
        print(filename + "\n\n")
        try:
            session_base = "http://localhost:4444/session/" + self.session.session_id
            if self.fullscreen:
                # Firefox-specific extension that captures the whole page.
                r = requests.get(url=session_base + "/moz/screenshot/full")
                print(r.status_code)
            else:
                r = requests.get(url=session_base + "/screenshot")
            if r.status_code == 200:
                try:
                    with open(filename, 'wb') as screenshot:
                        screenshot.write(base64.b64decode(r.json()['value']))
                except IOError as err:
                    print("I/O error: {0}".format(err))
            elif r.status_code == 404:
                print("Something is wrong with the session? maybe it's closed????")
                print(r.json())
        except Exception:
            # Deliberate best-effort: a failed screenshot must not abort the crawl.
            traceback.print_exc()
            pass
def main_test():
    """Smoke test: open a headless session, load a page, save a screenshot."""
    session = BrowserSession()
    session.headless = True
    session.setup_session()
    # Alternative target kept from earlier experiments:
    # session.go_to_url('https://google.com/search?q=MLK', fullscreen=True)
    session.go_to_url('https://news.ycombinator.com', fullscreen=True)
    print("waiting two seconds for page to load")
    time.sleep(2)
    session.save_screenshot()
new_session.save_screenshot()
if __name__ == '__main__':
main_test()
| [
"kordless@gmail.com"
] | kordless@gmail.com |
65688770c987a1408b9f67e406c13f7a8164b3c4 | 3b94782b680ca9a847b48e1a38c56b88823f6992 | /microesc/train.py | 2ee9de4f8287b92c4a81347e96318c7043f75c72 | [] | no_license | ShamsNafisaAli/ESC-CNN-microcontroller | 48e1db3ade88ff79c4a66793d0f992e165917842 | 572c319c7ad4d0a98bf210d59b26f6df923c8e7b | refs/heads/master | 2023-08-23T18:02:52.323459 | 2021-10-09T19:49:12 | 2021-10-09T19:52:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,801 | py |
import os.path
import math
import sys
import uuid
import json
import functools
import datetime
import csv
import pandas
import numpy
import keras
import librosa
import sklearn.metrics
from . import features, urbansound8k, common, models, stats
from . import settings as Settings
def dataframe_generator(X, Y, loader, batchsize=10, n_classes=10, random_state=1):
    """Infinite Keras generator that lazily loads batches from a DataFrame.

    X: data column(s)
    Y: target column
    loader: called on each sampled row of X to load the actual training data
    Yields (data, one_hot_targets) tuples of `batchsize` samples, drawn
    without replacement within each batch.
    """
    assert len(X) == len(Y), 'X and Y must be equal length'
    rng = numpy.random.RandomState(seed=random_state)
    while True:
        chosen = rng.choice(len(X), size=batchsize, replace=False)
        samples = [loader(row) for _, row in X.iloc[chosen, :].iterrows()]
        targets = keras.utils.to_categorical(Y.iloc[chosen], num_classes=n_classes)
        yield (numpy.array(samples), numpy.array(targets))
class LogCallback(keras.callbacks.Callback):
    """Keras callback that appends one CSV row of metrics per epoch.
    Each row contains the Keras-reported metrics plus extra metrics computed
    by the `score_epoch` callable (evaluated against the current model).
    """
    def __init__(self, log_path, score_epoch):
        super().__init__()
        self.log_path = log_path
        self.score = score_epoch
        # Opened lazily on first write, once the metric names are known.
        self._log_file = None
        self._csv_writer = None
    def __del__(self):
        # Best-effort close of the log file when the callback is collected.
        if self._log_file:
            self._log_file.close()
    def write_entry(self, epoch, data):
        """Append one CSV row containing `epoch` plus the metrics in `data`."""
        data = data.copy()
        if not self._csv_writer:
            # create writer when we know what fields
            self._log_file = open(self.log_path, 'w')
            fields = ['epoch'] + sorted(data.keys())
            self._csv_writer = csv.DictWriter(self._log_file, fields)
            self._csv_writer.writeheader()
        data['epoch'] = epoch
        self._csv_writer.writerow(data)
        self._log_file.flush() # ensure data hits disk
    def on_epoch_end(self, epoch, logs):
        """Merge the extra scores into Keras `logs` and write the row."""
        logs = logs.copy()
        more = self.score() # uses current model
        for k, v in more.items():
            logs[k] = v
        self.write_entry(epoch, logs)
def dump_validation_data(val_gen, n_batches=5):
    """Save the first `n_batches` batches from `val_gen` to test_data.npz.

    Bug fix: the original loop used ``if i < 4: break`` with ``i`` starting
    at 0, which exited after the FIRST batch instead of after five — the
    opposite of the intended condition.

    val_gen: iterable/generator yielding (X, y) numpy-array batches
    n_batches: number of batches to concatenate and dump (default 5,
               matching the original intent)
    """
    Xs = []
    Ys = []
    for i, (X, y) in enumerate(val_gen):
        Xs.append(X)
        Ys.append(y)
        if i + 1 >= n_batches:
            break
    numpy.savez('test_data.npz',
                x_test=numpy.concatenate(Xs),
                y_test=numpy.concatenate(Ys))
def train_model(out_dir, train, val, model,
                loader, val_loader, settings, seed=1):
    """Train a single model.
    Compiles `model` with SGD+Nesterov, trains it with generators built from
    the `train`/`val` DataFrames, checkpoints every epoch, logs per-epoch
    metrics (including a voted validation accuracy) to train.csv, and writes
    the Keras history to history.csv under `out_dir`.
    Returns the Keras History object.
    """
    # NOTE(review): frame_samples, window_frames and `seed` are read/declared
    # but never used below — TODO confirm whether they should influence training.
    frame_samples = settings['hop_length']
    train_samples = settings['train_samples']
    window_frames = settings['frames']
    val_samples = settings['val_samples']
    epochs = settings['epochs']
    batch_size = settings['batch']
    learning_rate = settings.get('learning_rate', 0.01)
    assert len(train) > len(val) * 5, 'training data should be much larger than validation'
    # NOTE(review): top3 is defined but not added to `metrics` — TODO confirm.
    def top3(y_true, y_pred):
        return keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
    optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=settings['nesterov_momentum'], nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    # Checkpoint every epoch; filename encodes epoch, val loss and train loss.
    model_path = os.path.join(out_dir, 'e{epoch:02d}-v{val_loss:.2f}.t{loss:.2f}.model.hdf5')
    checkpoint = keras.callbacks.ModelCheckpoint(model_path, monitor='val_acc', mode='max',
                                                 period=1, verbose=1, save_best_only=False)
    def voted_score():
        # Accuracy over whole clips using overlapping-window voting.
        y_pred = features.predict_voted(settings, model, val,
                                        loader=val_loader, method=settings['voting'], overlap=settings['voting_overlap'])
        class_pred = numpy.argmax(y_pred, axis=1)
        acc = sklearn.metrics.accuracy_score(val.classID, class_pred)
        d = {
            'voted_val_acc': acc,
        }
        for k, v in d.items():
            print("{}: {:.4f}".format(k, v))
        return d
    log_path = os.path.join(out_dir, 'train.csv')
    log = LogCallback(log_path, voted_score)
    train_gen = dataframe_generator(train, train.classID, loader=loader, batchsize=batch_size)
    val_gen = dataframe_generator(val, val.classID, loader=val_loader, batchsize=batch_size)
    # val_gen is an infinite generator, so peeling a few batches off here
    # for the npz dump does not starve fit_generator below.
    dump_validation_data(val_gen)
    callbacks_list = [checkpoint, log]
    hist = model.fit_generator(train_gen, validation_data=val_gen,
                               steps_per_epoch=math.ceil(train_samples/batch_size),
                               validation_steps=math.ceil(val_samples/batch_size),
                               callbacks=callbacks_list,
                               epochs=epochs, verbose=1)
    df = history_dataframe(hist)
    history_path = os.path.join(out_dir, 'history.csv')
    df.to_csv(history_path)
    return hist
def history_dataframe(h):
    """Convert a Keras History-like object into a pandas DataFrame.

    The frame has an 'epoch' column followed by one column per recorded
    metric in ``h.history``.
    """
    columns = {'epoch': h.epoch}
    columns.update(h.history)
    return pandas.DataFrame(columns)
def parse(args):
    """Parse the command-line arguments for the training tool.

    Combines the shared project arguments (common/Settings) with the
    training-specific flags and returns the parsed namespace.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Train a model')
    common.add_arguments(parser)
    Settings.add_arguments(parser)
    add = parser.add_argument
    add('--fold', type=int, default=1,
        help='')
    add('--skip_model_check', action='store_true', default=False,
        help='Skip checking whether model fits on STM32 device')
    add('--load', default='',
        help='Load a already trained model')
    add('--name', type=str, default='',
        help='')
    return parser.parse_args(args)
def setup_keras():
    """Configure the TensorFlow session backing Keras (TF1-style API)."""
    import tensorflow as tf
    from keras.backend import tensorflow_backend as B
    # allow_growth is needed to avoid CUDNN_STATUS_INTERNAL_ERROR on some convolutional layers
    session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    sess = tf.Session(config=session_config)
    B.set_session(sess)
def load_training_data(data, fold):
    """Return the (train, validation) splits for the given 1-indexed fold.

    Sanity-checks that UrbanSound8K yields exactly 10 folds and that, by
    convention, the held-out test split of entry ``fold`` is fold number
    ``fold`` itself.
    """
    assert fold >= 1  # folds are 1-indexed
    folds = urbansound8k.folds(data)
    assert len(folds) == 10
    split = folds[fold - 1]
    train_data = split[0]
    val_data = split[1]
    test_folds = split[2].fold.unique()
    assert len(test_folds) == 1
    # by convention, the test fold is the fold number itself
    assert test_folds[0] == fold, (test_folds[0], '!=', fold)
    return train_data, val_data
def main():
    """Entry point: parse args, prepare data/features, build and train a model."""
    setup_keras()
    args = parse(sys.argv[1:])
    args = dict(args.__dict__)
    # experiment settings
    feature_dir = args['features_dir']
    fold = args['fold']
    if args['name']:
        name = args['name']
    else:
        # Auto-generate a run name: unknown-<timestamp>-<short uuid>-fold<N>
        t = datetime.datetime.now().strftime('%Y%m%d-%H%M')
        u = str(uuid.uuid4())[0:4]
        name = "-".join(['unknown', t, u, 'fold{}'.format(fold)])
    output_dir = os.path.join(args['models_dir'], name)
    common.ensure_directories(output_dir, feature_dir)
    # model settings: file-based settings overridden by any CLI args that were set
    exsettings = common.load_settings_path(args['settings_path'])
    for k, v in args.items():
        if v is not None:
            exsettings[k] = v
    exsettings = Settings.load_settings(exsettings)
    feature_settings = features.settings(exsettings)
    train_settings = { k: v for k, v in exsettings.items() if k in Settings.default_training_settings }
    model_settings = { k: v for k, v in exsettings.items() if k in Settings.default_model_settings }
    features.maybe_download(feature_settings, feature_dir)
    data = urbansound8k.load_dataset()
    train_data, val_data = load_training_data(data, fold)
    def load(sample, validation):
        # Augmentation is applied to training samples only.
        augment = not validation and train_settings['augment'] != 0
        d = features.load_sample(sample, feature_settings, feature_dir=feature_dir,
                                 window_frames=model_settings['frames'],
                                 augment=augment, normalize=exsettings['normalize'])
        return d
    def build_model():
        m = models.build(exsettings)
        return m
    load_model = args['load']
    if load_model:
        print('Loading existing model', load_model)
        m = keras.models.load_model(load_model)
    else:
        m = build_model()
    m.summary()
    if args['skip_model_check']:
        print('WARNING: model constraint check skipped')
    else:
        # Verify the model fits the STM32 device constraints before training.
        print('Checking model contraints')
        ss, ll = stats.check_model_constraints(m)
        print('Stats', ss)
    print('Training model', name)
    print('Settings', json.dumps(exsettings))
    h = train_model(output_dir, train_data, val_data,
                    model=m,
                    loader=functools.partial(load, validation=False),
                    val_loader=functools.partial(load, validation=True),
                    settings=exsettings)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"jononor@gmail.com"
] | jononor@gmail.com |
cfd3122f8016f9ea2f13eeb32f84937b90620619 | 743e3ab9bc94ada35913d03b77bf159c1ec75930 | /ZIJIE复习/11.py | 305043a736f576cc494d81217b9306b9ce2da630 | [] | no_license | Stella2019/leetcode- | 04d3978ba97ab321a5cefc061beefbf3c76cf795 | 60c8a20af961a57f73feb4ccd883dfc73370d994 | refs/heads/main | 2023-03-31T04:08:20.450791 | 2021-04-01T05:38:00 | 2021-04-01T05:38:00 | 353,585,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py | # 给定一个按照升序排列的整数数组 nums,和一个目标值 target。找出给定目标值在数组中的开始位置和结束位置。##给定一个按照升序排列的整数数组 nums,和一个目标值 target。找出给定目标值在数组中的开始位置和结束位置。
"""
def binarySearch(nums, target);
return binarySeachHelper(nums, target, 0, len(nums) -1 )
def binarySearchHelper(nums, target, left, right):
if left > right:
return -1
middle = (left + right)//2
potentialMatch = nums[middle]
if target == potentialMatch:
return middle
elif target < potentialMatch:
return binarySearchHelper(nums, target, left, middle - 1)
else:
return binarySearchHelper(nums, target, middle + 1, right)
"""
l, r = 0, len(nums) - 1
ans1 = -1
while l <= r:
mid = l + (r - l) // 2
if nums[mid] == target:
ans1 = mid
r = mid - 1
elif nums[mid] < target:
l = mid + 1
else:
r = mid - 1
if ans1 == -1:
print(-1, -1)
else:
ans2 = -1
l, r = ans1, len(nums) - 1
while l <= r:
mid = l + (r - l) // 2
if nums[mid] == target:
ans2 = mid
l = mid + 1
elif nums[mid] < target:
l = mid + 1
else:
r = mid - 1
print(ans1, ans2)
无序数组求中位数
Class
MedianFinder:
def _init_(self):
self.store = []
def addNum(self, num):
if not self.store:
self.store.append(num)
else:
bisect.insort_left(self.store, num)
def findMedian(self):
n = len(self.store)
if n & 1 == 1:
return self.store[n // 2]
def quicksort(num):
| [
"noreply@github.com"
] | Stella2019.noreply@github.com |
e6cddbae4354c8af9c9d5145300b7091f44a9e8b | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/speech/v1/speech-v1-py/google/cloud/speech_v1/services/speech/__init__.py | 8eac4b03e1638030b4c04ae995e4d7fff0aa0ebf | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import SpeechClient
from .async_client import SpeechAsyncClient
__all__ = (
'SpeechClient',
'SpeechAsyncClient',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
2fd671f0c486f88eddcb6ce484cf4129ba4f765f | 2d4af29250dca8c72b74e190e74d92f1467120a0 | /TaobaoSdk/Request/MallEaiOrderRefundGoodReturnRefuseRequest.py | 4efc69ca0a81e8aeb3c00f91dc7995a2141a3370 | [] | no_license | maimiaolmc/TaobaoOpenPythonSDK | 2c671be93c40cf487c0d7d644479ba7e1043004c | d349aa8ed6229ce6d76a09f279a0896a0f8075b3 | refs/heads/master | 2020-04-06T03:52:46.585927 | 2014-06-09T08:58:27 | 2014-06-09T08:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,457 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 卖家拒绝退货
# @author wuliang@maimiaotech.com
# @date 2013-09-22 16:52:42
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
    # Directory containing this file, with symlinks resolved.
    return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
# Make the SDK package root (the parent of this file's directory) importable.
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
    sys.path.insert(0, __modulePath)
## @brief Request object for the "tmall.eai.order.refund.good.return.refuse"
#  Taobao TOP API: the seller refuses a returned-goods refund.
class MallEaiOrderRefundGoodReturnRefuseRequest(object):
    def __init__(self):
        # Bug fix: use the explicit class in super() instead of
        # super(self.__class__, self), which recurses infinitely as soon
        # as this class is subclassed.
        super(MallEaiOrderRefundGoodReturnRefuseRequest, self).__init__()
        ## @brief API method name sent to the gateway.
        self.method = "tmall.eai.order.refund.good.return.refuse"
        ## @brief Unix timestamp of the request; defaults to "now" and is
        #  refreshed by the transport layer if left unset at send time.
        self.timestamp = int(time.time())
        ## @brief Refund order id (Number, required).
        self.refund_id = None
        ## @brief Refund phase: "onsale" (in-sale) or "aftersale" (String, required).
        self.refund_phase = None
        ## @brief Refund version number for optimistic concurrency (Number, required).
        self.refund_version = None
        ## @brief Seller's message explaining the refusal (String, required).
        self.refuse_message = None
        ## @brief Evidence upload supporting the refusal (byte[], required).
        self.refuse_proof = None
| [
"chenke@maimiaotech.com"
] | chenke@maimiaotech.com |
a791ffb684d9822daae88a7e46235dfc22417c67 | 1efeed0fa970b05801a29ccfdc90c52bb571dd02 | /venv/lib/python3.7/site-packages/oslo_policy/tests/test_policy.py | 9a7f6a4146e0f71073bf2429c5cfdc965b61fbe8 | [] | no_license | williamwang0/MusicGen | 2e7fe5d9b2d35d1406b8951a86a5eac6d704571e | b6411505d1fd29e13ca93e3975f3de106ad4a7d0 | refs/heads/master | 2020-07-08T15:48:33.840412 | 2020-05-27T17:30:38 | 2020-05-27T17:30:38 | 203,717,161 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64,923 | py | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine"""
import os
import mock
from oslo_config import cfg
from oslo_context import context
from oslo_serialization import jsonutils
from oslotest import base as test_base
import six
from oslo_policy import _cache_handler
from oslo_policy import _checks
from oslo_policy import _parser
from oslo_policy import policy
from oslo_policy.tests import base
# Canned JSON policy-file bodies used as fixtures throughout these tests.
POLICY_A_CONTENTS = jsonutils.dumps({"default": "role:fakeA"})
POLICY_B_CONTENTS = jsonutils.dumps({"default": "role:fakeB"})
POLICY_FAKE_CONTENTS = jsonutils.dumps({"default": "role:fakeC"})
POLICY_JSON_CONTENTS = jsonutils.dumps({
    "default": "rule:admin",
    "admin": "is_admin:True"
})
@_checks.register('field')
class FieldCheck(_checks.Check):
    """A non reversible check.

    All oslo.policy defined checks have a __str__ method with the property
    that rule == str(_parser.parse_rule(rule)).  Consumers of oslo.policy
    may define checks for which that does not hold; this FieldCheck is
    deliberately not reversible so tests can verify that such checks do not
    break anything.
    """
    def __init__(self, kind, match):
        # `match` has the form "<resource>:<field>=<value>".
        resource, remainder = match.split(':', 1)
        field, value = remainder.split('=', 1)
        normalized = '%s:%s:%s' % (resource, field, value)
        super(FieldCheck, self).__init__(kind, normalized)
        self.field = field
        self.value = value

    def __call__(self, target_dict, cred_dict, enforcer):
        # Always passes; only parsing/serialization behaviour is under test.
        return True
class MyException(Exception):
    # Test helper that records the positional and keyword arguments it
    # was raised with (note: `args` shadows Exception's built-in attribute
    # on purpose).
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
class RulesTestCase(test_base.BaseTestCase):
    """Unit tests for the policy.Rules container (construction, default-rule
    fallback, JSON/YAML loading, dict conversion and string rendering)."""
    def test_init_basic(self):
        rules = policy.Rules()
        self.assertEqual({}, rules)
        self.assertIsNone(rules.default_rule)
    def test_init(self):
        rules = policy.Rules(dict(a=1, b=2, c=3), 'a')
        self.assertEqual(dict(a=1, b=2, c=3), rules)
        self.assertEqual('a', rules.default_rule)
    def test_no_default(self):
        rules = policy.Rules(dict(a=1, b=2, c=3))
        self.assertRaises(KeyError, lambda: rules['d'])
    def test_missing_default(self):
        rules = policy.Rules(dict(a=1, c=3), 'b')
        self.assertRaises(KeyError, lambda: rules['d'])
    def test_with_default(self):
        # Missing key falls back to the value of the default rule 'b'.
        rules = policy.Rules(dict(a=1, b=2, c=3), 'b')
        self.assertEqual(2, rules['d'])
    def test_retrieval(self):
        rules = policy.Rules(dict(a=1, b=2, c=3), 'b')
        self.assertEqual(1, rules['a'])
        self.assertEqual(2, rules['b'])
        self.assertEqual(3, rules['c'])
    @mock.patch.object(_parser, 'parse_rule', lambda x: x)
    def test_load_json(self):
        exemplar = jsonutils.dumps({
            "admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]],
            "default": []
        })
        rules = policy.Rules.load(exemplar, 'default')
        self.assertEqual('default', rules.default_rule)
        self.assertEqual(dict(
            admin_or_owner=[['role:admin'], ['project_id:%(project_id)s']],
            default=[],
        ), rules)
    @mock.patch.object(_parser, 'parse_rule', lambda x: x)
    def test_load_json_invalid_exc(self):
        # When the JSON isn't valid, ValueError is raised on load.
        exemplar = """{
    "admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]],
    "default": [
}"""
        self.assertRaises(ValueError, policy.Rules.load, exemplar,
                          'default')
        # However, since change I43782d245d7652ba69613b26fe598ac79ec19929,
        # policy.Rules.load() first tries loading with the really fast
        # jsonutils.loads(), and if that fails, it tries loading with
        # yaml.safe_load(). Since YAML is a superset of JSON, some strictly
        # invalid JSON can be parsed correctly by policy.Rules.load() without
        # raising an exception. But that means that since 1.17.0, we've been
        # accepting (strictly speaking) illegal JSON policy files, and for
        # backward compatibility, we should continue to do so. Thus the
        # following are here to prevent regressions:
        # JSON requires double quotes, but the YAML parser doesn't care
        bad_but_acceptable = """{
    'admin_or_owner': [["role:admin"], ["project_id:%(project_id)s"]],
    'default': []
}"""
        self.assertTrue(policy.Rules.load(bad_but_acceptable, 'default'))
        # JSON does not allow bare keys, but the YAML parser doesn't care
        bad_but_acceptable = """{
    admin_or_owner: [["role:admin"], ["project_id:%(project_id)s"]],
    default: []
}"""
        self.assertTrue(policy.Rules.load(bad_but_acceptable, 'default'))
        # JSON is picky about commas, but the YAML parser is more forgiving
        # (Note the trailing , in the exemplar is invalid JSON.)
        bad_but_acceptable = """{
    admin_or_owner: [["role:admin"], ["project_id:%(project_id)s"]],
    default: [],
}"""
        self.assertTrue(policy.Rules.load(bad_but_acceptable, 'default'))
    @mock.patch.object(_parser, 'parse_rule', lambda x: x)
    def test_load_empty_data(self):
        result = policy.Rules.load('', 'default')
        self.assertEqual(result, {})
    @mock.patch.object(_parser, 'parse_rule', lambda x: x)
    def test_load_yaml(self):
        # Test that simplified YAML can be used with load().
        # Show that YAML allows useful comments.
        exemplar = """
# Define a custom rule.
admin_or_owner: role:admin or project_id:%(project_id)s
# The default rule is used when there's no action defined.
default: []
"""
        rules = policy.Rules.load(exemplar, 'default')
        self.assertEqual('default', rules.default_rule)
        self.assertEqual(dict(
            admin_or_owner='role:admin or project_id:%(project_id)s',
            default=[],
        ), rules)
    @mock.patch.object(_parser, 'parse_rule', lambda x: x)
    def test_load_yaml_invalid_exc(self):
        # When the JSON is seriously invalid, ValueError is raised on load().
        # (See test_load_json_invalid_exc for what 'seriously invalid' means.)
        exemplar = """{
# Define a custom rule.
admin_or_owner: role:admin or project_id:%(project_id)s
# The default rule is used when there's no action defined.
default: [
}"""
        self.assertRaises(ValueError, policy.Rules.load, exemplar,
                          'default')
    @mock.patch.object(_parser, 'parse_rule', lambda x: x)
    def test_from_dict(self):
        expected = {'admin_or_owner': 'role:admin', 'default': '@'}
        rules = policy.Rules.from_dict(expected, 'default')
        self.assertEqual('default', rules.default_rule)
        self.assertEqual(expected, rules)
    def test_str(self):
        exemplar = jsonutils.dumps({
            "admin_or_owner": "role:admin or project_id:%(project_id)s"
        }, indent=4)
        rules = policy.Rules(dict(
            admin_or_owner='role:admin or project_id:%(project_id)s',
        ))
        self.assertEqual(exemplar, str(rules))
    def test_str_true(self):
        # TrueCheck renders as the empty string in the JSON dump.
        exemplar = jsonutils.dumps({
            "admin_or_owner": ""
        }, indent=4)
        rules = policy.Rules(dict(
            admin_or_owner=_checks.TrueCheck(),
        ))
        self.assertEqual(exemplar, str(rules))
    def test_load_json_deprecated(self):
        with self.assertWarnsRegex(DeprecationWarning,
                                   r'load_json\(\).*load\(\)'):
            policy.Rules.load_json(jsonutils.dumps({'default': ''}, 'default'))
class EnforcerTest(base.PolicyBaseTestCase):
def setUp(self):
super(EnforcerTest, self).setUp()
self.create_config_file('policy.json', POLICY_JSON_CONTENTS)
def check_loaded_files(self, filenames):
self.assertEqual(
[self.get_config_file_fullname(n)
for n in filenames],
self.enforcer._loaded_files
)
    def _test_scenario_with_opts_registered(self, scenario, *args, **kwargs):
        # This test registers some rules, calls the scenario and then checks
        # the registered rules. The scenario should be a method which loads
        # policy files containing POLICY_*_CONTENTS defined above. They should
        # be loaded on the self.enforcer object.
        # This should be overridden by the policy file
        self.enforcer.register_default(policy.RuleDefault(name='admin',
                                       check_str='is_admin:False'))
        # This is not in the policy file, only registered
        self.enforcer.register_default(policy.RuleDefault(name='owner',
                                       check_str='role:owner'))
        scenario(*args, **kwargs)
        # 'owner' keeps its registered default; 'admin' ends up with the
        # value from the loaded policy file, not its registered default.
        self.assertIn('owner', self.enforcer.rules)
        self.assertEqual('role:owner', str(self.enforcer.rules['owner']))
        self.assertEqual('is_admin:True', str(self.enforcer.rules['admin']))
        # Both registered names appear in registered_rules, while only
        # rules that actually came from files appear in file_rules.
        self.assertIn('owner', self.enforcer.registered_rules)
        self.assertIn('admin', self.enforcer.registered_rules)
        self.assertNotIn('default', self.enforcer.registered_rules)
        self.assertNotIn('owner', self.enforcer.file_rules)
        self.assertIn('admin', self.enforcer.file_rules)
        self.assertIn('default', self.enforcer.file_rules)
def test_load_file(self):
self.conf.set_override('policy_dirs', [], group='oslo_policy')
self.enforcer.load_rules(True)
self.assertIsNotNone(self.enforcer.rules)
self.assertIn('default', self.enforcer.rules)
self.assertIn('admin', self.enforcer.rules)
self.assertEqual('is_admin:True', str(self.enforcer.rules['admin']))
def test_load_file_opts_registered(self):
self._test_scenario_with_opts_registered(self.test_load_file)
    def test_load_directory(self):
        # Both policy.d files are merged on top of policy.json; b.conf is
        # read after a.conf, so its 'default' value (role:fakeB) wins.
        self.create_config_file(
            os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
        self.create_config_file(
            os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
        self.enforcer.load_rules(True)
        self.assertIsNotNone(self.enforcer.rules)
        loaded_rules = jsonutils.loads(str(self.enforcer.rules))
        self.assertEqual('role:fakeB', loaded_rules['default'])
        self.assertEqual('is_admin:True', loaded_rules['admin'])
        # The primary file is loaded first, then the directory contents.
        self.check_loaded_files([
            'policy.json',
            os.path.join('policy.d', 'a.conf'),
            os.path.join('policy.d', 'b.conf'),
        ])
def test_load_directory_opts_registered(self):
self._test_scenario_with_opts_registered(self.test_load_directory)
def test_load_directory_caching_with_files_updated(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.enforcer.load_rules(False)
self.assertIsNotNone(self.enforcer.rules)
old = six.next(six.itervalues(
self.enforcer._policy_dir_mtimes))
self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))
# Touch the file
conf_path = os.path.join(self.config_dir, os.path.join(
'policy.d', 'a.conf'))
stinfo = os.stat(conf_path)
os.utime(conf_path, (stinfo.st_atime + 10, stinfo.st_mtime + 10))
self.enforcer.load_rules(False)
self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))
self.assertEqual(old, six.next(six.itervalues(
self.enforcer._policy_dir_mtimes)))
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual('is_admin:True', loaded_rules['admin'])
self.check_loaded_files([
'policy.json',
os.path.join('policy.d', 'a.conf'),
os.path.join('policy.d', 'a.conf'),
])
def test_load_directory_caching_with_files_updated_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_load_directory_caching_with_files_updated)
def test_load_directory_caching_with_files_same(self, overwrite=True):
self.enforcer.overwrite = overwrite
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.enforcer.load_rules(False)
self.assertIsNotNone(self.enforcer.rules)
old = six.next(six.itervalues(
self.enforcer._policy_dir_mtimes))
self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))
self.enforcer.load_rules(False)
self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))
self.assertEqual(old, six.next(six.itervalues(
self.enforcer._policy_dir_mtimes)))
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual('is_admin:True', loaded_rules['admin'])
self.check_loaded_files([
'policy.json',
os.path.join('policy.d', 'a.conf'),
])
def test_load_directory_caching_with_files_same_but_overwrite_false(self):
self.test_load_directory_caching_with_files_same(overwrite=False)
def test_load_directory_caching_with_files_same_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_load_directory_caching_with_files_same)
def test_load_dir_caching_with_files_same_overwrite_false_opts_reg(self):
# Very long test name makes this difficult
test = getattr(self,
'test_load_directory_caching_with_files_same_but_overwrite_false') # NOQA
self._test_scenario_with_opts_registered(test)
def test_load_multiple_directories(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
self.create_config_file(
os.path.join('policy.2.d', 'fake.conf'), POLICY_FAKE_CONTENTS)
self.conf.set_override('policy_dirs',
['policy.d', 'policy.2.d'],
group='oslo_policy')
self.enforcer.load_rules(True)
self.assertIsNotNone(self.enforcer.rules)
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual('role:fakeC', loaded_rules['default'])
self.assertEqual('is_admin:True', loaded_rules['admin'])
self.check_loaded_files([
'policy.json',
os.path.join('policy.d', 'a.conf'),
os.path.join('policy.d', 'b.conf'),
os.path.join('policy.2.d', 'fake.conf'),
])
def test_load_multiple_directories_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_load_multiple_directories)
def test_load_non_existed_directory(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.conf.set_override('policy_dirs',
['policy.d', 'policy.x.d'],
group='oslo_policy')
self.enforcer.load_rules(True)
self.assertIsNotNone(self.enforcer.rules)
self.assertIn('default', self.enforcer.rules)
self.assertIn('admin', self.enforcer.rules)
self.check_loaded_files(
['policy.json', os.path.join('policy.d', 'a.conf')])
def test_load_non_existed_directory_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_load_non_existed_directory)
def test_load_policy_dirs_with_non_directory(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.conf.set_override('policy_dirs',
[os.path.join('policy.d', 'a.conf')],
group='oslo_policy')
self.assertRaises(ValueError, self.enforcer.load_rules, True)
    @mock.patch('oslo_policy.policy.Enforcer.check_rules')
    def test_load_rules_twice(self, mock_check_rules):
        # Without force_reload, a second load_rules() call is a cache hit
        # and must not re-validate the rules.
        self.enforcer.load_rules()
        self.enforcer.load_rules()
        self.assertEqual(1, mock_check_rules.call_count)
@mock.patch('oslo_policy.policy.Enforcer.check_rules')
def test_load_rules_twice_force(self, mock_check_rules):
self.enforcer.load_rules(True)
self.enforcer.load_rules(True)
self.assertEqual(2, mock_check_rules.call_count)
@mock.patch('oslo_policy.policy.Enforcer.check_rules')
def test_load_rules_twice_clear(self, mock_check_rules):
self.enforcer.load_rules()
self.enforcer.clear()
# NOTE(bnemec): It's weird that we have to pass True here, but clear
# sets enforcer.use_conf to False, which causes load_rules to be a
# noop when called with no parameters. This is probably a bug.
self.enforcer.load_rules(True)
self.assertEqual(2, mock_check_rules.call_count)
@mock.patch('oslo_policy.policy.Enforcer.check_rules')
def test_load_directory_twice(self, mock_check_rules):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
self.enforcer.load_rules()
self.enforcer.load_rules()
self.assertEqual(1, mock_check_rules.call_count)
self.assertIsNotNone(self.enforcer.rules)
@mock.patch('oslo_policy.policy.Enforcer.check_rules')
def test_load_directory_twice_force(self, mock_check_rules):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
self.enforcer.load_rules(True)
self.enforcer.load_rules(True)
self.assertEqual(2, mock_check_rules.call_count)
self.assertIsNotNone(self.enforcer.rules)
@mock.patch('oslo_policy.policy.Enforcer.check_rules')
def test_load_directory_twice_changed(self, mock_check_rules):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.enforcer.load_rules()
# Touch the file
conf_path = os.path.join(self.config_dir, os.path.join(
'policy.d', 'a.conf'))
stinfo = os.stat(conf_path)
os.utime(conf_path, (stinfo.st_atime + 10, stinfo.st_mtime + 10))
self.enforcer.load_rules()
self.assertEqual(2, mock_check_rules.call_count)
self.assertIsNotNone(self.enforcer.rules)
def test_set_rules_type(self):
self.assertRaises(TypeError,
self.enforcer.set_rules,
'dummy')
    @mock.patch.object(_cache_handler, 'delete_cached_file', mock.Mock())
    def test_clear(self):
        # Make sure the rules are reset
        self.enforcer.rules = 'spam'
        self.enforcer.clear()
        # clear() must wipe the rules plus the cached default rule and
        # the resolved policy file path.
        self.assertEqual({}, self.enforcer.rules)
        self.assertIsNone(self.enforcer.default_rule)
        self.assertIsNone(self.enforcer.policy_path)
def test_clear_opts_registered(self):
# This should be overridden by the policy file
self.enforcer.register_default(policy.RuleDefault(name='admin',
check_str='is_admin:False'))
# This is not in the policy file, only registered
self.enforcer.register_default(policy.RuleDefault(name='owner',
check_str='role:owner'))
self.test_clear()
self.assertEqual({}, self.enforcer.registered_rules)
def test_rule_with_check(self):
rules_json = jsonutils.dumps({
"deny_stack_user": "not role:stack_user",
"cloudwatch:PutMetricData": ""
})
rules = policy.Rules.load(rules_json)
self.enforcer.set_rules(rules)
action = 'cloudwatch:PutMetricData'
creds = {'roles': ''}
self.assertTrue(self.enforcer.enforce(action, {}, creds))
def test_enforcer_with_default_rule(self):
rules_json = jsonutils.dumps({
"deny_stack_user": "not role:stack_user",
"cloudwatch:PutMetricData": ""
})
rules = policy.Rules.load(rules_json)
default_rule = _checks.TrueCheck()
enforcer = policy.Enforcer(self.conf, default_rule=default_rule)
enforcer.set_rules(rules)
action = 'cloudwatch:PutMetricData'
creds = {'roles': ''}
self.assertTrue(enforcer.enforce(action, {}, creds))
def test_enforcer_force_reload_with_overwrite(self, opts_registered=0):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
# Prepare in memory fake policies.
self.enforcer.set_rules({'test': _parser.parse_rule('role:test')},
use_conf=True)
self.enforcer.set_rules({'default': _parser.parse_rule('role:fakeZ')},
overwrite=False, # Keeps 'test' role.
use_conf=True)
self.enforcer.overwrite = True
# Call enforce(), it will load rules from
# policy configuration files, to overwrite
# existing fake ones.
self.assertFalse(self.enforcer.enforce('test', {},
{'roles': ['test']}))
self.assertTrue(self.enforcer.enforce('default', {},
{'roles': ['fakeB']}))
# Check against rule dict again from
# enforcer object directly.
self.assertNotIn('test', self.enforcer.rules)
self.assertIn('default', self.enforcer.rules)
self.assertIn('admin', self.enforcer.rules)
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual(2 + opts_registered, len(loaded_rules))
self.assertIn('role:fakeB', loaded_rules['default'])
self.assertIn('is_admin:True', loaded_rules['admin'])
def test_enforcer_force_reload_with_overwrite_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_enforcer_force_reload_with_overwrite, opts_registered=1)
def test_enforcer_force_reload_without_overwrite(self, opts_registered=0):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
# Prepare in memory fake policies.
self.enforcer.set_rules({'test': _parser.parse_rule('role:test')},
use_conf=True)
self.enforcer.set_rules({'default': _parser.parse_rule('role:fakeZ')},
overwrite=False, # Keeps 'test' role.
use_conf=True)
self.enforcer.overwrite = False
self.enforcer._is_directory_updated = lambda x, y: True
# Call enforce(), it will load rules from
# policy configuration files, to merge with
# existing fake ones.
self.assertTrue(self.enforcer.enforce('test', {},
{'roles': ['test']}))
# The existing rules have a same key with
# new loaded ones will be overwrote.
self.assertFalse(self.enforcer.enforce('default', {},
{'roles': ['fakeZ']}))
# Check against rule dict again from
# enforcer object directly.
self.assertIn('test', self.enforcer.rules)
self.assertIn('default', self.enforcer.rules)
self.assertIn('admin', self.enforcer.rules)
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertEqual(3 + opts_registered, len(loaded_rules))
self.assertIn('role:test', loaded_rules['test'])
self.assertIn('role:fakeB', loaded_rules['default'])
self.assertIn('is_admin:True', loaded_rules['admin'])
def test_enforcer_force_reload_without_overwrite_opts_registered(self):
self._test_scenario_with_opts_registered(
self.test_enforcer_force_reload_without_overwrite,
opts_registered=1)
def test_enforcer_keep_use_conf_flag_after_reload(self):
self.create_config_file(
os.path.join('policy.d', 'a.conf'), POLICY_A_CONTENTS)
self.create_config_file(
os.path.join('policy.d', 'b.conf'), POLICY_B_CONTENTS)
self.assertTrue(self.enforcer.use_conf)
self.assertTrue(self.enforcer.enforce('default', {},
{'roles': ['fakeB']}))
self.assertFalse(self.enforcer.enforce('test', {},
{'roles': ['test']}))
# After enforcement the flag should
# be remained there.
self.assertTrue(self.enforcer.use_conf)
self.assertFalse(self.enforcer.enforce('_dynamic_test_rule', {},
{'roles': ['test']}))
# Then if configure file got changed,
# reloading will be triggered when calling
# enforcer(), this case could happen only
# when use_conf flag equals True.
rules = jsonutils.loads(str(self.enforcer.rules))
rules['_dynamic_test_rule'] = 'role:test'
with open(self.enforcer.policy_path, 'w') as f:
f.write(jsonutils.dumps(rules))
self.enforcer.load_rules(force_reload=True)
self.assertTrue(self.enforcer.enforce('_dynamic_test_rule', {},
{'roles': ['test']}))
def test_enforcer_keep_use_conf_flag_after_reload_opts_registered(self):
# This test does not use _test_scenario_with_opts_registered because
# it loads all rules and then dumps them to a policy file and reloads.
# That breaks the ability to differentiate between registered and file
# loaded policies.
# This should be overridden by the policy file
self.enforcer.register_default(policy.RuleDefault(name='admin',
check_str='is_admin:False'))
# This is not in the policy file, only registered
self.enforcer.register_default(policy.RuleDefault(name='owner',
check_str='role:owner'))
self.test_enforcer_keep_use_conf_flag_after_reload()
self.assertIn('owner', self.enforcer.rules)
self.assertEqual('role:owner', str(self.enforcer.rules['owner']))
self.assertEqual('is_admin:True', str(self.enforcer.rules['admin']))
def test_enforcer_force_reload_false(self):
self.enforcer.set_rules({'test': 'test'})
self.enforcer.load_rules(force_reload=False)
self.assertIn('test', self.enforcer.rules)
self.assertNotIn('default', self.enforcer.rules)
self.assertNotIn('admin', self.enforcer.rules)
    def test_enforcer_overwrite_rules(self):
        # overwrite=True must replace the whole rule set, not merge it.
        self.enforcer.set_rules({'test': 'test'})
        self.enforcer.set_rules({'test': 'test1'}, overwrite=True)
        self.assertEqual({'test': 'test1'}, self.enforcer.rules)
    def test_enforcer_update_rules(self):
        # overwrite=False must merge the new rules into the existing set.
        self.enforcer.set_rules({'test': 'test'})
        self.enforcer.set_rules({'test1': 'test1'}, overwrite=False)
        self.assertEqual({'test': 'test', 'test1': 'test1'},
                         self.enforcer.rules)
def test_enforcer_with_default_policy_file(self):
enforcer = policy.Enforcer(self.conf)
self.assertEqual(self.conf.oslo_policy.policy_file,
enforcer.policy_file)
    def test_enforcer_with_policy_file(self):
        # An explicit policy_file argument overrides the config default.
        enforcer = policy.Enforcer(self.conf, policy_file='non-default.json')
        self.assertEqual('non-default.json', enforcer.policy_file)
def test_get_policy_path_raises_exc(self):
enforcer = policy.Enforcer(self.conf, policy_file='raise_error.json')
e = self.assertRaises(cfg.ConfigFilesNotFoundError,
enforcer._get_policy_path, enforcer.policy_file)
self.assertEqual(('raise_error.json', ), e.config_files)
def test_enforcer_set_rules(self):
self.enforcer.load_rules()
self.enforcer.set_rules({'test': 'test1'})
self.enforcer.load_rules()
self.assertEqual({'test': 'test1'}, self.enforcer.rules)
def test_enforcer_default_rule_name(self):
enforcer = policy.Enforcer(self.conf, default_rule='foo_rule')
self.assertEqual('foo_rule', enforcer.rules.default_rule)
self.conf.set_override('policy_default_rule', 'bar_rule',
group='oslo_policy')
enforcer = policy.Enforcer(self.conf, default_rule='foo_rule')
self.assertEqual('foo_rule', enforcer.rules.default_rule)
enforcer = policy.Enforcer(self.conf, )
self.assertEqual('bar_rule', enforcer.rules.default_rule)
def test_enforcer_register_twice_raises(self):
self.enforcer.register_default(policy.RuleDefault(name='owner',
check_str='role:owner'))
self.assertRaises(policy.DuplicatePolicyError,
self.enforcer.register_default,
policy.RuleDefault(name='owner',
check_str='role:owner'))
def test_non_reversible_check(self):
self.create_config_file('policy.json',
jsonutils.dumps(
{'shared': 'field:networks:shared=True'}))
# load_rules succeeding without error is the focus of this test
self.enforcer.load_rules(True)
self.assertIsNotNone(self.enforcer.rules)
loaded_rules = jsonutils.loads(str(self.enforcer.rules))
self.assertNotEqual('field:networks:shared=True',
loaded_rules['shared'])
def test_authorize_opt_registered(self):
self.enforcer.register_default(policy.RuleDefault(name='test',
check_str='role:test'))
self.assertTrue(self.enforcer.authorize('test', {},
{'roles': ['test']}))
def test_authorize_opt_not_registered(self):
self.assertRaises(policy.PolicyNotRegistered,
self.enforcer.authorize, 'test', {},
{'roles': ['test']})
def test_enforcer_accepts_context_objects(self):
rule = policy.RuleDefault(name='fake_rule', check_str='role:test')
self.enforcer.register_default(rule)
request_context = context.RequestContext()
target_dict = {}
self.enforcer.enforce('fake_rule', target_dict, request_context)
def test_enforcer_accepts_subclassed_context_objects(self):
rule = policy.RuleDefault(name='fake_rule', check_str='role:test')
self.enforcer.register_default(rule)
class SpecializedContext(context.RequestContext):
pass
request_context = SpecializedContext()
target_dict = {}
self.enforcer.enforce('fake_rule', target_dict, request_context)
def test_enforcer_rejects_non_context_objects(self):
rule = policy.RuleDefault(name='fake_rule', check_str='role:test')
self.enforcer.register_default(rule)
class InvalidContext(object):
pass
request_context = InvalidContext()
target_dict = {}
self.assertRaises(
policy.InvalidContextObject, self.enforcer.enforce, 'fake_rule',
target_dict, request_context
)
@mock.patch.object(policy.Enforcer, '_map_context_attributes_into_creds')
def test_enforcer_call_map_context_attributes(self, map_mock):
map_mock.return_value = {}
rule = policy.RuleDefault(name='fake_rule', check_str='role:test')
self.enforcer.register_default(rule)
request_context = context.RequestContext()
target_dict = {}
self.enforcer.enforce('fake_rule', target_dict, request_context)
map_mock.assert_called_once_with(request_context)
def test_enforcer_consolidates_context_attributes_with_creds(self):
request_context = context.RequestContext()
expected_creds = request_context.to_policy_values()
creds = self.enforcer._map_context_attributes_into_creds(
request_context
)
# We don't use self.assertDictEqual here because to_policy_values
# actaully returns a non-dict object that just behaves like a
# dictionary, but does some special handling when people access
# deprecated policy values.
for k, v in expected_creds.items():
self.assertEqual(expected_creds[k], creds[k])
def test_map_context_attributes_populated_system(self):
request_context = context.RequestContext(system_scope='all')
expected_creds = request_context.to_policy_values()
expected_creds['system'] = 'all'
creds = self.enforcer._map_context_attributes_into_creds(
request_context
)
# We don't use self.assertDictEqual here because to_policy_values
# actaully returns a non-dict object that just behaves like a
# dictionary, but does some special handling when people access
# deprecated policy values.
for k, v in expected_creds.items():
self.assertEqual(expected_creds[k], creds[k])
def test_enforcer_accepts_policy_values_from_context(self):
rule = policy.RuleDefault(name='fake_rule', check_str='role:test')
self.enforcer.register_default(rule)
request_context = context.RequestContext()
policy_values = request_context.to_policy_values()
target_dict = {}
self.enforcer.enforce('fake_rule', target_dict, policy_values)
def test_enforcer_understands_system_scope(self):
self.conf.set_override('enforce_scope', True, group='oslo_policy')
rule = policy.RuleDefault(
name='fake_rule', check_str='role:test', scope_types=['system']
)
self.enforcer.register_default(rule)
ctx = context.RequestContext(system_scope='all')
target_dict = {}
self.enforcer.enforce('fake_rule', target_dict, ctx)
def test_enforcer_raises_invalid_scope_with_system_scope_type(self):
self.conf.set_override('enforce_scope', True, group='oslo_policy')
rule = policy.RuleDefault(
name='fake_rule', check_str='role:test', scope_types=['system']
)
self.enforcer.register_default(rule)
# model a domain-scoped token, which should fail enforcement
ctx = context.RequestContext(domain_id='fake')
target_dict = {}
self.assertRaises(
policy.InvalidScope, self.enforcer.enforce, 'fake_rule',
target_dict, ctx
)
# model a project-scoped token, which should fail enforcement
ctx = context.RequestContext(project_id='fake')
self.assertRaises(
policy.InvalidScope, self.enforcer.enforce, 'fake_rule',
target_dict, ctx
)
def test_enforcer_understands_domain_scope(self):
self.conf.set_override('enforce_scope', True, group='oslo_policy')
rule = policy.RuleDefault(
name='fake_rule', check_str='role:test', scope_types=['domain']
)
self.enforcer.register_default(rule)
ctx = context.RequestContext(domain_id='fake')
target_dict = {}
self.enforcer.enforce('fake_rule', target_dict, ctx)
def test_enforcer_raises_invalid_scope_with_domain_scope_type(self):
self.conf.set_override('enforce_scope', True, group='oslo_policy')
rule = policy.RuleDefault(
name='fake_rule', check_str='role:test', scope_types=['domain']
)
self.enforcer.register_default(rule)
# model a system-scoped token, which should fail enforcement
ctx = context.RequestContext(system_scope='all')
target_dict = {}
self.assertRaises(
policy.InvalidScope, self.enforcer.enforce, 'fake_rule',
target_dict, ctx
)
# model a project-scoped token, which should fail enforcement
ctx = context.RequestContext(project_id='fake')
self.assertRaises(
policy.InvalidScope, self.enforcer.enforce, 'fake_rule',
target_dict, ctx
)
def test_enforcer_understands_project_scope(self):
self.conf.set_override('enforce_scope', True, group='oslo_policy')
rule = policy.RuleDefault(
name='fake_rule', check_str='role:test', scope_types=['project']
)
self.enforcer.register_default(rule)
ctx = context.RequestContext(project_id='fake')
target_dict = {}
self.enforcer.enforce('fake_rule', target_dict, ctx)
def test_enforcer_raises_invalid_scope_with_project_scope_type(self):
self.conf.set_override('enforce_scope', True, group='oslo_policy')
rule = policy.RuleDefault(
name='fake_rule', check_str='role:test', scope_types=['project']
)
self.enforcer.register_default(rule)
# model a system-scoped token, which should fail enforcement
ctx = context.RequestContext(system_scope='all')
target_dict = {}
self.assertRaises(
policy.InvalidScope, self.enforcer.enforce, 'fake_rule',
target_dict, ctx
)
# model a domain-scoped token, which should fail enforcement
ctx = context.RequestContext(domain_id='fake')
self.assertRaises(
policy.InvalidScope, self.enforcer.enforce, 'fake_rule',
target_dict, ctx
)
class EnforcerNoPolicyFileTest(base.PolicyBaseTestCase):
    """Enforcer behavior when no primary policy file exists on disk."""
    def setUp(self):
        # Deliberately does NOT create a policy.json file.
        super(EnforcerNoPolicyFileTest, self).setUp()
    def check_loaded_files(self, filenames):
        # Helper: assert the enforcer loaded exactly these config files,
        # in this order.
        self.assertEqual(
            [self.get_config_file_fullname(n)
             for n in filenames],
            self.enforcer._loaded_files
        )
    def test_load_rules(self):
        # Check that loading rules with no policy file does not error
        self.enforcer.load_rules(True)
        self.assertIsNotNone(self.enforcer.rules)
        self.assertEqual(0, len(self.enforcer.rules))
    def test_opts_registered(self):
        # With no file to override them, registered defaults are the
        # effective rules and file_rules stays empty.
        self.enforcer.register_default(policy.RuleDefault(name='admin',
                                       check_str='is_admin:False'))
        self.enforcer.register_default(policy.RuleDefault(name='owner',
                                       check_str='role:owner'))
        self.enforcer.load_rules(True)
        self.assertEqual({}, self.enforcer.file_rules)
        self.assertEqual('role:owner', str(self.enforcer.rules['owner']))
        self.assertEqual('is_admin:False', str(self.enforcer.rules['admin']))
    def test_load_directory(self):
        # policy.d files are still honored without a primary policy file;
        # a.conf carries the base rules here, b.conf overrides 'default'.
        self.create_config_file('policy.d/a.conf', POLICY_JSON_CONTENTS)
        self.create_config_file('policy.d/b.conf', POLICY_B_CONTENTS)
        self.enforcer.load_rules(True)
        self.assertIsNotNone(self.enforcer.rules)
        loaded_rules = jsonutils.loads(str(self.enforcer.rules))
        self.assertEqual('role:fakeB', loaded_rules['default'])
        self.assertEqual('is_admin:True', loaded_rules['admin'])
        self.check_loaded_files([
            'policy.d/a.conf',
            'policy.d/b.conf',
        ])
class CheckFunctionTestCase(base.PolicyBaseTestCase):
    """Tests for the different forms the `rule` argument of enforce() takes."""
    def setUp(self):
        super(CheckFunctionTestCase, self).setUp()
        self.create_config_file('policy.json', POLICY_JSON_CONTENTS)
    def test_check_explicit(self):
        # A BaseCheck instance passed directly is invoked as-is; FakeCheck
        # returns its (target, creds, enforcer) arguments for inspection.
        rule = base.FakeCheck()
        creds = {}
        result = self.enforcer.enforce(rule, 'target', creds)
        self.assertEqual(('target', creds, self.enforcer), result)
    def test_check_no_rules(self):
        # Clear the policy.json file created in setUp()
        self.create_config_file('policy.json', "{}")
        self.enforcer.default_rule = None
        self.enforcer.load_rules()
        creds = {}
        # With no rules and no default rule, enforcement simply fails.
        result = self.enforcer.enforce('rule', 'target', creds)
        self.assertFalse(result)
    def test_check_with_rule(self):
        # A rule named by string is looked up in the loaded rule set.
        self.enforcer.set_rules(dict(default=base.FakeCheck()))
        creds = {}
        result = self.enforcer.enforce('default', 'target', creds)
        self.assertEqual(('target', creds, self.enforcer), result)
    def test_check_rule_not_exist_not_empty_policy_file(self):
        # If the rule doesn't exist, then enforce() fails rather than KeyError.
        # This test needs a non-empty file otherwise the code short-circuits.
        self.create_config_file('policy.json', jsonutils.dumps({"a_rule": []}))
        self.enforcer.default_rule = None
        self.enforcer.load_rules()
        creds = {}
        result = self.enforcer.enforce('rule', 'target', creds)
        self.assertFalse(result)
    def test_check_raise_default(self):
        # When do_raise=True and exc is not used then PolicyNotAuthorized is
        # raised.
        self.enforcer.set_rules(dict(default=_checks.FalseCheck()))
        creds = {}
        self.assertRaisesRegex(policy.PolicyNotAuthorized,
                               " is disallowed by policy",
                               self.enforcer.enforce,
                               'rule', 'target', creds, True)
    def test_check_raise_custom_exception(self):
        # A caller-supplied exc class is raised instead, and receives the
        # extra positional/keyword arguments passed to enforce().
        self.enforcer.set_rules(dict(default=_checks.FalseCheck()))
        creds = {}
        exc = self.assertRaises(
            MyException, self.enforcer.enforce, 'rule', 'target', creds,
            True, MyException, 'arg1', 'arg2', kw1='kwarg1',
            kw2='kwarg2')
        self.assertEqual(('arg1', 'arg2'), exc.args)
        self.assertEqual(dict(kw1='kwarg1', kw2='kwarg2'), exc.kwargs)
class RegisterCheckTestCase(base.PolicyBaseTestCase):
    """Tests for the public policy.register() check-registration hook."""
    # Patch the module-level registry empty so the test is isolated.
    @mock.patch.object(_checks, 'registered_checks', {})
    def test_register_check(self):
        class TestCheck(policy.Check):
            pass
        policy.register('spam', TestCheck)
        # The custom class is now registered under its chosen prefix.
        self.assertEqual(dict(spam=TestCheck), _checks.registered_checks)
class BaseCheckTypesTestCase(base.PolicyBaseTestCase):
    """Verify the composite check classes are exposed as public API."""
    @mock.patch.object(_checks, 'registered_checks', {})
    def test_base_check_types_are_public(self):
        '''Check that those check types are part of public API.
        They are blessed to be used by library consumers.
        '''
        # Subclassing and registering each type proves it is usable from
        # the public `policy` namespace, not just the private _checks one.
        for check_type in (policy.AndCheck, policy.NotCheck,
                           policy.OrCheck, policy.RuleCheck):
            class TestCheck(check_type):
                pass
            check_str = str(check_type)
            policy.register(check_str, TestCheck)
            self.assertEqual(
                TestCheck, _checks.registered_checks[check_str],
                message='%s check type is not public.' % check_str)
class RuleDefaultTestCase(base.PolicyBaseTestCase):
def test_rule_is_parsed(self):
opt = policy.RuleDefault(name='foo', check_str='rule:foo')
self.assertIsInstance(opt.check, _checks.BaseCheck)
self.assertEqual('rule:foo', str(opt.check))
def test_str(self):
opt = policy.RuleDefault(name='foo', check_str='rule:foo')
self.assertEqual('"foo": "rule:foo"', str(opt))
def test_equality_obvious(self):
opt1 = policy.RuleDefault(name='foo', check_str='rule:foo',
description='foo')
opt2 = policy.RuleDefault(name='foo', check_str='rule:foo',
description='bar')
self.assertEqual(opt1, opt2)
def test_equality_less_obvious(self):
opt1 = policy.RuleDefault(name='foo', check_str='',
description='foo')
opt2 = policy.RuleDefault(name='foo', check_str='@',
description='bar')
self.assertEqual(opt1, opt2)
def test_not_equal_check(self):
opt1 = policy.RuleDefault(name='foo', check_str='rule:foo',
description='foo')
opt2 = policy.RuleDefault(name='foo', check_str='rule:bar',
description='bar')
self.assertNotEqual(opt1, opt2)
def test_not_equal_name(self):
opt1 = policy.RuleDefault(name='foo', check_str='rule:foo',
description='foo')
opt2 = policy.RuleDefault(name='bar', check_str='rule:foo',
description='bar')
self.assertNotEqual(opt1, opt2)
def test_not_equal_class(self):
class NotRuleDefault(object):
def __init__(self, name, check_str):
self.name = name
self.check = _parser.parse_rule(check_str)
opt1 = policy.RuleDefault(name='foo', check_str='rule:foo')
opt2 = NotRuleDefault(name='foo', check_str='rule:foo')
self.assertNotEqual(opt1, opt2)
def test_equal_subclass(self):
class RuleDefaultSub(policy.RuleDefault):
pass
opt1 = policy.RuleDefault(name='foo', check_str='rule:foo')
opt2 = RuleDefaultSub(name='foo', check_str='rule:foo')
self.assertEqual(opt1, opt2)
def test_not_equal_subclass(self):
class RuleDefaultSub(policy.RuleDefault):
pass
opt1 = policy.RuleDefault(name='foo', check_str='rule:foo')
opt2 = RuleDefaultSub(name='bar', check_str='rule:foo')
self.assertNotEqual(opt1, opt2)
def test_create_opt_with_scope_types(self):
scope_types = ['project']
opt = policy.RuleDefault(
name='foo',
check_str='role:bar',
scope_types=scope_types
)
self.assertEqual(opt.scope_types, scope_types)
def test_create_opt_with_scope_type_strings_fails(self):
self.assertRaises(
ValueError,
policy.RuleDefault,
name='foo',
check_str='role:bar',
scope_types='project'
)
def test_create_opt_with_multiple_scope_types(self):
opt = policy.RuleDefault(
name='foo',
check_str='role:bar',
scope_types=['project', 'domain', 'system']
)
self.assertEqual(opt.scope_types, ['project', 'domain', 'system'])
def test_ensure_scope_types_are_unique(self):
self.assertRaises(
ValueError,
policy.RuleDefault,
name='foo',
check_str='role:bar',
scope_types=['project', 'project']
)
class DocumentedRuleDefaultDeprecationTestCase(base.PolicyBaseTestCase):
    def test_deprecate_a_policy_check_string(self):
        # Changing only the check string (same policy name) must emit a
        # deprecation warning that names both old and new defaults.
        deprecated_rule = policy.DeprecatedRule(
            name='foo:create_bar',
            check_str='role:fizz'
        )
        rule_list = [policy.DocumentedRuleDefault(
            name='foo:create_bar',
            check_str='role:bang',
            description='Create a bar.',
            operations=[{'path': '/v1/bars', 'method': 'POST'}],
            deprecated_rule=deprecated_rule,
            deprecated_reason='"role:bang" is a better default',
            deprecated_since='N'
        )]
        enforcer = policy.Enforcer(self.conf)
        enforcer.register_defaults(rule_list)
        expected_msg = (
            'Policy "foo:create_bar":"role:fizz" was deprecated in N in favor '
            'of "foo:create_bar":"role:bang". Reason: "role:bang" is a better '
            'default. Either ensure your deployment is ready for the new '
            'default or copy/paste the deprecated policy into your policy '
            'file and maintain it manually.'
        )
        # The warning is raised while the rules are being loaded.
        with mock.patch('warnings.warn') as mock_warn:
            enforcer.load_rules()
            mock_warn.assert_called_once_with(expected_msg)
def test_deprecate_a_policy_name(self):
deprecated_rule = policy.DeprecatedRule(
name='foo:bar',
check_str='role:baz'
)
rule_list = [policy.DocumentedRuleDefault(
name='foo:create_bar',
check_str='role:baz',
description='Create a bar.',
operations=[{'path': '/v1/bars/', 'method': 'POST'}],
deprecated_rule=deprecated_rule,
deprecated_reason=(
'"foo:bar" is not granular enough. If your deployment has '
'overridden "foo:bar", ensure you override the new policies '
'with same role or rule. Not doing this will require the '
'service to assume the new defaults for "foo:bar:create", '
'"foo:bar:update", "foo:bar:list", and "foo:bar:delete", '
'which might be backwards incompatible for your deployment'
),
deprecated_since='N'
)]
expected_msg = (
'Policy "foo:bar":"role:baz" was deprecated in N in favor of '
'"foo:create_bar":"role:baz". Reason: "foo:bar" is not granular '
'enough. If your deployment has overridden "foo:bar", ensure you '
'override the new policies with same role or rule. Not doing this '
'will require the service to assume the new defaults for '
'"foo:bar:create", "foo:bar:update", "foo:bar:list", and '
'"foo:bar:delete", which might be backwards incompatible for your '
'deployment. Either ensure your deployment is ready for the new '
'default or copy/paste the deprecated policy into your policy '
'file and maintain it manually.'
)
rules = jsonutils.dumps({'foo:bar': 'role:bang'})
self.create_config_file('policy.json', rules)
enforcer = policy.Enforcer(self.conf)
enforcer.register_defaults(rule_list)
with mock.patch('warnings.warn') as mock_warn:
enforcer.load_rules(True)
mock_warn.assert_called_once_with(expected_msg)
def test_deprecate_a_policy_for_removal_logs_warning_when_overridden(self):
rule_list = [policy.DocumentedRuleDefault(
name='foo:bar',
check_str='role:baz',
description='Create a foo.',
operations=[{'path': '/v1/foos/', 'method': 'POST'}],
deprecated_for_removal=True,
deprecated_reason=(
'"foo:bar" is no longer a policy used by the service'
),
deprecated_since='N'
)]
expected_msg = (
'Policy "foo:bar":"role:baz" was deprecated for removal in N. '
'Reason: "foo:bar" is no longer a policy used by the service. Its '
'value may be silently ignored in the future.'
)
rules = jsonutils.dumps({'foo:bar': 'role:bang'})
self.create_config_file('policy.json', rules)
enforcer = policy.Enforcer(self.conf)
enforcer.register_defaults(rule_list)
with mock.patch('warnings.warn') as mock_warn:
enforcer.load_rules()
mock_warn.assert_called_once_with(expected_msg)
def test_deprecate_a_policy_for_removal_does_not_log_warning(self):
# We should only log a warning for operators if they are supplying an
# override for a policy that is deprecated for removal.
rule_list = [policy.DocumentedRuleDefault(
name='foo:bar',
check_str='role:baz',
description='Create a foo.',
operations=[{'path': '/v1/foos/', 'method': 'POST'}],
deprecated_for_removal=True,
deprecated_reason=(
'"foo:bar" is no longer a policy used by the service'
),
deprecated_since='N'
)]
enforcer = policy.Enforcer(self.conf)
enforcer.register_defaults(rule_list)
with mock.patch('warnings.warn') as mock_warn:
enforcer.load_rules()
mock_warn.assert_not_called()
def test_deprecated_policy_for_removal_must_include_deprecated_since(self):
self.assertRaises(
ValueError,
policy.DocumentedRuleDefault,
name='foo:bar',
check_str='rule:baz',
description='Create a foo.',
operations=[{'path': '/v1/foos/', 'method': 'POST'}],
deprecated_for_removal=True,
deprecated_reason='Some reason.'
)
def test_deprecated_policy_must_include_deprecated_since(self):
deprecated_rule = policy.DeprecatedRule(
name='foo:bar',
check_str='rule:baz'
)
self.assertRaises(
ValueError,
policy.DocumentedRuleDefault,
name='foo:bar',
check_str='rule:baz',
description='Create a foo.',
operations=[{'path': '/v1/foos/', 'method': 'POST'}],
deprecated_rule=deprecated_rule,
deprecated_reason='Some reason.'
)
def test_deprecated_rule_requires_deprecated_rule_object(self):
self.assertRaises(
ValueError,
policy.DocumentedRuleDefault,
name='foo:bar',
check_str='rule:baz',
description='Create a foo.',
operations=[{'path': '/v1/foos/', 'method': 'POST'}],
deprecated_rule='foo:bar',
deprecated_reason='Some reason.'
)
def test_deprecated_policy_must_include_deprecated_reason(self):
self.assertRaises(
ValueError,
policy.DocumentedRuleDefault,
name='foo:bar',
check_str='rule:baz',
description='Create a foo.',
operations=[{'path': '/v1/foos/', 'method': 'POST'}],
deprecated_for_removal=True,
deprecated_since='N'
)
def test_override_deprecated_policy_with_old_name(self):
# Simulate an operator overriding a policy
rules = jsonutils.dumps({'foo:bar': 'role:bazz'})
self.create_config_file('policy.json', rules)
# Deprecate the policy name and check string in favor of something
# better.
deprecated_rule = policy.DeprecatedRule(
name='foo:bar',
check_str='role:fizz'
)
rule_list = [policy.DocumentedRuleDefault(
name='foo:create_bar',
check_str='role:bang',
description='Create a bar.',
operations=[{'path': '/v1/bars', 'method': 'POST'}],
deprecated_rule=deprecated_rule,
deprecated_reason='"role:bang" is a better default',
deprecated_since='N'
)]
self.enforcer.register_defaults(rule_list)
# Make sure the override supplied by the operator using the old policy
# name is used in favor of the old or new default.
self.assertFalse(
self.enforcer.enforce('foo:create_bar', {}, {'roles': ['fizz']})
)
self.assertFalse(
self.enforcer.enforce('foo:create_bar', {}, {'roles': ['bang']})
)
self.assertTrue(
self.enforcer.enforce('foo:create_bar', {}, {'roles': ['bazz']})
)
def test_override_deprecated_policy_with_new_name(self):
# Simulate an operator overriding a policy using the new policy name
rules = jsonutils.dumps({'foo:create_bar': 'role:bazz'})
self.create_config_file('policy.json', rules)
# Deprecate the policy name and check string in favor of something
# better.
deprecated_rule = policy.DeprecatedRule(
name='foo:bar',
check_str='role:fizz'
)
rule_list = [policy.DocumentedRuleDefault(
name='foo:create_bar',
check_str='role:bang',
description='Create a bar.',
operations=[{'path': '/v1/bars', 'method': 'POST'}],
deprecated_rule=deprecated_rule,
deprecated_reason='"role:bang" is a better default',
deprecated_since='N'
)]
self.enforcer.register_defaults(rule_list)
# Make sure the override supplied by the operator is being used in
# place of either default value.
self.assertFalse(
self.enforcer.enforce('foo:create_bar', {}, {'roles': ['fizz']})
)
self.assertFalse(
self.enforcer.enforce('foo:create_bar', {}, {'roles': ['bang']})
)
self.assertTrue(
self.enforcer.enforce('foo:create_bar', {}, {'roles': ['bazz']})
)
def test_override_both_new_and_old_policy(self):
# Simulate an operator overriding a policy using both the the new and
# old policy names. The following doesn't make a whole lot of sense
# because the overrides are conflicting, but we want to make sure that
# oslo.policy uses foo:create_bar instead of foo:bar.
rules_dict = {
'foo:create_bar': 'role:bazz',
'foo:bar': 'role:wee'
}
rules = jsonutils.dumps(rules_dict)
self.create_config_file('policy.json', rules)
# Deprecate the policy name and check string in favor of something
# better.
deprecated_rule = policy.DeprecatedRule(
name='foo:bar',
check_str='role:fizz'
)
rule_list = [policy.DocumentedRuleDefault(
name='foo:create_bar',
check_str='role:bang',
description='Create a bar.',
operations=[{'path': '/v1/bars', 'method': 'POST'}],
deprecated_rule=deprecated_rule,
deprecated_reason='"role:bang" is a better default',
deprecated_since='N'
)]
self.enforcer.register_defaults(rule_list)
# The default check string for the old policy name foo:bar should fail
self.assertFalse(
self.enforcer.enforce('foo:create_bar', {}, {'roles': ['fizz']})
)
# The default check string for the new policy name foo:create_bar
# should fail
self.assertFalse(
self.enforcer.enforce('foo:create_bar', {}, {'roles': ['bang']})
)
# The override for the old policy name foo:bar should fail
self.assertFalse(
self.enforcer.enforce('foo:create_bar', {}, {'roles': ['wee']})
)
# The override for foo:create_bar should pass
self.assertTrue(
self.enforcer.enforce('foo:create_bar', {}, {'roles': ['bazz']})
)
class DocumentedRuleDefaultTestCase(base.PolicyBaseTestCase):
    """Constructor validation behavior of DocumentedRuleDefault."""

    def _build(self, **overrides):
        # Valid baseline arguments; each test overrides a single field to
        # exercise one specific validation path in the constructor.
        kwargs = {
            'name': 'foo',
            'check_str': 'rule:foo',
            'description': 'foo_api',
            'operations': [{'path': '/foo/', 'method': 'GET'}],
        }
        kwargs.update(overrides)
        return policy.DocumentedRuleDefault(**kwargs)

    def test_contain_operations(self):
        self.assertEqual(1, len(self._build().operations))

    def test_multiple_operations(self):
        opt = self._build(operations=[{'path': '/foo/', 'method': 'GET'},
                                      {'path': '/foo/', 'method': 'POST'}])
        self.assertEqual(2, len(opt.operations))

    def test_description_not_empty(self):
        with self.assertRaises(policy.InvalidRuleDefault):
            self._build(description='')

    def test_operation_not_empty_list(self):
        with self.assertRaises(policy.InvalidRuleDefault):
            self._build(operations=[])

    def test_operation_must_be_list(self):
        with self.assertRaises(policy.InvalidRuleDefault):
            self._build(operations='invalid_op')

    def test_operation_must_be_list_of_dicts(self):
        with self.assertRaises(policy.InvalidRuleDefault):
            self._build(operations=['invalid_op'])

    def test_operation_must_have_path(self):
        with self.assertRaises(policy.InvalidRuleDefault):
            self._build(operations=[{'method': 'POST'}])

    def test_operation_must_have_method(self):
        with self.assertRaises(policy.InvalidRuleDefault):
            self._build(operations=[{'path': '/foo/path/'}])

    def test_operation_must_contain_method_and_path_only(self):
        with self.assertRaises(policy.InvalidRuleDefault):
            self._build(operations=[{'path': '/some/path/',
                                     'method': 'GET',
                                     'break': 'me'}])
class EnforcerCheckRulesTest(base.PolicyBaseTestCase):
    """Behavior of Enforcer.check_rules() for valid, undefined and cyclical
    rule definitions.

    The original class defined a setUp() that only delegated to the parent;
    that redundant override has been removed — the inherited setUp() still
    runs. The repeated "dump rules, write policy.json, force reload"
    boilerplate is factored into _load_policy().
    """

    def _load_policy(self, rules_dict):
        """Write *rules_dict* as policy.json and force a rule reload."""
        self.create_config_file('policy.json', jsonutils.dumps(rules_dict))
        self.enforcer.load_rules(True)

    def test_no_violations(self):
        """A self-consistent policy file passes even in strict mode."""
        self.create_config_file('policy.json', POLICY_JSON_CONTENTS)
        self.enforcer.load_rules(True)
        self.assertTrue(self.enforcer.check_rules(raise_on_violation=True))

    def test_undefined_rule(self):
        """A rule referencing an undefined rule fails the check."""
        self._load_policy({'foo': 'rule:bar'})
        self.assertFalse(self.enforcer.check_rules())

    def test_undefined_rule_raises(self):
        """With raise_on_violation, an undefined reference raises."""
        self._load_policy({'foo': 'rule:bar'})
        self.assertRaises(policy.InvalidDefinitionError,
                          self.enforcer.check_rules, raise_on_violation=True)

    def test_cyclical_rules(self):
        """A direct two-rule cycle fails the check."""
        self._load_policy({'foo': 'rule:bar', 'bar': 'rule:foo'})
        self.assertFalse(self.enforcer.check_rules())

    def test_cyclical_rules_raises(self):
        """With raise_on_violation, a direct cycle raises."""
        self._load_policy({'foo': 'rule:bar', 'bar': 'rule:foo'})
        self.assertRaises(policy.InvalidDefinitionError,
                          self.enforcer.check_rules, raise_on_violation=True)

    def test_complex_cyclical_rules_false(self):
        """A cycle hidden inside and/or expressions is still detected."""
        self._load_policy({'foo': 'rule:bar',
                           'bar': 'rule:baz and role:admin',
                           'baz': 'rule:foo or role:user'})
        self.assertFalse(self.enforcer.check_rules())

    def test_complex_cyclical_rules_true(self):
        """Re-use of a rule in several places is not a cycle."""
        self._load_policy({'foo': 'rule:bar or rule:baz',
                           'bar': 'role:admin',
                           'baz': 'rule:bar or role:user'})
        self.assertTrue(self.enforcer.check_rules())
| [
"albertczhang@berkeley.edu"
] | albertczhang@berkeley.edu |
3782a988c01ab621e1dad4030dbdac9d132b01e0 | 824b582c2e0236e987a29b233308917fbdfc57a7 | /sdk/python/pulumi_google_native/compute/alpha/get_global_address.py | 917177dfb4772220fcaa71f62ff92d5c6aa6d182 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | 24601/pulumi-google-native | ce8faf8455609a9572a8cbe0638c66427bf0ae7f | b219a14201c6c58eaa10caaeacbdaab528931adf | refs/heads/master | 2023-08-23T05:48:31.819709 | 2021-10-08T18:50:44 | 2021-10-08T18:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,292 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetGlobalAddressResult',
'AwaitableGetGlobalAddressResult',
'get_global_address',
'get_global_address_output',
]
@pulumi.output_type
class GetGlobalAddressResult:
    """Result of the compute/alpha getGlobalAddress invoke.

    NOTE: this file is generated by the Pulumi SDK Generator (see the file
    header); do not hand-edit the field list. Each constructor argument is
    type-checked and stored via pulumi.set, and re-exposed read-only through
    the @property accessors below.
    """
    def __init__(__self__, address=None, address_type=None, creation_timestamp=None, description=None, ip_version=None, kind=None, label_fingerprint=None, labels=None, name=None, network=None, network_tier=None, prefix_length=None, purpose=None, region=None, self_link=None, self_link_with_id=None, status=None, subnetwork=None, users=None):
        # Generated validation pattern: verify the runtime type of each field
        # before storing it on the output object.
        if address and not isinstance(address, str):
            raise TypeError("Expected argument 'address' to be a str")
        pulumi.set(__self__, "address", address)
        if address_type and not isinstance(address_type, str):
            raise TypeError("Expected argument 'address_type' to be a str")
        pulumi.set(__self__, "address_type", address_type)
        if creation_timestamp and not isinstance(creation_timestamp, str):
            raise TypeError("Expected argument 'creation_timestamp' to be a str")
        pulumi.set(__self__, "creation_timestamp", creation_timestamp)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if ip_version and not isinstance(ip_version, str):
            raise TypeError("Expected argument 'ip_version' to be a str")
        pulumi.set(__self__, "ip_version", ip_version)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if label_fingerprint and not isinstance(label_fingerprint, str):
            raise TypeError("Expected argument 'label_fingerprint' to be a str")
        pulumi.set(__self__, "label_fingerprint", label_fingerprint)
        if labels and not isinstance(labels, dict):
            raise TypeError("Expected argument 'labels' to be a dict")
        pulumi.set(__self__, "labels", labels)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if network and not isinstance(network, str):
            raise TypeError("Expected argument 'network' to be a str")
        pulumi.set(__self__, "network", network)
        if network_tier and not isinstance(network_tier, str):
            raise TypeError("Expected argument 'network_tier' to be a str")
        pulumi.set(__self__, "network_tier", network_tier)
        if prefix_length and not isinstance(prefix_length, int):
            raise TypeError("Expected argument 'prefix_length' to be a int")
        pulumi.set(__self__, "prefix_length", prefix_length)
        if purpose and not isinstance(purpose, str):
            raise TypeError("Expected argument 'purpose' to be a str")
        pulumi.set(__self__, "purpose", purpose)
        if region and not isinstance(region, str):
            raise TypeError("Expected argument 'region' to be a str")
        pulumi.set(__self__, "region", region)
        if self_link and not isinstance(self_link, str):
            raise TypeError("Expected argument 'self_link' to be a str")
        pulumi.set(__self__, "self_link", self_link)
        if self_link_with_id and not isinstance(self_link_with_id, str):
            raise TypeError("Expected argument 'self_link_with_id' to be a str")
        pulumi.set(__self__, "self_link_with_id", self_link_with_id)
        if status and not isinstance(status, str):
            raise TypeError("Expected argument 'status' to be a str")
        pulumi.set(__self__, "status", status)
        if subnetwork and not isinstance(subnetwork, str):
            raise TypeError("Expected argument 'subnetwork' to be a str")
        pulumi.set(__self__, "subnetwork", subnetwork)
        if users and not isinstance(users, list):
            raise TypeError("Expected argument 'users' to be a list")
        pulumi.set(__self__, "users", users)
    @property
    @pulumi.getter
    def address(self) -> str:
        """
        The static IP address represented by this resource.
        """
        return pulumi.get(self, "address")
    @property
    @pulumi.getter(name="addressType")
    def address_type(self) -> str:
        """
        The type of address to reserve, either INTERNAL or EXTERNAL. If unspecified, defaults to EXTERNAL.
        """
        return pulumi.get(self, "address_type")
    @property
    @pulumi.getter(name="creationTimestamp")
    def creation_timestamp(self) -> str:
        """
        Creation timestamp in RFC3339 text format.
        """
        return pulumi.get(self, "creation_timestamp")
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        An optional description of this resource. Provide this field when you create the resource.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="ipVersion")
    def ip_version(self) -> str:
        """
        The IP version that will be used by this address. Valid options are IPV4 or IPV6. This can only be specified for a global address.
        """
        return pulumi.get(self, "ip_version")
    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        Type of the resource. Always compute#address for addresses.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter(name="labelFingerprint")
    def label_fingerprint(self) -> str:
        """
        A fingerprint for the labels being applied to this Address, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Address.
        """
        return pulumi.get(self, "label_fingerprint")
    @property
    @pulumi.getter
    def labels(self) -> Mapping[str, str]:
        """
        Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.
        """
        return pulumi.get(self, "labels")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def network(self) -> str:
        """
        The URL of the network in which to reserve the address. This field can only be used with INTERNAL type with the VPC_PEERING purpose.
        """
        return pulumi.get(self, "network")
    @property
    @pulumi.getter(name="networkTier")
    def network_tier(self) -> str:
        """
        This signifies the networking tier used for configuring this address and can only take the following values: PREMIUM or STANDARD. Internal IP addresses are always Premium Tier; global external IP addresses are always Premium Tier; regional external IP addresses can be either Standard or Premium Tier. If this field is not specified, it is assumed to be PREMIUM.
        """
        return pulumi.get(self, "network_tier")
    @property
    @pulumi.getter(name="prefixLength")
    def prefix_length(self) -> int:
        """
        The prefix length if the resource represents an IP range.
        """
        return pulumi.get(self, "prefix_length")
    @property
    @pulumi.getter
    def purpose(self) -> str:
        """
        The purpose of this resource, which can be one of the following values: - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork for a Cloud DNS inbound forwarder IP addresses (regional internal IP address in a subnet of a VPC network) - VPC_PEERING for global internal IP addresses used for private services access allocated ranges. - NAT_AUTO for the regional external IP addresses used by Cloud NAT when allocating addresses using . - IPSEC_INTERCONNECT for addresses created from a private IP range that are reserved for a VLAN attachment in an *IPsec-encrypted Cloud Interconnect* configuration. These addresses are regional resources. Not currently available publicly. - `SHARED_LOADBALANCER_VIP` for an internal IP address that is assigned to multiple internal forwarding rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that is used to configure Private Service Connect. Only global internal addresses can use this purpose.
        """
        return pulumi.get(self, "purpose")
    @property
    @pulumi.getter
    def region(self) -> str:
        """
        The URL of the region where a regional address resides. For regional addresses, you must specify the region as a path parameter in the HTTP request URL. *This field is not applicable to global addresses.*
        """
        return pulumi.get(self, "region")
    @property
    @pulumi.getter(name="selfLink")
    def self_link(self) -> str:
        """
        Server-defined URL for the resource.
        """
        return pulumi.get(self, "self_link")
    @property
    @pulumi.getter(name="selfLinkWithId")
    def self_link_with_id(self) -> str:
        """
        Server-defined URL for this resource with the resource id.
        """
        return pulumi.get(self, "self_link_with_id")
    @property
    @pulumi.getter
    def status(self) -> str:
        """
        The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. An address that is RESERVING is currently in the process of being reserved. A RESERVED address is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter
    def subnetwork(self) -> str:
        """
        The URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range. This field can only be used with INTERNAL type with a GCE_ENDPOINT or DNS_RESOLVER purpose.
        """
        return pulumi.get(self, "subnetwork")
    @property
    @pulumi.getter
    def users(self) -> Sequence[str]:
        """
        The URLs of the resources that are using this address.
        """
        return pulumi.get(self, "users")
class AwaitableGetGlobalAddressResult(GetGlobalAddressResult):
    """Awaitable wrapper around GetGlobalAddressResult.

    Generated Pulumi shim: awaiting an instance immediately produces a plain
    GetGlobalAddressResult with the same field values.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator, which is
        # what makes the object awaitable; the generator never suspends and
        # simply returns the already-resolved result.
        if False:
            yield self
        return GetGlobalAddressResult(
            address=self.address,
            address_type=self.address_type,
            creation_timestamp=self.creation_timestamp,
            description=self.description,
            ip_version=self.ip_version,
            kind=self.kind,
            label_fingerprint=self.label_fingerprint,
            labels=self.labels,
            name=self.name,
            network=self.network,
            network_tier=self.network_tier,
            prefix_length=self.prefix_length,
            purpose=self.purpose,
            region=self.region,
            self_link=self.self_link,
            self_link_with_id=self.self_link_with_id,
            status=self.status,
            subnetwork=self.subnetwork,
            users=self.users)
def get_global_address(address: Optional[str] = None,
                       project: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGlobalAddressResult:
    """
    Returns the specified address resource. Gets a list of available addresses by making a list() request.

    :param address: Identifier of the global address resource to look up.
    :param project: Project to query; when None the provider's default
        project is presumably used — determined by the invoke backend.
    :param opts: Options controlling how the invoke is performed.
    """
    __args__ = dict()
    __args__['address'] = address
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronously-awaitable invoke against the google-native provider.
    __ret__ = pulumi.runtime.invoke('google-native:compute/alpha:getGlobalAddress', __args__, opts=opts, typ=GetGlobalAddressResult).value
    return AwaitableGetGlobalAddressResult(
        address=__ret__.address,
        address_type=__ret__.address_type,
        creation_timestamp=__ret__.creation_timestamp,
        description=__ret__.description,
        ip_version=__ret__.ip_version,
        kind=__ret__.kind,
        label_fingerprint=__ret__.label_fingerprint,
        labels=__ret__.labels,
        name=__ret__.name,
        network=__ret__.network,
        network_tier=__ret__.network_tier,
        prefix_length=__ret__.prefix_length,
        purpose=__ret__.purpose,
        region=__ret__.region,
        self_link=__ret__.self_link,
        self_link_with_id=__ret__.self_link_with_id,
        status=__ret__.status,
        subnetwork=__ret__.subnetwork,
        users=__ret__.users)
@_utilities.lift_output_func(get_global_address)
def get_global_address_output(address: Optional[pulumi.Input[str]] = None,
                              project: Optional[pulumi.Input[Optional[str]]] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGlobalAddressResult]:
    """
    Returns the specified address resource. Gets a list of available addresses by making a list() request.
    """
    # Body intentionally empty: lift_output_func wraps get_global_address so
    # that Input/Output-typed arguments are resolved before the invoke runs.
    ...
| [
"noreply@github.com"
] | 24601.noreply@github.com |
def main():
    """Maximize net calories eaten on a circular sushi counter.

    Reads from stdin: n (number of sushi) and c (counter circumference),
    then n lines of "x v" (position clockwise from the start, calories).
    You walk along the circle eating sushi; walking one unit costs one
    calorie (modeled here by subtracting distance from the calorie total).
    You may reverse direction at most effectively once (go one way, then
    the other), and you stop wherever the net total is maximal.

    Strategy: prefix maxima for going clockwise (and clockwise-then-back
    to the start) and suffix maxima for counter-clockwise, combined over
    every possible switch point.
    """
    from collections import namedtuple
    import sys
    input = sys.stdin.readline
    Sushi = namedtuple('Sushi', 'x cal')
    n, c = map(int, input().split())
    a = []
    for _ in range(n):
        x, v = map(int, input().split())
        a.append(Sushi(x=x, cal=v))
    # Input is already sorted by x in ascending order.
    clock = [0] * (n + 1)  # best net calories leaving the circle at or before sushi i (clockwise)
    clock_to_0 = [0] * (n + 1)  # best net calories going clockwise then returning to the start
    ma = 0  # running max: leave after the current sushi (pay distance once)
    ma0 = 0  # running max: return to start (pay distance twice)
    curr = 0  # calories eaten so far (ignoring walking cost)
    for i, s in enumerate(a, start=1):
        curr += s.cal
        ma = max(ma, curr - s.x)
        ma0 = max(ma0, curr - s.x * 2)
        clock[i] = ma
        clock_to_0[i] = ma0
    anti = [0] * (n + 1)  # best net calories leaving at or after sushi i (counter-clockwise)
    anti_to_0 = [0] * (n + 1)  # best net calories going counter-clockwise then returning to the start
    ma = 0  # running max: leave after the current sushi
    ma0 = 0  # running max: return to start
    curr = 0  # calories eaten so far (ignoring walking cost)
    for i, s in zip(range(n, -1, -1), reversed(a)):
        curr += s.cal
        # Counter-clockwise distance from the start to sushi at x is c - x.
        ma = max(ma, curr - (c - s.x))
        ma0 = max(ma0, curr - (c - s.x) * 2)
        anti[i] = ma
        anti_to_0[i] = ma0
    ans = 0
    for exit_pos in range(1, n + 1):
        # Either go clockwise first and finish counter-clockwise, or the
        # reverse; exit_pos marks the first sushi taken in the second leg.
        ans = max(
            ans,
            clock_to_0[exit_pos - 1] + anti[exit_pos],
            anti_to_0[(exit_pos + 1) % (n + 1)] + clock[exit_pos]
        )
    print(ans)


if __name__ == '__main__':
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e7bd3908c39dac626bf71a69c7afdbf3231e9f2a | ba80ca143ba35fd481730786a27ebdb1f88ce835 | /algorithm/backjoon/35.DFS/1890.py | aca1cfcee092c960fd8624f4d28c8de0c983167f | [] | no_license | uiandwe/TIL | c541020b65adc53578aeb1c3ba4c6770b3b2e8b3 | 186544469374dd0279099c6c6aa7555ee23e42fe | refs/heads/master | 2022-02-15T08:33:07.270573 | 2022-01-01T15:22:54 | 2022-01-01T15:22:54 | 63,420,931 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | map = [
[2, 3, 3, 1],
[1, 2, 1, 3],
[1, 2, 3, 1],
[3, 1, 1, 0]
]
min_step = float('inf')
position_x = [0, 1]
position_y = [1, 0]
def dfs(x, y, step):
global min_step
if x == 3 and y == 3:
if step < min_step:
min_step = step
return
c = map[x][y]
for i in range(2):
dx = x + ((c*position_x[i]))
dy = y + ((c*position_y[i]))
if dx >= 0 and dx < 4 and dy >= 0 and dy < 4:
dfs(dx, dy, step+1)
def solution():
dfs(0, 0, 0)
print(min_step)
solution()
| [
"noreply@github.com"
] | uiandwe.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.