# day3/test12.py (repo: priyankakumbha/python)
if False:
    print('if1 begin')
elif False:
    print('elif begin')
print('end of program')
# programmers/level2/순위검색.py (repo: taehyungz/Algorithm), Programmers Level 2 "rank search"
from itertools import combinations
import collections
from bisect import bisect_left


def solution(info, query):
    answer = []
    people = collections.defaultdict(list)
    # Index every applicant under all 2**4 wildcard-masked keys ('-' stands
    # for "any value" in that position).
    for person in info:
        person_info = person.split()
        person_strs = person_info[:-1]
        wage = int(person_info[-1])
        people[''.join(person_strs)].append(wage)
        for num in range(1, 5):
            for idxs in combinations(range(4), num):
                tperson_strs = person_strs[:]
                for idx in idxs:
                    tperson_strs[idx] = '-'
                people[''.join(tperson_strs)].append(wage)
    # Sort each wage list once so every query can use a binary search.
    for key in people.keys():
        people[key].sort()
    for q in query:
        qsplit = q.split(" and ")
        qsplit.extend(qsplit.pop().split())
        wage = int(qsplit.pop())
        find = people[''.join(qsplit)]
        answer.append(len(find) - bisect_left(find, wage))
    return answer
"thkthk97@naver.com"
] | thkthk97@naver.com |
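# Quick self-check with hypothetical inputs (not the official Programmers
# sample): '-' is the wildcard and the trailing number is the minimum score.
if __name__ == '__main__':
    assert solution(
        ["java backend junior pizza 150", "python frontend senior chicken 210"],
        ["java and backend and junior and pizza 100", "- and - and - and - 150"],
    ) == [1, 2]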
# 美团/hotel.py (repo: jiangsy163/pythonProject): Meituan hotel detail scraper
"""
author:张鑫
date:2021/5/12 14:30
"""
import random
import time

import pymongo
import requests
import schedule
from fake_useragent import UserAgent
headers = {
    "User-Agent": UserAgent().random,
}
class Hotel:
    def __init__(self):
        self.hotels_list = {}
        # Target hotels: the Meituan poiId plus the per-hotel X-FOR-WITH token
        # captured from the site (the tokens expire and must be refreshed).
        self.hotel_list = [
            {
                'poiId': 75548,
                'X-FOR-WITH': 'x/9q1QUUZKFfU41iyjPEDA9Q736VmZB1I2licJlwVvgPNZCEFEbMOWPgXOYEjdFKxbFmzsu2EVAbYGkE9+c6bH6EWcT9mwgMMhg2iTlnV5U7JyMoxP16wPhPAubMTDjyQCSm8GBiNSrBmMoIPS82mg8kub4PH/us9AoxkkQ0rxipGaYnMHt9+eoJm9FtuJgRETJdRz1xYOAQQovsST5YXQ==',
            },
            {
                'poiId': 193123202,
                'X-FOR-WITH': 'x/9q1QUUZKFfU41iyjPEDA9Q736VmZB1I2licJlwVvi5ijmy54xl/rRWS+a/2Tm/P5SWrOrXbGsX2iUv7eGoHFmW3F5MroqTmt0scBcgyz9UWELNzLKSGgBgw6cp3qt3XvGX+c2v9urYn8DwF5L5blhoHyMiM6lOy3F2AvaGBG/U+GxOYbSlBp0jLGyp40g+kYGoIxQT5K5uGrrtfHxTTw==',
            },
            {
                'poiId': 41273222,
                'X-FOR-WITH': 'x/9q1QUUZKFfU41iyjPEDA9Q736VmZB1I2licJlwVvg7FQnnwBWei9LkGvzfvS/yo3GN6Zp9UIclMGy0wA3/RpoGmjojIee41QCP6+a9bT7EZa83iyqQE2egXaU8i1H6Uq3sGZDN9ZpbuUQYevscAbmweNGBpXgpxpxAOaCE0hqJHJTm/8pZyBE3nVblPGlpRvOyamNBe8GvS3MTgMwQjA==',
            },
            {
                'poiId': 877592431,
                'X-FOR-WITH': 'x/9q1QUUZKFfU41iyjPEDA9Q736VmZB1I2licJlwVvhKm/a19UcToa6iTxYbyCeesRRlqWw+aMLIERhku9PCegh7szcuW9pbkZ2an2pEIYgWJtRuk9V3IX1x6z2ZGh4PFHLg28wMqxNQfVZt31bsxqv7vuoGV5fY9iglF1SmZsoKqGanVPieVq/jvQnDUtnIbHRXFcgpcZkWWGtnVSsOzg==',
            },
        ]

    def connect_mongo(self):
        self.database = pymongo.MongoClient('localhost', port=27017)
        self.db = self.database['meituan']
        self.hotels = self.db['hotels']
        return self.hotels
    def get_html(self):
        for i in self.hotel_list:
            time.sleep(random.randint(1, 5))
            url = (f'https://ihotel.meituan.com/group/v2/poi/detail/service'
                   f'?utm_medium=pc&version_name=999.9'
                   f'&poiId={i["poiId"]}&X-FOR-WITH={i["X-FOR-WITH"]}')
            self.task(url)

    def task(self, url):
        # verify=False keeps the original behavior of skipping TLS verification.
        html = requests.get(url=url, headers=headers, verify=False).json()['data']
        hotels = self.connect_mongo()
        return self.analy_html(html, self.hotels_list, hotels)
    def analy_html(self, html, hotels_list, hotels):
        # NOTE: each loop below overwrites the same keys in hotels_list, so
        # only the last item of every section is kept (as in the original).
        # Hotel services
        service = html['serviceIconsInfo']['serviceIcons']
        for item1 in service:
            hotels_list['attrDesc'] = item1['attrDesc']
            hotels_list['imgUrl'] = item1['imgUrl']
            print(f"service: {item1['attrDesc']}, icon: {item1['imgUrl']}")
        # Hotel introduction
        introduce = html['hotelIntroInfo']
        for item2 in introduce['poiExtendsInfos']:
            hotels_list['attrDesc'] = item2['attrDesc']
            hotels_list['attrValue'] = item2['attrValue']
            print(f"introduction: {item2['attrDesc']}, detail: {item2['attrValue']}")
        for item3 in introduce['poiExtendsInfosDesc']:
            hotels_list['item3'] = item3
            print(f"opening date: {item3}")
        hotels_list['poiDesc'] = introduce['poiDesc']
        # Facilities
        facilities = html['hotelFacilitiesRuleInfo']['hotelFacilitiesInfo']['serviceIconsInfo']['serviceIcons']
        for item5 in facilities:
            hotels_list['attrDesc'] = item5['attrDesc']
            hotels_list['imgUrl'] = item5['imgUrl']
            print(f"facility: {item5['attrDesc']}, detail: {item5['imgUrl']}")
        # Booking notes
        read = html['hotelFacilitiesRuleInfo']['hotelRuleInfo']['checkInAndOutList']
        for item6 in read:
            hotels_list['attrRuleName'] = item6['attrRuleName']
            hotels_list['attrValueDesc'] = item6['attrValueDesc']
            print(f"rule: {item6['attrRuleName']}, detail: {item6['attrValueDesc']}")
        # Booking notes, part 2
        read2 = html['hotelFacilitiesRuleInfo']['roomBookingModelV2']['dataCellModels']
        for item7 in read2:
            hotels_list['title'] = item7['title']
            hotels_list['contents'] = item7['contents']
            print(f"title: {item7['title']}, icon: {item7['icon']}, contents: {item7['contents']}")
        # Transportation
        traffic = html['trafficIntroInfo']
        # Subway stations
        for item9 in traffic['subwayStations']:
            print(f"subway station: {item9['name']}")
            print(f"distance description: {item9['distanceDesc']}")
            print(f"straight-line distance: {item9['distance']}")
            print(f"longitude: {item9['longitude']}")
            print(f"latitude: {item9['latitude']}")
        # Railway stations
        for item10 in traffic['railwayStations']:
            print(f"railway station: {item10['name']}")
            print(f"distance description: {item10['distanceDesc']}")
            print(f"straight-line distance: {item10['distance']}")
            print(f"longitude: {item10['longitude']}")
            print(f"latitude: {item10['latitude']}")
            hotels_list['latitude'] = item10['latitude']
        # Airports
        for item11 in traffic['airports']:
            print(f"airport: {item11['name']}")
            print(f"distance description: {item11['distanceDesc']}")
            print(f"straight-line distance: {item11['distance']}")
            print(f"longitude: {item11['longitude']}")
            print(f"latitude: {item11['latitude']}")
        # Address
        print(f"address: {traffic['addr']}")
        # Highlights
        season = html['multiplePoiFeature']
        print(f'hotel name: {season["poiName"]}')
        print(f'hotel class: {season["hotelStar"]}')
        print(f'class description: {season["starExplain"]}')
        poi_name = season["poiName"]
        hotels_list['poiName'] = poi_name
        hotels_list['starExplain'] = season["starExplain"]
        # Insert only if this hotel is not stored yet. The original compared
        # the pymongo collection itself to a string (always False), so nothing
        # was ever inserted; find_one() performs the intended duplicate check.
        if hotels.find_one({'poiName': poi_name}) is None:
            # Insert a copy so _id is not added to the shared dict.
            hotels.insert_one(dict(hotels_list))
            print(hotels_list)
        else:
            print('record already exists')
if __name__ == '__main__':
    hotel = Hotel()
    hotel.get_html()
    # schedule invokes jobs with no arguments, so register the argument-free
    # get_html (the original registered hotel.task, which needs a url, and
    # re-registered the job on every pass through the loop).
    schedule.every(5).seconds.do(hotel.get_html)
    while True:
        schedule.run_pending()
        time.sleep(10)
# ECE364/.PyCharm40/system/python_stubs/348993582/nss/nss/__init__.py (repo: ArbalestV/Purdue-Coursework)
# encoding: utf-8
# module nss.nss
# from /usr/lib64/python2.6/site-packages/nss/nss.so
# by generator 1.136
""" This module implements the NSS functions """
# no imports
# Variables with simple values
AsDottedDecimal = 9
AsEnum = 5
AsEnumDescription = 7
AsEnumName = 6
AsIndex = 8
AsLabeledString = 4
AsObject = 0
AsString = 1
AsTypeEnum = 3
AsTypeString = 2
certDirectoryName = 5
certDNSName = 3
certEDIPartyName = 6
certificateUsageAnyCA = 2048
certificateUsageCheckAllUsages = 0
certificateUsageEmailRecipient = 32
certificateUsageEmailSigner = 16
certificateUsageObjectSigner = 64
certificateUsageProtectedObjectSigner = 512
certificateUsageSSLCA = 8
certificateUsageSSLClient = 1
certificateUsageSSLServer = 2
certificateUsageSSLServerWithStepUp = 4
certificateUsageStatusResponder = 1024
certificateUsageUserCertImport = 128
certificateUsageVerifyCA = 256
certIPAddress = 8
certOtherName = 1
certRegisterID = 9
certRFC822Name = 2
certURI = 7
certX400Address = 4
CKA_AC_ISSUER = 131
CKA_ALLOWED_MECHANISMS = 1073743360
CKA_ALWAYS_AUTHENTICATE = 514
CKA_ALWAYS_SENSITIVE = 357
CKA_APPLICATION = 16
CKA_ATTR_TYPES = 133
CKA_AUTH_PIN_FLAGS = 513
CKA_BASE = 306
CKA_BITS_PER_PIXEL = 1030
CKA_CERTIFICATE_CATEGORY = 135
CKA_CERTIFICATE_TYPE = 128
CKA_CHAR_COLUMNS = 1028
CKA_CHAR_ROWS = 1027
CKA_CHAR_SETS = 1152
CKA_CHECK_VALUE = 144
CKA_CLASS = 0
CKA_COEFFICIENT = 296
CKA_COLOR = 1029
CKA_DECRYPT = 261
CKA_DEFAULT_CMS_ATTRIBUTES = 1282
CKA_DERIVE = 268
CKA_ECDSA_PARAMS = 384
CKA_EC_PARAMS = 384
CKA_EC_POINT = 385
CKA_ENCODING_METHODS = 1153
CKA_ENCRYPT = 260
CKA_END_DATE = 273
CKA_EXPONENT_1 = 294
CKA_EXPONENT_2 = 295
CKA_EXTRACTABLE = 354
CKA_HASH_OF_ISSUER_PUBLIC_KEY = 139
CKA_HASH_OF_SUBJECT_PUBLIC_KEY = 138
CKA_HAS_RESET = 770
CKA_HW_FEATURE_TYPE = 768
CKA_ID = 258
CKA_ISSUER = 129
CKA_JAVA_MIDP_SECURITY_DOMAIN = 136
CKA_KEY_GEN_MECHANISM = 358
CKA_KEY_TYPE = 256
CKA_LABEL = 3
CKA_LOCAL = 355
CKA_MECHANISM_TYPE = 1280
CKA_MIME_TYPES = 1154
CKA_MODIFIABLE = 368
CKA_MODULUS = 288
CKA_MODULUS_BITS = 289
CKA_NEVER_EXTRACTABLE = 356
CKA_OBJECT_ID = 18
CKA_OWNER = 132
CKA_PIXEL_X = 1024
CKA_PIXEL_Y = 1025
CKA_PRIME = 304
CKA_PRIME_1 = 292
CKA_PRIME_2 = 293
CKA_PRIME_BITS = 307
CKA_PRIVATE = 2
CKA_PRIVATE_EXPONENT = 291
CKA_PUBLIC_EXPONENT = 290
CKA_REQUIRED_CMS_ATTRIBUTES = 1281
CKA_RESET_ON_INIT = 769
CKA_RESOLUTION = 1026
CKA_SECONDARY_AUTH = 512
CKA_SENSITIVE = 259
CKA_SERIAL_NUMBER = 130
CKA_SIGN = 264
CKA_SIGN_RECOVER = 265
CKA_START_DATE = 272
CKA_SUBJECT = 257
CKA_SUBPRIME = 305
CKA_SUBPRIME_BITS = 308
CKA_SUB_PRIME_BITS = 308
CKA_SUPPORTED_CMS_ATTRIBUTES = 1283
CKA_TOKEN = 1
CKA_TRUSTED = 134
CKA_UNWRAP = 263
CKA_UNWRAP_TEMPLATE = 1073742354
CKA_URL = 137
CKA_VALUE = 17
CKA_VALUE_BITS = 352
CKA_VALUE_LEN = 353
CKA_VENDOR_DEFINED = 2147483648
CKA_VERIFY = 266
CKA_VERIFY_RECOVER = 267
CKA_WRAP = 262
CKA_WRAP_TEMPLATE = 1073742353
CKA_WRAP_WITH_TRUSTED = 528
CKM_AES_CBC = 4226
CKM_AES_CBC_ENCRYPT_DATA = 4357
CKM_AES_CBC_PAD = 4229
CKM_AES_ECB = 4225
CKM_AES_ECB_ENCRYPT_DATA = 4356
CKM_AES_KEY_GEN = 4224
CKM_AES_MAC = 4227
CKM_AES_MAC_GENERAL = 4228
CKM_BATON_CBC128 = 4147
CKM_BATON_COUNTER = 4148
CKM_BATON_ECB128 = 4145
CKM_BATON_ECB96 = 4146
CKM_BATON_KEY_GEN = 4144
CKM_BATON_SHUFFLE = 4149
CKM_BATON_WRAP = 4150
CKM_BLOWFISH_CBC = 4241
CKM_BLOWFISH_KEY_GEN = 4240
CKM_CAMELLIA_CBC = 1362
CKM_CAMELLIA_CBC_ENCRYPT_DATA = 1367
CKM_CAMELLIA_CBC_PAD = 1365
CKM_CAMELLIA_ECB = 1361
CKM_CAMELLIA_ECB_ENCRYPT_DATA = 1366
CKM_CAMELLIA_KEY_GEN = 1360
CKM_CAMELLIA_MAC = 1363
CKM_CAMELLIA_MAC_GENERAL = 1364
CKM_CAST128_CBC = 802
CKM_CAST128_CBC_PAD = 805
CKM_CAST128_ECB = 801
CKM_CAST128_KEY_GEN = 800
CKM_CAST128_MAC = 803
CKM_CAST128_MAC_GENERAL = 804
CKM_CAST3_CBC = 786
CKM_CAST3_CBC_PAD = 789
CKM_CAST3_ECB = 785
CKM_CAST3_KEY_GEN = 784
CKM_CAST3_MAC = 787
CKM_CAST3_MAC_GENERAL = 788
CKM_CAST5_CBC = 802
CKM_CAST5_CBC_PAD = 805
CKM_CAST5_ECB = 801
CKM_CAST5_KEY_GEN = 800
CKM_CAST5_MAC = 803
CKM_CAST5_MAC_GENERAL = 804
CKM_CAST_CBC = 770
CKM_CAST_CBC_PAD = 773
CKM_CAST_ECB = 769
CKM_CAST_KEY_GEN = 768
CKM_CAST_MAC = 771
CKM_CAST_MAC_GENERAL = 772
CKM_CDMF_CBC = 322
CKM_CDMF_CBC_PAD = 325
CKM_CDMF_ECB = 321
CKM_CDMF_KEY_GEN = 320
CKM_CDMF_MAC = 323
CKM_CDMF_MAC_GENERAL = 324
CKM_CMS_SIG = 1280
CKM_CONCATENATE_BASE_AND_DATA = 866
CKM_CONCATENATE_BASE_AND_KEY = 864
CKM_CONCATENATE_DATA_AND_BASE = 867
CKM_DES2_KEY_GEN = 304
CKM_DES3_CBC = 307
CKM_DES3_CBC_ENCRYPT_DATA = 4355
CKM_DES3_CBC_PAD = 310
CKM_DES3_ECB = 306
CKM_DES3_ECB_ENCRYPT_DATA = 4354
CKM_DES3_KEY_GEN = 305
CKM_DES3_MAC = 308
CKM_DES3_MAC_GENERAL = 309
CKM_DES_CBC = 290
CKM_DES_CBC_ENCRYPT_DATA = 4353
CKM_DES_CBC_PAD = 293
CKM_DES_CFB64 = 338
CKM_DES_CFB8 = 339
CKM_DES_ECB = 289
CKM_DES_ECB_ENCRYPT_DATA = 4352
CKM_DES_KEY_GEN = 288
CKM_DES_MAC = 291
CKM_DES_MAC_GENERAL = 292
CKM_DES_OFB64 = 336
CKM_DES_OFB8 = 337
CKM_DH_PKCS_DERIVE = 33
CKM_DH_PKCS_KEY_PAIR_GEN = 32
CKM_DH_PKCS_PARAMETER_GEN = 8193
CKM_DSA = 17
CKM_DSA_KEY_PAIR_GEN = 16
CKM_DSA_PARAMETER_GEN = 8192
CKM_DSA_SHA1 = 18
CKM_ECDH1_COFACTOR_DERIVE = 4177
CKM_ECDH1_DERIVE = 4176
CKM_ECDSA = 4161
CKM_ECDSA_KEY_PAIR_GEN = 4160
CKM_ECDSA_SHA1 = 4162
CKM_ECMQV_DERIVE = 4178
CKM_EC_KEY_PAIR_GEN = 4160
CKM_EXTRACT_KEY_FROM_KEY = 869
CKM_FASTHASH = 4208
CKM_FORTEZZA_TIMESTAMP = 4128
CKM_GENERIC_SECRET_KEY_GEN = 848
CKM_IDEA_CBC = 834
CKM_IDEA_CBC_PAD = 837
CKM_IDEA_ECB = 833
CKM_IDEA_KEY_GEN = 832
CKM_IDEA_MAC = 835
CKM_IDEA_MAC_GENERAL = 836
CKM_JUNIPER_CBC128 = 4194
CKM_JUNIPER_COUNTER = 4195
CKM_JUNIPER_ECB128 = 4193
CKM_JUNIPER_KEY_GEN = 4192
CKM_JUNIPER_SHUFFLE = 4196
CKM_JUNIPER_WRAP = 4197
CKM_KEA_KEY_DERIVE = 4113
CKM_KEA_KEY_PAIR_GEN = 4112
CKM_KEY_WRAP_LYNKS = 1024
CKM_KEY_WRAP_SET_OAEP = 1025
CKM_MD2 = 512
CKM_MD2_HMAC = 513
CKM_MD2_HMAC_GENERAL = 514
CKM_MD2_KEY_DERIVATION = 913
CKM_MD2_RSA_PKCS = 4
CKM_MD5 = 528
CKM_MD5_HMAC = 529
CKM_MD5_HMAC_GENERAL = 530
CKM_MD5_KEY_DERIVATION = 912
CKM_MD5_RSA_PKCS = 5
CKM_PBA_SHA1_WITH_SHA1_HMAC = 960
CKM_PBE_MD2_DES_CBC = 928
CKM_PBE_MD5_CAST128_CBC = 932
CKM_PBE_MD5_CAST3_CBC = 931
CKM_PBE_MD5_CAST5_CBC = 932
CKM_PBE_MD5_CAST_CBC = 930
CKM_PBE_MD5_DES_CBC = 929
CKM_PBE_SHA1_CAST128_CBC = 933
CKM_PBE_SHA1_CAST5_CBC = 933
CKM_PBE_SHA1_DES2_EDE_CBC = 937
CKM_PBE_SHA1_DES3_EDE_CBC = 936
CKM_PBE_SHA1_RC2_128_CBC = 938
CKM_PBE_SHA1_RC2_40_CBC = 939
CKM_PBE_SHA1_RC4_128 = 934
CKM_PBE_SHA1_RC4_40 = 935
CKM_PKCS5_PBKD2 = 944
CKM_RC2_CBC = 258
CKM_RC2_CBC_PAD = 261
CKM_RC2_ECB = 257
CKM_RC2_KEY_GEN = 256
CKM_RC2_MAC = 259
CKM_RC2_MAC_GENERAL = 260
CKM_RC4 = 273
CKM_RC4_KEY_GEN = 272
CKM_RC5_CBC = 818
CKM_RC5_CBC_PAD = 821
CKM_RC5_ECB = 817
CKM_RC5_KEY_GEN = 816
CKM_RC5_MAC = 819
CKM_RC5_MAC_GENERAL = 820
CKM_RIPEMD128 = 560
CKM_RIPEMD128_HMAC = 561
CKM_RIPEMD128_HMAC_GENERAL = 562
CKM_RIPEMD128_RSA_PKCS = 7
CKM_RIPEMD160 = 576
CKM_RIPEMD160_HMAC = 577
CKM_RIPEMD160_HMAC_GENERAL = 578
CKM_RIPEMD160_RSA_PKCS = 8
CKM_RSA_9796 = 2
CKM_RSA_PKCS = 1
CKM_RSA_PKCS_KEY_PAIR_GEN = 0
CKM_RSA_PKCS_OAEP = 9
CKM_RSA_PKCS_PSS = 13
CKM_RSA_X9_31 = 11
CKM_RSA_X9_31_KEY_PAIR_GEN = 10
CKM_RSA_X_509 = 3
CKM_SEED_CBC = 1618
CKM_SEED_CBC_ENCRYPT_DATA = 1623
CKM_SEED_CBC_PAD = 1621
CKM_SEED_ECB = 1617
CKM_SEED_ECB_ENCRYPT_DATA = 1622
CKM_SEED_KEY_GEN = 1616
CKM_SEED_MAC = 1619
CKM_SEED_MAC_GENERAL = 1620
CKM_SHA1_KEY_DERIVATION = 914
CKM_SHA1_RSA_PKCS = 6
CKM_SHA1_RSA_PKCS_PSS = 14
CKM_SHA1_RSA_X9_31 = 12
CKM_SHA224 = 597
CKM_SHA224_HMAC = 598
CKM_SHA224_HMAC_GENERAL = 599
CKM_SHA224_KEY_DERIVATION = 918
CKM_SHA224_RSA_PKCS = 70
CKM_SHA224_RSA_PKCS_PSS = 71
CKM_SHA256 = 592
CKM_SHA256_HMAC = 593
CKM_SHA256_HMAC_GENERAL = 594
CKM_SHA256_KEY_DERIVATION = 915
CKM_SHA256_RSA_PKCS = 64
CKM_SHA256_RSA_PKCS_PSS = 67
CKM_SHA384 = 608
CKM_SHA384_HMAC = 609
CKM_SHA384_HMAC_GENERAL = 610
CKM_SHA384_KEY_DERIVATION = 916
CKM_SHA384_RSA_PKCS = 65
CKM_SHA384_RSA_PKCS_PSS = 68
CKM_SHA512 = 624
CKM_SHA512_HMAC = 625
CKM_SHA512_HMAC_GENERAL = 626
CKM_SHA512_KEY_DERIVATION = 917
CKM_SHA512_RSA_PKCS = 66
CKM_SHA512_RSA_PKCS_PSS = 69
CKM_SHA_1 = 544
CKM_SHA_1_HMAC = 545
CKM_SHA_1_HMAC_GENERAL = 546
CKM_SKIPJACK_CBC64 = 4098
CKM_SKIPJACK_CFB16 = 4102
CKM_SKIPJACK_CFB32 = 4101
CKM_SKIPJACK_CFB64 = 4100
CKM_SKIPJACK_CFB8 = 4103
CKM_SKIPJACK_ECB64 = 4097
CKM_SKIPJACK_KEY_GEN = 4096
CKM_SKIPJACK_OFB64 = 4099
CKM_SKIPJACK_PRIVATE_WRAP = 4105
CKM_SKIPJACK_RELAYX = 4106
CKM_SKIPJACK_WRAP = 4104
CKM_SSL3_KEY_AND_MAC_DERIVE = 882
CKM_SSL3_MASTER_KEY_DERIVE = 881
CKM_SSL3_MASTER_KEY_DERIVE_DH = 883
CKM_SSL3_MD5_MAC = 896
CKM_SSL3_PRE_MASTER_KEY_GEN = 880
CKM_SSL3_SHA1_MAC = 897
CKM_TLS_KEY_AND_MAC_DERIVE = 886
CKM_TLS_MASTER_KEY_DERIVE = 885
CKM_TLS_MASTER_KEY_DERIVE_DH = 887
CKM_TLS_PRE_MASTER_KEY_GEN = 884
CKM_TLS_PRF = 888
CKM_TWOFISH_CBC = 4243
CKM_TWOFISH_KEY_GEN = 4242
CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE = 981
CKM_WTLS_MASTER_KEY_DERIVE = 977
CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC = 978
CKM_WTLS_PRE_MASTER_KEY_GEN = 976
CKM_WTLS_PRF = 979
CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE = 980
CKM_X9_42_DH_DERIVE = 49
CKM_X9_42_DH_HYBRID_DERIVE = 50
CKM_X9_42_DH_KEY_PAIR_GEN = 48
CKM_X9_42_DH_PARAMETER_GEN = 8194
CKM_X9_42_MQV_DERIVE = 51
CKM_XOR_BASE_AND_DATA = 868
crlEntryReasonAaCompromise = 10
crlEntryReasonAffiliationChanged = 3
crlEntryReasonCaCompromise = 2
crlEntryReasoncertificatedHold = 6
crlEntryReasonCessationOfOperation = 5
crlEntryReasonKeyCompromise = 1
crlEntryReasonPrivilegeWithdrawn = 9
crlEntryReasonRemoveFromCRL = 8
crlEntryReasonSuperseded = 4
crlEntryReasonUnspecified = 0
CRL_DECODE_ADOPT_HEAP_DER = 8
CRL_DECODE_DEFAULT_OPTIONS = 0
CRL_DECODE_DONT_COPY_DER = 1
CRL_DECODE_KEEP_BAD_CRL = 4
CRL_DECODE_SKIP_ENTRIES = 2
CRL_IMPORT_BYPASS_CHECKS = 1
CRL_IMPORT_DEFAULT_OPTIONS = 0
dhKey = 4
dsaKey = 2
ecKey = 6
fortezzaKey = 3
generalName = 1
HEX_SEPARATOR_DEFAULT = ':'
keaKey = 5
NSS_INIT_COOPERATE = 960
NSS_INIT_FORCEOPEN = 8
NSS_INIT_NOCERTDB = 2
NSS_INIT_NOMODDB = 4
NSS_INIT_NOPK11FINALIZE = 256
NSS_INIT_NOROOTINIT = 16
NSS_INIT_OPTIMIZESPACE = 32
NSS_INIT_PK11RELOAD = 128
NSS_INIT_PK11THREADSAFE = 64
NSS_INIT_READONLY = 1
NSS_INIT_RESERVED = 512
nullKey = 0
OCTETS_PER_LINE_DEFAULT = 16
PK11_DIS_COULD_NOT_INIT_TOKEN = 2
PK11_DIS_NONE = 0
PK11_DIS_TOKEN_NOT_PRESENT = 4
PK11_DIS_TOKEN_VERIFY_FAILED = 3
PK11_DIS_USER_SELECTED = 1
PK11_OriginDerive = 1
PK11_OriginFortezzaHack = 3
PK11_OriginGenerated = 2
PK11_OriginNULL = 0
PK11_OriginUnwrap = 4
PKCS12_DES_56 = 131089
PKCS12_DES_EDE3_168 = 131090
PKCS12_RC2_CBC_128 = 131074
PKCS12_RC2_CBC_40 = 131073
PKCS12_RC4_128 = 131082
PKCS12_RC4_40 = 131081
relativeDistinguishedName = 2
rsaKey = 1
secCertTimeExpired = 1
secCertTimeNotValidYet = 2
secCertTimeValid = 0
SEC_CERT_NICKNAMES_ALL = 1
SEC_CERT_NICKNAMES_CA = 4
SEC_CERT_NICKNAMES_SERVER = 3
SEC_CERT_NICKNAMES_USER = 2
SEC_CRL_TYPE = 1
SEC_KRL_TYPE = 0
SEC_OID_AES_128_CBC = 184
SEC_OID_AES_128_ECB = 183
SEC_OID_AES_128_KEY_WRAP = 197
SEC_OID_AES_192_CBC = 186
SEC_OID_AES_192_ECB = 185
SEC_OID_AES_192_KEY_WRAP = 198
SEC_OID_AES_256_CBC = 188
SEC_OID_AES_256_ECB = 187
SEC_OID_AES_256_KEY_WRAP = 199
SEC_OID_ANSIX962_ECDSA_SHA1_SIGNATURE = 201
SEC_OID_ANSIX962_ECDSA_SHA224_SIGNATURE = 277
SEC_OID_ANSIX962_ECDSA_SHA256_SIGNATURE = 278
SEC_OID_ANSIX962_ECDSA_SHA384_SIGNATURE = 279
SEC_OID_ANSIX962_ECDSA_SHA512_SIGNATURE = 280
SEC_OID_ANSIX962_ECDSA_SIGNATURE_RECOMMENDED_DIGEST = 275
SEC_OID_ANSIX962_ECDSA_SIGNATURE_SPECIFIED_DIGEST = 276
SEC_OID_ANSIX962_ECDSA_SIGNATURE_WITH_SHA1_DIGEST = 201
SEC_OID_ANSIX962_EC_C2ONB191V4 = 229
SEC_OID_ANSIX962_EC_C2ONB191V5 = 230
SEC_OID_ANSIX962_EC_C2ONB239V4 = 235
SEC_OID_ANSIX962_EC_C2ONB239V5 = 236
SEC_OID_ANSIX962_EC_C2PNB163V1 = 222
SEC_OID_ANSIX962_EC_C2PNB163V2 = 223
SEC_OID_ANSIX962_EC_C2PNB163V3 = 224
SEC_OID_ANSIX962_EC_C2PNB176V1 = 225
SEC_OID_ANSIX962_EC_C2PNB208W1 = 231
SEC_OID_ANSIX962_EC_C2PNB272W1 = 237
SEC_OID_ANSIX962_EC_C2PNB304W1 = 238
SEC_OID_ANSIX962_EC_C2PNB368W1 = 240
SEC_OID_ANSIX962_EC_C2TNB191V1 = 226
SEC_OID_ANSIX962_EC_C2TNB191V2 = 227
SEC_OID_ANSIX962_EC_C2TNB191V3 = 228
SEC_OID_ANSIX962_EC_C2TNB239V1 = 232
SEC_OID_ANSIX962_EC_C2TNB239V2 = 233
SEC_OID_ANSIX962_EC_C2TNB239V3 = 234
SEC_OID_ANSIX962_EC_C2TNB359V1 = 239
SEC_OID_ANSIX962_EC_C2TNB431R1 = 241
SEC_OID_ANSIX962_EC_PRIME192V1 = 202
SEC_OID_ANSIX962_EC_PRIME192V2 = 203
SEC_OID_ANSIX962_EC_PRIME192V3 = 204
SEC_OID_ANSIX962_EC_PRIME239V1 = 205
SEC_OID_ANSIX962_EC_PRIME239V2 = 206
SEC_OID_ANSIX962_EC_PRIME239V3 = 207
SEC_OID_ANSIX962_EC_PRIME256V1 = 208
SEC_OID_ANSIX962_EC_PUBLIC_KEY = 200
SEC_OID_ANSIX9_DSA_SIGNATURE = 124
SEC_OID_ANSIX9_DSA_SIGNATURE_WITH_SHA1_DIGEST = 125
SEC_OID_AVA_COMMON_NAME = 41
SEC_OID_AVA_COUNTRY_NAME = 42
SEC_OID_AVA_DC = 48
SEC_OID_AVA_DN_QUALIFIER = 47
SEC_OID_AVA_GENERATION_QUALIFIER = 270
SEC_OID_AVA_GIVEN_NAME = 268
SEC_OID_AVA_HOUSE_IDENTIFIER = 271
SEC_OID_AVA_INITIALS = 269
SEC_OID_AVA_LOCALITY = 43
SEC_OID_AVA_ORGANIZATIONAL_UNIT_NAME = 46
SEC_OID_AVA_ORGANIZATION_NAME = 45
SEC_OID_AVA_POSTAL_ADDRESS = 265
SEC_OID_AVA_POSTAL_CODE = 266
SEC_OID_AVA_POST_OFFICE_BOX = 267
SEC_OID_AVA_PSEUDONYM = 272
SEC_OID_AVA_SERIAL_NUMBER = 262
SEC_OID_AVA_STATE_OR_PROVINCE = 44
SEC_OID_AVA_STREET_ADDRESS = 263
SEC_OID_AVA_SURNAME = 261
SEC_OID_AVA_TITLE = 264
SEC_OID_BOGUS_DSA_SIGNATURE_WITH_SHA1_DIGEST = 126
SEC_OID_BOGUS_KEY_USAGE = 173
SEC_OID_CAMELLIA_128_CBC = 288
SEC_OID_CAMELLIA_192_CBC = 289
SEC_OID_CAMELLIA_256_CBC = 290
SEC_OID_CERT_RENEWAL_LOCATOR = 177
SEC_OID_CMS_3DES_KEY_WRAP = 180
SEC_OID_CMS_EPHEMERAL_STATIC_DIFFIE_HELLMAN = 179
SEC_OID_CMS_RC2_KEY_WRAP = 181
SEC_OID_DES_CBC = 10
SEC_OID_DES_CFB = 12
SEC_OID_DES_ECB = 9
SEC_OID_DES_EDE = 14
SEC_OID_DES_EDE3_CBC = 7
SEC_OID_DES_MAC = 13
SEC_OID_DES_OFB = 11
SEC_OID_EXT_KEY_USAGE_CLIENT_AUTH = 147
SEC_OID_EXT_KEY_USAGE_CODE_SIGN = 148
SEC_OID_EXT_KEY_USAGE_EMAIL_PROTECT = 149
SEC_OID_EXT_KEY_USAGE_SERVER_AUTH = 146
SEC_OID_EXT_KEY_USAGE_TIME_STAMP = 150
SEC_OID_FORTEZZA_SKIPJACK = 153
SEC_OID_HMAC_SHA1 = 294
SEC_OID_HMAC_SHA224 = 295
SEC_OID_HMAC_SHA256 = 296
SEC_OID_HMAC_SHA384 = 297
SEC_OID_HMAC_SHA512 = 298
SEC_OID_ISO_SHA1_WITH_RSA_SIGNATURE = 301
SEC_OID_ISO_SHA_WITH_RSA_SIGNATURE = 15
SEC_OID_MD2 = 1
SEC_OID_MD4 = 2
SEC_OID_MD5 = 3
SEC_OID_MISSI_ALT_KEA = 59
SEC_OID_MISSI_DSS = 57
SEC_OID_MISSI_DSS_OLD = 55
SEC_OID_MISSI_KEA = 58
SEC_OID_MISSI_KEA_DSS = 56
SEC_OID_MISSI_KEA_DSS_OLD = 54
SEC_OID_MS_SMIME_ENCRYPTION_KEY_PREFERENCE = 190
SEC_OID_NETSCAPE_AOLSCREENNAME = 260
SEC_OID_NETSCAPE_NICKNAME = 175
SEC_OID_NETSCAPE_RECOVERY_REQUEST = 176
SEC_OID_NETSCAPE_SMIME_KEA = 152
SEC_OID_NS_CERT_EXT_BASE_URL = 64
SEC_OID_NS_CERT_EXT_CA_CERT_URL = 68
SEC_OID_NS_CERT_EXT_CA_CRL_URL = 67
SEC_OID_NS_CERT_EXT_CA_POLICY_URL = 70
SEC_OID_NS_CERT_EXT_CA_REVOCATION_URL = 66
SEC_OID_NS_CERT_EXT_CERT_RENEWAL_TIME = 77
SEC_OID_NS_CERT_EXT_CERT_RENEWAL_URL = 69
SEC_OID_NS_CERT_EXT_CERT_TYPE = 63
SEC_OID_NS_CERT_EXT_COMMENT = 75
SEC_OID_NS_CERT_EXT_ENTITY_LOGO = 72
SEC_OID_NS_CERT_EXT_HOMEPAGE_URL = 71
SEC_OID_NS_CERT_EXT_ISSUER_LOGO = 61
SEC_OID_NS_CERT_EXT_LOST_PASSWORD_URL = 76
SEC_OID_NS_CERT_EXT_NETSCAPE_OK = 60
SEC_OID_NS_CERT_EXT_REVOCATION_URL = 65
SEC_OID_NS_CERT_EXT_SCOPE_OF_USE = 178
SEC_OID_NS_CERT_EXT_SSL_SERVER_NAME = 74
SEC_OID_NS_CERT_EXT_SUBJECT_LOGO = 62
SEC_OID_NS_CERT_EXT_USER_PICTURE = 73
SEC_OID_NS_KEY_USAGE_GOVT_APPROVED = 78
SEC_OID_NS_TYPE_CERT_SEQUENCE = 53
SEC_OID_NS_TYPE_GIF = 49
SEC_OID_NS_TYPE_HTML = 52
SEC_OID_NS_TYPE_JPEG = 50
SEC_OID_NS_TYPE_URL = 51
SEC_OID_OCSP_RESPONDER = 151
SEC_OID_PKCS12 = 100
SEC_OID_PKCS12_BAG_IDS = 103
SEC_OID_PKCS12_CERT_AND_CRL_BAG_ID = 111
SEC_OID_PKCS12_CERT_BAG_IDS = 104
SEC_OID_PKCS12_ENVELOPING_IDS = 108
SEC_OID_PKCS12_ESPVK_IDS = 102
SEC_OID_PKCS12_KEY_BAG_ID = 110
SEC_OID_PKCS12_KEY_USAGE = 81
SEC_OID_PKCS12_MODE_IDS = 101
SEC_OID_PKCS12_OIDS = 105
SEC_OID_PKCS12_PBE_IDS = 106
SEC_OID_PKCS12_PBE_WITH_SHA1_AND_128_BIT_RC2_CBC = 118
SEC_OID_PKCS12_PBE_WITH_SHA1_AND_128_BIT_RC4 = 115
SEC_OID_PKCS12_PBE_WITH_SHA1_AND_40_BIT_RC2_CBC = 119
SEC_OID_PKCS12_PBE_WITH_SHA1_AND_40_BIT_RC4 = 116
SEC_OID_PKCS12_PBE_WITH_SHA1_AND_TRIPLE_DES_CBC = 117
SEC_OID_PKCS12_PKCS8_KEY_SHROUDING = 109
SEC_OID_PKCS12_PKCS8_SHROUDED_KEY_BAG_ID = 161
SEC_OID_PKCS12_RSA_ENCRYPTION_WITH_128_BIT_RC4 = 120
SEC_OID_PKCS12_RSA_ENCRYPTION_WITH_40_BIT_RC4 = 121
SEC_OID_PKCS12_RSA_ENCRYPTION_WITH_TRIPLE_DES = 122
SEC_OID_PKCS12_RSA_SIGNATURE_WITH_SHA1_DIGEST = 123
SEC_OID_PKCS12_SAFE_CONTENTS_ID = 160
SEC_OID_PKCS12_SDSI_CERT_BAG = 114
SEC_OID_PKCS12_SECRET_BAG_ID = 112
SEC_OID_PKCS12_SIGNATURE_IDS = 107
SEC_OID_PKCS12_V1_CERT_BAG_ID = 164
SEC_OID_PKCS12_V1_CRL_BAG_ID = 165
SEC_OID_PKCS12_V1_KEY_BAG_ID = 162
SEC_OID_PKCS12_V1_PKCS8_SHROUDED_KEY_BAG_ID = 163
SEC_OID_PKCS12_V1_SAFE_CONTENTS_BAG_ID = 167
SEC_OID_PKCS12_V1_SECRET_BAG_ID = 166
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_128_BIT_RC2_CBC = 158
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_128_BIT_RC4 = 154
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_2KEY_TRIPLE_DES_CBC = 157
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_3KEY_TRIPLE_DES_CBC = 156
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_40_BIT_RC2_CBC = 159
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_40_BIT_RC4 = 155
SEC_OID_PKCS12_X509_CERT_CRL_BAG = 113
SEC_OID_PKCS1_MD2_WITH_RSA_ENCRYPTION = 17
SEC_OID_PKCS1_MD4_WITH_RSA_ENCRYPTION = 18
SEC_OID_PKCS1_MD5_WITH_RSA_ENCRYPTION = 19
SEC_OID_PKCS1_RSA_ENCRYPTION = 16
SEC_OID_PKCS1_SHA1_WITH_RSA_ENCRYPTION = 20
SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION = 194
SEC_OID_PKCS1_SHA384_WITH_RSA_ENCRYPTION = 195
SEC_OID_PKCS1_SHA512_WITH_RSA_ENCRYPTION = 196
SEC_OID_PKCS5_PBES2 = 292
SEC_OID_PKCS5_PBE_WITH_MD2_AND_DES_CBC = 21
SEC_OID_PKCS5_PBE_WITH_MD5_AND_DES_CBC = 22
SEC_OID_PKCS5_PBE_WITH_SHA1_AND_DES_CBC = 23
SEC_OID_PKCS5_PBKDF2 = 291
SEC_OID_PKCS5_PBMAC1 = 293
SEC_OID_PKCS7 = 24
SEC_OID_PKCS7_DATA = 25
SEC_OID_PKCS7_DIGESTED_DATA = 29
SEC_OID_PKCS7_ENCRYPTED_DATA = 30
SEC_OID_PKCS7_ENVELOPED_DATA = 27
SEC_OID_PKCS7_SIGNED_DATA = 26
SEC_OID_PKCS7_SIGNED_ENVELOPED_DATA = 28
SEC_OID_PKCS9_CHALLENGE_PASSWORD = 37
SEC_OID_PKCS9_CONTENT_TYPE = 33
SEC_OID_PKCS9_COUNTER_SIGNATURE = 36
SEC_OID_PKCS9_EMAIL_ADDRESS = 31
SEC_OID_PKCS9_EXTENDED_CERTIFICATE_ATTRIBUTES = 39
SEC_OID_PKCS9_EXTENSION_REQUEST = 274
SEC_OID_PKCS9_FRIENDLY_NAME = 171
SEC_OID_PKCS9_LOCAL_KEY_ID = 172
SEC_OID_PKCS9_MESSAGE_DIGEST = 34
SEC_OID_PKCS9_SDSI_CERT = 169
SEC_OID_PKCS9_SIGNING_TIME = 35
SEC_OID_PKCS9_SMIME_CAPABILITIES = 40
SEC_OID_PKCS9_UNSTRUCTURED_ADDRESS = 38
SEC_OID_PKCS9_UNSTRUCTURED_NAME = 32
SEC_OID_PKCS9_X509_CERT = 168
SEC_OID_PKCS9_X509_CRL = 170
SEC_OID_PKIX_CA_ISSUERS = 273
SEC_OID_PKIX_CA_REPOSITORY = 300
SEC_OID_PKIX_CPS_POINTER_QUALIFIER = 128
SEC_OID_PKIX_OCSP = 130
SEC_OID_PKIX_OCSP_ARCHIVE_CUTOFF = 136
SEC_OID_PKIX_OCSP_BASIC_RESPONSE = 131
SEC_OID_PKIX_OCSP_CRL = 133
SEC_OID_PKIX_OCSP_NONCE = 132
SEC_OID_PKIX_OCSP_NO_CHECK = 135
SEC_OID_PKIX_OCSP_RESPONSE = 134
SEC_OID_PKIX_OCSP_SERVICE_LOCATOR = 137
SEC_OID_PKIX_REGCTRL_AUTHENTICATOR = 139
SEC_OID_PKIX_REGCTRL_OLD_CERT_ID = 142
SEC_OID_PKIX_REGCTRL_PKIPUBINFO = 140
SEC_OID_PKIX_REGCTRL_PKI_ARCH_OPTIONS = 141
SEC_OID_PKIX_REGCTRL_PROTOCOL_ENC_KEY = 143
SEC_OID_PKIX_REGCTRL_REGTOKEN = 138
SEC_OID_PKIX_REGINFO_CERT_REQUEST = 145
SEC_OID_PKIX_REGINFO_UTF8_PAIRS = 144
SEC_OID_PKIX_TIMESTAMPING = 299
SEC_OID_PKIX_USER_NOTICE_QUALIFIER = 129
SEC_OID_RC2_CBC = 5
SEC_OID_RC4 = 6
SEC_OID_RC5_CBC_PAD = 8
SEC_OID_RFC1274_MAIL = 99
SEC_OID_RFC1274_UID = 98
SEC_OID_SDN702_DSA_SIGNATURE = 189
SEC_OID_SECG_EC_SECP112R1 = 209
SEC_OID_SECG_EC_SECP112R2 = 210
SEC_OID_SECG_EC_SECP128R1 = 211
SEC_OID_SECG_EC_SECP128R2 = 212
SEC_OID_SECG_EC_SECP160K1 = 213
SEC_OID_SECG_EC_SECP160R1 = 214
SEC_OID_SECG_EC_SECP160R2 = 215
SEC_OID_SECG_EC_SECP192K1 = 216
SEC_OID_SECG_EC_SECP192R1 = 202
SEC_OID_SECG_EC_SECP224K1 = 217
SEC_OID_SECG_EC_SECP224R1 = 218
SEC_OID_SECG_EC_SECP256K1 = 219
SEC_OID_SECG_EC_SECP256R1 = 208
SEC_OID_SECG_EC_SECP384R1 = 220
SEC_OID_SECG_EC_SECP521R1 = 221
SEC_OID_SECG_EC_SECT113R1 = 242
SEC_OID_SECG_EC_SECT113R2 = 243
SEC_OID_SECG_EC_SECT131R1 = 244
SEC_OID_SECG_EC_SECT131R2 = 245
SEC_OID_SECG_EC_SECT163K1 = 246
SEC_OID_SECG_EC_SECT163R1 = 247
SEC_OID_SECG_EC_SECT163R2 = 248
SEC_OID_SECG_EC_SECT193R1 = 249
SEC_OID_SECG_EC_SECT193R2 = 250
SEC_OID_SECG_EC_SECT233K1 = 251
SEC_OID_SECG_EC_SECT233R1 = 252
SEC_OID_SECG_EC_SECT239K1 = 253
SEC_OID_SECG_EC_SECT283K1 = 254
SEC_OID_SECG_EC_SECT283R1 = 255
SEC_OID_SECG_EC_SECT409K1 = 256
SEC_OID_SECG_EC_SECT409R1 = 257
SEC_OID_SECG_EC_SECT571K1 = 258
SEC_OID_SECG_EC_SECT571R1 = 259
SEC_OID_SHA1 = 4
SEC_OID_SHA256 = 191
SEC_OID_SHA384 = 192
SEC_OID_SHA512 = 193
SEC_OID_SMIME_ENCRYPTION_KEY_PREFERENCE = 182
SEC_OID_UNKNOWN = 0
SEC_OID_VERISIGN_USER_NOTICES = 127
SEC_OID_X500_RSA_ENCRYPTION = 97
SEC_OID_X509_AUTH_INFO_ACCESS = 93
SEC_OID_X509_AUTH_KEY_ID = 91
SEC_OID_X509_BASIC_CONSTRAINTS = 85
SEC_OID_X509_CERTIFICATE_POLICIES = 88
SEC_OID_X509_CERT_ISSUER = 284
SEC_OID_X509_CRL_DIST_POINTS = 87
SEC_OID_X509_CRL_NUMBER = 94
SEC_OID_X509_DELTA_CRL_INDICATOR = 282
SEC_OID_X509_EXT_KEY_USAGE = 92
SEC_OID_X509_FRESHEST_CRL = 285
SEC_OID_X509_HOLD_INSTRUCTION_CODE = 281
SEC_OID_X509_INHIBIT_ANY_POLICY = 286
SEC_OID_X509_INVALID_DATE = 96
SEC_OID_X509_ISSUER_ALT_NAME = 84
SEC_OID_X509_ISSUING_DISTRIBUTION_POINT = 283
SEC_OID_X509_KEY_USAGE = 81
SEC_OID_X509_NAME_CONSTRAINTS = 86
SEC_OID_X509_POLICY_CONSTRAINTS = 90
SEC_OID_X509_POLICY_MAPPINGS = 89
SEC_OID_X509_PRIVATE_KEY_USAGE_PERIOD = 82
SEC_OID_X509_REASON_CODE = 95
SEC_OID_X509_SUBJECT_ALT_NAME = 83
SEC_OID_X509_SUBJECT_DIRECTORY_ATTR = 79
SEC_OID_X509_SUBJECT_INFO_ACCESS = 287
SEC_OID_X509_SUBJECT_KEY_ID = 80
SEC_OID_X942_DIFFIE_HELMAN_KEY = 174
ssl_kea_dh = 2
ssl_kea_ecdh = 4
ssl_kea_fortezza = 3
ssl_kea_null = 0
ssl_kea_rsa = 1
# functions
def algtag_to_mechanism(algtag): # real signature unknown; restored from __doc__
"""
algtag_to_mechanism(algtag) -> mechanism
:Parameters:
algtag : int
algorithm tag (e.g. SEC_OID_*)
Returns the key mechanism enumeration constant (CKM_*)
given an algorithm tag. Throws a KeyError exception if the
algorithm tag is invalid.
"""
pass
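# Sketch (assumes `from nss import nss` and an initialized NSS; the AES
# mapping shown is an expectation, not verified here):
#     nss.algtag_to_mechanism(nss.SEC_OID_AES_128_CBC)  # -> nss.CKM_AES_CBC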
def cert_crl_reason_from_name(*args, **kwargs): # real signature unknown
"""
crl_reason_from_name(name) -> int
:Parameters:
name : string
name of CERTCRLEntryReasonCode constant
Given the name of a CERTCRLEntryReasonCode constant
return its integer constant
The string comparison is case insensitive and will match with
or without the crlEntry prefix
"""
pass
def cert_crl_reason_name(*args, **kwargs): # real signature unknown
"""
crl_reason_name(reason) -> string
:Parameters:
reason : int
CERTCRLEntryReasonCode constant
Given a CERTCRLEntryReasonCode constant
return its name as a string
"""
pass
def cert_general_name_type_from_name(*args, **kwargs): # real signature unknown
"""
general_name_type_from_name(name) -> int
:Parameters:
name : string
name of CERTGeneralNameType constant
Given the name of a CERTGeneralNameType constant
return its integer constant
The string comparison is case insensitive and will match with
or without the cert prefix
"""
pass
def cert_general_name_type_name(*args, **kwargs): # real signature unknown
"""
general_name_type_name(type) -> string
:Parameters:
type : int
CERTGeneralNameType constant
Given a CERTGeneralNameType constant
return its name as a string
"""
pass
def cert_usage_flags(flags): # real signature unknown; restored from __doc__
"""
cert_usage_flags(flags) -> ['flag_name', ...]
:Parameters:
flags : int
certificateUsage* bit flags
Given an integer with certificateUsage*
(e.g. nss.certificateUsageSSLServer) bit flags return a sorted
list of their string names.
"""
pass
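# Sketch (assumes `from nss import nss`; the exact name strings returned are
# assumed to match the constant names):
#     nss.cert_usage_flags(nss.certificateUsageSSLClient |
#                          nss.certificateUsageSSLServer)
#     # -> ['certificateUsageSSLClient', 'certificateUsageSSLServer']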
def create_context_by_sym_key(mechanism, operation, sym_key, sec_param=None): # real signature unknown; restored from __doc__
"""
create_context_by_sym_key(mechanism, operation, sym_key, sec_param=None) -> PK11Context
:Parameters:
mechanism : int
key mechanism enumeration constant (CKM_*)
operation : int
type of operation this context will be doing. A (CKA_*) constant
(e.g. CKA_ENCRYPT, CKA_DECRYPT, CKA_SIGN, CKA_VERIFY, CKA_DIGEST)
sym_key : PK11SymKey object
symmetric key
sec_param : SecItem object or None
mechanism parameters used to build this context or None.
Create a context from a symmetric key.
"""
return PK11Context
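# Symmetric-encryption sketch (assumes `from nss import nss`, an initialized
# NSS database, and python-nss's PK11Slot.key_gen / PK11Context.cipher_op /
# PK11Context.digest_final methods):
#     mechanism = nss.CKM_DES3_CBC_PAD
#     slot = nss.get_best_slot(mechanism)
#     sym_key = slot.key_gen(mechanism, None, 24)           # 24-byte 3DES key
#     param = nss.generate_new_param(mechanism, sym_key)
#     ctx = nss.create_context_by_sym_key(mechanism, nss.CKA_ENCRYPT,
#                                         sym_key, param)
#     ciphertext = ctx.cipher_op('attack at dawn') + ctx.digest_final()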
def create_digest_context(hash_alg): # real signature unknown; restored from __doc__
"""
create_digest_context(hash_alg) -> PK11Context
:Parameters:
hash_alg : int
hash algorithm enumeration (SEC_OID_*)
e.g.: SEC_OID_MD5, SEC_OID_SHA1, SEC_OID_SHA256, SEC_OID_SHA512, etc.
Create a context for performing digest (hash) operations.
"""
return PK11Context
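# Digest-context sketch (assumes `from nss import nss` and python-nss's
# PK11Context.digest_begin / digest_op / digest_final methods):
#     ctx = nss.create_digest_context(nss.SEC_OID_SHA256)
#     ctx.digest_begin()
#     ctx.digest_op('abc')
#     digest = ctx.digest_final()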
def data_to_hex(data, octets_per_line=0, separator=None): # real signature unknown; restored from __doc__
"""
data_to_hex(data, octets_per_line=0, separator=':') -> string or list of strings
:Parameters:
data : buffer
Binary data
octets_per_line : integer
Number of octets formatted on one line, if 0 then
return a single string instead of an array of lines
separator : string
String used to separate each octet
If None it will be as if the empty string had been
passed and no separator will be used.
Format the binary data as hex string(s).
Either a list of strings is returned or a single string.
If octets_per_line is greater than zero then a list of
strings will be returned where each string contains
octets_per_line number of octets (except for the last
string in the list which will contain the remainder of the
octets). Returning a list of "lines" makes it convenient
for a caller to format a block of hexadecimal data with line
wrapping. If octets_per_line is greater than zero indicating
a list result is desired a list is always returned even if
the number of octets would produce only a single line.
If octets_per_line is zero then a single string is returned,
(no line splitting is performed). This is the default.
The separator string is used to separate each octet. If None
it will be as if the empty string had been passed and no
separator will be used.
"""
return ""
def decode_der_crl(der_crl, type=None, decode_options=None): # real signature unknown; restored from __doc__
"""
decode_der_crl(der_crl, type=SEC_CRL_TYPE, decode_options=CRL_DECODE_DEFAULT_OPTIONS) -> SignedCRL
:Parameters:
der_crl : SecItem object
DER encoded CRL data encapsulated in a SECItem.
type : int
revocation list type
may be one of:
- SEC_CRL_TYPE
- SEC_KRL_TYPE
decode_options : int
bit-wise OR of the following flags:
- CRL_DECODE_DONT_COPY_DER
- CRL_DECODE_SKIP_ENTRIES
- CRL_DECODE_KEEP_BAD_CRL
- CRL_DECODE_ADOPT_HEAP_DER
or use CRL_DECODE_DEFAULT_OPTIONS
"""
return SignedCRL
def der_universal_secitem_fmt_lines(sec_item, level=0, octets_per_line=0, separator=None): # real signature unknown; restored from __doc__
"""
der_universal_secitem_fmt_lines(sec_item, level=0, octets_per_line=0, separator=':') -> list of (indent, string) tuples
:Parameters:
sec_item : SecItem object
A SecItem containing a DER encoded ASN1 universal type
level : integer
Initial indentation level, all subsequent indents are relative
to this starting level.
octets_per_line : integer
Number of octets formatted on one line, if 0 then
return a single string instead of an array of lines
separator : string
String used to separate each octet
If None it will be as if the empty string had been
passed and no separator will be used.
Given a SecItem in DER format which encodes a ASN.1 universal
type convert the item to a string and return a list of
(indent, string) tuples.
"""
return []
def dump_certificate_cache_info(): # real signature unknown; restored from __doc__
"""
dump_certificate_cache_info()
Dump the contents of the certificate cache and the temporary
cert store to stdout.
Use this as a debugging aid to detect leaked references of certs at
shutdown time. For example if `nss.nss_shutdown()` throws a
SEC_ERROR_BUSY exception.
"""
pass
def find_cert_from_nickname(nickname, *user_data): # real signature unknown; restored from __doc__
"""
find_cert_from_nickname(nickname, [user_data1, ...]) -> Certificate
:Parameters:
nickname : string
certificate nickname to search for
user_dataN : object ...
zero or more caller supplied parameters which will
be passed to the password callback function
A nickname is an alias for a certificate subject. There may be
multiple certificates with the same subject, and hence the same
nickname. This function will return the newest certificate that
matches the subject, based on the NotBefore / NotAfter fields of the
certificate.
"""
return Certificate
def find_key_by_any_cert(cert, *user_data): # real signature unknown; restored from __doc__
"""
find_key_by_any_cert(cert, [user_data1, ...]) -> Certificate
:Parameters:
cert : Certificate object
certificate whose private key is being searched for
user_dataN : object ...
zero or more caller supplied parameters which will
be passed to the password callback function
Finds the private key associated with a specified certificate in any
available slot.
"""
return Certificate
def find_slot_by_name(name): # real signature unknown; restored from __doc__
"""
find_slot_by_name(name) -> `PK11Slot`
:Parameters:
name : string
slot name
Given a slot name return a `PK11Slot` object.
"""
pass
def fingerprint_format_lines(data, level=0): # real signature unknown; restored from __doc__
"""
fingerprint_format_lines(data, level=0) -> [(level, ...), ...]
:Parameters:
data : SecItem or str or any buffer compatible object
Data the fingerprint digests are computed from, must be in DER format
level : integer
Initial indentation level, all subsequent indents are relative
to this starting level.
Generates digests of data (i.e. fingerprint) and formats
it into line tuples for text output.
"""
pass
def generate_new_param(mechanism, sym_key=None): # real signature unknown; restored from __doc__
"""
generate_new_param(mechanism, sym_key=None) -> SecItem
:Parameters:
mechanism : int
key mechanism enumeration constant (CKM_*)
sym_key : PK11SymKey object or None
symmetric key or None
Return a SecItem containing an encryption param.
"""
return SecItem
def generate_random(num_bytes): # real signature unknown; restored from __doc__
"""
generate_random(num_bytes) -> string
:Parameters:
num_bytes : integer
Number of bytes to generate (must be non-negative)
Generates random data.
"""
return ""
def get_best_slot(mechanism, *user_data): # real signature unknown; restored from __doc__
"""
get_best_slot(mechanism, [user_data1, ...]) -> PK11Slot
:Parameters:
mechanism : int
key mechanism enumeration constant (CKM_*)
user_dataN : object ...
zero or more caller supplied parameters which will
be passed to the password callback function
Find the best slot which supports the given mechanism.
"""
return PK11Slot
def get_block_size(mechanism, sec_param=None): # real signature unknown; restored from __doc__
"""
get_block_size(mechanism, sec_param=None) -> int
:Parameters:
mechanism : int
key mechanism enumeration constant (CKM_*)
sec_param : SecItem object or None
mechanism parameters used to build this context or None.
Get the mechanism block size
"""
return 0
def get_cert_nicknames(certdb, what, *user_data): # real signature unknown; restored from __doc__
"""
get_cert_nicknames(certdb, what, [user_data1, ...]) -> name0, ...
:Parameters:
certdb : CertDB object
CertDB certificate database object
what : integer
one of:
- SEC_CERT_NICKNAMES_ALL
- SEC_CERT_NICKNAMES_USER
- SEC_CERT_NICKNAMES_SERVER
- SEC_CERT_NICKNAMES_CA
user_dataN : object
zero or more caller supplied parameters which will
be passed to the password callback function
Returns a tuple of the nicknames of the certificates in a specified
certificate database.
"""
pass
def get_default_certdb(): # real signature unknown; restored from __doc__
"""
get_default_certdb()
Returns the default certificate database as a CertDB object
"""
pass
def get_internal_key_slot(): # real signature unknown; restored from __doc__
"""
get_internal_key_slot() -> PK11Slot
Get the default internal key slot.
"""
return PK11Slot
def get_internal_slot(): # real signature unknown; restored from __doc__
"""
get_internal_slot() -> PK11Slot
Get the default internal slot.
"""
return PK11Slot
def get_iv_length(mechanism): # real signature unknown; restored from __doc__
"""
get_iv_length(mechanism) -> int
:Parameters:
mechanism : int
key mechanism enumeration constant (CKM_*)
Returns the length of the mechanism's initialization vector.
"""
pass
def get_pad_mechanism(mechanism): # real signature unknown; restored from __doc__
"""
get_pad_mechanism(mechanism) -> int
:Parameters:
mechanism : int
key mechanism enumeration constant (CKM_*)
Determine appropriate mechanism to use when padding is required.
If the mechanism does not map to a padding mechanism return the mechanism.
"""
return 0
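# Sketch (assumes `from nss import nss`; the CBC -> CBC_PAD mapping is the
# expected NSS behavior, not verified here):
#     nss.get_pad_mechanism(nss.CKM_AES_CBC)  # -> nss.CKM_AES_CBC_PAD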
def hash_buf(hash_alg, data): # real signature unknown; restored from __doc__
"""
hash_buf(hash_alg, data) --> digest
:Parameters:
hash_alg : int
hash algorithm enumeration (SEC_OID_*)
e.g.: SEC_OID_MD5, SEC_OID_SHA1, SEC_OID_SHA256, SEC_OID_SHA512, etc.
data : buffer or string
buffer the digest will be computed for
Computes a digest according to the hash_alg type.
Return the digest data as buffer object.
Note, if a hexadecimal string representation is desired then pass
result to data_to_hex()
"""
pass
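# Sketch (assumes `from nss import nss`; the digest shown is the standard
# SHA-256 test vector for 'abc'):
#     digest = nss.hash_buf(nss.SEC_OID_SHA256, 'abc')
#     nss.data_to_hex(digest, separator='')
#     # -> 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'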
def import_crl(slot, der_crl, url, type, import_options, decode_options, *user_data): # real signature unknown; restored from __doc__
"""
import_crl(slot, der_crl, url, type, import_options, decode_options, [user_data1, ...]) -> SignedCRL
:Parameters:
slot : PK11Slot object
designated PK11 slot
der_crl : SecItem object
signed DER CRL data encapsulated in a SecItem object.
url : string
URL of the CRL
type : int
revocation list type
may be one of:
- SEC_CRL_TYPE
- SEC_KRL_TYPE
import_options : int
bit-wise OR of the following flags:
- CRL_IMPORT_BYPASS_CHECKS
or use CRL_IMPORT_DEFAULT_OPTIONS
decode_options : int
bit-wise OR of the following flags:
- CRL_DECODE_DONT_COPY_DER
- CRL_DECODE_SKIP_ENTRIES
- CRL_DECODE_KEEP_BAD_CRL
- CRL_DECODE_ADOPT_HEAP_DER
or use CRL_DECODE_DEFAULT_OPTIONS
user_dataN : object
zero or more caller supplied parameters which will
be passed to the password callback function
"""
return SignedCRL
def import_sym_key(slot, mechanism, origin, operation, key_data, *user_data): # real signature unknown; restored from __doc__
"""
import_sym_key(slot, mechanism, origin, operation, key_data, [user_data1, ...]) -> PK11SymKey
:Parameters:
slot : PK11Slot object
designated PK11 slot
mechanism : int
key mechanism enumeration constant (CKM_*)
origin : int
PK11 origin enumeration (PK11Origin*)
e.g. PK11_OriginDerive, PK11_OriginUnwrap, etc.
operation : int
type of operation this context will be doing. A (CKA_*) constant
(e.g. CKA_ENCRYPT, CKA_DECRYPT, CKA_SIGN, CKA_VERIFY, CKA_DIGEST)
key_data: SecItem object
key data encapsulated in a SECItem used to build the symmetric key.
user_dataN : object ...
zero or more caller supplied parameters which will
be passed to the password callback function
Create a PK11SymKey from data.
"""
return PK11SymKey
def indented_format(line_fmt_tuples, indent_len=4): # real signature unknown; restored from __doc__
"""
indented_format(line_fmt_tuples, indent_len=4) -> string
The function supports the display of complex objects which may be
composed of other complex objects. There is often a need to output
section headers or single strings and lists of <attribute,value> pairs
(the attribute in this discussion is called a label), or even blank
lines. All of these items should line up in columns at different
indentation levels in order to visually see the structure.
It would not be flexible enough to have object formatting routines
which simply returned a single string with all the indentation and
formatting pre-applied. The indentation width may not be what is
desired. Or more importantly you might not be outputting to text
display. It might be a GUI which desires to display the
information. Most GUI's want to handle each string separately and
control indentation and the visibility of each item (e.g. a tree
control).
At the same time we want to satisfy the need for easy and simple text
output. This routine will do that, e.g.:
print indented_format(obj.format_lines())
To accommodate necessary flexibility the object formatting methods
(format_lines()) return a list of tuples. Each tuple represents a
single line with the first tuple item being the indentation level for
the line. There may be 0,1 or 2 additional strings in the tuple which
are to be output on the line. A single string is usually one of two
things: either a section header or data that has been continued onto
multiple lines. Two strings usually represent an <attribute,value> pair
with the first string being a label (e.g. attribute name).
Each tuple may be:
(int,)
1-value tuple, no strings, e.g. blank line.
(int, string)
2-value tuple, output string at indent level.
(int, string, string)
3-value tuple, first string is a label, second string is a
value. Starting at the indent level output the label, then
follow with the value. By keeping the label separate from the
value the output formatter may elect to align the values in
vertical columns for adjacent lines.
Example::
# This list of tuples,
[(0, 'Constraints'),
(1, 'min:', '0'),
(1, 'max:', '100'),
(1, 'Filter Data:'),
(2, 'ab bc de f0 12 34 56 78 9a bc de f0'),
(2, '12 34 56 78 9a bc de f0 12 34 56 78')
]
# would produce this output
Constraints
min: 0
max: 100
Filter Data:
ab bc de f0 12 34 56 78 9a bc de f0
12 34 56 78 9a bc de f0 12 34 56 78
:Parameters:
line_fmt_tuples : [(level, ...),...]
A list of tuples. First tuple value is the indentation level
followed by optional strings for the line.
indent_len : int
Number of space characters repeated for each level and
prepended to the line string.
"""
return ""
def is_fips(): # real signature unknown; restored from __doc__
"""
pk11_is_fips() -> bool
Returns True if the internal module has FIPS enabled, False otherwise.
"""
return False
def key_mechanism_type_from_name(name): # real signature unknown; restored from __doc__
"""
key_mechanism_type_from_name(name) -> int
:Parameters:
name : string
name of key mechanism enumeration constant (CKM_*)
Given the name of a key mechanism enumeration constant (CKM_*)
return its integer constant
The string comparison is case insensitive and will match with
or without the CKM_ prefix
"""
return 0
def key_mechanism_type_name(mechanism): # real signature unknown; restored from __doc__
"""
key_mechanism_type_name(mechanism) -> string
:Parameters:
mechanism : int
key mechanism enumeration constant (CKM_*)
Given a key mechanism enumeration constant (CKM_*)
return its name as a string
"""
return ""
def make_line_fmt_tuples(level, obj): # real signature unknown; restored from __doc__
"""
make_line_fmt_tuples(level, obj) -> [(level, str), ...]
:Parameters:
obj : object
If obj is a tuple or list then each member will be wrapped
in a 2-tuple of (level, str). If obj is a scalar object
then obj will be wrapped in a 2-tuple of (level, obj)
level : integer
Initial indentation level, all subsequent indents are relative
to this starting level.
Return a list of line formatted tuples suitable for passing to
`indented_format()`. Each tuple consists of an integer
level value and a string object. This is equivalent to:
[(level, str(x)) for x in obj].
As a special case convenience if obj is a scalar object (i.e.
not a list or tuple) then [(level, str(obj))] will be returned.
"""
pass
def md5_digest(data): # real signature unknown; restored from __doc__
"""
md5_digest(data) --> digest
:Parameters:
data : buffer or string
buffer the digest will be computed for
Returns 16 octet MD5 digest data as buffer object.
Note, if a hexadecimal string representation is desired then pass
result to data_to_hex()
"""
pass
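# Sketch (assumes `from nss import nss`; MD5('abc') is the RFC 1321 test vector):
#     nss.data_to_hex(nss.md5_digest('abc'), separator='')
#     # -> '900150983cd24fb0d6963f7d28e17f72'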
def mechanism_to_algtag(mechanism): # real signature unknown; restored from __doc__
"""
mechanism_to_algtag(mechanism) -> algtag
:Parameters:
mechanism : int
key mechanism enumeration constant (CKM_*)
Returns the algtag given a key mechanism enumeration constant (CKM_*).
Throws a KeyError exception if the mechanism is invalid.
"""
pass
def need_pw_init(): # real signature unknown; restored from __doc__
"""
pk11_need_pw_init() -> bool
Returns True if the internal slot needs to be initialized, False otherwise.
The internal slot token should be initialized if:
The token is not initialized
`PK11Slot.need_login()` == True and `PK11Slot.need_user_init()` == True
Or
The token has a NULL password.
`PK11Slot.need_login()` == False and `PK11Slot.need_user_init()` == False
+------------------+------------------------+---------------------+
|CKF_LOGIN_REQUIRED|CKF_USER_PIN_INITIALIZED|CKF_TOKEN_INITIALIZED|
+==================+========================+=====================+
|      False       |         False          |        True         |
+------------------+------------------------+---------------------+
|       True       |         False          |        False        |
+------------------+------------------------+---------------------+
|      False       |          True          |        True         |
+------------------+------------------------+---------------------+
|       True       |          True          |        True         |
+------------------+------------------------+---------------------+
`PK11Slot.need_login()` == CKF_LOGIN_REQUIRED
`PK11Slot.need_user_init()` == !CKF_USER_PIN_INITIALIZED
"""
return False
def nss_init(cert_dir): # real signature unknown; restored from __doc__
"""
nss_init(cert_dir)
:Parameters:
cert_dir : string
Pathname of the directory where the certificate, key, and
security module databases reside.
Sets up configuration files and performs other tasks required to run
Network Security Services. `nss.nss_init()` differs from
`nss.nss_init_read_write()` because the internal PK11 slot (see
`nss.get_internal_slot()`) is created in Read Only (RO) mode as
opposed to Read Write (RW) mode.
"""
pass
def nss_initialize(cert_dir=None, cert_prefix=None, key_prefix=None, secmod_name=None, flags=0): # real signature unknown; restored from __doc__
"""
nss_initialize(cert_dir=None, cert_prefix=None, key_prefix=None, secmod_name=None, flags=0)
:Parameters:
cert_dir : string
Pathname of the directory where the certificate, key, and
security module databases reside.
cert_prefix : string
Prefix added to the beginning of the certificate database,
for example, "https-server1-".
key_prefix : string
Prefix added to the beginning of the key database,
for example, "https-server1-".
secmod_name : string
Name of the security module database,
usually "secmod.db".
flags
Bit flags that specify how NSS should be initialized.
`nss_initialize()` initializes NSS. It is more flexible than `nss_init()`,
`nss_init_read_write()`, and `nss_init_nodb()`. If any of those simpler NSS
initialization functions suffices for your needs, call that instead.
By default `nss_initialize()` and `nss_init_context()` open the
internal PK11 slot (see `get_internal_slot()`) in Read Write (RW) mode
as opposed to `nss_init()` which opens it in Read Only (RO) mode. If
you want RO mode you pass the `NSS_INIT_READONLY` flag.
The flags parameter is a bitwise OR of the following flags:
NSS_INIT_READONLY
Open the databases read only.
NSS_INIT_NOCERTDB
Don't open the cert DB and key DB's, just initialize the volatile
certdb.
NSS_INIT_NOMODDB
Don't open the security module DB, just initialize the PKCS #11 module.
NSS_INIT_FORCEOPEN
Continue to force initializations even if the databases cannot be
opened.
NSS_INIT_NOROOTINIT
Don't try to look for the root certs module automatically.
NSS_INIT_OPTIMIZESPACE
Optimize for space instead of speed. Use smaller tables and caches.
NSS_INIT_PK11THREADSAFE
Only load PKCS#11 modules that are thread-safe, i.e., that support
locking - either OS locking or NSS-provided locks . If a PKCS#11 module
isn't thread-safe, don't serialize its calls; just don't load it
instead. This is necessary if another piece of code is using the same
PKCS#11 modules that NSS is accessing without going through NSS, for
example, the Java SunPKCS11 provider.
NSS_INIT_PK11RELOAD
Ignore the CKR_CRYPTOKI_ALREADY_INITIALIZED error when loading PKCS#11
modules. This is necessary if another piece of code is using the same
PKCS#11 modules that NSS is accessing without going through NSS, for
example, Java SunPKCS11 provider.
NSS_INIT_NOPK11FINALIZE
Never call C_Finalize on any PKCS#11 module. This may be necessary in
order to ensure continuous operation and proper shutdown sequence if
another piece of code is using the same PKCS#11 modules that NSS is
accessing without going through NSS, for example, Java SunPKCS11
provider. The following limitation applies when this is set:
SECMOD_WaitForAnyTokenEvent will not use C_WaitForSlotEvent, in order
to prevent the need for C_Finalize. This call will be emulated instead.
NSS_INIT_RESERVED
Currently has no effect, but may be used in the future to trigger
better cooperation between PKCS#11 modules used by both NSS and the
Java SunPKCS11 provider. This should occur after a new flag is defined
for C_Initialize by the PKCS#11 working group.
NSS_INIT_COOPERATE
Sets the above four recommended options for applications that use both
NSS and the Java SunPKCS11 provider.
Hint: You can obtain a printable representation of the flags via `nss_init_flags`.
"""
pass
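# Hedged sketch: OR-ing the NSS_INIT_* bit flags documented above and
# inspecting them with nss_init_flags(); '/path/to/nssdb' is a hypothetical
# database directory.
def _example_nss_initialize():
    flags = NSS_INIT_READONLY | NSS_INIT_NOROOTINIT
    print(nss_init_flags(flags))         # sorted list of flag names
    nss_initialize('/path/to/nssdb', flags=flags)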
def nss_init_context(cert_dir=None, cert_prefix=None, key_prefix=None, secmod_name=None, init_params=None, flags=0): # real signature unknown; restored from __doc__
"""
nss_init_context(cert_dir=None, cert_prefix=None, key_prefix=None, secmod_name=None, init_params=None, flags=0) -> `InitContext`
:Parameters:
cert_dir : string
Pathname of the directory where the certificate, key, and
security module databases reside.
cert_prefix : string
Prefix added to the beginning of the certificate database,
for example,"https-server1-".
key_prefix : string
Prefix added to the beginning of the key database,
for example, "https-server1-".
secmod_name : string
Name of the security module database,
usually "secmod.db".
init_params : `InitContext` object
Object with a set of initialization parameters.
See `InitContext`.
flags
Bit flags that specify how NSS should be initialized.
`nss_init_context()` initializes NSS within a context and returns a
`InitContext` object. Contexts are used when multiple entities within
a single process wish to use NSS without colliding such as
libraries.
By default `nss_initialize()` and `nss_init_context()` open the
internal PK11 slot (see `get_internal_slot()`) in Read Write (RW) mode
as opposed to `nss_init()` which opens it in Read Only (RO) mode. If
you want RO mode you pass the `NSS_INIT_READONLY` flag.
The flags parameter is a bitwise OR of the following flags:
NSS_INIT_READONLY
Open the databases read only.
NSS_INIT_NOCERTDB
Don't open the cert DB and key DB's, just initialize the volatile
certdb.
NSS_INIT_NOMODDB
Don't open the security module DB, just initialize the PKCS #11 module.
NSS_INIT_FORCEOPEN
Continue to force initializations even if the databases cannot be
opened.
NSS_INIT_NOROOTINIT
Don't try to look for the root certs module automatically.
NSS_INIT_OPTIMIZESPACE
Optimize for space instead of speed. Use smaller tables and caches.
NSS_INIT_PK11THREADSAFE
Only load PKCS#11 modules that are thread-safe, i.e., that support
locking - either OS locking or NSS-provided locks . If a PKCS#11 module
isn't thread-safe, don't serialize its calls; just don't load it
instead. This is necessary if another piece of code is using the same
PKCS#11 modules that NSS is accessing without going through NSS, for
example, the Java SunPKCS11 provider.
NSS_INIT_PK11RELOAD
Ignore the CKR_CRYPTOKI_ALREADY_INITIALIZED error when loading PKCS#11
modules. This is necessary if another piece of code is using the same
PKCS#11 modules that NSS is accessing without going through NSS, for
example, Java SunPKCS11 provider.
NSS_INIT_NOPK11FINALIZE
Never call C_Finalize on any PKCS#11 module. This may be necessary in
order to ensure continuous operation and proper shutdown sequence if
another piece of code is using the same PKCS#11 modules that NSS is
accessing without going through NSS, for example, Java SunPKCS11
provider. The following limitation applies when this is set:
SECMOD_WaitForAnyTokenEvent will not use C_WaitForSlotEvent, in order
to prevent the need for C_Finalize. This call will be emulated instead.
NSS_INIT_RESERVED
Currently has no effect, but may be used in the future to trigger
better cooperation between PKCS#11 modules used by both NSS and the
Java SunPKCS11 provider. This should occur after a new flag is defined
for C_Initialize by the PKCS#11 working group.
NSS_INIT_COOPERATE
Sets the above four recommended options for applications that use both
NSS and the Java SunPKCS11 provider.
Hint: You can obtain a printable representation of the flags via `nss_init_flags`.
"""
pass
def nss_init_flags(flags): # real signature unknown; restored from __doc__
"""
nss_init_flags(flags) -> ['flag_name', ...]
:Parameters:
flags : int
NSS_INIT* bit flags
Given an integer with NSS_INIT*
(e.g. nss.NSS_INIT_READONLY) bit flags return a sorted
list of their string names.
"""
pass
def nss_init_nodb(): # real signature unknown; restored from __doc__
"""
nss_init_nodb()
Performs tasks required to run Network Security Services without setting up
configuration files. Important: This NSS function is not intended for use with
SSL, which requires that the certificate and key database files be opened.
nss_init_nodb opens only the temporary database and the internal PKCS #11
module. Unlike nss_init, nss_init_nodb allows applications that do not have
access to storage for databases to run raw crypto, hashing, and certificate
functions. nss_init_nodb is not idempotent, so call it only once. The policy
flags for all cipher suites are turned off by default, disallowing all cipher
suites. Therefore, an application cannot use NSS to perform any cryptographic
operations until after it enables appropriate cipher suites by calling one of
the SSL Export Policy Functions.
"""
pass
def nss_init_read_write(cert_dir): # real signature unknown; restored from __doc__
"""
nss_init_read_write(cert_dir)
:Parameters:
cert_dir : string
Pathname of the directory where the certificate, key, and
security module databases reside.
Sets up configuration files and performs other tasks required to run
Network Security Services. `nss.nss_init_read_write()` differs from
`nss.nss_init()` because the internal PK11 slot (see
`nss.get_internal_slot()`) is created in Read Write (RW) mode as
opposed to Read Only (RO) mode.
"""
pass
def nss_is_initialized(): # real signature unknown; restored from __doc__
"""
nss_is_initialized() --> bool
Returns whether Network Security Services has already been initialized or not.
"""
pass
def nss_shutdown(): # real signature unknown; restored from __doc__
"""
nss_shutdown()
Closes the key and certificate databases that were opened by nss_init().
NSS can only shutdown successfully if all NSS objects have been
released, otherwise nss_shutdown will fail with the error code
SEC_ERROR_BUSY. Here are some tips to make sure nss_shutdown will
succeed. [1]_
* If the process is an SSL client make sure you call
`ssl.clear_session_cache()`.
* If the process is an SSL server make sure you call
`ssl.shutdown_server_session_id_cache()`.
* Make sure all sockets have been closed; open SSL sockets hold
references to NSS objects.
* Explicitly delete Python objects which contain NSS objects using the
del command. [2]_
* Use `nss.dump_certificate_cache_info()` to provide information about
which cached objects may still persist and be responsible for
preventing a full NSS shutdown.
.. [1] If the leaked objects are subsequently released after
nss_shutdown is called NSS can be reinitialized with the
various NSS initialization routines. In this case the
SEC_ERROR_BUSY error can be thought of as an informative
warning.
.. [2] This Python binding to NSS wraps each NSS object inside a
Python object. Like NSS objects Python objects are reference
counted. When the last reference to the Python object
disappears the Python object is destroyed. The destructor for a
Python object wrapping an NSS object releases the NSS reference
to the NSS object. Thus if any Python objects which wrap NSS
objects remain "live" nss_shutdown will fail. Python objects
are typically released by the Python interpreter when the
variable holding the object is assigned a new object or when
the variable holding the object goes out of scope. This means
you may need to manually delete some objects using the del
command rather than relying on Python's automatic garbage
collection. Consider this example:
def foo():
nss.nss_init(certdir)
sock = ssl.SSLSocket()
nss.nss_shutdown()
When nss_shutdown() is called the sock object is still alive and
holds references to NSS objects. The sock object won't be
released by Python until it goes out of scope when the function
exits. Thus the shutdown will fail with SEC_ERROR_BUSY. But you
can explicitly force the sock object to be released by
explicitly deleting it, for example:
def foo():
nss.nss_init(certdir)
sock = ssl.SSLSocket()
del sock
nss.nss_shutdown()
Another way to avoid this issue is to arrange your code such
that nss_shutdown is called from a location in your code which
is not in scope for any NSS objects created. This also implies
you shouldn't assign NSS objects to globals.
"""
pass
def nss_shutdown_context(context): # real signature unknown; restored from __doc__
"""
nss_shutdown_context(context)
:Parameters:
context : `InitContext` object
A `InitContext` returned from a previous
call to `nss_init_context`.
Shuts down NSS for the given initialization context.
"""
pass
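# Hedged sketch of the documented context lifecycle: the InitContext returned
# by nss_init_context() is handed back to nss_shutdown_context() when the
# caller is done; the directory path is hypothetical.
def _example_context_lifecycle():
    ctx = nss_init_context('/path/to/nssdb', flags=NSS_INIT_READONLY)
    try:
        pass  # ... perform NSS operations here ...
    finally:
        nss_shutdown_context(ctx)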
def oid_dotted_decimal(oid): # real signature unknown; restored from __doc__
"""
oid_dotted_decimal(oid) -> string
:Parameters:
oid : may be one of integer, string, SecItem
May be one of:
* integer:: A SEC OID enumeration constant, also known as a tag
(i.e. SEC_OID_*) for example SEC_OID_AVA_COMMON_NAME.
* string:: A string in dotted decimal representation, for example
'OID.2.5.4.3'. The 'OID.' prefix is optional.
Or a string for the tag name (e.g. 'SEC_OID_AVA_COMMON_NAME')
The 'SEC_OID\_' prefix is optional. Or one of the canonical
abbreviations (e.g. 'cn'). Case is not significant.
* SecItem:: A SecItem object encapsulating the OID in
DER format.
Given an oid return its dotted-decimal representation as a string.
"""
return ""
def oid_str(oid): # real signature unknown; restored from __doc__
"""
oid_str(oid) -> string
:Parameters:
oid : may be one of integer, string, SecItem
May be one of:
* integer:: A SEC OID enumeration constant, also known as a tag
(i.e. SEC_OID_*) for example SEC_OID_AVA_COMMON_NAME.
* string:: A string in dotted decimal representation, for example
'OID.2.5.4.3'. The 'OID.' prefix is optional.
Or a string for the tag name (e.g. 'SEC_OID_AVA_COMMON_NAME')
The 'SEC_OID\_' prefix is optional. Or one of the canonical
abbreviations (e.g. 'cn'). Case is not significant.
* SecItem:: A SecItem object encapsulating the OID in
DER format.
Given an oid return its description as a string.
"""
return ""
def oid_tag(oid): # real signature unknown; restored from __doc__
"""
oid_tag(oid) -> int
:Parameters:
oid : may be one of integer, string, SecItem
May be one of:
* integer:: A SEC OID enumeration constant, also known as a tag
(i.e. SEC_OID_*) for example SEC_OID_AVA_COMMON_NAME.
* string:: A string in dotted decimal representation, for example
'OID.2.5.4.3'. The 'OID.' prefix is optional.
Or a string for the tag name (e.g. 'SEC_OID_AVA_COMMON_NAME')
The 'SEC_OID\_' prefix is optional. Or one of the canonical
abbreviations (e.g. 'cn'). Case is not significant.
* SecItem:: A SecItem object encapsulating the OID in
DER format.
Given an oid return its tag constant.
"""
return 0
def oid_tag_name(oid): # real signature unknown; restored from __doc__
"""
oid_tag_name(oid) -> string
:Parameters:
oid : may be one of integer, string, SecItem
May be one of:
* integer:: A SEC OID enumeration constant, also known as a tag
(i.e. SEC_OID_*) for example SEC_OID_AVA_COMMON_NAME.
* string:: A string in dotted decimal representation, for example
'OID.2.5.4.3'. The 'OID.' prefix is optional.
Or a string for the tag name (e.g. 'SEC_OID_AVA_COMMON_NAME')
The 'SEC_OID\_' prefix is optional. Or one of the canonical
abbreviations (e.g. 'cn'). Case is not significant.
* SecItem:: A SecItem object encapsulating the OID in
DER format.
Given an oid return its tag constant as a string.
"""
return ""
def param_from_algid(algid): # real signature unknown; restored from __doc__
"""
param_from_algid(algid) -> SecItem
:Parameters:
algid : AlgorithmID object
algorithm id
Return a SecItem containing a encryption param derived from a AlgorithmID.
"""
return SecItem
def param_from_iv(mechanism, iv=None): # real signature unknown; restored from __doc__
"""
param_from_iv(mechanism, iv=None) -> SecItem
:Parameters:
mechanism : int
key mechanism enumeration constant (CKM_*)
iv : SecItem object
initialization vector. If there is no initialization vector you may also pass
None or an empty SecItem object (e.g. SecItem())
Return a SecItem to be used as the initialization vector for encryption/decryption.
"""
return SecItem
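# Hedged sketch: turning an 8-octet IV into an encryption parameter for a
# CBC mechanism. CKM_DES3_CBC_PAD is assumed to be among the module's CKM_*
# constants, and SecItem(data) wrapping raw octets is likewise an assumption.
def _example_param_from_iv():
    iv = SecItem(read_hex('01:23:45:67:89:ab:cd:ef'))
    return param_from_iv(CKM_DES3_CBC_PAD, iv)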
def pk11_attribute_type_from_name(name): # real signature unknown; restored from __doc__
"""
pk11_attribute_type_from_name(name) -> int
:Parameters:
name : string
name of PK11 attribute type constant (CKA_*)
Given the name of a PK11 attribute type constant (CKA_*)
return its integer constant
The string comparison is case insensitive and will match with
or without the CKA\_ prefix
"""
return 0
def pk11_attribute_type_name(type): # real signature unknown; restored from __doc__
"""
pk11_attribute_type_name(type) -> string
:Parameters:
type : int
PK11 attribute type constant (CKA_*)
Given a PK11 attribute type constant (CKA_*)
return its name as a string
"""
return ""
def pk11_disabled_reason_name(reason): # real signature unknown; restored from __doc__
"""
pk11_disabled_reason_name(reason) -> string
:Parameters:
reason : int
PK11 slot disabled reason constant (PK11_DIS_*)
Given a PK11 slot disabled reason constant (PK11_DIS_*)
return the constant as a string.
"""
return ""
def pk11_disabled_reason_str(reason): # real signature unknown; restored from __doc__
"""
pk11_disabled_reason_str(reason) -> string
:Parameters:
reason : int
PK11 slot disabled reason constant (PK11_DIS_*)
Given a PK11 slot disabled reason constant (PK11_DIS_*)
return a descriptive string
"""
return ""
def pk11_logout_all(): # real signature unknown; restored from __doc__
"""
pk11_logout_all()
Logout of every slot for all modules.
"""
pass
def pkcs12_cipher_from_name(name): # real signature unknown; restored from __doc__
"""
pkcs12_cipher_from_name(name) -> int
:Parameters:
name : string
name of PKCS12_* constant
Given the name of a PKCS12_* constant
return its integer constant
The string comparison is case insensitive and will match with
or without the PKCS12\_ prefix
"""
return 0
def pkcs12_cipher_name(cipher): # real signature unknown; restored from __doc__
"""
pkcs12_cipher_name(cipher) -> string
:Parameters:
cipher : int
PKCS12_* constant
Given a PKCS12_* constant
return its name as a string
"""
return ""
def pkcs12_enable_all_ciphers(): # real signature unknown; restored from __doc__
"""
pkcs12_enable_all_ciphers()
Enables all PKCS12 ciphers, which are:
- `PKCS12_RC2_CBC_40`
- `PKCS12_RC2_CBC_128`
- `PKCS12_RC4_40`
- `PKCS12_RC4_128`
- `PKCS12_DES_56`
- `PKCS12_DES_EDE3_168`
"""
pass
def pkcs12_enable_cipher(cipher, enabled): # real signature unknown; restored from __doc__
"""
pkcs12_enable_cipher(cipher, enabled)
:Parameters:
cipher : integer
The PKCS12 cipher suite enumeration (e.g. `PKCS12_DES_EDE3_168`, etc.)
enabled : bool or int
True enables, False disables
The cipher may be one of:
- PKCS12_RC2_CBC_40
- PKCS12_RC2_CBC_128
- PKCS12_RC4_40
- PKCS12_RC4_128
- PKCS12_DES_56
- PKCS12_DES_EDE3_168
"""
pass
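# Hedged sketch of a cipher policy: enable everything, switch the 40-bit
# RC2 suite off, and mark triple-DES preferred; the constants are the
# PKCS12_* values enumerated above.
def _example_pkcs12_cipher_policy():
    pkcs12_enable_all_ciphers()
    pkcs12_enable_cipher(PKCS12_RC2_CBC_40, False)
    pkcs12_set_preferred_cipher(PKCS12_DES_EDE3_168, True)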
def pkcs12_export(nickname, pkcs12_password, key_cipher=None, cert_cipher=None, pin_args=None): # real signature unknown; restored from __doc__
"""
pkcs12_export(nickname, pkcs12_password, key_cipher=SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_3KEY_TRIPLE_DES_CBC, cert_cipher=SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_40_BIT_RC2_CBC, pin_args=None)
:Parameters:
nickname : string
Certificate nickname to search for.
pkcs12_password : string
The password used to protect the pkcs12_file.
key_cipher : int
A SEC OID TAG enumerated constant selecting the
encryption for the private key (see below).
Also see `nss.pkcs12_map_cipher()` for an alternative
method to select the encryption cipher.
cert_cipher : int
A SEC OID TAG enumerated constant selecting the
encryption for the certificates (see below).
Also see `nss.pkcs12_map_cipher()` for an alternative
method to select the encryption cipher.
pin_args : tuple
Extra parameters which will
be passed to the password callback function.
pkcs12_export() is used to export a certificate and private key pair
from the NSS database in a protected manner. It produces the binary
content of what is typically called a .p12 file (e.g. PKCS12). This
function does not write the file, if you want to write a .p12 file
you must write it's output to a file, for example:
::
pkcs12_data = nss.pkcs12_export(nickname, pkcs12_file_password)
f = open(p12_file_path, 'w')
f.write(pkcs12_data)
f.close()
Password Based Encryption
-------------------------
PKCS #12 provides for not only the protection of the private keys but
also the certificate and meta-data associated with the keys. Password
based encryption is used to protect private keys (i.e. key_cipher) on
export to a PKCS #12 file and also the entire package when allowed
(i.e. cert_cipher). If no algorithm is specified it defaults to using
'PKCS #12 V2 PBE With SHA-1 And 3KEY Triple DES-CBC' for private key
encryption. For historical export control reasons 'PKCS #12 V2 PBE
With SHA-1 And 40 Bit RC2 CBC' is the default for the overall package
encryption when not in FIPS mode and no package encryption when in
FIPS mode. The private key is always protected with strong encryption
by default.
A list of ciphers follows; each term is the SEC OID tag followed by a
friendly description.
* symmetric CBC ciphers for PKCS #5 V2:
SEC_OID_DES_CBC
DES-CBC.
SEC_OID_RC2_CBC
RC2-CBC.
SEC_OID_RC5_CBC_PAD
RC5-CBCPad.
SEC_OID_DES_EDE3_CBC
DES-EDE3-CBC.
SEC_OID_AES_128_CBC
AES-128-CBC.
SEC_OID_AES_192_CBC
AES-192-CBC.
SEC_OID_AES_256_CBC
AES-256-CBC.
SEC_OID_CAMELLIA_128_CBC
CAMELLIA-128-CBC.
SEC_OID_CAMELLIA_192_CBC
CAMELLIA-192-CBC.
SEC_OID_CAMELLIA_256_CBC
CAMELLIA-256-CBC.
* PKCS #12 PBE Ciphers:
SEC_OID_PKCS12_PBE_WITH_SHA1_AND_128_BIT_RC4
PKCS #12 PBE With SHA-1 and 128 Bit RC4.
SEC_OID_PKCS12_PBE_WITH_SHA1_AND_40_BIT_RC4
PKCS #12 PBE With SHA-1 and 40 Bit RC4.
SEC_OID_PKCS12_PBE_WITH_SHA1_AND_TRIPLE_DES_CBC
PKCS #12 PBE With SHA-1 and Triple DES-CBC.
SEC_OID_PKCS12_PBE_WITH_SHA1_AND_128_BIT_RC2_CBC
PKCS #12 PBE With SHA-1 and 128 Bit RC2 CBC.
SEC_OID_PKCS12_PBE_WITH_SHA1_AND_40_BIT_RC2_CBC
PKCS #12 PBE With SHA-1 and 40 Bit RC2 CBC.
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_128_BIT_RC4
PKCS #12 V2 PBE With SHA-1 And 128 Bit RC4.
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_40_BIT_RC4
PKCS #12 V2 PBE With SHA-1 And 40 Bit RC4.
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_3KEY_TRIPLE_DES_CBC
PKCS #12 V2 PBE With SHA-1 And 3KEY Triple DES-CBC.
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_2KEY_TRIPLE_DES_CBC
PKCS #12 V2 PBE With SHA-1 And 2KEY Triple DES-CBC.
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_128_BIT_RC2_CBC
PKCS #12 V2 PBE With SHA-1 And 128 Bit RC2 CBC.
SEC_OID_PKCS12_V2_PBE_WITH_SHA1_AND_40_BIT_RC2_CBC
PKCS #12 V2 PBE With SHA-1 And 40 Bit RC2 CBC.
* PKCS #5 PBE Ciphers:
SEC_OID_PKCS5_PBE_WITH_MD2_AND_DES_CBC
PKCS #5 Password Based Encryption with MD2 and DES-CBC.
SEC_OID_PKCS5_PBE_WITH_MD5_AND_DES_CBC
PKCS #5 Password Based Encryption with MD5 and DES-CBC.
SEC_OID_PKCS5_PBE_WITH_SHA1_AND_DES_CBC
PKCS #5 Password Based Encryption with SHA-1 and DES-CBC.
"""
pass
def pkcs12_map_cipher(cipher, key_length=0): # real signature unknown; restored from __doc__
"""
pkcs12_map_cipher(cipher, key_length=0) -> int
:Parameters:
cipher : may be one of integer, string or SecItem
May be one of:
* integer:: A SEC OID enumeration constant, also known as a tag
(i.e. SEC_OID_*) for example SEC_OID_DES_EDE3_CBC.
* string:: A string for the tag name
(e.g. 'SEC_OID_DES_EDE3_CBC') The 'SEC_OID\_' prefix is
optional. A string in dotted decimal representation, for
example 'OID.2.5.4.3'. The 'OID.' prefix is optional. Case
is not significant.
* SecItem:: A SecItem object encapsulating the OID in
DER format.
key_length : int
The number of bits in the key. If zero a default will be selected.
Given a cipher and optionally a key length, map that to a PKCS12 encryption
method returned as a SEC_OID tag.
"""
return 0
def pkcs12_set_nickname_collision_callback(callback): # real signature unknown; restored from __doc__
"""
pkcs12_set_nickname_collision_callback(callback)
:Parameters:
callback : function pointer
The callback function
When importing a certificate via a `PKCS12Decoder` object and the
nickname is not set or collides with an existing nickname in the NSS
database then this callback is invoked to resolve the problem. If no
nickname collision callback has been set then an internal default
callback will be used instead which calls the NSS function CERT_MakeCANickname
(available in the Python binding as `Certificate.make_ca_nickname()`).
The callback has the signature::
nickname_collision_callback(old_nickname, cert) --> new_nickname, cancel
old_nickname
the previous nickname, or None if there was no previous nickname
cert
the `Certificate` object being imported.
The callback returns two values: the new nickname and a boolean.
new_nickname
The new nickname to try or None
cancel
boolean indicating if collision resolution should be cancelled
"""
pass
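# Hedged sketch following the documented callback contract of returning
# (new_nickname, cancel); Certificate.make_ca_nickname() is the helper the
# docstring above mentions.
def _example_nickname_collision():
    def resolve(old_nickname, cert):
        if old_nickname is None:
            return cert.make_ca_nickname(), False
        return None, True          # cancel rather than overwrite an entry
    pkcs12_set_nickname_collision_callback(resolve)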
def pkcs12_set_preferred_cipher(cipher, enabled): # real signature unknown; restored from __doc__
"""
pkcs12_set_preferred_cipher(cipher, enabled)
:Parameters:
cipher : integer
The PKCS12 cipher suite enumeration (e.g. `PKCS12_DES_EDE3_168`, etc.)
enabled : bool or int
True enables, False disables
This function enables or disables the preferred flag on a
PKCS cipher. The default preferred cipher is `PKCS12_RC2_CBC_40`.
The cipher may be one of:
- `PKCS12_RC2_CBC_40`
- `PKCS12_RC2_CBC_128`
- `PKCS12_RC4_40`
- `PKCS12_RC4_128`
- `PKCS12_DES_56`
- `PKCS12_DES_EDE3_168`
"""
pass
def pub_wrap_sym_key(mechanism, pub_key, sym_key): # real signature unknown; restored from __doc__
"""
pub_wrap_sym_key(mechanism, pub_key, sym_key) -> SecItem
:Parameters:
mechanism : int
CK_MECHANISM_TYPE enumerated constant
pub_key : `PublicKey` object
Public key used to wrap.
sym_key : `PK11SymKey` object
Symmetric key that will be wrapped.
:returns:
Wrapped symmetric key as SecItem
Wraps a symmetric key with a public key (an operation only RSA supports).
"""
return SecItem
def read_der_from_file(file, ascii=False): # real signature unknown; restored from __doc__
"""
read_der_from_file(file, ascii=False) -> SecItem
:Parameters:
file : file name or file object
If string treat as file path to open and read,
if file object read from file object.
ascii : boolean
If True treat file contents as ascii data.
If PEM delimiters are found strip them.
Then base64 decode the contents.
Read the contents of a file and return as a SecItem object.
If file is a string then treat it as a file pathname and open
and read the contents of that file. If file is a file object
then read the contents from the file object
If the file contents begin with a PEM header then treat the
file as PEM encoded and decode the payload into DER form.
Otherwise the file contents are assumed to already be in DER form.
The returned SecItem contains the DER contents of the file.
"""
return SecItem
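# Hedged sketch: reading a PEM file into DER form; ascii=True strips the PEM
# armor and base64-decodes the payload as documented, and the path is
# hypothetical.
def _example_read_der():
    return read_der_from_file('/path/to/cert.pem', ascii=True)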
def read_hex(input, separators=None): # real signature unknown; restored from __doc__
"""
read_hex(input, separators=" ,:\t\n") -> buffer
:Parameters:
input : string
string containing hexadecimal data
separators : string or None
string containing set of separator characters
Any character encountered during parsing which is in
this string will be skipped and considered a separator
between pairs of hexadecimal characters.
Parse a string containing hexadecimal data and return a buffer
object containing the binary octets. Each octet in the string is
represented as a pair of case insensitive hexadecimal characters
(0123456789abcdef). Each octet must be a pair of
characters. Octets may optionally be preceded by 0x or 0X. Octets
may be separated by separator characters specified in the
separators string. The separators string is a set of
characters. Any character in the separators character set will be
ignored when it occurs between octets. If no separators should be
considered then pass an empty string.
Using the default separators each of these strings is valid input
representing the same 8 octet sequence:
01, 23, 45, 67, 89, ab, cd, ef
01, 23, 45, 67, 89, AB, CD, EF
0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef
01:23:45:67:89:ab:cd:ef
0123456789abcdef
01 23 45 67 89 ab cd ef
0x010x230x450x670x890xab0xcd0xef
"""
return buffer(*(), **{})
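# Hedged sketch: the default separator set " ,:\t\n" lets mixed punctuation
# parse, while separators='' demands an unbroken run of hex pairs.
def _example_read_hex():
    mixed = read_hex('0x01, 0x23:45 67')                  # -> 4 octets
    strict = read_hex('0123456789abcdef', separators='')  # -> 8 octets
    return mixed, strict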
def set_password_callback(callback): # real signature unknown; restored from __doc__
"""
set_password_callback(callback)
:Parameters:
callback : function pointer
The callback function
The callback has the signature::
password_callback(slot, retry, [user_data1, ...])
slot
PK11Slot object
retry
boolean indicating if this is a retry
user_dataN
zero or more caller supplied optional parameters
"""
pass
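# Hedged sketch of a password callback matching the documented signature;
# returning the PIN string (delivered here via a pin_args tuple) is an
# assumption based on typical python-nss usage.
def _example_set_password_callback():
    def getpin(slot, retry, password):
        return None if retry else password   # give up on a second prompt
    set_password_callback(getpin)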
def sha1_digest(data): # real signature unknown; restored from __doc__
"""
sha1_digest(data) --> digest
:Parameters:
data : buffer or string
buffer the digest will be computed for
Returns 20 octet SHA1 digest data as buffer object.
Note: if a hexadecimal string representation is desired, pass the
result to data_to_hex().
"""
pass
def sha256_digest(data): # real signature unknown; restored from __doc__
"""
sha256_digest(data) --> digest
:Parameters:
data : buffer or string
buffer the digest will be computed for
Returns 32 octet SHA256 digest data as buffer object.
Note: if a hexadecimal string representation is desired, pass the
result to data_to_hex().
"""
pass
def sha512_digest(data): # real signature unknown; restored from __doc__
"""
sha512_digest(data) --> digest
:Parameters:
data : buffer or string
buffer the digest will be computed for
Returns 64 octet SHA512 digest data as buffer object.
Note: if a hexadecimal string representation is desired, pass the
result to data_to_hex().
"""
pass
def token_exists(mechanism): # real signature unknown; restored from __doc__
"""
pk11_token_exists(mechanism) -> bool
:Parameters:
mechanism : int
key mechanism enumeration constant (CKM_*)
Return True if a token is available which can perform
the desired mechanism, False otherwise.
"""
return False
def x509_alt_name(sec_item, repr_kind=None): # real signature unknown; restored from __doc__
"""
x509_alt_name(sec_item, repr_kind=AsString) -> (SecItem, ...)
:Parameters:
sec_item : SecItem object
A SecItem containing a DER encoded alternative name extension.
repr_kind : RepresentationKind constant
Specifies what the contents of the returned tuple will be.
May be one of:
AsObject
The general name as a nss.GeneralName object
AsString
The general name as a string.
(e.g. "http://crl.geotrust.com/crls/secureca.crl")
AsTypeString
The general name type as a string.
(e.g. "URI")
AsTypeEnum
The general name type as a general name type enumerated constant.
(e.g. nss.certURI )
AsLabeledString
The general name as a string with its type prepended.
(e.g. "URI: http://crl.geotrust.com/crls/secureca.crl")
Return a tuple of GeneralNames according to the representation kind.
"""
pass
def x509_ext_key_usage(sec_item, repr_kind=None): # real signature unknown; restored from __doc__
"""
x509_ext_key_usage(sec_item, repr_kind=AsString) -> (obj, ...)
:Parameters:
sec_item : SecItem object
A SecItem containing a DER encoded sequence of OID's
repr_kind : RepresentationKind constant
Specifies what the contents of the returned tuple will be.
May be one of:
AsObject
Each extended key usage will be a SecItem object embedding
the OID in DER format.
AsString
Each extended key usage will be a descriptive string.
(e.g. "TLS Web Server Authentication Certificate")
AsDottedDecimal
Each extended key usage will be OID rendered as a dotted decimal string.
(e.g. "OID.1.3.6.1.5.5.7.3.1")
AsEnum
Each extended key usage will be OID tag enumeration constant (int).
(e.g. nss.SEC_OID_EXT_KEY_USAGE_SERVER_AUTH)
Return a tuple of OIDs according to the representation kind.
"""
pass
def x509_key_usage(bitstr, repr_kind=None): # real signature unknown; restored from __doc__
"""
x509_key_usage(bitstr, repr_kind=AsEnumDescription) -> (str, ...)
:Parameters:
bitstr : SecItem object
A SecItem containing a DER encoded bit string.
repr_kind : RepresentationKind constant
Specifies what the contents of the returned tuple will be.
May be one of:
AsEnum
The enumerated constant.
(e.g. nss.KU_DIGITAL_SIGNATURE)
AsEnumDescription
A friendly human readable description of the enumerated constant as a string.
(e.g. "Digital Signature")
AsIndex
The bit position within the bit string.
Return a tuple of string names, one for each enabled bit in the key
usage bit string.
"""
pass
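# Hedged sketch: decoding extension payloads with the three x509_* helpers;
# each argument is assumed to be the DER value of the corresponding
# extension taken from a certificate, and the RepresentationKind constants
# are those listed in the docstrings above.
def _example_decode_extensions(alt_name_item, eku_item, ku_item):
    names = x509_alt_name(alt_name_item, repr_kind=AsLabeledString)
    ekus = x509_ext_key_usage(eku_item, repr_kind=AsDottedDecimal)
    usage = x509_key_usage(ku_item, repr_kind=AsEnumDescription)
    return names, ekus, usage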
# classes
from AlgorithmID import AlgorithmID
from AuthKeyID import AuthKeyID
from AVA import AVA
from BasicConstraints import BasicConstraints
from CertDB import CertDB
from Certificate import Certificate
from CertificateExtension import CertificateExtension
from CertificateRequest import CertificateRequest
from CRLDistributionPoint import CRLDistributionPoint
from CRLDistributionPts import CRLDistributionPts
from DN import DN
from DSAPublicKey import DSAPublicKey
from GeneralName import GeneralName
from InitContext import InitContext
from InitParameters import InitParameters
from KEYPQGParams import KEYPQGParams
from PK11Context import PK11Context
from PK11Slot import PK11Slot
from PK11SymKey import PK11SymKey
from PKCS12DecodeItem import PKCS12DecodeItem
from PKCS12Decoder import PKCS12Decoder
from PrivateKey import PrivateKey
from PublicKey import PublicKey
from RDN import RDN
from RSAGenParams import RSAGenParams
from RSAPublicKey import RSAPublicKey
from SecItem import SecItem
from SignedCRL import SignedCRL
from SignedData import SignedData
from SubjectPublicKeyInfo import SubjectPublicKeyInfo
# variables with complex values
_C_API = None # (!) real value is ''
| [
"pkalita@princeton.edu"
] | pkalita@princeton.edu |
8a733a8f5dad298b7398c3bfd2c77b3d3aaef9c9 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/virtual-wan/azext_vwan/vendored_sdks/v2020_05_01/v2020_05_01/aio/operations/_network_watchers_operations.py | be53b3760e1ebc7fc1906132d816e0b86f9aa4f4 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 104,916 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkWatchersOperations:
"""NetworkWatchersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def create_or_update(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.NetworkWatcher",
**kwargs
) -> "_models.NetworkWatcher":
"""Creates or updates a network watcher in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the network watcher resource.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.NetworkWatcher
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkWatcher')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_watcher_name: str,
**kwargs
) -> "_models.NetworkWatcher":
"""Gets the specified network watcher by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
network_watcher_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_watcher_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified network watcher resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
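# Hedged usage sketch (editor-added, not AutoRest output; kept as a comment
# so the generated class is unchanged). The client variable and resource
# names are hypothetical:
#
#     async def _example_begin_delete(client):
#         poller = await client.network_watchers.begin_delete('my-rg', 'my-watcher')
#         await poller.result()   # resolves once the watcher is deleted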
async def update_tags(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.NetworkWatcher":
"""Updates a network watcher tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters supplied to update network watcher tags.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
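# Hedged usage sketch (editor-added, kept as a comment): PATCHing tags with
# a TagsObject model; the names are hypothetical and the import path mirrors
# this package version.
#
#     from azure.mgmt.network.v2020_05_01.models import TagsObject
#     watcher = await client.network_watchers.update_tags(
#         'my-rg', 'my-watcher', TagsObject(tags={'env': 'test'}))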
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkWatcherListResult"]:
"""Gets all network watchers by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkWatcherListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.NetworkWatcherListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcherListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.NetworkWatcherListResult"]:
"""Gets all network watchers by subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkWatcherListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.NetworkWatcherListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkWatcherListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers'} # type: ignore
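# Hedged usage sketch (editor-added, kept as a comment): AsyncItemPaged is
# consumed with `async for`; the client variable is hypothetical.
#
#     async for watcher in client.network_watchers.list_all():
#         print(watcher.name, watcher.location)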
async def get_topology(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.TopologyParameters",
**kwargs
) -> "_models.Topology":
"""Gets the current network topology by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the representation of topology.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.TopologyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Topology, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.Topology
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Topology"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.get_topology.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TopologyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Topology', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_topology.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology'} # type: ignore
async def _verify_ip_flow_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.VerificationIPFlowParameters",
**kwargs
) -> "_models.VerificationIPFlowResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VerificationIPFlowResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._verify_ip_flow_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VerificationIPFlowParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_verify_ip_flow_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'} # type: ignore
async def begin_verify_ip_flow(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.VerificationIPFlowParameters",
**kwargs
) -> AsyncLROPoller["_models.VerificationIPFlowResult"]:
"""Verify IP flow from the specified VM to a location given the currently configured NSG rules.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the IP flow to be verified.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VerificationIPFlowParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VerificationIPFlowResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.VerificationIPFlowResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VerificationIPFlowResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._verify_ip_flow_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_verify_ip_flow.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'} # type: ignore
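# Usage sketch (illustrative): check whether NSG rules permit a flow from a VM,
# assuming the ``client`` from the sketch above and a placeholder ``vm_id``:
#
#     poller = await client.network_watchers.begin_verify_ip_flow(
#         "watcher_rg", "my_watcher",
#         _models.VerificationIPFlowParameters(
#             target_resource_id=vm_id, direction="Outbound", protocol="TCP",
#             local_port="80", remote_port="443",
#             local_ip_address="10.0.0.4", remote_ip_address="13.107.21.200"))
#     result = await poller.result()
#     print(result.access, result.rule_name)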
async def _get_next_hop_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.NextHopParameters",
**kwargs
) -> "_models.NextHopResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NextHopResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_next_hop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NextHopParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NextHopResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('NextHopResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_next_hop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'} # type: ignore
async def begin_get_next_hop(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.NextHopParameters",
**kwargs
) -> AsyncLROPoller["_models.NextHopResult"]:
"""Gets the next hop from the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the source and destination endpoint.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.NextHopParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NextHopResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.NextHopResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NextHopResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_next_hop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NextHopResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_next_hop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'} # type: ignore
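# Usage sketch (illustrative): resolve the next hop for traffic leaving a VM,
# assuming the same ``client`` and a placeholder ``vm_id``:
#
#     poller = await client.network_watchers.begin_get_next_hop(
#         "watcher_rg", "my_watcher",
#         _models.NextHopParameters(
#             target_resource_id=vm_id,
#             source_ip_address="10.0.0.4",
#             destination_ip_address="10.0.1.5"))
#     hop = await poller.result()
#     print(hop.next_hop_type, hop.next_hop_ip_address)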
async def _get_vm_security_rules_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.SecurityGroupViewParameters",
**kwargs
) -> "_models.SecurityGroupViewResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityGroupViewResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_vm_security_rules_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SecurityGroupViewParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vm_security_rules_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'} # type: ignore
async def begin_get_vm_security_rules(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.SecurityGroupViewParameters",
**kwargs
) -> AsyncLROPoller["_models.SecurityGroupViewResult"]:
"""Gets the configured and effective security group rules on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the VM to check security groups for.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.SecurityGroupViewParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SecurityGroupViewResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.SecurityGroupViewResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityGroupViewResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_vm_security_rules_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vm_security_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'} # type: ignore
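# Usage sketch (illustrative): view the configured and effective security rules
# on a VM, assuming the same ``client`` and a placeholder ``vm_id``:
#
#     poller = await client.network_watchers.begin_get_vm_security_rules(
#         "watcher_rg", "my_watcher",
#         _models.SecurityGroupViewParameters(target_resource_id=vm_id))
#     view = await poller.result()
#     for nic in view.network_interfaces:
#         print(nic.id)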
async def _get_troubleshooting_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.TroubleshootingParameters",
**kwargs
) -> "_models.TroubleshootingResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.TroubleshootingResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_troubleshooting_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TroubleshootingParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_troubleshooting_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'} # type: ignore
async def begin_get_troubleshooting(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.TroubleshootingParameters",
**kwargs
) -> AsyncLROPoller["_models.TroubleshootingResult"]:
"""Initiate troubleshooting on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to troubleshoot.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.TroubleshootingParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either TroubleshootingResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.TroubleshootingResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.TroubleshootingResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_troubleshooting_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_troubleshooting.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'} # type: ignore
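# Usage sketch (illustrative): start troubleshooting against a resource such as
# a VPN gateway, writing results to a storage account; ``vpn_gateway_id`` and
# ``storage_account_id`` are placeholders:
#
#     poller = await client.network_watchers.begin_get_troubleshooting(
#         "watcher_rg", "my_watcher",
#         _models.TroubleshootingParameters(
#             target_resource_id=vpn_gateway_id,
#             storage_id=storage_account_id,
#             storage_path="https://<account>.blob.core.windows.net/troubleshoot"))
#     result = await poller.result()
#     print(result.code, result.start_time, result.end_time)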
async def _get_troubleshooting_result_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.QueryTroubleshootingParameters",
**kwargs
) -> "_models.TroubleshootingResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.TroubleshootingResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_troubleshooting_result_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'QueryTroubleshootingParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_troubleshooting_result_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'} # type: ignore
async def begin_get_troubleshooting_result(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.QueryTroubleshootingParameters",
**kwargs
) -> AsyncLROPoller["_models.TroubleshootingResult"]:
"""Get the last completed troubleshooting result on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to query the troubleshooting result.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.QueryTroubleshootingParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either TroubleshootingResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.TroubleshootingResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.TroubleshootingResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_troubleshooting_result_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_troubleshooting_result.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'} # type: ignore
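# Usage sketch (illustrative): fetch the last completed troubleshooting run for
# the same placeholder resource:
#
#     poller = await client.network_watchers.begin_get_troubleshooting_result(
#         "watcher_rg", "my_watcher",
#         _models.QueryTroubleshootingParameters(
#             target_resource_id=vpn_gateway_id))
#     last = await poller.result()
#     print(last.code)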
async def _set_flow_log_configuration_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.FlowLogInformation",
**kwargs
) -> "_models.FlowLogInformation":
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._set_flow_log_configuration_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FlowLogInformation')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_set_flow_log_configuration_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'} # type: ignore
async def begin_set_flow_log_configuration(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.FlowLogInformation",
**kwargs
) -> AsyncLROPoller["_models.FlowLogInformation"]:
"""Configures flow log and traffic analytics (optional) on a specified resource.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the configuration of flow log.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.FlowLogInformation
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either FlowLogInformation or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.FlowLogInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._set_flow_log_configuration_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_set_flow_log_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'} # type: ignore
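# Usage sketch (illustrative): enable NSG flow logging to a storage account;
# ``nsg_id`` and ``storage_account_id`` are placeholders:
#
#     poller = await client.network_watchers.begin_set_flow_log_configuration(
#         "watcher_rg", "my_watcher",
#         _models.FlowLogInformation(
#             target_resource_id=nsg_id,
#             storage_id=storage_account_id,
#             enabled=True))
#     info = await poller.result()
#     print(info.enabled)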
async def _get_flow_log_status_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.FlowLogStatusParameters",
**kwargs
) -> "_models.FlowLogInformation":
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_flow_log_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FlowLogStatusParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_flow_log_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'} # type: ignore
async def begin_get_flow_log_status(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.FlowLogStatusParameters",
**kwargs
) -> AsyncLROPoller["_models.FlowLogInformation"]:
"""Queries status of flow log and traffic analytics (optional) on a specified resource.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define a resource to query flow log and traffic analytics
(optional) status.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.FlowLogStatusParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either FlowLogInformation or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.FlowLogInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_flow_log_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_flow_log_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'} # type: ignore
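# Usage sketch (illustrative): query the current flow-log status for the same
# placeholder NSG:
#
#     poller = await client.network_watchers.begin_get_flow_log_status(
#         "watcher_rg", "my_watcher",
#         _models.FlowLogStatusParameters(target_resource_id=nsg_id))
#     status = await poller.result()
#     print(status.enabled, status.storage_id)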
async def _check_connectivity_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.ConnectivityParameters",
**kwargs
) -> "_models.ConnectivityInformation":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectivityInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._check_connectivity_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectivityParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_check_connectivity_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'} # type: ignore
async def begin_check_connectivity(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.ConnectivityParameters",
**kwargs
) -> AsyncLROPoller["_models.ConnectivityInformation"]:
"""Verifies the possibility of establishing a direct TCP connection from a virtual machine to a
given endpoint including another VM or an arbitrary remote server.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that determine how the connectivity check will be performed.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.ConnectivityParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ConnectivityInformation or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.ConnectivityInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectivityInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._check_connectivity_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_check_connectivity.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'} # type: ignore
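# Usage sketch (illustrative): test TCP connectivity from a VM to an external
# endpoint; ``vm_id`` and the destination address are placeholders:
#
#     poller = await client.network_watchers.begin_check_connectivity(
#         "watcher_rg", "my_watcher",
#         _models.ConnectivityParameters(
#             source=_models.ConnectivitySource(resource_id=vm_id),
#             destination=_models.ConnectivityDestination(
#                 address="example.com", port=443)))
#     info = await poller.result()
#     print(info.connection_status, info.avg_latency_in_ms)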
async def _get_azure_reachability_report_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.AzureReachabilityReportParameters",
**kwargs
) -> "_models.AzureReachabilityReport":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureReachabilityReport"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_azure_reachability_report_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AzureReachabilityReportParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_azure_reachability_report_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'} # type: ignore
async def begin_get_azure_reachability_report(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.AzureReachabilityReportParameters",
**kwargs
) -> AsyncLROPoller["_models.AzureReachabilityReport"]:
"""NOTE: This feature is currently in preview and still being tested for stability. Gets the
relative latency score for internet service providers from a specified location to Azure
regions.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that determine Azure reachability report configuration.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.AzureReachabilityReportParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AzureReachabilityReport or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.AzureReachabilityReport]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureReachabilityReport"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_azure_reachability_report_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_azure_reachability_report.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'} # type: ignore
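# Usage sketch (illustrative): request a reachability report for the last week;
# the location values are placeholders:
#
#     import datetime
#     end = datetime.datetime.now(datetime.timezone.utc)
#     poller = await client.network_watchers.begin_get_azure_reachability_report(
#         "watcher_rg", "my_watcher",
#         _models.AzureReachabilityReportParameters(
#             provider_location=_models.AzureReachabilityReportLocation(
#                 country="United States", state="washington"),
#             start_time=end - datetime.timedelta(days=7),
#             end_time=end))
#     report = await poller.result()
#     print(report.aggregation_level)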
async def _list_available_providers_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.AvailableProvidersListParameters",
**kwargs
) -> "_models.AvailableProvidersList":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableProvidersList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._list_available_providers_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AvailableProvidersListParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_available_providers_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'} # type: ignore
async def begin_list_available_providers(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.AvailableProvidersListParameters",
**kwargs
) -> AsyncLROPoller["_models.AvailableProvidersList"]:
"""NOTE: This feature is currently in preview and still being tested for stability. Lists all
available internet service providers for a specified Azure region.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that scope the list of available providers.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.AvailableProvidersListParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for a custom polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AvailableProvidersList or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.AvailableProvidersList]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableProvidersList"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_available_providers_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_available_providers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'} # type: ignore
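    # Usage sketch (not part of the generated code; `client` and the resource
    # names below are assumptions for illustration):
    #
    #     poller = await client.network_watchers.begin_list_available_providers(
    #         resource_group_name="my-rg",
    #         network_watcher_name="my-watcher",
    #         parameters=parameters,
    #     )
    #     providers_list = await poller.result()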
async def _get_network_configuration_diagnostic_initial(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.NetworkConfigurationDiagnosticParameters",
**kwargs
) -> "_models.NetworkConfigurationDiagnosticResponse":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkConfigurationDiagnosticResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_network_configuration_diagnostic_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkConfigurationDiagnosticParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_network_configuration_diagnostic_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'} # type: ignore
async def begin_get_network_configuration_diagnostic(
self,
resource_group_name: str,
network_watcher_name: str,
parameters: "_models.NetworkConfigurationDiagnosticParameters",
**kwargs
) -> AsyncLROPoller["_models.NetworkConfigurationDiagnosticResponse"]:
"""Gets Network Configuration Diagnostic data to help customers understand and debug network
behavior. It provides detailed information on what security rules were applied to a specified
traffic flow and the result of evaluating these rules. Customers must provide details of a flow
like source, destination, protocol, etc. The API returns whether traffic was allowed or denied,
the rules evaluated for the specified flow and the evaluation results.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters to get network configuration diagnostic.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.NetworkConfigurationDiagnosticParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkConfigurationDiagnosticResponse or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.NetworkConfigurationDiagnosticResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkConfigurationDiagnosticResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_network_configuration_diagnostic_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_network_configuration_diagnostic.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'} # type: ignore
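    # Usage sketch (assumed client/resource names): the returned poller
    # resolves to a NetworkConfigurationDiagnosticResponse once the
    # long-running operation completes.
    #
    #     poller = await client.network_watchers.begin_get_network_configuration_diagnostic(
    #         resource_group_name="my-rg",
    #         network_watcher_name="my-watcher",
    #         parameters=diagnostic_parameters,
    #     )
    #     diagnostic = await poller.result()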
| [
"noreply@github.com"
] | Azure.noreply@github.com |
d6891e0277a475c88b44289a0aa0412e9a07c473 | ce661026009d622db924080d85ab529f1cae6b60 | /projecteuler.net/wip,74.py | 224eff7255f3ca4eefe694ca10860e3677307103 | [] | no_license | predavlad/projecteuler | d54f5d85ab0133b19b54b4168990b90f09a0184c | 58e1637733bb7e01e44bfac707353ecfe84d9b19 | refs/heads/master | 2021-01-23T15:29:26.257019 | 2019-02-09T10:11:23 | 2019-02-09T10:11:23 | 12,952,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | import time
# 170 seconds, although it can be improved if we count all permutations of a number at the same time, instead of
# going through everything
start_time = time.time()
fact_cache = {}
dig_fact_cache = {}
loop_cache = {}
def fact(n):
global fact_cache
if n in [0, 1]:
return 1
if n not in fact_cache:
fact_cache[n] = n * fact(n - 1)
return fact_cache[n]
def digit_fact_sum(n):
global dig_fact_cache
    if n not in dig_fact_cache:
        dig_fact_cache[n] = sum(map(fact, map(int, str(n))))
    return dig_fact_cache[n]
def get_loop_count(n):
global loop_cache
orig_n = n
is_loop = False
chain = {}
while not is_loop:
new_n = digit_fact_sum(n)
chain[n], n = new_n, new_n
if new_n in chain:
is_loop = True
chain_len = len(chain)
current = orig_n
while current != new_n:
loop_cache[current] = chain_len
current = chain[current]
chain_len -= 1
for i in range(chain_len):
loop_cache[current] = chain_len
current = chain[current]
return len(chain)
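# Caching note: get_loop_count() records a chain length for every number it
# visits. Tail numbers get strictly decreasing lengths, and every number on
# the terminal cycle shares the cycle length, so the sweep below never walks
# the same chain twice.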
assert get_loop_count(69) == 5
assert get_loop_count(145) == 1
assert get_loop_count(540) == 2
assert get_loop_count(78) == 4
assert get_loop_count(1479) == 60
LIMIT = 10 ** 3  # wip value; the actual Project Euler 74 limit is 10 ** 6
COUNT = 60
counter = 0
for i in xrange(LIMIT):
if i not in loop_cache:
        get_loop_count(i)  # return value unused; called to fill loop_cache[i]
if loop_cache[i] == COUNT:
counter += 1
if counter % 10 == 0:
print counter, i
print counter
print time.time() - start_time, "seconds"
| [
"preda.vlad@yahoo.com"
] | preda.vlad@yahoo.com |
812662b6d90892d254a75762deed638a32163b4f | ffadf9541d01cf9af20c419759d48b1eb01bfd35 | /pachong/PCdemo1/day16/数据格式转存.py | 346d8cb0bafaacbd1e96a7fe95ccb5ff16e63996 | [] | no_license | 1987617587/lsh_py | b1bb1016eaafcba03bbc4a5310c1db04ae227af4 | 80eb5175cd0e5b3c6c5e2ebb906bb78d9a8f9e0d | refs/heads/master | 2021-01-02T05:14:31.330287 | 2020-06-20T05:18:23 | 2020-06-20T05:18:23 | 239,498,994 | 2 | 1 | null | 2020-06-07T23:09:56 | 2020-02-10T11:46:47 | Python | UTF-8 | Python | false | false | 3,073 | py | # author:lsh
# datetime:2020/4/14 14:39
'''
.::::. _oo0oo_
.::::::::. o8888888o
::::::::::: 88" . "88
..:::::::::::' (| -_- |)
'::::::::::::' 0\ = /0
.:::::::::: ___/`---'\___
'::::::::::::::.. .' \\| |# '.
..::::::::::::. / \\||| : |||# \
``:::::::::::::::: / _||||| -:- |||||- \
::::``:::::::::' .:::. | | \\\ - #/ | |
::::' ':::::' .::::::::. | \_| ''\---/'' |_/ |
.::::' :::: .:::::::'::::. \ .-\__ '-' ___/-. /
.:::' ::::: .:::::::::' ':::::. ___'. .' /--.--\ `. .'___
.::' :::::.:::::::::' ':::::. ."" '< `.___\_<|>_/___.' >' "".
.::' ::::::::::::::' ``::::. | | : `- \`.;`\ _ /`;.`/ - ` : | |
...::: ::::::::::::' ``::. \ \ `_. \_ __\ /__ _/ .-` / /
```` ':. ':::::::::' ::::.. `-.____`.___ \_____/___.-`___.-'
'.:::::' ':'````.. `=---='
              Goddess bless, no bugs ever        Buddha bless, no bugs ever
'''
import pandas as pd
import csv
import codecs
import json
# csv===> excel
# df = pd.read_csv('./data/a.csv',encoding='utf-8')
# df.to_excel('./data/a.xlsx',sheet_name='csv转excel')
# excel ===> csv
# df_xls = pd.read_excel('./data/a.xlsx',index_col=0)
# df_xls.to_csv('./data/b.csv',encoding='utf-8')
# csv ===>json
df_csv = pd.read_csv('./data/a.csv',encoding='utf-8')
count = df_csv.shape[0]  # number of rows
with open('./data/b.json','w',encoding='utf-8') as file:
for i in range(count):
d = {
'0':df_csv.iloc[i,0],
'1':df_csv.iloc[i,1],
'2':df_csv.iloc[i,2],
}
file.write(json.dumps(d)+'\n')
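# Note: pandas can also write json-lines directly, e.g.
# df_csv.to_json('./data/b.json', orient='records', lines=True, force_ascii=False)
# (sketch; the keys would then be the csv's own column labels, not '0'/'1'/'2').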
# json ===>csv
with open('./data/a.json','r',encoding='utf-8') as file1:  # 'r': read mode; opening with 'w' truncated the input file
with open('./data/c.csv', 'w', encoding='utf-8') as file2:
wr = csv.writer(file2)
wr.writerow(['1','2','3'])
        line = file1.readline()
        while line:
            d = json.loads(line)
            line = file1.readline()  # advance to the next record; the original never did, so the loop ran forever
            wr.writerow([d['1'], d['2'], d['3']]) | [
"1987617587@qq.com"
] | 1987617587@qq.com |
d8e589c781a6a06fefb67ae1e521c503eb350e17 | 9de64f94ffe3b57de373bebdd5344d0d4e725a9c | /lib/models/__init__.py | 2c735037871bae9eb7ff1fda8bc6d6c6e4333962 | [
"MIT"
] | permissive | CoinCheung/BiSeNet | 1196ed0463d067e8b145b716aae39cf1963f4ffb | f2b901599752ce50656d2e50908acecd06f7eb47 | refs/heads/master | 2023-02-17T08:45:51.374875 | 2023-02-05T03:08:32 | 2023-02-05T03:08:32 | 159,607,436 | 1,312 | 322 | MIT | 2023-02-05T03:08:33 | 2018-11-29T04:27:51 | Python | UTF-8 | Python | false | false | 146 | py |
from .bisenetv1 import BiSeNetV1
from .bisenetv2 import BiSeNetV2
model_factory = {
'bisenetv1': BiSeNetV1,
'bisenetv2': BiSeNetV2,
}
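# Usage sketch (constructor signature assumed from this repo's training code):
#     net = model_factory['bisenetv2'](n_classes=19)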
| [
"867153576@qq.com"
] | 867153576@qq.com |
97f074169dcb14b39a6e9f309e209e8ebe7d2140 | e1f5cf7055e54f24f4bea5b1232f337fcbcae63c | /regex/solution/regex_html.py | 410dd5e6e7c177e7143be8c9fcf65f1daa3c06ab | [
"MIT"
] | permissive | revirevy/book-python | fcd64d44840b68e528422de785383f9d4a81fb98 | 9da7bfd43117f33530e708e889c26152dc8c7a25 | refs/heads/master | 2020-04-01T05:13:50.394724 | 2018-10-13T14:58:19 | 2018-10-13T14:58:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,459 | py | import re
TEXT = """
<html><body><bgsound assignment="jfktalk.wav" loop="2"><p></p><center><h3>John F. Kennedy Moon Speech - Rice Stadium</h3><img assignment="jfkrice.jpg"><h3>September 12, 1962</h3></center><p></p><hr><p></p><center>Movie clips of JFK speaking at Rice University: <a href="JFKatRice.mov">(.mov)</a> or <a href="jfkrice.avi">(.avi)</a> (833K)</center><p><a href="jfkru56k.asf">See and hear</a> the entire speech for 56K modem download [8.7 megabytes in a .asf movie format which requires Windows Media Player 7 (speech lasts about 33 minutes)].<br><a href="jfkru100.asf">See and hear</a> the entire speech for higher speed access [25.3 megabytes in .asf movie format which requires Windows Media Player 7].<br><a href="jfkslide.asf">See and hear</a> a five minute audio version of the speech with accompanying slides and music. This is a most inspirational presentation of, perhaps, the most famous space speech ever given. The file is a streaming video Windows Media Player 7 format. [11 megabytes in .asf movie format which requires Windows Media Player 7]. <br><a href="jfk_rice_speech.mpg">See and hear</a> the 17 minute 48 second speech in the .mpg format. This is a very large file of 189 megabytes and only suggested for those with DSL, ASDL, or cable modem access as the download time on a 28.8K or 56K modem would be many hours duration. </p><p></p><hr><p></p><center><h4>TEXT OF PRESIDENT JOHN KENNEDY'S RICE STADIUM MOON SPEECH</h4></center><p>President Pitzer, Mr. Vice President, Governor, CongressmanThomas, Senator Wiley, and Congressman Miller, Mr. Webb, Mr.Bell, scientists, distinguished guests, and ladies and gentlemen:</p><p>We choose to go to the moon. We choose to go to the moon in this decade and do the other things, not because they areeasy, but because they are hard, because that goal will serve to organize and measure the best of our energies and skills,because that challenge is one that we are willing to accept, one we are unwilling to postpone, and one which we intend to win,and the others, too. </p><p>It is for these reasons that I regard the decision last year to shift our efforts in space from low to high gear as among the mostimportant decisions that will be made during my incumbency in the office of the Presidency. </p><p>In the last 24 hours we have seen facilities now being created for the greatest and most complex exploration in man's history.We have felt the ground shake and the air shattered by the testing of a Saturn C-1 booster rocket, many times as powerful asthe Atlas which launched John Glenn, generating power equivalent to 10,000 automobiles with their accelerators on the floor.We have seen the site where the F-1 rocket engines, each one as powerful as all eight engines of the Saturn combined, will beclustered together to make the advanced Saturn missile, assembled in a new building to be built at Cape Canaveral as tall as a48 story structure, as wide as a city block, and as long as two lengths of this field.</p><p></p><hr><p></p><center><a href="movies.html">Return to Space Movies Cinema</a></center></body></html>
"""
paragraphs = re.split(r'</?p>', TEXT)
for p in paragraphs:
if p.startswith('We choose'):
print(p)
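# Approach 2: a non-greedy findall() captures each paragraph body directly.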
paragraphs = re.findall(r'<p>(\w.*?)</p>', TEXT)
for p in paragraphs:
if p.startswith('We choose'):
print(p)
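# Approach 3: search() for the one paragraph we want, then strip the surrounding tags.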
output = re.search(r'<p>(We choose .*?)</p>', TEXT)
output = re.sub('</?p>', '', output.group())
print(output)
| [
"matt@astrotech.io"
] | matt@astrotech.io |
91d121b6b72945f63cdeb6e0e53dff0100fbec44 | 4cc285b0c585241ff4404087e6fbb901195639be | /NeuralNetworkNumbers/venv/Lib/site-packages/sklearn/datasets/tests/test_covtype.py | bd775d75563d31b490373811c09ca4abba681004 | [] | no_license | strazhg/NeuralNetworksPython | 815542f4ddbb86e918e657f783158f8c078de514 | 15038e44a5a6c342336c119cdd2abdeffd84b5b1 | refs/heads/main | 2023-04-16T18:51:29.602644 | 2021-04-27T14:46:55 | 2021-04-27T14:46:55 | 361,944,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f48c1479cc19ae778036630c6d97033d44099090499f8db51ddd327f13e7f9fd
size 1677
| [
"golubstrazh@gmail.com"
] | golubstrazh@gmail.com |
ece5d3da37a3165a695a7b77b2fc0daaf58c442c | de9b8b7192a0a81e9249823bb2b86f0b7e452863 | /.history/main_20171106230505.py | 0f73c604abb8fcbe3b81c435a1da27dfbf8b824e | [
"MIT"
] | permissive | reecebenson/uwe-dadsa-tennis-a | f5eaeb1b96d4e61f29279514e68eeea8ad6533db | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | refs/heads/master | 2023-07-08T16:13:23.963348 | 2017-11-30T12:07:01 | 2017-11-30T12:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | # DADSA - Assignment 1
# Reece Benson
import random
from classes import Menu as Menu
from classes import Handler as Handler
class App():
# Define the variables we will be using
debug = True
handler = None
# Define all of the properties we will need to use
def __init__(self):
# Load our handler
self.handler = Handler.Handler(self)
self.handler.load()
# Generate rounds
self.generate_rounds()
# Hold the program
self.exit()
# Generate our rounds from our player list
def generate_rounds(self):
# Let's generate our random rounds from scratch
round_data = { }
# Write our new data to memory
for seasonId in self.handler.get_seasons():
season = self.handler.get_season(seasonId)
players = season.players()
# Generate our rounds
for gender in players:
# Create our gendered rounds
                if gender not in round_data:
# Default Round Cap
roundCap = 3
# Do we have a Round Cap overrider for this gender?
if(gender + "_cap" in season.settings()):
roundCap = season.settings()[gender + "_cap"]
# Update our round data
round_data.update({ gender: [ { "_roundCap": roundCap } ] })
# Create our round data from players
rnd_players = random.sample(players[gender], len(players[gender]))
for i in range(int(len(rnd_players) / 2 )):
# Grab our versus players
playerOne = rnd_players[i * 2]
playerTwo = rnd_players[(i * 2) + 1]
round_data[gender].append({ playerOne.name(): 0, playerTwo.name(): 0 })
print(round_data)
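        # NOTE: pairing int(len(rnd_players) / 2) couples means an odd-sized
        # player list silently leaves the final player unmatched; a bye round
        # would need explicit handling.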
# A method which exits the program after the user has pressed the Return key
def exit(self):
input(">>> Press <Return> to terminate the program")
exit()
App() | [
"me@reecebenson.me"
] | me@reecebenson.me |
1327bb0c4a5e509d0c7c62b0352b208928bf34fe | 4ff0ff57e0fee60caf90cf1a2319b7615858b5ff | /cw_hr_holidays_extended/reports/__init__.py | f81bbb1467b1c4b3122cfce1a6ad1b2c93ff037b | [] | no_license | akradore/ACC_12 | 257a590acfb1afc92122e46b6db0ccbfdb3969be | 5ed668bda8177586695f5dc2e68a48806eccf976 | refs/heads/master | 2023-03-17T08:53:58.822549 | 2020-02-24T12:32:05 | 2020-02-24T12:32:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | # -*- coding: utf-8 -*-
from . import holidays_summary_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"arun01@mmproject.net"
] | arun01@mmproject.net |
b808d02359c77895488e059565589a1e8d9c1703 | 9734c93c86c982b1ce046340bac9e53645b261b8 | /tests/cli/helpers/codepage.py | 136905dc4ab533842e791e8dbbc543f7b954b2ed | [
"Apache-2.0"
] | permissive | log2timeline/plaso | cd72dd407d6c5627506c14f58cb8f6a6926aa808 | d6022f8cfebfddf2d08ab2d300a41b61f3349933 | refs/heads/main | 2023-09-02T08:43:48.241198 | 2023-08-19T07:28:12 | 2023-08-19T07:28:12 | 23,812,315 | 1,506 | 421 | Apache-2.0 | 2023-09-04T08:24:53 | 2014-09-08T23:29:28 | Python | UTF-8 | Python | false | false | 1,704 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the codepage CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import codepage
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class CodepagergumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the codepage CLI arguments helper."""
# pylint: disable=no-member,protected-access
_EXPECTED_OUTPUT = """\
usage: cli_helper.py [--codepage CODEPAGE]
Test argument parser.
{0:s}:
--codepage CODEPAGE The preferred codepage, which is used for decoding
single-byte or multi-byte character extracted strings.
""".format(cli_test_lib.ARGPARSE_OPTIONS)
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog='cli_helper.py', description='Test argument parser.',
add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
codepage.CodepageArgumentsHelper.AddArguments(argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
options.preferred_codepage = 'cp1252'
test_tool = tools.CLITool()
codepage.CodepageArgumentsHelper.ParseOptions(options, test_tool)
self.assertEqual(test_tool._preferred_codepage, options.preferred_codepage)
with self.assertRaises(errors.BadConfigObject):
codepage.CodepageArgumentsHelper.ParseOptions(options, None)
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | log2timeline.noreply@github.com |
eb98e85b9ab7e02cbf47d85a6b890c00abf277a8 | e3eead40e93fdf5186269536edefab4f08e9a5a2 | /LeetCode/161-one_edit_distance.py | d2fda9fcd5d0e610b12400ba057379ba56eaf180 | [] | no_license | davll/practical-algorithms | bbc930b42363cae00ce39e8a686854c19131d334 | 0e35e4cc87bd41144b8e34302aafe776fec1b356 | refs/heads/master | 2021-08-22T13:12:34.555074 | 2020-03-28T08:56:13 | 2020-03-28T08:56:13 | 147,224,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | def one_edit_distance(s1, s2):
n1, n2 = len(s1), len(s2)
l, r = 0, 0
while l < n1 and l < n2:
if s1[l] == s2[l]:
l += 1
else:
break
n1, n2 = n1-l, n2-l
while r < n1 and r < n2:
if s1[-1-r] == s2[-1-r]:
r += 1
else:
break
n1, n2 = n1-r, n2-r
return max(n1, n2) == 1
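# After the first loop strips the common prefix and the second strips the
# common suffix, only the differing middle remains; the strings are exactly
# one edit apart iff the longer remainder has length 1.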
class Solution:
def isOneEditDistance(self, s: str, t: str) -> bool:
return one_edit_distance(s, t)
| [
"davll.xc@gmail.com"
] | davll.xc@gmail.com |
1ca107d39fcf682c12bd191ef447080ea774f49a | 6b05bddf2e294c8e1b39846aecadfa06b4ff805d | /test/test_v1_generation_status.py | 121f9db48f40ba73016ca068c091830bc77627e5 | [
"Apache-2.0"
] | permissive | kubevirt/client-python | 5ca82fe55d48c07f62796d2bed3605a7c189922c | 235fe17f58d41165010be7e4122cb67bdc866fe7 | refs/heads/master | 2023-09-03T12:25:27.272479 | 2023-08-17T00:33:31 | 2023-08-17T00:33:31 | 105,017,761 | 29 | 25 | Apache-2.0 | 2022-10-20T13:52:10 | 2017-09-27T12:51:32 | Python | UTF-8 | Python | false | false | 925 | py | # coding: utf-8
"""
KubeVirt API
    This is the KubeVirt API, an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_generation_status import V1GenerationStatus
class TestV1GenerationStatus(unittest.TestCase):
""" V1GenerationStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1GenerationStatus(self):
"""
Test V1GenerationStatus
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1_generation_status.V1GenerationStatus()
pass
if __name__ == '__main__':
unittest.main()
| [
"kubevirt-bot"
] | kubevirt-bot |
ca0d61bc85b983481ef9efb4c5f39e48b6622d3b | 2709e527c217a8264b48e2f549b3284e5ccb9551 | /0x09-python-everything_is_object/100-magic_string.py | f70d7350af05a64e469e472edab507ed4a90ae61 | [] | no_license | kwhit2/holbertonschool-higher_level_programming | 489d6b88ed14b9f2efd4637d8a71ae569b5027f6 | 2660516b12fee0f03c4025ba1d8d2762a8880a06 | refs/heads/main | 2023-05-22T17:57:02.035803 | 2021-06-12T18:43:54 | 2021-06-12T18:43:54 | 319,346,696 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | #!/usr/bin/python3
def magic_string(str=[]):
str.append("Holberton") # this also works: str += ["Holberton"]
return (", ".join(str))
| [
"kfw2@outlook.com"
] | kfw2@outlook.com |
b0ed5be72b248dee34ba8ffdeb7e2c8fc09ba3c3 | e14372adf86d3c4f9e73c9f7111db3215c696c3d | /1.入门/二级/LE8.py | c0e3f65646dff938600e801494d64b31300cde86 | [] | no_license | hewei-bit/PYTHON_learning | 71ddd7560a52575528547187f4fb40f39a3cbbdb | 18de8e5bdca165df5a5a4b5e0887846593656f4e | refs/heads/master | 2022-12-02T13:38:05.907135 | 2020-08-13T04:57:41 | 2020-08-13T04:57:41 | 261,647,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | import jieba
txt = open("threekingdoms.txt","r",encoding='utf-8').read()
excludes = {"商议","如何","主公","军士","将军","却说","不可","不能","如此","二人","荆州"}
words = jieba.lcut(txt)
print(words)
counts = {}
for word in words:
if len(word) == 1:
continue
elif word == "诸葛亮" or word == "孔明曰":
rword = "孔明"
elif word=="关公" or word == "云长":
rword = "关羽"
elif word=="玄德" or word == "玄德曰":
rword = "刘备"
elif word=="孟德" or word == "丞相":
rword = "曹操"
else:
rword = word
counts[rword] = counts.get(rword,0) + 1
for word in excludes:
del counts[word]
items = list(counts.items())
items.sort(key = lambda x:x[1],reverse = True)
for i in range(10):
word,count = items[i]
print("{0:<15}{1:>5}".format(word,count))
'''
def getText():
txt = open("hamlet.txt","r").read()
txt = txt.lower()
for ch in "!@#$%^&*+_-,./{|}~`‘’“”;:[\\]?=":
txt = txt.replace(ch,"")
return txt
hamletTxt = getText()
words = hamletTxt.split()
counts = {}
for word in words:
counts[word] = counts.get(word,0) + 1
items = list(counts.items())
items.sort(key=lambda x:x[1],reverse = True)
for i in range(10):
word,count = items[i]
print("{0:<10}{1:>5}".format(word,count))
'''
'''
def getNum():
nums = []
iNumStr = input("请输入数字(回车退出)")
while iNumStr != '':
nums.append(eval(iNumStr))
iNumStr = input("请输入数字(回车退出)")
return nums
def mean(numbers):
isum = 0.0
for i in numbers:
isum += i
return isum/len(numbers)
def dev(numbers,mean):
sdev = 0.0
for num in numbers:
sdev = sdev + (num - mean)**2
return pow(sdev / (len(numbers)-1),0.5)
def median(numbers):
    numbers = sorted(numbers)  # sorted() returns a new list; the bare call discarded it
size = len(numbers)
if size % 2 == 0:
med = (numbers[size//2-1] + numbers[size//2])/2
else:
med = numbers[size//2]
return med
n = getNum()
m = mean(n)
print("平均值:{},方差:{},中位数:{} ".format(m,dev(n,m),median(n)))
'''
| [
"1003826976@qq.com"
] | 1003826976@qq.com |
0962cb9637e184ab6273ea26fe316387229a270a | 9c4850697d66c6119e1d0b1f6347c1ea1d5d1ebb | /devilry/devilry_qualifiesforexam/tests/rest/test_status.py | 41a3aad48fdde302c0a24d6cde12c8e8ecd3e0a8 | [] | no_license | evestera/devilry-django | 7952e0d65f23af6c4dc2fd22fb97462e4231deac | 760a4ca1c40a7ac0d60f7675efa6919ffae585da | refs/heads/master | 2020-12-25T20:09:00.756243 | 2015-05-21T16:33:47 | 2015-05-21T16:33:47 | 36,460,383 | 0 | 0 | null | 2015-05-28T19:14:10 | 2015-05-28T19:14:09 | null | UTF-8 | Python | false | false | 17,687 | py | from django.test import TransactionTestCase
from django.core.urlresolvers import reverse
from devilry.apps.core.testhelper import TestHelper
from devilry.apps.core.models import Period
from devilry.devilry_rest.testclient import RestClient
from devilry.devilry_qualifiesforexam.models import Status
from devilry.devilry_qualifiesforexam.pluginhelpers import create_settings_sessionkey
from devilry.devilry_qualifiesforexam.pluginhelpers import PluginResultsFailedVerification
from devilry.devilry_qualifiesforexam.registry import qualifiesforexam_plugins
def noop(*args):
pass
class TestRestStatus(TransactionTestCase):
def setUp(self):
self.testhelper = TestHelper()
self.testhelper.add(nodes='uni:admin(uniadmin)',
subjects=['sub'],
periods=[
'oldperiod:admin(periodadmin):begins(-12):ends(2)',
'p1:admin(periodadmin):begins(-3):ends(6)',
'p2:admin(periodadmin):begins(-1):ends(6)'])
self.client = RestClient()
self.url = reverse('devilry_qualifiesforexam-rest-status')
self.testhelper.create_superuser('superuser')
qualifiesforexam_plugins.add(
id = 'devilry_qualifiesforexam.test.noop-plugin',
url = '/some/noop-url',
title = 'Noop',
post_statussave=noop,
description = 'noop',
pluginsettings_summary_generator = lambda status: 'noop summary'
)
def tearDown(self):
for pluginid in ('devilry_qualifiesforexam.test.plugin', 'devilry_qualifiesforexam.test.noop-plugin'):
if pluginid in qualifiesforexam_plugins:
del qualifiesforexam_plugins.items[pluginid]
def _get_url(self, periodid=None):
if periodid:
return '{0}{1}'.format(self.url, periodid)
else:
return self.url
def _create_relatedstudent(self, username, fullname=None):
user = getattr(self.testhelper, username, None)
if not user:
user = self.testhelper.create_user(username, fullname=fullname)
relstudent = self.testhelper.sub_p1.relatedstudent_set.create(user=user)
return relstudent
def _postas(self, username, data):
self.client.login(username=username, password='test')
return self.client.rest_post(self._get_url(), data)
def _test_post_as(self, username):
self.assertEquals(Status.objects.count(), 0)
relatedStudent1 = self._create_relatedstudent('student1', 'Student One')
relatedStudent2 = self._create_relatedstudent('student2', 'Student Two')
content, response = self._postas(username, {
'period': self.testhelper.sub_p1.id,
'status': 'ready',
'message': 'This is a test',
'plugin': 'devilry_qualifiesforexam.test.noop-plugin',
'pluginsessionid': 'tst',
'passing_relatedstudentids': [relatedStudent1.id]
})
self.assertEquals(response.status_code, 201)
self.assertEquals(Status.objects.count(), 1)
status = Status.objects.all()[0]
self.assertEquals(status.period, self.testhelper.sub_p1)
self.assertEquals(status.status, 'ready')
self.assertEquals(status.message, 'This is a test')
self.assertEquals(status.plugin, 'devilry_qualifiesforexam.test.noop-plugin')
self.assertEqual(status.students.count(), 2)
qualifies1 = status.students.get(relatedstudent=relatedStudent1)
qualifies2 = status.students.get(relatedstudent=relatedStudent2)
self.assertTrue(qualifies1.qualifies)
self.assertFalse(qualifies2.qualifies)
def test_post_as_periodadmin(self):
self._test_post_as(self.testhelper.periodadmin)
def test_post_as_nodeadmin(self):
self._test_post_as(self.testhelper.uniadmin)
def test_post_as_superuser(self):
self._test_post_as(self.testhelper.superuser)
def test_post_as_nobody(self):
self.testhelper.create_user('nobody')
content, response = self._postas('nobody', {
'period': self.testhelper.sub_p1.id,
'status': 'ready',
'message': 'This is a test',
'plugin': 'devilry_qualifiesforexam.test.noop-plugin',
'pluginsessionid': 'tst',
'passing_relatedstudentids': [10]
})
self.assertEqual(response.status_code, 403)
def test_post_almostready(self):
relatedStudent1 = self._create_relatedstudent('student1', 'Student One')
relatedStudent2 = self._create_relatedstudent('student2', 'Student Two')
content, response = self._postas('periodadmin', {
'period': self.testhelper.sub_p1.id,
'status': 'almostready',
'message': 'This is a test',
'plugin': 'devilry_qualifiesforexam.test.noop-plugin',
'pluginsessionid': 'tst',
'passing_relatedstudentids': [relatedStudent1.id],
'notready_relatedstudentids': [relatedStudent2.id]
})
self.assertEquals(response.status_code, 201)
status = Status.objects.all()[0]
self.assertEquals(status.status, 'almostready')
self.assertEqual(status.students.count(), 2)
qualifies1 = status.students.get(relatedstudent=relatedStudent1)
qualifies2 = status.students.get(relatedstudent=relatedStudent2)
self.assertTrue(qualifies1.qualifies)
self.assertIsNone(qualifies2.qualifies)
def test_post_notreadystudents_with_invalidstatus(self):
relatedStudent1 = self._create_relatedstudent('student1', 'Student One')
content, response = self._postas('periodadmin', {
'period': self.testhelper.sub_p1.id,
'status': 'ready', # Could choose any status except almostready for this test to be valid
'plugin': 'devilry_qualifiesforexam.test.noop-plugin',
'pluginsessionid': 'tst',
'notready_relatedstudentids': [relatedStudent1.id]
})
self.assertEquals(response.status_code, 400)
self.assertEqual(content['details'],
u'Only the ``almostready`` status allows marking students as not ready for export.')
def test_post_notready_check_studentsignored(self):
relatedStudent1 = self._create_relatedstudent('student1', 'Student One')
content, response = self._postas('periodadmin', {
'period': self.testhelper.sub_p1.id,
'status': 'notready',
'pluginsessionid': 'tst',
'message': 'Test'
})
self.assertEquals(response.status_code, 201)
status = Status.objects.all()[0]
self.assertEquals(status.status, 'notready')
self.assertEquals(status.message, 'Test')
self.assertEqual(status.students.count(), 0)
def test_post_notready_messagerequired(self):
relatedStudent1 = self._create_relatedstudent('student1', 'Student One')
content, response = self._postas('periodadmin', {
'period': self.testhelper.sub_p1.id,
'status': 'notready',
'pluginsessionid': 'tst',
'message': ' ',
'plugin': 'devilry_qualifiesforexam.test.noop-plugin'
})
self.assertEquals(response.status_code, 400)
self.assertEqual(content['errors'][0], u'Message can not be empty when status is ``notready``.')
def test_post_invalidstatus(self):
relatedStudent1 = self._create_relatedstudent('student1', 'Student One')
content, response = self._postas('periodadmin', {
'period': self.testhelper.sub_p1.id,
'status': 'invalidstatus',
'pluginsessionid': 'tst',
'plugin': 'devilry_qualifiesforexam.test.noop-plugin',
'passing_relatedstudentids': [relatedStudent1.id]
})
self.assertEqual(response.status_code, 400)
self.assertEqual(content['field_errors']['status'][0],
u'Select a valid choice. invalidstatus is not one of the available choices.')
def _getinstanceas(self, username, periodid):
self.client.login(username=username, password='test')
return self.client.rest_get(self._get_url(periodid))
def _test_getinstance_as(self, username):
relatedStudent1 = self._create_relatedstudent('student1', 'Student One')
relatedStudent2 = self._create_relatedstudent('student2', 'Student Two')
status = Status(
period = self.testhelper.sub_p1,
status = 'ready',
message = 'Test',
user = getattr(self.testhelper, username),
plugin = 'devilry_qualifiesforexam.test.noop-plugin'
)
status.save()
status.students.create(relatedstudent=relatedStudent1, qualifies=True)
status.students.create(relatedstudent=relatedStudent2, qualifies=False)
content, response = self._getinstanceas(username, self.testhelper.sub_p1.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(set(content.keys()),
set(['id', 'perioddata', 'statuses', u'is_active', u'short_name', u'long_name', u'subject']))
self.assertEqual(content['id'], self.testhelper.sub_p1.id)
statuses = content['statuses']
self.assertEqual(len(statuses), 1)
self.assertEqual(set(statuses[0].keys()),
set([u'id', u'status', u'plugin', u'statustext',
u'period', u'passing_relatedstudentids_map',
u'user', u'message', u'createtime', u'pluginsettings_summary',
u'plugin_description', u'plugin_title']))
self.assertEqual(statuses[0]['period'], self.testhelper.sub_p1.id)
self.assertEqual(statuses[0]['status'], 'ready')
self.assertEqual(statuses[0]['message'], 'Test')
self.assertEqual(statuses[0]['plugin'], 'devilry_qualifiesforexam.test.noop-plugin')
self.assertEqual(statuses[0]['pluginsettings_summary'], 'noop summary')
self.assertIn(str(relatedStudent1.id), statuses[0]['passing_relatedstudentids_map'])
def test_getinstance_as_periodadmin(self):
self._test_getinstance_as('periodadmin')
def test_getinstance_as_nodeadmin(self):
self._test_getinstance_as('uniadmin')
def test_getinstance_as_superuser(self):
self._test_getinstance_as('superuser')
def test_getinstanceas_nobody(self):
self.testhelper.create_user('nobody')
content, response = self._getinstanceas('nobody', self.testhelper.sub_p1.id)
self.assertEqual(response.status_code, 403)
def test_getinstance_no_statuses(self):
content, response = self._getinstanceas('periodadmin', self.testhelper.sub_p1.id)
self.assertEqual(response.status_code, 404)
self.assertEquals(content['detail'], u'The period has no statuses')
def test_getinstance_invalid_period(self):
periodid = 10000
self.assertFalse(Period.objects.filter(id=periodid).exists()) # Just to be sure we dont get false positives
content, response = self._getinstanceas('periodadmin', periodid)
self.assertEqual(response.status_code, 404)
self.assertEquals(content['detail'], u'The period with ID 10000 does not exist')
def _getlistas(self, username, **kwargs):
self.client.login(username=username, password='test')
return self.client.rest_get(self._get_url(), **kwargs)
def _createlistteststatus(self, period, status='ready',
plugin='devilry_qualifiesforexam.test.noop-plugin'):
status = Status(
period = period,
status = status,
message = 'Test',
user = self.testhelper.periodadmin,
plugin = plugin
)
status.full_clean()
status.save()
return status
def _test_getlist_as(self, username):
self._createlistteststatus(self.testhelper.sub_oldperiod)
self._createlistteststatus(self.testhelper.sub_p1, status='notready', plugin='')
import time
time.sleep(0.1) # Sleep to make sure the status below is the active status
status = self._createlistteststatus(self.testhelper.sub_p1)
content, response = self._getlistas(username)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(content), 2)
p1info = content[0]
self.assertEqual(p1info['id'], self.testhelper.sub_p1.id)
self.assertEqual(p1info['active_status']['id'], status.id)
p2info = content[1]
self.assertEqual(p2info['id'], self.testhelper.sub_p2.id)
self.assertEqual(p2info['active_status'], None)
def test_getlist_as_periodadmin(self):
self._test_getlist_as('periodadmin')
def test_getlist_as_nodeadmin(self):
self._test_getlist_as('uniadmin')
def test_getlist_as_superuser(self):
self._test_getlist_as('superuser')
def test_getlist_as_nobody(self):
self.testhelper.create_user('nobody')
content, response = self._getlistas('nobody')
self._createlistteststatus(self.testhelper.sub_p1,
status='notready', plugin='')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(content), 0)
def test_get_within_node(self):
self.testhelper.add(nodes='uni.extra:admin(extraadmin)',
subjects=['othersub'],
periods=['p1:admin(periodadmin):begins(-3):ends(6)'])
content, response = self._getlistas('extraadmin', node_id=self.testhelper.uni_extra.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(content), 1)
p1info = content[0]
self.assertEqual(p1info['id'], self.testhelper.othersub_p1.id)
def test_get_within_node_notactive(self):
self.testhelper.add(nodes='uni.extra:admin(extraadmin)',
subjects=['othersub'],
periods=['old:admin(periodadmin):begins(-12):ends(6)'])
content, response = self._getlistas('extraadmin', node_id=self.testhelper.uni_extra.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(content), 0)
def test_get_within_node_notadmin_on_requested(self):
self.testhelper.add(nodes='uni.extra:admin(extraadmin)')
content, response = self._getlistas('extraadmin', node_id=self.testhelper.uni.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(content), 0)
def test_save_settings(self):
savedsettings = {}
def save_settings(status, settings):
savedsettings['status'] = status
savedsettings['settings'] = settings
qualifiesforexam_plugins.add(
id = 'devilry_qualifiesforexam.test.plugin',
url = '/some/url',
title = 'Test',
description = 'A test',
uses_settings=True,
post_statussave = save_settings
)
self.client.login(username='periodadmin', password='test')
session = self.client.session
session[create_settings_sessionkey('tst')] = {'test': 'settings'}
session.save()
content, response = self._postas('periodadmin', {
'period': self.testhelper.sub_p1.id,
'status': 'ready',
'plugin': 'devilry_qualifiesforexam.test.plugin',
'pluginsessionid': 'tst',
'passing_relatedstudentids': []
})
self.assertEqual(response.status_code, 201)
self.assertEqual(len(savedsettings), 2)
self.assertEqual(savedsettings['settings'], {'test': 'settings'})
self.assertIsInstance(savedsettings['status'], Status)
def test_save_settings_missing_sessiondata(self):
def save_settings(status, settings):
pass
qualifiesforexam_plugins.add(
id = 'devilry_qualifiesforexam.test.plugin',
url = '/some/url',
title = 'Test',
uses_settings = True,
description = 'A test',
post_statussave = save_settings
)
self.assertEquals(Status.objects.count(), 0)
content, response = self._postas('periodadmin', {
'period': self.testhelper.sub_p1.id,
'status': 'ready',
'plugin': 'devilry_qualifiesforexam.test.plugin',
'pluginsessionid': 'tst',
'passing_relatedstudentids': []
})
self.assertEqual(response.status_code, 400)
self.assertEqual(content['detail'],
u'The "devilry_qualifiesforexam.test.plugin"-plugin requires settings - no settings found in the session.')
self.assertEquals(Status.objects.count(), 0) # The database rolled back because of the error
def test_fail_verification(self):
def post_statussave(status, settings):
raise PluginResultsFailedVerification('Invalid')
qualifiesforexam_plugins.add(
id = 'devilry_qualifiesforexam.test.plugin',
url = '/some/url',
title = 'Test',
description = 'A test',
post_statussave = post_statussave
)
self.assertEquals(Status.objects.count(), 0)
content, response = self._postas('periodadmin', {
'period': self.testhelper.sub_p1.id,
'status': 'ready',
'plugin': 'devilry_qualifiesforexam.test.plugin',
'pluginsessionid': 'tst',
'passing_relatedstudentids': []
})
self.assertEqual(response.status_code, 400)
self.assertEqual(content['detail'], u'Invalid')
self.assertEquals(Status.objects.count(), 0) # The database rolled back because of the error
| [
"post@espenak.net"
] | post@espenak.net |
1e8b36d3decd0e0499113472c2e39046014353fa | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/16/51/8.py | 0a0a76035762a98ccb3b14bab09892bf3db8e467 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import sys
import os
class Stack(object):
def __init__(self, N):
self.a = ['X'] * N
self.n = 0
def push(self, x):
self.a[self.n] = x
self.n += 1
def top(self):
return self.a[self.n-1]
def pop(self):
self.n -= 1
return self.a[self.n]
def main():
T = int(sys.stdin.readline())
for t in xrange(1, T+1):
s = sys.stdin.readline().strip()
ret = 0
st = Stack(len(s))
for x in s:
if st.n and st.top() == x:
ret += 10
st.pop()
else:
st.push(x)
ret += 5 * st.n/2
print "Case #%d: %s" % (t, ret)
if __name__ == '__main__':
main()
| [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
380a93700651bc938a605253ad4f55098290336a | 2d358ffb51f03cc64cc2da0f684b0928aebe139c | /test2/booktest/migrations/0002_areainfo.py | 5907812c65cd3ab0178138341bec716118c0025e | [] | no_license | 853695319/learningdjango | 195ffabdbd3a5b6bc4386cbb678504c0d2cd0095 | d2aac1117bb2ca31e4f247a9d206adcf3a9f39a2 | refs/heads/master | 2020-05-03T04:59:16.094900 | 2019-04-23T06:25:02 | 2019-04-23T06:25:02 | 178,437,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('booktest', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AreaInfo',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('atitle', models.CharField(max_length=20)),
('aparent', models.ForeignKey(blank=True, null=True, to='booktest.AreaInfo')),
],
),
]
| [
"853695319@qq.com"
] | 853695319@qq.com |
5beb6ad74c365944a9dacad8a0775794bd47beee | 8dc7bafb4b20f7b556ca733c8a1e46ae8dd8d4cb | /accounting/accounting/doctype/payment_entry/payment_entry.py | 19e889378bebce608bf0293a5597c21df65c02c0 | [
"MIT"
] | permissive | MohamedAbdulsalam96/accounting | 06d50f236ae94cf7741bf2750a098ae18fa02fe0 | e4665df4dce863e0703b8b733255d200aa953c1c | refs/heads/master | 2023-05-30T19:19:14.365102 | 2021-06-08T10:08:36 | 2021-06-08T10:08:36 | 630,869,866 | 1 | 0 | null | 2023-04-21T10:42:57 | 2023-04-21T10:42:57 | null | UTF-8 | Python | false | false | 389 | py | # Copyright (c) 2021, ac and contributors
# For license information, please see license.txt
from accounting.accounting.doctype.gl_entry.utils import create_gl_entry
from frappe.model.document import Document
class PaymentEntry(Document):
def on_submit(self):
create_gl_entry(self, self.account_paid_to, self.amount, 0)
create_gl_entry(self, self.account_paid_from, 0, self.amount)
| [
"anand21nanda@gmail.com"
] | anand21nanda@gmail.com |
567fc4698f2d54fee0f1380d1be5135399468276 | eef614ad302e1662f51fe6d8a880cfeef10bd3b1 | /tests/functional_tests/accounts/test_signup.py | 75dec47aad56a16052232ca9d3e7a76b518e1b3a | [
"MIT"
] | permissive | gitter-badger/apostello | a334a5a63a833a9e4a84f5872d44da7bdccc5477 | d6eeacf7a726aa33a79676fdd3d05b25a05b0f70 | refs/heads/master | 2021-01-18T18:59:36.111493 | 2016-03-17T20:01:08 | 2016-03-17T20:01:08 | 54,472,652 | 0 | 0 | null | 2016-03-22T12:16:51 | 2016-03-22T12:16:50 | null | UTF-8 | Python | false | false | 3,403 | py | import pytest
from django.contrib.auth.models import User
from django.core import mail
from site_config.models import SiteConfiguration
@pytest.mark.django_db
@pytest.mark.slow
class TestSignup:
def test_sign_up(self, live_server, browser, users):
"""
Tests the sign up form and checks that the appropriate emails
have been sent afterwards.
"""
# signup
uri = '/accounts/signup'
browser.get(live_server + uri)
email_box = browser.find_elements_by_name('email')[0]
email_box.send_keys('testsignupemail@example.com')
password_box1 = browser.find_elements_by_name('password1')[0]
password_box1.send_keys('top_secret')
password_box2 = browser.find_elements_by_name('password2')[0]
password_box2.send_keys('top_secret')
login_button = browser.find_elements_by_xpath(
'html/body/div/div/form/button'
)[0]
login_button.click()
# check we have been redirected
assert '/accounts/confirm-email/' in browser.current_url
assert len(mail.outbox) == 1
# assert '[apostello] New User' in mail.outbox[0].subject # not sent
# when we have no office email set
assert 'Please Confirm Your E-mail Address' in mail.outbox[0].subject
for x in mail.outbox[0].body.split():
if x.startswith('http'):
confirm_url = x
browser.get(confirm_url)
confirm_button = browser.find_element_by_class_name('button')
confirm_button.click()
user = User.objects.get(email='testsignupemail@example.com')
assert not user.is_staff
assert not user.is_superuser
def test_first_user_sign_up(self, live_server, browser):
"""
Tests the sign up form and checks that the appropriate emails
have been sent afterwards.
Then we confirm the email and verify the user has been made an admin.
"""
# add an office email to test correct email is sent on sign up
config = SiteConfiguration.get_solo()
config.office_email = 'test@apostello.ninja'
config.save()
# signup
uri = '/accounts/signup'
browser.get(live_server + uri)
email_box = browser.find_elements_by_name('email')[0]
email_box.send_keys('testsignupemail@example.com')
password_box1 = browser.find_elements_by_name('password1')[0]
password_box1.send_keys('top_secret')
password_box2 = browser.find_elements_by_name('password2')[0]
password_box2.send_keys('top_secret')
login_button = browser.find_elements_by_xpath(
'html/body/div/div/form/button'
)[0]
login_button.click()
# check we have been redirected
assert '/accounts/confirm-email/' in browser.current_url
assert len(mail.outbox) == 2
assert '[apostello] New User' in mail.outbox[0].subject
assert 'Please Confirm Your E-mail Address' in mail.outbox[1].subject
for x in mail.outbox[1].body.split():
if x.startswith('http'):
confirm_url = x
browser.get(confirm_url)
confirm_button = browser.find_element_by_class_name('button')
confirm_button.click()
user = User.objects.get(email='testsignupemail@example.com')
assert user.is_staff
assert user.is_superuser
| [
"montgomery.dean97@gmail.com"
] | montgomery.dean97@gmail.com |
52bdf9b51ef3cb5ff7e208165146ae83d5eef6a2 | 71bc873c20fbc45bb5e13095d2474496818a23f9 | /code word2vec_experiment_/district_stop_words.py | 397ba3344690383eea7c5304e4c07ce74bdefc1f | [] | no_license | 2877992943/lianyun | f31c44ea2e266bae51cae4fa464d1bae368c8d3f | a872d6cd1b2eff402bcccb326d33d086816d87af | refs/heads/master | 2021-01-20T16:17:20.226401 | 2017-05-10T06:49:31 | 2017-05-10T06:49:31 | 90,830,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | #! -*- coding:utf-8 -*-
import pandas as pd
import sys,os,re
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
if __name__=='__main__':
path='../backup/2014_district.csv'
df=pd.read_csv(path,encoding='utf-8')
ll=df['dis'].values.tolist()
ll_clean=[]
ll_clean1=[]# more than 3 words ,strip 省
for item in ll:
if item==np.nan:continue
item=re.sub('[\s+]','',item)
ll_clean.append(item)
###
if item.decode('utf-8').__len__()>2:ll_clean1.append(item[:-1])
print len(ll_clean),' '.join(ll_clean)
print len(ll_clean1),' '.join(ll_clean1)
####
pd.to_pickle(ll_clean1+ll_clean,'../data/district_noise')
| [
"2877992943@qq.com"
] | 2877992943@qq.com |
58ac3cb19d3a3277dc1dda822c0f890412c72177 | 2218e1da5cb944e4509f8641ca051de137645c5e | /剑指 Offer/First/14-1.cuttingRope.py | f9cc32be30464432e53c133a87cfd362aabb6fd1 | [] | no_license | Hegemony/Python-Practice | 9e76ebb414433e51c2074602fb0a871891647839 | b68ea41688e9e305635c63fdc43402e2b6fe6524 | refs/heads/main | 2023-05-05T14:00:59.921803 | 2021-06-01T15:38:30 | 2021-06-01T15:38:30 | 301,602,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | class Solution:
def cuttingRope(self, n: int) -> int:
res = 0
for i in range(2, n + 1):
cnt = n // i
yu = n % i
res = max(res, (cnt + 1) ** yu * cnt ** (i - yu))
return res | [
"noreply@github.com"
] | Hegemony.noreply@github.com |
13a71adb0e45fcc83aa599afb5f5d43c08fd678f | ed11f664cbc459c7a4456dd58f2b231edcb22f33 | /ctm_api_client/models/agent_in_hostgroup.py | 25a98d8dbf0878138a0c8238cf908665308d20ce | [
"BSD-3-Clause"
] | permissive | jpmc216/ctm_python_client | c8b8ba60580bf869b3d1e6af9b99737e0a7ea527 | de44e5012214ec42bb99b7f9b4ebc5394cd14328 | refs/heads/main | 2023-08-26T22:06:34.022576 | 2021-10-25T13:41:31 | 2021-10-25T13:41:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,627 | py | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_api_client.configuration import Configuration
class AgentInHostgroup(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"host": "str",
"tag": "str",
"hostgroup_agent_participation": "HostgroupAgentParticipation",
}
attribute_map = {
"host": "host",
"tag": "tag",
"hostgroup_agent_participation": "hostgroupAgentParticipation",
}
def __init__(
self,
host=None,
tag=None,
hostgroup_agent_participation=None,
_configuration=None,
): # noqa: E501
"""AgentInHostgroup - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._host = None
self._tag = None
self._hostgroup_agent_participation = None
self.discriminator = None
self.host = host
if tag is not None:
self.tag = tag
if hostgroup_agent_participation is not None:
self.hostgroup_agent_participation = hostgroup_agent_participation
@property
def host(self):
"""Gets the host of this AgentInHostgroup. # noqa: E501
The hostname of the agent. # noqa: E501
:return: The host of this AgentInHostgroup. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this AgentInHostgroup.
The hostname of the agent. # noqa: E501
:param host: The host of this AgentInHostgroup. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and host is None:
raise ValueError(
"Invalid value for `host`, must not be `None`"
) # noqa: E501
self._host = host
@property
def tag(self):
"""Gets the tag of this AgentInHostgroup. # noqa: E501
Host Group tag. HIDDEN. # noqa: E501
:return: The tag of this AgentInHostgroup. # noqa: E501
:rtype: str
"""
return self._tag
@tag.setter
def tag(self, tag):
"""Sets the tag of this AgentInHostgroup.
Host Group tag. HIDDEN. # noqa: E501
:param tag: The tag of this AgentInHostgroup. # noqa: E501
:type: str
"""
self._tag = tag
@property
def hostgroup_agent_participation(self):
"""Gets the hostgroup_agent_participation of this AgentInHostgroup. # noqa: E501
The host condition. HIDDEN. # noqa: E501
:return: The hostgroup_agent_participation of this AgentInHostgroup. # noqa: E501
:rtype: HostgroupAgentParticipation
"""
return self._hostgroup_agent_participation
@hostgroup_agent_participation.setter
def hostgroup_agent_participation(self, hostgroup_agent_participation):
"""Sets the hostgroup_agent_participation of this AgentInHostgroup.
The host condition. HIDDEN. # noqa: E501
:param hostgroup_agent_participation: The hostgroup_agent_participation of this AgentInHostgroup. # noqa: E501
:type: HostgroupAgentParticipation
"""
self._hostgroup_agent_participation = hostgroup_agent_participation
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(AgentInHostgroup, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AgentInHostgroup):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AgentInHostgroup):
return True
return self.to_dict() != other.to_dict()
| [
"vtadinad@bmc.com"
] | vtadinad@bmc.com |
e6f27f97491f2a6ecc94d9372b6fdcd1658f6b7e | 4e22e93ecdb105df4e15c63f4503522b3525f70b | /ansible/ovirt-engine-ansible4/ov4_assigned_permissions | f936ffaff44356c2f6620a8a9c2915b06b06ba14 | [
"Apache-2.0"
] | permissive | machacekondra/ovirt-engine-ansible | df7e1d1bb14f154bc411bc628ee483898ecbc712 | ffc5fcb5d36f039e347208ab5f9fc672cfb1f596 | refs/heads/master | 2021-01-19T05:30:28.561742 | 2016-07-20T15:21:03 | 2016-07-20T15:21:03 | 61,116,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,138 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
DOCUMENTATION = '''
---
module: ov4_assigned_permissions
short_description: ov4_assigned_permissions module to manage assigned-permissions in oVirt
author: "Ondra Machacek (@machacekondra)"
version_added: 2.0
description:
- "This modules is used to manage oVirt assigned-permissions."
options:
method:
required: True
description:
- "Action to be run on assigned-permissions."
choices:
- add
- list
- get
- remove
auth:
required: True
description:
- "Dictionary with values needed to create HTTP connection to oVirt:"
- "** C(username)[I(required)] - The name of the user, something like `I(admin@internal)`."
- "** C(password)[I(required)] - The password of the user."
- "** C(url)[I(required)] - A string containing the base URL of the server, usually
something like `I(https://server.example.com/ovirt-engine/api)`."
- "** C(sso_token) - SSO token to be used instead of login with username/password."
- "** C(insecure) - A boolean flag that indicates if the server TLS
certificate and host name should be checked."
- "** C(ca_file) - A PEM file containing the trusted CA certificates. The
certificate presented by the server will be verified using these CA
certificates. If `C(ca_file)` parameter is not set, system wide
CA certificate store is used."
service:
required: false
description:
- "URL path of the service we want to work with, usually something like I(/vms/123/disks/456)."
parameters:
required: false
description:
- "Dictionary which specify additional parameters to be send with request."
- " C(add) parameters:"
- "** I(permission)[dict] - U(https://jhernand.fedorapeople.org/ovirt-api-explorer/#/types/permission)."
- " C(remove) parameters:"
- "** I(async)[boolean] - Indicates if the remove should be performed asynchronously."
'''
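# Illustrative usage sketch (added; not from the original module). It is built
# only from the options documented above - the URL, credentials and service
# path below are placeholders.
EXAMPLES = '''
- name: List permissions assigned to a virtual machine
  ov4_assigned_permissions:
    auth:
      url: https://server.example.com/ovirt-engine/api
      username: admin@internal
      password: secret
    method: list
    service: /vms/123
'''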
RETURN = '''
'''
import sys
import json
def add(connection, path, **kwargs):
request = Request(method='POST', path='%s/permissions' % path)
request.body = json.dumps(kwargs.pop('permission'))
response = connection.send(request)
if response.code in [201, 202]:
return {'changed': True, 'permission': response.body}
return {'changed': False, 'error': response.body}
def get(connection, path, **kwargs):
request = Request(method='GET', path='%s' % path, query=kwargs)
response = connection.send(request)
return {'changed': False, 'permission': response.body['permission']}
def list(connection, path, **kwargs):
request = Request(method='GET', path='%s/permissions' % path, query=kwargs)
response = connection.send(request)
if 'permission' in response.body:
return {'changed': False, 'permission': response.body['permission']}
return {'changed': False, 'error': response.body}
def remove(connection, path, **kwargs):
request = Request(method='DELETE', path='%s' % path, query=kwargs)
response = connection.send(request)
if response.code in [200]:
return {'changed': True}
return {'changed': False, 'error': response.body}
def main():
module = AnsibleModule(
argument_spec=dict(
method=dict(required=True, choices=['add', 'list', 'get', 'remove']),
auth=dict(required=True, type='dict'),
service=dict(required=False, type='str', default=''),
parameters=dict(required=False, type='dict', default=dict()),
)
)
auth = module.params.pop('auth')
connection = Connection(
url=auth.get('url'),
username=auth.get('username'),
password=auth.get('password'),
ca_file=auth.get('ca_file', None),
insecure=auth.get('insecure', False),
sso_token=auth.get('sso_token', None),
)
try:
method = module.params.pop('method')
ret = getattr(sys.modules[__name__], method)(connection, module.params['service'], **module.params.pop('parameters'))
module.exit_json(**ret)
except Error as e:
module.fail_json(msg="Error: %s" % e)
finally:
if auth.get('sso_token', None) is None:
connection.close()
from ansible.module_utils.basic import *
from ansible.module_utils.ovirt4 import *
if __name__ == "__main__":
main()
| [
"omachace@redhat.com"
] | omachace@redhat.com | |
55fa3f7713e3159c89ae3d4fb4dd6300c16bd5fb | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/222/61395/submittedfiles/testes.py | 4e2bd5734fb77391b86989a58aa509a2b9e0c261 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py |
def angulosexasimal(L):
    # converts an angle given as [degrees, minutes, seconds] to decimal degrees
    soma = L[0] + (L[1] / 60.0) + (L[2] / 3600.0)
    return soma
L = [23, 37, 28]
print(angulosexasimal(L))  # 23 + 37/60 + 28/3600 = 23.624444...
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
a6b7bb8cd0e86c6c35ca9e4e9bfd04ab18e13630 | aad164e4efe1d55cc189c35956bfd435b14a0f52 | /eve-8.21.494548/lib/carbonlib/trinity/windowsEvents.py | f2776157a9f13ed4b623582a3622bc8e728b2ef0 | [] | no_license | Pluckyduck/eve | 61cc41fe8fd4dca4fbdcc4761a37bcfeb27ed84f | 9a277707ab1f162c6bd9618faf722c0be3ea93ad | refs/heads/master | 2020-12-28T23:35:29.992875 | 2013-05-06T14:24:33 | 2013-05-06T14:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,434 | py | #Embedded file name: c:\depot\games\branches\release\EVE-TRANQUILITY\carbon\common\lib\trinity\windowsEvents.py
WM_NULL = 0
WM_CREATE = 1
WM_DESTROY = 2
WM_MOVE = 3
WM_SIZE = 5
WM_ACTIVATE = 6
WM_SETFOCUS = 7
WM_KILLFOCUS = 8
WM_ENABLE = 10
WM_SETREDRAW = 11
WM_SETTEXT = 12
WM_GETTEXT = 13
WM_GETTEXTLENGTH = 14
WM_PAINT = 15
WM_CLOSE = 16
WM_QUERYENDSESSION = 17
WM_QUIT = 18
WM_QUERYOPEN = 19
WM_ERASEBKGND = 20
WM_SYSCOLORCHANGE = 21
WM_ENDSESSION = 22
WM_SYSTEMERROR = 23
WM_SHOWWINDOW = 24
WM_CTLCOLOR = 25
WM_WININICHANGE = 26
WM_SETTINGCHANGE = 26
WM_DEVMODECHANGE = 27
WM_ACTIVATEAPP = 28
WM_FONTCHANGE = 29
WM_TIMECHANGE = 30
WM_CANCELMODE = 31
WM_SETCURSOR = 32
WM_MOUSEACTIVATE = 33
WM_CHILDACTIVATE = 34
WM_QUEUESYNC = 35
WM_GETMINMAXINFO = 36
WM_PAINTICON = 38
WM_ICONERASEBKGND = 39
WM_NEXTDLGCTL = 40
WM_SPOOLERSTATUS = 42
WM_DRAWITEM = 43
WM_MEASUREITEM = 44
WM_DELETEITEM = 45
WM_VKEYTOITEM = 46
WM_CHARTOITEM = 47
WM_SETFONT = 48
WM_GETFONT = 49
WM_SETHOTKEY = 50
WM_GETHOTKEY = 51
WM_QUERYDRAGICON = 55
WM_COMPAREITEM = 57
WM_COMPACTING = 65
WM_WINDOWPOSCHANGING = 70
WM_WINDOWPOSCHANGED = 71
WM_POWER = 72
WM_COPYDATA = 74
WM_CANCELJOURNAL = 75
WM_NOTIFY = 78
WM_INPUTLANGCHANGEREQUEST = 80
WM_INPUTLANGCHANGE = 81
WM_TCARD = 82
WM_HELP = 83
WM_USERCHANGED = 84
WM_NOTIFYFORMAT = 85
WM_CONTEXTMENU = 123
WM_STYLECHANGING = 124
WM_STYLECHANGED = 125
WM_DISPLAYCHANGE = 126
WM_GETICON = 127
WM_SETICON = 128
WM_NCCREATE = 129
WM_NCDESTROY = 130
WM_NCCALCSIZE = 131
WM_NCHITTEST = 132
WM_NCPAINT = 133
WM_NCACTIVATE = 134
WM_GETDLGCODE = 135
WM_NCMOUSEMOVE = 160
WM_NCLBUTTONDOWN = 161
WM_NCLBUTTONUP = 162
WM_NCLBUTTONDBLCLK = 163
WM_NCRBUTTONDOWN = 164
WM_NCRBUTTONUP = 165
WM_NCRBUTTONDBLCLK = 166
WM_NCMBUTTONDOWN = 167
WM_NCMBUTTONUP = 168
WM_NCMBUTTONDBLCLK = 169
WM_KEYFIRST = 256
WM_KEYDOWN = 256
WM_KEYUP = 257
WM_CHAR = 258
WM_DEADCHAR = 259
WM_SYSKEYDOWN = 260
WM_SYSKEYUP = 261
WM_SYSCHAR = 262
WM_SYSDEADCHAR = 263
WM_KEYLAST = 264
WM_IME_STARTCOMPOSITION = 269
WM_IME_ENDCOMPOSITION = 270
WM_IME_COMPOSITION = 271
WM_IME_KEYLAST = 271
WM_INITDIALOG = 272
WM_COMMAND = 273
WM_SYSCOMMAND = 274
WM_TIMER = 275
WM_HSCROLL = 276
WM_VSCROLL = 277
WM_INITMENU = 278
WM_INITMENUPOPUP = 279
WM_MENUSELECT = 287
WM_MENUCHAR = 288
WM_ENTERIDLE = 289
WM_CTLCOLORMSGBOX = 306
WM_CTLCOLOREDIT = 307
WM_CTLCOLORLISTBOX = 308
WM_CTLCOLORBTN = 309
WM_CTLCOLORDLG = 310
WM_CTLCOLORSCROLLBAR = 311
WM_CTLCOLORSTATIC = 312
WM_MOUSEFIRST = 512
WM_MOUSEMOVE = 512
WM_LBUTTONDOWN = 513
WM_LBUTTONUP = 514
WM_LBUTTONDBLCLK = 515
WM_RBUTTONDOWN = 516
WM_RBUTTONUP = 517
WM_RBUTTONDBLCLK = 518
WM_MBUTTONDOWN = 519
WM_MBUTTONUP = 520
WM_MBUTTONDBLCLK = 521
WM_MOUSEWHEEL = 522
WM_MOUSEHWHEEL = 526
WM_PARENTNOTIFY = 528
WM_ENTERMENULOOP = 529
WM_EXITMENULOOP = 530
WM_NEXTMENU = 531
WM_SIZING = 532
WM_CAPTURECHANGED = 533
WM_MOVING = 534
WM_POWERBROADCAST = 536
WM_DEVICECHANGE = 537
WM_MDICREATE = 544
WM_MDIDESTROY = 545
WM_MDIACTIVATE = 546
WM_MDIRESTORE = 547
WM_MDINEXT = 548
WM_MDIMAXIMIZE = 549
WM_MDITILE = 550
WM_MDICASCADE = 551
WM_MDIICONARRANGE = 552
WM_MDIGETACTIVE = 553
WM_MDISETMENU = 560
WM_ENTERSIZEMOVE = 561
WM_EXITSIZEMOVE = 562
WM_DROPFILES = 563
WM_MDIREFRESHMENU = 564
WM_IME_SETCONTEXT = 641
WM_IME_NOTIFY = 642
WM_IME_CONTROL = 643
WM_IME_COMPOSITIONFULL = 644
WM_IME_SELECT = 645
WM_IME_CHAR = 646
WM_IME_KEYDOWN = 656
WM_IME_KEYUP = 657
WM_MOUSEHOVER = 673
WM_NCMOUSELEAVE = 674
WM_MOUSELEAVE = 675
WM_CUT = 768
WM_COPY = 769
WM_PASTE = 770
WM_CLEAR = 771
WM_UNDO = 772
WM_RENDERFORMAT = 773
WM_RENDERALLFORMATS = 774
WM_DESTROYCLIPBOARD = 775
WM_DRAWCLIPBOARD = 776
WM_PAINTCLIPBOARD = 777
WM_VSCROLLCLIPBOARD = 778
WM_SIZECLIPBOARD = 779
WM_ASKCBFORMATNAME = 780
WM_CHANGECBCHAIN = 781
WM_HSCROLLCLIPBOARD = 782
WM_QUERYNEWPALETTE = 783
WM_PALETTEISCHANGING = 784
WM_PALETTECHANGED = 785
WM_HOTKEY = 786
WM_PRINT = 791
WM_PRINTCLIENT = 792
WM_HANDHELDFIRST = 856
WM_HANDHELDLAST = 863
WM_PENWINFIRST = 896
WM_PENWINLAST = 911
WM_COALESCE_FIRST = 912
WM_COALESCE_LAST = 927
WM_DDE_FIRST = 992
WM_DDE_INITIATE = 992
WM_DDE_TERMINATE = 993
WM_DDE_ADVISE = 994
WM_DDE_UNADVISE = 995
WM_DDE_ACK = 996
WM_DDE_DATA = 997
WM_DDE_REQUEST = 998
WM_DDE_POKE = 999
WM_DDE_EXECUTE = 1000
WM_DDE_LAST = 1000
WM_USER = 1024
WM_APP = 32768
WM_XBUTTONDOWN = 523
WM_XBUTTONUP = 524
MK_CONTROL = 8
MK_LBUTTON = 1
MK_MBUTTON = 16
MK_RBUTTON = 2
MK_SHIFT = 4
MK_XBUTTON1 = 32
MK_XBUTTON2 = 64
XBUTTON1 = 1
XBUTTON2 = 2 | [
"ferox2552@gmail.com"
] | ferox2552@gmail.com |
2da592d046f1fde36fb19d493634f5a7e2f922cd | d58542787230deb9efa0499451a7f228570562c1 | /djangoapps/schools/migrations/0006_auto_20170330_0720.py | 322872df2939273964e354697590eec098517d2a | [
"MIT"
] | permissive | serudda/waysily-server | 5d9edfeb14fef208ea7dd10dc06deb44b784bb4c | b2da3f3a97dbd1ef46d65f13ee9b2098124d4fc4 | refs/heads/master | 2023-03-03T05:36:50.689008 | 2017-12-27T15:25:32 | 2017-12-27T15:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-30 07:20
from __future__ import unicode_literals
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('schools', '0005_auto_20170330_0249'),
]
operations = [
migrations.AlterField(
model_name='immersion',
name='option',
field=multiselectfield.db.fields.MultiSelectField(choices=[(1, 'Games and activities with local'), (2, 'Coffee tasting experience'), (3, 'Beer tasting experience'), (4, 'Chocolate tasting experience'), (5, 'Pub crawl'), (6, 'Local food tasting'), (7, 'Local dance class'), (8, 'Cooking Local Food'), (9, 'Local Movies Night'), (10, 'Practice local sport')], max_length=20, verbose_name='Immersion options'),
),
]
| [
"sergioruizdavila@gmail.com"
] | sergioruizdavila@gmail.com |
cbc8f4f44e59bc6c67f6e78b2974635907244521 | 005f02cb534bbf91fe634fcf401441e1179365c8 | /8-Python Level 2/8.1-Scope(hierarkia e variablave)/scope.py | ad63f08884169ea6ecf4d5dead0fa576e946220a | [] | no_license | Ruxhino-B/django-deployment-example | 220a39a456871a1bf42a64fd5b945731056fc7b9 | e19713ac1e11af202152ad20d7c3c94891a77e83 | refs/heads/master | 2020-04-18T02:21:10.505691 | 2020-01-06T14:18:18 | 2020-01-06T14:25:25 | 167,159,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | # x = 25
# def my_func():
#     x = 50
#     return x
# print(x) #here 25 is printed because x is a global variable
# print(my_func()) #here 50 is printed because the function's own x is used
# my_func()
# print(x) #here 25 is printed again even though my_func was called above
#locals
#lambda x: x**2 #this x is a local variable
#enclosing function locals (locals of nested, enclosing functions)
# name = 'this is a global name!'
# def greet():
#     name = "Samy"
#     def hello():
#         print("hello " + name)
#     hello() #this prints hello Samy, because hello looks one level up for the variable name
#            #if name inside the greet function were deleted, hello would look one level
#            #further up for the variable name and print hello this is a global name!
# greet() #prints hello Samy: greet only assigns the value Samy, and hello prints it
# print(name) #this prints this is a global name again
#Built-in level: functions or variables that Python itself provides, e.g. len --> gives the length
#len = 23 #this is a mistake, because calling len would then give 23 instead of the function that measures length
x = 50
def func():
    global x #declare that we rebind the global variable x
    print('x is: ', x) #gives x is 50
    x = 100 #changes the global variable x
    print('x became: ', x) #gives x became 100
func()
print(x) #gives the value 100 because the global value of x was changed
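# A related sketch (added for illustration): to rebind a variable of the
# *enclosing* function instead of a global one, Python uses nonlocal:
# def outer():
#     n = 1
#     def inner():
#         nonlocal n
#         n = 2
#     inner()
#     print(n) #gives 2, because inner changed outer's n
# outer()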
| [
"ruxhino@gmail.com"
] | ruxhino@gmail.com |
73e53aef9409cf8adde4832c379ef7acbb4ac3d4 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/exercises/_algorithms_challenges/pybites/beginner/91/test_anyall.py | d7c14f07ab586e2dafa7a87f3d42bb10477a5bb5 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 994 | py | import pytest

from anyall import contains_only_vowels, contains_any_py_chars, contains_digits


@pytest.mark.parametrize("arg, expected", [
    ('aioue', True),
    ('EoUia', True),
    ('aaAiIee', True),
    ('AEIOU', True),
    ('aaeeouu', True),
    ('abcde', False),
    ('AE123', False),
    ('AiOuef', False),
])
def test_contains_only_vowels(arg, expected):
    assert bool(contains_only_vowels(arg)) is expected


@pytest.mark.parametrize("arg, expected", [
    ('Python', True),
    ('pycharm', True),
    ('PYTHON', True),
    ('teaser', True),
    ('bob', True),
    ('julian', True),
    ('yes', True),
    ('no', True),
    ('america', False),
    ('B@b', False),
    ('Jules', False),
    ('agua', False),
    ('123', False),
    ('', False),
])
def test_contains_any_py_chars(arg, expected):
    assert bool(contains_any_py_chars(arg)) is expected


@pytest.mark.parametrize("arg, expected", [
    ('yes1', True),
    ('123', True),
    ('hello2', True),
    ('up2date', True),
    ('yes', False),
    ('hello', False),
    ('', False),
])
def test_contains_digits(arg, expected):
    assert bool(contains_digits(arg)) is expected | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
04a74b3c4532d7de6e7e1b194009e0a4ffb452ef | a1798c553d5b0ddbbb323f3789db991e07105867 | /exec/bin/bash/google-cloud-sdk/.install/.backup/lib/surface/compute/networks/subnets/expand_ip_range.py | 5afff149ebc5aa364a22ec7be2b9cbc336c43bda | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/Wingman | d5d44752f4cd361d51b1e4a62076fe95106ec503 | 957143324fc795f034a57f529cd7b61b970f4a53 | refs/heads/master | 2022-11-20T05:43:31.780030 | 2016-09-30T02:07:00 | 2016-09-30T02:07:00 | 282,287,992 | 0 | 0 | null | 2020-07-24T18:15:54 | 2020-07-24T18:15:54 | null | UTF-8 | Python | false | false | 5,646 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for expanding IP range of a subnetwork."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as exceptions
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.core.console import console_io
import ipaddr
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class ExpandIpRange(base_classes.NoOutputAsyncMutator):
"""Expand IP range of a subnetwork."""
@staticmethod
def Args(parser):
flags.AddRegionFlag(
parser,
resource_type='subnetwork',
operation_type='expand IP range for')
parser.add_argument(
'--prefix-length',
type=int,
help=(
'The new prefix length of the subnet. It must be smaller than the '
'original and in the private address space 10.0.0.0/8, '
'172.16.0.0/12 or 192.168.0.0/16 defined in RFC 1918.'),
required=True)
parser.add_argument(
'name',
completion_resource='compute.subnetworks',
help='The name of the subnetwork for which to expand IP range.')
@property
def service(self):
return self.compute.subnetworks
@property
def method(self):
return 'ExpandIpCidrRange'
@property
def resource_type(self):
return 'subnetworks'
def CreateRequests(self, args):
"""Returns requests for expanding IP CIDR range."""
new_prefix_length = self._ValidatePrefixLength(args.prefix_length)
subnetwork_ref = self.CreateRegionalReference(args.name, args.region)
original_ip_cidr_range = self._GetOriginalIpCidrRange(subnetwork_ref)
new_ip_cidr_range = self._InferNewIpCidrRange(
subnetwork_ref.Name(), original_ip_cidr_range, new_prefix_length)
self._PromptToConfirm(
subnetwork_ref.Name(), original_ip_cidr_range, new_ip_cidr_range)
request = self._CreateExpandIpCidrRangeRequest(
subnetwork_ref, new_ip_cidr_range)
return [request]
def _ValidatePrefixLength(self, new_prefix_length):
if not 0 <= new_prefix_length <= 29:
raise exceptions.InvalidArgumentException(
'--prefix-length',
'Prefix length must be in the range [0, 29].')
return new_prefix_length
def _GetOriginalIpCidrRange(self, subnetwork_ref):
subnetwork = self._GetSubnetwork(subnetwork_ref)
if not subnetwork:
raise exceptions.ToolException(
'Subnet [{subnet}] was not found in region {region}.'.format(
subnet=subnetwork_ref.Name(), region=subnetwork_ref.region))
return subnetwork['ipCidrRange']
def _InferNewIpCidrRange(
self, subnet_name, original_ip_cidr_range, new_prefix_length):
unmasked_new_ip_range = '{0}/{1}'.format(
original_ip_cidr_range.split('/')[0],
new_prefix_length)
network = ipaddr.IPv4Network(unmasked_new_ip_range)
return str(network.masked())
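  # Worked example (added; addresses are placeholders): expanding 10.148.0.0/20
  # with --prefix-length 16 keeps the address bits and re-masks them, so
  # str(ipaddr.IPv4Network('10.148.0.0/16').masked()) == '10.148.0.0/16'.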
def _PromptToConfirm(
self, subnetwork_name, original_ip_cidr_range, new_ip_cidr_range):
prompt_message_template = (
'The IP range of subnetwork [{0}] will be expanded from {1} to {2}. '
'This operation may take several minutes to complete '
'and cannot be undone.')
prompt_message = prompt_message_template.format(
subnetwork_name, original_ip_cidr_range, new_ip_cidr_range)
if not console_io.PromptContinue(message=prompt_message, default=True):
raise exceptions.ToolException('Operation aborted by user.')
def _CreateExpandIpCidrRangeRequest(self, subnetwork_ref, new_ip_cidr_range):
request_body = self.messages.SubnetworksExpandIpCidrRangeRequest(
ipCidrRange=new_ip_cidr_range)
return self.messages.ComputeSubnetworksExpandIpCidrRangeRequest(
subnetwork=subnetwork_ref.Name(),
subnetworksExpandIpCidrRangeRequest=request_body,
project=self.project,
region=subnetwork_ref.region)
def _GetSubnetwork(self, subnetwork_ref):
get_request = (
self.compute.subnetworks,
'Get',
self.messages.ComputeSubnetworksGetRequest(
project=self.project,
region=subnetwork_ref.region,
subnetwork=subnetwork_ref.Name()))
errors = []
objects = request_helper.MakeRequests(
requests=[get_request],
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None)
resources = list(lister.ProcessResults(objects, field_selector=None))
return resources[0] if resources else None
ExpandIpRange.detailed_help = {
'brief': 'Expand the IP range of a Google Compute Engine subnetwork',
'DESCRIPTION': """\
*{command}* is used to expand the IP range of a subnetwork in a custom
mode network.
""",
'EXAMPLES': """\
To expand the IP range of ``SUBNET'' to /16, run:
$ {command} SUBNET --region us-central1 --prefix-length 16
""",
}
| [
"tobiah.rex@gmail.com"
] | tobiah.rex@gmail.com |
90df0b83ccc840e73645521d83b6cac57016345f | e6945ece453368c03a77626833e38416ac736a66 | /algorithms/basic_algorithms/sort012.py | 4c05fc48dfafe6a468e49c2c9367b4c5e1b6beb7 | [] | no_license | aa-ag/nano | f8949efa0e6c5df9365629ef7e4a7447ea0d8bb7 | 3133662b7095069485a9860bef58f9bc5760ada3 | refs/heads/main | 2023-04-17T05:28:12.621387 | 2021-04-14T00:52:54 | 2021-04-14T00:52:54 | 338,670,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | def sort_012(input_list):
"""
The idea is to put 0 and 2 in their correct positions, which will make sure
all the 1s are automatically placed in their right positions
"""
# initialize pointers for next positions of 0 and 2
next_pos_0 = 0
next_pos_2 = len(input_list) - 1
front_index = 0
while front_index <= next_pos_2:
if input_list[front_index] == 0:
input_list[front_index] = input_list[next_pos_0]
input_list[next_pos_0] = 0
next_pos_0 += 1
front_index += 1
elif input_list[front_index] == 2:
input_list[front_index] = input_list[next_pos_2]
input_list[next_pos_2] = 2
next_pos_2 -= 1
else:
front_index += 1
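# Added note: this is the classic Dutch national flag partition - one pass with
# two swap pointers, so it sorts the 0/1/2 values in O(n) time and O(1) extra
# space, in place.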
# tests
def test_function(test_case):
sort_012(test_case)
if test_case == sorted(test_case):
print("Pass")
else:
print("Fail")
# test 1
test_case = [0, 0, 2, 2, 2, 1, 1, 1, 2, 0, 2]
test_function(test_case)
# test 2
test_case = [2, 1, 2, 0, 0, 2, 1, 0, 1, 0, 0,
2, 2, 2, 1, 2, 0, 0, 0, 2, 1, 0, 2, 0, 0, 1]
test_function(test_case)
# test 3
test_case = [2, 2, 0, 0, 2, 1, 0, 2, 2, 1, 1, 1, 0, 1, 2, 0, 2, 0, 1]
test_function(test_case)
| [
"aaron.aguerrevere@gmail.com"
] | aaron.aguerrevere@gmail.com |
f44b98f23b014660859af3b6c140b53872bde1d8 | f7630fd6c829cb306e72472296e3a513844d99af | /lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_fsp_vlan_interface_secondaryip.py | 4919d980d2fd35d11d074bd3bca64cebe88f986c | [] | no_license | baltah666/automation | 6eccce20c83dbe0d5aa9a82a27937886e3131d32 | 140eb81fe9bacb9a3ed1f1eafe86edeb8a8d0d52 | refs/heads/master | 2023-03-07T10:53:21.187020 | 2023-02-10T08:39:38 | 2023-02-10T08:39:38 | 272,007,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,415 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_fsp_vlan_interface_secondaryip
short_description: no description
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "1.0.0"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: |
only set to True when module schema diffs with FortiManager API structure,
module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: |
the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
vlan:
description: the parameter (vlan) in requested url
type: str
required: true
fsp_vlan_interface_secondaryip:
description: the top level parameters set
required: false
type: dict
suboptions:
allowaccess:
description: description
type: list
choices:
- https
- ping
- ssh
- snmp
- http
- telnet
- fgfm
- auto-ipsec
- radius-acct
- probe-response
- capwap
- dnp
- ftm
- fabric
- speed-test
detectprotocol:
description: description
type: list
choices:
- ping
- tcp-echo
- udp-echo
detectserver:
type: str
description: no description
gwdetect:
type: str
description: no description
choices:
- 'disable'
- 'enable'
ha-priority:
type: int
description: no description
id:
type: int
description: no description
ip:
type: str
description: no description
ping-serv-status:
type: int
description: no description
seq:
type: int
description: no description
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: no description
fmgr_fsp_vlan_interface_secondaryip:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
vlan: <your own value>
state: <value in [present, absent]>
fsp_vlan_interface_secondaryip:
allowaccess:
- https
- ping
- ssh
- snmp
- http
- telnet
- fgfm
- auto-ipsec
- radius-acct
- probe-response
- capwap
- dnp
- ftm
- fabric
- speed-test
detectprotocol:
- ping
- tcp-echo
- udp-echo
detectserver: <value of string>
gwdetect: <value in [disable, enable]>
ha-priority: <value of integer>
id: <value of integer>
ip: <value of string>
ping-serv-status: <value of integer>
seq: <value of integer>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/fsp/vlan/{vlan}/interface/secondaryip',
'/pm/config/global/obj/fsp/vlan/{vlan}/interface/secondaryip'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/fsp/vlan/{vlan}/interface/secondaryip/{secondaryip}',
'/pm/config/global/obj/fsp/vlan/{vlan}/interface/secondaryip/{secondaryip}'
]
url_params = ['adom', 'vlan']
module_primary_key = 'id'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'vlan': {
'required': True,
'type': 'str'
},
'fsp_vlan_interface_secondaryip': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'options': {
'allowaccess': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'list',
'choices': [
'https',
'ping',
'ssh',
'snmp',
'http',
'telnet',
'fgfm',
'auto-ipsec',
'radius-acct',
'probe-response',
'capwap',
'dnp',
'ftm',
'fabric',
'speed-test'
]
},
'detectprotocol': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'list',
'choices': [
'ping',
'tcp-echo',
'udp-echo'
]
},
'detectserver': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'str'
},
'gwdetect': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ha-priority': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'id': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'str'
},
'ping-serv-status': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'seq': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'fsp_vlan_interface_secondaryip'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| [
"baltah666@gmail.com"
] | baltah666@gmail.com |
af5a5803b937541ad6881d91e5345a919f0db0ba | 8c2b682e8bb27a32553cd6012d249df68617d262 | /src/rot13.py | 6b5445bca95bd2be62b28e8b32a33e78f9bfe258 | [] | no_license | Vlad-Shcherbina/Morph-Endo-Legacy | 7ca6de7632bb1fee69e1b6ec614a9ac9181fd297 | e620eeb2e144d0f8cf252770d4b5c38ac99449c7 | refs/heads/master | 2021-01-22T04:36:49.008953 | 2011-02-18T16:22:34 | 2011-02-18T16:22:34 | 1,352,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import sys
A = ord('A')
a = ord('a')
table = ''.join(map(chr,
range(A)+
range(A+13, A+26)+range(A, A+13)+
range(A+26, a)+
range(a+13, a+26)+range(a, a+13)+
range(a+26, 256)))
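# Added note: `table` is a 256-byte map (Python 2 str.translate semantics):
# A-Z and a-z are rotated by 13 places within their case and every other byte
# maps to itself, so piping text through the script twice restores the input.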
for line in sys.stdin:
sys.stdout.write(line.translate(table)) | [
"vlad.shcherbina@gmail.com"
] | vlad.shcherbina@gmail.com |
72189e28891678dea78ecc6262d662e8917521d2 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/firestore/backups/delete.py | db44a094048b32399ee0cdc1780a08bdc70508c3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 1,583 | py | # -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud Firestore backups delete command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.firestore import backups
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.firestore import flags
from googlecloudsdk.core import properties
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Delete(base.DeleteCommand):
"""Deletes a Cloud Firestore backup.
## EXAMPLES
To delete `cf9f748a-7980-4703-b1a1-d1ffff591db0` backup in us-east1.
$ {command} --location=us-east1
--backup=cf9f748a-7980-4703-b1a1-d1ffff591db0
"""
@staticmethod
def Args(parser):
flags.AddLocationFlag(parser, required=True, hidden=True)
flags.AddBackupFlag(parser)
def Run(self, args):
project = properties.VALUES.core.project.Get(required=True)
return backups.DeleteBackup(project, args.location, args.backup)
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
3b9de62a4fe10f0e2e6b3a426c400aff2b705434 | 45e66980d15a06b264f31f9a7d6dcd6dc271b815 | /test/functional/rpc_getchaintips.py | f64055f5d7e031f912ab20627262990cd936fe7d | [
"MIT"
] | permissive | Lucky1689/ukcoin | e3ff17c66c85f5531d81580e4bc84ff3994924af | 11bcd6ded7b11a7179e32f1bf0d6f75615c0dde1 | refs/heads/master | 2022-09-20T17:25:14.553647 | 2020-06-03T18:08:17 | 2020-06-03T18:08:17 | 262,382,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,422 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Copyright (c) 2020 The Ukcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import UkcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (UkcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.extra_args = [["-maxreorg=10000"], ["-maxreorg=10000"], ["-maxreorg=10000"], ["-maxreorg=10000"]]
def run_test (self):
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all([self.nodes[:2], self.nodes[2:]])
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
| [
"Ukcoin168@gmail.com"
] | Ukcoin168@gmail.com |
fb55a6fe4035493d57f4e6701df30f4667531636 | 24cf6d01fc9485c2e5578523bce6313aab47a30e | /DataLoaders/RN_DataLoader.py | 722af7b3989143c336ff0c507fcc1971dabe7058 | [] | no_license | sahahn/GenDiagFramework | 352212b2c540a6db73e810e416a9d3d4fa84f95a | 29498d3667d644d5b3a8fd0f0e277cbdd14027ba | refs/heads/master | 2020-04-14T06:10:10.609366 | 2019-06-18T20:22:31 | 2019-06-18T20:22:31 | 163,678,894 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 13 11:09:31 2018
@author: sage
"""
from DataLoaders.TwoD_DataLoader import TwoD_DataLoader
from config import config
import nibabel as nib
import numpy as np
import csv
def get_name_slc(chunk):
'''Specific function for loading retina-net style csv'''
relevant_chunk = chunk.split('/')[-1].replace('.jpg','')
name = relevant_chunk[:-3]
slc = int(relevant_chunk[-3:])
return name, slc
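# Illustrative trace (hypothetical path): for 'some/dir/patient01_042.jpg',
# relevant_chunk is 'patient01_042', so name == 'patient01_' and slc == 42.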
class RN_DataLoader(TwoD_DataLoader):
def load_labels(self, include_none=True):
print('include none: ', include_none)
self.file_names = set()
self.data_points = []
with open(self.label_location) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
name, slc = get_name_slc(row[0])
self.file_names.add(name)
try:
label = [float(row[i]) for i in range(1,5)]
except ValueError:
label = [None]
label.append(config['name_convs'][row[-1]])
if label[0] == None and not include_none:
continue
if label[0] == None:
label = np.empty((5))
self.data_points.append(self.create_data_point(name, label, slc=slc))
def load_new(self):
for name in self.label_location:
raw_file_path = self.init_location + name + '.nii'
try:
raw_file = nib.load(raw_file_path)
except:
raw_file = nib.load(raw_file_path + '.gz')
data = raw_file.get_data()
data = data.transpose(2,1,0)
for slc in range(len(data)):
label = np.empty((5))
dp = self.create_data_point(name, label, slc=slc)
image = data[slc]
image = self.initial_preprocess(image, 0)
dp.set_data(image)
self.data_points.append(dp)
def load_annotations(annotations_loc):
'''Create an instance of the Retina Net DataLoader in order to load
the data point w/ just label, name and slice information, and return
the datapoints - notably loading only annotations with info'''
RN_Loader = RN_DataLoader('fake/', annotations_loc)
RN_Loader.load_labels(include_none=False)
return RN_Loader.data_points
| [
"sahahn@uvm.edu"
] | sahahn@uvm.edu |
bd4bbabf5022a10ced1a65609982eeed8984efa0 | 9c0abddc6bff5e65284145110663c88863e647c1 | /scripts/polyphemus | d83d6cadce361a9f2504b0ea985eef86374e08a4 | [
"BSD-2-Clause"
] | permissive | polyphemus-ci/polyphemus | 07ffb912d17e971a13f3e89b282dd13e33f23b82 | 3ae6cb9ff312d90478d8a294681bd898b7f45b1c | refs/heads/master | 2020-04-18T14:06:38.833687 | 2015-03-16T18:28:25 | 2015-03-16T18:28:25 | 13,967,339 | 1 | 0 | null | 2015-03-16T18:28:25 | 2013-10-29T20:12:32 | Python | UTF-8 | Python | false | false | 95 | #!/usr/bin/env python
"""polyphemus entry point."""
from polyphemus.main import main
main()
| [
"scopatz@gmail.com"
] | scopatz@gmail.com | |
4e825dc3c1d3e37064aefc4516a83fc19b3800e0 | cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98 | /leet/greedy/Min_Steps_to_Make_Piles_Equal_Height.py | 5a29476f4a588b2983fa92ec5fc6e3ae81d6e5c0 | [] | no_license | arsamigullin/problem_solving_python | 47715858a394ba9298e04c11f2fe7f5ec0ee443a | 59f70dc4466e15df591ba285317e4a1fe808ed60 | refs/heads/master | 2023-03-04T01:13:51.280001 | 2023-02-27T18:20:56 | 2023-02-27T18:20:56 | 212,953,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # microsoft
# Alexa is given n piles of equal or unequal heights. In one step, Alexa can remove any number of
# boxes from the pile which has the maximum height and try to make it equal to the one which is just
# lower than the maximum height of the stack.
# Determine the minimum number of steps required to make all of the piles equal in height.
#
# Example 1:
#
# Input: piles = [5, 2, 1]
# Output: 3
# Explanation:
# Step 1: reducing 5 -> 2 [2, 2, 1]
# Step 2: reducing 2 -> 1 [2, 1, 1]
# Step 3: reducing 2 -> 1 [1, 1, 1]
# So final number of steps required is 3.
def solution(piles):
total = 0
l = sorted(piles,reverse = True)
for i in range(0, len(l) - 1):
if l[i] > l[i + 1]:
total += i + 1
return total | [
"ar.smglln@gmail.com"
] | ar.smglln@gmail.com |
f92a2d65cadcf3edcc995607470cff87ffcb5b0f | b525ad01dac1595f1f8e124067e7cf86fe79f5f1 | /car_yaohao/tui/yaohao_prediction.py | ce300b7db846ddd4010841f554da6c085cc1db86 | [] | no_license | oaifaye/tensorflow_demo | cad381700d16b10052525a4c2210cf2243df746b | 660636d7bef8c7f4853c0b3a99ffdb4b6f569180 | refs/heads/master | 2021-09-05T09:44:22.357399 | 2018-01-26T07:02:24 | 2018-01-26T07:02:24 | 119,017,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,400 | py | '''
Created on Jan 13, 2018
@author: Administrator
TensorFlow multi-feature nonlinear regression
https://www.2cto.com/kf/201704/626628.html
'''
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# number of features
featurenum = 3
x = tf.placeholder(tf.float32, [None, featurenum])
y = tf.placeholder(tf.float32, [None, 1])
#define the weights of the middle (hidden) layer of the network
weights_l1 = tf.placeholder(tf.float32, [featurenum, 10])
biases_l1 = tf.placeholder(tf.float32, [1, 10])
wx_plust_b_l1 = tf.matmul(x, weights_l1) + biases_l1
# l1 = tf.nn.relu(wx_plust_b_l1)#ReLU alternative for the activation
l1 = tf.nn.tanh(wx_plust_b_l1)#hyperbolic tangent as the activation function
# l1 = tf.sigmoid(wx_plust_b_l1)
#define the output layer
weights_l2 = tf.placeholder(tf.float32, [ 10,1])
biases_l2 = tf.placeholder(tf.float32, [ 1,1])
wx_plust_b_l2 = tf.matmul(l1, weights_l2) + biases_l2
# prediction = tf.nn.relu(wx_plust_b_l2)#ReLU alternative
prediction = tf.nn.tanh(wx_plust_b_l2)#prediction result
# prediction = tf.sigmoid(wx_plust_b_l2)#sigmoid alternative
#cost function
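# Note (added for clarity): no cost function or training op is actually built
# below - the weight and bias values that follow are previously trained
# constants fed in through the placeholders above, so the session only runs
# inference and plotting.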
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())#variable initialization - must always be done
seed_plt = 0.042
x_data = [
[-seed_plt*23,0.11789,0.11910],[-seed_plt*22,0.12477,0.12820],
[-seed_plt*21,0.14280,0.14806],[-seed_plt*20,0.15508,0.16252],[-seed_plt*19,0.16890,0.17658],
[-seed_plt*18,0.18292,0.19115],[-seed_plt*17,0.19867,0.20520],[-seed_plt*16,0.19941,0.20397],
[-seed_plt*15,0.16605,0.16883],[-seed_plt*14,0.15972,0.16275],[-seed_plt*13,0.16640,0.16952],
[-seed_plt*12,0.15151,0.15679],[-seed_plt*11,0.15752,0.16554],[-seed_plt*10,0.18508,0.19272],
[-seed_plt*9,0.19853,0.21006],[-seed_plt*8,0.21182,0.22585],[-seed_plt*7,0.32371,0.24690],
[-seed_plt*6,0.25987,0.27367],[-seed_plt*5,0.28182,0.29401],[-seed_plt*4,0.28292,0.29100],
[-seed_plt*3,0.22107,0.22590],[-seed_plt*2,0.22458,0.23236],[-seed_plt*1,0.23198,0.24061],
[seed_plt*1,0.21902,0.23076],[seed_plt*2,0.24511,0.25761],[seed_plt*3,0.26545,0.28025],
[seed_plt*4,0.27891,0.29496],[seed_plt*5,0.30306,0.32079],[seed_plt*6,0.31664,0.32712],
[seed_plt*7,0.15473,0.15936],[seed_plt*8,0.16705,0.17397],[seed_plt*9,0.18764,0.19823],
[seed_plt*10,0.19872,0.21006],[seed_plt*11,0.20558,0.21532],[seed_plt*12,0.20169,0.21244],
[seed_plt*13,0.19800,0.20500]
]
    #learning rate 0.1
weights_ave_l1_p = [[-1.6345720291137695, 0.16002704203128815, -3.720055341720581, -0.09972929954528809, -2.0044445991516113, -0.25151756405830383, 1.092233657836914, -1.3219331502914429, 2.7261900901794434, -0.09247738122940063], [0.7514986395835876, -1.0940899848937988, 2.1134166717529297, -0.08639880269765854, 0.4848458170890808, -1.8154855966567993, 2.683783531188965, -0.4798979163169861, -0.3681236803531647, 0.3427703082561493], [0.5650380849838257, -0.6899119019508362, -0.3770529627799988, -1.0565630197525024, -2.6447322368621826, -2.462433099746704, 1.0621711015701294, 0.7418441772460938, -0.027339881286025047, -1.2374218702316284]]
biases_ave_l1_p = [[0.002408053958788514, -0.043371863663196564, -0.10514367371797562, -0.022852644324302673, -0.2746281027793884, 0.09120028465986252, 0.18297520279884338, -0.15229398012161255, 0.163157120347023, -0.03869754448533058]]
weights_ave_l2_p = [[1.0024975538253784], [0.984232485294342], [-0.8198782801628113], [0.8732115030288696], [-0.6737377047538757], [-2.5154635906219482], [-1.3600997924804688], [-1.046596646308899], [-1.076171875], [0.9199466109275818]]
biases_ave_l2_p = [[-0.08473621308803558]]
prediction_ave = sess.run(prediction, feed_dict={x:x_data,weights_l1:weights_ave_l1_p,biases_l1:biases_ave_l1_p,weights_l2:weights_ave_l2_p,biases_l2:biases_ave_l2_p})
    print('prediction ave:', prediction_ave)
weights_min_l1_p = [[1.5045439004898071, -2.2153048515319824, -0.05088438838720322, -5.076720237731934, 0.08630691468715668, -1.2653288841247559, -0.8979445695877075, 0.05308128520846367, 3.4334943294525146, 0.022553058341145515], [0.02079005539417267, -0.7477558851242065, 0.9663386344909668, -2.0129551887512207, -0.8947086930274963, -0.8803507685661316, 3.7918221950531006, 1.847798228263855, 4.007891654968262, 1.2713923454284668], [-2.1645727157592773, -1.8836021423339844, -0.6455804705619812, -2.0849404335021973, -0.35961779952049255, 0.14747656881809235, -2.182036876678467, -1.195816993713379, -0.455473929643631, 2.7534902095794678]]
biases_min_l1_p = [[-0.05450700595974922, -0.057999689131975174, 0.009392624720931053, -0.04014386981725693, -0.0126974917948246, 0.001202933257445693, 0.007342544849961996, 0.01960461027920246, -0.04443024843931198, -0.02939458005130291]]
weights_min_l2_p = [[-1.569625735282898], [1.710821509361267], [-0.38559436798095703], [1.3638279438018799], [0.5812253952026367], [0.3018932342529297], [-2.6823058128356934], [-0.8767712712287903], [2.8246896266937256], [1.399678349494934]]
biases_min_l2_p = [[-0.024785390123724937]]
prediction_min = sess.run(prediction, feed_dict={x:x_data,weights_l1:weights_min_l1_p,biases_l1:biases_min_l1_p,weights_l2:weights_min_l2_p,biases_l2:biases_min_l2_p})
    print('prediction min:', prediction_min)
    #plot
# seed_plt = 0.040
x_plt = [[-seed_plt*23],[-seed_plt*22],[-seed_plt*21],[-seed_plt*20],[-seed_plt*19],[-seed_plt*18],[-seed_plt*17],[-seed_plt*16],[-seed_plt*15],[-seed_plt*14],[-seed_plt*13],[-seed_plt*12],
[-seed_plt*11],[-seed_plt*10],[-seed_plt*9],[-seed_plt*8],[-seed_plt*7],[-seed_plt*6],[-seed_plt*5],[-seed_plt*4],[-seed_plt*3],[-seed_plt*2],[-seed_plt*1],
[seed_plt*1],[seed_plt*2],[seed_plt*3],[seed_plt*4],[seed_plt*5],[seed_plt*6],[seed_plt*7],[seed_plt*8],[seed_plt*9],[seed_plt*10],[seed_plt*11],[seed_plt*12],[seed_plt*13],
# [seed_plt*14],
# [seed_plt*15],[seed_plt*16]#,[seed_plt*17],[seed_plt*18],[seed_plt*19],[seed_plt*20],[seed_plt*21],[seed_plt*22],[seed_plt*23],[seed_plt*24],[seed_plt*25]
]
plt.figure()
    plt.plot(x_plt, prediction_ave, 'r-', lw = 5)#plot the ave prediction as a solid red line
    plt.plot(x_plt, prediction_min, 'b-', lw = 5)#plot the min prediction as a solid blue line
plt.show()
| [
"slf_work@hotmail.com"
] | slf_work@hotmail.com |
7558eb13c62e6e8701d8c5244984f91e09431bde | 750c78b144a9ff5f03732e0db56af61e4b99de04 | /allProjects/allProjects/asgi.py | 9e2dcc5548b16ec4f516282108a60bba2206402c | [] | no_license | Pradeepsuthar/Covid19_symptomChecker | 30af12bc26dfbcfea1c5f8231016fc5b17bac066 | 5c42464819dbc188d2957b4d5e98f85f90c9e3f0 | refs/heads/master | 2021-05-21T16:58:44.421559 | 2020-09-17T19:18:09 | 2020-09-17T19:18:09 | 252,727,833 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
ASGI config for allProjects project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'allProjects.settings')
application = get_asgi_application()
| [
"sutharpradeep081@gmail.com"
] | sutharpradeep081@gmail.com |
1c8a5f2b8ea1d21124aa4c853b16f90ccf7db25c | 636ba2700eaf3a151b73144b510f38c75ab1919d | /keras/keras46_MC_5_diabetes.py | 5b1e1f877d96d175939c96bf9885a18040677c9f | [] | no_license | Taerimmm/ML | 17997f388e18c28dfd9de83af98a6d4bebe7e1f0 | 6147cede81ebcc95f21adebf75731fbbb11edfab | refs/heads/master | 2023-06-10T14:26:45.335219 | 2021-07-05T15:30:47 | 2021-07-05T15:30:47 | 324,874,959 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | import numpy as np
from sklearn.datasets import load_diabetes
dataset = load_diabetes()
x = dataset.data
y = dataset.target
print(x.shape, y.shape)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.2, random_state=45)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=.2, random_state=45)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)
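# Note: the scaler is fit on the training split only and then applied to the
# test and validation splits, so their statistics never leak into training.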
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(10,)))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
modelpath = '../data/modelcheckpoint/k46_diabetes_{epoch:02d}-{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
es = EarlyStopping(monitor='val_loss', patience=50, mode='auto')
model.fit(x_train, y_train, epochs=10000, batch_size=8, validation_data=(x_val, y_val), verbose=2, callbacks=[es,cp])
loss, mae = model.evaluate(x_test, y_test)
print("loss :", loss)
print('MAE :', mae)  # the compiled metric is mae, so label it as such
y_predict = model.predict(x_test)
from sklearn.metrics import mean_squared_error, r2_score
def rmse(y_test, y_predict):
return np.sqrt(mean_squared_error(y_test, y_predict))
print('RMSE :', rmse(y_test, y_predict))
print('MSE :', mean_squared_error(y_test, y_predict))
print('R2 :', r2_score(y_test, y_predict))
| [
"xofla7560@naver.com"
] | xofla7560@naver.com |
d4b3004e359038387a94c6cd18da5927de369bf1 | b09de58f95a76a5b1304205e44ba6be3965da33a | /chapter_06/06_use_generator_model.py | 6293a47fcd3a3256bffaa319886fa4c8d652695d | [] | no_license | fenago/generative-adversarial-networks | 416c6c84370c87148e00fa714b5d7ac043667cc9 | 9c9c08e25d01d7b69c863d7a9927b15140dca4d3 | refs/heads/master | 2021-01-02T12:02:08.749359 | 2020-02-29T12:35:21 | 2020-02-29T12:35:21 | 239,614,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | # %%
# define and use the generator model
from numpy.random import randn
from keras.models import Sequential
from keras.layers import Dense
%matplotlib notebook
from matplotlib import pyplot
# define the standalone generator model
def define_generator(latent_dim, n_outputs=2):
model = Sequential()
model.add(Dense(15, activation='relu', kernel_initializer='he_uniform', input_dim=latent_dim))
model.add(Dense(n_outputs, activation='linear'))
return model
# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n):
# generate points in the latent space
x_input = randn(latent_dim * n)
# reshape into a batch of inputs for the network
x_input = x_input.reshape(n, latent_dim)
return x_input
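# e.g. generate_latent_points(5, 100) draws 500 standard-normal values and
# reshapes them to (100, 5): one 5-dimensional latent vector per sample.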
# use the generator to generate n fake examples and plot the results
def generate_fake_samples(generator, latent_dim, n):
# generate points in latent space
x_input = generate_latent_points(latent_dim, n)
# predict outputs
X = generator.predict(x_input)
# plot the results
pyplot.scatter(X[:, 0], X[:, 1])
pyplot.show()
# size of the latent space
latent_dim = 5
# define the discriminator model
model = define_generator(latent_dim)
# generate and plot generated samples
generate_fake_samples(model, latent_dim, 100) | [
"31277617+athertahir@users.noreply.github.com"
] | 31277617+athertahir@users.noreply.github.com |
b99a243aa03a9fd7d85f1b818df6bfd48f4d9dab | 4bb30a508977ad6b950b98c8ef4d7dec9d988a6a | /bacs350/demo/week05/Demo14/Demo14/wsgi.py | 6cc8bbaec0e26b099073f17bc6059709b4275f6a | [] | no_license | Mark-Seaman/UNC-BACS350-2020-Fall | cfad05b6b58f15401e120beba9a79a9bbd8a1525 | 8a14ad72f43fdffa9491b9a9b38d65d71a074986 | refs/heads/master | 2023-07-19T03:01:27.298647 | 2020-12-14T19:24:39 | 2020-12-14T19:24:39 | 279,434,877 | 0 | 0 | null | 2021-09-22T19:44:33 | 2020-07-13T23:41:47 | HTML | UTF-8 | Python | false | false | 389 | py | """
WSGI config for Demo14 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Demo14.settings')
application = get_wsgi_application()
| [
"Mark.Seaman@imac.net"
] | Mark.Seaman@imac.net |
ec26144644568fdd3f604ce3d8d4aa765ca16d56 | 3d569375e38cbc2e73f54a9e5dd140b4021edb46 | /爬虫/requests项目/爬取去哪儿景点.py | da06f893e653a0a7f404c190512dd1a2bdc4fadd | [] | no_license | Gscsd8527/python | 2dffb13944346ca1772a4de52a80c644f19bcf72 | c7cb0653355365fc18a235f427315fae8f2b8734 | refs/heads/master | 2020-04-28T21:15:23.514693 | 2019-04-20T12:50:04 | 2019-04-20T12:50:04 | 175,575,773 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | import requests
from bs4 import BeautifulSoup
import re
def parse(url):
html = requests.get(url,headers=headers)
html = html.text
soup = BeautifulSoup(html,'lxml')
city = soup.select('div.e_destin_ct dl.m_nav dd a')
lst =[]
for i in city:
        # some cities are promoted into the hot-cities section and also listed under their region, so deduplicate
if i.text not in lst:
lst.append(i.text)
city_name = i.text
city_url = i.get('href')
city_url='https:'+city_url
parse_url(city_name,city_url)
# parse the data at each city's url
def parse_url(city_name,city_url):
print(city_name,city_url)
html = requests.get(city_url,headers=headers)
html = html.text
soup = BeautifulSoup(html,'lxml')
print(soup)
if __name__=='__main__':
headers = {
'User-Agent': 'Mozilla/4.0(compatible;MSIE 5.5;Windows NT)', }
start_url='https://dujia.qunar.com/p/domestic?tm=ign_origin'
parse(start_url)
| [
"tan_gscsd@163.com"
] | tan_gscsd@163.com |
93410943fa474b3e5f7f1f559b3bddf843a3abd9 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4/Gather2_W_fixGood_C_change/ep0_test/pyr_0s/L6/step09_0side_L6.py | 21fc6f084b824966d3500838e49e391b5b9d89c5 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,983 | py | #############################################################################################################################################################################################################
from step08_c_use_G_generate_I_w_M_to_Wx_Wy_Wz_focus_to_Cx_Cy_focus_combine import I_w_M_to_W_to_C
from step08_b_use_G_generate_0_util import Tight_crop, Color_jit
from step09_c_train_step import Train_step_I_w_M_to_W_to_C
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
color_jit = Color_jit(do_ratio=0.6)
use_gen_op_p20 = I_w_M_to_W_to_C( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 0) ) ### my current multi_model I_to_Wxyz_to_Cxy_general always returns all of Wz_pre_w_M, Wy_pre_w_M, Wx_pre_w_M, Cx_pre_w_M, Cy_pre_w_M, so regardless of wi/woDIV, setting Separate to True everywhere is correct
use_train_step_p20 = Train_step_I_w_M_to_W_to_C( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15), color_jit=color_jit ) ### my current multi_model I_to_Wxyz_to_Cxy_general always returns all of Wz_pre_w_M, Wy_pre_w_M, Wx_pre_w_M, Cx_pre_w_M, Cy_pre_w_M, so regardless of wi/woDIV, setting Separate to True everywhere is correct
from Exps_7_v3.doc3d.Ablation4.W_w_M_to_C_pyr.pyr_0s.L6.step09_0side_L6 import *
from Exps_7_v3.doc3d.Ablation4.I_w_M_to_W_pyr.pyr_3s.L5.step09_3side_L5 import ch032_pyramid_1side_6__2side_4__3side_3 as I_w_M_to_W_Tcrop255_p20_3s_L5_good
import time
start_time = time.time()
###############################################################################################################################################################################################
#########################################################################################
ch032_pyramid_0side_and_1s6_2s6 = KModel_builder().set_model_name(MODEL_NAME.multi_flow_unet).set_multi_model_builders(op_type="I_to_Wxyz_to_Cxy_general", W_to_Cx_Cy=ch032_pyramid_0side, I_to_Wx_Wy_Wz=I_w_M_to_W_Tcrop255_p20_3s_L5_good).set_multi_model_separate_focus(I_to_W_separ=False, I_to_W_focus=True, W_to_C_separ=False, W_to_C_focus=True).set_gen_op( use_gen_op_p20 ).set_train_step( use_train_step_p20 )
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1))
use_model = ch032_pyramid_0side
use_model = use_model.build()
result = use_model.generator(data, Mask=data)
print(result[0].shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
f6c058527d475a757333395590501ec8765c4950 | c3a6b6f74623b2b26e6d4a259b06367ff6ac7a60 | /tests/test_backbones/test_shufflenet_v2.py | d69ec6c117832e05159a8a12e9d20694a34127ea | [
"Apache-2.0"
] | permissive | jcwon0/BlurHPE_prev | f3785eeac7063799874f5272eacbda0234d0369a | 8cac1de10a60898eaa702e536c4e24a27d469f6d | refs/heads/master | 2023-04-01T03:26:59.371057 | 2021-04-02T07:51:46 | 2021-04-02T07:51:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,495 | py | import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmpose.models.backbones import ShuffleNetV2
from mmpose.models.backbones.shufflenet_v2 import InvertedResidual
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (InvertedResidual, )):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
def test_shufflenetv2_invertedresidual():
with pytest.raises(AssertionError):
# when stride==1, in_channels should be equal to out_channels // 2 * 2
InvertedResidual(24, 32, stride=1)
with pytest.raises(AssertionError):
# when in_channels != out_channels // 2 * 2, stride should not be
# equal to 1.
InvertedResidual(24, 32, stride=1)
# Test InvertedResidual forward
block = InvertedResidual(24, 48, stride=2)
x = torch.randn(1, 24, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size((1, 48, 28, 28))
# Test InvertedResidual with checkpoint forward
block = InvertedResidual(48, 48, stride=1, with_cp=True)
assert block.with_cp
x = torch.randn(1, 48, 56, 56)
x.requires_grad = True
x_out = block(x)
assert x_out.shape == torch.Size((1, 48, 56, 56))
def test_shufflenetv2_backbone():
with pytest.raises(ValueError):
# groups must be in 0.5, 1.0, 1.5, 2.0]
ShuffleNetV2(widen_factor=3.0)
with pytest.raises(ValueError):
# frozen_stages must be in [0, 1, 2, 3]
ShuffleNetV2(widen_factor=1.0, frozen_stages=4)
with pytest.raises(ValueError):
# out_indices must be in [0, 1, 2, 3]
ShuffleNetV2(widen_factor=1.0, out_indices=(4, ))
with pytest.raises(TypeError):
# pretrained must be str or None
model = ShuffleNetV2()
model.init_weights(pretrained=1)
# Test ShuffleNetV2 norm state
model = ShuffleNetV2()
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
# Test ShuffleNetV2 with first stage frozen
frozen_stages = 1
model = ShuffleNetV2(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for param in model.conv1.parameters():
assert param.requires_grad is False
for i in range(0, frozen_stages):
layer = model.layers[i]
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test ShuffleNetV2 with norm_eval
model = ShuffleNetV2(norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test ShuffleNetV2 forward with widen_factor=0.5
model = ShuffleNetV2(widen_factor=0.5, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 48, 28, 28))
assert feat[1].shape == torch.Size((1, 96, 14, 14))
assert feat[2].shape == torch.Size((1, 192, 7, 7))
# Test ShuffleNetV2 forward with widen_factor=1.0
model = ShuffleNetV2(widen_factor=1.0, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 116, 28, 28))
assert feat[1].shape == torch.Size((1, 232, 14, 14))
assert feat[2].shape == torch.Size((1, 464, 7, 7))
# Test ShuffleNetV2 forward with widen_factor=1.5
model = ShuffleNetV2(widen_factor=1.5, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 176, 28, 28))
assert feat[1].shape == torch.Size((1, 352, 14, 14))
assert feat[2].shape == torch.Size((1, 704, 7, 7))
# Test ShuffleNetV2 forward with widen_factor=2.0
model = ShuffleNetV2(widen_factor=2.0, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 244, 28, 28))
assert feat[1].shape == torch.Size((1, 488, 14, 14))
assert feat[2].shape == torch.Size((1, 976, 7, 7))
# Test ShuffleNetV2 forward with layers 3 forward
model = ShuffleNetV2(widen_factor=1.0, out_indices=(2, ))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert isinstance(feat, torch.Tensor)
assert feat.shape == torch.Size((1, 464, 7, 7))
# Test ShuffleNetV2 forward with layers 1 2 forward
model = ShuffleNetV2(widen_factor=1.0, out_indices=(1, 2))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 2
assert feat[0].shape == torch.Size((1, 232, 14, 14))
assert feat[1].shape == torch.Size((1, 464, 7, 7))
# Test ShuffleNetV2 forward with checkpoint forward
model = ShuffleNetV2(widen_factor=1.0, with_cp=True)
for m in model.modules():
if is_block(m):
assert m.with_cp
| [
"jcwon@postech.ac.kr"
] | jcwon@postech.ac.kr |
f6ddf8e6bf8dc89101fd648bc2a72a106cebf946 | 85f94cfd370ca7d384977cb091e886d8f80161e8 | /setup.py | ff1a0fe29c8ffa0d284df0108fcff728caaedef3 | [
"MIT"
] | permissive | litwisha/asyncio_monkey | f4df643a953f6282b6a2f1202cef5d2cfc113f98 | 7f845b676bdb4db1f4ccb8377327a09db4391322 | refs/heads/master | 2020-12-30T14:44:29.324840 | 2017-05-12T08:54:53 | 2017-05-12T08:54:53 | 91,078,807 | 0 | 0 | null | 2017-05-12T10:10:30 | 2017-05-12T10:10:30 | null | UTF-8 | Python | false | false | 1,349 | py | import io
import os
import re
from setuptools import setup
def get_version():
regex = r"__version__\s=\s\'(?P<version>[\d\.]+?)\'"
path = ('asyncio_monkey.py',)
return re.search(regex, read(*path)).group('version')
def read(*parts):
filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts)
with io.open(filename, encoding='utf-8', mode='rt') as fp:
return fp.read()
setup(
name='asyncio_monkey',
version=get_version(),
author='wikibusiness',
author_email='osf@wikibusiness.org',
url='https://github.com/wikibusiness/asyncio_monkey',
description='Simple lru_cache for asyncio',
long_description=read('README.rst'),
extras_require={
':python_version=="3.3"': ['asyncio'],
},
py_modules=['asyncio_monkey'],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords=['asyncio', 'monkey patch'],
)
| [
"hellysmile@gmail.com"
] | hellysmile@gmail.com |
5bdcf9044e8d6f72c9c4ff65ce03136b1cf7aefe | d8498ca05daa83f108a231c024b4efa2f4b71747 | /ex13_8.py | 91f418127e41f7460d0dfa1e63b5135d5cbddbf7 | [
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | antoalv19/TP_solutions | ca9baf22ca1d16bac1f1bbabdefe473bb1b0ac0a | 7c1d3b35c11cfbd9c66be9878aafa1dbe8802581 | refs/heads/master | 2020-07-03T14:31:44.626360 | 2020-01-04T20:01:27 | 2020-01-04T20:01:27 | 201,936,524 | 0 | 0 | null | 2019-08-12T13:32:04 | 2019-08-12T13:32:03 | null | UTF-8 | Python | false | false | 1,735 | py | import random
import string
from collections import defaultdict
def read_and_analyze(filename, skip_header=True):
    '''Read a text file and perform Markov analysis.
    Builds a mapping dict[prefix] = list of suffixes, where each prefix is
    a stripped, lower-cased word and each suffix is the raw word that
    follows it.  Returns the Markov dict.
    '''
d = defaultdict(list)
with open(filename, encoding="utf8") as fin:
if skip_header:
skip_gutenberg_header(fin)
for line in fin:
line = line.replace('-', '')
line_split = line.split()
for i in range(0,len(line_split)-1):
strippables = string.punctuation + string.whitespace
word = line_split[i]
word = word.strip(strippables).lower()
                # simple approach with low efficiency: append the raw next word
d[word].append(line_split[i+1])
            # Note: this per-line pass never links a line's last word to the first word of the next line
return d
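# Example of the mapping read_and_analyze builds: for the text
# "the cat saw the dog", d == {'the': ['cat', 'dog'], 'cat': ['saw'],
# 'saw': ['the']} -- keys are stripped/lower-cased, suffixes kept raw.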
def generate_random_content(d, prefix_len=2, text_len=50):
    '''Generate random text by walking a given Markov dict.
    The walk starts from a random key whose character length equals
    prefix_len; each step appends a random recorded successor of the
    current word and stops early at a dead end.
    '''
    strippables = string.punctuation + string.whitespace
    word_list = [word for word in d if len(word) == prefix_len]
    previous = random.choice(word_list)
    des = [previous]
    for i in range(text_len):
        sub = d.get(previous)
        if not sub:
            # dead end: the current word never occurs as a prefix
            break
        nxt = random.choice(sub)
        des.append(nxt)
        # keys are stored stripped and lower-cased, so normalize before lookup
        previous = nxt.strip(strippables).lower()
    print(' '.join(des))
def skip_gutenberg_header(fp):
"""Reads from fp until it finds the line that ends the header.
fp: open file object
copied from author's answer
"""
for line in fp:
if line.startswith('*END*THE SMALL PRINT!'):
break
def main():
d = read_and_analyze('emma.txt')
#for prefix, suffix in d.items():
# print(prefix, suffix)
print("Generating random text")
generate_random_content(d)
if __name__ == '__main__':
main() | [
"dixingxu@gmail.com"
] | dixingxu@gmail.com |
09ac31571e4ff375dc9625c68e94a200764ca389 | 0031efd83a49c4686b23ea629e1fb922b0b9e956 | /ru/views.py | 964e3e5b6f5d932ac8839f642084ff1b684a2cbb | [] | no_license | bomvendador/trans | 05f41e8b39a94178444ddb50436855db8d1c66ec | 9792d13dabe594ecf543218cb841dec69c9d43db | refs/heads/master | 2021-01-11T20:03:47.173580 | 2017-03-13T13:50:36 | 2017-03-13T13:50:36 | 79,447,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,498 | py | # -*- coding: utf-8 -*-
# from __future__ import unicode_literals
import sys
from uuid import uuid4
import unicodedata
from __builtin__ import unicode
from django.utils.encoding import smart_text
from django.shortcuts import render, redirect
from django.views import generic
from django.contrib.auth.models import User
from .models import Language, SentDoc, SentFiles, UserProfile, Role, OrderStatus, OrderSource, Client, TranslationTheme, TranslationType, BackCall, Testimonials, TimelineOrder
from django.utils.translation import ugettext as _
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
import hashlib
import os
from django.conf import settings
from django.core.files.storage import FileSystemStorage
import re
from string import punctuation
from dashboard_ru.views import update_client_statistics
from dashboard_ru import views as dash_views
from django.contrib.auth.decorators import login_required
import logging
logger = logging.getLogger('django-debug')
class IndexView (generic.TemplateView):
template_name = 'index.html'
context_object_name = 'user'
def get_queryset(self):
return User.objects.all()
def post(self, request, *args, **kwargs):
context = self.get_context_data()
username = context['login']
password = context['password']
# print('login' + str(login) + ' pass = ' + password)
user = authenticate(username=username, password=password)
if user is not None:
print(user.username)
if user.is_active:
login(request, user)
return redirect('ru:index')
else:
# messages.error(self.request, 'fdfdfd')
message_ = u'Логин или пароль указаны не верно'
return HttpResponse(message_)
# return render(self.request, 'index.html', {'user': user})
return render(self.request, 'index.html', {'user': User.objects.all()})
# return super(generic.TemplateView, self).render_to_response(context)
def get_context_data(self, **kwargs):
# context = super(IndexView, self).get_context_data(**kwargs)
if self.request.user.is_authenticated():
try:
user_profile = UserProfile.objects.get(user=self.request.user)
except UserProfile.DoesNotExist:
user_profile = None
else:
user_profile = None
context = {'login': self.request.POST.get('login'),
'password': self.request.POST.get('password'),
'langs': Language.objects.all().order_by('name'),
'user_profile': user_profile,
'testimonials': Testimonials.objects.all(),
'prolingva_tel': settings.PROLINGVA_TEL
}
# print(self.request.POST.get('login'))
return context
class BaseView (generic.TemplateView):
template_name = 'base.html'
context_object_name = 'user'
def get_queryset(self):
return {'user': User.objects.all(), 'lang': Language.objects.all().order_by('name')}
def baseView (request):
context = {'user': User.objects.all(),
'langs': Language.objects.all().order_by('name'),
'prolingva_tel': settings.PROLINGVA_TEL
}
return render(request, 'base.html', context)
def loginUser(request):
if request.method == 'POST':
        username = request.POST['login']
        password = request.POST['password']
        print(username + ' ' + password)
        user = authenticate(username=username, password=password)
        if user is not None:
            print(user.username)
            if user.is_active:
                # local name is `username`, not `login`, to avoid shadowing
                # the imported django.contrib.auth.login function
                login(request, user)
return redirect('ru:index')
return render(request, 'index.html')
def logout_user(request):
if request.user:
logger.debug('logout')
logout(request)
return redirect('ru:index')
def check_user(request):
if request.method == 'POST':
email = request.POST['email']
password = request.POST['password']
print('before')
# user_ = User.objects.get(email=email)
user = authenticate(username=email, password=password)
# print('username = ' + str(user_.username) + ' ddd = ' + str(user))
if user is not None:
print('user')
if user.is_active:
login(request, user)
error_ = 0
return HttpResponse(error_)
else:
# messages.error(self.request, 'fdfdfd')
error_ = 1
return HttpResponse(error_)
def reg_user(request):
print(request)
if request.method == 'POST':
email = request.POST['email']
password = request.POST['password']
name = request.POST['name']
try:
user_exist = User.objects.get(username=email)
except User.DoesNotExist:
user_exist = None
if user_exist is None:
user_profile = UserProfile()
user = User()
user.username = email
user.set_password(password)
user.first_name = name
user.email = email
user.save()
if request.POST.get('role', False) == u'Клиент':
client = Client()
client.user = user
client.creator = user
client.name = name
client.email = email
client.visited_times = 1
client.save()
user_profile.role = Role.objects.get(role_name=request.POST['role'])
email_context = {'user': user, 'login': email, 'password': password, 'prolingva_tel': settings.PROLINGVA_TEL}
dash_views.send_email(request, 'welcome.html', 'info@prolingva.ru', [email], email_context)
# else:
# user_profile.role = Role.objects.get(id=3)
# user = User.objects.create(username=email, email=email, password=password, first_name=name)
user_profile.user = user
user_profile.tel = request.POST.get('tel', False)
user_profile.save()
# hashed_password = hashlib.md5(password)
user_auth = authenticate(username=email, password=password)
login(request, user_auth)
message = user.id
else:
message = 'user_exists'
return HttpResponse(message)
# class Base_view_ (generic.TemplateView):
# template_name = 'base_board.html'
def base_view_(request):
print('fff')
return render(request, 'base_board.html')
def get_langs(request):
langs = Language.objects.all().order_by('name')
return HttpResponse(langs)
def user_client_add(name, email, tel):
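    # Creates the linked User / Client / UserProfile records with a random
    # initial password and returns all three; several request handlers
    # below repeat the same steps inline.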
user = User()
user.first_name = name
user.email = email
user.username = email
password = User.objects.make_random_password()
print(password)
user.set_password(password)
user.save()
client = Client()
client.name = name
client.email = email
client.tel = tel
client.init_password = password
client.user = user
client.creator = user
client.save()
user_profile = UserProfile()
user_profile.tel = tel
user_profile.user = user
user_profile.role = Role.objects.get(role_name=u'Клиент')
user_profile.save()
return {'user': user, 'client': client, 'user_profile': user_profile}
def save_files_trans(request):
if request.method == 'POST':
logger.debug('request = ' + str(request.FILES.getlist('file')))
email_source = u'Сайт - заявка'
data = request.POST
user_exists = False
doc_sent = SentDoc()
if data.get('contact_form_footer'):
name = request.POST['name_contact_form_footer']
email = request.POST['email_contact_form_footer']
try:
# id = request.POST['tel_doc_send']
client = Client.objects.get(email=email)
user = User.objects.get(id=client.user.id)
message = 'user_exists'
except Client.DoesNotExist:
user = user_client_add(name, email, '')['user']
message = 'ok'
doc_sent.author = user
doc_sent.email = email
doc_sent.user = user
doc_sent.name = name
doc_sent.contact_form_message = data['message_contact_form_footer']
doc_sent.status = OrderStatus.objects.get(name=u'Новый')
doc_sent.order_src = OrderSource.objects.get(name=u'Сайт - футер')
doc_sent.text = ''
doc_sent.save()
update_client_statistics(user)
email_context = {'client': name, 'email': email, 'type': u'Сайт - футер', 'message': data[
'message_contact_form_footer'], 'prolingva_tel': settings.PROLINGVA_TEL}
dash_views.send_email(request, 'orders.html', 'info@prolingva.ru', ['orders@prolingva.ru'], email_context)
return HttpResponse(message)
if request.POST.get('back_call'):
print('call')
name = request.POST['name_back_call_form']
tel = request.POST['tel_back_call_form']
back_call = BackCall()
back_call.tel = tel
back_call.name = name
back_call.save()
email_context = {'client': name, 'type': u'Обратный звонок', 'tel': tel, 'prolingva_tel': settings.PROLINGVA_TEL }
dash_views.send_email(request, 'orders.html', 'info@prolingva.ru', ['orders@prolingva.ru'], email_context)
return HttpResponse('ok')
if request.POST.get('learn_more'):
short_name_theme = ''
short_name_type = ''
if data['learn_more'] == 'trans_official_docs':
short_name_theme = 'official'
if data['learn_more'] == 'trans_medic':
short_name_theme = 'medic'
if data['learn_more'] == 'trans_custom':
short_name_theme = 'custom'
if data['learn_more'] == 'trans_tech':
short_name_theme = 'technical'
if data['learn_more'] == 'trans_www':
short_name_theme = 'www'
if data['learn_more'] == 'trans_economic':
short_name_theme = 'economic'
if data['learn_more'] == 'type_written':
short_name_type = 'written'
if data['learn_more'] == 'type_spoken':
short_name_type = 'spoken'
if data['learn_more'] == 'type_video':
short_name_type = 'video'
name = request.POST['name_contact_form']
email = request.POST['email_contact_form']
tel = request.POST['tel_contact_form']
text = request.POST['text_contact_form']
try:
# id = request.POST['tel_doc_send']
client = Client.objects.get(email=email)
user = User.objects.get(id=client.user.id)
message = 'user_exists'
except Client.DoesNotExist:
user = user_client_add(name, email, tel)['user']
message = 'ok'
doc_sent.author = user
doc_sent.email = email
doc_sent.user = user
doc_sent.name = name
doc_sent.tel = tel
doc_sent.contact_form_message = data['text_contact_form']
doc_sent.status = OrderStatus.objects.get(name=u'Новый')
doc_sent.order_src = OrderSource.objects.get(name=u'Сайт')
if short_name_type:
doc_sent.translation_type = TranslationType.objects.get(short_name=short_name_type)
email_context = {'client': name, 'type': u'Заказ услуги', 'email': email, 'tel': tel, 'order_type': short_name_type, 'message': data['text_contact_form']}
if short_name_theme:
email_context = {'client': name, 'type': u'Заказ услуги', 'email': email, 'tel': tel, 'order_theme': short_name_theme,
'prolingva_tel': settings.PROLINGVA_TEL, 'message': data['text_contact_form']}
doc_sent.translation_theme = TranslationTheme.objects.get(short_name=short_name_theme)
doc_sent.text = ''
doc_sent.save()
timeline_user = user
timeline_userprofile = UserProfile.objects.get(user=user)
timeline = TimelineOrder(order=doc_sent, author=timeline_user, author_profile=timeline_userprofile, event=u'Заявка создана')
timeline.save()
dash_views.send_email(request, 'orders.html', 'info@prolingva.ru', ['orders@prolingva.ru'], email_context)
return HttpResponse(message)
else:
if not request.user.is_anonymous():
user_profile = UserProfile.objects.get(user=request.user)
doc_sent.author = request.user
if user_profile.role.role_name != u'Клиент':
name = request.POST['name_doc_send']
email = request.POST['email_doc_send']
tel = request.POST['tel_doc_send']
# if request.POST['client_id_doc_send']:
# client_by_id = Client.objects.get(id=request.POST['client_id_doc_send'])
# user_exists = True
# user = User.objects.get(id=client.user.id)
# else:
#
# user = User()
# user.first_name = name
# user.email = email
# user.username = email
# password = User.objects.make_random_password()
# print(password)
# user.set_password(password)
# user.save()
#
# client = Client()
# client.name = name
# client.email = email
# client.tel = tel
# client.init_password = password
# client.user = user
# client.creator = request.user
# client.save()
#
# user_profile = UserProfile()
# user_profile.tel = tel
# user_profile.user = user
# user_profile.role = Role.objects.get(role_name='Client')
# user_profile.save()
try:
# id = request.POST['tel_doc_send']
client = Client.objects.get(email=email)
user_exists = True
user = User.objects.get(id=client.user.id)
except Client.DoesNotExist:
user = User()
user.first_name = name
user.email = email
user.username = email
password = User.objects.make_random_password()
print(password)
user.set_password(password)
user.save()
client = Client()
client.name = name
client.email = email
client.tel = tel
client.init_password = password
client.user = user
client.creator = request.user
client.save()
user_profile = UserProfile()
user_profile.tel = tel
user_profile.user = user
user_profile.role = Role.objects.get(role_name=u'Клиент')
user_profile.save()
else:
user_exists = True
client = Client.objects.get(user=request.user)
name = client.name
email = client.email
tel = client.tel
user = User.objects.get(id=request.user.id)
timeline_user = request.user
timeline_userprofile = UserProfile.objects.get(user=request.user)
else:
name = request.POST['name_doc_send']
email = request.POST['email_doc_send']
tel = request.POST['tel_doc_send']
try:
# id = request.POST['tel_doc_send']
client = Client.objects.get(email=email)
user_exists = True
user = User.objects.get(id=client.user.id)
except Client.DoesNotExist:
user = User()
user.first_name = name
user.email = email
user.username = email
password = User.objects.make_random_password()
print(password)
user.set_password(password)
user.save()
client = Client()
client.name = name
client.email = email
client.tel = tel
client.init_password = password
client.user = user
client.creator = user
client.save()
user_profile = UserProfile()
user_profile.tel = tel
user_profile.user = user
user_profile.role = Role.objects.get(role_name=u'Клиент')
user_profile.save()
timeline_user = user
timeline_userprofile = user_profile
email_context = {
'prolingva_tel': settings.PROLINGVA_TEL,
'user': user,
'login': client.email,
'password': client.init_password,
}
dash_views.send_email(request, 'order_welcome.html', 'info@prolingva.ru', client.email, email_context)
# dash_views.send_email(request, 'order_welcome.html', 'info@prolingva.ru', ['orders@prolingva.ru'], email_context)
doc_sent.author = user
text = request.POST.get('text_doc_send')
if request.POST.get('trans_from'):
try:
trans_from = Language.objects.get(name=request.POST.get('trans_from'))
except Language.DoesNotExist:
trans_from = None
doc_sent.trans_from = trans_from
if request.POST.get('trans_to'):
try:
trans_to = Language.objects.get(name=request.POST.get('trans_to'))
except Language.DoesNotExist:
trans_to = None
doc_sent.trans_to = trans_to
doc_sent.email = email
doc_sent.user = user
doc_sent.name = name
doc_sent.tel = tel
doc_sent.text = text
doc_sent.status = OrderStatus.objects.get(name=u'Новый')
# doc_sent.author = request.user
# file = SentFiles()
r = re.compile(r'[{}]'.format(punctuation))
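        # The character class matches any single punctuation character;
        # replacing matches with spaces lets split() count words without
        # punctuation glued to them.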
if text:
text_str = r.sub(' ', text)
text_qnt = len(text_str.split())
doc_sent.text_qnt = text_qnt
numbers = sum(c.isdigit() for c in text)
alphas = sum(c.isalpha() for c in text)
spaces = sum(c.isspace() for c in text)
symbols = len(text) - numbers - alphas - spaces
doc_sent.text_alphas_qnt = alphas
doc_sent.text_digits_qnt = numbers
doc_sent.text_spaces_qnt = spaces
doc_sent.text_symbols_qnt = symbols
doc_sent.text_total_qnt = len(text)
files_qnt = 0
for f in request.FILES:
files_qnt += 1
if files_qnt > 0:
doc_sent.files_qnt = files_qnt
if request.user.is_authenticated():
try:
user_profile = UserProfile.objects.get(user=request.user)
if user_profile.role.role_name != u'Клиент':
doc_sent.order_src = OrderSource.objects.get(name=u'Персонал')
email_source = u'Персонал'
else:
logger.debug('client')
if request.POST.get('order_source') == 'dashboard':
logger.debug('clientwww')
doc_sent.order_src = OrderSource.objects.get(name=u'Панель управления - Клиент')
email_source = u'Панель управления - Клиент'
except UserProfile.DoesNotExist:
doc_sent.order_src = OrderSource.objects.get(name=u'Сайт')
email_source = u'Сайт - заявка'
doc_sent.save()
# sys.getdefaultencoding = lambda : 'UTF-8'
logger.debug('--------------')
# if 'file' in data:
# logger.debug(data['file[0]'])
# logger.debug(request.FILES.getlist('file[0]'))
data_files = request.FILES
logger.debug(data_files)
for k in request.FILES.keys():
logger.debug('key = ' + str(k))
for f in request.FILES.getlist(k):
# f = request.FILES.getlist(key):
# logger.debug('file = ' + str(f.name).encode('utf-8'))
# logger.debug('file = ' + str(f.name).encode('utf-8'))
# file_name = f.name.split('.')
# s = SentFiles(file=f, sent_doc=doc_sent, file_name=f.name)
s = SentFiles(file=f, sent_doc=doc_sent)
# new_file_name = update_filename(s, f.name)
# logger.debug(str(new_file_name))
# s = SentFiles(file=f, sent_doc=doc_sent, file_name=unicodedata.normalize('NFKD', f.name).encode('utf-8', 'ignore'))
s.save()
if user_exists:
message = 'user_exists'
timeline_user = user
timeline_userprofile = user_profile
else:
message = 'ok'
# timeline_user = None
# timeline_userprofile = None
timeline = TimelineOrder(order=doc_sent, author=timeline_user, author_profile=timeline_userprofile, event=u'Заявка создана')
timeline.save()
email_context = {'client': name, 'email': email, 'type': email_source, 'ID': doc_sent.id, 'tel': tel, 'prolingva_tel': settings.PROLINGVA_TEL}
dash_views.send_email(request, 'orders.html', 'info@prolingva.ru', ['orders@prolingva.ru'], email_context)
return HttpResponse(message)
return HttpResponse()
def update_filename(instance, filename):
# path = "upload/path/"
format_ = instance.useinstance.transaction_uuid + instance.file_extension
return os.path.join(format_)
def learn_more_trans(request):
context = {
'learn_more': 'trans',
'langs': Language.objects.all().order_by('name'),
'prolingva_tel': settings.PROLINGVA_TEL
}
return render(request, 'learn_more_trans.html', context)
def learn_more_types(request):
context = {
'learn_more': 'types',
'langs': Language.objects.all().order_by('name'),
'prolingva_tel': settings.PROLINGVA_TEL
}
return render(request, 'learn_more_trans.html', context)
def confidentiality(request):
context = {
'confidentiality': 'yes',
'prolingva_tel': settings.PROLINGVA_TEL
}
return render(request, 'confidentiality.html', context)
@login_required(redirect_field_name=None, login_url='/ru/dashbrd/login')
def get_testimonials_list(request):
context = dash_views.get_data_proc(request)
testimonials = Testimonials.objects.all()
context.update(
{
'testimonials': testimonials
})
return render(request, 'testimonials_list.html', context)
@login_required(redirect_field_name=None, login_url='/ru/dashbrd/login')
def get_new_testimonials_list(request):
context = dash_views.get_data_proc(request)
testimonials = Testimonials.objects.filter(new=True)
context.update(
{
'testimonials': testimonials,
'new': 1
})
return render(request, 'testimonials_list.html', context)
# def path_and_rename(path):
# def wrapper(instance, filename):
# ext = filename.split('.')[-1]
# # get filename
# if instance.pk:
# filename = '{}.{}'.format(instance.pk, ext)
# else:
# # set filename as random string
# filename = '{}.{}'.format(uuid4().hex, ext)
# # return the whole path to the file
# return os.path.join(path, filename)
# return wrapper
| [
"bomvendador@yandex.ru"
] | bomvendador@yandex.ru |
0f7d7020576e273030845274280660faaa0eb0d4 | a2b429075098ef615a104845b8434e7fdeff9d14 | /antspynet/architectures/create_transformer_model.py | b6a98b298ce927d7b6731ff412647e2d4bb336d2 | [
"Apache-2.0"
] | permissive | ANTsX/ANTsPyNet | de95ec1ceca6bd146b99127c36273ba4649be40b | 1703acb58ed053ce3348aa061e4087bac953dd07 | refs/heads/master | 2023-08-09T17:26:33.179674 | 2023-08-04T14:22:18 | 2023-08-04T14:22:18 | 189,067,098 | 171 | 36 | Apache-2.0 | 2023-07-13T15:57:34 | 2019-05-28T16:44:24 | Python | UTF-8 | Python | false | false | 16,923 | py | import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Add, Dense, Dropout, Flatten,
Input, LayerNormalization, MultiHeadAttention)
from antspynet.utilities import (ExtractPatches2D, ExtractPatches3D, EncodePatches,
ExtractConvolutionalPatches2D, ExtractConvolutionalPatches3D,
StochasticDepth)
import numpy as np
def multilayer_perceptron(x, hidden_units, dropout_rate=0.0):
for units in hidden_units:
x = Dense(units, activation=tf.nn.gelu)(x)
if dropout_rate > 0.0:
x = Dropout(dropout_rate)(x)
return x
def create_vision_transformer_model_2d(input_image_size,
number_of_classification_labels=1000,
mode='classification',
patch_size=6,
number_of_transformer_layers=8,
transformer_units=[128, 64],
projection_dimension=64,
number_of_attention_heads=4,
mlp_head_units=[2048, 1024],
dropout_rate=0.5):
"""
Implementation of the Vision transformer architecture.
https://keras.io/examples/vision/image_classification_with_vision_transformer/
Arguments
---------
    input_image_size : tuple of length 3
Used for specifying the input tensor shape. The shape (or dimension) of
that tensor is the image dimensions followed by the number of channels
(e.g., red, green, and blue).
number_of_classification_labels : int
Number of classification labels.
patch_size : int
Size of a single patch dimension.
number_of_transformer_layers : int
Number of transformer layers.
transformer_units : tuple or list
Size of the hidden units in the layers of the MLP.
projection_dimension : int
Multi-head attention layer parameter
mlp_head_units : tuple or list
Size of the dense layers of the final classifier.
dropout_rate : float between 0 and 1
Dropout rate of the multilayer perceptron and the previous dropout layer.
Returns
-------
Keras model
A 2-D keras model.
Example
-------
>>> model = create_vision_transformer_model_2d((224, 224, 1))
>>> model.summary()
"""
inputs = Input(shape=input_image_size)
patches = ExtractPatches2D(patch_size)(inputs)
number_of_patches = ((input_image_size[0] * input_image_size[1]) // (patch_size ** 2))
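    # Worked count, assuming the spatial dims divide evenly by patch_size:
    # a 216x216 input with patch_size=6 gives (216*216)//36 = 1296 patches
    # (36 patches per side).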
encoded_patches = EncodePatches(number_of_patches,
projection_dimension)(patches)
for _ in range(number_of_transformer_layers):
x1 = LayerNormalization(epsilon=1e-6)(encoded_patches)
attention_output = MultiHeadAttention(num_heads=number_of_attention_heads,
key_dim=projection_dimension,
dropout=dropout_rate/5.0)(x1, x1)
x2 = Add()([attention_output, encoded_patches])
x3 = LayerNormalization(epsilon=1e-6)(x2)
x3 = multilayer_perceptron(x3,
hidden_units=transformer_units,
dropout_rate=dropout_rate/5.0)
encoded_patches = Add()([x3, x2])
representation = LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = Flatten()(representation)
representation = Dropout(dropout_rate)(representation)
features = multilayer_perceptron(representation,
hidden_units=mlp_head_units,
dropout_rate=dropout_rate)
layer_activation = ''
if mode == 'classification':
layer_activation = 'softmax'
elif mode == 'regression':
layer_activation = 'linear'
elif mode == 'sigmoid':
layer_activation = 'sigmoid'
else:
raise ValueError('mode must be either `classification` or `regression`.')
outputs = Dense(number_of_classification_labels,
activation=layer_activation)(features)
model = Model(inputs=inputs, outputs=outputs)
return model
def create_vision_transformer_model_3d(input_image_size,
number_of_classification_labels=1000,
mode="classification",
patch_size=6,
number_of_transformer_layers=8,
transformer_units=[128, 64],
projection_dimension=64,
number_of_attention_heads=4,
mlp_head_units=[2048, 1024],
dropout_rate=0.5):
"""
Implementation of the Vision transformer architecture.
https://keras.io/examples/vision/image_classification_with_vision_transformer/
Arguments
---------
    input_image_size : tuple of length 4
Used for specifying the input tensor shape. The shape (or dimension) of
that tensor is the image dimensions followed by the number of channels
(e.g., red, green, and blue).
number_of_classification_labels : int
Number of classification labels.
patch_size : int
Size of a single patch dimension.
number_of_transformer_layers : int
Number of transformer layers.
transformer_units : tuple or list
Size of the hidden units in the layers of the MLP.
projection_dimension : int
Multi-head attention layer parameter
mlp_head_units : tuple or list
Size of the dense layers of the final classifier.
dropout_rate : float between 0 and 1
Dropout rate of the multilayer perceptron and the previous dropout layer.
Returns
-------
Keras model
A 3-D keras model.
Example
-------
    >>> model = create_vision_transformer_model_3d((224, 224, 224, 1))
>>> model.summary()
"""
inputs = Input(shape=input_image_size)
patches = ExtractPatches3D(patch_size)(inputs)
number_of_patches = ((input_image_size[0] * input_image_size[1] * input_image_size[2]) // (patch_size ** 3))
encoded_patches = EncodePatches(number_of_patches,
projection_dimension)(patches)
for _ in range(number_of_transformer_layers):
x1 = LayerNormalization(epsilon=1e-6)(encoded_patches)
attention_output = MultiHeadAttention(num_heads=number_of_attention_heads,
key_dim=projection_dimension,
dropout=dropout_rate/5.0)(x1, x1)
x2 = Add()([attention_output, encoded_patches])
x3 = LayerNormalization(epsilon=1e-6)(x2)
x3 = multilayer_perceptron(x3,
hidden_units=transformer_units,
dropout_rate=dropout_rate/5.0)
encoded_patches = Add()([x3, x2])
representation = LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = Flatten()(representation)
representation = Dropout(dropout_rate)(representation)
features = multilayer_perceptron(representation,
hidden_units=mlp_head_units,
dropout_rate=dropout_rate)
layer_activation = ''
if mode == 'classification':
layer_activation = 'softmax'
elif mode == 'regression':
layer_activation = 'linear'
elif mode == 'sigmoid':
layer_activation = 'sigmoid'
else:
raise ValueError('mode must be either `classification` or `regression`.')
outputs = Dense(number_of_classification_labels,
activation=layer_activation)(features)
model = Model(inputs=inputs, outputs=outputs)
return model
def create_compact_convolutional_transformer_model_2d(input_image_size,
number_of_classification_labels=1000,
mode="classification",
number_of_transformer_layers=2,
transformer_units=[128, 128],
projection_dimension=64,
number_of_attention_heads=4,
stochastic_depth_rate=0.1):
"""
Implementation of the Vision transformer architecture.
https://keras.io/examples/vision/cct/
Arguments
---------
    input_image_size : tuple of length 3
Used for specifying the input tensor shape. The shape (or dimension) of
that tensor is the image dimensions followed by the number of channels
(e.g., red, green, and blue).
number_of_classification_labels : int
Number of classification labels.
number_of_transformer_layers : int
Number of transformer layers.
transformer_units : tuple or list
Size of the hidden units in the layers of the MLP.
projection_dimension : int
Multi-head attention layer parameter
stochastic_depth_rate : float between 0 and 1
Dropout rate of the stochastic depth layer
Returns
-------
Keras model
A 2-D keras model.
Example
-------
>>> model = antspynet.create_compact_convolutional_transformer_model_2d((224, 224, 1))
>>> model.summary()
"""
inputs = Input(shape=input_image_size)
ExtractPatches = ExtractConvolutionalPatches2D(kernel_size=3,
stride=1,
padding=1,
pooling_kernel_size=3,
pooling_stride=2,
number_of_filters=[64, 128],
do_positional_embedding=True)
encoded_patches = ExtractPatches(inputs)
# Apply positional embedding.
positional_embedding, sequence_length = ExtractPatches.positional_embedding(input_image_size)
positions = tf.range(start=0, limit=sequence_length, delta=1)
position_embeddings = positional_embedding(positions)
encoded_patches += position_embeddings
# Calculate Stochastic Depth probabilities.
dpr = [x for x in np.linspace(0, stochastic_depth_rate, number_of_transformer_layers)]
for i in range(number_of_transformer_layers):
x1 = LayerNormalization(epsilon=1e-5)(encoded_patches)
attention_output = MultiHeadAttention(num_heads=number_of_attention_heads,
key_dim=projection_dimension,
dropout=0.1)(x1, x1)
attention_output = StochasticDepth(dpr[i])(attention_output)
x2 = Add()([attention_output, encoded_patches])
x3 = LayerNormalization(epsilon=1e-5)(x2)
x3 = multilayer_perceptron(x3,
hidden_units=transformer_units,
dropout_rate=0.1)
x3 = StochasticDepth(dpr[i])(x3)
encoded_patches = Add()([x3, x2])
representation = LayerNormalization(epsilon=1e-5)(encoded_patches)
attention_weights = tf.nn.softmax(Dense(1)(representation), axis=1)
weighted_representation = tf.matmul(attention_weights, representation, transpose_a=True)
weighted_representation = tf.squeeze(weighted_representation, -2)
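    # Sequence pooling: a learned softmax over the token axis produces a
    # single attention-weighted average token, which replaces ViT's
    # flatten / class-token readout before the classifier head.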
layer_activation = ''
if mode == 'classification':
layer_activation = 'softmax'
elif mode == 'regression':
layer_activation = 'linear'
elif mode == 'sigmoid':
layer_activation = 'sigmoid'
else:
raise ValueError('mode must be either `classification` or `regression`.')
outputs = Dense(number_of_classification_labels,
activation=layer_activation)(weighted_representation)
model = Model(inputs=inputs, outputs=outputs)
return model
def create_compact_convolutional_transformer_model_3d(input_image_size,
number_of_classification_labels=1000,
mode="classification",
number_of_transformer_layers=2,
transformer_units=[128, 128],
projection_dimension=64,
number_of_attention_heads=4,
stochastic_depth_rate=0.1):
"""
Implementation of the Vision transformer architecture.
https://keras.io/examples/vision/cct/
Arguments
---------
    input_image_size : tuple of length 4
Used for specifying the input tensor shape. The shape (or dimension) of
that tensor is the image dimensions followed by the number of channels
(e.g., red, green, and blue).
number_of_classification_labels : int
Number of classification labels.
number_of_transformer_layers : int
Number of transformer layers.
transformer_units : tuple or list
Size of the hidden units in the layers of the MLP.
projection_dimension : int
Multi-head attention layer parameter
stochastic_depth_rate : float between 0 and 1
Dropout rate of the stochastic depth layer
Returns
-------
Keras model
A 3-D keras model.
Example
-------
>>> model = antspynet.create_compact_convolutional_transformer_model_3d((224, 224, 224, 1))
>>> model.summary()
"""
inputs = Input(shape=input_image_size)
ExtractPatches = ExtractConvolutionalPatches3D(kernel_size=3,
stride=1,
padding=1,
pooling_kernel_size=3,
pooling_stride=2,
number_of_filters=[64, 128],
do_positional_embedding=True)
encoded_patches = ExtractPatches(inputs)
# Apply positional embedding.
positional_embedding, sequence_length = ExtractPatches.positional_embedding(input_image_size)
positions = tf.range(start=0, limit=sequence_length, delta=1)
position_embeddings = positional_embedding(positions)
encoded_patches += position_embeddings
# Calculate Stochastic Depth probabilities.
dpr = [x for x in np.linspace(0, stochastic_depth_rate, number_of_transformer_layers)]
for i in range(number_of_transformer_layers):
x1 = LayerNormalization(epsilon=1e-5)(encoded_patches)
attention_output = MultiHeadAttention(num_heads=number_of_attention_heads,
key_dim=projection_dimension,
dropout=0.1)(x1, x1)
attention_output = StochasticDepth(dpr[i])(attention_output)
x2 = Add()([attention_output, encoded_patches])
x3 = LayerNormalization(epsilon=1e-5)(x2)
x3 = multilayer_perceptron(x3,
hidden_units=transformer_units,
dropout_rate=0.1)
x3 = StochasticDepth(dpr[i])(x3)
encoded_patches = Add()([x3, x2])
representation = LayerNormalization(epsilon=1e-5)(encoded_patches)
attention_weights = tf.nn.softmax(Dense(1)(representation), axis=1)
weighted_representation = tf.matmul(attention_weights, representation, transpose_a=True)
weighted_representation = tf.squeeze(weighted_representation, -2)
layer_activation = ''
if mode == 'classification':
layer_activation = 'softmax'
elif mode == 'regression':
layer_activation = 'linear'
elif mode == 'sigmoid':
layer_activation = 'sigmoid'
else:
raise ValueError('mode must be either `classification` or `regression`.')
outputs = Dense(number_of_classification_labels,
activation=layer_activation)(weighted_representation)
model = Model(inputs=inputs, outputs=outputs)
return model
| [
"ntustison@gmail.com"
] | ntustison@gmail.com |
7888f9d838d0b3092c11ba496518c7f5bda7104d | 453e53e9074c1657b3d04c3d3c07b8b21601852a | /.history/scraping_vivino_20210205211247.py | 94882c0a467b5cae6a96af69c9894569de42a2ed | [] | no_license | kristiewirth/vivino-data-analysis | 58fd66fb3b8d8a098dab5bc030b0110b287f6e22 | 8c02c14926fdd6dbb493b68b64daa27a7242780f | refs/heads/main | 2023-02-28T02:44:15.130286 | 2021-02-06T05:25:59 | 2021-02-06T05:25:59 | 336,463,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | import pprint as pprint
import pandas as pd
import requests
import seaborn as sns
import numpy as np
df = pd.DataFrame(columns=["wine", "rating", "price"])
for page_num in np.arange(1, 10, 1):
r = requests.get(
"https://www.vivino.com/api/explore/explore",
params={
"currency_code": "US",
"min_rating": "1",
"page": page_num,
# "price_range_max": "100",
"price_range_min": "9",
"order_by": "price",
"order": "asc",
},
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0"
},
)
for record in r.json()["explore_vintage"]["matches"]:
try:
wine = record["vintage"]["name"]
rating = record["vintage"]["statistics"]["ratings_average"]
price = record["price"]["amount"]
df = df.append(
pd.DataFrame(
[[wine, rating, price]], columns=["wine", "rating", "price"]
)
)
except Exception:
pass
df.reset_index(inplace=True, drop=True)
df.sort_values(by="price", ascending=True, inplace=True)
df.to_csv("vivino-ratings.csv")
############################################################
df = pd.read_csv("vivino-ratings.csv")
# Graphing
sns.regplot(data=df, x="price", y="rating")
# Making summary df
df["rounded_price"] = 5 * round(df["price"] / 5)
rounded_ratings_df = (
pd.DataFrame(df.groupby("rounded_price")["rating"].mean())
.sort_values(by="rounded_price", ascending=True)
.reset_index(drop=False)
)
rounded_ratings_df["rounded_rating"] = round(rounded_ratings_df["rating"], 2)
# Calculating increases in average rating from one price bucket to the next
previous = 0
diffs = []
for i, row in rounded_ratings_df.iterrows():
if previous == 0:
diffs.append(0)
else:
diff = row["rating"] - previous
diffs.append(diff)
previous = row["rating"]
rounded_ratings_df["increase_in_rating"] = diffs
rounded_ratings_df.drop("rating", axis=1, inplace=True) | [
"kristie.ann.wirth@gmail.com"
] | kristie.ann.wirth@gmail.com |
71d52862afb3a6831974332d3e21dc231baa6c9b | 9d30115d59ed821a5c7aecf2318b5e0ed22c9676 | /src/codewars/python/5kyu/to_camel_case.py | 4b2b5f97106701d81ffce74746cf93b6f058d660 | [] | no_license | garigari-kun/til | 02c7bf05274d1077b454e1f7d4a7355849441524 | b71f36a66045ab7da7f4a97f7e18de2aaa05f493 | refs/heads/master | 2020-04-16T02:13:45.727909 | 2018-12-16T01:26:40 | 2018-12-16T01:26:40 | 56,369,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | """
Complete the method/function so that it converts dash/underscore delimited words into camel casing.
The first word within the output should be capitalized only if the original word was capitalized.
Examples:
# returns "theStealthWarrior"
to_camel_case("the-stealth-warrior")
# returns "TheStealthWarrior"
to_camel_case("The_Stealth_Warrior")
"""
def to_camel_case(text):
    if not len(text):
        return ''
    result = ''
    # Normalize both delimiters to '-' and split unconditionally, so a
    # single word with no delimiter is returned unchanged (the old
    # `if '-' in text` guard left word_list empty and returned '').
    text = text.replace('_', '-')
    word_list = text.split('-')
    for index, word in enumerate(word_list):
        if index == 0 or not word:
            result += word
        elif word[0].islower():
            result += word[0].capitalize() + word[1:]
        else:
            result += word
return result
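# A more compact equivalent (a sketch, checked only against the examples
# below): keep the first chunk verbatim and upper-case the first letter of
# every later chunk.
def to_camel_case_alt(text):
    parts = text.replace('_', '-').split('-')
    return parts[0] + ''.join(p[:1].upper() + p[1:] for p in parts[1:])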
if __name__ == '__main__':
print(to_camel_case("the-stealth-warrior"))
print(to_camel_case("The_Stealth_Warrior"))
print(to_camel_case("The-pippi_was_Hungry"))
| [
"keisuke.cs@gmail.com"
] | keisuke.cs@gmail.com |
8c1bc0ee50750025de12f5b5afc1698f858033f0 | b9865b85f99ece1cb92038a53a6f9f205fe69bc2 | /sddsToolkit/printpage.py | 7e7d5337c0cdbe2352afce9ff19b5f781e932f8a | [] | no_license | Tubbz-alt/SDDSTOOLS | 43aa32dc556c8e1886eb408b09fb600bc6da91a3 | d2a896277d6a516d7138c493a51119ac50c586c6 | refs/heads/master | 2021-05-29T08:49:46.271548 | 2014-10-22T17:58:14 | 2014-10-22T17:58:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | from sdds import sddsdata
def printpage(sddsobj):
numberOfColumns = len(sddsobj.columnName)
a=[[] for i in range(numberOfColumns) ]
print a
# for i in range(numberOfParameters):
# sddsobj.parameterData[i].append(sddsdata.GetParameter(sddsobj.index,i))
for i in range(numberOfColumns):
a[i].append(sddsdata.GetColumn(sddsobj.index,i))
print a
print a[1][0][0]
| [
"joelfred@slac.stanford.edu"
] | joelfred@slac.stanford.edu |
4196e6ba86de68c23b2e601d85a6e2f9d3a8ec74 | 24c5b944717b35db74fe33c494ded4194b53aeca | /sephiroth/__init__.py | 6cf205d176b48d14b97c2b3b6f83b8b5d367ecc1 | [
"WTFPL"
] | permissive | zevlag/sephiroth | bd1f42f697fb8dffbc38f5388636ff848de72b0f | 4c77aee5880751962e29b9a439a17c9c5e725a40 | refs/heads/main | 2023-03-12T07:24:53.636547 | 2021-02-26T18:45:10 | 2021-02-26T18:45:10 | 342,656,892 | 0 | 0 | WTFPL | 2021-02-26T17:56:09 | 2021-02-26T17:56:09 | null | UTF-8 | Python | false | false | 128 | py | __author__ = "0xdade"
__maintainer__ = "0xdade"
__email__ = "dade@actualcrimes.org"
__license__ = "WTFPL"
__version__ = "1.0.2"
| [
"0xdade@users.noreply.github.com"
] | 0xdade@users.noreply.github.com |
466e264e0eb65c0ff42c8b1860796f28bd68de10 | 3c24f5e9a513f447d2d9f4c912e020775e15bebe | /dbm.py | d8a5a65cd6fc91862b53ab2526b266a7120d54ad | [
"MIT"
] | permissive | GiggleLiu/QuRBM | 2ef5659640f272dd0bd7500862f78e1770e5376d | 2cb16e534ccbf875b88c164837bb8ffada5a2b03 | refs/heads/master | 2021-01-23T01:45:44.578449 | 2017-05-01T16:40:05 | 2017-05-01T16:40:05 | 85,930,945 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,550 | py | '''
Deep Boltzmann Machine (a multi-layer generalization of the RBM).
'''
from numpy import *
import numbers,pdb
from scipy.special import expit
from sstate import SparseState
from group import NoGroup
from utils import fh
__all__=['DBM','random_dbm']
class DBM(object):
    '''
    Deep Boltzmann Machine class.
    Attributes:
        :b_L: list of 1darray, biases; b_L[0] is the input-layer bias, the rest are hidden-layer biases.
        :W_L: list of 2darray, the weights between consecutive layers, with W_L[i].shape == (len(b_L[i]), len(b_L[i+1])).
        :group: Group, translation group.
        :nin: int, number of input nodes (len(b_L[0])).
    '''
def __init__(self,b_L,W_L,group=NoGroup(),var_mask=None,input_node_type='linear',hidden_node_type='linear'):
self.b_L,self.W_L=b_L,W_L
self.group=group
if var_mask is None:
var_mask=[True]*(len(b_L)+len(W_L))
else:
if len(var_mask)!=len(b_L)+len(W_L):raise ValueError('number of variable mask not match.')
self.var_mask=var_mask
self.input_node_type,self.hidden_node_type=input_node_type,hidden_node_type
#check data
for i in xrange(len(W_L)):
w=W_L[i]
bl=b_L[i]
br=b_L[i+1]
if w.shape!=(len(bl),len(br)):
raise ValueError('Matrix-bias shape mismatch.')
if not len(self.b_L)==len(self.W_L)+1: raise ValueError('# of layer weights and biases not match.')
def __str__(self):
return '<DBM>\n%s\n%s\nGroup = %s'%('\n'.join(['b(%s) %s'%(i,b) for i,b in enumerate(self.b_L)]),\
'\n'.join(['W(%s,%s) %s'%(i,i+1,W) for i,W in enumerate(self.W_L)]),self.group)
def __repr__(self):
return '<DBM> in[%s] hid[%s]'%(self.nin,' x '.join([str(len(b)) for b in self.b_L[1:]]))
@property
def num_layers(self):return len(self.b_L)
@property
def nin(self): return len(self.b_L[0])
@property
def weight_dtype(self):
        return self.W_L[0].dtype
def layer_dim(self,i):
'''dimension of i-th layer.'''
return len(self.b_L[i])
def get_W0_nogroup(self):
'''Get the group expanded W.'''
return self.group.unfold_W(self.W_L[0])
def get_a_nogroup(self):
'''Get the group expanded a.'''
return self.group.unfold_a(self.b_L[0])
def feed_input(self,v):
'''
Feed visible inputs, and get output in hidden layers.
Parameters:
:v: 1d array, input vector.
Return:
1darray, raw output in hidden nodes.
'''
for W,b in zip(self.W_L,self.b_L[1:]):
v=v.dot(W)+b
if self.hidden_node_type=='binary':
v=expit(v)
return v
def feed_hidden(self,h):
'''
Feed hidden inputs, and reconstruct visible layers.
Parameters:
:h: 1d array, input vector.
Return:
1darray, raw output in input nodes.
'''
        # Reconstruct the input layer from the first hidden layer
        # (RBM-style), using the group-unfolded first weight matrix.
        if h.ndim>1:
            res=self.get_W0_nogroup().dot(h.T).T+self.get_a_nogroup()
        else:
            res=self.get_W0_nogroup().dot(h)+self.get_a_nogroup()
        if self.input_node_type=='binary':
            return expit(res)
        else:
            return res
    def tovec(self,spaceconfig): # poorly designed interface.
'''
Get the state vector.
        \Psi(s,W)=\sum_{\{h_i\}} e^{\sum_j a_j\sigma_j^z+\sum_i b_ih_i+\sum_{ij}W_{ij}h_i\sigma_j}
'''
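        # Summing out binary hidden units turns the sum over {h_i} into a
        # product over hidden nodes; get_weight evaluates that product via
        # fh(theta) from utils (presumably 2*cosh(theta) for +-1 units --
        # an assumption, since fh's definition is not shown here).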
return self.get_weight(config=1-2*spaceconfig.ind2config(arange(spaceconfig.hndim)))
def get_weight(self,config,theta=None):
'''
Get the weight for specific configuration.
Parameters:
:config: 1darray,
:theta: 1darray/None, table of hidden layer output: b+v.dot(W), intended to boost operation.
Return:
number,
'''
group=self.group
if theta is None: theta=self.feed_input(config)
        return exp(sum([group.apply(asarray(config),ig).dot(self.b_L[0]) for ig in xrange(group.ng)],axis=0))*prod(fh(theta),axis=-1)
def dump_arr(self):
'''Dump values to an array.'''
return concatenate([b for b,mask in zip(self.b_L,self.var_mask[:self.num_layers]) if mask]+\
[W.ravel() for W,mask in zip(self.W_L,self.var_mask[self.num_layers:]) if mask])
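    # Flat layout: biases of every unmasked layer in order, followed by each
    # unmasked weight matrix flattened row-major; load_arr below reads the
    # values back in exactly the same order.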
def load_arr(self,v):
'''Load data from an array.'''
offset=0
for b,mask in zip(self.b_L,self.var_mask[:self.num_layers]):
if mask:
layer_size=len(b)
b[:]=v[offset:offset+layer_size]
offset+=layer_size
for W,mask in zip(self.W_L,self.var_mask[self.num_layers:]):
if mask:
layer_size=W.shape[0]*W.shape[1]
W[...]=v[offset:offset+layer_size].reshape(W.shape)
offset+=layer_size
def random_dbm(dims,group=NoGroup(),dtype='complex128',magnitude=2e-2,**kwargs):
    '''Get a random Deep Boltzmann Machine.'''
num_layers=len(dims)
b_L,W_L=[],[]
if dtype=='complex128':
rng=lambda shape:random.uniform(-magnitude,magnitude,shape)+1j*random.uniform(-magnitude,magnitude,shape)
elif dtype=='float64':
rng=lambda shape:random.uniform(-magnitude,magnitude,shape)
else:
raise ValueError('unsupported dtype %s'%dtype)
for i in xrange(num_layers):
b_L.append(rng(dims[i]))
if i!=0:
W_L.append(rng((dims[i-1],dims[i])))
return DBM(b_L=b_L,W_L=W_L,group=group,**kwargs)
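if __name__=='__main__':
    # Minimal smoke test (a sketch; the dims are arbitrary): build a small
    # real-valued DBM and push a random +-1 configuration through it.
    dbm=random_dbm(dims=[4,3,2],dtype='float64')
    v=1-2*random.randint(0,2,size=4)
    print(dbm.feed_input(v))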
| [
"cacate0129@gmail.com"
] | cacate0129@gmail.com |
d93af5bb78793b79096c1c32ccf6655fe77bfd53 | 74d0235c4eed1e4bc57dd906d2b3958cb48b9dba | /torch/fx/experimental/fx2trt/example/unittests.py | a8d8d4cb93d08263d3c834e4a91fbda507846076 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | anjali411/pytorch | a31ecf84fe892f19452b1063f2b1de1f88d84bb0 | 51b67f2bca3014aa5e7f675237543b8f82743032 | refs/heads/master | 2022-07-22T16:58:56.800837 | 2021-10-14T17:22:15 | 2021-10-14T17:23:55 | 208,863,312 | 1 | 0 | NOASSERTION | 2020-05-14T06:54:25 | 2019-09-16T17:56:13 | C++ | UTF-8 | Python | false | false | 4,052 | py | import torch
from torch.ao.quantization.quantize_fx import (
prepare_fx,
convert_fx,
get_tensorrt_backend_config_dict
)
import torch.fx.experimental.fx_acc.acc_tracer as acc_tracer
from torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.common_quantization import NodeSpec as ns
import unittest
def lower_to_trt(model, sample_input, shape_ranges):
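    # Lowering flow: symbolically trace the model with acc_tracer, run the
    # TRT interpreter over the traced graph to build an INT8 engine with
    # explicit batch dimension and precision, then wrap the engine so it is
    # callable like a regular module.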
model = acc_tracer.trace(model, [sample_input]) # type: ignore[attr-defined]
interp = TRTInterpreter(
model,
[InputTensorSpec(
torch.Size([-1, *sample_input.shape[1:]]), torch.float,
shape_ranges=shape_ranges, has_batch_dim=True)],
explicit_batch_dimension=True, explicit_precision=True)
engine, input_names, output_names = interp.run(fp16_mode=False, int8_mode=True)
trt_mod = TRTModule(engine, input_names, output_names)
return trt_mod
@unittest.skipIf(not TEST_CUDA, "gpu is not available.")
class TestQuantizeFxTRT(QuantizationTestCase):
def test_conv(self):
class Conv2d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.conv = torch.nn.Conv2d(*args)
def forward(self, x):
return self.conv(x)
conv2d_input = torch.rand(1, 3, 224, 224)
conv2d_module_args = (3, 3, 3)
m = Conv2d(*conv2d_module_args).eval()
qconfig = torch.ao.quantization.QConfig(
activation=torch.ao.quantization.observer.HistogramObserver.with_args(
qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
),
weight=torch.ao.quantization.default_weight_observer
)
prepared = prepare_fx(m, {"": qconfig}, backend_config_dict=get_tensorrt_backend_config_dict())
# calibration
prepared(conv2d_input)
quantized = convert_fx(prepared, is_reference=True)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 1
}
self.checkGraphModuleNodes(quantized, expected_node_occurrence=node_occurrence)
# lower to trt
trt_mod = lower_to_trt(quantized, conv2d_input, [((1, 3, 224, 224), (5, 3, 224, 224), (10, 3, 224, 224))])
# make sure it runs
trt_mod(conv2d_input.cuda())
def test_linear(self):
class LinearModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 10)
def forward(self, x):
return self.linear(x)
linear_module_input = torch.rand(8, 5)
m = LinearModule().eval()
qconfig = torch.ao.quantization.QConfig(
activation=torch.ao.quantization.observer.HistogramObserver.with_args(
qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
),
weight=torch.ao.quantization.default_weight_observer
)
prepared = prepare_fx(m, {"": qconfig}, backend_config_dict=get_tensorrt_backend_config_dict())
# calibration
prepared(linear_module_input)
quantized = convert_fx(prepared, is_reference=True)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 1
}
self.checkGraphModuleNodes(quantized, expected_node_occurrence=node_occurrence)
# lower to trt
trt_mod = lower_to_trt(
quantized,
linear_module_input,
[((1, *linear_module_input.shape[1:]),
(5, *linear_module_input.shape[1:]),
(10, *linear_module_input.shape[1:]))])
# make sure it runs
trt_mod(linear_module_input.cuda())
if __name__ == '__main__':
run_tests()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f700721c9081d5fcaafc1f6a00bf77aae404a0d3 | 8ad573e455450ce45b8dfd1cb995a8ce43f9a114 | /core/result/failure/bad_request.py | a7b271b3de9e8ba75db86d3542a964b797fee057 | [] | no_license | afsaneh92/dr_autol | 4da458b8a6682603c227e34a1a827a5918d40831 | a1bd6d55ce9b67543ad7387631c48440dd38f68d | refs/heads/master | 2022-12-23T08:32:47.851059 | 2019-12-10T13:51:24 | 2019-12-10T13:51:24 | 227,130,083 | 0 | 0 | null | 2022-12-08T01:05:23 | 2019-12-10T13:38:52 | Python | UTF-8 | Python | false | false | 132 | py | from core.result import Result
class BadRequest(Result):
def dictionary_creator(self):
return {"status": self.status}
| [
"forafsaneh.91@gmail.com"
] | forafsaneh.91@gmail.com |
b67237062e950386e920e3e37c3bf871a80d73e4 | f9f0ddbb211bde92316ca746938688e7c82e2fe0 | /flask-test/flask/app/recipes/routes.py | aa2d6e07e23264d7363001d65eaa43fce1987817 | [] | no_license | Todai88/Python-Refresher | c93475cc08b2ed6905901f0f615bc446820382d8 | a74a7e878906201ed7f09a17d44fc94d1137eb3a | refs/heads/master | 2020-04-06T07:38:13.539997 | 2018-11-19T21:01:46 | 2018-11-19T21:01:46 | 157,279,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from flask import render_template
from . import recipes_blueprint
@recipes_blueprint.route('/')
def index():
return render_template('recipes/index.html')
| [
"joabaj88@gmail.com"
] | joabaj88@gmail.com |
84e191ba583ee19dc3188ec624ee40a7210a743b | 3468fe20cd1128eb8e18354c30490421e504e4af | /portal/context_processors.py | 0a47dca0d4d1b2ed920de4681821c9ea8c8b7f4c | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | djpeluca/utopia-cms | 7da45422ffc4f1f397f385ea37243f2745a758de | 1e444afea565fdc734abf449b8ebe9b7c2c47d80 | refs/heads/main | 2023-08-19T23:04:44.666527 | 2021-10-27T01:55:11 | 2021-10-27T01:55:11 | 387,323,009 | 0 | 0 | BSD-3-Clause | 2021-07-19T03:03:48 | 2021-07-19T03:03:48 | null | UTF-8 | Python | false | false | 4,157 | py | # -*- coding: utf-8 -*-
import pycountry
from django_mobile import get_flavour
from django.conf import settings
from django.contrib.sites.models import Site
from core.models import Publication, Category
def urls(request):
url_dict = {}
for attr in dir(settings):
if attr.endswith('_URL'):
try:
url_dict[attr] = getattr(settings, attr).replace('%s', '')
except AttributeError:
pass
url_dict['URL_SCHEME'] = settings.URL_SCHEME
return url_dict
def gtm(request):
return {'GTM_CONTAINER_ID': settings.GTM_CONTAINER_ID, 'GTM_AMP_CONTAINER_ID': settings.GTM_AMP_CONTAINER_ID}
def site(request):
site = Site.objects.get_current()
meta_robots_content = 'noindex' if any(['/' in r.disallowed_urls() for r in site.rule_set.all()]) else 'all'
return {
'site': site, 'meta_robots_content': meta_robots_content,
'country_name': pycountry.countries.get(alpha2=settings.LOCAL_COUNTRY).name,
'site_description': getattr(settings, 'HOMEV3_SITE_DESCRIPTION', site.name)}
def publications(request):
DEFAULT_PUB = settings.DEFAULT_PUB
try:
default_pub = Publication.objects.get(slug=DEFAULT_PUB)
except Publication.DoesNotExist:
default_pub = None
result = {
'BASE_SUB': settings.BASE_SUB,
'DEFAULT_PUB': DEFAULT_PUB,
'default_pub': default_pub,
'custom_icons_publications': getattr(settings, 'CORE_CUSTOM_ICONS_PUBLICATIONS', None),
}
for p in Publication.objects.exclude(slug=DEFAULT_PUB).iterator():
result.update({p.slug.upper() + '_SUB': p.slug, p.slug + '_pub': p})
if get_flavour(request) == 'amp':
result['extra_header_template'] = getattr(settings, 'HOMEV3_EXTRA_HEADER_TEMPLATE_AMP', None)
else:
result['extra_header_template'] = getattr(settings, 'HOMEV3_EXTRA_HEADER_TEMPLATE', None)
result['footer_template'] = settings.HOMEV3_FOOTER_TEMPLATE
# use this context processor to load also some other useful variables configured in settings
result.update(
(
(var, getattr(settings, var, None)) for var in (
'HOMEV3_CUSTOM_CSS',
'HOMEV3_CUSTOM_PRINT_CSS',
'HOMEV3_LOGO',
'HOMEV3_LOGO_WIDTH',
'HOMEV3_SECONDARY_LOGO',
'HOMEV3_LOGO_FOOTER',
'HOMEV3_LOGO_FOOTER_WIDTH',
'HOMEV3_LOGO_PRINTABLE',
'HOMEV3_LOGO_PRINTABLE_WIDTH',
'HOMEV3_LOGO_ALT_TEXT',
'HOMEV3_TWITTER_SITE_META',
'HOMEV3_EXTRA_META',
'CORE_ARTICLE_DETAIL_PUBLISHER_META',
'PWA_MANIFEST_STATIC_PATH',
)
)
)
return result
def main_menus(request):
"""
Fills context variables to be shown or needed in the main menus.
Also fill another context variables using to the visualization of many ux "modules".
"""
result = {
'MENU_CATEGORIES': Category.objects.filter(order__isnull=False),
'CORE_ENABLE_PODCAST': getattr(settings, 'CORE_ENABLE_PODCAST', False),
'MOBILE_NAV_EXTRA_TEMPLATE': getattr(settings, 'HOMEV3_MOBILE_NAV_EXTRA_TEMPLATE', None),
'LOGIN_NO_REDIRECT_URLPATHS': ['/usuarios/sesion-cerrada/', '/usuarios/error/login/', '/admin/logout/'],
}
mobile_nav_ths = 4 + getattr(settings, 'HOMEV3_MOBILE_NAV_EXTRA_THS', 0)
menu_lal = getattr(settings, 'HOMEV3_LATEST_ARTICLE_LINKS', ())
if menu_lal:
result['MENU_LATEST_ARTICLE_LINKS'] = menu_lal
mobile_nav_ths += 1
if len(menu_lal) > 1:
result['MENU_LATEST_ARTICLE_LINKS_DROPDOWN'] = getattr(
settings, 'HOMEV3_LATEST_ARTICLE_LINKS_DROPDOWN', 'latest'
)
try:
menu_publications = Publication.objects.filter(public=True).exclude(
slug__in=getattr(settings, 'HOMEV3_EXCLUDED_MENU_PUBLICATIONS', ()))
except Exception:
menu_publications = "no-menu"
result.update({'MENU_PUBLICATIONS': menu_publications, 'mobile_nav_ths': mobile_nav_ths})
return result
| [
"apacheco@ladiaria.com.uy"
] | apacheco@ladiaria.com.uy |
1c54eb1dee6f7a9baa2427d22dd6937e2490becc | cb1d06e91347a23438057d9f40b5a74cad595766 | /autonetkit/anm/base.py | 0322dcc13b36045638ab3a3c17513d8c61a2fca4 | [] | permissive | plucena24/autonetkit | 9f94d3fba6bfad54793a7de58ef17439c2c71f0b | f7e8c03ee685d5b89f9028cb556017e730e0446c | refs/heads/master | 2023-08-16T18:03:54.593010 | 2014-11-07T13:43:39 | 2014-11-07T13:43:39 | 27,204,033 | 0 | 0 | BSD-3-Clause | 2023-08-08T18:36:36 | 2014-11-27T01:36:38 | Python | UTF-8 | Python | false | false | 15,296 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import itertools
import logging
import autonetkit
import autonetkit.log as log
from autonetkit.anm.edge import NmEdge
from autonetkit.anm.graph_data import NmGraphData
from autonetkit.anm.interface import NmPort
from autonetkit.anm.node import NmNode
from autonetkit.exception import OverlayNotFound
# TODO: check if this is still a performance hit
from autonetkit.log import CustomAdapter
from autonetkit.anm.ank_element import AnkElement
class OverlayBase(AnkElement):
'''Base class for overlays - overlay graphs, subgraphs, projections, etc'''
def __init__(self, anm, overlay_id):
""""""
if overlay_id not in anm.overlay_nx_graphs:
raise OverlayNotFound(overlay_id)
# TODO: return False instead?
self._overlay_id = overlay_id
self._anm = anm
#logger = logging.getLogger('ANK')
#logstring = 'Overlay: %s' % str(overlay_id)
#logger = CustomAdapter(logger, {'item': logstring})
#object.__setattr__(self, 'log', logger)
#self.init_logging("graph")
logger = log
object.__setattr__(self, 'log', logger)
self.init_logging("graph")
def __repr__(self):
"""
Example:
>>> anm = autonetkit.topos.house()
>>> anm['phy']
phy
"""
return self._overlay_id
def is_multigraph(self):
"""
Example:
>>> anm = autonetkit.topos.house()
>>> anm['phy'].is_multigraph()
False
>>> anm = autonetkit.topos.multi_edge()
>>> anm['phy'].is_multigraph()
True
"""
return self._graph.is_multigraph()
@property
def data(self):
"""Returns data stored on this overlay graph"""
return NmGraphData(self._anm, self._overlay_id)
def __contains__(self, n):
"""
Example:
>>> anm = autonetkit.topos.house()
>>> "r1" in anm['phy']
True
>>> "test" in anm['phy']
False
"""
try:
return n.node_id in self._graph
except AttributeError:
# try with node_id as a string
return n in self._graph
def interface(self, interface):
""""""
return NmPort(self._anm, self._overlay_id, interface.node_id,
interface.interface_id)
def edge(self, edge_to_find, dst_to_find=None, key=0):
'''returns edge in this graph with same src and dst
and key for parallel edges (default is to return first edge)
#TODO: explain parameter overloading: strings, edges, nodes...
Example:
>>> anm = autonetkit.topos.house()
>>> g_phy = anm['phy']
>>> e_r1_r2 = g_phy.edge("r1", "r2")
Can also find from an edge
>>> e_r1_r2_input = anm['input'].edge(e_r1_r2)
And for multi-edge graphs can specify key
>>> anm = autonetkit.topos.multi_edge()
>>> e1 = anm['phy'].edge("r1", "r2", 0)
>>> e2 = anm['phy'].edge("r1", "r2", 1)
>>> e1 == e2
False
>>> autonetkit.update_http(anm)
>>> eth0_r1 = anm["phy"].node("r1").interface("eth0")
>>> eth3_r1 = anm["phy"].node("r1").interface("eth3")
>>> eth0_r2 = anm["phy"].node("r2").interface("eth0")
>>> anm["phy"].has_edge(eth0_r1, eth0_r2)
True
>>> anm["phy"].has_edge(eth3_r1, eth0_r2)
False
'''
# TODO: handle multigraphs
if isinstance(edge_to_find, NmEdge):
# TODO: tidy this logic
edge = edge_to_find # alias for neater code
if (edge.is_multigraph() and self.is_multigraph()
and self._graph.has_edge(edge.src,
edge.dst, key=edge.ekey)):
return NmEdge(self._anm, self._overlay_id,
edge.src, edge.dst, edge.ekey)
elif (self._graph.has_edge(edge.src, edge.dst)):
return NmEdge(self._anm, self._overlay_id,
edge.src, edge.dst)
if isinstance(edge_to_find, NmEdge):
src_id = edge_to_find.src
dst_id = edge_to_find.dst
search_key = key
if self.is_multigraph():
for (src, dst, rkey) in self._graph.edges(src_id,
keys=True):
if dst == dst_id and rkey == search_key:
return NmEdge(self._anm, self._overlay_id, src,
dst, search_key)
for (src, dst) in self._graph.edges(src_id):
if dst == dst_id:
return NmEdge(self._anm, self._overlay_id, src, dst)
# from here on look for (src, dst) pairs
src = edge_to_find
dst = dst_to_find
if (isinstance(src, basestring) and isinstance(dst, basestring)):
src = src.lower()
dst = dst.lower()
if self.is_multigraph():
if self._graph.has_edge(src, dst, key=key):
return NmEdge(self._anm, self._overlay_id, src,
dst, key)
elif self._graph.has_edge(src, dst):
return NmEdge(self._anm, self._overlay_id, src, dst)
if isinstance(src, NmNode) and isinstance(dst, NmNode):
src_id = src.node_id
dst_id = dst.node_id
if self.is_multigraph():
if self._graph.has_edge(src_id, dst_id, key):
return NmEdge(self._anm, self._overlay_id, src, dst, key)
else:
if self._graph.has_edge(src_id, dst_id):
return NmEdge(self._anm, self._overlay_id, src, dst)
if isinstance(src, NmPort) and isinstance(dst, NmPort):
# further filter result by ports
src_id = src.node_id
dst_id = dst.node_id
src_int = src.interface_id
dst_int = dst.interface_id
# TODO: combine duplicated logic from above
#TODO: test with directed graph
if self.is_multigraph():
# search edges from src to dst
for src, iter_dst, iter_key in self._graph.edges(src_id, keys=True):
if iter_dst != dst_id:
continue # to a different node
ports = self._graph[src][iter_dst][iter_key]["_ports"]
if ports[src_id] == src_int and ports[dst_id] == dst_int:
return NmEdge(self._anm, self._overlay_id, src_id, dst_id, iter_key)
else:
#TODO: add test case for here
for src, iter_dst in self._graph.edges(src_id):
if iter_dst != dst_id:
continue # to a different node
ports = self._graph[src][iter_dst]["_ports"]
if ports[src_id] == src_int and ports[dst_id] == dst_int:
return NmEdge(self._anm, self._overlay_id, src_id, dst_id)
def __getitem__(self, key):
""""""
return self.node(key)
def node(self, key):
"""Returns node based on name
This is currently O(N). Could use a lookup table
Example:
>>> anm = autonetkit.topos.house()
>>> g_phy = anm['phy']
>>> r1 = g_phy.node("r1")
Can also find across layers
>>> r1_input = anm['input'].node(r1)
"""
# TODO: refactor
try:
if key.node_id in self._graph:
return NmNode(self._anm, self._overlay_id, key.node_id)
except AttributeError:
# try as string id
if key in self._graph:
return NmNode(self._anm, self._overlay_id, key)
# doesn't have node_id, likely a label string, search based on this
# label
for node in self:
if str(node) == key:
return node
# TODO: change warning to an exception
log.warning('Unable to find node %s in %s ' % (key, self))
return None
def overlay(self, key):
"""Get to other overlay graphs in functions"""
# TODO: refactor: shouldn't be returning concrete instantiation from
# abstract parent!
from autonetkit.anm.graph import NmGraph
return NmGraph(self._anm, key)
@property
def name(self):
""""""
return self.__repr__()
def __nonzero__(self):
return self.anm.has_overlay(self._overlay_id)
def node_label(self, node):
""""""
return repr(NmNode(self._anm, self._overlay_id, node))
def has_edge(self, edge_to_find, dst_to_find=None,):
"""Tests if edge in graph
>>> anm = autonetkit.topos.house()
>>> g_phy = anm['phy']
>>> r1 = g_phy.node("r1")
>>> r2 = g_phy.node("r2")
>>> r5 = g_phy.node("r5")
>>> g_phy.has_edge(r1, r2)
True
>>> g_phy.has_edge(r1, r5)
False
>>> e_r1_r2 = anm['input'].edge(r1, r2)
>>> g_phy.has_edge(e_r1_r2)
True
"""
if dst_to_find is None:
if self.is_multigraph():
return self._graph.has_edge(edge_to_find.src,
edge_to_find.dst, edge_to_find.ekey)
return self._graph.has_edge(edge_to_find.src, edge_to_find.dst)
else:
return bool(self.edge(edge_to_find, dst_to_find))
def __iter__(self):
""""""
return iter(self.nodes())
def __len__(self):
""""""
return len(self._graph)
def nodes(self, *args, **kwargs):
"""
>>> anm = autonetkit.topos.multi_as()
>>> g_phy = anm["phy"]
>>> g_phy.nodes()
[r4, r5, r6, r7, r1, r2, r3, r8, r9, r10]
>>> g_phy.nodes(asn=1)
[r4, r5, r1, r2, r3]
>>> g_phy.nodes(asn=3)
[r7, r8, r9, r10]
>>> g_phy.nodes(asn=1, ibgp_role="RR")
[r4, r5]
>>> g_phy.nodes(asn=1, ibgp_role="RRC")
[r1, r2, r3]
"""
result = list(NmNode(self._anm, self._overlay_id, node)
for node in self._graph)
if len(args) or len(kwargs):
result = self.filter(result, *args, **kwargs)
return result
def routers(self, *args, **kwargs):
"""Shortcut for nodes(), sets device_type to be router
>>> anm = autonetkit.topos.mixed()
>>> anm['phy'].routers()
[r1, r2, r3]
"""
result = self.nodes(*args, **kwargs)
return [r for r in result if r.is_router()]
def switches(self, *args, **kwargs):
"""Shortcut for nodes(), sets device_type to be switch
>>> anm = autonetkit.topos.mixed()
>>> anm['phy'].switches()
[sw1]
"""
result = self.nodes(*args, **kwargs)
return [r for r in result if r.is_switch()]
def servers(self, *args, **kwargs):
"""Shortcut for nodes(), sets device_type to be server
>>> anm = autonetkit.topos.mixed()
>>> anm['phy'].servers()
[s1]
"""
result = self.nodes(*args, **kwargs)
return [r for r in result if r.is_server()]
def l3devices(self, *args, **kwargs):
"""Shortcut for nodes(), tests if device is_l3device
>>> anm = autonetkit.topos.mixed()
>>> anm['phy'].l3devices()
[s1, r1, r2, r3]
"""
result = self.nodes(*args, **kwargs)
return [r for r in result if r.is_l3device()]
def device(self, key):
"""To access programatically"""
return NmNode(self._anm, self._overlay_id, key)
def groupby(self, attribute, nodes=None):
"""Returns a dictionary sorted by attribute
>>> anm = autonetkit.topos.house()
>>> g_phy = anm['phy']
>>> g_phy.groupby("asn")
{1: [r1, r2, r3], 2: [r4, r5]}
Can also specify a subset to work from
>>> nodes = [n for n in g_phy if n.degree() > 2]
>>> g_phy.groupby("asn", nodes=nodes)
{1: [r2, r3]}
"""
result = {}
if not nodes:
data = self.nodes()
else:
data = nodes
data = sorted(data, key=lambda x: x.get(attribute))
for (key, grouping) in itertools.groupby(data, key=lambda x:
x.get(attribute)):
result[key] = list(grouping)
return result
def filter(self, nbunch=None, *args, **kwargs):
""""""
if nbunch is None:
nbunch = self.nodes()
def filter_func(node):
"""Filter based on args and kwargs"""
return all(getattr(node, key) for key in args) \
and all(getattr(node, key) == val for (key, val) in
kwargs.items())
return [n for n in nbunch if filter_func(n)]
def edges(self, src_nbunch=None, dst_nbunch=None, *args,
**kwargs):
"""
>>> anm = autonetkit.topos.house()
>>> g_phy = anm['phy']
>>> g_phy.edges()
[(r4, r5), (r4, r2), (r5, r3), (r1, r2), (r1, r3), (r2, r3)]
>>> g_phy.edge("r1", "r2").color = "red"
>>> g_phy.edges(color = "red")
[(r1, r2)]
"""
# src_nbunch or dst_nbunch may be single node
# TODO: refactor this
if src_nbunch:
nbunch_out = []
try:
src_nbunch = src_nbunch.node_id
except AttributeError:
src_nbunch = (n.node_id for n in src_nbunch)
# only store the id in overlay
def filter_func(edge):
"""Filter based on args and kwargs"""
return all(getattr(edge, key) for key in args) \
and all(getattr(edge, key) == val for (key, val) in
kwargs.items())
if self.is_multigraph():
valid_edges = list((src, dst, key) for (src, dst, key) in
self._graph.edges(src_nbunch, keys=True))
else:
default_key = 0
valid_edges = list((src, dst, default_key)
for (src, dst) in self._graph.edges(src_nbunch))
if dst_nbunch:
try:
dst_nbunch = dst_nbunch.node_id
dst_nbunch = set([dst_nbunch])
except AttributeError:
dst_nbunch = (n.node_id for n in dst_nbunch)
dst_nbunch = set(dst_nbunch)
valid_edges = list((src, dst, key) for (src, dst, key) in
valid_edges if dst in dst_nbunch)
if len(args) or len(kwargs):
all_edges = [NmEdge(self._anm, self._overlay_id, src, dst,
key) for (src, dst, key) in valid_edges]
result = list(edge for edge in all_edges
if filter_func(edge))
else:
result = list(NmEdge(self._anm, self._overlay_id, src, dst,
key) for (src, dst, key) in valid_edges)
return list(result)
| [
"simon.knight@gmail.com"
] | simon.knight@gmail.com |
08ce49dff12d8cd48db82a6193beb842cf53e16a | 11f7499cc543ee0704ddd79728c92ac9e550ccab | /frontend/__init__.py | 2e8d088b48426ee69192d33af989cec91174f8af | [
"MIT"
] | permissive | lampwins/orangengine-ui | daeef1eb7ff062cd74bd2b7304129bd76afd0ba2 | 8c864cd297176aa0ff9ead9682f2085f9fd3f1c0 | refs/heads/develop | 2021-01-11T14:35:02.138278 | 2017-04-25T00:38:01 | 2017-04-25T00:38:01 | 80,165,960 | 1 | 4 | null | 2017-04-20T22:40:19 | 2017-01-26T23:30:20 | CSS | UTF-8 | Python | false | false | 211 | py |
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
    # disable running this module directly
pass
| [
"lampwins@gmail.com"
] | lampwins@gmail.com |
0bf3f23db7e2058769e4e2f2eeb45303b302ee3a | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-mgmt-monitor/azure/mgmt/monitor/models/metric_alert_status_properties_py3.py | 5990828126e4ce5a1944e301d027d2e3f9f2d076 | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 1,278 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MetricAlertStatusProperties(Model):
"""An alert status properties.
:param dimensions:
:type dimensions: dict[str, str]
:param status: status value
:type status: str
:param timestamp: UTC time when the status was checked.
:type timestamp: datetime
"""
_attribute_map = {
'dimensions': {'key': 'dimensions', 'type': '{str}'},
'status': {'key': 'status', 'type': 'str'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
}
def __init__(self, *, dimensions=None, status: str=None, timestamp=None, **kwargs) -> None:
super(MetricAlertStatusProperties, self).__init__(**kwargs)
self.dimensions = dimensions
self.status = status
self.timestamp = timestamp
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
cb59d89b368e5ee2b874cd455bb6ac8c130f4f51 | ce6f72cdc8b72d05fad900d2aaaffdbdf05b1d05 | /0x05-python-exceptions/101-main.py | 187409e6c781b65b8d5bd19da5deaafb8efe771f | [] | no_license | Cu7ious/holbertonschool-higher_level_programming | 4ae7f145f88b333cbd93e3a151c8a1892f942adb | b86439b7c2e4b3d199dbd638888524579aa69de9 | refs/heads/master | 2020-03-09T13:11:00.370612 | 2018-09-09T02:17:26 | 2018-09-09T02:17:26 | 128,804,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | #!/usr/bin/python3
safe_function = __import__('101-safe_function').safe_function
def my_div(a, b):
return a / b
result = safe_function(my_div, 10, 2)
print("result of my_div: {}".format(result))
result = safe_function(my_div, 10, 0)
print("result of my_div: {}".format(result))
def print_list(my_list, len):
i = 0
while i < len:
print(my_list[i])
i += 1
return len
result = safe_function(print_list, [1, 2, 3, 4], 10)
print("result of print_list: {}".format(result))
| [
"328@holbertonschool.com"
] | 328@holbertonschool.com |
1b775ed02fe183a9ce73135aef444f8280749b75 | 677b9b1aab74d998fa0dd26b0309ed5bbe8775e9 | /T53/webapp/traders/admin.py | 22a67f4b9eb4c73f951d58e37fc1992fd92d55f9 | [
"MIT"
] | permissive | University-of-Petroleum-Energy-Studies/Hackathon_5.0 | db7291a8d69569322dd3dbd7547e74d6460b9275 | 6a5e6547c4aaf9a1fc2cb8ed49fa5fd54ad7c351 | refs/heads/master | 2023-03-11T20:22:44.770520 | 2021-02-22T17:47:58 | 2021-02-22T17:47:58 | 340,362,356 | 2 | 69 | MIT | 2021-03-03T17:16:38 | 2021-02-19T12:27:01 | HTML | UTF-8 | Python | false | false | 168 | py | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Cart)
admin.site.register(CartItem)
admin.site.register(Order)
| [
"500077185@stu.upes.ac.in"
] | 500077185@stu.upes.ac.in |
64547fc947b04b977f1f3b6781322b2e9c136185 | 45da9cd96c91be944ecbd0d5b0b1a7dfc975ed8f | /octavia/tests/unit/api/v2/types/test_health_monitors.py | d4a706f7cca992d2bc714d30f654a00078b0a461 | [
"Apache-2.0"
] | permissive | mail2nsrajesh/octavia | 7635be2f69de18c1d10d218c3c2f3122f343490a | 7466016ae982af2a560a94327f9e63a7e7151cc5 | refs/heads/master | 2023-08-28T15:22:00.984583 | 2017-06-29T22:04:35 | 2017-06-29T22:04:35 | 96,141,893 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,946 | py | # Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from wsme import exc
from wsme.rest import json as wsme_json
from wsme import types as wsme_types
from octavia.api.v2.types import health_monitor as hm_type
from octavia.common import constants
from octavia.tests.unit.api.v2.types import base
class TestHealthMonitor(object):
_type = None
def test_invalid_type(self):
body = {"delay": 1, "timeout": 1, "max_retries": 1}
if self._type is hm_type.HealthMonitorPOST:
body.update({"type": 1, "pool_id": uuidutils.generate_uuid()})
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_delay(self):
body = {"delay": "one", "timeout": 1, "max_retries": 1}
if self._type is hm_type.HealthMonitorPOST:
body.update({"type": constants.PROTOCOL_HTTP,
"pool_id": uuidutils.generate_uuid()})
self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
def test_invalid_timeout(self):
body = {"delay": 1, "timeout": "one", "max_retries": 1}
if self._type is hm_type.HealthMonitorPOST:
body.update({"type": constants.PROTOCOL_HTTP,
"pool_id": uuidutils.generate_uuid()})
self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
def test_invalid_max_retries_down(self):
body = {"delay": 1, "timeout": 1, "max_retries": "one"}
if self._type is hm_type.HealthMonitorPOST:
body.update({"type": constants.PROTOCOL_HTTP,
"pool_id": uuidutils.generate_uuid()})
self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
def test_invalid_max_retries(self):
body = {"delay": 1, "timeout": 1, "max_retries": "one"}
if self._type is hm_type.HealthMonitorPOST:
body.update({"type": constants.PROTOCOL_HTTP,
"pool_id": uuidutils.generate_uuid()})
self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
def test_invalid_http_method(self):
body = {"delay": 1, "timeout": 1, "max_retries": 1,
"http_method": 1}
if self._type is hm_type.HealthMonitorPOST:
body.update({"type": constants.PROTOCOL_HTTP,
"pool_id": uuidutils.generate_uuid()})
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_url_path(self):
body = {"delay": 1, "timeout": 1, "max_retries": 1, "url_path": 1}
if self._type is hm_type.HealthMonitorPOST:
body.update({"type": constants.PROTOCOL_HTTP,
"pool_id": uuidutils.generate_uuid()})
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_expected_codes(self):
body = {"delay": 1, "timeout": 1, "max_retries": 1,
"expected_codes": "lol"}
if self._type is hm_type.HealthMonitorPOST:
body.update({"type": constants.PROTOCOL_HTTP,
"pool_id": uuidutils.generate_uuid()})
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
class TestHealthMonitorPOST(base.BaseTypesTest, TestHealthMonitor):
_type = hm_type.HealthMonitorPOST
def test_health_monitor(self):
body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
"timeout": 1, "max_retries_down": 1, "max_retries": 1,
"pool_id": uuidutils.generate_uuid()}
hm = wsme_json.fromjson(self._type, body)
self.assertTrue(hm.admin_state_up)
def test_type_mandatory(self):
body = {"delay": 80, "timeout": 1, "max_retries": 1,
"pool_id": uuidutils.generate_uuid()}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_delay_mandatory(self):
body = {"type": constants.HEALTH_MONITOR_HTTP, "timeout": 1,
"max_retries": 1, "pool_id": uuidutils.generate_uuid()}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_timeout_mandatory(self):
body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
"max_retries": 1, "pool_id": uuidutils.generate_uuid()}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_max_retries_mandatory(self):
body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
"timeout": 1, "pool_id": uuidutils.generate_uuid()}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_default_health_monitor_values(self):
# http_method = 'GET'
# url_path = '/'
# expected_codes = '200'
# max_retries_down = 3
# admin_state_up = True
        # The fields above are optional and should fall back to the example defaults listed.
body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
"timeout": 1, "max_retries": 1,
"pool_id": uuidutils.generate_uuid()}
hmpost = wsme_json.fromjson(self._type, body)
self.assertEqual('GET', hmpost.http_method)
self.assertEqual('/', hmpost.url_path)
self.assertEqual('200', hmpost.expected_codes)
self.assertEqual(3, hmpost.max_retries_down)
self.assertTrue(hmpost.admin_state_up)
def test_non_uuid_project_id(self):
body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
"timeout": 1, "max_retries_down": 1, "max_retries": 1,
"project_id": "non-uuid",
"pool_id": uuidutils.generate_uuid()}
hm = wsme_json.fromjson(self._type, body)
self.assertEqual(hm.project_id, body['project_id'])
class TestHealthMonitorPUT(base.BaseTypesTest, TestHealthMonitor):
_type = hm_type.HealthMonitorPUT
def test_health_monitor(self):
body = {"http_method": constants.HEALTH_MONITOR_HTTP_METHOD_HEAD}
hm = wsme_json.fromjson(self._type, body)
self.assertEqual(wsme_types.Unset, hm.admin_state_up)
| [
"flux.adam@gmail.com"
] | flux.adam@gmail.com |
ce730cf624457dbefcf7601c397a5f443a058437 | 5f3c8eddb8c5a14fb3b5931f332d401207666036 | /src/core/prometheus/server/multiserver.py | dd17e6f81fe23066c61aa76845e3af8a1a2d5313 | [
"Apache-2.0"
] | permissive | hwinther/lanot | dec8fe48efb6245af009bedf65b2bc089e92efa0 | f6700cacb3946535081624467b746fdfd38e021d | refs/heads/master | 2021-03-24T12:02:47.530833 | 2019-05-01T11:56:05 | 2019-05-01T11:56:05 | 91,605,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | # coding=utf-8
import gc
gc.collect()
class WrappedServer(object):
def __init__(self, server, kwargs):
self.server = server
self.kwargs = kwargs
class MultiServer(object):
def __init__(self):
self.wrapped_servers = list()
def add(self, server, **kwargs):
self.wrapped_servers.append(WrappedServer(server, kwargs))
def start(self):
for wrapped_server in self.wrapped_servers:
wrapped_server.server.pre_loop(**wrapped_server.kwargs)
wrapped_server.server.loop_active = True
loop_active = True
while loop_active:
for wrapped_server in self.wrapped_servers:
wrapped_server.server.loop_tick(**wrapped_server.kwargs)
if not wrapped_server.server.loop_active:
loop_active = False
break
gc.collect()
for wrapped_server in self.wrapped_servers:
wrapped_server.server.post_loop(**wrapped_server.kwargs)
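# A hypothetical usage sketch (assumes server objects implementing the
# pre_loop/loop_tick/post_loop protocol and loop_active flag used above;
# the server names and kwargs are placeholders):
#
# servers = MultiServer()
# servers.add(telnet_server, port=2323)
# servers.add(http_server, port=8080)
# servers.start()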
| [
"hc@wsh.no"
] | hc@wsh.no |
d3b5341008c433c4c9e272a4682211eb0a7769b7 | b993df6e8d563a75ea6f6ce40a41aa3e59675dbf | /20190826/swea 1959 teacher.py | fda05257747cebbaa7d780eed6d9350a18fa3e62 | [] | no_license | BuankerC/projectswea | 7d6a5f35b73e553037efd01d6a57811a9778834d | ea169d2352f3785ff2ce47475d203c3f751716cb | refs/heads/master | 2020-07-03T11:31:43.283821 | 2020-01-04T03:19:25 | 2020-01-04T03:19:25 | 201,892,552 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | def f(X, Y): # X 긴 리스트, Y 짧은 리스트
    # X is the longer list, Y the shorter list
    maxV = 0
    for i in range(0, len(X)-len(Y)+1):  # start of the window in the longer list for the sum of products
        s = 0
        for j in range(0, len(Y)):  # index into the shorter list
s += X[i+j]*Y[j]
if maxV < s:
maxV = s
return maxV
T = int(input())
for tc in range(1, T+1):
N, M = map(int, input().split())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
if N > M:
r = f(A, B)
else:
r = f(B, A)
print('#{} {}'.format(tc, r)) | [
"ergaster91@gmail.com"
] | ergaster91@gmail.com |
8abdea02a3e0be361bce98471f2e44687fb8b1f8 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/costmanagement/azure-mgmt-costmanagement/generated_samples/mca_billing_account_query_grouping.py | 3e3dffe50f130a730bac03849e93ecc7ec43a642 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,873 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.costmanagement import CostManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-costmanagement
# USAGE
python mca_billing_account_query_grouping.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = CostManagementClient(
credential=DefaultAzureCredential(),
)
response = client.query.usage(
scope="providers/Microsoft.Billing/billingAccounts/12345:6789",
parameters={
"dataset": {
"aggregation": {"totalCost": {"function": "Sum", "name": "PreTaxCost"}},
"granularity": "None",
"grouping": [{"name": "ResourceGroup", "type": "Dimension"}],
},
"timeframe": "TheLastMonth",
"type": "Usage",
},
)
print(response)
# x-ms-original-file: specification/cost-management/resource-manager/Microsoft.CostManagement/stable/2022-10-01/examples/MCABillingAccountQueryGrouping.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
9230dacc7da1f64be7a93e490cfe38d4a197efb9 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_1_1/IvanJobs/A.py | cdbbafacd6ac8a847e937d62c39bffd9717cbf23 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 530 | py | in_file = 'A-large.in'
out_file = 'A-small-attempt0.out'
def solve(fin, fout):
T = int(fin.readline().strip())
for _t in range(1, T + 1):
S = list(fin.readline().strip())
N = len(S)
C = [S[0],]
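        # Greedily build the lexicographically largest word: prepend each character
        # if it is >= the current front character, otherwise append it.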
for i in range(1, N):
if S[i] >= C[0]:
C.insert(0, S[i])
else:
C.append(S[i])
fout.write('Case #%d: %s\n' % (_t, ''.join(C)))
with open(in_file, 'r') as fin, open(out_file, 'w') as fout:
solve(fin, fout) | [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
53b85586e8e6200bf65b2c761bf26cd0697947c3 | 3e276ce46afcdaf365fd62b45ceba19327535f14 | /src/plugins/github/libs/repo.py | bcb8e7fb11a49342478d116e5e4c779dd2d7f605 | [
"MIT"
] | permissive | 17Y9E81/QQ-GitHub-Bot | 1ca28ccc4b1a2bbbbb24419271389599dcd8ceb4 | 35c20d28aafaedc1813c6213ede9f2f51e56d5a2 | refs/heads/master | 2023-07-13T12:26:33.201661 | 2021-08-25T09:17:20 | 2021-08-25T09:17:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : yanyongyu
@Date : 2021-03-12 15:36:14
@LastEditors : yanyongyu
@LastEditTime : 2021-06-15 22:16:20
@Description : None
@GitHub : https://github.com/yanyongyu
"""
__author__ = "yanyongyu"
from typing import Optional
from src.libs.github import Github
from .. import github_config as config
from src.libs.github.models import Repository
async def get_repo(owner: str,
repo_name: str,
token: Optional[str] = None) -> Repository:
if token:
g = Github(token)
elif config.github_client_id and config.github_client_secret:
g = Github(config.github_client_id, config.github_client_secret)
else:
g = Github()
async with g:
return await g.get_repo(f"{owner}/{repo_name}", False)
| [
"yanyongyu_1@126.com"
] | yanyongyu_1@126.com |
c20cc8afb23cffd3e7ae7a58f51025f5392d97e7 | 8a1686aeeefa80afeb0aa9f45ed72a75883458c4 | /dit/other/tests/test_lautum_information.py | 704b2bffab2cb42985e37083b41e312dfa12d31b | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | heleibin/dit | 70afd57f31be346e48b9b28c67fd6e019132ac36 | ebd0c11600e559bf34cf12a6b4e451057838e324 | refs/heads/master | 2020-09-27T07:42:15.991500 | 2019-11-23T06:10:11 | 2019-11-23T06:10:11 | 226,466,522 | 1 | 0 | BSD-3-Clause | 2019-12-07T06:26:50 | 2019-12-07T06:26:50 | null | UTF-8 | Python | false | false | 550 | py | """
Tests for dit.multivariate.lautum_information.
"""
from __future__ import division
import pytest
from dit import Distribution as D
from dit.other import lautum_information as L
def test_lm1():
""" Test L """
outcomes = ['000', '001', '010', '011', '100', '101', '110', '111']
pmf = [3/16, 1/16, 1/16, 3/16, 1/16, 3/16, 3/16, 1/16]
d = D(outcomes, pmf)
assert L(d) == pytest.approx(0.20751874963942196)
assert L(d, [[0], [1]]) == pytest.approx(0)
assert L(d, [[0], [1]], [2]) == pytest.approx(0.20751874963942196)
| [
"ryangregoryjames@gmail.com"
] | ryangregoryjames@gmail.com |
354fd35838836b5e7f00b6bfabc282a025f8a154 | c6e23f02b28604d43ad37cb339738560e8fefdee | /python3/python_open_file.py | b9b0275a6444dabcc5ba7fdefa3314ebc9c11899 | [] | no_license | testdata6/python-test | 5fcf3449c3ed84be3f0d526048bb2a26baf4fa7f | 3539ddfc70416943b28691c2cac9f7f03aad5991 | refs/heads/master | 2020-04-09T07:07:26.503612 | 2019-10-02T10:57:17 | 2019-10-02T10:57:17 | 160,141,238 | 0 | 3 | null | 2019-10-02T10:57:19 | 2018-12-03T06:10:20 | Python | UTF-8 | Python | false | false | 1,587 | py | #!/usr/bin/python
## Reading files
## Use the open() function to read files from outside of Python.
## A file can be opened in the following modes:
## r = read the information
## w = write the information
## a = append information
## r+ = read and write
## The close() function is used to close an open file.
#-------------------
## In this example,try to read file "employees.txt"
#open("employees.txt", "r")
## store in variable.
employee_file = open("hello-string.py", "r")
print(employee_file.readable()) # Check the condition and print True if it is in read mode else print false. (It wont continue if the condition is false.)
print(employee_file.read()) # It will read and print all the content of the file.
print(employee_file.readline())
employee_file.close()
#-------------------
print("---------------")
employee_file = open("hello-string.py", "r")
print(employee_file.readline()) # Print first line of the file
employee_file.close()
#-------------------
print("---------------")
employee_file = open("hello-string.py", "r")
print(employee_file.readline()) # Print first line of the file
print(employee_file.readline()) # Print second line of the file
employee_file.close()
#-------------------
print("---------------")
employee_file = open("hello-string.py", "r")
print(employee_file.readlines()) # Read all lines into a list and print that list.
employee_file.close()
#-------------------
print("---------------")
employee_file = open("hello-string.py", "r")
print(employee_file.readlines()[1]) # Print a specific element (the second line) from the list.
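#-------------------
print("---------------")
## Added sketch of the write ("w") and append ("a") modes described above;
## "demo-output.txt" is a hypothetical file name.
demo_file = open("demo-output.txt", "w")
demo_file.write("first line\n")  # "w" truncates the file and writes from the start
demo_file.close()
demo_file = open("demo-output.txt", "a")
demo_file.write("second line\n")  # "a" keeps existing content and appends at the end
demo_file.close()
demo_file = open("demo-output.txt", "r")
print(demo_file.read())  # prints both lines
demo_file.close()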
employee_file.close() | [
"amit.ganvir6@gmail.com"
] | amit.ganvir6@gmail.com |
1c20b1148f8f1df51f01782f03d855c1f2c26a1b | 82aace1431e0af949b1294d979a16f8dc18f48c2 | /Python_OOP_Softuni/Decorators_Exercises/venv/even_params.py | 9ac76d3d246628be691e00ddf5d93d62b584cc0b | [
"MIT"
] | permissive | borisboychev/SoftUni | 6778450417f889f8e89c709897b9e26c7129dbf6 | 22062312f08e29a1d85377a6d41ef74966d37e99 | refs/heads/master | 2023-03-27T13:11:17.378197 | 2021-03-26T09:14:25 | 2021-03-26T09:14:25 | 295,463,442 | 1 | 0 | null | 2020-10-12T12:54:13 | 2020-09-14T15:46:12 | Python | UTF-8 | Python | false | false | 520 | py | def even_parameters(func):
def wrapper(*args):
evens = [x for x in args if isinstance(x, int) and x % 2 == 0]
if len(evens) != len(args):
return f"Please use only even numbers!"
return func(*args)
return wrapper
@even_parameters
def add(a, b):
return a + b
print(add(2, 4))
print(add("Peter", 1))
@even_parameters
def multiply(*nums):
result = 1
for num in nums:
result *= num
return result
print(multiply(2, 4, 6, 8))
print(multiply(2, 4, 9, 8))
| [
"borisboychev007@Gmail.com"
] | borisboychev007@Gmail.com |
a956f55dd99c58bf49160ba1899bf027d6403a56 | 4e006370c50da435941297e940b742d5c5ef124c | /reference-code/puppy/model/query.py | 2afd810ab94bac1c6c14627b11ba119b5a4f29ef | [
"MIT"
] | permissive | Granvanoeli/ifind | 12328bec166483f2c6a0c29d2026bfca0097bb53 | ed72aee466649bd834d5b4459eb6e0173df6e2ec | refs/heads/master | 2021-01-17T23:16:39.122348 | 2015-03-11T11:37:35 | 2015-03-11T11:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,659 | py | # -*- coding: utf8 -*-
import urllib
class Query(object):
"""
OpenSearch Query.
Models an OpenSearch Query element.
See: http://www.opensearch.org/Specifications/OpenSearch/1.1#OpenSearch_Query_element
"""
def __init__(self, search_terms):
"""
Constructor for Query.
Parameters:
* search_terms (str): the search terms of the query
"""
super(Query, self).__init__()
self.search_terms = search_terms
self.count = 0
self.start_index = 0
self.start_page = 0
self.language = ''
self.service = ''
self.suggestions = {}
from puppy.query.tokenizer import BasicTokenizer
self.tokenizer = BasicTokenizer()
def __eq__(self, q):
a = self.search_terms
if isinstance(q, Query):
b = q.search_terms
else:
b = q
return a == b
def __hash__(self):
return hash(self.search_terms)
def url_quote(self):
return urllib.quote(self.search_terms)
def lower(self):
return Query(self.search_terms.lower())
def __str__(self):
return self.search_terms
def tokenize(self):
return self.tokenizer(self.search_terms)
def write_xml(self):
"""
Creates XML for OpenSearch Query.
Returns:
* query_xml (str): OpenSearch Query as XML
TODO code Query.write_xml()
"""
pass
@staticmethod
def parse_xml(self, oss_xml):
"""
Parse OpenSearch Query XML.
Parameters:
* oss_xml (str): OpenSearch Query XML
Returns:
* puppy.model.OpenSearch.Query
TODO code Query.parse_xml()
"""
pass
| [
"leifos@acm.org"
] | leifos@acm.org |
34885a081c4a4f125fe7154617bfd4f10dd4e75c | ff294d3e93cba8d03bfc7ac9e3d99e8a7d04acb4 | /wukong.py | 7be67d3e98163eb46ca73e328417b6b836b08235 | [
"MIT"
] | permissive | weixiaopassking/wukong-robot | 65dd1aa2822bee112ec2611af2b20e6e68bcd8c8 | 3a9732472ec64504b2879ba8f47d73e1bac03612 | refs/heads/master | 2020-06-02T11:11:28.663600 | 2019-02-23T13:24:19 | 2019-02-23T13:24:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,641 | py | # -*- coding: utf-8-*-
from snowboy import snowboydecoder
from robot import config, utils, constants, logging, statistic, Player
from robot.Updater import Updater
from robot.ConfigMonitor import ConfigMonitor
from robot.Conversation import Conversation
from server import server
from watchdog.observers import Observer
from subprocess import call
import sys
import os
import signal
import yaml
import requests
import hashlib
import os
import fire
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logger = logging.getLogger(__name__)
class Wukong(object):
def init(self):
global conversation
self.detector = None
self._interrupted = False
print('''
********************************************************
* wukong-robot - 中文语音对话机器人 *
* (c) 2019 潘伟洲 <m@hahack.com> *
* https://github.com/wzpan/wukong-robot.git *
********************************************************
如需退出,可以按 Ctrl-4 组合键。
''')
config.init()
self._conversation = Conversation()
self._conversation.say('{} 你好!试试对我喊唤醒词叫醒我吧'.format(config.get('first_name', '主人')), True)
self._observer = Observer()
event_handler = ConfigMonitor(self._conversation)
self._observer.schedule(event_handler, constants.CONFIG_PATH, False)
self._observer.schedule(event_handler, constants.DATA_PATH, False)
self._observer.start()
def _signal_handler(self, signal, frame):
self._interrupted = True
utils.clean()
self._observer.stop()
def _detected_callback(self):
if not utils.is_proper_time():
logger.warning('勿扰模式开启中')
return
if self._conversation.isRecording:
logger.warning('正在录音中,跳过')
return
Player.play(constants.getData('beep_hi.wav'))
logger.info('开始录音')
self._conversation.interrupt()
self._conversation.isRecording = True;
def _do_not_bother_on_callback(self):
utils.do_not_bother = True
Player.play(constants.getData('off.wav'))
logger.info('勿扰模式打开')
def _do_not_bother_off_callback(self):
utils.do_not_bother = False
Player.play(constants.getData('on.wav'))
logger.info('勿扰模式关闭')
def _interrupt_callback(self):
return self._interrupted
def run(self):
self.init()
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, self._signal_handler)
# site
server.run(self._conversation, self)
statistic.report(0)
self.initDetector()
def initDetector(self):
if self.detector is not None:
self.detector.terminate()
models = [
constants.getHotwordModel(config.get('hotword', 'wukong.pmdl')),
constants.getHotwordModel(utils.get_do_not_bother_on_hotword()),
constants.getHotwordModel(utils.get_do_not_bother_off_hotword())
]
self.detector = snowboydecoder.HotwordDetector(models, sensitivity=config.get('sensitivity', 0.5))
# main loop
try:
self.detector.start(detected_callback=[self._detected_callback,
self._do_not_bother_on_callback,
self._do_not_bother_off_callback],
audio_recorder_callback=self._conversation.converse,
interrupt_check=self._interrupt_callback,
silent_count_threshold=config.get('silent_threshold', 15),
recording_timeout=config.get('recording_timeout', 5) * 4,
sleep_time=0.03)
self.detector.terminate()
except Exception as e:
logger.critical('离线唤醒机制初始化失败:{}'.format(e))
def md5(self, password):
return hashlib.md5(password.encode('utf-8')).hexdigest()
def update(self):
updater = Updater()
return updater.update()
def fetch(self):
updater = Updater()
updater.fetch()
def restart(self):
logger.critical('程序重启...')
python = sys.executable
os.execl(python, python, * sys.argv)
if __name__ == '__main__':
if len(sys.argv) == 1:
wukong = Wukong()
wukong.run()
else:
fire.Fire(Wukong)
| [
"m@hahack.com"
] | m@hahack.com |
0c16a7a90a73e965be94888d55b81d775af1d692 | c1267fbec95318184e7388cddf9b7085f797d514 | /2022/11 November/db11292022.py | 24d6448cf5f12453bf1a06dd12662031c34363b4 | [
"MIT"
] | permissive | vishrutkmr7/DailyPracticeProblemsDIP | 1aedfd2e173847bf22989a6b0ec550acebb2bd86 | 2c365f633a1e1bee281fbdc314969f03b17ac9ec | refs/heads/master | 2023-05-31T23:49:52.135349 | 2023-05-28T09:32:12 | 2023-05-28T09:32:12 | 199,596,248 | 10 | 4 | MIT | 2022-11-02T21:31:59 | 2019-07-30T07:12:46 | Python | UTF-8 | Python | false | false | 1,371 | py | """
You are given an image represented as a matrix. Each value in the matrix represents the color of
an individual pixel. Given a new color represented as an integer and a starting row and column,
transform every pixel adjacent to the starting pixel that shares its color to the new color.
Note: This is effectively implementing a "bucket fill" as in software like Microsoft Paint.
Ex: Given the following image, row, column, and color…
image = [
[0,1,1],
[0,1,0],
[1,1,1]
], row = 1, column = 1, color = 3 modify image to be as follows...
image = [
[0, 3, 3],
[0, 3, 0],
[3, 3, 3],
].
"""
class Solution:
def floodFill(
self, image: list[list[int]], sr: int, sc: int, newColor: int
) -> list[list[int]]:
color = image[sr][sc]
if color == newColor:
return image
def dfs(r, c):
if image[r][c] != color:
return
image[r][c] = newColor
if r >= 1:
dfs(r - 1, c)
if r + 1 < len(image):
dfs(r + 1, c)
if c >= 1:
dfs(r, c - 1)
if c + 1 < len(image[0]):
dfs(r, c + 1)
dfs(sr, sc)
return image
# Test Cases
if __name__ == "__main__":
solution = Solution()
print(solution.floodFill([[0, 1, 1], [0, 1, 0], [1, 1, 1]], 1, 1, 3))
| [
"vishrutkmr7@gmail.com"
] | vishrutkmr7@gmail.com |
7a72814e45a60165eb83e2f468fe15e8a6e6f252 | 02952fc67147a2f11a9ed8c4eb29210bec5672ed | /business/service/controllers/asset.py | f8689535b840944700446821a8c74e7c96365255 | [] | no_license | cuijianzhe/cow | b110a70398b09a401dadc7d3ed24dfe2bae50f5b | 3539cab6e73571f84b7f17391d9a363a756f12e1 | refs/heads/main | 2023-06-04T10:33:33.975885 | 2021-06-19T10:40:36 | 2021-06-19T10:40:36 | 340,634,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | from django.db import transaction
from django.db.models import Q
from base import errors
from base import controllers as base_ctl
from business.service.models import ServiceAssetModel
from asset.manager.models import AssetModel
def create_service_asset(obj_id, asset_id, operator=None):
'''
    Create an association between a service and an asset module
'''
query = {
'service_id': obj_id,
'asset_id': asset_id,
}
if ServiceAssetModel.objects.filter(**query).exists():
raise errors.CommonError('服务已关联此资产模块')
data = query
obj = base_ctl.create_obj(ServiceAssetModel, data, operator)
data = obj.to_dict()
return data
def delete_service_asset(obj_id, asset_id, operator=None):
'''
    Delete the association between a service and an asset module
'''
query = {
'service_id': obj_id,
'asset_id': asset_id,
}
obj = ServiceAssetModel.objects.filter(**query).first()
if not obj:
raise errors.CommonError('服务未关联此资产模块')
base_ctl.delete_obj(ServiceAssetModel, obj.id, operator)
def get_service_assets(obj_id, page_num=None, page_size=None, operator=None):
'''
    List the asset modules associated with a service
'''
batch_ids = ServiceAssetModel.objects.filter(service_id=obj_id)\
.values_list('asset_id', flat=True).all()
base_query = AssetModel.objects.filter(id__in=batch_ids)
total = base_query.count()
objs = base_ctl.query_objs_by_page(base_query, page_num, page_size)
data_list = []
for obj in objs:
data = obj.to_dict()
data_list.append(data)
data = {
'total': total,
'data_list': data_list,
}
return data
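# A hypothetical call sequence for these controllers (ids and operator are
# placeholders, not values from the project):
#
# create_service_asset(obj_id=1, asset_id=2, operator=current_user)
# page = get_service_assets(obj_id=1, page_num=1, page_size=20)
# delete_service_asset(obj_id=1, asset_id=2, operator=current_user)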
| [
"598941324@qq.com"
] | 598941324@qq.com |
df96f22537c11dc40db74f536f445dbf791445d9 | b1bc75cd16aaf393920c4b49f19696c14e762a4d | /src/Comprehensions_Exercise/08_Heroes_Inventory.py | 71fa4895f59a7ae8aaccaae8b621e4efd45def4b | [] | no_license | ivelinakaraivanova/SoftUniPythonAdvanced | 848ef5bd4332438a8b061ff5fb2d425103a1d4a4 | 0a90f30b724f716303dc33b2a6a6234f78218c5e | refs/heads/main | 2023-01-08T19:03:15.719055 | 2020-11-08T12:25:10 | 2020-11-08T12:25:10 | 311,052,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | names = input().split(", ")
inventory = {name: {} for name in names}
while True:
info = input()
if info == "End":
break
else:
split_info = info.split("-")
name = split_info[0]
item = split_info[1]
cost = int(split_info[2])
if item not in inventory[name]:
inventory[name][item] = cost
for name, items in inventory.items():
items_cost = 0
for cost in inventory[name].values():
items_cost += cost
print(f"{name} -> Items: {len(inventory[name])}, Cost: {items_cost}")
# [print(f"{name} -> Items: {len(inventory[name])}, Cost: {sum(inventory[name].values())}") for name in inventory]
| [
"73067985+ivelinakaraivanova@users.noreply.github.com"
] | 73067985+ivelinakaraivanova@users.noreply.github.com |
9b7f78505b524d6179cf5728fe0c1145ea7b6752 | d8346eaf1c910ff02c7b243692a2766b8b089f06 | /for-post/python-standard-library-inspect/s1-is-what/isfunction.py | 7024a5e72808a4b90f8fa453d4d5dbed0ed797ee | [] | no_license | dustinpfister/examples-python | 55304c99ba3af82cd8784ee98745546632155c68 | a9910ee05d4df524f951f61b6d9778531a58ccbf | refs/heads/master | 2023-03-06T21:10:18.888654 | 2021-02-26T20:32:52 | 2021-02-26T20:32:52 | 318,595,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | import inspect
def foo():
return 'bar'
print(inspect.isfunction(foo)) # True
print( inspect.isfunction('') ) # False
print( inspect.isfunction(42) ) # False
print( inspect.isfunction([]) ) # False
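# Added illustration (standard inspect semantics): lambdas count as plain
# functions, while built-ins do not.
print( inspect.isfunction(lambda: 0) ) # True
print( inspect.isfunction(len) ) # False - len is a built-in; see inspect.isbuiltin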
| [
"dustin.pfister@gmail.com"
] | dustin.pfister@gmail.com |
b26d9dd1c406f50cbc795ab1b824165b0d90bd8e | 4c7914bf0eb52f2fe5dab70fa630a322a9449e05 | /flask_web_api/todo-api/flask/Lib/site-packages/pip/_vendor/distlib/version.py | 17db973c32a62a00a6b5d3e8e2b53541535e715d | [] | no_license | xhongc/pythonCrawl | f334d737326a47782d2533c4db23734729f13099 | a38e59496dd78b6e070ea6882043b1744190103e | refs/heads/master | 2022-12-10T01:22:01.608193 | 2020-01-12T09:43:19 | 2020-01-12T09:43:22 | 93,115,695 | 4 | 5 | null | 2022-11-22T02:36:28 | 2017-06-02T01:47:22 | Python | UTF-8 | Python | false | false | 23,715 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-440,
setuptools-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$')
num_re = re.compile(r'^\d+(\.\d+)*$')
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
'===': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
m = self.dist_re.match(s)
if not m:
raise ValueError('Not valid: %r' % s)
groups = m.groups('')
self.name = groups[0].strip()
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if groups[2]:
constraints = [c.strip() for c in groups[2].split(',')]
for c in constraints:
m = self.comp_re.match(c)
if not m:
raise ValueError('Invalid %r in %r' % (c, s))
groups = m.groups()
op = groups[0] or '~='
s = groups[1]
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
if not self.num_re.match(vn):
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
:type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
epoch = int(groups[0])
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
_normalized_key = _pep_440_key
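# Illustrative examples (added for clarity; not in the original module) of the
# sortable key produced by _pep_440_key:
#   _pep_440_key('1.0a1') -> (0, (1,), ('a', 1), ('_',), ('final',), ())
#   _pep_440_key('1.0')   -> (0, (1,), ('z',), ('_',), ('final',), ())
# so plain tuple comparison orders the pre-release 1.0a1 before the final 1.0.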
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
1 # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP440_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[1].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
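# Hedged usage sketch (assumes the Version base class defined earlier in this
# module; added for illustration):
#   NormalizedVersion('1.2') == NormalizedVersion('1.2.0')   # True
#   NormalizedVersion('1.2a1') < NormalizedVersion('1.2')    # True
#   NormalizedVersion('1.2.3b')  # raises UnsupportedVersionError (no serial)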
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
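# For example (illustrative): _match_prefix('1.4.5', '1.4') is True because the
# character after the prefix is '.', while _match_prefix('1.45', '1.4') is
# False -- '1.45' is not a member of the 1.4 release series.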
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'===': '_match_arbitrary',
'!=': '_match_ne',
}
def _adjust_local(self, version, constraint, prefix):
if prefix:
strip_local = '+' not in constraint and version._parts[-1]
else:
# both constraint and version are
# NormalizedVersion instances.
# If constraint does not have a local component,
# ensure the version doesn't, either.
strip_local = not constraint._parts[-1] and version._parts[-1]
if strip_local:
s = version._string.split('+', 1)[0]
version = self.version_class(s)
return version, constraint
def _match_lt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version <= constraint
def _match_ge(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version >= constraint
def _match_eq(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_arbitrary(self, version, constraint, prefix):
return str(version) == str(constraint)
def _match_ne(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version == constraint:
return True
if version < constraint:
return False
# if not prefix:
# return True
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
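# Hedged usage sketch (relies on the Matcher plumbing defined above; the
# distribution name 'pkg' is hypothetical):
#   m = NormalizedMatcher('pkg (~= 2.2.3)')
#   m.match('2.2.5')   # True  -- same 2.2 series, not older than 2.2.3
#   m.match('2.3.0')   # False -- outside the 2.2 series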
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading r(ev)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
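# Illustrative results (added; not part of the original source):
#   _suggest_semantic_version('v1.2')      # -> '1.2.0'
#   _suggest_semantic_version('1.0 beta')  # -> '1.0.0+beta'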
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
    those versions during PyCon 2009, 4287 of them:
    - 2312 (53.93%) match NormalizedVersion without change
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
@returns A rational version string, or None, if couldn't determine one.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
# version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
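# Illustrative results (added; not part of the original source):
#   _suggest_normalized_version('1.0')           # -> '1.0' (already rational)
#   _suggest_normalized_version('1.0-beta2')     # -> '1.0b2'
#   _suggest_normalized_version('1.0-dev-r371')  # -> '1.0.dev371'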
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
result.append(p)
return tuple(result)
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
@property
def is_prerelease(self):
result = False
for x in self._parts:
if (isinstance(x, string_types) and x.startswith('*') and
x < '*final'):
result = True
break
return result
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
    numeric_re = re.compile(r'^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
        # by zero-filling numeric values to simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
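# Illustrative keys (added for clarity): because '|' sorts after every
# lowercase tag, a pre-release orders before the corresponding final release:
#   _semantic_key('1.0.0-alpha.1') -> ((1, 0, 0), ('alpha', '00000001'), ('*',))
#   _semantic_key('1.0.0')         -> ((1, 0, 0), ('|',), ('*',))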
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s), # suggester is called with the version string only
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
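# Hedged usage sketch (added; not part of the original module):
#   scheme = get_scheme('default')
#   scheme.is_valid_version('1.0.post1')  # -> True
#   scheme.suggest('1.0-beta')            # -> '1.0b0'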
| [
"408737515@qq.com"
] | 408737515@qq.com |
ce0b323cb50ab3b2911cba7b87a7286982cf0a08 | 85f5dff291acf1fe7ab59ca574ea9f4f45c33e3b | /api/tacticalrmm/accounts/migrations/0020_role_can_manage_roles.py | 2768c2e431be1755bae81a13959ab0f191794daa | [
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sadnub/tacticalrmm | a4ecaf994abe39244a6d75ed2166222abb00d4f4 | 0af95aa9b1084973642da80e9b01a18dcacec74a | refs/heads/develop | 2023-08-30T16:48:33.504137 | 2023-04-10T22:57:44 | 2023-04-10T22:57:44 | 243,405,684 | 0 | 2 | MIT | 2020-09-08T13:03:30 | 2020-02-27T01:43:56 | Python | UTF-8 | Python | false | false | 387 | py | # Generated by Django 3.2.1 on 2021-05-11 17:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0019_user_role'),
]
operations = [
migrations.AddField(
model_name='role',
name='can_manage_roles',
field=models.BooleanField(default=False),
),
]
| [
"dcparsi@gmail.com"
] | dcparsi@gmail.com |
70c795892317f9d87e01e06f716470718f0d8793 | 2305ce053d16652d31823bd07faf38553b4f9b63 | /books/AutomateTheBoringStuffWithPython/Chapter15/P06_multithreading.py | 07eaa3a08feacddc40392fc2cca9fa838ed80fa3 | [
"MIT"
] | permissive | leihuagh/python-tutorials | cff3c5e250a152252d4b725bca19f55721483249 | 33831b983d7bd1491e367b6c7654e687d5ba709b | refs/heads/master | 2020-03-29T17:59:31.226400 | 2018-09-24T08:41:26 | 2018-09-24T08:41:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | # This program uses the threading module to manipulate threads
import threading
# Passing Arguments to the Thread's Target Function
threadObj = threading.Thread(target=print, args=['Cats', 'Dogs', 'Frogs'], kwargs={'sep': ' & '})
threadObj.start()
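# Expected output -- the three positional arguments joined by the sep keyword:
# Cats & Dogs & Frogs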
| [
"jose@JoseALerma.com"
] | jose@JoseALerma.com |
fff3485b667977e7f7ca2a6ff3f4a59c0275e427 | fde4ec12ffb460d8c952923571c25ff7e231e274 | /lib/support.py | 86fe69ac2a2e6a77842060b848371d5780cefead | [] | no_license | alexdawn/final-lib-mod-tools | 0b8188c44a053acb90ab5d5ab3156f97beb0d636 | 3ba136fee9db43237e87eb48724f1a2aff02fafb | refs/heads/main | 2023-05-06T11:29:41.450438 | 2021-05-22T16:32:47 | 2021-05-22T16:32:47 | 347,744,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,553 | py | from utilities import check_eof
from flags import nationalities, unit_types, support_flags
def read_support(file):
supports = []
header = file.read(13)
if header != b'SUPPORT\x00\x001.0\x00':
raise RuntimeError("File is not a SUPPORT.DAT")
count = int.from_bytes(file.read(4), 'little')
for i in range(count):
cid = int.from_bytes(file.read(2), 'little')
flags = int.from_bytes(file.read(4), 'little')
if flags & (0xffff - 0x600b) != 0:
raise RuntimeError(f"Unknown flag, {cid} {flags}")
flag_text = []
for f, v in support_flags.items():
if f & flags:
flag_text.append(v)
licon = int.from_bytes(file.read(4), 'little')
ricon = int.from_bytes(file.read(4), 'little')
nation = int.from_bytes(file.read(4), 'little')
string_length1 = int.from_bytes(file.read(1), 'little')
string1 = bytes(file.read(string_length1)).decode('utf-8')
string_length2 = int.from_bytes(file.read(1), 'little')
string2 = bytes(file.read(string_length2)).decode('utf-8')
cost = int.from_bytes(file.read(2), 'little')
breakpoint = int.from_bytes(file.read(1), 'little')
# TODO handle special morale values
#define CLAN_MORALE -1
#define MORALE_SPECIAL -2
#define ALLWAYS_PASS_MORALE 0
morale = int.from_bytes(file.read(1), 'little', signed=True)
vp = int.from_bytes(file.read(1), 'little')
u_count = int.from_bytes(file.read(1), 'little')
allow_nation = int.from_bytes(file.read(4), 'little') # allow nat flag
nation_flags = []
for f, v in nationalities.items():
if f & allow_nation:
nation_flags.append(v)
allow_type = int.from_bytes(file.read(4), 'little') # allow type flag
allow_type_flags = []
for f, v in unit_types.items():
if f & allow_type:
allow_type_flags.append(v)
turns_to_build = int.from_bytes(file.read(2), 'big')
uids = [
int.from_bytes(file.read(2), 'little')
for i in range(u_count)
]
#unames = [unit_lookup[x] for x in uids]
#detachments[cid] = string1
supports.append({
"id": cid,
"name1": string1,
"name2": string2,
"cost": cost,
"breakpoint": breakpoint,
"morale": morale,
"victory_points": vp,
"units": uids,
"flags": flag_text,
"licon": licon,
"ricon": ricon,
"nation": nation,
"allow_nation": nation_flags,
"type": allow_type_flags,
"time_to_build": turns_to_build
})
check_eof(file)
return supports
def write_support(file, data):
support_flags_lookups = {v: k for k, v in support_flags.items()}
nationalities_lookup = {v: k for k, v in nationalities.items()}
unit_types_lookup = {v: k for k, v in unit_types.items()}
file.write(b'SUPPORT\x00\x001.0\x00')
file.write(len(data).to_bytes(4, 'little'))
for d in data:
file.write(d['id'].to_bytes(2, 'little'))
#encode flags
flag_value = 0
for x in d['flags']:
flag_value += support_flags_lookups[x]
file.write(flag_value.to_bytes(4, 'little'))
file.write(d['licon'].to_bytes(4, 'little'))
file.write(d['ricon'].to_bytes(4, 'little'))
file.write(d['nation'].to_bytes(4, 'little'))
file.write(len(d['name1']).to_bytes(1, 'little'))
file.write(d['name1'].encode('ascii'))
file.write(len(d['name2']).to_bytes(1, 'little'))
file.write(d['name2'].encode('ascii'))
file.write(d['cost'].to_bytes(2, 'little'))
file.write(d['breakpoint'].to_bytes(1, 'little'))
file.write(d['morale'].to_bytes(1, 'little', signed=True))
file.write(d['victory_points'].to_bytes(1, 'little'))
file.write(len(d['units']).to_bytes(1, 'little'))
# nation flag
flag_value = 0
for x in d['allow_nation']:
flag_value += nationalities_lookup[x]
file.write(flag_value.to_bytes(4, 'little'))
# allow type flags
flag_value = 0
for x in d['type']:
flag_value += unit_types_lookup[x]
file.write(flag_value.to_bytes(4, 'little'))
file.write(d['time_to_build'].to_bytes(2, 'big'))
for u in d['units']:
file.write(u.to_bytes(2, 'little'))
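# Illustrative round-trip sketch (file names are hypothetical):
# with open('SUPPORT.DAT', 'rb') as f:
#     supports = read_support(f)
# with open('SUPPORT_OUT.DAT', 'wb') as f:
#     write_support(f, supports)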
| [
"al_4242@hotmail.co.uk"
] | al_4242@hotmail.co.uk |
8d07e5610c20f8a30a00db726be0afc5b8e0e99d | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc087/C/2448233.py | 0025206cebd9820324b3c8fdd297e25509fe70e7 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | N, L = map(int, input().split())
make = lambda:[None, None, 0]
root = make()
def construct(s):
n = root
for i in s:
if n[i] is None:
n[i] = n = make()
else:
n = n[i]
n[2] = 1
for i in range(N):
s = map(int, input())
construct(s)
caps = {}
st = [(root, 0, 0)]
while st:
n, i, l = st.pop()
if i:
if n[1] is None:
caps[L - l] = caps.get(L - l, 0) + 1
else:
if not n[1][2]:
st.append((n[1], 0, l+1))
else:
st.append((n, 1, l))
if n[0] is None:
caps[L - l] = caps.get(L - l, 0) + 1
else:
if not n[0][2]:
st.append((n[0], 0, l+1))
ans = 0
for v in caps:
k = caps[v]
if k % 2 == 0:
continue
v -= 1
r = 1
while v % 4 == 3:
v //= 4
r *= 4
if v % 4 == 1:
ans ^= r * 2
else:
ans ^= r
print('Alice' if ans else 'Bob') | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
28015851dc7482ba551b07fe5e664bf6037e2d3b | 633ab8880dc367feefdb6ef565ed0e70a4094bc1 | /10001-11000/10998.py | 3131629d360c36ea51689e91ebb58cea9811362e | [] | no_license | winston1214/baekjoon | 2e9740ee2824d7777f6e64d50087b5c040baf2c6 | 20125255cd5b359023a6297f3761b2db1057d67d | refs/heads/master | 2023-03-04T09:07:27.688072 | 2021-02-16T13:51:49 | 2021-02-16T13:51:49 | 284,832,623 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | # @Author YoungMinKim
# baekjoon
a, b = map(int, input().split())
print(a*b) | [
"winston1214@naver.com"
] | winston1214@naver.com |
d1b020ece21f7ce8853640f57ddbd7c5ce371652 | 9aa06834320de94c68a43c6a211dde044c25bee2 | /lessons/python/lesson-4/tic-tac-toe.py | 2897d9f2406a7a784ac8e626e34280f1d6b628cb | [] | no_license | airportyh/begin-to-code | 7624728fa7ae916488383a1bf2e000b734af549b | 3ad315548d84d68186b29b966187b2baafb3336d | refs/heads/master | 2021-01-22T12:39:01.365091 | 2018-05-18T21:15:22 | 2018-05-18T21:15:22 | 102,354,678 | 0 | 1 | null | 2018-10-23T11:33:59 | 2017-09-04T11:34:14 | JavaScript | UTF-8 | Python | false | false | 99 | py | board = [
[' ', ' ', ' '],
[' ', ' ', ' '],
[' ', ' ', ' ']
]
while True:
print()
| [
"airportyh@gmail.com"
] | airportyh@gmail.com |
8d2737a017cfb3ed8e38396bf711a649d34cd224 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/290_v2/test_class_rosters.py | 867ffcc720b31176660563cf54d6c02a576c0193 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,294 | py | # _______ p__
#
# ____ ? _______ ?
#
# full """
# 17409,"Matheson, Rick",,,,,,,,,,
# 36283,"Jones, Tom",SCI09-4 - SU,MATH09-2 - PH,TA09-1 - AB,IS09-4 - LM,SCI09-3 - NdN,MATH09-2 - RB,DE09-3 - KmQ,ENG09-3 - KaR,PE09-3 - PS
# 99415,"Blake, Arnold",,,,,,,,,,
# """ # noqa E501
# p.. """
# 17409,"Jones, Tom",,,,,,,,,,
# 17409,"Matheson, Rick",,IS09-1 - BR,,SCI09-4 - SU,MATH09-2 - RB,,ENG09-4 - LE,,PE09-1 - MR,
# 99415,"Blake, Arnold",,,,,,,,,,
# """ # noqa E501
# empty """
# 99415,"Blake, Arnold",,,,,,,,,,
# 21692,"Prest, Phil",,,,,,,,,,
# 36283,"Jones, Tom",,,,,,,,,,
# """ # noqa E501
#
#
# ?p__.m__.p. "content, expected", [
# (? 'SCI09-4,2020,36283',
# 'MATH09-2,2020,36283',
# 'TA09-1,2020,36283',
# 'IS09-4,2020,36283',
# 'SCI09-3,2020,36283',
# 'MATH09-2,2020,36283',
# 'DE09-3,2020,36283',
# 'ENG09-3,2020,36283',
# 'PE09-3,2020,36283' ),
# (p.., 'IS09-1,2020,17409',
# 'SCI09-4,2020,17409',
# 'MATH09-2,2020,17409',
# 'ENG09-4,2020,17409',
# 'PE09-1,2020,17409' ),
# (? # list),
#
# ___ test_class_rosters content e.. tmp_path
# csvfile ? / "content"
# ?.w.. ?.l..
# a.. ? ?
# ... a.. __ e..
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
67079c5188c8255386f1a779167f04afdd8b0222 | a5103b7d5066138ac1a9aabc273361491a5031cd | /daily/8/pytorch_tutoral/nmt/utils.py | 18f8ce03f52f5026e6e9c64dfede2228fae0242d | [] | no_license | mckjzhangxk/deepAI | 0fa2f261c7899b850a4ec432b5a387e8c5f13e83 | 24e60f24b6e442db22507adddd6bf3e2c343c013 | refs/heads/master | 2022-12-13T18:00:12.839041 | 2021-06-18T03:01:10 | 2021-06-18T03:01:10 | 144,862,423 | 1 | 1 | null | 2022-12-07T23:31:01 | 2018-08-15T14:19:10 | Jupyter Notebook | UTF-8 | Python | false | false | 1,250 | py | import torch
def subseqenceMask(x):
'''
x:(N,T)
return:
(1,T,T)
'''
T=x.size(-1)
return torch.tril(torch.ones((T,T))).to(x.device).unsqueeze(0).byte()
def standardMask(x,paddingidx):
'''
x:(N,T)
    paddingidx: positions where x equals paddingidx get mask value 0
return:(N,1,T)
'''
return (x!=paddingidx).unsqueeze(1).byte()
def makeMask(x,y,paddingidx):
xmask=standardMask(x,paddingidx)
ymask=standardMask(y,paddingidx)&subseqenceMask(y)
return xmask,ymask
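# Note (added for clarity): broadcasting (N,1,T) & (1,T,T) gives ymask shape
# (N,T,T), so each target position attends only to non-pad tokens at or before
# its own index.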
def translation(x,parser,eos):
'''
x:(N)
'''
ret=[]
N=x.shape[0]
for line in x:
wordlist=[parser.vocab.itos[w] for w in line]
if eos in wordlist:
wordlist=wordlist[:wordlist.index(eos)]
l=' '.join(wordlist)
ret.append(l)
return ret
if __name__=='__main__':
import matplotlib.pyplot as plt
N,T=38,25
X=torch.randint(0,10,(N,T))
mask1=standardMask(X,0)
mask2=subseqenceMask(X)
assert mask1.shape==(N,1,T)
assert mask2.shape==(1,T,T)
plt.figure()
plt.imshow(mask1[0],cmap='gray')
plt.figure()
plt.imshow(mask2[0],cmap='gray')
plt.figure()
plt.imshow(mask1[0]&mask2[0],cmap='gray')
plt.show()
| [
"mckj_zhangxk@163.com"
] | mckj_zhangxk@163.com |
9b7bcbbd5ad71852ef309eabb6c4791ae139d2ec | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit243.py | fd8225ab38340539504426ed59c0d2a4ae4171d8 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,565 | py | # qubit number=3
# total number=48
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.cx(input_qubit[0],input_qubit[2]) # number=45
prog.x(input_qubit[2]) # number=46
prog.cx(input_qubit[0],input_qubit[2]) # number=47
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.cx(input_qubit[0],input_qubit[2]) # number=33
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit243.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
7e348e9e9eefeca6a65cfeb167a6469c154902b8 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/66188be9f74aff684d30eb43f897a3a8f4f5af3aMultipleBrokersVirtualTopic.py | 66188be9f74aff684d30eb43f897a3a8f4f5af3a | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 3,763 | py | #!/usr/bin/env python
# Massimo Paladin
# Massimo.Paladin@cern.ch
import os
from MultipleProducerConsumer import MultipleProducerConsumer, TimeoutException
import sys
import time
from utils.Timer import Timer
import logging
logging.basicConfig()
log = logging.getLogger(__file__)
class MultipleBrokersVirtualTopic(MultipleProducerConsumer):
def __init__(self, mainBrokerName, mainBrokerHost, otherBrokers, port=6163, destination='test.virtualtopic', vtPrefix='Consumer', hostcert=None, hostkey=None, messages=10, timeout=15):
MultipleProducerConsumer.__init__(self)
self.mainBrokerName = mainBrokerName
self.mainBrokerHost = mainBrokerHost
self.otherBrokers = otherBrokers
self.port = port
self.destination = destination
self.vtPrefix = vtPrefix
self.hostcert = hostcert
self.hostkey = hostkey
self.messages = messages
self.timeout = timeout
def setup(self):
self.destinationTopic = '/topic/%s' % self.destination
if self.hostcert and self.hostkey:
self.setSSLAuthentication(self.hostcert, self.hostkey)
self.createBroker(self.mainBrokerName, self.mainBrokerHost, self.port)
for name, host in self.otherBrokers.items():
self.createBroker(name, host, self.port)
def run(self):
timer = Timer(self.timeout)
''' Starting consumers '''
for name, host in self.otherBrokers.items():
self.createConsumer(name,
'/queue/%s.%s.%s' % (self.vtPrefix, name, self.destination),
timer.left)
time.sleep(1)
''' Creating producer and sending messages '''
self.createProducer(self.mainBrokerName, self.destinationTopic, timer.left)
for i in range(self.messages):
self.sendMessage(self.mainBrokerName,
self.destinationTopic,
{'persistent':'true'},
'testing-%s' % i)
self.waitForMessagesToBeSent(self.mainBrokerName,
self.destinationTopic,
self.messages)
for broker in self.otherBrokers:
self.waitForMessagesToArrive(broker, '/queue/%s.%s.%s' % (self.vtPrefix, broker, self.destination), self.messages, timer.left)
''' Wait a couple of seconds to see if we get duplicated '''
time.sleep(2)
for broker in self.otherBrokers:
self.assertMessagesNumber(broker, '/queue/%s.%s.%s' % (self.vtPrefix, broker, self.destination), self.messages)
def stop(self):
self.destroyAllBrokers()
if __name__ == '__main__':
log.setLevel(logging.INFO)
logging.getLogger('MultipleProducerConsumer').setLevel(logging.INFO)
broker = 'vtb-71'
brokerHost = 'vtb-generic-71'
brokers = {'vtb-71':'vtb-generic-71',
'vtb-72':'vtb-generic-72',
'vtb-73':'vtb-generic-73',
'vtb-74':'vtb-generic-74'}
# broker = 'gridmsg1'
# brokerHost = 'gridmsg101.cern.ch'
# brokers = {'gridmsg1':'gridmsg101.cern.ch',
# 'gridmsg2':'gridmsg102.cern.ch',
# 'auth':'broker.afroditi.hellasgrid.gr',
# 'srce':'msg.cro-ngi.hr'}
mbvt = MultipleBrokersVirtualTopic(broker, brokerHost, brokers, 6163)
mbvt.setup()
try:
mbvt.start()
except KeyboardInterrupt:
print "keyboard interrupt"
except TimeoutException, e:
print '%s' % e
except AssertionError, e:
print '%s' % e
mbvt.stop()
print 'Test passed!'
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
5e6ec243dd20422d54285f93ee30bdd41b8f52bb | 73e147e1d49656fafba5d4bf84df5ded2c4dca73 | /team_9/cocos/test/test_scene_add_rotated.py | 6147c4160ead9a0f710d5cc59d65fce542588dd8 | [
"LGPL-2.1-only",
"CC-BY-NC-4.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-SA-2.0",
"BSD-3-Clause"
] | permissive | Donnyvdm/dojo19 | 2278747366c57bfc80eb9ee28ca617ec0a79bae3 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | refs/heads/master | 2020-07-26T12:22:15.882800 | 2019-09-15T20:34:36 | 2019-09-15T20:34:36 | 208,642,183 | 1 | 0 | BSD-3-Clause | 2019-09-15T18:57:53 | 2019-09-15T18:57:52 | null | UTF-8 | Python | false | false | 1,083 | py | from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, q"
tags = "Scene, rotation"
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos.layer import *
import pyglet
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.get_window_size()
sprite1 = Sprite( 'grossini.png' , (x//4, y//2) )
sprite2 = Sprite( 'grossinis_sister1.png', (x//2, y//2) )
sprite3 = Sprite( 'grossinis_sister2.png', (x/(4/3.0), y//2) )
self.add( sprite2 )
self.add( sprite1 )
self.add( sprite3 )
def main():
director.init()
main_scene = cocos.scene.Scene()
main_scene.add( ColorLayer( 255, 0, 0, 255 ) )
l = TestLayer()
l.rotation = 45
main_scene.add( l )
director.run (main_scene)
if __name__ == '__main__':
main()
| [
"a.campello@wellcome.ac.uk"
] | a.campello@wellcome.ac.uk |
a03527cfea5f7c60f818ab1ab24315d3cf61bff2 | 7554a16508c7f57afe7e99a0ee1ddec5a7150018 | /test/email_tut.py | 65d4c1db9959fd303f315fb6fd5d25ac514706c0 | [] | no_license | csrgxtu/Hummable | 05414f6c8bc856d10d39af9997029f04293c79a9 | 17444c3f9a98149cf409fa72d3dc710e3957bf9f | refs/heads/master | 2021-01-23T00:14:46.615266 | 2017-04-13T16:36:26 | 2017-04-13T16:36:26 | 85,707,147 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,264 | py | import imaplib, struct, time
import email as Email
class Mail():
def __init__(self):
self.user = '*'
self.password = 'fuck'
# self.ser = serial.Serial('/dev/tty.usbmodem621', 9600)
self.M = imaplib.IMAP4_SSL('imap.gmail.com', '993')
self.M.login(self.user, self.password)
def get_emails(self, email_ids):
data = []
for e_id in email_ids:
_, response = self.M.fetch(e_id, '(UID BODY[TEXT])')
data.append(response[0][1])
return data
def get_subjects(self, email_ids):
subjects = []
for e_id in email_ids:
_, response = self.M.fetch(e_id, '(body[header.fields (from)])')
print('From', str(response[0][1]).split(' <')[1][0:-10])
_, response = self.M.fetch(e_id, '(body[header.fields (subject)])')
print('Subject', str(response[0][1][9:])[2:-9])
subjects.append(response[0][1][9:])
return subjects
def emails_from(self, name):
'''Search for all mail from name'''
status, response = self.M.search(None, '(FROM "%s")' % name)
email_ids = [e_id for e_id in response[0].split()]
print('Number of emails from %s: %i. IDs: %s' % (name, len(email_ids), email_ids))
return email_ids
def checkMail(self):
self.M.select('INBOX')
status, response = self.M.status('INBOX', "(UNSEEN)")
status, email_ids = self.M.search(None, '(UNSEEN)')
print(str(email_ids[0])[1:].replace("'", "").split(' '))
emails = self.get_subjects(str(email_ids[0])[1:].replace("'", "").split(' '))
for email in emails:
print(email)
# self.unRead = self.M.search(None, '(UNSEEN)')
# print(self.unRead[0].split())
# print(self.unRead[1][0].split())
# # return len(self.unRead[1][0].split())
        # return self.unRead[1][0].split()[-1]
        return len(emails)  # report the unread count so sendData() can pack it
def sendData(self):
self.numMessages = self.checkMail()
        # pack the unread count into a single byte to send over serial
self.ser.write(struct.pack('B', self.numMessages))
email = Mail()
# check for new mail every minute
while 1:
mid = email.checkMail()
# result, data = email.M.uid('fetch', mid, '(RFC822)')
# print(result)
# # b = Email.message_from_string(data[0][1])
# b = Email.message_from_bytes(data[0][1])
# print(b['From'])
# print(b['Subject'])
# payloads = b.get_payload()
# for payload in payloads:
# # print(payload)
# print(payload.get_payload())
time.sleep(60) | [
"1246506786@qq.com"
] | 1246506786@qq.com |
5d5b6f3ee3813a35cf978786bddba84d270686ee | 7a3389ad132779916574091e3b909fed0b93ace7 | /pom/asker/asker_home_page.py | 2d9a3b14e47f57b210bdfa24a3a7ed1c9f9eef2e | [] | no_license | bomcon123456/GI_TestAutomationOnboard | 793f594e5209cff365d3089d3103fe83af253114 | dc8b1c0a8a8ac05a23aefc42052251febc8160d5 | refs/heads/master | 2020-09-04T05:34:38.936616 | 2019-11-05T11:14:16 | 2019-11-05T11:14:16 | 219,668,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | import time
from .base_asker_page import BaseAskerPage
class AskerHomePage(BaseAskerPage):
query_field = 'textarea[name=\'text\']'
start_query_button = '.gi-Button.gi-Button--accent.gi-Button--lg.u-width-100'
def login_and_query(self):
login_locator = '#test-login-button'
user = 'input[name=\'email\']'
password = 'input[name=\'password\']'
login_button = 'button#login-button'
self.asker.find_element(login_locator).click()
time.sleep(1)
self.asker.find_element(user).send_keys('askerSelenium2@gmail.com')
self.asker.find_element(password).send_keys('MotConVit123!@')
self.asker.find_element(login_button).click()
self.query()
def query(self):
problem_text = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. ' \
'Donec bibendum, turpis dignissim lobortis dapibus, ' \
'libero arcu cursus elit, a luctus felis lacus et orci. ' \
'Aenean cursus, risus non sodales blandit, ' \
'dui nunc sagittis mi, ac gravida sapien magna at ipsum.'
self.asker.get_waited_visible_element(self.query_field).send_keys(problem_text)
self.asker.get_waited_clickable_element(self.start_query_button).click()
| [
"bomcon123456@gmail.com"
] | bomcon123456@gmail.com |
07fe709101a5b5e53ea109ffbf256a9ac0e00b8a | b76d4db5f996a431615af302a087bada90e7e802 | /Matplotlib_Learn/直方图均衡化.py | cab646d486f3d95af897b56a965c9317f805b3b4 | [] | no_license | budaLi/AI_learn | 1571b27810f004b4a00f9fe0f3dfe7472fa6ee65 | c8b1bf0d5dde7b02ff8b3d0121fd7b6b0ab06862 | refs/heads/master | 2020-09-20T05:42:55.405552 | 2019-12-03T04:42:13 | 2019-12-03T04:42:13 | 224,390,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,579 | py | # @Time : 2019/12/3 10:43
# @Author : Libuda
# @FileName: 直方图均衡化.py
# @Software: PyCharm
# Histogram equalization flattens the gray-level histogram of an image so that
# every gray value occurs with the same probability after the transform.
# Its purpose is to enhance the contrast of the image.
# If the gray-level histogram of an image covers almost the whole gray range
# and, apart from a few prominent gray values, the distribution is roughly
# uniform, then the image has a large gray-level dynamic range and high
# contrast, and its details are richer. It has been shown that, using only the
# histogram of the input image, one can derive a transform function that gives
# the input image these properties; that process is histogram equalization.
from PIL import Image
from pylab import *
image_path=r'C:\Users\lenovo\PycharmProjects\AI_learn\Matplotlib_Learn\1.jpg'
im = array(Image.open(image_path).convert('L'))
def histeq(im,nbr_bins =256):
"""
    Perform histogram equalization on a grayscale image.
    :param im: grayscale image as a 2-D array
    :param nbr_bins: number of histogram bins
    :return: None (the equalized image is displayed)
"""
    # Compute the histogram of the image.
    # histogram is the histogram-statistics function:
    # histogram(a, bins=10, range=None, weights=None, density=False)
    # a is the array of data to bin;
    # bins is the number of bins;
    # range is a length-2 tuple giving the minimum and maximum of the range;
    # the default None means the range is taken from the data itself;
    # weights assigns a weight to each element of the array; histogram()
    # then sums the weights that fall into each bin;
    # with density=True the probability density of each bin is returned;
    # with False, the number of elements in each bin.
print(im.shape)
imhist, bins = histogram(im.flatten(),nbr_bins,density=False)
    print(imhist) # number of pixels with each value
    print(bins) # bin edges (pixel values)
    # Cumulative distribution function (cdf): the normalization step that maps
    # the range of pixel values onto the target range.
    # cumsum is the cumulative-sum function: https://blog.csdn.net/feng_jlin/article/details/82790746
cdf = imhist.cumsum()
    # normalize so the cdf spans [0, 255]
cdf = 255*cdf /cdf[-1]
    # Use linear interpolation of the cdf to compute the new pixel values.
    # flatten turns the multi-dimensional image array into a 1-D vector.
    # interp is the linear-interpolation function: https://blog.csdn.net/hfutdog/article/details/87386901
im2 = interp(im.flatten(),bins[:-1],cdf)
im2 = im2.reshape(im.shape)
new_image = Image.fromarray(uint8(im2))
# new_image.save("直方图均衡化图.jpg")
new_image.show()
if __name__ == '__main__':
histeq(im) | [
"1364826576@qq.com"
] | 1364826576@qq.com |
dc2732a1e7b3eaca2dd341a07f466ef1b8af2a2a | f972e22df004b419d23b4b03d3c7e42e604a2e2b | /compute/wps/tests/test_backend_ophidia.py | e3b0b3f1d98a35e239a5cba845155eb7e3ba2d2b | [] | no_license | OphidiaBigData/esgf-compute-wps | 9ec663b1701f2336f08117a6fb0725d71adfe078 | 8dd26dde385fbe861c78e432e0954725d7bf9b18 | refs/heads/master | 2020-04-28T10:20:49.718253 | 2019-02-04T09:46:43 | 2019-02-04T09:46:43 | 175,198,536 | 0 | 0 | null | 2019-03-12T11:39:20 | 2019-03-12T11:39:19 | null | UTF-8 | Python | false | false | 1,713 | py | import unittest
import cwt
import mock
from django import test
from django.conf import settings
from wps import models
from wps import WPSError
from wps.backends import ophidia
#class OphidiaBackendTestCase(test.TestCase):
# fixtures = ['servers.json', 'users.json', 'processes.json']
#
# def setUp(self):
# self.backend = ophidia.Ophidia()
#
# self.user = models.User.objects.first()
#
# def test_execute_missing_operation(self):
# mock_job = mock.MagicMock()
#
# variables = {
# 'v0': cwt.Variable('file:///test.nc', 'tas', name='v0'),
# }
#
# domains = {'d0': cwt.Domain([cwt.Dimension('time', 0, 200)])}
#
# with self.assertRaises(WPSError) as e:
# self.backend.execute('Oph.max', variables, domains, {}, job=mock_job, user=self.user)
#
# def test_execute(self):
# mock_job = mock.MagicMock()
#
# variables = {
# 'v0': cwt.Variable('file:///test.nc', 'tas', name='v0'),
# }
#
# domains = {'d0': cwt.Domain([cwt.Dimension('time', 0, 200)])}
#
# operation = cwt.Process(identifier='Oph.max', name='max')
#
# operation.inputs = variables.values()
#
# operation.domain = domains['d0']
#
# result = self.backend.execute('Oph.max', variables, domains, {'max': operation}, job=mock_job, user=self.user)
#
# self.assertIsNotNone(result)
#
# def test_populate_processes(self):
# process_count = len(ophidia.PROCESSES)
#
# self.backend.populate_processes()
#
# self.assertEqual(len(self.backend.processes), process_count)
#
# def test_initialize(self):
# with self.assertNumQueries(0):
# self.backend.initialize()
| [
"boutte.jason@gmail.com"
] | boutte.jason@gmail.com |
aaf5614c687126755584a1a474e49dc112cd19ea | 2e6fbadca5364eb3c7271cef68c4745fde59259a | /dmu_utils/sqlachemy/types.py | 34b4f9958bf0fa293f9226789a8dca4379b3b6c9 | [] | no_license | dmugtasimov/dmu-utils | b25369c264f426800d916580af4ebb43c2c9f408 | c4f91fb43675a4bcc54cc7e5ea52a789e46f6439 | refs/heads/master | 2021-01-23T03:37:42.182066 | 2017-11-10T07:44:38 | 2017-11-10T07:44:38 | 86,107,402 | 0 | 0 | null | 2017-04-09T16:06:14 | 2017-03-24T20:21:17 | Python | UTF-8 | Python | false | false | 1,852 | py | from six import iteritems
from schematics import types as sch_types
from schematics.undefined import Undefined
from sqlalchemy import types as sa_types, Column
from dmu_utils.schematics.types import NonUnicodeStringType, JSONType, CustomDecimalType
SCHEMATICS_TO_SQLALCHEMY_TYPE_MAP = {
sch_types.StringType: sa_types.Unicode,
NonUnicodeStringType: sa_types.String,
sch_types.IntType: sa_types.Integer,
sch_types.DateTimeType: sa_types.DateTime,
# TODO(dmu) HIGH: Is sch_types.DecimalType really supported?
sch_types.DecimalType: sa_types.Numeric,
CustomDecimalType: sa_types.Numeric,
sch_types.FloatType: sa_types.Float,
sch_types.BooleanType: sa_types.Boolean,
JSONType: sa_types.JSON,
sch_types.ModelType: sa_types.JSON,
sch_types.ListType: sa_types.JSON,
}
def get_sqlalchemy_type(schematics_type):
sqlalchemy_type = SCHEMATICS_TO_SQLALCHEMY_TYPE_MAP.get(schematics_type)
if sqlalchemy_type:
return sqlalchemy_type
for from_type, to_type in iteritems(SCHEMATICS_TO_SQLALCHEMY_TYPE_MAP):
if issubclass(schematics_type, from_type):
return to_type
raise ValueError('Unsupported schematics type: {}'.format(schematics_type))
def schematics_field_to_sqlalchemy_column(field):
schematics_type = type(field)
sqlalchemy_type = get_sqlalchemy_type(schematics_type)
kwargs = {}
if issubclass(schematics_type, sch_types.StringType):
kwargs['length'] = field.max_length
if issubclass(schematics_type, CustomDecimalType):
kwargs['precision'] = field.precision
kwargs['scale'] = field.scale
column_kwargs = {
'nullable': not field.required
}
if field._default is not Undefined:
column_kwargs['default'] = field._default
return Column(sqlalchemy_type(**kwargs), **column_kwargs)
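# Hedged usage sketch (the field definition is hypothetical):
#   field = sch_types.StringType(max_length=50, required=True)
#   schematics_field_to_sqlalchemy_column(field)
#   # -> Column(Unicode(length=50), nullable=False)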
| [
"dmugtasimov@gmail.com"
] | dmugtasimov@gmail.com |
719073220d1c46074a1cbeaab8a80b7b66eec773 | c53b3e120c59557daaa2fa5b7626413105eb5965 | /tendenci/libs/storage.py | a9fe3d80c705775eb3eb7fb84553790eb2a4c435 | [] | no_license | chendong0444/ams | 8483334d9b687708d533190b62c1fa4fd4690f2c | f2ac4ecc076b223c262f2cde4fa3b35b4a5cd54e | refs/heads/master | 2021-05-01T03:59:18.682836 | 2018-07-23T06:33:41 | 2018-07-23T06:33:41 | 121,194,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | from django.conf import settings
from django.core.files.storage import get_storage_class
def get_default_storage():
"""
Get a default storage class.
"""
return get_storage_class(settings.DEFAULT_FILE_STORAGE)()
def get_static_storage():
"""
Get a static storage class.
"""
return get_storage_class(settings.STATICFILES_STORAGE)()
def get_file_content(name, storage_type='default'):
"""
Get the file content from the specified storage.
"""
if storage_type == 'static':
storage = get_static_storage()
else:
storage = get_default_storage()
f = storage.open(name)
content = f.read()
f.close()
return content
def save_file_content(name, content, storage_type='default'):
"""
Save the file content to the specified storage.
"""
if storage_type == 'static':
storage = get_static_storage()
else:
storage = get_default_storage()
return storage.save(name, content)
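# Illustrative usage (paths are hypothetical):
#   save_file_content('exports/report.txt', 'hello world')  # default storage
#   css = get_file_content('css/site.css', storage_type='static')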
| [
"chendong@shinezone.com"
] | chendong@shinezone.com |
acb3cf104d10bcfe884c85399d7498730baca759 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part004318.py | 06d607cf05ac691993249723299536d42c25a9a7 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,604 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher139719(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.3.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.0_1', 1, 1, S(1)), Mul)
]),
1: (1, Multiset({}), [
(VariableWithCount('i2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.0', 1, 1, S(1)), Mul)
]),
2: (2, Multiset({0: 1}), [
(VariableWithCount('i2.3.0', 1, 1, S(1)), Mul)
]),
3: (3, Multiset({1: 1}), [
(VariableWithCount('i2.3.0', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher139719._instance is None:
CommutativeMatcher139719._instance = CommutativeMatcher139719()
return CommutativeMatcher139719._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 139718
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 140737
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.3.1', tmp3)
except ValueError:
pass
else:
pass
# State 140738
if len(subjects2) >= 1:
tmp5 = subjects2.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.3.2', tmp5)
except ValueError:
pass
else:
pass
# State 140739
if len(subjects2) == 0:
pass
# State 140740
if len(subjects) == 0:
pass
# 0: x**p
yield 0, subst2
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.3.2.0', S(0))
except ValueError:
pass
else:
pass
# State 141470
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.3.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 141471
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.3.2.1.2', S(1))
except ValueError:
pass
else:
pass
# State 141472
if len(subjects2) >= 1:
tmp10 = subjects2.popleft()
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.3.2.1.1', tmp10)
except ValueError:
pass
else:
pass
# State 141473
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp12 = subjects2.popleft()
# State 141474
if len(subjects2) == 0:
pass
# State 141475
if len(subjects) == 0:
pass
# 1: 1/(a + b*x**n)
yield 1, subst4
subjects2.appendleft(tmp12)
subjects2.appendleft(tmp10)
if len(subjects2) >= 1 and isinstance(subjects2[0], Pow):
tmp13 = subjects2.popleft()
subjects14 = deque(tmp13._args)
# State 141476
if len(subjects14) >= 1:
tmp15 = subjects14.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.3.2.1.1', tmp15)
except ValueError:
pass
else:
pass
# State 141477
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.3.2.1.2', 1)
except ValueError:
pass
else:
pass
# State 141478
if len(subjects14) == 0:
pass
# State 141479
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp18 = subjects2.popleft()
# State 141480
if len(subjects2) == 0:
pass
# State 141481
if len(subjects) == 0:
pass
# 1: 1/(a + b*x**n)
yield 1, subst4
subjects2.appendleft(tmp18)
if len(subjects14) >= 1:
tmp19 = subjects14.popleft()
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.3.2.1.2', tmp19)
except ValueError:
pass
else:
pass
# State 141478
if len(subjects14) == 0:
pass
# State 141479
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp21 = subjects2.popleft()
# State 141480
if len(subjects2) == 0:
pass
# State 141481
if len(subjects) == 0:
pass
# 1: 1/(a + b*x**n)
yield 1, subst4
subjects2.appendleft(tmp21)
subjects14.appendleft(tmp19)
subjects14.appendleft(tmp15)
subjects2.appendleft(tmp13)
if len(subjects2) >= 1 and isinstance(subjects2[0], Mul):
tmp22 = subjects2.popleft()
associative1 = tmp22
associative_type1 = type(tmp22)
subjects23 = deque(tmp22._args)
matcher = CommutativeMatcher141483.get()
tmp24 = subjects23
subjects23 = []
for s in tmp24:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp24, subst1):
pass
if pattern_index == 0:
pass
# State 141490
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp25 = subjects2.popleft()
# State 141491
if len(subjects2) == 0:
pass
# State 141492
if len(subjects) == 0:
pass
# 1: 1/(a + b*x**n)
yield 1, subst2
subjects2.appendleft(tmp25)
subjects2.appendleft(tmp22)
if len(subjects2) >= 1 and isinstance(subjects2[0], Add):
tmp26 = subjects2.popleft()
associative1 = tmp26
associative_type1 = type(tmp26)
subjects27 = deque(tmp26._args)
matcher = CommutativeMatcher141494.get()
tmp28 = subjects27
subjects27 = []
for s in tmp28:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp28, subst0):
pass
if pattern_index == 0:
pass
# State 141511
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp29 = subjects2.popleft()
# State 141512
if len(subjects2) == 0:
pass
# State 141513
if len(subjects) == 0:
pass
# 1: 1/(a + b*x**n)
yield 1, subst1
subjects2.appendleft(tmp29)
subjects2.appendleft(tmp26)
subjects.appendleft(tmp1)
return
yield
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from .generated_part004319 import *
from matchpy.utils import VariableWithCount
from multiset import Multiset
from .generated_part004320 import * | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |