blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
efa7461ea64952459e66e1148200f28b73b950ac | bdcfab7fef115312988ef7d2bf4f375ab0723e51 | /test/myself/TestFile.py | 1a105c4d101ed1f5621b1671181223668f23ea7b | [] | no_license | ChenLaiHong/pythonBase | c2587bfa78fbbec225c364c297394131fa2f6f37 | 00bf1b404be8714f907c7750a60c052e83555b3e | refs/heads/master | 2020-03-11T00:57:13.403866 | 2019-09-19T11:55:09 | 2019-09-19T11:55:09 | 129,676,746 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | # 文件操作
# 打开文件
# "r"只读;
# "w"只写,文件不存在会自动创建;指针在文件头
# "a"追加,文件不存在会自动创建,指针在文件末尾
# f = open("test.txt", "a", encoding="utf-8")
#
# # 读写操作
# # content = f.read()
# # print(content)
#
# f.write("张三、李四")
#
# # 关闭文件
# f.close()
# 操作图片,增加b是操作二进制
# fromFile = open("xx.jpg", "rb")
# fromContent = fromFile.read()
# print(fromContent)
# fromFile.close()
#
# toFile = open("xx2.jpg", "wb")
# content = fromContent[0: len(fromContent) // 2]
# toFile.write(content)
# fromFile.close()
# 增加+
# "r+"可读可写,先读再写的话不会覆盖原来的只是在后面添加
# 定位
# f.seek(偏移量,[0,1,2]),0:开头;1:当前位置;2:文件末尾(偏移量只能是负的)
# 注意:文本文件的操作模式下(不带b)只能写0
# 如果想写1/2,必须在二进制文件操作模式下(带b)
# f = open("test.txt", "r", encoding="utf-8")
# # 打印当前指针的位置,文件指针默认在文件最开始的地方
# print(f.tell())
# # 将指针移动三位(seek方法用来移动指针的位置)
# f.seek(3)
# # 再次打印指针的位置
# print(f.tell())
# # 读取当前指针位置到文件最后的内容
# print(f.read())
# f.close()
# f = open("xx.jpg", "rb")
# # 打印当前指针的位置,文件指针默认在文件最开始的地方
# print(f.tell())
# # 将指针移动三位(seek方法用来移动指针的位置)
# f.seek(-3, 2)
# # 再次打印指针的位置
# print(f.tell())
# # 读取当前指针位置到文件最后的内容
# print(f.read())
# f.close()
# f.read(size): size = number of characters to read; defaults to the whole file.
# Use a context manager so the handle is closed even if reading/printing raises
# (the original open()/close() pair leaked the handle on error).
with open("test.txt", "r", encoding="utf-8") as f:
    print(f.read(3))
    # Move the pointer two characters in and read three more:
    # f.seek(2)
    # print(f.read(3))
# f.readLine([limit]):读取一行数据
# print(f.readline(), end="")
# f.readLines()会自动的将文件按换行符进行处理
# 将处理的每一行组成一个列表返回
# print(f.readlines())
# for i in f:
# print(i, end="")
# import os
# os.rename("x.jpg", "xx.jpg")
| [
"1185630400@qq.com"
] | 1185630400@qq.com |
082f1032d60a7484435d669338999b4da1b246a5 | 2a871d3f15bfd1b9d4cd962f681503eaf0a535cc | /sample_player/sample_walker.py | 1ce5ba9d7a8f8e6089ea607cc3de637181bed503 | [] | no_license | PFCM/generative-music | 937046ff30eafc5db6cdbb361be6c7732fbebbd5 | 481b216f2ae795b0ac48a57b83e92fde082b34e9 | refs/heads/master | 2021-04-28T09:38:41.065310 | 2018-09-06T09:47:42 | 2018-09-06T09:47:42 | 122,046,080 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,722 | py | """
Load some embeddings and do a random walk through them.
"""
import itertools
import os
import sys
import time
from functools import partial
from itertools import chain, islice
from multiprocessing.dummy import Pool
import librosa
import networkx as nx
import numpy as np
import pyo
from sklearn.neighbors import kneighbors_graph
from organise import read_embeddings
def embedding_neighbours_graph(filenames, embeddings, neighbours=2):
    """Build an undirected networkx graph whose nodes are `filenames`.

    Each embedding is linked to its `neighbours` nearest neighbours under
    euclidean distance; edge weights carry the distances.
    """
    adjacency = kneighbors_graph(embeddings, neighbours, p=2, mode='distance')
    graph = nx.Graph(adjacency)
    index_to_name = {index: name for index, name in enumerate(filenames)}
    return nx.relabel_nodes(graph, index_to_name, copy=False)
def printer(gen, ident=''):
    """Pass every element of *gen* through unchanged, printing each one
    (prefixed with *ident*) as it goes by."""
    for element in gen:
        line = '{}{}'.format(ident, element)
        print(line)
        yield element
def random_walk(graph, start_node):
    """Endlessly wander `graph` starting at `start_node`, yielding each node
    visited (the start node first).  The generator never terminates; slice it
    (e.g. with itertools.islice) to obtain a finite walk."""
    node = start_node
    while True:
        yield node
        candidates = list(graph.neighbors(node))
        node = np.random.choice(candidates)
def random_walkers(graph, start_nodes, times, callback):
    """Run several concurrent random walks with exponentially distributed
    step times.  Blocks until a keyboard interrupt.

    `start_nodes` maps a walker key to its starting node; `times` maps the
    same keys to the mean (scale) of the exponential distribution used for
    that walker's waiting times.  Each time a walker steps,
    ``callback(key, node)`` is invoked with the newly visited node (the very
    first step reports the start node itself).
    """
    # Fix the iteration order of the walkers once.  (The original unpacked
    # `zip(*start_nodes.items())` into `keys, starters` but never used
    # `starters`; iterating the dict yields the same keys in the same order.)
    keys = tuple(start_nodes)
    rates = np.array([times[k] for k in keys])
    wait_times = np.random.exponential(rates)
    walkers = [random_walk(graph, start_nodes[key]) for key in keys]
    while True:
        try:
            # Sleep until the soonest walker is due, then advance every
            # walker whose timer has (effectively) expired.
            wait = np.min(wait_times)
            time.sleep(wait)
            wait_times -= wait
            # Small tolerance absorbs floating-point round-off.
            changes, = np.where(wait_times < 1e-7)
            for result in changes:
                callback(keys[result], next(walkers[result]))
                wait_times[result] = np.random.exponential(rates[result])
        except KeyboardInterrupt:
            return
def walker(graph, args, length=10):
    """Random walk through a component of the sound graph, playing as we go.

    `args` is a `(component_number, component_node_set)` pair, matching the
    output of `enumerate(nx.connected_components(...))`.  Blocks until the
    whole walk has finished playing.
    """
    num, component = args
    start = np.random.choice(list(component))
    # Walk length scales with component size, with a floor of 5 samples.
    files = islice(random_walk(graph, start), max(5, len(component) // length))
    print('{}--{}'.format(num, len(component)))
    files = printer(files, '~~~~{}~'.format(num))
    # Sample paths are stored relative to the directory of the embeddings
    # file (sys.argv[1] is the embeddings path -- see main()).
    samples = (os.path.join(os.path.dirname(sys.argv[1]), path)
               for path in files)
    player = pyo.SfPlayer(next(samples), mul=0.1)
    # NOTE(review): the TrigFunc is bound to a local on purpose -- presumably
    # pyo would otherwise garbage-collect it and the end-of-sample trigger
    # would stop firing; confirm before removing the "unused" variable.
    trig = pyo.TrigFunc(player['trig'], make_looper(samples, player))
    player.out()
    # Poll (1 s granularity) until the looper has exhausted the samples.
    while player.isPlaying():
        time.sleep(1)
def make_looper(gen, player):
    """Build a pyo trigger callback that, on each call, stops *player* and
    advances it to the next sample path from *gen*; once *gen* is exhausted
    the callback just stops playback."""
    def _advance():
        player.stop()
        try:
            next_path = next(gen)
        except StopIteration:
            return
        player.setPath(next_path)
        player.out()
    return _advance
def main():
    """quick test: play a random walk over every connected component.

    Usage: sample_walker.py <embeddings-file>
    """
    embeddings = read_embeddings(sys.argv[1])
    graph = embedding_neighbours_graph(*embeddings)
    print('{} components'.format(nx.number_connected_components(graph)))
    # Single-channel audio server; duplex=0 disables audio input.
    server = pyo.Server(nchnls=1, duplex=0).boot()
    server.start()
    # multiprocessing.dummy.Pool is thread-based, so every walker shares the
    # single pyo server started above.
    with Pool(8) as pool:
        results = pool.imap_unordered(
            partial(walker, graph), enumerate(nx.connected_components(graph)))
        # Drain the iterator so we block until every component's walk ends.
        _ = list(results)
# Script entry point: expects the embeddings file path as the sole argument.
if __name__ == '__main__':
    main()
| [
"pfcmathews@gmail.com"
] | pfcmathews@gmail.com |
ce82c00ac5d6fb5049cfe4eeb9fcf1d8cbf86f55 | 99f02678101b1790a982301824aa14ed0140c21b | /backend/hawaii_hop_27263/wsgi.py | 35ac4077780e887db0b65c0dbc572bf4d2bbb114 | [] | no_license | crowdbotics-apps/hawaii-hop-27263 | 5128b9057e952375c421e4347ed9b280ffefe127 | 7ae5d451ee9217011921e2874d01e93f1cc07017 | refs/heads/master | 2023-05-03T20:17:25.616012 | 2021-05-22T22:11:16 | 2021-05-22T22:11:16 | 369,914,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for hawaii_hop_27263 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hawaii_hop_27263.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
ba8be02377db6cac60f7f9ce5c89b1fcf36d52d4 | 0b9470f9a839d87b21fd575421b5223afb4573c6 | /04day/09-保护对象方法.py | b6a8997e6e6a1ba485c6cbd32ab9f59a0547bc71 | [] | no_license | ZiHaoYa/1808 | 351356b4fa920a5075899c8abdce24a61502097f | 891582547fef4c6fd4fd4132da033e48e069901f | refs/heads/master | 2020-03-30T06:20:46.898840 | 2018-09-29T08:56:53 | 2018-09-29T08:56:53 | 147,603,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | class Tencent():
def __vip(self):#最核心的方法 私有方法
print("开会员成功")
def open(self,money):#判断钱够不够
if money > 10:
self.__vip()
else:
print("QQ币不足")
# Demo: 12 QQ coins is more than 10, so open() takes the private __vip() path.
qq = Tencent()
qq.open(12)
#qq.vip()  # would raise AttributeError: __vip is name-mangled (private)
| [
"qingyuan@geekniu.com"
] | qingyuan@geekniu.com |
44ddb5ba007d7311a9825af8b5f62b3c330df6df | 4d2238210813c1581bf44f64d8a63196f75d2df4 | /craking/exercise1/listPairToGetSum.py | a1b6f9664ea3f6108ac27c44ed6d15581fd24d99 | [] | no_license | wwtang/code02 | b1600d34907404c81fa523cfdaa74db0021b8bb3 | 9f03dda7b339d8c310c8a735fc4f6d795b153801 | refs/heads/master | 2020-12-24T14:10:33.738734 | 2012-12-14T04:24:47 | 2012-12-14T04:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | """
Given an array of integers, list out all elements that add up to a given sum X with time complexity O(nlgn)
A = {6,4,5,7,9,1,2}
Sum = 10 Then the pairs are - {6,4} , {9,1}
Two methods:
1, sort the array, two pointer begin from the start and the end
sorting takes O(nlgn), iteration takes o(n) Overall Nlg(n)
2. dumps the array into dict, compute the difference of givenSum with current value, look up the difference in the dict
time complexity O(n)
space complexity O(n)
"""
def checkSum(arr, gsum):
    """Two-pointer scan over the sorted array.

    Sorts `arr` in place, then walks a low/high pointer pair inwards,
    collecting every (low, high) pair that sums to `gsum`.
    Returns None for an empty input.  O(n log n) time, O(1) extra space.
    """
    if not arr:
        return None
    arr.sort()  # note: mutates the caller's list
    pairs = []
    lo, hi = 0, len(arr) - 1
    while lo < hi:
        total = arr[lo] + arr[hi]
        if total > gsum:
            hi -= 1
        elif total < gsum:
            lo += 1
        else:
            pairs.append((arr[lo], arr[hi]))
            lo += 1
            hi -= 1
    return pairs
def checkSum2(arr, gsum):
    """
    Hash-map approach: one pass to count occurrences, one pass over the
    distinct values to collect pairs.

    Returns each pair exactly once as (low, high), or None for empty input.
    Time O(n), space O(n).

    Fixes two defects of the naive lookup version: a value equal to gsum/2
    is only paired with itself when it occurs at least twice (the old code
    reported (5, 5) from a single 5), and mirrored duplicates such as
    (4, 6)/(6, 4) are no longer emitted -- consistent with checkSum().
    """
    if len(arr) == 0:
        return None
    counts = {}
    for v in arr:
        counts[v] = counts.get(v, 0) + 1
    res = []
    for value in counts:
        diff = gsum - value
        if diff > value and diff in counts:
            res.append((value, diff))
        elif diff == value and counts[value] >= 2:
            # Self-pair only valid when the value really occurs twice.
            res.append((value, diff))
    return res
def checkSum3(arr, gsum):
    """
    Sort + binary search, using the stdlib bisect module.

    Sorts `arr` in place, then for each element binary-searches for its
    complement among the *later* elements only.  Restricting the search to
    later indexes both avoids pairing an element with itself (the old code
    reported (5, 5) from a single 5 because its hand-rolled search could hit
    the same position) and suppresses mirrored duplicates.

    Returns each pair exactly once as (low, high), or None for empty input.
    O(n log n) time.
    """
    import bisect  # local import: this exercise file has no import section
    if len(arr) == 0:
        return None
    arr.sort()  # in place, matching the other variants
    res = []
    for idx, value in enumerate(arr):
        diff = gsum - value
        if diff < value:
            continue  # the pair (diff, value) was already considered
        pos = bisect.bisect_left(arr, diff, idx + 1)
        if pos < len(arr) and arr[pos] == diff:
            res.append((value, diff))
    return res
def binarySearch(arr, target):
    """Return `target` if it is present in the sorted list `arr`, else None.

    Fixes the original upper bound: `j` started at len(arr), so searching for
    a target larger than every element walked `mid` off the end and raised
    IndexError.  Also uses floor division so `mid` stays an int on Python 3
    (the original `(i+j)/2` yields a float there).
    """
    if len(arr) == 0:
        return None
    i = 0
    j = len(arr) - 1
    while i <= j:
        mid = (i + j) // 2
        if arr[mid] > target:
            j = mid - 1
        elif arr[mid] < target:
            i = mid + 1
        else:
            return arr[mid]
    return None  # explicit: target not found
def main():
    """Demo driver: run all three pair-finding variants on the same data.

    Uses print() function calls, which work under both Python 2 and 3 (the
    original used Python-2-only print statements).  Note that checkSum()
    sorts `arr` in place, so the later calls receive the sorted list; all
    three variants are order-insensitive anyway.
    """
    arr = [6, 4, 5, 7, 9, 1, 2]
    gsum = 10
    print(checkSum(arr, gsum))
    print(checkSum2(arr, gsum))
    print(checkSum3(arr, gsum))
if __name__=="__main__":
main() | [
"andytang1994@gmail.com"
] | andytang1994@gmail.com |
941580ec7caf0ea5b4cc0b5f27f63d9e1b1c79f1 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/webpubsub/azure-messaging-webpubsubservice/samples/integration_sample.py | 20680a5da983cf49176db9486613de8c1233f116 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,634 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import threading
import logging
import time
import json
import os
from websocket import WebSocketApp
from typing import List, Optional
from azure.messaging.webpubsubservice import WebPubSubServiceClient
# Configure root logging once at import time; LOG is the module-wide logger.
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger()
class WebsocketClientsManager:
    """Owns a pool of websocket clients connected to an Azure Web PubSub hub.

    Tracks the raw clients, the connection ids the service reports during
    each handshake, and the payload of every data message any client sees.
    """

    def __init__(self) -> None:
        self.clients = []          # WebSocketApp instances, in creation order
        self.connection_ids = []   # filled as each client finishes connecting
        self.recv_messages = []    # payloads of every "message" frame received

    def add_client(self, service: WebPubSubServiceClient, user_id: str, groups: Optional[List[str]] = None):
        """Register one more client authenticated as *user_id* (optionally
        pre-joined to *groups*); call start_all() afterwards to connect."""

        def on_message(websocket_app: WebSocketApp, message: str):
            frame = json.loads(message)
            kind = frame["type"]
            if kind == "message":
                self.recv_messages.append(frame["data"])
            if kind == "system" and frame["event"] == "connected":
                self.connection_ids.append(frame["connectionId"])
            LOG.debug(frame)

        def on_open(websocket_app: WebSocketApp):
            LOG.debug("connected")

        token = service.get_client_access_token(groups=groups, user_id=user_id)
        socket_client = WebSocketApp(
            token["url"],
            subprotocols=['json.webpubsub.azure.v1'],
            on_open=on_open,
            on_message=on_message)
        self.clients.append(socket_client)

    def start_all(self):
        """Run every registered client on a daemon thread and busy-wait until
        each one has reported its connection id."""
        for socket_client in self.clients:
            threading.Thread(target=socket_client.run_forever, daemon=True).start()
        LOG.debug("Waiting for all clients connected...")
        while len(self.connection_ids) != self.client_number:
            pass

    @property
    def client_number(self):
        """Number of clients registered so far."""
        return len(self.clients)
def test_overall_integration(webpubsub_connection_string: str):
    """End-to-end smoke test against a live Azure Web PubSub resource.

    Connects five websocket clients (User0..User4, all in "InitGroup"),
    exercises broadcast / group / user / filtered sends and group-membership
    management, then asserts on exactly which messages the clients received.
    """
    # build a service client from the connection string.
    service = WebPubSubServiceClient.from_connection_string(webpubsub_connection_string, hub='hub', logging_enable=False)
    # build multiple websocket clients connected to the Web PubSub service
    clients = WebsocketClientsManager()
    for i in range(5):
        clients.add_client(service, user_id="User%d" % clients.client_number, groups=["InitGroup"])
    clients.start_all()
    # test naive send_to_all
    service.send_to_all(message='Message_For_All', content_type='text/plain') # N messages
    # test if generating token with the initial group is working
    service.send_to_group(group="InitGroup", message='Message_For_InitGroup', content_type='text/plain') # N messages
    # test if parameter "filter" in send is working
    service.send_to_all("Message_Not_For_User0", filter="userId ne 'User0'", content_type='text/plain') # N - 1 messages
    # test if remove_connection_from_all_groups works
    group_names = ["Group%d" % i for i in range(3)]
    for group in group_names:
        service.add_connection_to_group(group, clients.connection_ids[0])
        # First round: client 0 is a member, so each send is delivered.
        service.send_to_group(group, "Message_For_RemoveFromAll", content_type='text/plain')
    service.remove_connection_from_all_groups(clients.connection_ids[0])
    for group in group_names:
        # Second round: after removal nothing should be delivered.
        service.send_to_group(group, "Message_For_RemoveFromAll", content_type='text/plain')
    # other tests
    service.send_to_user("User0", message='Message_For_User0', content_type='text/plain') # 1 messages
    # Give the service time to fan the messages out before asserting.
    time.sleep(5)
    LOG.info("Received Message: ", clients.recv_messages)
    assert service.group_exists("InitGroup") == True
    assert clients.recv_messages.count("Message_For_All") == clients.client_number
    assert clients.recv_messages.count("Message_For_InitGroup") == clients.client_number
    assert clients.recv_messages.count("Message_Not_For_User0") == clients.client_number - 1
    assert clients.recv_messages.count("Message_For_User0") == 1
    # Only the three first-round group sends reach client 0.
    assert clients.recv_messages.count("Message_For_RemoveFromAll") == 3
    LOG.info("Complete All Integration Test Successfully")
# Script entry point: requires WEBPUBSUB_CONNECTION_STRING in the environment.
if __name__ == "__main__":
    try:
        connection_string = os.environ['WEBPUBSUB_CONNECTION_STRING']
    except KeyError:
        # Fixed message typo ("set if" -> "set it") and exit with a non-zero
        # status (the old exit() reported success) via the SystemExit builtin.
        LOG.error("Missing environment variable 'WEBPUBSUB_CONNECTION_STRING' - please set it before running the example")
        raise SystemExit(1)
test_overall_integration(connection_string) | [
"noreply@github.com"
] | Azure.noreply@github.com |
45f7f8b8d105147e9c5231b74163145d87771c88 | c8785278ccc04221bc4f618acfb6a639b452d13f | /python/xor_linear_space.py | 17ae40b0518c304dfaf7bc883d3f1bea5ce94837 | [
"MIT"
] | permissive | fishilico/shared | 14edd274df12057ae1dc294fe10715ddc969e796 | a846ce38894f749082405d9a86fcf13d0bf5b992 | refs/heads/master | 2023-08-31T15:58:35.327197 | 2023-08-27T05:12:43 | 2023-08-27T05:12:43 | 14,258,504 | 35 | 15 | null | null | null | null | UTF-8 | Python | false | false | 8,775 | py | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
# Copyright (c) 2018 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Work on vectors from GF(2) = Z/2Z, to solve some linear equations
These functions are useful when working on some cryptography CTF contests,
where the encryption function is either linear (f(x^y) = f(x)^f(y)) or affine
(f(x^y)^f(0) = f(x)^f(y)).
For example RTFM Sigsegv1 Finals included such a challenge:
https://github.com/Read-The-Fancy-Manual/Finale-2018/tree/master/License_5
"""
import binascii
import functools
import itertools
import hashlib
# sage is optional: it is only used to cross-check the pure-Python linear
# algebra (see is_collinear_with_sage and check_sha_base).
try:
    import sage.all
    HAVE_SAGE = True
except ImportError:
    HAVE_SAGE = False
else:
    # GF(2) = Z/2Z = {0, 1}
    GF2 = sage.all.Zmod(2)
def decode_bigint_be(data):
    """Decode a Big-Endian big integer from a byte string.

    Returns 0 for empty input (the original fed the empty hex string to
    int(..., 16), which raises ValueError).
    """
    if not data:
        return 0
    return int(binascii.hexlify(data).decode('ascii'), 16)
def encode_bigint_be(value, bytelen=None):
    """Encode a non-negative integer as a Big-Endian byte string of
    `bytelen` bytes (default: the minimal length that holds the value)."""
    if bytelen is None:
        bytelen = (value.bit_length() + 7) // 8
    hexval = '{:x}'.format(value).rjust(bytelen * 2, '0')
    return binascii.unhexlify(hexval.encode('ascii'))
def decode_bigint_bitvec(bitvec):
    """Interpret a Big-Endian vector of bits as an unsigned integer."""
    binary_text = ''.join(map(str, bitvec))
    return int(binary_text, 2)
def encode_bigint_bitvec(value, bitlen=None):
    """Encode an unsigned integer as a Big-Endian vector of bits, padded
    with leading zeros to `bitlen` (default: value.bit_length())."""
    if bitlen is None:
        bitlen = value.bit_length()
    return [int(digit) for digit in format(value, 'b').zfill(bitlen)]
# Import-time sanity checks for the bit-vector codecs.
assert decode_bigint_bitvec([0, 1, 0, 0]) == 4
assert encode_bigint_bitvec(42, 8) == [0, 0, 1, 0, 1, 0, 1, 0]
def triangulate_vectors(basis):
    """Gaussian elimination over GF(2) on integer-encoded bit vectors.

    Returns a triangulated copy of `basis` together with, for each output
    vector, the set of indices of the *original* vectors whose XOR produces
    it.  The input list is left untouched.
    """
    n = len(basis)
    vectors = list(basis)
    combos = [{i} for i in range(n)]
    for step in range(n - 1):
        low_bits = (1 << (step + 1)) - 1
        # Find, from `step` onwards, a vector with a non-zero low part.
        pivot = step
        while pivot < n and not (vectors[pivot] & low_bits):
            pivot += 1
        if pivot >= n:
            # Every remaining vector vanishes on these bits.
            continue
        # Bring the pivot into position.
        vectors[step], vectors[pivot] = vectors[pivot], vectors[step]
        combos[step], combos[pivot] = combos[pivot], combos[step]
        # Clear the low bits of every row below the pivot.
        for row in range(step + 1, n):
            if vectors[row] & low_bits:
                vectors[row] ^= vectors[step]
                # Set XOR is exactly symmetric difference.
                combos[row] = combos[row] ^ combos[step]
    return vectors, combos
def diagonalize_vectors(basis):
    """Reduce `basis` to the canonical basis and return, per bit position,
    the set of original-vector indices whose XOR yields that unit vector.

    Requires `basis` to span the whole space (asserted).
    """
    vectors, combos = triangulate_vectors(basis)
    n = len(basis)
    for row in range(n):
        # Clear the higher bits of this row using the rows that own them.
        for upper_bit in range(row + 1, n):
            if vectors[row] & (1 << upper_bit):
                vectors[row] ^= vectors[upper_bit]
                combos[row] = combos[row] ^ combos[upper_bit]
    assert all(vectors[i] == 1 << i for i in range(n))
    return combos
def is_collinear(vector, basis):
    """Test membership of `vector` in the GF(2) linear span of `basis`.

    Returns the set of basis indices whose XOR reproduces `vector` when it
    lies in the span (the empty set for the zero vector), and None otherwise.
    """
    tri_vectors, tri_combos = triangulate_vectors(basis)
    remainder = vector
    used = set()
    for i, tri_vec in enumerate(tri_vectors):
        if remainder & tri_vec & (1 << i):
            remainder ^= tri_vec
            used = used ^ tri_combos[i]
    if remainder:
        return None
    return used
def is_collinear_with_sage(vector, basis):
    """Cross-check of is_collinear using sage: `vector` lies in the span of
    `basis` iff appending it to the basis does not raise the matrix rank."""
    bitlen = max(v.bit_length() for v in itertools.chain(basis, [vector]))
    rows = [encode_bigint_bitvec(v, bitlen) for v in basis]
    rows.append(encode_bigint_bitvec(vector, bitlen))
    rank = sage.all.matrix(GF2, rows).rank()
    return rank != len(basis) + 1
def check_sha_base(bitsize):
    """Use a base made of SHA-2 hashes.

    Greedily collects `bitsize` linearly independent SHA-`bitsize` digests
    (of the ASCII counters "0", "1", ...), diagonalizes the resulting basis,
    cross-checks it against sage when available, and finally reconstructs a
    test message as a XOR combination of digests.
    """
    hash_functions = {
        256: hashlib.sha256,
        384: hashlib.sha384,
        512: hashlib.sha512,
    }
    hash_function = hash_functions[bitsize]
    print("Checking basis with SHA-{}".format(bitsize))
    basis_counters = []  # counters whose digests were kept (debug info only)
    basis = []
    triangular_basis = []
    counter = 0
    while len(basis) != bitsize:
        test_vector_bytes = hash_function(str(counter).encode('ascii')).digest()
        test_vector_int = decode_bigint_be(test_vector_bytes)
        # A truthy (non-empty) index set means the digest is a combination of
        # the digests collected so far, so it is skipped.
        if is_collinear(test_vector_int, triangular_basis):
            counter += 1
            continue
        basis_counters.append(counter)
        basis.append(test_vector_int)
        triangular_basis.append(test_vector_int)
        # Keep a triangulated copy so the next independence test stays cheap.
        triangular_basis = triangulate_vectors(triangular_basis)[0]
        counter += 1
    print("... Found basis after {} tests".format(counter))
    diag_basis_idx = diagonalize_vectors(basis)
    # Check that the algorithm worked: each index set XORs to a unit vector.
    for bitpos in range(bitsize):
        basis_vectors = [basis[idx] for idx in diag_basis_idx[bitpos]]
        value = functools.reduce(lambda x, y: x ^ y, basis_vectors)
        assert value == (1 << bitpos)
    # Use Sage if available
    if HAVE_SAGE:
        print("... Inverting the matrix with Sage")
        encoded_basis = [encode_bigint_bitvec(v, bitsize) for v in basis]
        sage_basis = sage.all.matrix(GF2, encoded_basis).transpose()
        assert sage_basis.rank() == bitsize
        # sage_basis * [1 0 0 0 0].transpose() = basis[0]
        # sage_basis * [0 1 0 0 0].transpose() = basis[1]
        # etc.
        # Now invert the basis
        sage_inv = sage_basis.inverse()
        test_vect = sage_inv * sage.all.matrix(GF2, encoded_basis[0]).transpose()
        assert test_vect[0][0] == 1
        assert all(test_vect[i][0] == 0 for i in range(1, bitsize))
        # Convert diag_basis_idx to a matrix (column bitsize-1-bitpos holds
        # the combination for bit `bitpos`) and check it equals sage's inverse.
        mat_for_diag_basis_idx_values = [[0] * bitsize for _ in range(bitsize)]
        for bitpos in range(bitsize):
            for idx in diag_basis_idx[bitpos]:
                mat_for_diag_basis_idx_values[idx][bitsize - bitpos - 1] = 1
        mat_for_diag_basis_idx = sage.all.matrix(GF2, mat_for_diag_basis_idx_values)
        assert mat_for_diag_basis_idx == sage_inv
    # Compute the coordinates for a given test vector and rebuild it from the
    # basis digests as an end-to-end demonstration.
    test_message = b'Hello, world!'
    test_vector = decode_bigint_be(test_message)
    test_indexes = set()
    for bitpos in range(bitsize):
        if (test_vector >> bitpos) & 1:
            test_indexes = test_indexes.symmetric_difference(diag_basis_idx[bitpos])
    linear_result = functools.reduce(lambda x, y: x ^ y, [basis[idx] for idx in test_indexes])
    linear_result_bytes = encode_bigint_be(linear_result)
    print("Obtained {} by combining {} SHA-{} digests".format(
        repr(linear_result_bytes), len(test_indexes), bitsize))
    assert linear_result_bytes == test_message
# Script entry point: exercise the 256-bit construction (384/512 work too but
# are slower, hence commented out).
if __name__ == '__main__':
    if not HAVE_SAGE:
        print("Module sage was not found. Package sagemath may not be installed or compatible with this Python")
    check_sha_base(256)
    # check_sha_base(384)
    # check_sha_base(512)
| [
"nicolas.iooss_git@polytechnique.org"
] | nicolas.iooss_git@polytechnique.org |
9a90883d80499bf0a1a21fa094cbd7dad34e97ca | 699606935f69a153d952b49f6f18df6bd51df1e0 | /tensorflow/python/data/experimental/kernel_tests/stats_dataset_ops_test.py | 4d794b4b8458d830361a4445f71da8f27360c2ae | [
"Apache-2.0"
] | permissive | NMarlon/tensorflow | 2c7d0cf81863508a23d97a9d21c0f06b28e9b075 | b21a78132f73f68bf636ba8e491d7f52a55523b3 | refs/heads/master | 2020-04-04T05:10:48.282828 | 2018-11-01T14:35:50 | 2018-11-01T14:39:54 | 155,737,478 | 1 | 0 | Apache-2.0 | 2018-11-01T15:38:34 | 2018-11-01T15:38:34 | null | UTF-8 | Python | false | false | 16,591 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline statistics gathering ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.experimental.ops import stats_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class StatsDatasetTest(stats_dataset_test_base.StatsDatasetTestBase):
  def testBytesProduced(self):
    # bytes_produced_stats should record, per element, both a count and the
    # cumulative byte size of the (variable-length int64) elements produced.
    stats_aggregator = stats_ops.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).map(
        lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).apply(
            stats_ops.bytes_produced_stats("bytes_produced")).apply(
                stats_ops.set_stats_aggregator(stats_aggregator))
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    summary_t = stats_aggregator.get_summary()

    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      expected_sum = 0.0
      for i in range(100):
        # Element i is [i]*i, i.e. i int64 values.
        self.assertAllEqual(
            np.array([i] * i, dtype=np.int64), sess.run(next_element))
        summary_str = sess.run(summary_t)
        self._assertSummaryHasCount(summary_str, "bytes_produced", float(i + 1))
        expected_sum += i * 8.0  # i values x 8 bytes per int64
        self._assertSummaryHasSum(summary_str, "bytes_produced", expected_sum)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
      summary_str = sess.run(summary_t)
      self._assertSummaryHasCount(summary_str, "bytes_produced", 100.0)
      self._assertSummaryHasSum(summary_str, "bytes_produced", expected_sum)
def testLatencyStats(self):
stats_aggregator = stats_ops.StatsAggregator()
dataset = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency")).apply(
stats_ops.set_stats_aggregator(stats_aggregator))
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
summary_t = stats_aggregator.get_summary()
with self.cached_session() as sess:
sess.run(iterator.initializer)
for i in range(100):
self.assertEqual(i, sess.run(next_element))
self._assertSummaryHasCount(
sess.run(summary_t), "record_latency", float(i + 1))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
self._assertSummaryHasCount(sess.run(summary_t), "record_latency", 100.0)
  def testPrefetchBufferUtilization(self):
    # Prefetch should publish buffer utilization/capacity/size stats.
    # prefetch(-1) presumably means "autotune the buffer size" -- confirm
    # against the prefetch op's AUTOTUNE constant.
    stats_aggregator = stats_ops.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).map(
        lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(
            -1).apply(stats_ops.set_stats_aggregator(stats_aggregator))
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    summary_t = stats_aggregator.get_summary()

    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      for i in range(100):
        self.assertAllEqual(
            np.array([i] * i, dtype=np.int64), sess.run(next_element))
        summary_str = sess.run(summary_t)
        self._assertSummaryHasCount(summary_str, "Prefetch::buffer_utilization",
                                    float(i + 1))
        self._assertSummaryContains(summary_str, "Prefetch::buffer_capacity")
        self._assertSummaryContains(summary_str, "Prefetch::buffer_size")
        # Utilization is a ratio, so every sample must lie within [0, 1].
        self._assertSummaryHasRange(summary_str, "Prefetch::buffer_utilization",
                                    0, 1)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
      summary_str = sess.run(summary_t)
      self._assertSummaryHasCount(summary_str, "Prefetch::buffer_utilization",
                                  100)
  def testPrefetchBufferScalars(self):
    # With prefetch(0) (no buffering) the reported buffer capacity and buffer
    # size scalars should both stay at 0 for every element.
    stats_aggregator = stats_ops.StatsAggregator()
    dataset = dataset_ops.Dataset.range(10).map(
        lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(
            0).apply(stats_ops.set_stats_aggregator(stats_aggregator))
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    summary_t = stats_aggregator.get_summary()

    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      for i in range(10):
        self.assertAllEqual(
            np.array([i] * i, dtype=np.int64), sess.run(next_element))
        summary_str = sess.run(summary_t)
        self._assertSummaryHasScalarValue(summary_str,
                                          "Prefetch::buffer_capacity", 0)
        self._assertSummaryHasScalarValue(summary_str, "Prefetch::buffer_size",
                                          0)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
def testFilteredElementsStats(self):
stats_aggregator = stats_ops.StatsAggregator()
dataset = dataset_ops.Dataset.range(101).filter(
lambda x: math_ops.equal(math_ops.mod(x, 3), 0)).apply(
stats_ops.set_stats_aggregator(stats_aggregator))
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
summary_t = stats_aggregator.get_summary()
with self.test_session() as sess:
sess.run(iterator.initializer)
for i in range(34):
self.assertEqual(i * 3, sess.run(next_element))
if i is not 0:
self._assertSummaryHasScalarValue(
sess.run(summary_t), "Filter::dropped_elements", float(i * 2))
self._assertSummaryHasScalarValue(
sess.run(summary_t), "Filter::filtered_elements", float(i + 1))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
self._assertSummaryHasScalarValue(
sess.run(summary_t), "Filter::dropped_elements", 67.0)
self._assertSummaryHasScalarValue(
sess.run(summary_t), "Filter::filtered_elements", 34.0)
def testMapBufferUtilization(self):
def dataset_fn():
return dataset_ops.Dataset.range(10).map(
lambda x: array_ops.tile([x], ops.convert_to_tensor([x])),
num_parallel_calls=4)
self._testParallelCallsStats(
dataset_fn, "ParallelMap", 10, function_processing_time=True)
  def testMapAutoTuneBufferUtilization(self):
    # Same as testMapBufferUtilization, but with autotuned parallelism and
    # the autotune option enabled on the dataset.

    def dataset_fn():
      dataset = dataset_ops.Dataset.range(10).map(
          lambda x: array_ops.tile([x], ops.convert_to_tensor([x])),
          num_parallel_calls=optimization.AUTOTUNE)
      options = dataset_ops.Options()
      options.experimental_autotune = True
      return dataset.with_options(options)

    self._testParallelCallsStats(
        dataset_fn, "ParallelMap", 10, function_processing_time=True)
  def testInterleaveAutoTuneBufferUtilization(self):
    # A single-cycle parallel interleave with autotuned parallelism should
    # publish stats under the "ParallelInterleaveV2" prefix.

    def dataset_fn():
      dataset = dataset_ops.Dataset.range(10).map(
          lambda x: array_ops.tile([x], ops.convert_to_tensor([x])))
      dataset = dataset_ops.Dataset.range(1).interleave(
          lambda _: dataset,
          cycle_length=1,
          num_parallel_calls=optimization.AUTOTUNE)
      options = dataset_ops.Options()
      options.experimental_autotune = True
      return dataset.with_options(options)

    self._testParallelCallsStats(dataset_fn, "ParallelInterleaveV2", 10)
  def testMapAndBatchAutoTuneBufferUtilization(self):
    # Fused map_and_batch with autotuned parallelism should publish stats
    # under the "MapAndBatch" prefix.

    def dataset_fn():
      dataset = dataset_ops.Dataset.range(100).apply(
          batching.map_and_batch(
              lambda x: array_ops.tile([x], ops.convert_to_tensor([2])),
              num_parallel_calls=optimization.AUTOTUNE,
              batch_size=16))
      options = dataset_ops.Options()
      options.experimental_autotune = True
      return dataset.with_options(options)

    # 100 elements in batches of 16 -> 6 full batches plus one partial batch.
    num_output = 100 // 16 + 1
    self._testParallelCallsStats(
        dataset_fn,
        "MapAndBatch",
        num_output,
        check_elements=False,
        function_processing_time=True)
  def testReinitialize(self):
    """Latency counts keep accumulating across iterator re-initializations."""
    stats_aggregator = stats_ops.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.set_stats_aggregator(stats_aggregator))
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    summary_t = stats_aggregator.get_summary()
    with self.cached_session() as sess:
      for j in range(5):
        sess.run(iterator.initializer)
        for i in range(100):
          self.assertEqual(i, sess.run(next_element))
          # The aggregator is shared across epochs, so counts carry over:
          # j full passes of 100 elements plus i + 1 in the current pass.
          self._assertSummaryHasCount(
              sess.run(summary_t), "record_latency", float((j * 100) + i + 1))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(next_element)
        self._assertSummaryHasCount(
            sess.run(summary_t), "record_latency", (j + 1) * 100.0)
def testNoAggregatorRegistered(self):
dataset = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency"))
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.cached_session() as sess:
sess.run(iterator.initializer)
for i in range(100):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
  def testMultipleTags(self):
    """Each latency tag is aggregated under its own summary name."""
    stats_aggregator = stats_ops.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.latency_stats("record_latency_2")).apply(
                stats_ops.set_stats_aggregator(stats_aggregator))
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    summary_t = stats_aggregator.get_summary()
    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      for i in range(100):
        self.assertEqual(i, sess.run(next_element))
        # Both tags see every element, so each count is i + 1.
        self._assertSummaryHasCount(
            sess.run(summary_t), "record_latency", float(i + 1))
        self._assertSummaryHasCount(
            sess.run(summary_t), "record_latency_2", float(i + 1))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
      self._assertSummaryHasCount(sess.run(summary_t), "record_latency", 100.0)
      self._assertSummaryHasCount(
          sess.run(summary_t), "record_latency_2", 100.0)
  def testRepeatedTags(self):
    """Applying the same latency tag twice doubles its recorded count."""
    stats_aggregator = stats_ops.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.latency_stats("record_latency")).apply(
                stats_ops.set_stats_aggregator(stats_aggregator))
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    summary_t = stats_aggregator.get_summary()
    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      for i in range(100):
        self.assertEqual(i, sess.run(next_element))
        # Two identical stats ops feed the same tag: 2 counts per element.
        self._assertSummaryHasCount(
            sess.run(summary_t), "record_latency", float(2 * (i + 1)))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
      self._assertSummaryHasCount(sess.run(summary_t), "record_latency", 200.0)
  def testMultipleIteratorsSameAggregator(self):
    """Two iterators over one dataset feed the same aggregator, doubling counts."""
    stats_aggregator = stats_ops.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.set_stats_aggregator(stats_aggregator))
    iterator_0 = dataset.make_initializable_iterator()
    iterator_1 = dataset.make_initializable_iterator()
    next_element = iterator_0.get_next() + iterator_1.get_next()
    summary_t = stats_aggregator.get_summary()
    with self.cached_session() as sess:
      sess.run([iterator_0.initializer, iterator_1.initializer])
      for i in range(100):
        # Each fetch advances both iterators, so the sum is i + i.
        self.assertEqual(i * 2, sess.run(next_element))
        self._assertSummaryHasCount(
            sess.run(summary_t), "record_latency", float(2 * (i + 1)))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
      self._assertSummaryHasCount(sess.run(summary_t), "record_latency", 200.0)
def testMultipleDatasetWithTags(self):
stats_aggregator = stats_ops.StatsAggregator()
dataset = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency")).apply(
stats_ops.set_stats_aggregator(stats_aggregator, "dataset1"))
dataset2 = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency")).apply(
stats_ops.set_stats_aggregator(stats_aggregator, "dataset2"))
iterator_0 = dataset.make_initializable_iterator()
iterator_1 = dataset2.make_initializable_iterator()
next_element = iterator_0.get_next() + iterator_1.get_next()
summary_t = stats_aggregator.get_summary()
with self.test_session() as sess:
sess.run([iterator_0.initializer, iterator_1.initializer])
for i in range(100):
self.assertEqual(i * 2, sess.run(next_element))
self._assertSummaryHasCount(
sess.run(summary_t), "dataset1_record_latency", float(i + 1))
self._assertSummaryHasCount(
sess.run(summary_t), "dataset2_record_latency", float(i + 1))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
self._assertSummaryHasCount(
sess.run(summary_t), "dataset1_record_latency", 100.0)
self._assertSummaryHasCount(
sess.run(summary_t), "dataset2_record_latency", 100.0)
class FeatureStatsDatasetTest(
    stats_dataset_test_base.StatsDatasetTestBase,
    reader_dataset_ops_test_base.MakeBatchedFeaturesDatasetTestBase):
  """Stats tests for the batched-features (ParseExample) dataset."""

  def testFeaturesStats(self):
    """Feature/feature-value counts and sums are recorded per record.

    Uses cached_session() instead of the deprecated test_session() alias,
    matching the rest of this file.
    """
    num_epochs = 5
    total_records = num_epochs * self._num_records
    batch_size = 2
    stats_aggregator = stats_ops.StatsAggregator()

    def dataset_fn():
      return self.make_batch_feature(
          filenames=self.test_filenames[0],
          num_epochs=num_epochs,
          batch_size=batch_size,
          shuffle=True,
          shuffle_seed=5,
          drop_final_batch=False)

    # Ceiling division: a trailing partial batch adds one more output.
    num_output = total_records // batch_size
    if total_records % batch_size:
      num_output = total_records // batch_size + 1

    self._testParallelCallsStats(
        dataset_fn, "ParseExample", num_output, check_elements=False)

    iterator = dataset_fn().apply(
        stats_ops.set_stats_aggregator(
            stats_aggregator, "record_stats")).make_initializable_iterator()
    next_element = iterator.get_next()
    summary_t = stats_aggregator.get_summary()
    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      for _ in range(num_output):
        sess.run(next_element)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
      self._assertSummaryHasCount(
          sess.run(summary_t), "record_stats_features", total_records)
      self._assertSummaryHasCount(
          sess.run(summary_t), "record_stats_feature-values", total_records)
      self._assertSummaryHasSum(
          sess.run(summary_t), "record_stats_features", total_records * 4)
      self._assertSummaryHasSum(
          sess.run(summary_t), "record_stats_feature-values",
          self._sum_keywords(1) * num_epochs + 3 * total_records)
if __name__ == "__main__":
  test.main()  # Run every test case defined in this module.
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
b833d9eb4d791fe08f8cb7fbedc1ee7f77c983ce | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Rn3g3hokznLu8ZtDP_4.py | 9a6e5bf2404857405d8c95ab030f03a3b3675f35 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | """
Write a function which increments a string to create a new string.
* **If the string ends with a number** , the number should be incremented by `1`.
* **If the string doesn't end with a number** , `1` should be **added** to the new string.
* **If the number has leading zeros** , the amount of digits **should be considered**.
### Examples
increment_string("foo") ➞ "foo1"
increment_string("foobar0009") ➞ "foobar0010"
increment_string("foo099") ➞ "foo100"
### Notes
N/A
"""
def increment_string(txt):
    """Increment the numeric counter embedded in *txt*.

    If *txt* does not end with a digit, append "1".  Otherwise all digits in
    the string (in order) form the counter: it is incremented and left-padded
    with zeros back to its original width, and appended after the letters.
    """
    if not txt[-1].isdigit():
        return txt + '1'
    digits = ''.join(ch for ch in txt if ch.isdigit())
    letters = ''.join(ch for ch in txt if ch.isalpha())
    bumped = str(int(digits) + 1)
    # zfill pads with leading zeros only when the new number is narrower.
    return letters + bumped.zfill(len(digits))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
710ddac897877db3f60b5650a5bbc440c9ed3017 | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1803699/homework03/program03.py | fcf92797b2788e70eb83311b15b208da45c244b8 | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,064 | py | from immagini import *
def ricolora(fname, lista, fnameout):
    """Recolour connected same-colour regions of the image in *fname*.

    Each element of *lista* is a 4-tuple (x, y, area_colour, border_colour):
    starting from seed pixel (x, y), the region of pixels sharing the seed's
    colour is flood-filled (4-connectivity, breadth-first).  Pixels whose four
    in-bounds neighbours all share the colour are "area" (interior) pixels;
    the rest (including pixels on the image edge) are "border" pixels.
    Interior pixels are repainted with area_colour, border pixels with
    border_colour.  The result is saved to *fnameout*.

    Returns a list of (interior_count, border_count) pairs, one per tuple.
    """
    img=load(fname)
    coppie=[]
    # j = image width, k = image height.
    j=len(img[0])
    k=len(img)
    for quadrupla in lista:
        areac=quadrupla[2]
        bordoc=quadrupla[3]
        # BFS frontier (baselist), next frontier, and visited set.
        baselist=set()
        nextlist=set()
        checkedlist=set()
        arealist=set()
        bordolist=set()
        # Region colour is taken from the seed pixel (note [row][col] order).
        color=img[quadrupla[1]][quadrupla[0]]
        areacounter=0
        bordocounter=0
        baselist.add((quadrupla[0],quadrupla[1]))
        while len(baselist)>0:
            for pixel in baselist:
                if pixel not in checkedlist:
                    x=pixel[0]
                    y=pixel[1]
                    # counter = number of in-bounds neighbours with the region colour.
                    counter=0
                    if x!=j-1:
                        if img[y][x+1]==color:
                            nextlist.add((x+1,y))
                            counter+=1
                    if y!=k-1:
                        if img[y+1][x]==color:
                            nextlist.add((x,y+1))
                            counter+=1
                    if x!=0:
                        if img[y][x-1]==color:
                            nextlist.add((x-1,y))
                            counter+=1
                    if y!=0:
                        if img[y-1][x]==color:
                            nextlist.add((x,y-1))
                            counter+=1
                    # All four neighbours match -> interior; otherwise border.
                    if counter==4:
                        arealist.add(pixel)
                        areacounter+=1
                    else:
                        bordolist.add(pixel)
                        bordocounter+=1
                    checkedlist.add(pixel)
            # Advance the BFS frontier.
            baselist=set()
            for i in nextlist:
                baselist.add(i)
            nextlist=set()
        # Repaint the collected pixels.
        for pixel in arealist:
            img[pixel[1]][pixel[0]]=areac
        for pixel in bordolist:
            img[pixel[1]][pixel[0]]=bordoc
        coppie.append((areacounter,bordocounter))
    save(img,fnameout)
    return coppie
| [
"a.sterbini@gmail.com"
] | a.sterbini@gmail.com |
5f0cfa1e7c2414a3499568a58470b9e2dcb72c07 | e9233367116ace8aac3218abcb8480ac236ac42f | /build/motor_arduino/cmake/motor_arduino-genmsg-context.py | 407947db99afa267b28a1e1dd203ffb9ad0fd72f | [] | no_license | zhangzhongwd/optical_waveguide_calibration | 2c695592d792b26aa8a3218faa86adbff478cd3b | 431d7e0e74885030648c17030f197efa3909a48c | refs/heads/master | 2020-11-29T08:19:12.000041 | 2019-12-25T08:14:20 | 2019-12-25T08:14:20 | 230,065,827 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
# Build context consumed by genmsg; values are substituted by CMake at
# configure time.  Edit the pkg-genmsg.context.in template, not this file.
messages_str = "/home/zhong/Sensor/src/motor_arduino/msg/Stepper.msg;/home/zhong/Sensor/src/motor_arduino/msg/Limit.msg"
services_str = ""
pkg_name = "motor_arduino"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "motor_arduino;/home/zhong/Sensor/src/motor_arduino/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'  # False unless configured to 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"zhangzhongwd@gmail.com"
] | zhangzhongwd@gmail.com |
22ff80f135fb0d20d7d0387a072fb397f970347f | 165538de7879dded2b9bd0694f3134b36e923b84 | /Python3/1624-Largest-Substring-Between-Two-Equal-Characters/soln.py | ce3fd3b1970776588f3cc8f42a2831594c716f51 | [
"MIT"
] | permissive | zhangyaqi1989/LeetCode-Solutions | 6f710153ec828e6d9e58a30ae8009f754cae4be6 | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | refs/heads/master | 2023-01-23T16:23:55.483396 | 2020-12-06T17:35:09 | 2020-12-06T17:35:09 | 270,014,622 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | class Solution:
def maxLengthBetweenEqualCharacters(self, s: str) -> int:
last_idxes = [-1] * 26
ans = -1
for i, ch in enumerate(s):
val = ord(ch) - ord('a')
if last_idxes[val] != -1:
temp = i - last_idxes[val] - 1
ans = max(ans, temp)
else:
last_idxes[val] = i
return ans
| [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
ea169c925512954473486672a50a844bef88ff5a | 77d808f47101202db6cec5a9eee6b38c55f73fde | /17. EXERCISE - Objects and Classes/09.py | f69eb464cd55efefcd817a0f4283f308600429d0 | [] | no_license | dimDamyanov/Py-Fundamentals | 2ce5591fbfebf8d95c832e3f7109b24e53dd721b | 5ccae5bfa456829d97e8773ee9f5eaa5f5051765 | refs/heads/main | 2023-01-29T22:21:07.788061 | 2020-12-13T08:11:04 | 2020-12-13T08:11:04 | 317,682,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | class Movie:
__watched_movies = 0
    def __init__(self, name: str, director: str) -> None:
        """Create a movie with a name and director; starts unwatched."""
        self.name = name
        self.director = director
        self.watched = False
    def change_name(self, new_name: str) -> None:
        """Rename the movie."""
        self.name = new_name
    def change_director(self, new_director: str) -> None:
        """Replace the movie's director."""
        self.director = new_director
def watch(self):
if not self.watched:
self.watched = True
Movie.__watched_movies += 1
    def __repr__(self):
        """Human-readable summary including the class-wide watched count.

        Movie.__watched_movies is name-mangled to _Movie__watched_movies,
        so this only works from inside the class body.
        """
        return f'Movie name: {self.name};' \
               f' Movie director: {self.director}. Total watched movies: {Movie.__watched_movies}'
"dim.damianov@gmail.com"
] | dim.damianov@gmail.com |
3f0202e1af943b8bbda704c6e356788f663611fd | 1d892928c70ee9ddf66f2a37a8e083d2632c6e38 | /nova/ipv6/api.py | d74e6cd370bb1cfe071d4498e8ccdf7d772c799f | [
"Apache-2.0"
] | permissive | usc-isi/essex-baremetal-support | 74196c3f1332ee3cdeba9c263faff0ac0567d3cf | a77daf8ef56cf41e38de36621eda25ed3f180156 | refs/heads/master | 2021-05-19T03:12:11.929550 | 2020-07-24T14:15:26 | 2020-07-24T14:15:26 | 4,702,421 | 0 | 1 | Apache-2.0 | 2020-07-24T14:15:27 | 2012-06-18T15:19:41 | null | UTF-8 | Python | false | false | 1,342 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova.openstack.common import cfg
from nova import utils
ipv6_backend_opt = cfg.StrOpt('ipv6_backend',
default='rfc2462',
help='Backend to use for IPv6 generation')
FLAGS = flags.FLAGS
FLAGS.register_opt(ipv6_backend_opt)
def reset_backend():
    """(Re)bind the module-global IMPL to the backend selected by the
    ``ipv6_backend`` flag (lazily loaded).  Called once at import time;
    calling it again picks up a changed flag value."""
    global IMPL
    IMPL = utils.LazyPluggable('ipv6_backend',
                               rfc2462='nova.ipv6.rfc2462',
                               account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
    """Delegate to the configured backend's ``to_global``."""
    return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
    """Delegate to the configured backend's ``to_mac``."""
    return IMPL.to_mac(ipv6_address)
reset_backend()
| [
"dkang@isi.edu"
] | dkang@isi.edu |
f2073f68965e7070362240f4fd2d08cb653f4697 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/398/usersdata/281/81356/submittedfiles/av1_programa2.py | f84881738dd63b159a8897f7eaba31b5aa5a48b2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO ABAIXO DESTA LINHA
# Read six integers from stdin, one per ':' prompt.
a=int(input(':'))
b=int(input(':'))
c=int(input(':'))
d=int(input(':'))
e=int(input(':'))
f=int(input(':'))
# NOTE(review): only a..d are range-checked against 1..13; e and f are not —
# confirm whether they should also be validated.
if a>0 and a<=13 and b>0 and b<=13 and c>0 and c<=13 and d>0 and d<=13:
    # 'C' when strictly increasing, 'N' when some neighbour decreases.
    # NOTE(review): when adjacent values are equal, neither branch prints.
    if a<b and b<c and c<d and d<e and e<f:
        print('C')
    elif a>b or b>c or c>d or d>e or e>f:
        print('N')
else:
    print('fim')
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
65d7292164c9a3e0351ff48dcb3a057b8dd2290d | 792ae5d2a5c17af4f2ccfa582e3aeec569a6809a | /42. Trapping Rain Water.py | f35ce886c90306b5cd3f91bc4258b415545fef8e | [] | no_license | ADebut/Leetcode | 396b8b95ad5b5e623db2839bbfdec861c4c1731f | 7333d481e00e8c1bc5b827d1d4ccd6e4d291abd7 | refs/heads/master | 2020-07-05T18:48:27.504540 | 2019-10-28T10:51:43 | 2019-10-28T10:51:43 | 202,735,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | class Solution:
def trap(self, height: List[int]) -> int:
volume = 0
max_left = [0 for i in range(len(height))]
max_right = [0 for i in range(len(height))]
for i in range(1, len(height) - 1):
max_left[i] = max(max_left[i - 1], height[i - 1])
for i in range(len(height) - 2, 0, -1):
max_right[i] = max(max_right[i + 1], height[i + 1])
for i in range(1, len(height) -1):
mini = min(max_left[i], max_right[i])
if mini > height[i]:
volume += mini - height[i]
return volume | [
"chen758@usc.edu"
] | chen758@usc.edu |
dab457aaf7c8b1c95b2a79290fb335200d7007d5 | 36a094a44450d1353e9dfc8242a54e2bb70bb9b5 | /src/ebonite/runtime/client/__init__.py | c38cc6baa94f20f83240507ae40107cb1bfec558 | [
"Apache-2.0"
] | permissive | zyfra/ebonite | 52843ce847a3fd28e4ba8ab64d986dcfb23671c0 | b01b662c43709d152940f488574d78ff25f89ecf | refs/heads/master | 2022-11-29T21:20:02.358797 | 2020-10-19T12:22:49 | 2020-10-19T12:22:49 | 221,721,146 | 275 | 18 | Apache-2.0 | 2022-11-21T22:44:02 | 2019-11-14T14:49:47 | Python | UTF-8 | Python | false | false | 55 | py | from .base import BaseClient
__all__ = ['BaseClient']
| [
"mike0sv@gmail.com"
] | mike0sv@gmail.com |
13f4510cf7c53943658d0e717453ef04820570b8 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v11/resources/types/customer_user_access.py | 4ef6dbad73be7b9adc4eff12bb1be5952d52fbc8 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 2,730 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v11.enums.types import access_role as gage_access_role
__protobuf__ = proto.module(
package="google.ads.googleads.v11.resources",
marshal="google.ads.googleads.v11",
manifest={"CustomerUserAccess",},
)
class CustomerUserAccess(proto.Message):
    r"""Represents the permission of a single user onto a single
    customer.

    Attributes:
        resource_name (str):
            Immutable. Name of the resource. Resource names have the
            form:
            ``customers/{customer_id}/customerUserAccesses/{user_id}``
        user_id (int):
            Output only. User id of the user with the
            customer access. Read only field
        email_address (str):
            Output only. Email address of the user.
            Read only field

            This field is a member of `oneof`_ ``_email_address``.
        access_role (google.ads.googleads.v11.enums.types.AccessRoleEnum.AccessRole):
            Access role of the user.
        access_creation_date_time (str):
            Output only. The customer user access
            creation time. Read only field
            The format is "YYYY-MM-DD HH:MM:SS".
            Examples: "2018-03-05 09:15:00" or "2018-02-01
            14:34:30".

            This field is a member of `oneof`_ ``_access_creation_date_time``.
        inviter_user_email_address (str):
            Output only. The email address of the inviter
            user. Read only field

            This field is a member of `oneof`_ ``_inviter_user_email_address``.
    """

    # NOTE: auto-generated protobuf wrapper — the number= values are wire
    # field numbers and must stay in sync with the .proto definition.
    resource_name = proto.Field(proto.STRING, number=1,)
    user_id = proto.Field(proto.INT64, number=2,)
    # optional=True enables proto3 explicit field presence (the oneofs above).
    email_address = proto.Field(proto.STRING, number=3, optional=True,)
    access_role = proto.Field(
        proto.ENUM, number=4, enum=gage_access_role.AccessRoleEnum.AccessRole,
    )
    access_creation_date_time = proto.Field(
        proto.STRING, number=6, optional=True,
    )
    inviter_user_email_address = proto.Field(
        proto.STRING, number=7, optional=True,
    )
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
0f345d89caa6e429008ad6a6714ea68a992ab415 | 3481e590d546c9072a63ece64dd52c3eb063ca38 | /rl_exp/multiplayer_env.py | 8af2fb9065d577d368ea954e7f6b9ca04dee555d | [] | no_license | SSS135/rl_exp | 7a1c5bd5a58ab558a9e32ea90a13c26704d06209 | f7b1735449dd1a3210cf8898344e856a712d6205 | refs/heads/master | 2020-05-29T15:22:10.881771 | 2020-04-30T12:26:31 | 2020-04-30T12:26:31 | 189,218,216 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | import gym
class MultiplayerEnv(gym.Env):
    """Base class for Gym environments acted in by several players."""

    def __init__(self, num_players: int) -> None:
        # Number of players participating in this environment.
        self.num_players = num_players
"sss13594@gmail.com"
] | sss13594@gmail.com |
dab29b7fb99b4053d194a299747a2da05ff8aaf5 | ea260e2d501eda7f04705dbe22a9263e1ffb99c9 | /lambda_function.py | ec573d04b0d8c88a2a2c8f3aa63ba9b9ca9897f0 | [] | no_license | PeterMitrano/my_desk | d4cd5edf3e054e9d51da4b0d6b6c16e0f088caa9 | dfd9816acb7fdb1dd9637b730fa738e2ffac1e8c | refs/heads/master | 2020-07-23T21:08:15.349369 | 2017-08-25T21:20:30 | 2017-08-25T21:20:30 | 66,438,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,832 | py | from collections import namedtuple
import logging
import requests
LOGGER = 'my_desk'
APP_ID = 'amzn1.ask.skill.999f5e91-7264-4660-b397-5efc340a51f9'
def send_command(command_endpoint):
    """Send *command_endpoint* to the desk's HTTP controller.

    Returns a Result(err, error_msg, err_speech) triple: ``err`` is True on
    failure, ``error_msg`` is for logging, ``err_speech`` is spoken back to
    the user by Alexa.
    """
    # Name the record type like a class; the original bound it to the
    # lowercase name `result`, which the instances then shadowed.
    Result = namedtuple('Result', ['err', 'error_msg', 'err_speech'])
    if not command_endpoint:
        return Result(True, 'command endpoint is ' + str(command_endpoint),
                      "I could not understand your command.")
    try:
        # Keep the try body minimal: only the HTTP call can raise here.
        get_result = requests.get(
            'http://66.189.43.74:3776/' + command_endpoint, timeout=3)
    except requests.exceptions.Timeout:
        return Result(True, "get request timed out",
                      "Your desk did not respond to my command.")
    if get_result.ok:
        return Result(False, None, None)
    # Non-2xx status from the desk controller.
    return Result(True, "failed to get result",
                  "Your desk sent an invalid response.")
def handle_event(event, context):
    """AWS Lambda entry point for the Alexa desk skill.

    Maps the incoming Alexa intent to a desk command endpoint, sends it via
    send_command, and returns an Alexa response dict (speech only on error).

    Raises:
        RuntimeError: when the request's applicationId is not this skill's.
    """
    logger = logging.getLogger(LOGGER)
    # Logger.warning: `warn` is a deprecated alias of `warning`.
    logger.warning(event)
    if event['session']['application']['applicationId'] != APP_ID:
        raise RuntimeError('Wrong applicationId')
    request = event['request']
    command_endpoint = None
    # Intents that map one-to-one onto a desk endpoint.
    simple_intents = {
        'GoUpIntent': 'up',
        'GoDownIntent': 'down',
        'AMAZON.StopIntent': 'stop',
    }
    if 'intent' in request:
        intent = request['intent']['name']
        if intent == 'GoToPositionIntent':
            position = request['intent']['slots']['Position']['value']
            if position in ('sit', 'sitting'):
                logger.warning('sit')
                command_endpoint = 'position/sit'
            elif position in ('stand', 'standing'):
                logger.warning('stand')
                command_endpoint = 'position/stand'
        elif intent == 'GoToHeightIntent':
            height = request['intent']['slots']['Height']['value']
            logger.warning(height)
            command_endpoint = 'height/' + height
        elif intent in simple_intents:
            command_endpoint = simple_intents[intent]
            logger.warning(command_endpoint)
    result = send_command(command_endpoint)
    if result.err:
        logger.warning("error sending command: %s", result.error_msg)
        response = {
            "version": 1.0,
            "response": {
                "outputSpeech": {
                    "type": "PlainText",
                    "text": result.err_speech
                }
            }
        }
    else:
        response = {"version": 1.0, "response": {}}
    logger.warning(response)
    return response
| [
"mitranopeter@gmail.com"
] | mitranopeter@gmail.com |
b7e46c5a94d311113b0ba3112f69ff066a02906b | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-AVFoundation/PyObjCTest/test_avoutputsettingsassistant.py | 94f07c16b0ffb29979c57ddd8ffc0e0cc3789951 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | from PyObjCTools.TestSupport import *
import AVFoundation
class TestAVOutputSettingsAssistant(TestCase):
    """Checks that the AVOutputSettingsAssistant preset constants exist and
    are strings, gated on the macOS version that introduced each preset.

    NOTE(review): `unicode` is presumably an alias exported by the
    PyObjCTools.TestSupport star-import for Python 3 — verify there.
    """

    @min_os_level("10.9")
    def testConstants10_9(self):
        self.assertIsInstance(AVFoundation.AVOutputSettingsPreset640x480, unicode)
        self.assertIsInstance(AVFoundation.AVOutputSettingsPreset960x540, unicode)
        self.assertIsInstance(AVFoundation.AVOutputSettingsPreset1280x720, unicode)
        self.assertIsInstance(AVFoundation.AVOutputSettingsPreset1920x1080, unicode)

    @min_os_level("10.10")
    def testConstants10_10(self):
        self.assertIsInstance(AVFoundation.AVOutputSettingsPreset3840x2160, unicode)

    @min_os_level("10.13")
    def testConstants10_13(self):
        self.assertIsInstance(AVFoundation.AVOutputSettingsPresetHEVC1920x1080, unicode)
        self.assertIsInstance(AVFoundation.AVOutputSettingsPresetHEVC3840x2160, unicode)
if __name__ == "__main__":
    main()  # PyObjCTools.TestSupport test runner.
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
a845dbc5c0be469b58674b1c5b3717a7951103be | 76e9afdf16eabcc9e1a3facd308e56362112efc4 | /20210227_ls/net_cls/unet_ls_4.py | 38d9ec4ce8430c10d163b12a9afc54500c0d2034 | [] | no_license | rerejii/pwb_work_2021 | c65c5e787ad98b7d847cb63ebadc24a02f001e90 | 8ecfb2a98d9d396ed505ecc939e384cf6400412d | refs/heads/main | 2023-03-30T10:43:18.115386 | 2021-03-24T05:38:41 | 2021-03-24T05:38:41 | 350,954,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,345 | py | # -*- coding: utf-8 -*-
import glob
import tensorflow as tf
import time
import math
import ShareNetFunc as nfunc
import tensorflow_addons as tfa
import numpy as np
# - Discriminator_Loss
#   - The discriminator loss takes two inputs: real images and generated images.
#   - real_loss is the sigmoid cross-entropy between the real images and an array of ones (because they are real).
#   - generated_loss is the sigmoid cross-entropy between the generated images and an array of zeros (because they are fake).
#   - total_loss is the sum of real_loss and generated_loss.
# - Generator_Loss
#   - Sigmoid cross-entropy between the generated images and an array of ones.
#   - The paper also adds an L1 loss (mean absolute error) between the generated and target images,
#     which makes the generated image structurally similar to the target.
#   - Total generator loss = gan_loss + LAMBDA * l1_loss, where LAMBDA = 100 (value chosen by the paper's authors).
class NetManager:
def __init__(self,
loss_object=tf.keras.losses.BinaryCrossentropy(from_logits=True),
gen_optimizer=tf.keras.optimizers.Adam(2e-4, beta_1=0.5),
# dis_optimizer=tf.keras.optimizers.Adam(2e-4, beta_1=0.5),
output_channel=1,
lambda_val=100,):
self.output_channel = output_channel
self.gen = self._Generator()
self.dis = self._Discriminator()
self.loss_object = loss_object
self.gen_optimizer = gen_optimizer
# self.dis_optimizer = dis_optimizer
self.lambda_val = lambda_val
self.best_test_accuracy = tf.Variable(initial_value=0.0,
trainable=False,
dtype=tf.float32,
name='best_test_accuracy')
self.best_validation_accuracy = tf.Variable(initial_value=0.0,
trainable=False,
dtype=tf.float32,
name='best_validation_accuracy')
self.step = tf.Variable(initial_value=0, trainable=False, dtype=tf.int32, name='step')
self.epoch = tf.Variable(initial_value=0, trainable=False, dtype=tf.int32, name='epoch')
self.study_time = tf.Variable(initial_value=0.0, trainable=False, dtype=tf.float32, name='study_time')
self.total_time = tf.Variable(initial_value=0.0, trainable=False, dtype=tf.float32, name='total_time')
self.checkpoint = tf.train.Checkpoint(
generator_optimizer=self.gen_optimizer,
# discriminator_optimizer=self.dis_optimizer,
generator=self.gen,
discriminator=self.dis,
best_validation_accuracy=self.best_validation_accuracy,
best_test_accuracy=self.best_test_accuracy,
step=self.step,
epoch=self.epoch,
study_time=self.study_time,
total_time=self.total_time,
)
self.time_basis = 54000 # timeの基準
self.day_time = 86400 # 1日
self.padding = 0
    # ************ public (externally called) methods ************
    # ===== checkpoint helpers =====
    def ckpt_restore(self, path):
        """Restore all checkpointed objects/variables from *path*."""
        self.checkpoint.restore(path)
    def get_str_study_time(self):
        """Format the accumulated study time as 'D:HH:MM:SS.ffff...'."""
        ms_time, s_time = math.modf(self.study_time.numpy() + self.time_basis)  # fractional / whole seconds
        day, times = divmod(s_time, self.day_time)  # split into days and leftover seconds
        day = int(day)
        # NOTE(review): time.ctime interprets `times` in *local* time, so the
        # HH:MM:SS portion depends on the machine's timezone; time_basis=54000
        # looks like a baked-in offset — confirm whether gmtime was intended.
        step_times = time.strptime(time.ctime(times))
        str_time = str(day) + ':' + time.strftime("%H:%M:%S", step_times) + str(ms_time)[1:]
        return str_time
def get_str_total_time(self):
ms_time, s_time = math.modf(self.total_time.numpy() + self.time_basis) # ミニセカンド セカンド
day, times = divmod(s_time, self.day_time) # 日数と時間に
day = int(day)
step_times = time.strptime(time.ctime(times))
str_time = str(day) + ':' + time.strftime("%H:%M:%S", step_times) + str(ms_time)[1:]
return str_time
    def get_epoch(self):
        """Return the checkpointed epoch counter as a Python int."""
        return self.epoch.numpy()
    def get_step(self):
        """Return the checkpointed step counter as a Python int."""
        return self.step.numpy()
    def add_study_time(self, proc_time):
        """Accumulate *proc_time* seconds into the study-time variable."""
        self.study_time.assign(self.study_time + proc_time)
    def add_total_time(self, proc_time):
        """Accumulate *proc_time* seconds into the total-time variable."""
        self.total_time.assign(self.total_time + proc_time)
def update_check_best_validation_accuracy(self, accuracy):
if accuracy > self.best_validation_accuracy:
self.best_validation_accuracy = accuracy
return True
return False
def update_check_best_test_accuracy(self, accuracy):
if accuracy > self.best_test_accuracy:
self.best_test_accuracy = accuracy
return True
return False
    def get_checkpoint(self):
        """Return the tf.train.Checkpoint tracking models and counters."""
        return self.checkpoint
    # ===== network helpers =====
    def get_padding(self):
        """Return the padding (pixels) trimmed from targets during training."""
        return self.padding
    def get_generator(self):
        """Return the generator model."""
        return self.gen
    def get_discriminator(self):
        """Return the discriminator model."""
        return self.dis
def set_ckpt_val(self, step_val=None, epoch_val=None):
if step_val is not None:
self.step.assign(step_val)
if epoch_val is not None:
self.epoch.assign(epoch_val)
    def get_generator_optimizer(self):
        """Return the generator's optimizer."""
        return self.gen_optimizer
# def get_discriminator_optimizer(self):
# return self.dis_optimizer
def net_weight_mask(self, weight, distance):
dis_w = tf.less_equal(distance, 30) # <=
bound_w = tf.equal(weight, 0)
out = tf.equal(dis_w, bound_w)
out = tf.cast(out, tf.float32)
return out
def not_bound_mask(self, weight):
bound_w = tf.equal(weight, 0)
bound_w = tf.cast(bound_w, tf.float32)
return bound_w
def generator_loss(self, gen_output, target, weight, distance):
# gen_bin = self.binary_from_data(gen_output, label='output')
# gen_bin = tf.cast(gen_bin, tf.float32)
# c_weight = tf.numpy_function(self.connect_weight, inp=[gen_bin, target], Tout=tf.float32)
# c_weight = self.connect_weight(gen_bin, target)
# net_weight_mask = self.net_weight_mask(weight, distance)
# net_weight = net_weight_mask + self.not_bound_mask(weight)
gen_loss = self.loss_object(y_true=target, y_pred=gen_output, sample_weight=None)
return gen_loss
def evaluation_generator_loss(self, gen_output, target, weight=None):
# gen_bin = self.binary_from_data(gen_output, label='output')
# gen_bin = tf.cast(gen_bin, tf.float32)
# c_weight, err_count = self.connect_weight(gen_bin, target, 5, True)
# c_weight, err_count = tf.numpy_function(self.connect_weight, inp=[gen_bin, target, 5, True], Tout=[tf.float32, tf.float32])
# net_c_weight = self.net_weight(c_weight)
gen_loss = self.loss_object(y_true=target, y_pred=gen_output, sample_weight=None)
return gen_loss, tf.constant(0.0), tf.constant(0.0)
# def discriminator_loss(self, disc_real_output, disc_generated_output):
# # 本物に対して1(本物)と判定できたか
# real_loss = self.loss_object(tf.ones_like(disc_real_output), disc_real_output)
# # 偽物に対して0(偽物)と判定できたか
# generated_loss = self.loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)
# # lossの合計
# total_disc_loss = real_loss + generated_loss
# return total_disc_loss
# # ===== model save =====
# def save_generator(self, path):
# self.gen.save(path)
    # ===== value conversion helpers =====
    def binary_from_img(self, data):
        """Boolean mask: True where an 8-bit image pixel equals 255."""
        return tf.greater_equal(data, 255)
def binary_from_data(self, data, label=None):
if label == 'target':
return tf.greater_equal(data, 0.5)
if label == 'output':
return tf.greater_equal(data, 0.)
# else:
# return tf.greater_equal(data, 0.5)
    def img_from_netdata(self, data):
        """Map network-range data [0, 1] back to image range [0, 255]."""
        return data * 255
def netdata_from_img(self, data):
return data / 255
# def binary_from_img(self, data):
# return tf.greater_equal(data, 127.5)
#
# def binary_from_data(self, data):
# return tf.greater_equal(data, 0)
#
# def img_from_netdata(self, data):
# return (data + 1) * 127.5
#
# def netdata_from_img(self, data):
# return (data / 127.5) - 1
# ==============================================================================================================
# ========== train関数 =========================================================================================
# ==============================================================================================================
    @tf.function  # run in graph mode https://www.tensorflow.org/guide/autograph?hl=ja
    def multi_train_step(self, ds, device_list):
        """One generator training step replicated over every GPU in
        ``device_list``; per-GPU gradients are averaged and applied once.

        Returns ``(gen_output, err_count, c_weight)`` from the last GPU
        iteration; the discriminator path is currently disabled.
        """
        generator_gradients_list = []
        discriminator_gradients_list = []
        for gpu_index in device_list:
            with tf.device('/gpu:%d' % gpu_index):  # per-GPU section
                # record gradient-producing computation under tf.GradientTape
                # https://qiita.com/propella/items/5b2182b3d6a13d20fefd
                with tf.GradientTape() as gen_tape:
                    # input_image, target, weight = next(ds_iter)
                    input_image, target, weight, distance = ds
                    target = nfunc.target_cut_padding(target=target, padding=self.get_padding())
                    # image generation by the Generator
                    generator = self.get_generator()
                    gen_output = generator(input_image, training=True)
                    # judgement by the Discriminator (disabled)
                    # discriminator = self.get_discriminator()
                    # disc_real_output = discriminator([input_image, target], training=True)
                    # disc_generated_output = discriminator([input_image, gen_output], training=True)
                    # loss computation
                    gen_loss = self.generator_loss(gen_output, target, weight, distance)
                    err_count = 0
                    c_weight = tf.ones_like(weight)
                    # gen_loss, err_count, c_weight = self.evaluation_generator_loss(gen_output, target, weight)
                    # disc_loss = self.discriminator_loss(disc_real_output, disc_generated_output)
                # gradient computation; trainable_variables = trainable weights
                generator_gradients = gen_tape.gradient(gen_loss, generator.trainable_variables)
                # discriminator_gradients = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
                # keep per-GPU gradients so we can average them afterwards
                generator_gradients_list.append(generator_gradients)
                # discriminator_gradients_list.append(discriminator_gradients)
            # end of per-GPU section
        # with tf.device('/gpu:%d' % device_list[0]):  # per-GPU section
        generator = self.get_generator()
        discriminator = self.get_discriminator()
        # average the gradients (NOTE: original author flagged this as dubious)
        generator_gradients_average = nfunc.average_gradients(generator_gradients_list)
        # discriminator_gradients_average = nfunc.average_gradients(discriminator_gradients_list)
        # apply the gradients
        self.get_generator_optimizer().apply_gradients(zip(generator_gradients_average, generator.trainable_variables))
        # self.get_discriminator_optimizer().apply_gradients(zip(discriminator_gradients_average, discriminator.trainable_variables))
        return gen_output, err_count, c_weight
    @tf.function  # run in graph mode https://www.tensorflow.org/guide/autograph?hl=ja
    def train_step(self, ds, device_list, rate=1):
        """Single-GPU generator training step.

        ``rate`` scales the gradients by the share of the batch this call
        represents; an extra 1/len(device_list) factor keeps the scale
        consistent with the multi-GPU path.
        """
        with tf.device('/gpu:%d' % device_list[0]):  # per-GPU section
            # record gradient-producing computation under tf.GradientTape
            # https://qiita.com/propella/items/5b2182b3d6a13d20fefd
            with tf.GradientTape() as gen_tape:
                # input_image, target, weight = next(ds_iter)
                input_image, target, weight, distance = ds
                target = nfunc.target_cut_padding(target=target, padding=self.get_padding())
                # image generation by the Generator
                generator = self.get_generator()
                gen_output = generator(input_image, training=True)
                # judgement by the Discriminator (disabled)
                # discriminator = self.get_discriminator()
                # disc_real_output = discriminator([input_image, target], training=True)
                # disc_generated_output = discriminator([input_image, gen_output], training=True)
                # loss computation
                # gen_loss = self.generator_loss(gen_output, target, weight)
                gen_loss = self.generator_loss(gen_output, target, weight, distance)
                err_count = 0
                c_weight = tf.ones_like(weight)
                # gen_loss, err_count, c_weight = self.evaluation_generator_loss(gen_output, target, weight)
                # disc_loss = self.discriminator_loss(disc_real_output, disc_generated_output)
            # gradient computation; trainable_variables = trainable weights
            generator_gradients = gen_tape.gradient(gen_loss,generator.trainable_variables)
            # discriminator_gradients = disc_tape.gradient(disc_loss,discriminator.trainable_variables)
            # scale gradients by the batch-size ratio; multiply by
            # 1/len(device_list) to match the multi-GPU averaging behavior
            rate = tf.cast(rate, tf.float32)
            rate_gpu = tf.cast(1/len(device_list), tf.float32)
            use_rate = rate * rate_gpu
            generator_gradients = nfunc.rate_multiply(generator_gradients, use_rate)
            # discriminator_gradients = nfunc.rate_multiply(discriminator_gradients, rate * (1/len(device_list)))
        # end of per-GPU section
        # with tf.device('/gpu:%d' % gpu_index):  # per-GPU section
        generator = self.get_generator()
        discriminator = self.get_discriminator()
        # apply the gradients
        self.get_generator_optimizer().apply_gradients(zip(generator_gradients, generator.trainable_variables))
        # self.get_discriminator_optimizer().apply_gradients(zip(discriminator_gradients, discriminator.trainable_variables))
        return gen_output, err_count, c_weight
# ==============================================================================================================
# ========== check_accuracy関数 ================================================================================
# ==============================================================================================================
    @tf.function  # run in graph mode https://www.tensorflow.org/guide/autograph?hl=ja
    def multi_check_step(self, ds_iter, device_list, data_n):
        """Evaluate one batch per GPU (inference mode) and return the sum of
        the ``data_n``-weighted accuracies."""
        accuracy_list = []
        for gpu_index in device_list:
            with tf.device('/gpu:%d' % gpu_index):  # per-GPU section
                input_image, target = next(ds_iter)
                target = nfunc.target_cut_padding(target=target, padding=self.get_padding())
                # image generation by the Generator (training=False: inference)
                generator = self.get_generator()
                gen_output = generator(input_image, training=False)
                accuracy_list.append(nfunc.evaluate(net_cls=self, out=gen_output, ans=target) * data_n)
        return sum(accuracy_list)
    @tf.function  # run in graph mode https://www.tensorflow.org/guide/autograph?hl=ja
    def check_step(self, ds_iter, gpu_index, data_n):
        """Single-GPU variant of multi_check_step: evaluate one batch and
        return its ``data_n``-weighted accuracy."""
        accuracy_list = []
        with tf.device('/gpu:%d' % gpu_index):  # per-GPU section
            input_image, target = next(ds_iter)
            target = nfunc.target_cut_padding(target=target, padding=self.get_padding())
            # image generation by the Generator (training=False: inference)
            generator = self.get_generator()
            gen_output = generator(input_image, training=False)
            accuracy_list.append(nfunc.evaluate(net_cls=self, out=gen_output, ans=target) * data_n)
        return sum(accuracy_list)
# ************ クラス内呼び出し関数 ************
# ========== NET ===========
    def _Generator(self):
        """Build a U-Net style pix2pix generator: 8 downsampling blocks,
        7 upsampling blocks with skip connections, and a linear
        Conv2DTranspose output layer with ``self.output_channel`` channels.
        """
        down_stack = [
            self._downsample(64, 4, apply_batchnorm=False),  # (bs, 128, 128, 64)
            self._downsample(128, 4),  # (bs, 64, 64, 128)
            self._downsample(256, 4),  # (bs, 32, 32, 256)
            self._downsample(512, 4),  # (bs, 16, 16, 512)
            self._downsample(512, 4),  # (bs, 8, 8, 512)
            self._downsample(512, 4),  # (bs, 4, 4, 512)
            self._downsample(512, 4),  # (bs, 2, 2, 512)
            self._downsample(512, 4),  # (bs, 1, 1, 512)
        ]
        up_stack = [
            self._upsample(512, 4, apply_dropout=True),  # (bs, 2, 2, 1024)
            self._upsample(512, 4, apply_dropout=True),  # (bs, 4, 4, 1024)
            self._upsample(512, 4, apply_dropout=True),  # (bs, 8, 8, 1024)
            self._upsample(512, 4),  # (bs, 16, 16, 1024)
            self._upsample(256, 4),  # (bs, 32, 32, 512)
            self._upsample(128, 4),  # (bs, 64, 64, 256)
            self._upsample(64, 4),  # (bs, 128, 128, 128)
        ]
        initializer = tf.random_normal_initializer(0., 0.02)
        last = tf.keras.layers.Conv2DTranspose(self.output_channel, 4,
                                               strides=2,
                                               padding='same',
                                               kernel_initializer=initializer,
                                               activation=None)  # (bs, 256, 256, 3) tanh->=-1~1
        concat = tf.keras.layers.Concatenate()  # concatenation for skip connections
        inputs = tf.keras.layers.Input(shape=[None,None,5])
        x = inputs
        # Downsampling through the model
        skips = []
        for down in down_stack:
            x = down(x)
            skips.append(x)
        skips = reversed(skips[:-1])
        # Upsampling and establishing the skip connections
        for up, skip in zip(up_stack, skips):
            x = up(x)
            x = concat([x, skip])
        x = last(x)
        # NOTE(review): original comment says "return values are -1~1", but
        # the last layer has activation=None, so outputs are raw logits -- confirm.
        return tf.keras.Model(inputs=inputs, outputs=x)
    def _Discriminator(self):
        """Build a PatchGAN discriminator over concatenated (input, target)
        image pairs; outputs a (bs, 30, 30, 1) patch score map."""
        initializer = tf.random_normal_initializer(0., 0.02)
        inp = tf.keras.layers.Input(shape=[None, None, 3], name='input_image')
        tar = tf.keras.layers.Input(shape=[None, None, 1], name='target_image')
        x = tf.keras.layers.concatenate([inp, tar])  # (bs, 256, 256, channels*2)
        down1 = self._downsample(64, 4, False)(x)  # (bs, 128, 128, 64)
        down2 = self._downsample(128, 4)(down1)  # (bs, 64, 64, 128)
        down3 = self._downsample(256, 4)(down2)  # (bs, 32, 32, 256)
        zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3)  # (bs, 34, 34, 256)
        conv = tf.keras.layers.Conv2D(512, 4, strides=1,
                                      kernel_initializer=initializer,
                                      use_bias=False)(zero_pad1)  # (bs, 31, 31, 512)
        batchnorm1 = tf.keras.layers.BatchNormalization()(conv)
        leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)
        zero_pad2 = tf.keras.layers.ZeroPadding2D()(leaky_relu)  # (bs, 33, 33, 512)
        last = tf.keras.layers.Conv2D(1, 4, strides=1,
                                      kernel_initializer=initializer)(zero_pad2)  # (bs, 30, 30, 1)
        return tf.keras.Model(inputs=[inp, tar], outputs=last)
# ============ NET FUNCTION ==========
def _downsample(self, filters, size, apply_batchnorm=True):
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(
tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',
kernel_initializer=initializer, use_bias=False))
if apply_batchnorm:
result.add(tf.keras.layers.BatchNormalization())
result.add(tf.keras.layers.LeakyReLU())
return result
def _upsample(self, filters, size, apply_dropout=False):
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(
tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
padding='same',
kernel_initializer=initializer,
use_bias=False))
result.add(tf.keras.layers.BatchNormalization())
if apply_dropout:
result.add(tf.keras.layers.Dropout(0.5))
result.add(tf.keras.layers.ReLU())
return result
| [
"hayakawa.shinya.kochi@gmail.com"
] | hayakawa.shinya.kochi@gmail.com |
a1025224fff76aee836ecf09e5384f9e919891e0 | 5336e77ea7dc15de19e8e0722549d0fe35c88699 | /auth_path/services_serializer.py | ca6ea65450602c5f22940f7caf8d4cc68258e090 | [] | no_license | xal9wiii4ik/django-crm | 37d16d6cbec3bda3e7751144d9f0466c5d8897d8 | ba0858a47ef8ab91a9d5a26ec5328ecaadfa1034 | refs/heads/master | 2023-03-30T20:17:47.265034 | 2021-04-08T21:53:44 | 2021-04-08T21:53:44 | 323,412,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from rest_framework import serializers
def verification_password(value: str) -> str:
    """Validate password strength and return its hash.

    Requires at least 8 characters, at least one ASCII uppercase letter and
    at least one digit. Raises serializers.ValidationError describing the
    first rule that fails; otherwise returns make_password(value).
    """
    # Character classes hoisted out of the any() scans so the sets are
    # built once instead of once per character.
    uppercase = set('QAZWSXEDCRFVTGBYHNUJMIKOLP')
    digits = set('1234567890')
    # Guard clauses instead of the original nested-if pyramid.
    if len(value) < 8:
        raise serializers.ValidationError('Password must have at least 8 characters')
    if not any(c in uppercase for c in value):
        raise serializers.ValidationError('Password must contain at least 1 uppercase letter')
    if not any(f in digits for f in value):
        raise serializers.ValidationError('Password must contain at least 1 number')
    return make_password(value)
def verification_unique_email(value: str) -> str:
    """Ensure no user already uses this email; return the email unchanged.

    Raises serializers.ValidationError when a matching user exists.
    """
    # .exists() issues an EXISTS query instead of fetching rows for len().
    if User.objects.filter(email=value).exists():
        raise serializers.ValidationError('User with given credentials already exist')
    return value
def verification_unique_username(value: str) -> str:
    """Ensure no user already uses this username; return it unchanged.

    Raises serializers.ValidationError when a matching user exists.
    """
    # .exists() issues an EXISTS query instead of fetching rows for len().
    if User.objects.filter(username=value).exists():
        raise serializers.ValidationError('User with given credentials already exist')
    return value
def verification_exist_email(value: str) -> str:
    """Ensure a user with this email exists; return the email unchanged.

    Raises serializers.ValidationError when no matching user is found.
    """
    # .exists() issues an EXISTS query instead of fetching rows for len().
    if not User.objects.filter(email=value).exists():
        raise serializers.ValidationError('User with given credentials are not found')
    return value
def verification_email_and_return_username(value: str) -> str:
    """Resolve an email address to the matching user's username.

    Raises serializers.ValidationError when no matching user is found.
    """
    # .first() fetches at most one row instead of evaluating the whole queryset.
    user = User.objects.filter(email=value).first()
    if user is None:
        raise serializers.ValidationError('User with given credentials are not found')
    return user.username
| [
"xal9wIII4ik@yandex.ru"
] | xal9wIII4ik@yandex.ru |
3586ccadfb58facf6e41abb02ed1e53dd12448bd | d496f743372562ddeac41fb40619d725089d538f | /docker_box/urls.py | 7b2c8def66410af3dd27c133dbda786ad5bcd5a6 | [
"MIT"
] | permissive | druuu/docker-box | 18391919498e59631509e1203a00a0e76fb46e5d | 36619f91fbc8ac356b55e05d7301e8c27e015117 | refs/heads/master | 2021-01-21T11:27:11.003086 | 2018-03-10T18:52:12 | 2018-03-10T18:52:12 | 83,549,111 | 0 | 0 | null | 2017-03-01T11:53:16 | 2017-03-01T11:53:16 | null | UTF-8 | Python | false | false | 199 | py | from django.conf.urls import url, include
from django.contrib import admin
# Route /admin/ to the Django admin site and hand every other path
# to the dockit app under the "docker_box" namespace.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('dockit.urls', namespace='docker_box')),
]
| [
"ashwin@micropyramid.com"
] | ashwin@micropyramid.com |
329fc3d0591d6575ee97725f1cab29d392776153 | 0097d779d4d7551569625f6cca16d8bb8e935712 | /python/password.py | ee22eba4d579bbbb78367e46753c0cb74931199c | [] | no_license | kamesh051/django_tutorial-master | 48b710e3b7382f6204a696187bc1e4e6f20c6a04 | 0ff99f1db6d73e569ec0aa8539c73118310acee1 | refs/heads/master | 2020-03-27T19:21:06.471604 | 2018-09-02T05:13:06 | 2018-09-02T05:13:06 | 146,983,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | import random
import sys
import string
def input(help_text="Do you want me to help you with a password? \n"):
    # Prompt the user with ``help_text`` and return the reply as a string.
    # NOTE(review): shadows the builtin ``input`` and relies on the
    # Python 2-only ``raw_input``; consider renaming (would require
    # updating the callers in main()).
    return (str(raw_input(help_text)))
def main():
    """Interactive loop (Python 2): ask whether to generate a password, for a
    length between 3 and 50 and a strength level, then print one randomly
    generated password and exit via sys.exit()."""
    a = []  # generated characters accumulate here
    while True:
        x = input()
        if x == "yes":
            while True:
                z = input("How many characters, between 3 and 50, do you want it to have? \n")
                # re-ask until z is a digit string within [3, 50]
                if not z.isdigit() or int(z)<3 or int(z)>50:
                    print "please use a number between 3 and 50!"
                else:
                    while True:
                        y = input("Do you want it to be a weak, medium, strong or insane password? \n")
                        if y!="weak" and y!="medium" and y!="strong" and y!="insane":
                            print "please use weak/medium/strong/insane "
                        elif y == "weak":
                            # lowercase letters only
                            for i in xrange(int(z)):
                                b = [random.choice(string.ascii_lowercase)]
                                a.append(random.choice(b))
                            print "how about: \n", "".join(a)
                            sys.exit()
                        elif y == "medium":
                            # lowercase + uppercase letters
                            for i in xrange(int(z)):
                                b = [random.choice(string.ascii_lowercase),random.choice(string.ascii_uppercase)]
                                a.append(random.choice(b))
                            print "how about: \n", "".join(a)
                            sys.exit()
                        elif y == "strong":
                            # lowercase + digits + uppercase
                            for i in xrange(int(z)):
                                b = [random.choice(string.ascii_lowercase),random.choice(string.digits),random.choice(string.ascii_uppercase)]
                                a.append(random.choice(b))
                            print "how about: \n", "".join(a)
                            sys.exit()
                        elif y == "insane":
                            # digits + lowercase + punctuation + uppercase
                            for i in xrange(int(z)):
                                b = [random.choice(string.digits), random.choice(string.ascii_lowercase), random.choice("-_?!/.,';][)(~`@#$%^&*+|"),random.choice(string.ascii_uppercase)]
                                a.append(random.choice(b))
                            print "how about: \n", "".join(a)
                            sys.exit()
        elif x == "no":
            print "Ok, goodbye!"
            sys.exit()
        else:
            print " please use yes or no!"
| [
"kameshgithub@gmail.com"
] | kameshgithub@gmail.com |
15a68d03a1bb8812e543e6eac440ae430cd48763 | a2062bd70adf7c64d39401211d4597d010afdad3 | /21.05.20/Pandas02_02_FunEx07_송예지.py | e4b0d61eeb4fb7494fdb2b6054fd7128f7695800 | [] | no_license | yeijSong/LearningPandas | ebf3f3f2416bb3b7254ebf86ede8ce5f696bb1f9 | 6e79a54a4412455d9729412551a64a7e86534ced | refs/heads/main | 2023-05-24T10:38:41.514011 | 2021-06-21T13:25:28 | 2021-06-21T13:25:28 | 378,843,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | '''
# 매개변수에 초기값 미리 설정하기
say_myself 함수는 3개의 매개변수를 받아서
마지막 인수인 man이 True이면 남자, False면 여자 출력
default값이 man이므로 아무 것도 적지 않으면 man으로 인식
'''
def say_myself(name, old, man=True):
    """Print a short self-introduction: name, age, then gender.

    ``man`` defaults to True (prints the "male" line); pass False for the
    "female" line.
    """
    print('나의 이름은 %s입니다.' % name)
    print('나이는 %d살입니다.' % old)
    gender_line = '남자입니다' if man else '여자입니다'
    print(gender_line)
# Demo calls: first with the default gender (man=True), then explicitly False.
say_myself('소나무',27)
print()
say_myself('오렌지',25,False)
print()
'''
say_myself('오렌지',22,man)
이렇게 하면 오류가 뜸
man이 정의되지 않았다고 함
위에서 man을 참으로 정의했기 때문에
참, 거짓으로만 입력해야하는 것으로 보임
''' | [
"noreply@github.com"
] | yeijSong.noreply@github.com |
ea4782259049a80c31467404d5c5dbcf39fa371a | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/sentry/2019/12/event.py | 05dded33a7bd91cb7bd5703a00c6f8639d536402 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 21,570 | py | from __future__ import absolute_import
import six
import string
import pytz
from collections import OrderedDict
from dateutil.parser import parse as parse_date
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from hashlib import md5
from semaphore.processing import StoreNormalizer
from sentry import eventtypes, nodestore
from sentry.db.models import (
BoundedBigIntegerField,
BoundedIntegerField,
Model,
NodeData,
NodeField,
sane_repr,
)
from sentry.db.models.manager import BaseManager
from sentry.interfaces.base import get_interfaces
from sentry.utils import json
from sentry.utils.cache import memoize
from sentry.utils.canonical import CanonicalKeyDict, CanonicalKeyView
from sentry.utils.safe import get_path
from sentry.utils.strings import truncatechars
class EventDict(CanonicalKeyDict):
    """
    Creating an instance of this dictionary will send the event through basic
    (Rust-based) type/schema validation called "re-normalization".

    This is used as a wrapper type for `Event.data` such that creating an event
    object (or loading it from the DB) will ensure the data fits the type
    schema.
    """

    def __init__(self, data, skip_renormalization=False, **kwargs):
        # Data that already passed through an EventDict (directly, or wrapped
        # in a NodeData) has been renormalized once; skip doing it again.
        is_renormalized = isinstance(data, EventDict) or (
            isinstance(data, NodeData) and isinstance(data.data, EventDict)
        )
        if not skip_renormalization and not is_renormalized:
            # Rust-based normalizer from semaphore; trimming is disabled
            # because this is only a re-normalization pass.
            normalizer = StoreNormalizer(is_renormalize=True, enable_trimming=False)
            data = normalizer.normalize_event(dict(data))
        CanonicalKeyDict.__init__(self, data, **kwargs)
class EventCommon(object):
    """
    Methods and properties common to both Event and SnubaEvent.
    """

    @classmethod
    def generate_node_id(cls, project_id, event_id):
        """
        Returns a deterministic node_id for this event based on the project_id
        and event_id which together are globally unique. The event body should
        be saved under this key in nodestore so it can be retrieved using the
        same generated id when we only have project_id and event_id.
        """
        return md5("{}:{}".format(project_id, event_id)).hexdigest()

    # TODO (alex) We need a better way to cache these properties. functools32
    # doesn't quite do the trick as there is a reference bug with unsaved
    # models. But the current _group_cache thing is also clunky because these
    # properties need to be stripped out in __getstate__.
    @property
    def group(self):
        """Lazily fetch and memoize the Group row for ``self.group_id``."""
        from sentry.models import Group

        if not self.group_id:
            return None
        if not hasattr(self, "_group_cache"):
            self._group_cache = Group.objects.get(id=self.group_id)
        return self._group_cache

    @group.setter
    def group(self, group):
        # guard against None to not fail on AttributeError
        # otherwise Django 1.10 will swallow it in db.models.base init, but
        # consequently fail to remove from kwargs, and you'll get the red herring
        # TypeError: 'group' is an invalid keyword argument for this function.
        if group is not None:
            self.group_id = group.id
            self._group_cache = group

    @property
    def project(self):
        """Lazily fetch and memoize the Project row for ``self.project_id``."""
        from sentry.models import Project

        if not hasattr(self, "_project_cache"):
            self._project_cache = Project.objects.get(id=self.project_id)
        return self._project_cache

    @project.setter
    def project(self, project):
        if project is None:
            self.project_id = None
        else:
            self.project_id = project.id
        self._project_cache = project

    def get_interfaces(self):
        # Build a read-only canonical-key view over all interface objects
        # derived from the raw event payload.
        return CanonicalKeyView(get_interfaces(self.data))

    @memoize
    def interfaces(self):
        # Memoized accessor; stripped in Event.__getstate__ before pickling.
        return self.get_interfaces()

    def get_interface(self, name):
        """Return a single named interface (e.g. "user"), or None."""
        return self.interfaces.get(name)

    def get_legacy_message(self):
        # TODO: This is only used in the pagerduty plugin. We should use event.title
        # there and remove this function once users have been notified, since PD
        # alert routing may be based off the message field.
        return (
            get_path(self.data, "logentry", "formatted")
            or get_path(self.data, "logentry", "message")
            or self.message
        )

    def get_event_type(self):
        """
        Return the type of this event.

        See ``sentry.eventtypes``.
        """
        return self.data.get("type", "default")

    def get_event_metadata(self):
        """
        Return the metadata of this event.

        See ``sentry.eventtypes``.
        """
        # For some inexplicable reason we have some cases where the data
        # is completely empty. In that case we want to hobble along
        # further.
        return self.data.get("metadata") or {}

    def get_grouping_config(self):
        """Returns the event grouping config."""
        from sentry.grouping.api import get_grouping_config_dict_for_event_data

        return get_grouping_config_dict_for_event_data(self.data, self.project)

    def get_hashes(self, force_config=None):
        """
        Returns the calculated hashes for the event. This uses the stored
        information if available. Grouping hashes will take into account
        fingerprinting and checksums.
        """
        # If we have hashes stored in the data we use them, otherwise we
        # fall back to generating new ones from the data. We can only use
        # this if we do not force a different config.
        if force_config is None:
            hashes = self.data.get("hashes")
            if hashes is not None:
                return hashes

        return filter(
            None, [x.get_hash() for x in self.get_grouping_variants(force_config).values()]
        )

    def get_grouping_variants(self, force_config=None, normalize_stacktraces=False):
        """
        This is similar to `get_hashes` but will instead return the
        grouping components for each variant in a dictionary.

        If `normalize_stacktraces` is set to `True` then the event data will be
        modified for `in_app` in addition to event variants being created. This
        means that after calling that function the event data has been modified
        in place.
        """
        from sentry.grouping.api import get_grouping_variants_for_event, load_grouping_config
        from sentry.stacktraces.processing import normalize_stacktraces_for_grouping

        # Forcing configs has two separate modes. One is where just the
        # config ID is given in which case it's merged with the stored or
        # default config dictionary
        if force_config is not None:
            if isinstance(force_config, six.string_types):
                stored_config = self.get_grouping_config()
                config = dict(stored_config)
                config["id"] = force_config
            else:
                config = force_config

        # Otherwise we just use the same grouping config as stored. if
        # this is None the `get_grouping_variants_for_event` will fill in
        # the default.
        else:
            config = self.data.get("grouping_config")

        config = load_grouping_config(config)
        if normalize_stacktraces:
            normalize_stacktraces_for_grouping(self.data, config)

        return get_grouping_variants_for_event(self, config)

    def get_primary_hash(self):
        # TODO: This *might* need to be protected from an IndexError?
        return self.get_hashes()[0]

    @property
    def title(self):
        # also see event_manager.py which inserts this for snuba
        et = eventtypes.get(self.get_event_type())()
        return et.get_title(self.get_event_metadata())

    @property
    def culprit(self):
        # For a while events did not save the culprit
        if self.group_id:
            return self.data.get("culprit") or self.group.culprit
        return self.data.get("culprit")

    @property
    def location(self):
        # also see event_manager.py which inserts this for snuba
        et = eventtypes.get(self.get_event_type())()
        return et.get_location(self.get_event_metadata())

    @property
    def real_message(self):
        # XXX(mitsuhiko): this is a transitional attribute that should be
        # removed. `message` will be renamed to `search_message` and this
        # will become `message`.
        return (
            get_path(self.data, "logentry", "formatted")
            or get_path(self.data, "logentry", "message")
            or ""
        )

    @property
    def organization(self):
        return self.project.organization

    @property
    def version(self):
        # Protocol version of the event payload; "5" is the stored default.
        return self.data.get("version", "5")

    @property
    def ip_address(self):
        # Prefer the explicit user ip_address, then the request environment.
        ip_address = get_path(self.data, "user", "ip_address")
        if ip_address:
            return ip_address

        remote_addr = get_path(self.data, "request", "env", "REMOTE_ADDR")
        if remote_addr:
            return remote_addr

        return None

    @property
    def tags(self):
        """Return sorted (key, value) tag pairs, skipping malformed entries."""
        try:
            rv = sorted(
                [
                    (t, v)
                    for t, v in get_path(self.data, "tags", filter=True) or ()
                    if t is not None and v is not None
                ]
            )
            return rv
        except ValueError:
            # at one point Sentry allowed invalid tag sets such as (foo, bar)
            # vs ((tag, foo), (tag, bar))
            return []

    def get_tag(self, key):
        # Linear scan over the (small) tag list; returns None when missing.
        for t, v in self.tags:
            if t == key:
                return v
        return None

    @property
    def release(self):
        return self.get_tag("sentry:release")

    @property
    def dist(self):
        return self.get_tag("sentry:dist")

    def get_raw_data(self):
        """Returns the internal raw event data dict."""
        return dict(self.data.items())

    @property
    def size(self):
        # Approximate size in bytes of the JSON-serialized event body.
        return len(json.dumps(dict(self.data)))

    @property
    def transaction(self):
        return self.get_tag("transaction")

    def get_email_subject(self):
        # Use the project's custom subject template when configured, else the
        # module-level default; truncate to 128 chars for mail headers.
        template = self.project.get_option("mail:subject_template")
        if template:
            template = EventSubjectTemplate(template)
        else:
            template = DEFAULT_SUBJECT_TEMPLATE
        return truncatechars(template.safe_substitute(EventSubjectTemplateData(self)), 128).encode(
            "utf-8"
        )

    def get_environment(self):
        """Fetch and memoize the Environment row for this event's tag."""
        from sentry.models import Environment

        if not hasattr(self, "_environment_cache"):
            self._environment_cache = Environment.objects.get(
                organization_id=self.project.organization_id,
                name=Environment.get_name_or_default(self.get_tag("environment")),
            )

        return self._environment_cache

    def get_minimal_user(self):
        """
        A minimal 'User' interface object that gives us enough information
        to render a user badge.
        """
        return self.get_interface("user")

    def as_dict(self):
        """Returns the data in normalized form for external consumers."""
        # We use a OrderedDict to keep elements ordered for a potential JSON serializer
        data = OrderedDict()
        data["event_id"] = self.event_id
        data["project"] = self.project_id
        data["release"] = self.release
        data["dist"] = self.dist
        data["platform"] = self.platform
        data["message"] = self.real_message
        data["datetime"] = self.datetime
        data["tags"] = [(k.split("sentry:", 1)[-1], v) for (k, v) in self.tags]
        for k, v in sorted(six.iteritems(self.data)):
            if k in data:
                continue
            if k == "sdk":
                # drop the client ip from the sdk payload
                v = {v_k: v_v for v_k, v_v in six.iteritems(v) if v_k != "client_ip"}
            data[k] = v

        # for a long time culprit was not persisted. In those cases put
        # the culprit in from the group.
        if data.get("culprit") is None and self.group_id:
            data["culprit"] = self.group.culprit

        # Override title and location with dynamically generated data
        data["title"] = self.title
        data["location"] = self.location

        return data

    def bind_node_data(self):
        # Load the full event body from nodestore and attach it to
        # self.data (a NodeData wrapper) under the expected ref.
        node_id = Event.generate_node_id(self.project_id, self.event_id)
        node_data = nodestore.get(node_id) or {}
        ref = self.data.get_ref(self)
        self.data.bind_data(node_data, ref=ref)
class SnubaEvent(EventCommon):
    """
    An event backed by data stored in snuba.

    This is a readonly event and does not support event creation or save.
    The basic event data is fetched from snuba, and the event body is
    fetched from nodestore and bound to the data property in the same way
    as a regular Event.
    """

    # The minimal list of columns we need to get from snuba to bootstrap an
    # event. If the client is planning on loading the entire event body from
    # nodestore anyway, we may as well only fetch the minimum from snuba to
    # avoid duplicated work.
    minimal_columns = ["event_id", "group_id", "project_id", "timestamp"]

    __repr__ = sane_repr("project_id", "group_id")

    def __init__(self, snuba_values):
        """
        When initializing a SnubaEvent, think about the attributes you
        might need to access on it. If you only need a few properties, and
        they are all available in snuba, then you should use
        `SnubaEvent.selected_colums` (or a subset depending on your needs)
        But if you know you are going to need the entire event body anyway
        (which requires a nodestore lookup) you may as well just initialize
        the event with `SnubaEvent.minimal_colums` and let the rest of of
        the attributes come from nodestore.
        """
        assert all(k in snuba_values for k in SnubaEvent.minimal_columns)

        # self.snuba_data is a dict of all the stuff we got from snuba
        self.snuba_data = snuba_values

        # self.data is a (lazy) dict of everything we got from nodestore
        node_id = SnubaEvent.generate_node_id(
            self.snuba_data["project_id"], self.snuba_data["event_id"]
        )
        self.data = NodeData(node_id, data=None, wrapper=EventDict)

    # ============================================
    # Snuba-only implementations of properties that
    # would otherwise require nodestore data.
    # ============================================
    @property
    def tags(self):
        """
        Override of tags property that uses tags from snuba rather than
        the nodestore event body. This might be useful for implementing
        tag deletions without having to rewrite nodestore blobs.
        """
        if "tags.key" in self.snuba_data and "tags.value" in self.snuba_data:
            keys = self.snuba_data["tags.key"]
            values = self.snuba_data["tags.value"]
            # both parallel arrays must be present and aligned
            if keys and values and len(keys) == len(values):
                return sorted(zip(keys, values))
            else:
                return []
        else:
            return super(SnubaEvent, self).tags

    def get_minimal_user(self):
        """Build the User interface from snuba columns when all were selected,
        otherwise fall back to the nodestore body in ``self.data``."""
        from sentry.interfaces.user import User

        if all(key in self.snuba_data for key in ["user_id", "email", "username", "ip_address"]):
            user_id = self.snuba_data["user_id"]
            email = self.snuba_data["email"]
            username = self.snuba_data["username"]
            ip_address = self.snuba_data["ip_address"]
        else:
            user_id = self.data["user_id"]
            email = self.data["email"]
            username = self.data["username"]
            ip_address = self.data["ip_address"]

        return User.to_python(
            {"id": user_id, "email": email, "username": username, "ip_address": ip_address}
        )

    # If the data for these is available from snuba, we assume
    # it was already normalized on the way in and we can just return
    # it, otherwise we defer to EventCommon implementation.
    def get_event_type(self):
        if "type" in self.snuba_data:
            return self.snuba_data["type"]
        return super(SnubaEvent, self).get_event_type()

    @property
    def ip_address(self):
        if "ip_address" in self.snuba_data:
            return self.snuba_data["ip_address"]
        return super(SnubaEvent, self).ip_address

    @property
    def title(self):
        if "title" in self.snuba_data:
            return self.snuba_data["title"]
        return super(SnubaEvent, self).title

    @property
    def culprit(self):
        if "culprit" in self.snuba_data:
            return self.snuba_data["culprit"]
        return super(SnubaEvent, self).culprit

    @property
    def location(self):
        if "location" in self.snuba_data:
            return self.snuba_data["location"]
        return super(SnubaEvent, self).location

    # ====================================================
    # Snuba implementations of the django fields on Event
    # ====================================================
    @property
    def datetime(self):
        """
        Reconstruct the datetime of this event from the snuba timestamp
        """
        # dateutil seems to use tzlocal() instead of UTC even though the string
        # ends with '+00:00', so just replace the TZ with UTC because we know
        # all timestamps from snuba are UTC.
        return parse_date(self.timestamp).replace(tzinfo=pytz.utc)

    @property
    def message(self):
        if "message" in self.snuba_data:
            return self.snuba_data["message"]
        return self.data.get("message")

    @property
    def platform(self):
        if "platform" in self.snuba_data:
            return self.snuba_data["platform"]
        return self.data.get("platform")

    @property
    def id(self):
        # Because a snuba event will never have a django row id, just return
        # the hex event_id here. We should be moving to a world where we never
        # have to reference the row id anyway.
        return self.event_id

    @property
    def timestamp(self):
        return self.snuba_data["timestamp"]

    @property
    def event_id(self):
        return self.snuba_data["event_id"]

    @property
    def project_id(self):
        return self.snuba_data["project_id"]

    @project_id.setter
    def project_id(self, value):
        self.snuba_data["project_id"] = value

    @property
    def group_id(self):
        return self.snuba_data["group_id"]

    @group_id.setter
    def group_id(self, value):
        self.snuba_data["group_id"] = value

    def save(self):
        # SnubaEvent is read-only; persisting is intentionally unsupported.
        raise NotImplementedError
def ref_func(x):
    """NodeField ref callback: the stored project_id when truthy, else the related project's id."""
    stored_id = x.project_id
    if stored_id:
        return stored_id
    return x.project.id
class Event(EventCommon, Model):
"""
An event backed by data stored in postgres.
"""
__core__ = False
group_id = BoundedBigIntegerField(blank=True, null=True)
event_id = models.CharField(max_length=32, null=True, db_column="message_id")
project_id = BoundedBigIntegerField(blank=True, null=True)
message = models.TextField()
platform = models.CharField(max_length=64, null=True)
datetime = models.DateTimeField(default=timezone.now, db_index=True)
time_spent = BoundedIntegerField(null=True)
data = NodeField(
blank=True,
null=True,
ref_func=ref_func,
ref_version=2,
wrapper=EventDict,
skip_nodestore_save=True,
)
objects = BaseManager()
class Meta:
app_label = "sentry"
db_table = "sentry_message"
verbose_name = _("message")
verbose_name_plural = _("messages")
unique_together = (("project_id", "event_id"),)
index_together = (("group_id", "datetime"),)
__repr__ = sane_repr("project_id", "group_id")
def __getstate__(self):
state = Model.__getstate__(self)
# do not pickle cached info. We want to fetch this on demand
# again. In particular if we were to pickle interfaces we would
# pickle a CanonicalKeyView which old sentry workers do not know
# about
state.pop("_project_cache", None)
state.pop("_environment_cache", None)
state.pop("_group_cache", None)
state.pop("interfaces", None)
return state
class EventSubjectTemplate(string.Template):
idpattern = r"(tag:)?[_a-z][_a-z0-9]*"
class EventSubjectTemplateData(object):
tag_aliases = {"release": "sentry:release", "dist": "sentry:dist", "user": "sentry:user"}
def __init__(self, event):
self.event = event
def __getitem__(self, name):
if name.startswith("tag:"):
name = name[4:]
value = self.event.get_tag(self.tag_aliases.get(name, name))
if value is None:
raise KeyError
return six.text_type(value)
elif name == "project":
return self.event.project.get_full_name()
elif name == "projectID":
return self.event.project.slug
elif name == "shortID" and self.event.group_id:
return self.event.group.qualified_short_id
elif name == "orgID":
return self.event.organization.slug
elif name == "title":
return self.event.title
raise KeyError
DEFAULT_SUBJECT_TEMPLATE = EventSubjectTemplate("$shortID - $title")
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
4e0054a974afc9d91894e6d6b3d4084aa442bab7 | 6ffc81125d6bb5f8476f95b2334a27807b8290de | /flexneuart/featextr_server/python_generated/protocol/ExternalScorer.py | 1e84a976a0fe5638b048d8d10db98a81d6f48eb0 | [
"BSD-2-Clause",
"Apache-2.0"
] | permissive | oaqa/FlexNeuART | 4cb341ca3c3f94fa28a7cfd4aef5451de3a4a2cb | 0bd3e06735ff705731fb6cee62d3486276beccdf | refs/heads/master | 2023-09-01T00:19:33.980081 | 2023-05-26T19:19:30 | 2023-05-26T19:19:30 | 64,071,121 | 156 | 21 | Apache-2.0 | 2023-09-10T01:27:05 | 2016-07-24T15:08:03 | Java | UTF-8 | Python | false | true | 19,129 | py | #
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def getScoresFromParsed(self, query, docs):
"""
Parameters:
- query
- docs
"""
pass
def getScoresFromRaw(self, query, docs):
"""
Parameters:
- query
- docs
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def getScoresFromParsed(self, query, docs):
"""
Parameters:
- query
- docs
"""
self.send_getScoresFromParsed(query, docs)
return self.recv_getScoresFromParsed()
def send_getScoresFromParsed(self, query, docs):
self._oprot.writeMessageBegin('getScoresFromParsed', TMessageType.CALL, self._seqid)
args = getScoresFromParsed_args()
args.query = query
args.docs = docs
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getScoresFromParsed(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getScoresFromParsed_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getScoresFromParsed failed: unknown result")
def getScoresFromRaw(self, query, docs):
"""
Parameters:
- query
- docs
"""
self.send_getScoresFromRaw(query, docs)
return self.recv_getScoresFromRaw()
def send_getScoresFromRaw(self, query, docs):
self._oprot.writeMessageBegin('getScoresFromRaw', TMessageType.CALL, self._seqid)
args = getScoresFromRaw_args()
args.query = query
args.docs = docs
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getScoresFromRaw(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getScoresFromRaw_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.err is not None:
raise result.err
raise TApplicationException(TApplicationException.MISSING_RESULT, "getScoresFromRaw failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["getScoresFromParsed"] = Processor.process_getScoresFromParsed
self._processMap["getScoresFromRaw"] = Processor.process_getScoresFromRaw
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_getScoresFromParsed(self, seqid, iprot, oprot):
args = getScoresFromParsed_args()
args.read(iprot)
iprot.readMessageEnd()
result = getScoresFromParsed_result()
try:
result.success = self._handler.getScoresFromParsed(args.query, args.docs)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getScoresFromParsed", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getScoresFromRaw(self, seqid, iprot, oprot):
args = getScoresFromRaw_args()
args.read(iprot)
iprot.readMessageEnd()
result = getScoresFromRaw_result()
try:
result.success = self._handler.getScoresFromRaw(args.query, args.docs)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except ScoringException as err:
msg_type = TMessageType.REPLY
result.err = err
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getScoresFromRaw", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class getScoresFromParsed_args(object):
"""
Attributes:
- query
- docs
"""
def __init__(self, query=None, docs=None,):
self.query = query
self.docs = docs
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.query = TextEntryParsed()
self.query.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.docs = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in range(_size7):
_elem12 = TextEntryParsed()
_elem12.read(iprot)
self.docs.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getScoresFromParsed_args')
if self.query is not None:
oprot.writeFieldBegin('query', TType.STRUCT, 1)
self.query.write(oprot)
oprot.writeFieldEnd()
if self.docs is not None:
oprot.writeFieldBegin('docs', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.docs))
for iter13 in self.docs:
iter13.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.query is None:
raise TProtocolException(message='Required field query is unset!')
if self.docs is None:
raise TProtocolException(message='Required field docs is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getScoresFromParsed_args)
getScoresFromParsed_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'query', [TextEntryParsed, None], None, ), # 1
(2, TType.LIST, 'docs', (TType.STRUCT, [TextEntryParsed, None], False), None, ), # 2
)
class getScoresFromParsed_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype15, _vtype16, _size14) = iprot.readMapBegin()
for _i18 in range(_size14):
_key19 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val20 = []
(_etype24, _size21) = iprot.readListBegin()
for _i25 in range(_size21):
_elem26 = iprot.readDouble()
_val20.append(_elem26)
iprot.readListEnd()
self.success[_key19] = _val20
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getScoresFromParsed_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.success))
for kiter27, viter28 in self.success.items():
oprot.writeString(kiter27.encode('utf-8') if sys.version_info[0] == 2 else kiter27)
oprot.writeListBegin(TType.DOUBLE, len(viter28))
for iter29 in viter28:
oprot.writeDouble(iter29)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getScoresFromParsed_result)
getScoresFromParsed_result.thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING, 'UTF8', TType.LIST, (TType.DOUBLE, None, False), False), None, ), # 0
)
class getScoresFromRaw_args(object):
"""
Attributes:
- query
- docs
"""
def __init__(self, query=None, docs=None,):
self.query = query
self.docs = docs
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.query = TextEntryRaw()
self.query.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.docs = []
(_etype33, _size30) = iprot.readListBegin()
for _i34 in range(_size30):
_elem35 = TextEntryRaw()
_elem35.read(iprot)
self.docs.append(_elem35)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getScoresFromRaw_args')
if self.query is not None:
oprot.writeFieldBegin('query', TType.STRUCT, 1)
self.query.write(oprot)
oprot.writeFieldEnd()
if self.docs is not None:
oprot.writeFieldBegin('docs', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.docs))
for iter36 in self.docs:
iter36.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.query is None:
raise TProtocolException(message='Required field query is unset!')
if self.docs is None:
raise TProtocolException(message='Required field docs is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getScoresFromRaw_args)
getScoresFromRaw_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'query', [TextEntryRaw, None], None, ), # 1
(2, TType.LIST, 'docs', (TType.STRUCT, [TextEntryRaw, None], False), None, ), # 2
)
class getScoresFromRaw_result(object):
"""
Attributes:
- success
- err
"""
def __init__(self, success=None, err=None,):
self.success = success
self.err = err
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype38, _vtype39, _size37) = iprot.readMapBegin()
for _i41 in range(_size37):
_key42 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val43 = []
(_etype47, _size44) = iprot.readListBegin()
for _i48 in range(_size44):
_elem49 = iprot.readDouble()
_val43.append(_elem49)
iprot.readListEnd()
self.success[_key42] = _val43
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.err = ScoringException()
self.err.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('getScoresFromRaw_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.success))
for kiter50, viter51 in self.success.items():
oprot.writeString(kiter50.encode('utf-8') if sys.version_info[0] == 2 else kiter50)
oprot.writeListBegin(TType.DOUBLE, len(viter51))
for iter52 in viter51:
oprot.writeDouble(iter52)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.err is not None:
oprot.writeFieldBegin('err', TType.STRUCT, 1)
self.err.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(getScoresFromRaw_result)
getScoresFromRaw_result.thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING, 'UTF8', TType.LIST, (TType.DOUBLE, None, False), False), None, ), # 0
(1, TType.STRUCT, 'err', [ScoringException, None], None, ), # 1
)
fix_spec(all_structs)
del all_structs
| [
"leo@boytsov.info"
] | leo@boytsov.info |
f8671f2e8b5bf4e1f21484e59b893f0ce9fb25ba | b06a317fd3d1d0f27d8d14731d2d84a1963d98eb | /commons/c2cgeoportal_commons/alembic/main/6a412d9437b1_rename_serverogc_to_ogcserver.py | d82ee0bd1f3fe67745fb033bdd9ed1952959a3f3 | [
"BSD-2-Clause-Views"
] | permissive | samupl/c2cgeoportal | 2844be2376b0598307a4c3e0732aa4e7d196d3be | 63a27ceacb47cc1db00d853b507ee3d568320a48 | refs/heads/master | 2020-03-30T00:58:58.166112 | 2018-09-26T17:42:37 | 2018-09-26T17:42:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,441 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2018, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""Rename ServerOGC to OGCServer
Revision ID: 6a412d9437b1
Revises: 29f2a32859ec
Create Date: 2016-06-28 18:08:23.888198
"""
from alembic import op
from c2c.template.config import config
# revision identifiers, used by Alembic.
revision = '6a412d9437b1'
down_revision = '29f2a32859ec'
branch_labels = None
depends_on = None
def upgrade():
schema = config['schema']
op.rename_table('server_ogc', 'ogc_server', schema=schema)
with op.batch_alter_table('layer_wms', schema=schema) as table_op:
table_op.alter_column('server_ogc_id', new_column_name='ogc_server_id')
def downgrade():
schema = config['schema']
op.rename_table('ogc_server', 'server_ogc', schema=schema)
with op.batch_alter_table('layer_wms', schema=schema) as table_op:
table_op.alter_column('ogc_server_id', new_column_name='server_ogc_id')
| [
"stephane.brunner@camptocamp.com"
] | stephane.brunner@camptocamp.com |
17f5cdfad8ea3aa5b4807f8d00ed3fd9be67775e | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/2021-5-15/python_re2_test_file/regexlib_3424.py | 88dbd13df9a356348055fff7129a8f2730844f25 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | # 3424
# (\s*<(\w*)\s+(((\w*)(=["']?)([\w\W\d]*?)["']?)+)\s*/?>)
# EXPONENT
# nums:5
# EXPONENT AttackString:"< "+"0="*16+"!_1_EOA(i or ii)"
import re2 as re
from time import perf_counter
regex = """(\s*<(\w*)\s+(((\w*)(=["']?)([\w\W\d]*?)["']?)+)\s*/?>)"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "< " + "0=" * i * 1 + "!_1_EOA(i or ii)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | [
"liyt@ios.ac.cn"
] | liyt@ios.ac.cn |
7940b6f81821c86a3a9703791694818e2cf56511 | c7d08810eaa13882c65c2f2cf6b4eaa68239910b | /resume_autofill_site/resume_autofill_site/settings.py | ceb958789c264fa96a9cb770246abb973b5d0109 | [] | no_license | thayton/resume-auto-fill | cbcf9b9a81ef7ed7b28c51bebdf8b863bcab7e2d | a5bccb0c422b4d8ce36f15af29ef70847cf24c48 | refs/heads/master | 2021-01-24T22:13:34.514409 | 2014-11-03T20:16:51 | 2014-11-03T20:16:51 | 24,721,068 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,243 | py | """
Django settings for resume_autofill_site project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm2^pd!sgx&ixcu&3u9lsly4e#u6g+7a-z$pi(uhp4433smnk5u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'resume_autofill',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'resume_autofill_site.urls'
WSGI_APPLICATION = 'resume_autofill_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'resume_autofill',
'USER': os.environ.get("RESUME_AUTOFILL_DB_USER", ''),
'PASSWORD': os.environ.get("RESUME_AUTOFILL_DB_PASSWORD", ''),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| [
"thayton@neekanee.com"
] | thayton@neekanee.com |
3ad723a11f004fabb06e1714eac83026827275b7 | c61310b3da23494fdcd2eae31b26a7c97dbab8a8 | /bl2_save_edit.py | 8ac96670f77ccdc7e2bdd5c9ff7fd46eaa408255 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Riroaki/borderlands2 | d64971ed73b6bf593c12435e4c503520d916849d | f400305e38445fc944d49f6fa8a0588eea25a86c | refs/heads/master | 2022-03-24T11:41:30.670307 | 2019-11-16T18:15:05 | 2019-11-16T18:15:05 | 262,360,555 | 1 | 0 | null | 2020-05-08T15:32:01 | 2020-05-08T15:32:01 | null | UTF-8 | Python | false | false | 673 | py | #!/usr/bin/env python3
import sys
import traceback
from borderlands.bl2 import AppBL2
if __name__ == "__main__":
try:
app = AppBL2(sys.argv[1:])
app.run()
except Exception as e:
print('Something went wrong, but please ensure you have the latest', file=sys.stderr)
print('version from https://github.com/apocalyptech/borderlands2 before', file=sys.stderr)
print('reporting a bug. Information useful for a report follows:', file=sys.stderr)
print('', file=sys.stderr)
print(repr(sys.argv), file=sys.stderr)
print('', file=sys.stderr)
traceback.print_exc(None, sys.stderr)
sys.exit(1)
| [
"pez@apocalyptech.com"
] | pez@apocalyptech.com |
865ca12781577b88d3a2bea93f25428c522a73ce | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/86/usersdata/160/53060/submittedfiles/pico.py | 2709fb36f0f75f68fe6937ef2397ffef9447892c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # -*- coding: utf-8 -*-
def pico(n):
#CONTINUE...
cont=0
for i in range(0,len(n)-1,1):
if n[i]<n[i] and n[i]<n[i]:
cont=cont+1
if cont!=0:
return(True)
else:
return(False)
n = int(input('Digite a quantidade de elementos da lista: '))
#CONTINUE...
a=[]
for i in range(1,n+1,1):
valor=int(input('Digite o elementos da lista:'))
a.append(valor)
if pico(a):
print('N')
else:
print('S')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c1721a80983a02b687c9b844a9bb852d595586f8 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/coderByte_20200518204427.py | 3e1b5f8da2ca300d3e716d8bb95cc003dbd1d538 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py |
def QuestionsMarks(str):
numbers = []
# others = []
for char in str:
if char.isdigit():
numbers.append(int(char))
elif char == '?':
numbers.append(char)
for i in range(len(numbers)):
print(i)
print(numbers[i])
if numbers[i] =
# break
print(numbers.pop())
return str
# keep this function call here
QuestionsMarks("acc?7??sss?3rr1??????5") | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
49fcbd8414edd891f3a5061eeebfcc6931da6384 | a667d52d9be08aab9f101952dfede0e29a43b012 | /src/apps/escuela/forms.py | ae8961a84de3e12b386ae7f35317ad2fbc91c7a8 | [] | no_license | Epatzan/app-suni | 50b04bbf58417e8cfba77a5e00c2d6a5a6537c16 | b7f9eaf62942797cd4f222ea2fb82348304aeaf4 | refs/heads/master | 2023-08-21T13:15:44.826842 | 2018-03-15T14:00:30 | 2018-03-15T14:00:30 | 123,631,541 | 0 | 0 | null | 2018-03-02T21:13:55 | 2018-03-02T21:13:55 | null | UTF-8 | Python | false | false | 5,314 | py | from django import forms
from datetime import date
from django.core.urlresolvers import reverse_lazy
from django.forms.models import inlineformset_factory
from django.forms.formsets import BaseFormSet, formset_factory
from apps.escuela.models import (
Escuela, EscContacto, EscContactoTelefono,
EscContactoMail, EscNivel, EscSector, EscPoblacion,
EscMatricula, EscRendimientoAcademico)
from apps.main.models import Departamento, Municipio
from apps.mye.models import Cooperante, Proyecto
class FormEscuelaCrear(forms.ModelForm):
lat = forms.CharField(
required=False,
label='Latitud',
widget=forms.NumberInput(attrs={'step': 'any'}))
lng = forms.CharField(
required=False,
label='Longitud',
widget=forms.NumberInput(attrs={'step': 'any'}))
class Meta:
model = Escuela
fields = '__all__'
exclude = ['mapa']
widgets = {
'municipio': forms.Select(attrs={'class': 'select2'})
}
class EscuelaBuscarForm(forms.Form):
ESTADO_CHOICES = (
(None, 'No importa'),
(False, 'Sí'),
(True, 'No'),)
codigo = forms.CharField(
label='Código',
required=False)
nombre = forms.CharField(
required=False)
direccion = forms.CharField(
label='Dirección',
widget=forms.TextInput(),
required=False)
departamento = forms.ModelChoiceField(
queryset=Departamento.objects.all(),
widget=forms.Select(attrs={'data-url': reverse_lazy('municipio_api_list')}),
required=False)
municipio = forms.ModelChoiceField(
queryset=Municipio.objects.all(),
required=False)
nivel = forms.ModelChoiceField(
queryset=EscNivel.objects.all(),
required=False)
sector = forms.ModelChoiceField(
queryset=EscSector.objects.all(),
required=False)
poblacion_min = forms.IntegerField(
label='Población mínima',
required=False)
poblacion_max = forms.IntegerField(
label='Población máxima',
required=False)
solicitud = forms.ChoiceField(
required=False,
choices=ESTADO_CHOICES)
solicitud_id = forms.IntegerField(
label='Número de solicitud',
min_value=1,
required=False)
validacion = forms.ChoiceField(
label='Validación',
required=False,
choices=ESTADO_CHOICES)
validacion_id = forms.IntegerField(
label='Número de validación',
min_value=1,
required=False)
equipamiento = forms.ChoiceField(
required=False,
choices=ESTADO_CHOICES)
equipamiento_id = forms.IntegerField(
label='Número de entrega',
min_value=1,
required=False)
cooperante_tpe = forms.ModelChoiceField(
label='Cooperante de equipamiento',
queryset=Cooperante.objects.all(),
required=False)
proyecto_tpe = forms.ModelChoiceField(
label='Proyecto de equipamiento',
queryset=Proyecto.objects.all(),
required=False)
class EscPoblacionForm(forms.ModelForm):
fecha = forms.DateField(
initial=date.today(),
widget=forms.TextInput(attrs={'class': 'datepicker'}))
class Meta:
model = EscPoblacion
fields = '__all__'
widgets = {
'escuela': forms.HiddenInput()
}
class ContactoForm(forms.ModelForm):
telefono = forms.CharField(
required=False,
widget=forms.NumberInput(attrs={'class': 'form-control', 'min': 0}))
mail = forms.EmailField(
required=False,
widget=forms.EmailInput(attrs={'class': 'form-control'}))
class Meta:
model = EscContacto
fields = '__all__'
widgets = {
'escuela': forms.HiddenInput()
}
class EscContactoTelefonoForm(forms.ModelForm):
class Meta:
model = EscContactoTelefono
fields = '__all__'
exclude = ['contacto']
class EscContactoTelefonoFormset(BaseFormSet):
def clean(self):
telefonos = []
if any(self.errors):
return
for form in self.forms:
if form.cleaned_data:
telefono = form.cleaned_data['telefono']
if telefono in telefonos:
raise forms.ValidationError('Los números no pueden repetirse')
telefonos.append(telefono)
ContactoTelefonoFormSet = inlineformset_factory(
EscContacto,
EscContactoTelefono,
fields='__all__',
extra=1,
can_delete=True)
ContactoMailFormSet = inlineformset_factory(
EscContacto,
EscContactoMail,
fields='__all__',
extra=1,
can_delete=True)
MailFormSet = formset_factory(EscContactoTelefonoFormset, formset=EscContactoTelefonoFormset)
class EscMatriculaForm(forms.ModelForm):
class Meta:
model = EscMatricula
fields = '__all__'
widgets = {
'escuela': forms.HiddenInput()
}
class EscRendimientoAcademicoForm(forms.ModelForm):
"""Formulario para crear un registro de `:class:EscRendimientoAcademico`
desde una escuela.
"""
class Meta:
model = EscRendimientoAcademico
fields = '__all__'
widgets = {
'escuela': forms.HiddenInput()
}
| [
"jinchuika@gmail.com"
] | jinchuika@gmail.com |
292d5b2771a74f723aa8855a241c3ca6fec6014f | d4432b0c95e5a25c489f825ba0f44e0ecd958669 | /lessons stormnet/lesson3/incapculation.py | 1ce2277132010aa31e15d8a698217dadd0fd5c79 | [] | no_license | romanannaev/python | 1c250c425224ab824492e4893edf786e35b14369 | e2a9015bfceeac940936758f84f0dfbf67897f1d | refs/heads/master | 2021-07-05T23:39:05.480997 | 2019-11-13T10:44:00 | 2019-11-13T10:44:00 | 175,044,359 | 2 | 0 | null | 2020-09-04T20:42:14 | 2019-03-11T16:50:24 | CSS | UTF-8 | Python | false | false | 280 | py | class Cat:
def __init__(self, name, female):
    # __name is name-mangled to _Cat__name, so it is not reachable as
    # `instance.__name` from outside the class (see the demo calls below).
    self.__name = name
    self.female = female
def __get_mur(self):
    # Double-underscore prefix: mangled to _Cat__get_mur, i.e. "private".
    print('mur-mur-mur')
mursik = Cat('mursik', 'kot')
# print(mursik.__name)   # would raise AttributeError: mangling hides __name
print(mursik.female)
# mursik.__get_mur()     # would raise AttributeError: private method is mangled
mursik._Cat__get_mur()   # the mangled name works, but bypasses encapsulation
| [
"romanannaev1992@gmail.com"
] | romanannaev1992@gmail.com |
13790a927b2c4f1b823d74cc945464fd1e8c337e | ca1ba1b871845bb4c17d1947eac5f59907b17f71 | /deepx/nn/__init__.py | 4dc6e4b4ecade811dde9ebfced579bf1f5262bb0 | [
"MIT"
] | permissive | adcengiz/beermind | 695808ab7af51a861e74cbdc7c406bea637e8f19 | 695fd1b2f6d376b6dd9f70ba2cd2c13e5efb9c47 | refs/heads/master | 2021-05-30T07:41:26.927864 | 2015-11-23T10:12:02 | 2015-11-23T10:12:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | from model import ParameterModel
from lstm import LSTM, LSTMLayer
from softmax import Softmax
| [
"sharad.vikram@gmail.com"
] | sharad.vikram@gmail.com |
249b5bd914c4442573b5cc09828915b2858e7e59 | 759d0ef07c5473dfdef37454e34259771d16deab | /GJMorph/auxFuncs.py | f074ad51570cc1232767f0014f87d6980ac556ef | [
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] | permissive | wachtlerlab/GJMorph | 3442ba85c08303238854f64b641c80197f7eb55f | a158e825ae3c20f94e1a6b12b2578aa1b3f42a8b | refs/heads/master | 2020-03-21T15:57:52.466404 | 2019-08-05T18:41:26 | 2019-08-05T18:41:26 | 138,742,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,867 | py | import numpy as np
# **********************************************************************************************************************
def resampleSWC(swcFile, resampleLength, mask=None, swcData=None, calculateBranchLens=False):
    '''
    Resample the SWC points to place points at every resampleLength along the central
    line of every segment. Radii are linearly interpolated. Node IDs are renumbered
    consecutively starting from 1; a point whose parent lies outside `mask` (or outside
    the data) is dropped.

    :param swcFile: path to an SWC file; only read when `swcData` is None (also used
        in the mask assertion message)
    :param resampleLength: length at which resampling is done
    :param mask: optional boolean list, one entry per input row; rows with False are skipped
    :param swcData: optional pre-loaded nx7 SWC point data (ID, type, x, y, z, r, parent)
    :param calculateBranchLens: when True, also return per-segment centers and lengths
    :return: if calculateBranchLens: (branchCenters, branchLens, resampledSWCData);
        otherwise (totalLen, resampledSWCData), where resampledSWCData is an ndarray
        of shape (#pts, 7) with rows (node ID, node type, x, y, z, r, parent ID)
    '''
    if swcData is None:
        swcData = np.loadtxt(swcFile)
    # Original node IDs (as a list of floats) used for parent lookups below.
    inds = swcData[:, 0].tolist()
    # Maps original node ID -> new (renumbered) node ID.
    oldNewDict = {}
    currentMax = 1
    if mask is None:
        mask = [True] * swcData.shape[0]
    else:
        assert len(mask) == swcData.shape[0], 'Supplied mask is invalid for ' + swcFile
    resampledSWCData = []
    getSegLen = lambda a, b: np.linalg.norm(a - b)
    if calculateBranchLens:
        branchCenters = []
        branchLens = []
    totalLen = 0
    for pt in swcData:
        # Root nodes (parent < 0) are copied through unchanged.
        if pt[6] < 0:
            if mask[inds.index(int(pt[0]))]:
                resampledSWCData.append([currentMax] + pt[1:].tolist())
                oldNewDict[pt[0]] = currentMax
                currentMax += 1
        # Non-root nodes: only processed when the parent exists in the data.
        if (pt[6] > 0) and (int(pt[6]) in inds):
            if mask[inds.index(int(pt[0]))]:
                parentPt = swcData[inds.index(pt[6]), :]
                segLen = getSegLen(pt[2:5], parentPt[2:5])
                totalLen += segLen
                currentParent = oldNewDict[pt[6]]
                if segLen > resampleLength:
                    # Segment longer than the resample length: insert
                    # intermediate points every resampleLength along it.
                    temp = pt[2:5] - parentPt[2:5]
                    distTemp = np.linalg.norm(temp)
                    unitDirection = temp / distTemp
                    # Radius gradient for linear interpolation along the segment.
                    radGrad = (pt[5] - parentPt[5]) / distTemp
                    for newPtsInd in range(1, int(np.floor(segLen / resampleLength)) + 1):
                        temp = [currentMax, pt[1]] + \
                               (parentPt[2:5] + newPtsInd * resampleLength * unitDirection).tolist()
                        temp.append(parentPt[5] + newPtsInd * radGrad * resampleLength)
                        if calculateBranchLens:
                            branchLens.append(resampleLength)
                            branchCenters.append(parentPt[2:5] + (newPtsInd - 0.5) * resampleLength * unitDirection)
                        temp.append(currentParent)
                        currentParent = currentMax
                        currentMax += 1
                        resampledSWCData.append(temp)
                    # Remainder piece from the last inserted point to pt itself.
                    if calculateBranchLens:
                        branchCenters.append(0.5 * (pt[2:5] + np.array(resampledSWCData[-1][2:5])))
                        branchLens.append(np.linalg.norm(pt[2:5] - np.array(resampledSWCData[-1][2:5])))
                    resampledSWCData.append([currentMax] + pt[1:6].tolist() + [currentParent])
                    oldNewDict[pt[0]] = currentMax
                    currentMax += 1
                else:
                    # Segment already short enough: keep the endpoint as-is.
                    if calculateBranchLens:
                        branchCenters.append(0.5 * (pt[2:5] + parentPt[2:5]))
                        branchLens.append(segLen)
                    resampledSWCData.append([currentMax] + pt[1:6].tolist() + [currentParent])
                    oldNewDict[pt[0]] = currentMax
                    currentMax += 1
    if calculateBranchLens:
        return np.array(branchCenters), np.array(branchLens), np.array(resampledSWCData)
    else:
        return totalLen, np.array(resampledSWCData)
#***********************************************************************************************************************
def windowSWCPts(branchMeans, gridSize, translationIndicator=(0, 0, 0)):
    """
    Custom internal function, use at your own risk!
    Snap each 3D point in `branchMeans` to the center of its voxel, where voxels
    are cubes of side `gridSize` laid out so that one voxel center sits at
    - <translationIndicator> * <gridSize> * 0.5 (the origin when the indicator
    is all zeros).
    :param branchMeans: np.array of shape (nRows, 3)
    :param gridSize: float
    :param translationIndicator: three member iterable of floats
    :return: voxelCenters, np.array of shape (nRows, 3), rounded to 6 digits
    """
    shift = np.array(translationIndicator) * gridSize * 0.5
    shifted = branchMeans + shift
    nearestVoxel = np.array(np.round(shifted / gridSize), dtype=np.int32)
    return np.round(nearestVoxel * gridSize - shift, 6)
"ajkumaraswamy@tutamail.com"
] | ajkumaraswamy@tutamail.com |
13b5621657465c1700dcd523a472a763187d94a1 | e60487a8f5aad5aab16e671dcd00f0e64379961b | /project/myVacations/vacay/migrations/0002_auto_20191218_2308.py | aef339aafb17c3650a5c1f96f30cfd82d74f60dd | [] | no_license | reenadangi/python | 4fde31737e5745bc5650d015e3fa4354ce9e87a9 | 568221ba417dda3be7f2ef1d2f393a7dea6ccb74 | refs/heads/master | 2021-08-18T08:25:40.774877 | 2021-03-27T22:20:17 | 2021-03-27T22:20:17 | 247,536,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | # Generated by Django 2.2.7 on 2019-12-19 05:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: switches Destination.album_title to TextField.

    dependencies = [
        ('vacay', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='destination',
            name='album_title',
            field=models.TextField(),
        ),
    ]
| [
"reena.dangi@gmail.com"
] | reena.dangi@gmail.com |
eb590e4c84b9786a061331130dccde977b2d21b1 | fff54b01b46cef0bbc70a6469c88c01c82af5a57 | /programming/language/python3/python3-sip/actions.py | c8d1ffbb8fe07a364656fc2c7d076ef632a8a3fd | [] | no_license | LimeLinux/Packages | e51deae6c0d1406e31f06caa5aaa7749466bef0b | d492e075d8b051df68b98c315ad0628e33a8fac4 | refs/heads/master | 2021-01-11T12:37:22.150638 | 2018-08-30T18:24:32 | 2018-08-30T18:24:32 | 77,054,292 | 5 | 19 | null | 2018-02-02T17:24:06 | 2016-12-21T13:33:45 | Python | UTF-8 | Python | false | false | 737 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import get
# pisi builds from inside the unpacked sip source tarball.
WorkDir = "sip-%s" % get.srcVERSION()

def setup():
    # Rewrite every Python.h include to the python3.6m header location, then
    # run sip's configure.py for Python 3 with the distribution build flags.
    shelltools.system("find . -type f -exec sed -i 's/Python.h/python3.6m\/Python.h/g' {} \;")
    pythonmodules.run('configure.py CFLAGS="%s" CXXFLAGS="%s"' % (get.CFLAGS(), get.CXXFLAGS()), pyVer = "3")

def build():
    # Plain `make` inside WorkDir.
    autotools.make()

def install():
    # `make install` staged into the package's install directory.
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
| [
"ergunsalman@hotmail.com"
] | ergunsalman@hotmail.com |
0730eda28f9a1c5ce0c14000cca5e3f67947326c | 1bfad01139237049eded6c42981ee9b4c09bb6de | /RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/pimsm/router/interface/crprange/crprange.py | 92777d7e1071268c9dcf7c21200e7017df67f42d | [
"MIT"
] | permissive | kakkotetsu/IxNetwork | 3a395c2b4de1488994a0cfe51bca36d21e4368a5 | f9fb614b51bb8988af035967991ad36702933274 | refs/heads/master | 2020-04-22T09:46:37.408010 | 2019-02-07T18:12:20 | 2019-02-07T18:12:20 | 170,284,084 | 0 | 0 | MIT | 2019-02-12T08:51:02 | 2019-02-12T08:51:01 | null | UTF-8 | Python | false | false | 11,915 | py |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class CrpRange(Base):
    """The CrpRange class encapsulates a user managed crpRange node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the CrpRange property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
    The internal properties list can be managed by the user by using the add and remove methods.
    """

    # Name of this node in the ixnetwork hierarchy (used by the Base helpers).
    _SDM_NAME = 'crpRange'

    def __init__(self, parent):
        super(CrpRange, self).__init__(parent)

    # Each property below is a thin getter/setter pair over the generic
    # attribute accessors provided by Base.

    @property
    def AdvertisementHoldTime(self):
        """The time interval (in seconds) between two consecutive Candidate RP advertisements.

        Returns:
            number
        """
        return self._get_attribute('advertisementHoldTime')
    @AdvertisementHoldTime.setter
    def AdvertisementHoldTime(self, value):
        self._set_attribute('advertisementHoldTime', value)

    @property
    def BackOffInterval(self):
        """The back off time interval for the C-RP-Adv messages.

        Returns:
            number
        """
        return self._get_attribute('backOffInterval')
    @BackOffInterval.setter
    def BackOffInterval(self, value):
        self._set_attribute('backOffInterval', value)

    @property
    def CrpAddress(self):
        """Start address of the set of candidate RPs to be simulated.

        Returns:
            str
        """
        return self._get_attribute('crpAddress')
    @CrpAddress.setter
    def CrpAddress(self, value):
        self._set_attribute('crpAddress', value)

    @property
    def Enabled(self):
        """Enables/disables a Candidate RP range on the fly. The default is disabled.

        Returns:
            bool
        """
        return self._get_attribute('enabled')
    @Enabled.setter
    def Enabled(self, value):
        self._set_attribute('enabled', value)

    @property
    def GroupAddress(self):
        """Starting group address of the group range for which the candidate RP will advertise candidacy.

        Returns:
            str
        """
        return self._get_attribute('groupAddress')
    @GroupAddress.setter
    def GroupAddress(self, value):
        self._set_attribute('groupAddress', value)

    @property
    def GroupCount(self):
        """Number of groups in the range.

        Returns:
            number
        """
        return self._get_attribute('groupCount')
    @GroupCount.setter
    def GroupCount(self, value):
        self._set_attribute('groupCount', value)

    @property
    def GroupMaskLen(self):
        """Mask width (prefix length in bits) for the group range.

        Returns:
            number
        """
        return self._get_attribute('groupMaskLen')
    @GroupMaskLen.setter
    def GroupMaskLen(self, value):
        self._set_attribute('groupMaskLen', value)

    @property
    def MeshingType(self):
        """It indicates if the mappings for groups and RP addresses are Fully-Meshed or One-To-One.

        Returns:
            str(fullyMeshed|oneToOne)
        """
        return self._get_attribute('meshingType')
    @MeshingType.setter
    def MeshingType(self, value):
        self._set_attribute('meshingType', value)

    @property
    def PeriodicAdvertisementInterval(self):
        """Rate controlling variable indicating how many C-RP-Adv messages can be sent in the specified time interval.

        Returns:
            number
        """
        return self._get_attribute('periodicAdvertisementInterval')
    @PeriodicAdvertisementInterval.setter
    def PeriodicAdvertisementInterval(self, value):
        self._set_attribute('periodicAdvertisementInterval', value)

    @property
    def PriorityChangeInterval(self):
        """Time interval after which priority of all the RPs get changed, if priority type is incremental or random.

        Returns:
            number
        """
        return self._get_attribute('priorityChangeInterval')
    @PriorityChangeInterval.setter
    def PriorityChangeInterval(self, value):
        self._set_attribute('priorityChangeInterval', value)

    @property
    def PriorityType(self):
        """It indicates the type of priority to be held by the candidate RPs (CRPs). The options are Same, Incremental, and Random.

        Returns:
            str(same|incremental|random)
        """
        return self._get_attribute('priorityType')
    @PriorityType.setter
    def PriorityType(self, value):
        self._set_attribute('priorityType', value)

    @property
    def PriorityValue(self):
        """Value of priority field sent in candidate RP advertisement messages.

        Returns:
            number
        """
        return self._get_attribute('priorityValue')
    @PriorityValue.setter
    def PriorityValue(self, value):
        self._set_attribute('priorityValue', value)

    @property
    def RouterCount(self):
        """Total number of candidate RPs to be simulated starting from C-RP Address. A contiguous address range is used for this RP range simulation.

        Returns:
            number
        """
        return self._get_attribute('routerCount')
    @RouterCount.setter
    def RouterCount(self, value):
        self._set_attribute('routerCount', value)

    @property
    def TriggeredCrpMessageCount(self):
        """The number of times CRP advertisements is sent to the newly elected Bootstrap Router.

        Returns:
            number
        """
        return self._get_attribute('triggeredCrpMessageCount')
    @TriggeredCrpMessageCount.setter
    def TriggeredCrpMessageCount(self, value):
        self._set_attribute('triggeredCrpMessageCount', value)

    def add(self, AdvertisementHoldTime=None, BackOffInterval=None, CrpAddress=None, Enabled=None, GroupAddress=None, GroupCount=None, GroupMaskLen=None, MeshingType=None, PeriodicAdvertisementInterval=None, PriorityChangeInterval=None, PriorityType=None, PriorityValue=None, RouterCount=None, TriggeredCrpMessageCount=None):
        """Adds a new crpRange node on the server and retrieves it in this instance.

        Args:
            AdvertisementHoldTime (number): The time interval (in seconds) between two consecutive Candidate RP advertisements.
            BackOffInterval (number): The back off time interval for the C-RP-Adv messages.
            CrpAddress (str): Start address of the set of candidate RPs to be simulated.
            Enabled (bool): Enables/disables a Candidate RP range on the fly. The default is disabled.
            GroupAddress (str): Starting group address of the group range for which the candidate RP will advertise candidacy.
            GroupCount (number): Number of groups in the range.
            GroupMaskLen (number): Mask width (prefix length in bits) for the group range.
            MeshingType (str(fullyMeshed|oneToOne)): It indicates if the mappings for groups and RP addresses are Fully-Meshed or One-To-One.
            PeriodicAdvertisementInterval (number): Rate controlling variable indicating how many C-RP-Adv messages can be sent in the specified time interval.
            PriorityChangeInterval (number): Time interval after which priority of all the RPs get changed, if priority type is incremental or random.
            PriorityType (str(same|incremental|random)): It indicates the type of priority to be held by the candidate RPs (CRPs). The options are Same, Incremental, and Random.
            PriorityValue (number): Value of priority field sent in candidate RP advertisement messages.
            RouterCount (number): Total number of candidate RPs to be simulated starting from C-RP Address. A contiguous address range is used for this RP range simulation.
            TriggeredCrpMessageCount (number): The number of times CRP advertisements is sent to the newly elected Bootstrap Router.

        Returns:
            self: This instance with all currently retrieved crpRange data using find and the newly added crpRange data available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() captures all keyword arguments for the generic create call.
        return self._create(locals())

    def remove(self):
        """Deletes all the crpRange data in this instance from server.

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()

    def find(self, AdvertisementHoldTime=None, BackOffInterval=None, CrpAddress=None, Enabled=None, GroupAddress=None, GroupCount=None, GroupMaskLen=None, MeshingType=None, PeriodicAdvertisementInterval=None, PriorityChangeInterval=None, PriorityType=None, PriorityValue=None, RouterCount=None, TriggeredCrpMessageCount=None):
        """Finds and retrieves crpRange data from the server.

        All named parameters support regex and can be used to selectively retrieve crpRange data from the server.
        By default the find method takes no parameters and will retrieve all crpRange data from the server.

        Args:
            AdvertisementHoldTime (number): The time interval (in seconds) between two consecutive Candidate RP advertisements.
            BackOffInterval (number): The back off time interval for the C-RP-Adv messages.
            CrpAddress (str): Start address of the set of candidate RPs to be simulated.
            Enabled (bool): Enables/disables a Candidate RP range on the fly. The default is disabled.
            GroupAddress (str): Starting group address of the group range for which the candidate RP will advertise candidacy.
            GroupCount (number): Number of groups in the range.
            GroupMaskLen (number): Mask width (prefix length in bits) for the group range.
            MeshingType (str(fullyMeshed|oneToOne)): It indicates if the mappings for groups and RP addresses are Fully-Meshed or One-To-One.
            PeriodicAdvertisementInterval (number): Rate controlling variable indicating how many C-RP-Adv messages can be sent in the specified time interval.
            PriorityChangeInterval (number): Time interval after which priority of all the RPs get changed, if priority type is incremental or random.
            PriorityType (str(same|incremental|random)): It indicates the type of priority to be held by the candidate RPs (CRPs). The options are Same, Incremental, and Random.
            PriorityValue (number): Value of priority field sent in candidate RP advertisement messages.
            RouterCount (number): Total number of candidate RPs to be simulated starting from C-RP Address. A contiguous address range is used for this RP range simulation.
            TriggeredCrpMessageCount (number): The number of times CRP advertisements is sent to the newly elected Bootstrap Router.

        Returns:
            self: This instance with matching crpRange data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(locals())

    def read(self, href):
        """Retrieves a single instance of crpRange data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the crpRange data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| [
"hubert.gee@keysight.com"
] | hubert.gee@keysight.com |
e641aa7c71cf66fa224790c752cd052fd1758d64 | aee4c0839933a11d8ce3c485d06595202dd3cabd | /keras/utils/timed_threads.py | 794fd243c42b7d426baedaf569d92cbf771be5b0 | [
"Apache-2.0"
] | permissive | xiaoheilong3112/keras | fc3025a2f14838bf8416b2faed766cb43da62f9b | 8d5e9b2163ec9b7d9f70920d1c7992b6df6820ec | refs/heads/master | 2023-08-07T18:23:36.804563 | 2023-07-25T19:16:12 | 2023-07-25T19:16:48 | 137,238,629 | 1 | 0 | Apache-2.0 | 2023-07-26T05:22:44 | 2018-06-13T15:59:45 | Python | UTF-8 | Python | false | false | 5,379 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Thread utilities."""
import abc
import threading
from absl import logging
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.utils.TimedThread", v1=[])
class TimedThread:
    """Time-based interval Threads.

    Runs a timed thread every x seconds. It can be used to run a threaded
    function alongside model training or any other snippet of code.

    Args:
        interval: The interval, in seconds, to wait between calls to the
            `on_interval` function.
        **kwargs: additional args that are passed to `threading.Thread`. By
            default, `Thread` is started as a `daemon` thread unless
            overridden by the user in `kwargs`.

    Examples:

    ```python
    class TimedLogIterations(keras.utils.TimedThread):
        def __init__(self, model, interval):
            self.model = model
            super().__init__(interval)

        def on_interval(self):
            # Logs Optimizer iterations every x seconds
            try:
                opt_iterations = self.model.optimizer.iterations.numpy()
                print(f"Epoch: {epoch}, Optimizer Iterations: {opt_iterations}")
            except Exception as e:
                print(str(e))  # To prevent thread from getting killed

    # `start` and `stop` the `TimerThread` manually. If the `on_interval` call
    # requires access to `model` or other objects, override `__init__` method.
    # Wrap it in a `try-except` to handle exceptions and `stop` the thread run.
    timed_logs = TimedLogIterations(model=model, interval=5)
    timed_logs.start()
    try:
        model.fit(...)
    finally:
        timed_logs.stop()

    # Alternatively, run the `TimedThread` in a context manager
    with TimedLogIterations(model=model, interval=5):
        model.fit(...)

    # If the timed thread instance needs access to callback events,
    # subclass both `TimedThread` and `Callback`. Note that when calling
    # `super`, they will have to called for each parent class if both of them
    # have the method that needs to be run. Also, note that `Callback` has
    # access to `model` as an attribute and need not be explicitly provided.
    class LogThreadCallback(
        keras.utils.TimedThread, keras.callbacks.Callback
    ):
        def __init__(self, interval):
            self._epoch = 0
            keras.utils.TimedThread.__init__(self, interval)
            keras.callbacks.Callback.__init__(self)

        def on_interval(self):
            if self.epoch:
                opt_iter = self.model.optimizer.iterations.numpy()
                logging.info(f"Epoch: {self._epoch}, Opt Iteration: {opt_iter}")

        def on_epoch_begin(self, epoch, logs=None):
            self._epoch = epoch

    with LogThreadCallback(interval=5) as thread_callback:
        # It's required to pass `thread_callback` to also `callbacks` arg of
        # `model.fit` to be triggered on callback events.
        model.fit(..., callbacks=[thread_callback])
    ```
    """

    def __init__(self, interval, **kwargs):
        self.interval = interval
        # Daemon by default so a forgotten thread does not block interpreter exit.
        self.daemon = kwargs.pop("daemon", True)
        self.thread_kwargs = kwargs
        # Thread and its stop event are created lazily in start().
        self.thread = None
        self.thread_stop_event = None

    def _call_on_interval(self):
        # Runs indefinitely once thread is started; Event.wait doubles as an
        # interruptible sleep so stop() takes effect without waiting a full interval.
        while not self.thread_stop_event.is_set():
            self.on_interval()
            self.thread_stop_event.wait(self.interval)

    def start(self):
        """Creates and starts the thread run."""
        if self.thread and self.thread.is_alive():
            logging.warning("Thread is already running.")
            return
        self.thread = threading.Thread(
            target=self._call_on_interval,
            daemon=self.daemon,
            **self.thread_kwargs
        )
        self.thread_stop_event = threading.Event()
        self.thread.start()

    def stop(self):
        """Stops the thread run."""
        # Signals _call_on_interval to exit; no-op if start() was never called.
        if self.thread_stop_event:
            self.thread_stop_event.set()

    def is_alive(self):
        """Returns True if thread is running. Otherwise returns False."""
        if self.thread:
            return self.thread.is_alive()
        return False

    def __enter__(self):
        # Starts the thread in context manager
        self.start()
        return self

    def __exit__(self, *args, **kwargs):
        # Stops the thread run.
        self.stop()

    @abc.abstractmethod
    def on_interval(self):
        """User-defined behavior that is called in the thread."""
        raise NotImplementedError(
            "Runs every x interval seconds. Needs to be "
            "implemented in subclasses of `TimedThread`"
        )
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
a3fc9fe94927859b1ed6817401054961cd3c0119 | 057eefa3b2c263ccc087ec6ce49fc5a0e513fb05 | /example/messenger.py | 6a099a36cf8cc451c354e77e75714b54aec6230d | [
"MIT"
] | permissive | antikytheraton/hotelBotOne | e01cb2136b04359dfb4b361c3d5d5dd84db87621 | b6e5a09ca068dc76ee5fe8c3af7534091efcdf27 | refs/heads/master | 2021-01-23T04:33:56.882349 | 2017-06-19T22:00:33 | 2017-06-19T22:00:33 | 92,931,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,518 | py | """# coding: utf-8"""
# -*- coding: utf-8 -*-
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
import json
from example.config import CONFIG
from fbmq import Attachment, Template, QuickReply, NotificationType
from example.fbpage import page
# Last-seen message sequence number per "sender:page" pair, used to drop
# duplicate webhook deliveries in received_message.
USER_SEQ = {}
@page.handle_optin
def received_authentication(event):
    """Handle a Messenger opt-in (plugin authentication) event."""
    sender = event.sender_id
    page_id = event.recipient_id
    auth_time = event.timestamp
    ref = event.optin.get("ref")  # developer-defined pass-through parameter
    print("Received authentication for user %s and page %s with pass "
          "through param '%s' at %s" % (sender, page_id, ref, auth_time))
    page.send(sender, "Authentication successful")
@page.handle_echo
def received_echo(event):
    """Log an echo of a message that this page itself sent."""
    msg = event.message
    print("page id : %s , %s" % (page.page_id, page.page_name))
    print("Received echo for message %s and app %s with metadata %s"
          % (msg.get("mid"), msg.get("app_id"), msg.get("metadata")))
@page.handle_message
def received_message(event):
    """Main message webhook: de-duplicate by seq, then handle text or attachments."""
    sender_id = event.sender_id
    recipient_id = event.recipient_id
    time_of_message = event.timestamp
    message = event.message
    print("Received message for user %s and page %s at %s with message:"
          % (sender_id, recipient_id, time_of_message))
    print(message)
    seq = message.get("seq", 0)
    message_id = message.get("mid")
    app_id = message.get("app_id")
    metadata = message.get("metadata")
    message_text = message.get("text")
    message_attachments = message.get("attachments")
    quick_reply = message.get("quick_reply")
    # Drop webhook retries: remember the highest sequence number seen per
    # (sender, page) pair and ignore anything at or below it.
    # NOTE(review): USER_SEQ grows without bound over the process lifetime.
    seq_id = sender_id + ':' + recipient_id
    if USER_SEQ.get(seq_id, -1) >= seq:
        print("Ignore duplicated request")
        return None
    else:
        USER_SEQ[seq_id] = seq
    # if quick_reply:
    #     quick_reply_payload = quick_reply.get('payload')
    #     print("quick reply for message %s with payload %s" % (message_id, quick_reply_payload))
    #
    #     page.send(sender_id, "Quick reply tapped")
    if message_text:
        send_message(sender_id, message_text)
    elif message_attachments:
        # Attachments (images, audio, location, ...) are not supported yet.
        print('-------------------------------message_attachments-------------------------------------')
        print('message with attachments')
        #page.send(sender_id, "Message with attachment received")
        page.send(sender_id, "Lo siento, por ahora solo reconozco texto")
        page.send(sender_id, "Prueba con un hola ;)")
@page.handle_delivery
def received_delivery_confirmation(event):
    """Log Messenger delivery confirmations."""
    delivery = event.delivery
    mids = delivery.get("mids")
    watermark = delivery.get("watermark")
    for mid in (mids or []):
        print("Received delivery confirmation for message ID: %s" % mid)
    print("All message before %s were delivered." % watermark)
@page.handle_postback
def received_postback(event):
    """Route button-postback payloads to the matching hotel demo flow."""
    sender_id = event.sender_id
    recipient_id = event.recipient_id
    time_of_postback = event.timestamp
    payload = event.postback_payload
    print('-------------------------postback_payload----------------------------------------')
    print(payload)
    print("Received postback for user %s and page %s with payload '%s' at %s"
          % (sender_id, recipient_id, payload, time_of_postback))
    if payload == 'hoteles_playa':
        # Beach-hotel promos: a generic carousel with two bookable hotels.
        page.send(sender_id, "Tengo dos promociones para ti")
        page.send(sender_id, Template.Generic([
            Template.GenericElement("Hotel Cancún",
                                    subtitle="Reserva ahora tu hotel en Cancún",
                                    item_url="http://www.mariachi.io/",
                                    image_url=CONFIG['SERVER_URL'] + "/assets/hotel_cancun.jpg",
                                    buttons=[
                                        Template.ButtonPostBack("reservar", "hotel_cancun")
                                    ]),
            Template.GenericElement("Hotel Cabo",
                                    subtitle="Reserva ahora tu hotel en los Cabos",
                                    item_url="http://www.mariachi.io/",
                                    image_url=CONFIG['SERVER_URL'] + "/assets/hotel_cabo.jpg",
                                    buttons=[
                                        Template.ButtonPostBack("reservar", "hotel_cabo")
                                    ])
        ]))
        page.send(sender_id, "La cuarta noche es gratis si reservas desde el chatbot!!!")
    elif payload == 'hotel_cancun' or payload == 'hotel_cabo':
        page.send(sender_id, "Reservación exitosa!!")
    elif payload == 'hotel_ibis' or payload == 'hotel_francia':
        # Payment step of the city-hotel flow.
        page.send(sender_id, Template.Buttons("Selecciona tu método de pago", [
            Template.ButtonWeb("Tarjeta crédito", "https://akrocard.com/wp-content/uploads/2015/05/tarjeta-pvc-chip-CPU.png"),
            Template.ButtonWeb("Tarjeta débito", "https://akrocard.com/wp-content/uploads/2015/05/tarjeta-pvc-chip-CPU.png")
        ]))
    elif payload == 'reservar_habitacion':
        page.send(sender_id, "Por favor indícame separando con comas, ciudad, estado, fecha de llegada y fecha de partida iniciando por día, mes y año (01, 02, 17)")
    elif payload == 'hoteles_cercanos':
        # Ask for the user's location via quick replies.
        page.send(sender_id, "Claro que sí, será un placer hospedarte.")
        page.send(sender_id, "Compartir ubicacion?",
                  quick_replies=[QuickReply(title="compartir", payload="compartir"),
                                 QuickReply(title="en otro momento", payload="en otro momento")],
                  metadata="DEVELOPER_DEFINED_METADATA")
    elif payload == 'servicios_hotel':
        page.send(sender_id, "Hola, tambien puedo ayudarte con servicio directo a tu habitación")
        page.send(sender_id, "En que te podemos servir?")
    else:
        # Unknown payloads fall through to a generic acknowledgement.
        page.send(sender_id, "Postback called")
@page.handle_read
def received_message_read(event):
    """Log read receipts."""
    read_info = event.read
    print("Received message read event for watermark %s and sequence number %s"
          % (read_info.get("watermark"), read_info.get("seq")))
@page.handle_account_linking
def received_account_link(event):
    """Log account linking/unlinking events."""
    sender = event.sender_id
    linking = event.account_linking
    print("Received account link event with for user %s with status %s and auth code %s "
          % (sender, linking.get("status"), linking.get("authorization_code")))
def send_message(recipient_id, text):
    """Dispatch `text` to the matching demo handler, or echo it back.

    Matching is exact and case-sensitive against the keyword table below.
    """
    special_keywords = {
        "Image": send_image,
        "Gif": send_gif,
        "Audio": send_audio,
        "Video": send_video,
        "File": send_file,
        "Button": send_button,
        "Generic": send_generic,
        "Receipt": send_receipt,
        "Quick reply": send_quick_reply,
        "Read receipt": send_read_receipt,
        "Typing on": send_typing_on,
        "Typing off": send_typing_off,
        "Account linking": send_account_linking,
        "Hi": send_menu,
        "Holi": send_menu,
        "Hola": send_menu,
        "Hola papu": send_menu,
        "Hola bot": send_menu,
        "Holi bot": send_menu,
        "Quiero reservar una habitación": send_reservacion,
        "Reservar habitación": send_reservacion,
        "Quiero reservar una habitacion": send_reservacion,
        "Reservar habitacion": send_reservacion,
        "quiero reservar una habitacion": send_reservacion,
        "reservar habitacion": send_reservacion,
        "Aguascalientes, Aguascalientes, 04/04/17, 06/04/17": send_Aguascalientes,
        "Aguascalientes": send_Aguascalientes,
        "Quiero conocer hoteles cerca de mi": send_hoteles_cercanos,
        "Quiero conocer hoteles cercanos a mi": send_hoteles_cercanos,
        "Hoteles cercanos": send_hoteles_cercanos,
        "Quisiera ordenar una hamburguesa con queso a la habitación 506": send_hambuerguesa,
        "Quiero una hamburguesa con queso": send_hambuerguesa,
        "Quiero una hamburguesa": send_hambuerguesa,
        "Raymundo Castellanos, cargo a la cuenta de la habitación": send_solicitud,
        "Raymundo Castellanos, cargo a la cuenta de la habitación 506": send_solicitud,
        "Raymundo Castellanos con cargo a mi cuenta": send_solicitud,
        "Raymundo Castellanos": send_solicitud,
        "Raymundo": send_solicitud,
        "Me gustaría agregar papas fritas a mi orden": send_papas,
        "Me gustaría agregar papas fritas a la orden": send_papas,
        "Con papas": send_papas,
        "Con queso": send_papas,
        "Ubicación": send_location,
        "compartir": send_location,
        "en otro momento": send_no_compartir
    }

    handler = special_keywords.get(text)
    if handler is not None:
        handler(recipient_id)
    else:
        # No keyword matched: explain and parrot the text back.
        page.send(recipient_id, "Aun no tengo redes neuronales :(")
        page.send(recipient_id, "Pero puedo copiar lo que dices :)")
        page.send(recipient_id, text, callback=send_text_callback, notification_type=NotificationType.REGULAR)
def send_text_callback(payload, response):
    """Delivery callback for outgoing text messages; just logs that it fired."""
    print("SEND CALLBACK")
def send_menu(recipient_id):
    """Greet the user and present the five hotel services as a generic-template carousel."""
    print('-----------------------------------------HOLI------SEND_MENU------------------------------------------------------')
    page.send(recipient_id, "Hola, soy hotelbot y estoy para servirte ;)")
    page.send(recipient_id, "Estos son los servicios con los que puedo ayudarte:")
    # (title, subtitle, asset path, postback payload) for each service card.
    cards = [
        ("Promociones hoteles playa", "Disfruta de unas vacaciones en el mar",
         "/assets/playa5.jpg", "hoteles_playa"),
        ("Promociones hoteles ciudad", "Goza de un tour por la ciudad",
         "/assets/city3.jpg", "hoteles_ciudad"),
        ("Reservar una habitación", "Descansa en una habitación de lujo",
         "/assets/habitacion.jpg", "reservar_habitacion"),
        ("Conocer hoteles cerca de ti", "Encuentra un hotel a la medida",
         "/assets/hotel.jpg", "hoteles_cercanos"),
        ("Solicitar servicios dentro del hotel", "Disfruta de nuestros servicios",
         "/assets/servicioshotel.jpg", "servicios_hotel"),
    ]
    elements = [
        Template.GenericElement(title,
                                subtitle=subtitle,
                                image_url=CONFIG['SERVER_URL'] + asset,
                                buttons=[Template.ButtonPostBack("seleccionar", payload)])
        for title, subtitle, asset, payload in cards
    ]
    page.send(recipient_id, Template.Generic(elements))
def send_reservacion(recipient):
    """Ask the user for the booking details (city, state, arrival and departure dates)."""
    print('----------------------------------RESERVAR----HABITACION--------------------------------------')
    prompt = ("Por favor indícame separando con comas, ciudad, estado, fecha de llegada "
              "y fecha de partida iniciando por día, mes y año (01, 02, 17)")
    page.send(recipient, prompt)
def send_Aguascalientes(recipient):
    """Offer the two Aguascalientes hotels as a generic-template carousel."""
    page.send(recipient, "¿En cuál de nuestros hoteles te gustaría hospedarte?")
    hotels = [
        ("Hotel Ibis", "Haz tu reservación en Hotel Ibis",
         "/assets/hotel_ibis_aguascalientes.jpg", "hotel_ibis"),
        ("Hotel Francia Aguascalientes", "Haz tu reservación en Hotel Francia",
         "/assets/hotel_francia_aguascalientes.jpg", "hotel_francia"),
    ]
    page.send(recipient, Template.Generic([
        Template.GenericElement(title,
                                subtitle=subtitle,
                                image_url=CONFIG['SERVER_URL'] + asset,
                                buttons=[Template.ButtonPostBack("seleccionar", payload)])
        for title, subtitle, asset, payload in hotels
    ]))
def send_hambuerguesa(recipient):
    """Respond to a room-service food order by asking for the guest's name and payment method."""
    print('------------------------------SERVICIO---A----HABITACION----------------------------------------')
    page.send(recipient, "Por favor indique nombre de la persona que se registró en esta habitación y método de pago.")
def send_solicitud(recipient):
    """Confirm that the room-service request is being handled."""
    page.send(recipient, "Su solicitud está siendo atendida por nuestro personal, en breve recibirá la orden en su habitación.")
def send_papas(recipient):
    """Confirm an addition (e.g. fries) to an existing room-service order."""
    page.send(recipient, "Confirmado, su solicitud está siendo atendida por nuestro personal, en breve recibirá la orden en su habitación.")
    page.send(recipient, "Es un placer servirle")
def send_location(recipient):
    """Reply to a shared location with three hotel cards, each linking to a Google Maps route."""
    print('-----------------------------enviar---ubicacion-------------------------------------------------')
    page.send(recipient, "Estos son los resultados que encontramos")
    # (name, asset path, Google Maps route URL) for each result card.
    results = [
        ("Hotel San Francisco", "/assets/hotel_san_francisco.jpg",
         "https://www.google.com.mx/maps/dir/19.2963254,-99.1855357/Hotel+San+Francisco+Centro+Hist%C3%B3rico,+Luis+Moya,+Cuauht%C3%A9moc,+Ciudad+de+M%C3%A9xico,+CDMX/@19.3615022,-99.2475501,12z/data=!3m1!4b1!4m9!4m8!1m1!4e1!1m5!1m1!1s0x85d1fa2819fbd205:0x459bfda439d1d2aa!2m2!1d-99.1449614!2d19.434211"),
        ("Grand Hotel Ciudad de Mexico", "/assets/hotel_ciudad_mexico.jpg",
         "https://www.google.com.mx/maps/dir/19.2963254,-99.1855357/Gran+Hotel+Ciudad+de+M%C3%A9xico,+Av.+16+de+Septiembre+82,+Centro,+06000+Cuauht%C3%A9moc,+CDMX/@19.3610787,-99.246697,12z/data=!3m1!4b1!4m9!4m8!1m1!4e1!1m5!1m1!1s0x85ce0157191d1341:0xd6e6ab909104fb4c!2m2!1d{{longitude}}!2d{{latitude}}"),
        ("Hotel Isabel la Catolica", "/assets/hotel_isabel.jpg",
         "https://www.google.com.mx/maps/dir/19.2961852,-99.1855789/Hotel+Isabel,+Isabel+la+Cat%C3%B3lica+63,+Centro+Hist%C3%B3rico,+Centro,+06000+Ciudad+de+M%C3%A9xico,+CDMX/@19.3593533,-99.2125291,13z/data=!3m1!4b1!4m9!4m8!1m1!4e1!1m5!1m1!1s0x85d1fed2ef819f19:0x65f5a7cded682f87!2m2!1d{{longitude}}!2d{{latitude}}"),
    ]
    page.send(recipient, Template.Generic([
        Template.GenericElement(name,
                                subtitle="Haz tu reservación",
                                image_url=CONFIG['SERVER_URL'] + asset,
                                buttons=[Template.ButtonWeb("Ruta", route)])
        for name, asset, route in results
    ]))
def send_no_compartir(recipient):
    """Handle the user declining to share their location."""
    print('----------------------------send_no_compartir------------------------------------------------')
    page.send(recipient, "En que mas te puedo ayudar?")
def send_hoteles_cercanos(recipient):
    """Ask, via quick replies, whether the user wants to share their location."""
    print('------------------------------HOTELES----CERCANOS-----------------------------------------------')
    page.send(recipient, "Claro que sí, será un placer hospedarte.")
    options = [QuickReply(title="compartir", payload="compartir"),
               QuickReply(title="en otro momento", payload="en otro momento")]
    page.send(recipient, "Compartir ubicacion?",
              quick_replies=options,
              metadata="DEVELOPER_DEFINED_METADATA")
def send_image(recipient):
    """Send a sample JPEG image attachment hosted on an external server."""
    banner = '00000000000000000000000000000000000000000000000000000000000000'
    print(banner)
    print(recipient)
    print(banner)
    image_url = "https://www.qa.dineroexpress.com.mx/img/137435869.jpg"
    page.send(recipient, Attachment.Image(image_url))
def send_gif(recipient):
    """Send the Instagram logo GIF as an image attachment."""
    page.send(recipient, Attachment.Image(CONFIG['SERVER_URL'] + "/assets/instagram_logo.gif"))
def send_audio(recipient):
    """Send a sample MP3 as an audio attachment."""
    page.send(recipient, Attachment.Audio(CONFIG['SERVER_URL'] + "/assets/sample.mp3"))
def send_video(recipient):
    """Send a sample MOV clip as a video attachment."""
    page.send(recipient, Attachment.Video(CONFIG['SERVER_URL'] + "/assets/allofus480.mov"))
def send_file(recipient):
    """Send a generic file attachment.

    NOTE(review): the URL points to a .jpg but is sent as Attachment.File
    (not Image) — confirm this is intentional.
    """
    #page.send(recipient, Attachment.File(CONFIG['SERVER_URL'] + "/assets/test.txt"))
    page.send(recipient, Attachment.File("https://www.qa.dineroexpress.com.mx/img/137435869.jpg"))
def send_button(recipient):
    """Send a button template with web-URL, postback and phone-number buttons.

    The library also accepts plain-dict shortcuts in place of Button objects,
    e.g. {'type': 'web_url', 'title': 'Open Web URL', 'value': '...'}.
    """
    buttons = [
        Template.ButtonWeb("Open Web URL", "https://www.oculus.com/en-us/rift/"),
        Template.ButtonPostBack("trigger Postback", "DEVELOPED_DEFINED_PAYLOAD"),
        Template.ButtonPhoneNumber("Call Phone Number", "+16505551234"),
    ]
    page.send(recipient, Template.Buttons("hello", buttons))
@page.callback(['DEVELOPED_DEFINED_PAYLOAD'])
def callback_clicked_button(payload, event):
    """Log postback events carrying the demo button payload."""
    print(payload, event)
def send_generic(recipient):
    """Send a two-card generic template (Oculus Rift / Touch) demonstrating both button styles."""
    rift = Template.GenericElement(
        "rift",
        subtitle="Next-generation virtual reality",
        item_url="https://www.oculus.com/en-us/rift/",
        image_url=CONFIG['SERVER_URL'] + "/assets/rift.png",
        buttons=[
            Template.ButtonWeb("Open Web URL", "https://www.oculus.com/en-us/rift/"),
            Template.ButtonPostBack("tigger Postback", "DEVELOPED_DEFINED_PAYLOAD"),
            Template.ButtonPhoneNumber("Call Phone Number", "+16505551234"),
        ])
    # The second card uses plain dicts in place of Button objects
    # ("tigger" is kept verbatim from the original demo strings).
    touch = Template.GenericElement(
        "touch",
        subtitle="Your Hands, Now in VR",
        item_url="https://www.oculus.com/en-us/touch/",
        image_url=CONFIG['SERVER_URL'] + "/assets/touch.png",
        buttons=[
            {'type': 'web_url', 'title': 'Open Web URL',
             'value': 'https://www.oculus.com/en-us/rift/'},
            {'type': 'postback', 'title': 'tigger Postback',
             'value': 'DEVELOPED_DEFINED_PAYLOAD'},
            {'type': 'phone_number', 'title': 'Call Phone Number', 'value': '+16505551234'},
        ])
    page.send(recipient, Template.Generic([rift, touch]))
def send_receipt(recipient):
    """Send a sample order receipt (one line item, address, summary, discount)."""
    order_id = "order1357"
    item = Template.ReceiptElement(title="Oculus Rift",
                                   subtitle="Includes: headset, sensor, remote",
                                   quantity=1,
                                   price=599.00,
                                   currency="USD",
                                   image_url=CONFIG['SERVER_URL'] + "/assets/riftsq.png")
    shipping = Template.ReceiptAddress(street_1="1 Hacker Way",
                                       street_2="",
                                       city="Menlo Park",
                                       postal_code="94025",
                                       state="CA",
                                       country="US")
    totals = Template.ReceiptSummary(subtotal=698.99,
                                     shipping_cost=20.00,
                                     total_tax=57.67,
                                     total_cost=626.66)
    discount = Template.ReceiptAdjustment(name="New Customer Discount", amount=-50)
    page.send(recipient, Template.Receipt(recipient_name='Peter Chang',
                                          order_number=order_id,
                                          currency='USD',
                                          payment_method='Visa 1234',
                                          timestamp="1428444852",
                                          elements=[item],
                                          address=shipping,
                                          summary=totals,
                                          adjustments=[discount]))
def send_quick_reply(recipient):
    """Ask for a favourite movie genre with two quick-reply options.

    Plain-dict shortcuts are also supported by the library, e.g.
    {'title': 'Action', 'payload': 'PICK_ACTION'}.
    """
    options = [QuickReply(title="Action", payload="PICK_ACTION"),
               QuickReply(title="Comedy", payload="PICK_COMEDY")]
    page.send(recipient, "What's your favorite movie genre?",
              quick_replies=options,
              metadata="DEVELOPER_DEFINED_METADATA")
@page.callback(['PICK_ACTION'])
def callback_picked_genre(payload, event):
    """Log quick-reply events carrying the PICK_ACTION payload."""
    print(payload, event)
def send_read_receipt(recipient):
    """Mark the conversation as seen (read receipt)."""
    page.mark_seen(recipient)
def send_typing_on(recipient):
    """Show the typing indicator to the user."""
    page.typing_on(recipient)
def send_typing_off(recipient):
    """Hide the typing indicator."""
    page.typing_off(recipient)
def send_account_linking(recipient):
    """Send an account-linking template pointing at this server's /authorize endpoint."""
    page.send(recipient, Template.AccountLink(text="Welcome. Link your account.",
                                   account_link_url=CONFIG['SERVER_URL'] + "/authorize",
                                   account_unlink_button=True))
def send_text_message(recipient, text):
    """Send a plain text message with the demo metadata tag."""
    page.send(recipient, text, metadata="DEVELOPER_DEFINED_METADATA")
| [
"binauralvoice@gmail.com"
] | binauralvoice@gmail.com |
2eca7755846fedb58b2c1bacaa67edd161a96c9b | ac0894b411507bfd027696b6bf11b5e384ed68fc | /need-to-do/python3------download-problem--of--leetcode/845.longest-mountain-in-array.py | c66780c77e698c67fa30867bea3b0d5858a98fac | [] | no_license | mkzpd/leetcode-solution | 1d19554628c34c74012fa52582c225e6dccb345c | 60c9b218683bcdee86477a910c58ec702185c726 | refs/heads/master | 2020-05-31T05:56:48.985529 | 2019-09-20T09:10:49 | 2019-09-20T09:10:49 | 190,128,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | #
# @lc app=leetcode id=845 lang=python3
#
# [845] Longest Mountain in Array
#
# https://leetcode.com/problems/longest-mountain-in-array/description/
#
# algorithms
# Medium (34.97%)
# Total Accepted: 21.6K
# Total Submissions: 61.6K
# Testcase Example: '[2,1,4,7,3,2,5]'
#
# Let's call any (contiguous) subarray B (of A) a mountain if the following
# properties hold:
#
#
# B.length >= 3
# There exists some 0 < i < B.length - 1 such that B[0] < B[1] < ... B[i-1] <
# B[i] > B[i+1] > ... > B[B.length - 1]
#
#
# (Note that B could be any subarray of A, including the entire array A.)
#
# Given an array A of integers, return the length of the longest mountain.
#
# Return 0 if there is no mountain.
#
# Example 1:
#
#
# Input: [2,1,4,7,3,2,5]
# Output: 5
# Explanation: The largest mountain is [1,4,7,3,2] which has length 5.
#
#
# Example 2:
#
#
# Input: [2,2,2]
# Output: 0
# Explanation: There is no mountain.
#
#
# Note:
#
#
# 0 <= A.length <= 10000
# 0 <= A[i] <= 10000
#
#
# Follow up:
#
#
# Can you solve it using only one pass?
# Can you solve it in O(1) space?
#
#
#
class Solution:
    def longestMountain(self, A: 'List[int]') -> int:
        """Return the length of the longest mountain subarray of A (0 if none).

        A mountain is a subarray of length >= 3 that strictly increases to a
        peak and then strictly decreases. One pass, O(1) extra space: track
        the current ascent length `up` and descent length `down`, resetting
        whenever a plateau appears or a new ascent begins after a descent.
        (The original block was an empty stub; the annotation is a string so
        the file does not require `from typing import List` at runtime.)
        """
        best = up = down = 0
        for i in range(1, len(A)):
            # A plateau breaks any mountain; rising again after a descent
            # starts a brand-new candidate mountain.
            if (down and A[i] > A[i - 1]) or A[i] == A[i - 1]:
                up = down = 0
            if A[i] > A[i - 1]:
                up += 1
            if A[i] < A[i - 1]:
                down += 1
            # A valid mountain needs both an ascent and a descent.
            if up and down:
                best = max(best, up + down + 1)
        return best
| [
"sodgso262@gmail.com"
] | sodgso262@gmail.com |
5a1176e053d3166f99224f9c3611db49c2d3ac53 | d2821e3679389796d65b423ef10a8ce42a419d56 | /exampleproject/views.py | 1e40dad12f629d2fa64569ab2109cdee9ba6a59f | [
"BSD-3-Clause"
] | permissive | zerc/django_molder | e47750108213bbfec08cf6eb40eb69db6564e1ba | 23d0672eaa60b7bdace0252136bbf8ad9c7631ea | refs/heads/master | 2023-01-06T17:55:49.748944 | 2015-07-04T18:44:23 | 2015-07-04T18:44:23 | 35,910,107 | 0 | 0 | BSD-3-Clause | 2022-12-26T19:44:00 | 2015-05-19T21:41:41 | Python | UTF-8 | Python | false | false | 348 | py | # coding: utf-8
from django.views.generic.base import TemplateView
from forms import FormOne
class IndexPage(TemplateView):
    """Render the landing page with an unbound FormOne instance in the context."""
    template_name = 'index.html'

    def get_context_data(self, *args, **kwargs):
        # Extend the default context with a fresh (unbound) form.
        context = super(IndexPage, self).get_context_data(*args, **kwargs)
        context['form_one'] = FormOne()
        return context
| [
"zero13cool@yandex.ru"
] | zero13cool@yandex.ru |
faa63d893e6d3b0c367cad9ca3a15d27cb896d32 | 633b695a03e789f6aa644c7bec7280367a9252a8 | /mfem_gallery/ex17p.py | ef12ea9d15cb62c3612b27a57a2b84c3ec6b1dba | [] | no_license | tnakaicode/PlotGallery | 3d831d3245a4a51e87f48bd2053b5ef82cf66b87 | 5c01e5d6e2425dbd17593cb5ecc973982f491732 | refs/heads/master | 2023-08-16T22:54:38.416509 | 2023-08-03T04:23:21 | 2023-08-03T04:23:21 | 238,610,688 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,703 | py | '''
MFEM example 17p
How to run:
mpirun -np 4 python <arguments>
Example of arguments:
ex17p.py -m beam-tri.mesh
ex17p.py -m beam-quad.mesh
ex17p.py -m beam-tet.mesh
ex17p.py -m beam-hex.mesh
ex17p.py -m beam-quad.mesh -rs 2 -rp 2 -o 3 -elast
ex17p.py -m beam-quad.mesh -rs 2 -rp 3 -o 2 -a 1 -k 1
ex17p.py -m beam-hex.mesh -rs 2 -rp 1 -o 2
'''
import sys
from mfem.common.arg_parser import ArgParser
from os.path import expanduser, join
import numpy as np
from mfem import path
import mfem.par as mfem
from mpi4py import MPI
num_procs = MPI.COMM_WORLD.size
myid = MPI.COMM_WORLD.rank
class InitDisplacement(mfem.VectorPyCoefficient):
    """Initial/boundary displacement: zero except last component = -0.2 * x[0]."""

    def __init__(self, dim):
        self.dim = dim
        mfem.VectorPyCoefficient.__init__(self, dim)

    def EvalValue(self, x):
        """Return the displacement vector at point x as a tuple of length dim."""
        # Bug fix: use the dimension stored on the instance. The original
        # read the module-level global `dim`, so the class silently depended
        # on script state and broke if constructed with a different dimension.
        u = [0.0] * self.dim
        u[-1] = -0.2 * x[0]
        return tuple(u)
class StressCoefficient(mfem.PyCoefficientBase):
    """Scalar coefficient evaluating one component (si, sj) of the linear-elastic
    stress tensor sigma = lambda*div(u)*I + mu*(grad(u) + grad(u)^T) from a
    displacement grid function `u`.
    """
    def __init__(self, lambda_, mu_, si=0, sj=0):
        super(StressCoefficient, self).__init__(0)
        self.lam = lambda_ # Lame first parameter (mfem Coefficient)
        self.mu = mu_ # shear modulus (mfem Coefficient)
        self.si = si ; self.sj = sj # tensor component indices
        self.u = None # displacement GridFunction; set via SetDisplacement
        self.grad = mfem.DenseMatrix()
    def SetComponent(self, i, j):
        # Select which stress component subsequent Eval calls return.
        self.si = i
        self.sj = j
    def SetDisplacement(self, u):
        self.u = u
    def Eval(self, T, ip):
        si, sj = self.si, self.sj
        L = self.lam.Eval(T, ip)
        M = self.mu.Eval(T, ip)
        # Displacement gradient at the integration point, stored into self.grad.
        self.u.GetVectorGradient(T, self.grad)
        if (self.si == self.sj):
            # Diagonal component: lambda*div(u) + 2*mu*du_i/dx_i.
            div_u = self.grad.Trace();
            return L * div_u + 2 * M * self.grad[si, si]
        else:
            # Off-diagonal component: mu*(du_i/dx_j + du_j/dx_i).
            return M * (self.grad[si,sj] + self.grad[sj,si]);
class VisMan(object):
    """Manage GLVis socket connections and tile their windows in a grid."""

    def __init__(self, vishost, visport):
        self.host = vishost
        self.port = visport
        self.socks = []            # open socketstream connections
        self.output = None         # connection currently written to
        self.win_x = 0             # top-left corner of the window grid
        self.win_y = 0
        self.win_w = 200           # window width
        self.win_h = 150           # window height
        self.stride_x = self.win_w
        self.stride_y = self.win_h + 20
        self.win_nx = 4            # number of windows in a row
        self.sid = 0               # id used to place the current window

    def NewWindow(self):
        """Open a new GLVis connection and make it the current output."""
        self.socks.append(mfem.socketstream(self.host, self.port))
        self.output = self.socks[-1]
        self.output.precision(8)
        self.sid = self.sid + 1

    def CloseConnection(self):
        """Drop all open connections and clear the current output."""
        self.socks = []
        self.output = None

    def PositionWindow(self):
        """Place the current window on a win_nx-wide grid according to sid."""
        if self.output is None:
            return
        sid = self.sid
        # Bug fix: `sid / self.win_nx` is float division under Python 3 and
        # produced fractional y coordinates (e.g. "212.5"); use floor
        # division to get the integer grid row.
        command = ("window_geometry " +
                   str(self.win_x + self.stride_x*(sid % self.win_nx)) +
                   ' ' +
                   str(self.win_y + self.stride_y*(sid // self.win_nx)) +
                   ' ' + str(self.win_w) + ' ' + str(self.win_h))
        self.output.send_text(command)
        self.output.flush()

    def send_solution(self, mesh, x):
        if self.output is None:
            return
        self.output.send_solution(mesh, x)

    def send_text(self, x):
        if self.output is None:
            return
        self.output.send_text(x)

    def flush(self):
        if self.output is None:
            return
        self.output.flush()
parser = ArgParser(description='Ex17')
parser.add_argument('-m', '--mesh',
default = 'beam-tri.mesh',
action = 'store', type = str,
help='Mesh file to use.')
parser.add_argument('-rs', '--refine-serial',
action = 'store', default = -1, type=int,
help = "Number of times to refine the mesh uniformly before parallel")
parser.add_argument('-rp', '--refine-parallel',
action = 'store', default = 1, type=int,
help = "Number of times to refine the mesh uniformly after parallel")
parser.add_argument('-o', '--order',
action = 'store', default = 1, type=int,
help = "Finite element order (polynomial degree)");
parser.add_argument('-a', '--alpha',
action = 'store', default = -1.0, type=float,
help = '\n'.join(["One of the two DG penalty parameters, typically +1/-1."
" See the documentation of class DGElasticityIntegrator."]))
parser.add_argument('-k', '--kappa',
action = 'store', default = -1.0, type=float,
help = '\n'.join(["One of the two DG penalty parameters, should be positve."
" Negative values are replaced with (order+1)^2."]))
parser.add_argument('-elast', '--amg-for-elasticity',
action = 'store_true',
help = 'Use the special AMG elasticity solver (GM/LN approaches)',
dest = 'amg_elast', default = False)
parser.add_argument('-sys', '--amg-for-systems',
action = 'store_false',
help = 'Use standard AMG for systems (unknown approach).',
dest = 'amg_elast', default = True)
parser.add_argument('-vis', '--visualization',
action = 'store_true',
help='Enable GLVis visualization')
args = parser.parse_args()
ser_ref_levels = args.refine_serial
par_ref_levels = args.refine_parallel
order = args.order
alpha = args.alpha
kappa = args.kappa
amg_elast = args.amg_elast
visualization = args.visualization
if (kappa < 0):
kappa = (order+1.)*(order+1.)
args.kappa = kappa
if (myid == 0): parser.print_options(args)
# 2. Read the mesh from the given mesh file.
meshfile =expanduser(join(path, 'data', args.mesh))
mesh = mfem.Mesh(meshfile, 1,1)
dim = mesh.Dimension()
if (mesh.attributes.Max() < 2 or
mesh.bdr_attributes.Max() < 2):
if (myid == 0):
print("\n".join(["Input mesh should have at least two materials and ", "two boundary attributes! (See schematic in ex17.cpp)\n"]))
sys.exit()
# 3. Refine the mesh to increase the resolution.
if ser_ref_levels < 0:
ser_ref_levels = int(np.floor(np.log(5000./mesh.GetNE())/np.log(2.)/dim))
for x in range(ser_ref_levels):
mesh.UniformRefinement();
# Since NURBS meshes do not support DG integrators, we convert them to
# regular polynomial mesh of the specified (solution) order.
if (mesh.NURBSext): mesh.SetCurvature(order)
pmesh = mfem.ParMesh(MPI.COMM_WORLD, mesh)
del mesh
for x in range(par_ref_levels):
pmesh.UniformRefinement();
# 4. Define a DG vector finite element space on the mesh. Here, we use
# Gauss-Lobatto nodal basis because it gives rise to a sparser matrix
# compared to the default Gauss-Legendre nodal basis.
fec = mfem.DG_FECollection(order, dim, mfem.BasisType.GaussLobatto)
fespace = mfem.ParFiniteElementSpace(pmesh, fec, dim, mfem.Ordering.byVDIM)
glob_size = fespace.GlobalTrueVSize()
if (myid == 0):
print('Number of finite element unknowns: '+ str(glob_size))
print('Assembling:')
# 5. In this example, the Dirichlet boundary conditions are defined by
# marking boundary attributes 1 and 2 in the marker Array 'dir_bdr'.
# These b.c. are imposed weakly, by adding the appropriate boundary
# integrators over the marked 'dir_bdr' to the bilinear and linear forms.
# With this DG formulation, there are no essential boundary conditions.
ess_tdof_list = mfem.intArray()
dir_bdr = mfem.intArray(pmesh.bdr_attributes.Max())
dir_bdr.Assign(0)
dir_bdr[0] = 1 # boundary attribute 1 is Dirichlet
dir_bdr[1] = 1 # boundary attribute 2 is Dirichlet
# 6. Define the DG solution vector 'x' as a finite element grid function
# corresponding to fespace. Initialize 'x' using the 'InitDisplacement'
# function.
x = mfem.ParGridFunction(fespace)
init_x = InitDisplacement(dim)
x.ProjectCoefficient(init_x)
# 7. Set up the Lame constants for the two materials. They are defined as
# piece-wise (with respect to the element attributes) constant
# coefficients, i.e. type PWConstCoefficient.
lamb = mfem.Vector(pmesh.attributes.Max()) # lambda is not possible in python
lamb.Assign(1.0)
lamb[0] = 50.
lambda_c = mfem.PWConstCoefficient(lamb)
mu = mfem.Vector(pmesh.attributes.Max())
mu.Assign(1.0);
mu[0] = 50.0
mu_c = mfem.PWConstCoefficient(mu)
# 8. Set up the linear form b(.) which corresponds to the right-hand side of
# the FEM linear system. In this example, the linear form b(.) consists
# only of the terms responsible for imposing weakly the Dirichlet
# boundary conditions, over the attributes marked in 'dir_bdr'. The
# values for the Dirichlet boundary condition are taken from the
# VectorFunctionCoefficient 'x_init' which in turn is based on the
# function 'InitDisplacement'.
b = mfem.ParLinearForm(fespace)
if (myid == 0): print('r.h.s ...')
integrator = mfem.DGElasticityDirichletLFIntegrator(init_x, lambda_c,
mu_c, alpha, kappa)
b.AddBdrFaceIntegrator(integrator , dir_bdr)
b.Assemble()
# 9. Set up the bilinear form a(.,.) on the DG finite element space
# corresponding to the linear elasticity integrator with coefficients
# lambda and mu as defined above. The additional interior face integrator
# ensures the weak continuity of the displacement field. The additional
# boundary face integrator works together with the boundary integrator
# added to the linear form b(.) to impose weakly the Dirichlet boundary
# conditions.
a = mfem.ParBilinearForm(fespace)
a.AddDomainIntegrator(mfem.ElasticityIntegrator(lambda_c, mu_c))
a.AddInteriorFaceIntegrator(
mfem.DGElasticityIntegrator(lambda_c, mu_c, alpha, kappa))
a.AddBdrFaceIntegrator(
mfem.DGElasticityIntegrator(lambda_c, mu_c, alpha, kappa),dir_bdr)
if (myid == 0): print('matrix ...')
a.Assemble()
A = mfem.HypreParMatrix()
B = mfem.Vector()
X = mfem.Vector()
a.FormLinearSystem(ess_tdof_list, x, b, A, X, B);
if (myid == 0): print('done.')
# 11. Define a simple symmetric Gauss-Seidel preconditioner and use it to
# solve the system Ax=b with PCG for the symmetric formulation, or GMRES
# for the non-symmetric.
rtol = 1e-6
amg = mfem.HypreBoomerAMG(A)
if (amg_elast):
amg.SetElasticityOptions(fespace)
else:
amg.SetSystemsOptions(dim)
if (alpha == -1.0):
solver = mfem.CGSolver(A.GetComm())
else:
solver = mfem.GMRESSolver(A.GetComm())
solver.SetKDim(50)
solver.SetRelTol(rtol)
solver.SetMaxIter(500)
solver.SetPrintLevel(1)
solver.SetOperator(A)
solver.SetPreconditioner(amg)
solver.Mult(B, X)
# 12. Recover the solution as a finite element grid function 'x'.
a.RecoverFEMSolution(X, b, x)
# 13. Use the DG solution space as the mesh nodal space. This allows us to
# save the displaced mesh as a curved DG mesh.
pmesh.SetNodalFESpace(fespace)
reference_nodes = mfem.Vector()
if (visualization):
reference_nodes.Assign(pmesh.GetNodes())
# 14. Save the displaced mesh and minus the solution (which gives the
# backward displacements to the reference mesh). This output can be
# viewed later using GLVis
nodes = pmesh.GetNodes()
nodes += x
x.Neg()
smyid = '{:0>6d}'.format(myid)
mesh_name = "mesh."+smyid
sol_name = "sol." +smyid
pmesh.Print(mesh_name, 8)
x.Save(sol_name, 8)
# 15. Visualization: send data by socket to a GLVis server.
if (visualization):
vis = VisMan("localhost", 19916)
glvis_keys = "Rjlc" if (dim < 3) else "c"
vis.NewWindow()
vis.send_text("parallel " + str(pmesh.GetNRanks()) + " " +
str(pmesh.GetMyRank()))
vis.send_solution(pmesh, x)
vis.send_text("keys " + glvis_keys)
vis.send_text("window_title 'Deformed configuration'")
vis.send_text("plot_caption 'Backward displacement'")
vis.PositionWindow()
vis.CloseConnection()
c = "xyz"
scalar_dg_space = mfem.ParFiniteElementSpace(pmesh, fec)
stress = mfem.ParGridFunction(scalar_dg_space)
stress_c = StressCoefficient(lambda_c, mu_c)
pmesh.GetNodes().Assign(reference_nodes)
x.Neg()
stress_c.SetDisplacement(x)
def make_plot(si, sj):
stress_c.SetComponent(si, sj);
stress.ProjectCoefficient(stress_c);
vis.NewWindow()
vis.send_text("parallel " + str(pmesh.GetNRanks()) + " " +
str(pmesh.GetMyRank()))
vis.send_solution(pmesh, stress)
vis.send_text("keys " + glvis_keys)
vis.send_text("window_title |Stress" + c[si] + c[sj] + "|")
vis.PositionWindow()
vis.CloseConnection()
for si in range(dim):
for jj in range(dim-si):
make_plot(si, si+jj)
| [
"tnakaicode@gmail.com"
] | tnakaicode@gmail.com |
820bd4c1439ca1bc08e2dc3a6a6c047e9b8c0462 | ee6afed750c9edce0064680bba6a023c5c67db73 | /JusTalkENV/lib/python2.7/encodings/aliases.py | e3230cf8468099a2a911002b62b17f3d10b08f52 | [] | no_license | khoa408/JusTalk | 664caaa8d13880b4a59402817c8a90028766dcaf | e2bd537c487a342cc22597f63a32ae32edca6429 | refs/heads/master | 2022-12-11T02:42:38.709326 | 2019-12-02T09:34:32 | 2019-12-02T09:34:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | /usr/local/Cellar/python@2/2.7.15_1/Frameworks/Python.framework/Versions/2.7/lib/python2.7/encodings/aliases.py | [
"khoa.d.tran97@gmail.com"
] | khoa.d.tran97@gmail.com |
0793c4f5593285e6f03140c10385fccb1b2d9643 | 62766deea531d0b89b86a53e6f51b94fd2a88f23 | /AtCoder/ABC/014/b.py | fb73f462150a40d9afa30384818cff98f81943e2 | [
"MIT"
] | permissive | ttyskg/ProgrammingCompetition | 53620b07317ae5cbd1ee06272e573e3682ac15f3 | 885c5a1be228ae7ba9f00b3d63521c9ff7d21608 | refs/heads/master | 2023-08-18T08:38:33.068168 | 2023-08-15T04:28:13 | 2023-08-15T04:28:13 | 183,425,786 | 0 | 0 | MIT | 2023-08-15T04:28:14 | 2019-04-25T12:02:53 | Python | UTF-8 | Python | false | false | 292 | py | import sys
def main():
    """Read n, X and n values A from stdin; sum the A values selected by X's bits.

    A is reversed so that its i-th element corresponds to bit (n-1-i) of X.
    """
    read_line = sys.stdin.readline
    n, X = map(int, read_line().split())
    weights = list(map(int, read_line().split()))[::-1]
    total = 0
    for i, w in enumerate(weights):
        if X & (1 << (n - 1 - i)):
            total += w
    return total
if __name__ == '__main__':
print(main())
| [
"tatsu100311@gmail.com"
] | tatsu100311@gmail.com |
4e33a21ad3f9248831981123ced61cd4eff6d9b8 | f57907e356079871a8b6d9292dfdb99572098f15 | /DM_int_report_drw/hierarchicalClustering.py | 25f33b0fa60de38268260bfb0c6c5029cfce9558 | [] | no_license | drwiner/WesternCorpus | 421c6b7e2d142b9d46eacd59062e42c403e33be7 | aaac96260c9e976ac030bf38c038a36d48e994ff | refs/heads/master | 2021-01-11T16:56:56.664567 | 2017-11-15T00:49:36 | 2017-11-15T00:49:36 | 79,701,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,955 | py | import numpy as np
from itertools import product
from clockdeco import clock
import math
# import collections
# NamedPoint = collections.namedtuple('NamedPoint', ['id', 'point'])
class NamedPoint:
    """A point (numpy vector) tagged with an integer identifier.

    Hashing uses only the identifier, so ids are assumed unique; adding two
    NamedPoints returns the sum of their underlying point vectors.
    """

    def __init__(self, int_id, point):
        self.int_id = int_id
        self.point = point

    def __hash__(self):
        return hash(self.int_id)

    def __repr__(self):
        return 'named_point: {} {}'.format(self.int_id, self.point)

    def __add__(self, other):
        return self.point + other.point
class Cluster:
    """A named collection of points, initialised with a single member.

    NOTE: the constructor wraps its second argument in a one-element list.
    """

    def __init__(self, int_id, points):
        self.int_id = int_id
        self.points = [points]

    def absorb(self, cluster):
        """Merge another cluster's points into this one."""
        self.points.extend(cluster.points)

    def __hash__(self):
        return hash(self.int_id)

    def __len__(self):
        return len(self.points)

    def __getitem__(self, pos):
        return self.points[pos]

    def __repr__(self):
        head = 'cluster: {} --- '.format(self.int_id)
        return head + ' '.join(str(p) for p in self.points)
def dotDistance(s1, s2):
    """Euclidean distance between two objects carrying numpy `.point` vectors."""
    diff = s1.point - s2.point
    return math.sqrt(np.dot(diff, diff))
def nopointDist(a1, a2):
    """Euclidean distance between two raw numpy vectors (no .point wrapper)."""
    diff = a1 - a2
    return math.sqrt(np.dot(diff, diff))
def singleLink(S1, S2):
    """Single-linkage distance: minimum pairwise distance between two clusters.

    S1 and S2 are iterables of entities carrying a `.point` vector.
    """
    return min(dotDistance(a, b) for a, b in set(product(S1, S2)))
def completeLink(S1, S2):
    """Complete-linkage distance: maximum pairwise distance between two clusters."""
    return max(dotDistance(a, b) for a, b in set(product(S1, S2)))
def meanLink(S1, S2):
    """Mean-linkage distance: distance between the two cluster centroids."""
    centroid1 = sum(s.point for s in S1) / len(S1)
    centroid2 = sum(s.point for s in S2) / len(S2)
    return nopointDist(centroid1, centroid2)
points = set()
def initialLoading(text_name):
    """Load points from 'C1.txt' into the module-level `points` set.

    Each line has the form: <int id> <float coordinates...>.
    NOTE(review): the `text_name` parameter is ignored — the filename is
    hard-coded and the file handle is never closed; confirm intent.
    """
    # initial loading
    C1 = open('C1.txt')
    # points = set()
    for line in C1:
        split_line = line.split()
        p = np.array([float(i) for i in split_line[1:]])
        points.add(NamedPoint(int(split_line[0]), p))
singleLink_clusters = set()
completeLink_clusters = set()
meanLink_clusters = set()
def initClusters():
    """Seed the three link-criterion cluster sets with one singleton per point.

    Bug fix: Cluster.__init__ already wraps its argument in a list, so the
    point must be passed bare. The original passed [point], producing
    clusters whose members were lists — and lists lack the `.point`
    attribute required by the distance functions.
    """
    for point in points:
        singleLink_clusters.add(Cluster(point.int_id, point))
        completeLink_clusters.add(Cluster(point.int_id, point))
        meanLink_clusters.add(Cluster(point.int_id, point))
@clock
def h_clustering(clusters, k, dist_method):
    """Agglomerative hierarchical clustering down to k clusters.

    Repeatedly finds the closest pair of clusters under `dist_method`
    (single/complete/mean link) and merges the smaller into the larger.
    All pairwise distances are recomputed each iteration, so this is
    O(n^3) distance evaluations overall.
    """
    clusts = set(clusters)
    while len(clusts) > k:
        # Ordered pairs; (c, c) self-pairs are skipped below.
        pairwise_clusters = set(product(clusts, clusts))
        arg_mins = None
        m = float("inf")
        for c1, c2 in pairwise_clusters:
            if c1 == c2:
                continue
            value = dist_method(c1, c2)
            if value < m:
                m = value
                arg_mins = (c1, c2)
        # NOTE(review): if no pair was found (e.g. a single cluster left),
        # the unpack below would raise; only a debug message is printed.
        if arg_mins is None:
            print('wtf')
        c1, c2 = arg_mins
        # Merge the smaller cluster into the larger one.
        if len(c1) < len(c2):
            c2.absorb(c1)
            clusts = clusts - {c1}
        else:
            c1.absorb(c2)
            clusts = clusts - {c2}
    return clusts
# def output(k):
# k = 4
# sl_clusts = h_clustering(singleLink_clusters, k, singleLink)
# print('Shortest Link:\n')
# for clust in sl_clusts:
# print(clust)
# for point in clust:
# print(point.int_id,point.point)
# print('\n')
# print('Complete Link:\n')
# cl_clusts = h_clustering(completeLink_clusters, k, completeLink)
# for clust in cl_clusts:
# print(clust)
# for point in clust:
# print(point.int_id, point.point)
# print('\n')
# print('Mean Link:\n')
# ml_clusts = h_clustering(meanLink_clusters, k, meanLink)
# for clust in ml_clusts:
# print(clust)
# for point in clust:
# print(point.int_id, point.point)
# print('\n')
#
# import matplotlib.pyplot as plt
#
# colours = ['r', 'g', 'y', 'b']
# s1 = list(sl_clusts)
#
# for i in range(k):
# x = [p.point[0] for p in s1[i]]
# y = [p.point[1] for p in s1[i]]
# plt.scatter(x, y, c=colours[i])
#
# plt.show()
#
# s1 = list(cl_clusts)
#
# for i in range(k):
# x = [p.point[0] for p in s1[i]]
# y = [p.point[1] for p in s1[i]]
# plt.scatter(x, y, c=colours[i])
#
# plt.show()
#
# s1 = list(ml_clusts)
#
# for i in range(k):
# x = [p.point[0] for p in s1[i]]
# y = [p.point[1] for p in s1[i]]
# plt.scatter(x, y, c=colours[i])
#
# plt.show()
# initialLoading(45)
# initClusters()
# output(4) | [
"drwiner131@gmail.com"
] | drwiner131@gmail.com |
bc3b56a85820774d1ac5eb6f3e9241bd004eb841 | 693567f042c6bd93ecdda41cb5db81c55ccf3158 | /List/reverce a list.py | a12c0980bd0bc7958284b52cdbd7adfc033583fc | [] | no_license | supriyo-pal/Python-Practice | 5806e0045ebfeb04856246a245430e2ab7921ba9 | 2025369f0d23d603ad27eaff149500137e98dbcf | refs/heads/main | 2023-01-25T05:31:58.404283 | 2020-12-09T19:08:22 | 2020-12-09T19:08:22 | 317,021,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 23:52:19 2020
@author: Supriyo
"""
list1=[87,56,89,65,45,23]
def Reverce(list1):
    """Return a new list containing the elements of list1 in reverse order."""
    return list(reversed(list1))
print(Reverce(list1)) | [
"noreply@github.com"
] | supriyo-pal.noreply@github.com |
e56e255fb03e55b4a999dc58e4c69021594129ee | 2fd4de2f0820f186c735f0619bce2a0318bbfc38 | /examples/demo.py | d97e0a9e2b393666a9c4f11e75bd4607ebbe1e27 | [
"MIT"
] | permissive | SunYanCN/AppZoo | e90b778fefdaf1a440c3fd40d078b5396e4e3f06 | 91b04cc75fcc5f70ae5819e98233ea9146c1f001 | refs/heads/master | 2023-08-22T05:41:22.175291 | 2021-10-12T13:37:21 | 2021-10-12T13:37:21 | 359,024,301 | 0 | 0 | MIT | 2021-09-05T12:24:47 | 2021-04-18T02:12:40 | Python | UTF-8 | Python | false | false | 1,424 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-App.
# @File : demo
# @Time : 2019-11-13 15:44
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
import asyncio
from datetime import datetime, time, timedelta
from sanic import Sanic
from sanic_scheduler import SanicScheduler, task
app = Sanic()
scheduler = SanicScheduler(app)  # attaches the @task-decorated jobs to the Sanic event loop
import values
import os
d = {}  # shared state updated by the scheduled job and exposed via the '/' route
@task(timedelta(seconds=3))
def hello(app):
    """Runs the function every 3 seconds."""
    import time
    d['a'] = time.ctime()  # record the last tick time for the '/' endpoint
    print(os.popen("ls").read())
    # values.set_value(time.ctime())
    print("Hello, {0}".format(app), datetime.now())
@task(timedelta(hours=1), time(hour=1, minute=30))
async def foo_bar(_):
    """Runs the function every 1 hours after 1:30."""
    print("Foo", datetime.now())
    await asyncio.sleep(1)
    print("Bar")
@task(timedelta(minutes=2), timedelta(seconds=10))
def baz(_):
    """Runs the function every 2 minutes after 10 seconds."""
    print("Baz", datetime.now())
@task(start=timedelta(seconds=10))
def another(_):
    """Run the function after 10 seconds once."""
    print("another", datetime.now())
from appzoo import App
app_ = App()
app_.app = app  # reuse the Sanic instance that carries the scheduler
app_.add_route('/', lambda **kwargs: d)  # values.get_value()
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5000, debug=True, workers=4)
| [
"313303303@qq.com"
] | 313303303@qq.com |
6282b45debae15af7a0b552e1dc0444245d5ceea | 4659f8758c5204ff27a14dd4352dc63f564c1136 | /my_library/db/models/association.py | fe2af630bbbd484a70bb7278b14199ee2e95ada1 | [
"MIT"
] | permissive | mplanchard/webservice_template | 71823dcd2b98fa93aa2145011ee5a7b820c25f77 | fe3e865909d56d8c010f55e08dc6faf6bf4f8ef2 | refs/heads/master | 2021-03-27T11:12:06.470951 | 2018-03-29T02:34:51 | 2018-03-29T02:34:51 | 123,056,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | """Association tables."""
from __future__ import absolute_import, unicode_literals
from sqlalchemy import Column, Integer, ForeignKey, Table
from .base import Base
# Plain association table implementing the many-to-many relationship between
# ``books`` and ``authors``; it carries no extra payload columns, so a bare
# Table (rather than a mapped model class) is sufficient.
books_authors = Table(
    'books_authors',
    Base.metadata,
    Column('author_id', Integer, ForeignKey('authors.id')),
    Column('book_id', Integer, ForeignKey('books.id')),
)
| [
"msplanchard@gmail.com"
] | msplanchard@gmail.com |
b07ff3984943ff1650437d7ffc167855e241a701 | aa2157e595b89c3512857e41fee16e8b11d7a657 | /Fresher Lavel Logical Programms/Program to Sort latter in Alphabetic Order.py | f3f2d0d298eec4eb8600504fb0d9ea8dc6664df5 | [] | no_license | biswaranjanroul/Python-Logical-Programms | efee6276eea3eafab9ee6b6e7e0910b715a504d1 | 152dcecf2ecae7891a11769f250a4dc8d9d6b15f | refs/heads/master | 2022-12-15T07:37:45.978218 | 2020-09-17T13:24:53 | 2020-09-17T13:24:53 | 296,326,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | str =input("Enter a string:")
b = sorted(str)
print(b)
for i in b:
print(i) | [
"biswaranjanroul2@gmail.com"
] | biswaranjanroul2@gmail.com |
60b2bbc2b50ed814b1dc6531018652e7245713ea | a57207602b4da08b0433b1b513c25788476460a1 | /bac_tasks/pipelines/annotation.py | 2774c9388a75ffc343c636eaef3ca6f1864bdcd9 | [] | no_license | Antonior26/BaC | 2e8a1bab08a6e70eb9390947850db2a0c5bca81f | 53cf4dcdbc4e56157dbf9e661bcec71cd89d0289 | refs/heads/master | 2020-04-17T15:42:29.871904 | 2019-12-15T18:04:48 | 2019-12-15T18:04:48 | 166,709,586 | 1 | 0 | null | 2020-04-09T20:55:53 | 2019-01-20T21:08:13 | Python | UTF-8 | Python | false | false | 2,940 | py | import json
import os
from django.conf import settings
from bac_tasks.pipelines.base import JobFailedException, PipelineComponent
class Annotation(PipelineComponent):
_name = 'ANNOTATION'
_workflow = {
"stages":
[
{"name": "call_features_CDS_glimmer3",
"glimmer3_parameters": {
"min_training_len": "2000"
}},
{"name": "annotate_proteins_similarity",
"similarity_parameters": {
"annotate_hypothetical_only": "1"
}},
{"name": "resolve_overlapping_features",
"resolve_overlapping_features_parameters": {}
}
]
}
def _run(self):
name = self.sample.identifier
assembly = self.sample.assembly
output_dir = self.result_folder
sp = self.sample.isolate.species.name
rast_create_genome = settings.ANNOTATION_PATHS['rast_create_genome']
rast_process_genome = settings.ANNOTATION_PATHS['rast_process_genome']
rast_export_genome = settings.ANNOTATION_PATHS['rast_export_genome']
os.makedirs(output_dir, exist_ok=True)
output_dir_exports = os.path.join(output_dir, 'exports')
os.makedirs(output_dir_exports, exist_ok=True)
workflow = os.path.join(output_dir, name + '.workflow')
fdw = open(workflow, 'w')
json.dump(self._workflow, fdw)
fdw.close()
gto = os.path.join(output_dir, name + '.gto')
gto2 = os.path.join(output_dir, name + '.gto2')
genebank = os.path.join(output_dir_exports, name + '.gbk')
gff = os.path.join(output_dir_exports, name + '.gff')
embl = os.path.join(output_dir_exports, name + '.embl')
rna_fasta = os.path.join(output_dir_exports, name + '.rna.fasta')
cds_fasta = os.path.join(output_dir_exports, name + '.cds.fasta')
protein_fasta = os.path.join(output_dir_exports, name + '.proteins.fasta')
self.pipeline_step(rast_create_genome, '-o', gto, '--contig', assembly, '--genetic-code', '11', '--genome-id',
name, '--domain', 'Bacteria', '--scientific-name', sp)
self.pipeline_step(rast_process_genome, '-o', gto2, '-i', gto, '--workflow', workflow)
self.pipeline_step(rast_export_genome, 'gff', '-i', gto2, '-o', gff)
self.pipeline_step(rast_export_genome, 'protein_fasta', '-i', gto2, '-o', protein_fasta)
self.pipeline_step(rast_export_genome, 'feature_dna', '--feature-type', 'rna', '-i', gto2, '-o', rna_fasta)
self.pipeline_step(rast_export_genome, 'feature_dna', '--feature-type', 'CDS', '-i', gto2, '-o', cds_fasta)
return self._results
def post_run(self):
if self._results is None:
JobFailedException('Please execute job first using execute method')
self.sample.rast_folder = self._results
self.sample.save()
| [
"antonio.rueda-martin@genomicsengland.co.uk"
] | antonio.rueda-martin@genomicsengland.co.uk |
b927a00f944c15635399243be39c7fa5201b5b7e | fd2908f80e6a20d1a2c9e7f39bc18ce53e625e9f | /esp-2-Compton/petrillo/plotcasetta.py | a2e14dea094cada89642c7795f05320d0304d77e | [] | no_license | Gattocrucco/lab4mpr | a79ecdcb293923188fcef637c7566bbb3904af05 | c613bb8e57c2943123697789f5b600483a2b4ff6 | refs/heads/master | 2021-09-15T08:15:19.129544 | 2018-05-28T23:20:47 | 2018-05-28T23:20:47 | 111,024,426 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | import histo
import numpy as np
from matplotlib import pyplot as plt
files = ['../dati/histo-16feb-ang45.dat', '../dati/histo-16feb-ang45-casetta.dat']
titles = ['spettro a 45°' , 'spettro a 45° con schermaggio']
fig = plt.figure('plotcasetta', figsize=[6.88, 2.93])
fig.clf()
fig.set_tight_layout(True)
for i in range(len(files)):
counts = np.loadtxt(files[i], unpack=True, dtype='u2')
edges = np.arange(2 ** 13 + 1)
rebin = 32
counts = histo.partial_sum(counts, rebin)
edges = edges[::rebin]
ax = fig.add_subplot(1, 2, i + 1)
histo.bar_line(edges, counts, ax=ax, color='black')
if i == 0:
ax.set_ylabel('conteggio [(%d$\\cdot$digit)$^{-1}$]' % (rebin,))
ax.set_xlabel('canale ADC [digit]')
ax.set_title(titles[i])
ax.grid(linestyle=':')
fig.show()
| [
"info@giacomopetrillo.com"
] | info@giacomopetrillo.com |
006804836e6670346e1afab4e376895f84afc4eb | 9c84f9d5dc15a7aa5d1caf05b6ae5ea83e39be3a | /python_stack/django/django_full_stack/Wall/Wall/urls.py | 0e971e97eb63117ee550a9dea13036688cd31b57 | [] | no_license | tomnguyen103/Coding_Dojo | 0fc4007296feb775b4bcd6ee98f66286b2786adb | ec46b866fc7e58a37d07b63b26b38d19eaeb96f6 | refs/heads/master | 2022-12-28T03:47:57.172540 | 2020-06-15T23:03:50 | 2020-06-15T23:03:50 | 212,214,976 | 1 | 0 | null | 2022-12-11T18:36:51 | 2019-10-01T22:59:37 | Python | UTF-8 | Python | false | false | 816 | py | """Wall URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
# from django.contrib import admin
urlpatterns = [
# url(r'^admin/', admin.site.urls),
url(r'^', include('apps.main.urls')),
]
| [
"huuthong103@gmail.com"
] | huuthong103@gmail.com |
1a2c142d1f04cf5c4e7320b05bf0b4af8adb51c2 | 165eb709370407093bd6ba22e466f6070ea2123c | /examples/adwords/v201601/campaign_management/add_campaign_labels.py | 78d075acd7bee801719188e315c8140782ec59b5 | [
"Apache-2.0"
] | permissive | pawankydv/googleads-python-lib | 92ed86d74a09a91fd3c95d6471a8c23adb0de440 | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | refs/heads/master | 2022-06-29T05:26:19.996211 | 2016-02-23T20:07:02 | 2016-02-23T20:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a label to multiple campaigns.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
CAMPAIGN_ID1 = 'INSERT_FIRST_CAMPAIGN_ID_HERE'
CAMPAIGN_ID2 = 'INSERT_SECOND_CAMPAIGN_ID_HERE'
LABEL_ID = 'INSERT_LABEL_ID_HERE'


def main(client, campaign_id1, campaign_id2, label_id):
  # Initialize appropriate service.
  campaign_service = client.GetService('CampaignService', version='v201601')

  # One ADD operation per campaign that should receive the label.
  operations = [
      {
          'operator': 'ADD',
          'operand': {
              'campaignId': campaign_id,
              'labelId': label_id,
          }
      }
      for campaign_id in (campaign_id1, campaign_id2)
  ]

  result = campaign_service.mutateLabel(operations)

  # Display results.
  for label in result['value']:
    print ('CampaignLabel with campaignId \'%s\' and labelId \'%s\' was added.'
           % (label['campaignId'], label['labelId']))


if __name__ == '__main__':
  # Initialize client object.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client, CAMPAIGN_ID1, CAMPAIGN_ID2, LABEL_ID)
| [
"msaniscalchi@google.com"
] | msaniscalchi@google.com |
bedb27f13c256128040486c37f3d346febca1cca | 610349599d32d7fc5ddae5dcb202836ca8be50aa | /blog/migrations/0013_auto_20200917_0441.py | 25848d5bebfae32506aece8caf48f7f56de261d3 | [] | no_license | reetjakhar09/blogs | e3d9d14c01096e4a50474b5a7f562bea7b655a76 | d0e17a8dd3761aaa08a59c466820040e05dc300a | refs/heads/master | 2022-12-20T05:03:50.350408 | 2020-09-29T16:40:17 | 2020-09-29T16:40:17 | 299,676,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | # Generated by Django 2.2.16 on 2020-09-17 04:41
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); adds the Comment model,
    # switches Post/Tag slugs to auto-populated slug fields, and links posts
    # to comments via a many-to-many field.
    dependencies = [
        ('blog', '0012_auto_20200916_1140'),
    ]
    operations = [
        # New Comment model; 'active' defaults to False so comments require
        # moderation before display, and listing is oldest-first.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('email', models.EmailField(max_length=254)),
                ('body', models.TextField()),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('active', models.BooleanField(default=False)),
            ],
            options={
                'ordering': ['created_on'],
            },
        ),
        # Slugs are now derived automatically from the title.
        migrations.AlterField(
            model_name='post',
            name='slug',
            field=django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='title'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='slug',
            field=django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='title'),
        ),
        migrations.AddField(
            model_name='post',
            name='comments',
            field=models.ManyToManyField(to='blog.Comment'),
        ),
    ]
| [
"test@gmail.com"
] | test@gmail.com |
b4c033e2dc7ef615aa538096e2591b4a6e59ef60 | fe0017ae33385d7a2857d0aa39fa8861b40c8a88 | /env/lib/python3.8/site-packages/pandas/core/arrays/_mixins.py | b11a853c8d39e5b6959a994d7637c842b006251d | [] | no_license | enriquemoncerrat/frasesback | eec60cc7f078f9d24d155713ca8aa86f401c61bf | e2c77f839c77f54e08a2f0930880cf423e66165b | refs/heads/main | 2023-01-03T23:21:05.968846 | 2020-10-18T21:20:27 | 2020-10-18T21:20:27 | 305,198,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | from typing import Any, Sequence, Tuple, TypeVar
import numpy as np
from pandas.compat.numpy import function as nv
from pandas.core.algorithms import take, unique
from pandas.core.arrays.base import ExtensionArray
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
_T = TypeVar("_T", bound="NDArrayBackedExtensionArray")
class NDArrayBackedExtensionArray(ExtensionArray):
    """
    ExtensionArray that is backed by a single NumPy ndarray.
    """
    # The single ndarray holding this array's data; subclasses decide dtype.
    _ndarray: np.ndarray
    def _from_backing_data(self: _T, arr: np.ndarray) -> _T:
        """
        Construct a new ExtensionArray `new_array` with `arr` as its _ndarray.
        This should round-trip:
            self == self._from_backing_data(self._ndarray)
        """
        raise AbstractMethodError(self)
    # ------------------------------------------------------------------------
    def take(
        self: _T,
        indices: Sequence[int],
        allow_fill: bool = False,
        fill_value: Any = None,
    ) -> _T:
        """Take elements by (possibly -1-marked) position, optionally filling."""
        # fill_value only matters when allow_fill=True; validate it into a
        # representation storable in the backing ndarray.
        if allow_fill:
            fill_value = self._validate_fill_value(fill_value)
        new_data = take(
            self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value,
        )
        return self._from_backing_data(new_data)
    def _validate_fill_value(self, fill_value):
        """
        If a fill_value is passed to `take` convert it to a representation
        suitable for self._ndarray, raising ValueError if this is not possible.
        Parameters
        ----------
        fill_value : object
        Returns
        -------
        fill_value : native representation
        Raises
        ------
        ValueError
        """
        raise AbstractMethodError(self)
    # ------------------------------------------------------------------------
    # TODO: make this a cache_readonly; for that to work we need to remove
    # the _index_data kludge in libreduction
    @property
    def shape(self) -> Tuple[int, ...]:
        # Delegates to the backing ndarray (may be >1-dimensional).
        return self._ndarray.shape
    def __len__(self) -> int:
        # Length along the first axis, matching ndarray semantics.
        return self.shape[0]
    @cache_readonly
    def ndim(self) -> int:
        return len(self.shape)
    @cache_readonly
    def size(self) -> int:
        # Total number of elements across all dimensions.
        return np.prod(self.shape)
    @cache_readonly
    def nbytes(self) -> int:
        return self._ndarray.nbytes
    def reshape(self: _T, *args, **kwargs) -> _T:
        """Return a reshaped view/copy wrapped in the same EA subclass."""
        new_data = self._ndarray.reshape(*args, **kwargs)
        return self._from_backing_data(new_data)
    def ravel(self: _T, *args, **kwargs) -> _T:
        """Return a flattened version wrapped in the same EA subclass."""
        new_data = self._ndarray.ravel(*args, **kwargs)
        return self._from_backing_data(new_data)
    @property
    def T(self: _T) -> _T:
        # Transpose of the backing data, rewrapped.
        new_data = self._ndarray.T
        return self._from_backing_data(new_data)
    # ------------------------------------------------------------------------
    def copy(self: _T) -> _T:
        """Return a copy backed by a copy of the underlying ndarray."""
        new_data = self._ndarray.copy()
        return self._from_backing_data(new_data)
    def repeat(self: _T, repeats, axis=None) -> _T:
        """
        Repeat elements of an array.
        See Also
        --------
        numpy.ndarray.repeat
        """
        nv.validate_repeat(tuple(), dict(axis=axis))
        new_data = self._ndarray.repeat(repeats, axis=axis)
        return self._from_backing_data(new_data)
    def unique(self: _T) -> _T:
        """Return unique values (order of first appearance), rewrapped."""
        new_data = unique(self._ndarray)
        return self._from_backing_data(new_data)
| [
"enriquemoncerrat@gmail.com"
] | enriquemoncerrat@gmail.com |
42600e834fe46701bc796af4b95355eacce3dda5 | 2614a671d011b002dbd4a3ddd80ced6e6c3d5b98 | /neural_sp/datasets/asr.py | 3d1b6c0f607a6e93328f1ef6843eee9c5bc962ee | [
"Apache-2.0"
] | permissive | zh794390558/neural_sp | ff1832bf7321e3ef23eeb3e85261354922edd18a | a0d9f5a70242fad3235ae5b12e4e792075a5f91d | refs/heads/master | 2023-01-28T18:26:24.187109 | 2020-09-09T05:15:15 | 2020-09-09T05:15:15 | 294,017,550 | 0 | 0 | null | 2020-09-09T06:00:19 | 2020-09-09T06:00:19 | null | UTF-8 | Python | false | false | 20,720 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Base class for loading dataset for ASR.
In this class, all data will be loaded at each step.
You can use the multi-GPU version.
"""
import codecs
import kaldiio
import numpy as np
import os
import pandas as pd
import random
from neural_sp.datasets.token_converter.character import Char2idx
from neural_sp.datasets.token_converter.character import Idx2char
from neural_sp.datasets.token_converter.phone import Idx2phone
from neural_sp.datasets.token_converter.phone import Phone2idx
from neural_sp.datasets.token_converter.word import Idx2word
from neural_sp.datasets.token_converter.word import Word2idx
from neural_sp.datasets.token_converter.wordpiece import Idx2wp
from neural_sp.datasets.token_converter.wordpiece import Wp2idx
random.seed(1)
np.random.seed(1)
def count_vocab_size(dict_path):
    """Return the vocabulary size of a dictionary file.

    Counts the non-empty lines in ``dict_path`` and adds one extra slot
    reserved for the ``<blank>`` symbol.
    """
    with codecs.open(dict_path, 'r', encoding='utf-8') as f:
        n_entries = sum(1 for line in f if line.strip() != '')
    return n_entries + 1
class Dataset(object):
    def __init__(self, tsv_path, dict_path,
                 unit, batch_size, n_epochs=1e10,
                 is_test=False, min_n_frames=40, max_n_frames=2000,
                 shuffle_bucket=False, sort_by='utt_id',
                 short2long=False, sort_stop_epoch=1000, dynamic_batching=False,
                 corpus='',
                 tsv_path_sub1=False, tsv_path_sub2=False,
                 dict_path_sub1=False, dict_path_sub2=False, nlsyms=False,
                 unit_sub1=False, unit_sub2=False,
                 wp_model=False, wp_model_sub1=False, wp_model_sub2=False,
                 ctc=False, ctc_sub1=False, ctc_sub2=False,
                 subsample_factor=1, subsample_factor_sub1=1, subsample_factor_sub2=1,
                 discourse_aware=False, first_n_utterances=-1):
        """A class for loading dataset.
        Args:
            tsv_path (str): path to the dataset tsv file
            dict_path (str): path to the dictionary
            unit (str): word/wp/char/phone/word_char
            batch_size (int): size of mini-batch
            nlsyms (str): path to the non-linguistic symbols file
            n_epochs (int): total epochs for training.
            is_test (bool):
            min_n_frames (int): exclude utterances shorter than this value
            max_n_frames (int): exclude utterances longer than this value
            shuffle_bucket (bool): gather the similar length of utterances and shuffle them
            sort_by (str): sort all utterances in the ascending order
                input: sort by input length
                output: sort by output length
                shuffle: shuffle all utterances
            short2long (bool): sort utterances in the descending order
            sort_stop_epoch (int): After sort_stop_epoch, training will revert
                back to a random order
            dynamic_batching (bool): change batch size dynamically in training
            ctc (bool):
            subsample_factor (int):
            wp_model (): path to the word-piece model for sentencepiece
            corpus (str): name of corpus
            discourse_aware (bool):
            first_n_utterances (int): evaluate the first N utterances
        """
        super(Dataset, self).__init__()
        # Bookkeeping for epoch/iteration progress.
        self.epoch = 0
        self.iteration = 0
        self.offset = 0
        self.set = os.path.basename(tsv_path).split('.')[0]
        self.is_test = is_test
        self.unit = unit
        self.unit_sub1 = unit_sub1
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.shuffle_bucket = shuffle_bucket
        if shuffle_bucket:
            assert sort_by in ['input', 'output']
        self.sort_stop_epoch = sort_stop_epoch
        self.sort_by = sort_by
        assert sort_by in ['input', 'output', 'shuffle', 'utt_id']
        self.dynamic_batching = dynamic_batching
        self.corpus = corpus
        self.discourse_aware = discourse_aware
        if discourse_aware:
            assert not is_test
        self.vocab = count_vocab_size(dict_path)
        # Special token indices (0=<blank>, 1=<unk> by convention -- TODO confirm).
        self.eos = 2
        self.pad = 3
        # NOTE: reserved in advance
        self.idx2token = []
        self.token2idx = []
        # Set index converter
        if unit in ['word', 'word_char']:
            self.idx2token += [Idx2word(dict_path)]
            self.token2idx += [Word2idx(dict_path, word_char_mix=(unit == 'word_char'))]
        elif unit == 'wp':
            self.idx2token += [Idx2wp(dict_path, wp_model)]
            self.token2idx += [Wp2idx(dict_path, wp_model)]
        elif unit in ['char']:
            self.idx2token += [Idx2char(dict_path)]
            self.token2idx += [Char2idx(dict_path, nlsyms=nlsyms)]
        elif 'phone' in unit:
            self.idx2token += [Idx2phone(dict_path)]
            self.token2idx += [Phone2idx(dict_path)]
        else:
            raise ValueError(unit)
        # Configure vocabularies/converters for the auxiliary (sub1/sub2) tasks.
        # NOTE: locals() is used to fetch the *_sub{i} keyword arguments by name.
        for i in range(1, 3):
            dict_path_sub = locals()['dict_path_sub' + str(i)]
            wp_model_sub = locals()['wp_model_sub' + str(i)]
            unit_sub = locals()['unit_sub' + str(i)]
            if dict_path_sub:
                setattr(self, 'vocab_sub' + str(i), count_vocab_size(dict_path_sub))
                # Set index converter
                if unit_sub:
                    if unit_sub == 'wp':
                        self.idx2token += [Idx2wp(dict_path_sub, wp_model_sub)]
                        self.token2idx += [Wp2idx(dict_path_sub, wp_model_sub)]
                    elif unit_sub == 'char':
                        self.idx2token += [Idx2char(dict_path_sub)]
                        self.token2idx += [Char2idx(dict_path_sub, nlsyms=nlsyms)]
                    elif 'phone' in unit_sub:
                        self.idx2token += [Idx2phone(dict_path_sub)]
                        self.token2idx += [Phone2idx(dict_path_sub)]
                    else:
                        raise ValueError(unit_sub)
            else:
                setattr(self, 'vocab_sub' + str(i), -1)
        # Load dataset tsv file
        df = pd.read_csv(tsv_path, encoding='utf-8', delimiter='\t')
        df = df.loc[:, ['utt_id', 'speaker', 'feat_path',
                        'xlen', 'xdim', 'text', 'token_id', 'ylen', 'ydim']]
        for i in range(1, 3):
            if locals()['tsv_path_sub' + str(i)]:
                df_sub = pd.read_csv(locals()['tsv_path_sub' + str(i)], encoding='utf-8', delimiter='\t')
                df_sub = df_sub.loc[:, ['utt_id', 'speaker', 'feat_path',
                                        'xlen', 'xdim', 'text', 'token_id', 'ylen', 'ydim']]
                setattr(self, 'df_sub' + str(i), df_sub)
            else:
                setattr(self, 'df_sub' + str(i), None)
        # Probe the feature dimensionality from the first utterance.
        self.input_dim = kaldiio.load_mat(df['feat_path'][0]).shape[-1]
        # Remove inappropriate utterances
        if is_test or discourse_aware:
            print('Original utterance num: %d' % len(df))
            n_utts = len(df)
            df = df[df.apply(lambda x: x['ylen'] > 0, axis=1)]
            print('Removed %d empty utterances' % (n_utts - len(df)))
            if first_n_utterances > 0:
                n_utts = len(df)
                df = df[df.apply(lambda x: x['ylen'] > 0, axis=1)]
                df = df.truncate(before=0, after=first_n_utterances - 1)
                print('Select first %d utterances' % len(df))
        else:
            print('Original utterance num: %d' % len(df))
            n_utts = len(df)
            # Drop utterances outside the [min_n_frames, max_n_frames] range
            # and those with empty references.
            df = df[df.apply(lambda x: min_n_frames <= x[
                'xlen'] <= max_n_frames, axis=1)]
            df = df[df.apply(lambda x: x['ylen'] > 0, axis=1)]
            print('Removed %d utterances (threshold)' % (n_utts - len(df)))
            if ctc and subsample_factor > 1:
                # CTC requires the label length not to exceed the number of
                # (subsampled) input frames.
                n_utts = len(df)
                df = df[df.apply(lambda x: x['ylen'] <= (x['xlen'] // subsample_factor), axis=1)]
                print('Removed %d utterances (for CTC)' % (n_utts - len(df)))
            for i in range(1, 3):
                df_sub = getattr(self, 'df_sub' + str(i))
                ctc_sub = locals()['ctc_sub' + str(i)]
                subsample_factor_sub = locals()['subsample_factor_sub' + str(i)]
                if df_sub is not None:
                    if ctc_sub and subsample_factor_sub > 1:
                        df_sub = df_sub[df_sub.apply(
                            lambda x: x['ylen'] <= (x['xlen'] // subsample_factor_sub), axis=1)]
                    if len(df) != len(df_sub):
                        # Keep main/sub dataframes aligned on the same utterances.
                        n_utts = len(df)
                        df = df.drop(df.index.difference(df_sub.index))
                        print('Removed %d utterances (for CTC, sub%d)' % (n_utts - len(df), i))
                        for j in range(1, i + 1):
                            setattr(self, 'df_sub' + str(j),
                                    getattr(self, 'df_sub' + str(j)).drop(getattr(self, 'df_sub' + str(j)).index.difference(df.index)))
        if corpus == 'swbd':
            # 1. serialize
            # df['session'] = df['speaker'].apply(lambda x: str(x).split('-')[0])
            # 2. not serialize
            df['session'] = df['speaker'].apply(lambda x: str(x))
        else:
            df['session'] = df['speaker'].apply(lambda x: str(x))
        # Sort tsv records
        if discourse_aware:
            # Sort by onset (start time)
            df = df.assign(prev_utt='')
            df = df.assign(line_no=list(range(len(df))))
            # Onset extraction is corpus-specific (encoded in the utt_id).
            if corpus == 'swbd':
                df['onset'] = df['utt_id'].apply(lambda x: int(x.split('_')[-1].split('-')[0]))
            elif corpus == 'csj':
                df['onset'] = df['utt_id'].apply(lambda x: int(x.split('_')[1]))
            elif corpus == 'tedlium2':
                df['onset'] = df['utt_id'].apply(lambda x: int(x.split('-')[-2]))
            else:
                raise NotImplementedError(corpus)
            df = df.sort_values(by=['session', 'onset'], ascending=True)
            # Extract previous utterances
            groups = df.groupby('session').groups
            df['prev_utt'] = df.apply(
                lambda x: [df.loc[i, 'line_no']
                           for i in groups[x['session']] if df.loc[i, 'onset'] < x['onset']], axis=1)
            df['n_prev_utt'] = df.apply(lambda x: len(x['prev_utt']), axis=1)
            df['n_utt_in_session'] = df.apply(
                lambda x: len([i for i in groups[x['session']]]), axis=1)
            df = df.sort_values(by=['n_utt_in_session'], ascending=short2long)
            # NOTE: this is used only when LM is trained with seliarize: true
            # if is_test and corpus == 'swbd':
            #     # Sort by onset
            #     df['onset'] = df['utt_id'].apply(lambda x: int(x.split('_')[-1].split('-')[0]))
            #     df = df.sort_values(by=['session', 'onset'], ascending=True)
        elif not is_test:
            if sort_by == 'input':
                df = df.sort_values(by=['xlen'], ascending=short2long)
            elif sort_by == 'output':
                df = df.sort_values(by=['ylen'], ascending=short2long)
            elif sort_by == 'shuffle':
                # NOTE(review): self.df is not assigned yet at this point, so
                # sort_by='shuffle' would raise AttributeError; presumably this
                # was meant to be df.index -- confirm before relying on it.
                df = df.reindex(np.random.permutation(self.df.index))
        # Re-indexing
        if discourse_aware:
            self.df = df
            for i in range(1, 3):
                if getattr(self, 'df_sub' + str(i)) is not None:
                    setattr(self, 'df_sub' + str(i),
                            getattr(self, 'df_sub' + str(i)).reindex(df.index))
        else:
            self.df = df.reset_index()
            for i in range(1, 3):
                if getattr(self, 'df_sub' + str(i)) is not None:
                    setattr(self, 'df_sub' + str(i),
                            getattr(self, 'df_sub' + str(i)).reindex(df.index).reset_index())
        # Build the initial pool of mini-batch indices.
        if discourse_aware:
            self.df_indices_buckets = self.discourse_bucketing(batch_size)
        elif shuffle_bucket:
            self.df_indices_buckets = self.shuffle_bucketing(batch_size)
        else:
            self.df_indices = list(self.df.index)
def __len__(self):
return len(self.df)
@property
def epoch_detail(self):
"""Percentage of the current epoch."""
return self.offset / len(self)
@property
def n_frames(self):
return self.df['xlen'].sum()
def reset(self, batch_size=None):
"""Reset data counter and offset.
Args:
batch_size (int): size of mini-batch
"""
if batch_size is None:
batch_size = self.batch_size
if self.discourse_aware:
self.df_indices_buckets = self.discourse_bucketing(batch_size)
elif self.shuffle_bucket:
self.df_indices_buckets = self.shuffle_bucketing(batch_size)
else:
self.df_indices = list(self.df.index)
self.offset = 0
    def __iter__(self):
        # Iterator protocol: the dataset object is its own iterator.
        return self
    def next(self, batch_size):
        # Python 2-style alias kept for backward compatibility with ``__next__``.
        return self.__next__(batch_size)
    def __next__(self, batch_size=None):
        """Generate each mini-batch.
        Args:
            batch_size (int): size of mini-batch
        Returns:
            mini_batch (dict):
            is_new_epoch (bool): flag for the end of the current epoch

        """
        if batch_size is None:
            batch_size = self.batch_size
        # Stop iterating once the configured number of epochs is exhausted.
        if self.epoch >= self.n_epochs:
            raise StopIteration
        indices, is_new_epoch = self.sample_index(batch_size)
        mini_batch = self.__getitem__(indices)
        if is_new_epoch:
            # shuffle the whole data
            # Once sort_stop_epoch is reached, permanently switch to a random
            # utterance order for the remaining epochs.
            if self.epoch + 1 == self.sort_stop_epoch:
                self.sort_by = 'shuffle'
                self.df = self.df.reindex(np.random.permutation(self.df.index))
                for i in range(1, 3):
                    if getattr(self, 'df_sub' + str(i)) is not None:
                        setattr(self, 'df_sub' + str(i),
                                getattr(self, 'df_sub' + str(i)).reindex(self.df.index).reset_index())
                # Re-indexing
                self.df = self.df.reset_index()
            self.reset()
            self.epoch += 1
        return mini_batch, is_new_epoch
    def sample_index(self, batch_size):
        """Sample data indices of mini-batch.
        Args:
            batch_size (int): size of mini-batch
        Returns:
            indices (np.ndarray): indices of dataframe in the current mini-batch
            is_new_epoch (bool): flag for the end of the current epoch

        """
        is_new_epoch = False
        if self.discourse_aware:
            # Pre-built buckets: consume the next one in order.
            indices = self.df_indices_buckets.pop(0)
            self.offset += len(indices)
            is_new_epoch = (len(self.df_indices_buckets) == 0)
        elif self.shuffle_bucket:
            indices = self.df_indices_buckets.pop(0)
            self.offset += len(indices)
            is_new_epoch = (len(self.df_indices_buckets) == 0)
            # Shuffle utterances in mini-batch
            indices = random.sample(indices, len(indices))
        else:
            if len(self.df_indices) > batch_size:
                # Change batch size dynamically
                min_xlen = self.df[self.offset:self.offset + 1]['xlen'].values[0]
                min_ylen = self.df[self.offset:self.offset + 1]['ylen'].values[0]
                batch_size = self.set_batch_size(batch_size, min_xlen, min_ylen)
                indices = list(self.df[self.offset:self.offset + batch_size].index)
                self.offset += len(indices)
            else:
                # Last mini-batch
                indices = self.df_indices[:]
                self.offset = len(self)
                is_new_epoch = True
                # Change batch size dynamically
                min_xlen = self.df[indices[0]:indices[0] + 1]['xlen'].values[0]
                min_ylen = self.df[indices[0]:indices[0] + 1]['ylen'].values[0]
                batch_size = self.set_batch_size(batch_size, min_xlen, min_ylen)
                # Remove the rest
                # NOTE(review): trimming here can drop the tail utterances of
                # the final batch when dynamic batching shrinks batch_size.
                indices = indices[:batch_size]
            # Shuffle utterances in mini-batch
            indices = random.sample(indices, len(indices))
            for i in indices:
                self.df_indices.remove(i)
        return indices, is_new_epoch
    def __getitem__(self, indices):
        """Create mini-batch per step.
        Args:
            indices (np.ndarray): indices of dataframe in the current mini-batch
        Returns:
            mini_batch_dict (dict):
                xs (list): input data of size `[T, input_dim]`
                xlens (list): lengths of xs
                ys (list): reference labels in the main task of size `[L]`
                ys_sub1 (list): reference labels in the 1st auxiliary task of size `[L_sub1]`
                ys_sub2 (list): reference labels in the 2nd auxiliary task of size `[L_sub2]`
                utt_ids (list): name of each utterance
                speakers (list): name of each speaker
                sessions (list): name of each session

        """
        # inputs
        # Features are loaded lazily from Kaldi ark/scp files per mini-batch.
        xs = [kaldiio.load_mat(self.df['feat_path'][i]) for i in indices]
        xlens = [self.df['xlen'][i] for i in indices]
        utt_ids = [self.df['utt_id'][i] for i in indices]
        speakers = [self.df['speaker'][i] for i in indices]
        sessions = [self.df['session'][i] for i in indices]
        texts = [self.df['text'][i] for i in indices]
        feat_paths = [self.df['feat_path'][i] for i in indices]
        # main outputs
        # At test time, token ids are produced on the fly from the raw text;
        # during training the pre-tokenized 'token_id' column is used.
        if self.is_test:
            ys = [self.token2idx[0](self.df['text'][i]) for i in indices]
        else:
            ys = [list(map(int, str(self.df['token_id'][i]).split())) for i in indices]
        # sub1 outputs
        ys_sub1 = []
        if self.df_sub1 is not None:
            ys_sub1 = [list(map(int, str(self.df_sub1['token_id'][i]).split())) for i in indices]
        elif self.vocab_sub1 > 0 and not self.is_test:
            ys_sub1 = [self.token2idx[1](self.df['text'][i]) for i in indices]
        # sub2 outputs
        ys_sub2 = []
        if self.df_sub2 is not None:
            ys_sub2 = [list(map(int, str(self.df_sub2['token_id'][i]).split())) for i in indices]
        elif self.vocab_sub2 > 0 and not self.is_test:
            ys_sub2 = [self.token2idx[2](self.df['text'][i]) for i in indices]
        mini_batch_dict = {
            'xs': xs,
            'xlens': xlens,
            'ys': ys,
            'ys_sub1': ys_sub1,
            'ys_sub2': ys_sub2,
            'utt_ids': utt_ids,
            'speakers': speakers,
            'sessions': sessions,
            'text': texts,
            'feat_path': feat_paths,  # for plot
        }
        return mini_batch_dict
def set_batch_size(self, batch_size, min_xlen, min_ylen):
if not self.dynamic_batching:
return batch_size
if min_xlen <= 800:
pass
elif min_xlen <= 1600 or 80 < min_ylen <= 100:
batch_size //= 2
else:
batch_size //= 4
return max(1, batch_size)
    def shuffle_bucketing(self, batch_size):
        """Group length-sorted utterances into buckets and shuffle bucket order."""
        df_indices_buckets = []  # list of list
        offset = 0
        while True:
            # Because the dataframe is length-sorted, the first utterance of
            # each slice is the shortest one in that bucket.
            min_xlen = self.df[offset:offset + 1]['xlen'].values[0]
            min_ylen = self.df[offset:offset + 1]['ylen'].values[0]
            _batch_size = self.set_batch_size(batch_size, min_xlen, min_ylen)
            indices = list(self.df[offset:offset + _batch_size].index)
            df_indices_buckets.append(indices)
            offset += len(indices)
            # NOTE(review): this break condition can leave the final
            # (< _batch_size) utterances out of any bucket -- presumably an
            # intentional drop-last; confirm.
            if offset + _batch_size >= len(self):
                break
        # shuffle buckets
        random.shuffle(df_indices_buckets)
        return df_indices_buckets
def discourse_bucketing(self, batch_size):
df_indices_buckets = [] # list of list
session_groups = [(k, v) for k, v in self.df.groupby('n_utt_in_session').groups.items()]
if self.shuffle_bucket:
random.shuffle(session_groups)
for n_utt, ids in session_groups:
first_utt_ids = [i for i in ids if self.df['n_prev_utt'][i] == 0]
for i in range(0, len(first_utt_ids), batch_size):
first_utt_ids_mb = first_utt_ids[i:i + batch_size]
for j in range(n_utt):
indices = [k + j for k in first_utt_ids_mb]
df_indices_buckets.append(indices)
return df_indices_buckets
| [
"hiro.mhbc@gmail.com"
] | hiro.mhbc@gmail.com |
bc19ab8e189e29a058a5dded83af0e6c030da832 | d5fe9d0c7c93c3250b9e212435b02d8373dec091 | /pro/8.py | 98751a3c8367041e394594d3ac34676580508ce2 | [] | no_license | HarshaaArunachalam/GUV | 6937adb84f0928f08c9fbc519310abc06ef3541a | c047887bf6c19a4950c5f634111e1c02966367e5 | refs/heads/master | 2020-05-31T10:52:23.280052 | 2019-08-10T20:23:11 | 2019-08-10T20:23:11 | 190,249,464 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | def GCD(a,b):
z=0
c=min(a,b)
for k in range(c,1,-1):
if(a%k==0)and (b%k==0):
return k
z=1
if z!=1:
return 1
x=input()
y=x.split(" ")
z=[]
w=[]
res=[]
z1=int(y[0])
z2=int(y[1])
z3=input()
u=z3.split(" ")
for i in range(len(u)):
w.append(int(u[i]))
for j in range(z2):
f=input()
g=f.split(" ")
g1=int(g[0])-1
g2=int(g[1])-1
s=GCD(w[g1],w[g2])
res.append(s)
for l in res:
print(str(l))
| [
"noreply@github.com"
] | HarshaaArunachalam.noreply@github.com |
1a8d14085751c17d7831e7fca361ca6d2d552255 | 5ce040197421e557f8e7337183c2420d1cb898b0 | /temoatools/analyze_capacity.py | 5fee6b66e7d42c6683ded9998b5d838c9ccfa662 | [] | no_license | coopercenter/temoatools | b7bd871d1066dbfe2f43481a6c2ca84e4315b2b3 | 151ad4e68a4082166f87db2081bfe552f6d92253 | refs/heads/master | 2023-04-17T00:55:44.106235 | 2021-04-20T19:53:08 | 2021-04-20T19:53:08 | 295,842,252 | 1 | 1 | null | 2020-11-17T16:14:21 | 2020-09-15T20:30:59 | Python | UTF-8 | Python | false | false | 7,400 | py | import os
import sqlite3
import pandas as pd
import temoatools as tt
debug = False
resolution = 600 # DPI
# ==============================================================================
# Remove filetype from filename
def name(db):
return db[:db.find('.')]
# ==============================================================================
def getCapacity(folders, dbs, switch='fuel', sector_name='electric', save_data='N', create_plots='N',
run_name=''):
# inputs:
# 1) folders - paths containing dbs (list or single string if all in the same path)
# 2) dbs - names of databases (list)
# 3) switch - 'fuel' or 'tech', basis of categorization
# 4) sectorName - name of temoa sector to be analyzed
# 5) saveData - 'Y' or 'N', default is 'N'
# 6) createPlot - 'Y' or 'N', default is 'N'
# 7) run_name - Used for saving results in dedicated folder
#
# outputs:
# 1) capacity - pandas DataFrame holding capacity for each model year
# ==============================================================================
print("Analyzing capacity")
# Save original directory
wrkdir = os.getcwd()
# If only a single db and folder provided, change to a list
if type(dbs) == str and type(folders) == str:
dbs = [dbs]
folders = [folders]
# If a list of folders is provided with one database, only use first folder
elif type(dbs) == str:
dbs = [dbs]
folders = [folders[0]]
# If only a single folder provided, create a list of the same folder
elif type(folders) == str:
fldrs = []
for db in dbs:
fldrs.append(folders)
folders = fldrs
# Create dictionary to hold each capacity_single series
capacity = pd.DataFrame(dtype='float64')
# Iterate through each db
for folder, db in zip(folders, dbs):
capacity_single = SingleDB(folder, db, switch=switch, sector_name=sector_name)
capacity = pd.concat([capacity, capacity_single])
# Reset index (remove multi-level indexing, easier to use in Excel)
capacity = capacity.reset_index()
# Directory to hold results
if save_data == 'Y' or create_plots == 'Y':
tt.create_results_dir(wrkdir=wrkdir, run_name=run_name)
# Save results to Excel
if save_data == 'Y':
# Create savename based on switch
if switch == 'fuel':
savename = 'capacity_by_fuel.csv'
elif switch == 'tech':
savename = 'capacity_by_tech.csv'
# Save
capacity.to_csv(savename)
# Create plots
if create_plots == 'Y':
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'figure.max_open_warning': 0}) # ignore warning
# new figure
plt.figure()
# set aesthetics
sns.set_style("white", {"font.family": "serif", "font.serif": ["Times", "Palatino", "serif"]})
sns.set_context("paper")
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
# wide to long
df2 = pd.melt(capacity, id_vars=['database', 'scenario', 'fuelOrTech'], var_name='var', value_name='value')
# plot
sns.relplot(x='var', y='value', hue='database', data=df2, kind='line', col='fuelOrTech', col_wrap=4)
# save
if switch == 'fuel':
savename = 'capacity_by_fuel.png'
elif switch == 'tech':
savename = 'capacity_by_tech.png'
plt.savefig(savename, dpi=resolution)
# close figure
plt.close()
# Return to original directory
os.chdir(wrkdir)
# return capacity as a dictionary
return capacity
# ==============================================================================
def SingleDB(folder, db, switch='fuel', sector_name='electric'):
# inputs:
# 1) folder - path containing db
# 2) db - name of databas
# 3) switch - 'fuel' or 'tech', basis of categorization
# 5) sectorName - name of temoa sector to be analyzed
#
# outputs:
# 1) capacity - pandas DataFrame holding capacity for each model year
# ==============================================================================
print("\tAnalyzing db: ", db)
# save original folder
origDir = os.getcwd()
# move to folder
os.chdir(folder)
# Connect to Database
con = sqlite3.connect(db)
cur = con.cursor()
# Read from database:
# Select All Efficiencies
qry = "SELECT * FROM Efficiency"
cur.execute(qry)
db_efficiency = cur.fetchall()
# Select All time_periods
qry = "SELECT * FROM time_periods"
cur.execute(qry)
db_t_periods = cur.fetchall()
# Select All technologies
qry = "SELECT * FROM technologies"
cur.execute(qry)
db_technologies = cur.fetchall()
# Select All Capacities
qry = "SELECT * FROM Output_CapacityByPeriodAndTech"
cur.execute(qry)
db_Output_CapacityByPeriodAndTech = cur.fetchall()
# Review db_t_periods to select future time periods
future_t_periods = []
for t_periods, flag in db_t_periods:
if flag == 'f':
if t_periods not in future_t_periods:
future_t_periods.append(t_periods)
# Review db_technologies to select related sector
techs = []
for tech, flag, sector, tech_desc, tech_category in db_technologies:
if sector == sector_name or sector_name == "all":
if tech not in techs:
techs.append(tech)
# Review db_efficiency to create a dictionary of fuels
d = {}
for input_comm, tech, vintage, output_comm, efficiency, ef_notes in db_efficiency:
if tech in techs:
if tech not in d.keys():
d[tech] = input_comm
# Sort data and assign as columns and rows
if switch == 'fuel':
cols = sorted(set(d.values()))
elif switch == 'tech':
cols = sorted(techs)
future_t_periods = sorted(future_t_periods)
rows = future_t_periods[:-1]
# Identify Unique Scenarios
qry = "SELECT * FROM Output_Objective"
cur.execute(qry)
db_objective = cur.fetchall()
scenarios = []
for scenario, objective_name, total_system_cost in db_objective:
if scenario not in scenarios:
scenarios.append(scenario)
# Create pandas DataFrame to hold yearlyEmissions for all scenarios
index = pd.MultiIndex.from_product([[db], scenarios, cols], names=['database', 'scenario', 'fuelOrTech'])
df = pd.DataFrame(index=index, columns=future_t_periods[:-1], dtype='float64')
df = df.fillna(0.0) # Default value to zero
# Review db_Output_CapacityByPeriodAndTech to fill data frame
for scenario, sector, t_periods, tech, capacity in db_Output_CapacityByPeriodAndTech:
if sector == sector_name or sector_name == "all":
if switch == 'fuel':
df.loc[(db, scenario, d[tech]), t_periods] = df.loc[(db, scenario, d[tech]), t_periods] + capacity
elif switch == 'tech':
df.loc[(db, scenario, tech), t_periods] = df.loc[(db, scenario, tech), t_periods] + capacity
# return to original folder
os.chdir(origDir)
# return capacity as a DataFrame
return df
| [
"jab6ft@virginia.edu"
] | jab6ft@virginia.edu |
62ca49659bf0b33d8660c6f9a1527e2fb058ff0d | 273901aaf0addb4c8f6b1a015ef29c7e704d2a47 | /parsette/__init__.py | b6550dc540c55d338b58409c31cd02c3dc73e2d2 | [] | no_license | bqqbarbhg/parsette | 36ea2d97893c24a3553dbaad1c81e5c9eea3f8a3 | 0b57f84d0e55b8701443b8ebfc3aa2ea8ba80ad6 | refs/heads/master | 2020-04-30T11:49:03.519862 | 2019-03-20T20:25:12 | 2019-03-20T20:25:12 | 176,811,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py |
from .parsette import Lexer, Rule
| [
"samuli.1995@hotmail.com"
] | samuli.1995@hotmail.com |
f5ac969f98498b817f245eea5127f37ec78d5b86 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/l5/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/app_20200705181608.py | cfa4d4767bd82bbd5cf7b9f1eb764d3239a2e78c | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | import random
from flask import Flask
from flask import render_template
#######################################
choose = ['rock', 'paper', 'scisors']
app = Flask(__name__)
#######################################
def winners():
if winner == 'compucter':
return render_template('2.html')
else:
return render_template('3.html')
def random2():
choose = random.choice(choose)
def game(player, bot):
if (bot == 'rock' and player == 'paper') or (bot == 'paper' and player == 'scisors') or (bot == 'scisors' and player == 'rock'):
winner = 'player'
#######################################
elif (bot == 'paper' and player == 'rock') or (bot == 'scisors' and player == 'paper') or (bot == 'rock' and player == 'scisors'):
winner = 'compucter'
else:
winner = 'Tie'
@app.route('/')
def home():
return render_template('index.html')
#######################################
@app.route('/rock')
def rock():
random2()
player = "rock"
game(player, )
#######################################
@app.route('/paper')
def paper():
random2()
player = "paper"
game()
#######################################
@app.route('/scisors')
def scisors():
random2()
player = "scisors"
game()
#######################################
@app.route('/tie')
def tie():
winner = 'Tie'
return render_template('1.html')
#######################################
if __name__ == '__main__':
app.run() | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
d866b7ed98c4357d9ccc8df8cd0f45a28a019655 | 3a01d6f6e9f7db7428ae5dc286d6bc267c4ca13e | /unittests/pytests/friction/TestSlipWeakeningTimeStable.py | 85102fd93c1ba47de9f52358dcbc839563be8946 | [
"MIT"
] | permissive | youngsolar/pylith | 1ee9f03c2b01560706b44b4ccae99c3fb6b9fdf4 | 62c07b91fa7581641c7b2a0f658bde288fa003de | refs/heads/master | 2020-12-26T04:04:21.884785 | 2014-10-06T21:42:42 | 2014-10-06T21:42:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | #!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2014 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file unittests/pytests/friction/TestSlipWeakeningTimeStable.py
## @brief Unit testing of SlipWeakeningTimeStable object.
import unittest
from pylith.friction.SlipWeakeningTimeStable import SlipWeakeningTimeStable
# ----------------------------------------------------------------------
class TestSlipWeakeningTimeStable(unittest.TestCase):
"""
Unit testing of SlipWeakeningTimeStable object.
"""
def setUp(self):
"""
Setup test subject.
"""
self.friction = SlipWeakeningTimeStable()
return
def test_constructor(self):
"""
Test constructor.
"""
return
def test_factory(self):
"""
Test factory method.
"""
from pylith.friction.SlipWeakeningTimeStable import friction_model
m = friction_model()
return
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
d27ad5b6ed1c2a47b246e44fc38b2f4f0c57ce96 | a11bd8615f47c15fb52cd83fe7722309f250537d | /pytgf/test/test_controls/wrappers/test_bot_wrapper.py | f70529d81b25b6e27e7274105b99cfc20429dba0 | [] | no_license | Angeall/pyTGF | 75a0abfc6605f08c93181248bd529279c01b05bc | 463359a6596598c0c6cceb6e30f393d77eca0a89 | refs/heads/master | 2021-01-12T12:21:10.659708 | 2018-09-02T12:37:58 | 2018-09-02T12:37:58 | 72,452,959 | 0 | 0 | null | 2017-05-28T11:41:09 | 2016-10-31T16:00:45 | Python | UTF-8 | Python | false | false | 7,317 | py | import unittest
from typing import Tuple, List
from multiprocess.connection import Pipe
try:
from multiprocess.connection import PipeConnection
except ImportError:
PipeConnection = object
from ....characters.moves import MoveDescriptor
from ....characters.units import Entity
from ....board import Builder
from ....characters.moves import Path
from ....characters.units import Unit
from ....controls.controllers import Bot
from ....controls.events import BotEvent, SpecialEvent
from ....controls.wrappers.bot import BotControllerWrapper
from ....game import Core, UnfeasibleMoveException, API
MOVE1 = "MOVE1"
MOVE2 = "MOVE2"
MSG1 = "DO_MOVE1"
MSG2 = "DO_MOVE2"
class ExampleBotControllerWrapper(BotControllerWrapper):
def isMoveDescriptorAllowed(self, move_descriptor) -> bool:
return type(move_descriptor) == str and move_descriptor[0:4] == 'MOVE'
class ExampleAPI(API):
def _decodeMoveFromPositiveNumber(self, player_number: int, encoded_move: int) -> MoveDescriptor:
pass
def _encodeMoveIntoPositiveNumber(self, player_number: int, move_descriptor: MoveDescriptor) -> int:
pass
def createMoveForDescriptor(self, unit: Unit, move_descriptor: MoveDescriptor, max_moves: int = -1,
force: bool = False, is_step: bool=False) -> Path:
raise UnfeasibleMoveException()
def __init__(self, game: Core):
super().__init__(game)
self.move1 = 0
self.move2 = 0
def isItOneTestMethod(self):
if isinstance(self.game, ExampleGame):
return True
return False
def performMove1(self):
self.move1 += 1
def performMove2(self):
self.move2 += 1
class ExampleGame(Core):
@property
def _teamKillAllowed(self) -> bool:
return False
@property
def _suicideAllowed(self) -> bool:
return False
def _collidePlayers(self, player1, player2, tile_id, frontal: bool = False, entity: Entity=None):
pass
class ExampleBot(Bot):
@property
def possibleMoves(self) -> List[MoveDescriptor]:
return []
def _getGameStateAPI(self, game: Core):
return ExampleAPI(game)
def reactToEvents(self, events: List[BotEvent]):
for event in events:
new_move_event = event.moveDescriptor
if new_move_event == MOVE1:
self.gameState.performMove1()
elif new_move_event == MOVE2:
self.gameState.performMove2()
return super().reactToEvents(events)
def _isMoveInteresting(self, player_number: int, new_move_event) -> bool:
return True
def _isMoveAllowed(self, move: str) -> bool:
if type(move) == str and move[0:4] == 'MOVE':
return True
return False
def selectMoveFollowingTeammateMessage(self, teammate_number: int, message):
if message == MSG1:
return MOVE1
elif message == MSG2:
return MOVE2
def _selectNewMove(self, game_state: ExampleAPI):
return "MOVE1-" + str(game_state.move1) + '/' + "MOVE2-" + str(game_state.move2)
class TestBotControllerWrapper(unittest.TestCase):
def setUp(self):
self.game = ExampleGame(Builder(10, 10, 7, 6).create())
self.game.addUnit(Unit(1), 1, (0, 0))
self.bot1 = ExampleBot(1)
self.bot1.gameState = self.game.copy()
self.linker1 = ExampleBotControllerWrapper(self.bot1)
self.game.addUnit(Unit(2), 1, (0, 0))
self.bot2 = ExampleBot(2)
self.bot2.gameState = self.game.copy()
self.linker2 = ExampleBotControllerWrapper(self.bot2)
self.game_info_pipe_parent1, self.game_info_pipe_child1 = Pipe() # type: Tuple[PipeConnection, PipeConnection]
self.game_info_pipe_parent2, self.game_info_pipe_child2 = Pipe() # type: Tuple[PipeConnection, PipeConnection]
self.move_pipe_parent1, self.move_pipe_child1 = Pipe() # type: Tuple[PipeConnection, PipeConnection]
self.move_pipe_parent2, self.move_pipe_child2 = Pipe() # type: Tuple[PipeConnection, PipeConnection]
self.linker1.setMainPipe(self.move_pipe_child1)
self.linker1.setGameInfoPipe(self.game_info_pipe_child1)
self.linker2.setMainPipe(self.move_pipe_child2)
self.linker2.setGameInfoPipe(self.game_info_pipe_child2)
self.collaboration_pipe_1, self.collaboration_pipe_2 = Pipe()
self.linker1.addCollaborationPipe(2, self.collaboration_pipe_1)
self.linker2.addCollaborationPipe(1, self.collaboration_pipe_2)
def test_invalid_type_sent(self):
"""
Tests that the linker raises an error when a message that is not a "BotEvent" is sent
"""
self.move_pipe_parent1.send("")
self.assertRaises(TypeError, self.linker1._routine, self.game_info_pipe_child1, self.move_pipe_child1)
def test_send_move(self):
"""
Tests that moves are sent correctly, that they affect the GameState and that the AI responds well
"""
move1_event = BotEvent(1, MOVE1)
move2_event = BotEvent(1, MOVE2)
self.move_pipe_parent1.send(move1_event)
self.move_pipe_parent1.send(move1_event)
self.move_pipe_parent1.send(move1_event)
self.move_pipe_parent1.send(move2_event)
self.linker1._routine()
self.assertFalse(self.move_pipe_parent1.poll())
self.linker1._routine()
self.assertTrue(self.move_pipe_parent1.poll())
self.assertEqual(self.move_pipe_parent1.recv(), "MOVE1-3/MOVE2-1")
def test_send_message_to_teammate(self):
"""
Tests that messages are sent well between two teammates
"""
self.bot1.sendMessageToTeammate(2, MSG1)
self.linker1._routine() # Will send the message
self.linker2._routine() # Will receive the message
self.assertTrue(self.move_pipe_parent2.poll())
self.assertEqual(self.move_pipe_parent2.recv(), "MOVE1")
def test_send_end_event(self):
"""
Checks if the linker's logical loop ends correctly when it receives the end event
"""
self.game_info_pipe_parent1.send(SpecialEvent(SpecialEvent.END))
self.linker1.run()
# Should run indefinitely if no flag was sent
self.assertTrue(True)
def test_unit_dead(self):
"""
Checks if the linker blocks the incoming message of a dead unit, and starts to send again when resurrected
"""
self.game_info_pipe_parent1.send(SpecialEvent(SpecialEvent.UNIT_KILLED))
self.move_pipe_parent1.send(BotEvent(1, MOVE1))
self.linker1._routine()
self.assertFalse(self.move_pipe_parent1.poll())
self.linker1._routine() # Message blocked
self.assertFalse(self.move_pipe_parent1.poll())
self.game_info_pipe_parent1.send(SpecialEvent(SpecialEvent.RESURRECT_UNIT))
self.move_pipe_parent1.send(BotEvent(1, MOVE2))
self.linker1._routine() # Message received
self.linker1._routine() # Message sent
self.assertTrue(self.move_pipe_parent1.poll())
# The message "MOVE1" was correctly received while the unit was dead => the game state is updated
# while the unit is dead
self.assertEqual(self.move_pipe_parent1.recv(), "MOVE1-1/MOVE2-1")
| [
"angeal1105@gmail.com"
] | angeal1105@gmail.com |
64ef7179aea9c2d661955269e63c1c7311a1037c | c78ce4f66cc964c230ad60fbf2ced6b4811eab89 | /0x0C-python-almost_a_circle/models/rectangle.py | 42da51e34b0deffedfd7071f7594571e3157540a | [] | no_license | jebichii/holbertonschool-higher_level_programming-1 | 89026557909851dd775ae355f036db89ebd9adb9 | 741953aa479af90e8eac6f1315415eff4a20224f | refs/heads/master | 2023-03-15T14:58:27.062528 | 2020-06-11T07:21:23 | 2020-06-11T07:21:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,270 | py | #!/usr/bin/python3
"""Provides a class to represent a rectangle
"""
from models.base import Base
class Rectangle(Base):
"""Representation of a rectangle
"""
HEADERS = ('id', 'width', 'height', 'x', 'y')
def __init__(self, width, height, x=0, y=0, id=None):
"""Instantiate a rectangle
"""
super().__init__(id)
self.width = width
self.height = height
self.x = x
self.y = y
def __str__(self):
"""Get a string representation of a rectangle
"""
return "[{type}] ({id}) {x}/{y} - {width}/{height}".format(
type=self.__class__.__name__,
id=self.id,
width=self.__width,
height=self.__height,
x=self.__x,
y=self.__y
)
@property
def width(self):
"""Get private instance attribute 'width'
"""
return self.__width
@width.setter
def width(self, width):
"""Set private instance attribute 'width'
"""
if not isinstance(width, int):
raise TypeError("width must be an integer")
if width < 1:
raise ValueError("width must be > 0")
self.__width = width
@property
def height(self):
"""Get private instance attribute 'height'
"""
return self.__height
@height.setter
def height(self, height):
"""Set private instance attribute 'height'
"""
if not isinstance(height, int):
raise TypeError("height must be an integer")
if height < 1:
raise ValueError("height must be > 0")
self.__height = height
@property
def x(self):
"""Get private instance attribute 'x'
"""
return self.__x
@x.setter
def x(self, x):
"""Set private instance attribute 'x'
"""
if not isinstance(x, int):
raise TypeError("x must be an integer")
if x < 0:
raise ValueError("x must be >= 0")
self.__x = x
@property
def y(self):
"""Get private instance attribute 'y'
"""
return self.__y
@y.setter
def y(self, y):
"""Set private instance attribute 'y'
"""
if not isinstance(y, int):
raise TypeError("y must be an integer")
if y < 0:
raise ValueError("y must be >= 0")
self.__y = y
def area(self):
"""Return the area of a rectangle
"""
return self.__width * self.__height
def display(self):
"""Print a text representation of a rectangle
"""
print("\n" * self.__y, end="")
print("\n".join([" " * self.__x + "#" * self.__width] * self.__height))
def to_dictionary(self):
"""Get a dictionary representation of a rectangle
"""
return {key: getattr(self, key) for key in self.__class__.HEADERS}
def update(self, *args, **kwargs):
"""Update the attributes of a object
"""
if args:
for pair in zip(self.HEADERS, args):
setattr(self, *pair)
else:
for key in kwargs:
if key in self.HEADERS:
setattr(self, key, kwargs[key])
| [
"pdeyoreo@gmail.com"
] | pdeyoreo@gmail.com |
f54ccd9b35fcc5700879cdbd97e2f7e94d0704ac | 77db6591c5884204d6016bfa89b33691bac38813 | /lbst/migrations/0008_delete_parcels.py | 91d01710672e0f30065b7338bf188b1d05e9f3e1 | [] | no_license | jbukoski/iltf-signal-webmap-suite | 4fc0aafa977e911a1071872f7adbaf2e7d0da37c | b8374e9cfcc80501a8f632721a7cb9b76e668f6b | refs/heads/master | 2021-03-27T11:20:37.174667 | 2020-12-31T18:03:20 | 2020-12-31T18:03:20 | 79,853,039 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-10-20 04:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lbst', '0007_auto_20171020_0453'),
]
operations = [
migrations.DeleteModel(
name='parcels',
),
]
| [
"jacob.bukoski@yale.edu"
] | jacob.bukoski@yale.edu |
b09045ad72a51dcaad3ea6dd2cf2c6b90953bc15 | 316b8375a7ef8095f09973d13f5a49bc7fbe7580 | /leetcode/746.py | c1d8ec3f17c479eabb62b24a67fab00b5e1725c1 | [] | no_license | zhaolijian/suanfa | 9a8d23fbca01d994f7eef24631783c4b7ed25683 | 4f3b25f360f30c0e604ba4dc4d5774ccb5f25b32 | refs/heads/master | 2023-06-08T17:12:41.522937 | 2021-06-27T08:13:16 | 2021-06-27T08:13:16 | 313,269,459 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | # 数组的每个索引作为一个阶梯,第 i个阶梯对应着一个非负数的体力花费值 cost[i](索引从0开始)。
# 每当你爬上一个阶梯你都要花费对应的体力花费值,然后你可以选择继续爬一个阶梯或者爬两个阶梯。
# 您需要找到达到楼层顶部的最低花费。在开始时,你可以选择从索引为 0 或 1 的元素作为初始阶梯。
# 方法1
class Solution:
def minCostClimbingStairs(self, cost) -> int:
length = len(cost)
dp = [0] * (length + 1)
for i in range(2, length + 1):
dp[i] = min(dp[i - 2] + cost[i - 2], dp[i - 1] + cost[i - 1])
return dp[-1]
# 方法2
class Solution:
def minCostClimbingStairs(self, cost) -> int:
length = len(cost)
ll_value, l_value, cur = 0, 0, 0
for i in range(2, length + 1):
cur = min(ll_value + cost[i - 2], l_value + cost[i - 1])
ll_value = l_value
l_value = cur
return cur | [
"820913569@qq.com"
] | 820913569@qq.com |
097d8374a255ccaa7ec5bbd988be8ef1ae39bea0 | 7111511ef0cca1bcf84a76d49419fad504d78f6e | /test331scaping_Writer_nfl.py | 27f9826cb9dbf89cb2644dbd8d396d44450c712c | [] | no_license | blockchain99/pythonlecture | 7800033cd62251b0eec8cf3b93f253175d9cb2e8 | 198e1b6d68db72e4a5009f988c503958ad7ab444 | refs/heads/master | 2020-12-12T14:21:53.626918 | 2020-01-15T19:02:07 | 2020-01-15T19:02:07 | 234,148,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | ##not completed !
import requests
from bs4 import BeautifulSoup
from csv import writer, DictWriter
response = requests.get("http://en.wikipedia.org/wiki/NFL_win-loss_records")
# print(response.text)
with open('test331Write1nfl.text', 'w') as file:
file.write(response.text)
# print("==============================================================")
# #go to above url -> open developer tool in chrome.
soup = BeautifulSoup(response.text, "html.parser")
# # articles = soup.find_all("tbody")
# # articles = soup.select(".wikitable.sortable.jquery-tablesorter")
# articles = soup.select(".wikitable")
# # articles = soup.find_all(class_="wikitable")
# print(articles)
# print("-------------------csv Writer---------------------")
# with open("test331nflWriter.csv", "w") as file:
# csv_writer = writer(file)
# csv_writer.writerow(["Rank", "Team", "Won","Lost","Tied","Pct.","First NFL Season", "Total Games", "Divison"])
# td_tags = articles.find("td")
# for td_tag in td_tags:
# #get_text: access the inner text in an element("a")
# # print(article.find("a").get_text()) #anchor tag -> convert to text
# rank = td_tag[0].get_text()
# team = td_tag[1].find("a").get_text()
# won = td_tag[2].get_text()
# lost = td_tag[3].get_text()
# tied = td_tag[4].get_text()
# pct = td_tag[5].get_text()
# first = td_tag[6].get_text()
# total = td_tag[7].find("a").get_text()
# division = td_tag[8].find("a").get_text()
# # csv_writer.writerow([rank, team, won, lost, tied, pct, first, total, division])
############ one table scap ##############
# from bs4 import BeautifulSoup
# import csv
# html = open("table.html").read()
# soup = BeautifulSoup(html)
table = soup.find("table")
output_rows = []
for table_row in table.findAll('tr'):
columns = table_row.findAll('td')
output_row = []
for column in columns:
output_row.append(column.text)
output_rows.append(output_row)
with open('output.csv', 'wb') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(output_rows)
| [
"shinebytheriver@yahoo.com"
] | shinebytheriver@yahoo.com |
83691555e2c9290c3b999d0373c56b611f949fc5 | cf4f3c181dc04c4e698b53c3bb5dd5373b0cc1f4 | /meridian/acupoints/tiaokou23.py | c01be663e38206cbf5f9d8dea8e5b7bee0d48952 | [
"Apache-2.0"
] | permissive | sinotradition/meridian | da3bba6fe42d3f91397bdf54520b3085f7c3bf1d | 8c6c1762b204b72346be4bbfb74dedd792ae3024 | refs/heads/master | 2021-01-10T03:20:18.367965 | 2015-12-14T14:58:35 | 2015-12-14T14:58:35 | 46,456,260 | 5 | 3 | null | 2015-11-29T15:00:20 | 2015-11-19T00:21:00 | Python | UTF-8 | Python | false | false | 236 | py | #!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
SPELL=u'tiáokǒu'
CN=u'条口'
NAME=u'tiaokou23'
CHANNEL='stomach'
CHANNEL_FULLNAME='StomachChannelofFoot-Yangming'
SEQ='ST38'
if __name__ == '__main__':
pass
| [
"sinotradition@gmail.com"
] | sinotradition@gmail.com |
02e61c8774835d8f58181e6154d935bfe0f92a5c | 12f006a0e5d75ef2349d4ae519c1c9cac5309761 | /Solution_305.py | c3dacdc43397e43d3cb4084735962f719e35b8d7 | [] | no_license | TimothySjiang/leetcodepy | c613db16282eade713e01b7d641c0f5b341ec84b | ef64e46b8833a684b8b0355ce576b767a0e03596 | refs/heads/master | 2020-07-01T14:48:35.953841 | 2020-01-12T06:19:44 | 2020-01-12T06:19:44 | 201,199,810 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | class Solution:
def numIslands2(self, m: int, n: int, positions: List[List[int]]) -> List[int]:
uf = UnionFind()
res = []
board = [[0 for i in range(n)] for i in range(m)]
def detect(i, j):
if i < 0 or i > m - 1 or j < 0 or j > n - 1 or board[i][j] != 1:
return None
else:
return (i, j)
for i, j in positions:
board[i][j] = 1
uf.union((i, j), detect(i - 1, j))
uf.union((i, j), detect(i + 1, j))
uf.union((i, j), detect(i, j - 1))
uf.union((i, j), detect(i, j + 1))
res.append(len({uf.find(x) for x in uf.uf}))
return res
class UnionFind:
def __init__(self):
self.uf = {}
def same(self, a, b):
return self.find(a) == self.find(b)
def union(self, a, b):
self.uf.setdefault(a, a)
if not b:
return None
else:
self.uf.setdefault(b, b)
self.uf[self.find(a)] = self.find(b)
def find(self, node):
path = []
while node != self.uf[node]:
path.append(node)
node = self.uf[node]
for n in path:
self.uf[n] = node
return node
| [
"shjiang@ucdavis.edu"
] | shjiang@ucdavis.edu |
3cfdc2c73a2715bf71926835cd6c115602db7ba1 | 3fbd26091ebbc13913f9c7be1aaf10d477c79536 | /week01/zuoye/requests_maoyan/.history/maoyan_20200626235954.py | 2b50c6bd80acfa54c79e82eea959c439fb55b91e | [] | no_license | shuncon/Python001-class01 | d28faf3d5d8e9ea4cee93bcae7143a26fd8c472e | df19758181cdaf37f30d4b518600fc4612590499 | refs/heads/master | 2022-11-13T19:31:27.019214 | 2020-07-10T14:58:25 | 2020-07-10T14:58:25 | 273,135,541 | 0 | 0 | null | 2020-06-18T03:46:56 | 2020-06-18T03:46:55 | null | UTF-8 | Python | false | false | 727 | py | #-*-conding:utf-8 -*-
import requests
import lxml
from bs4 import BeautifulSoup as bfs
user_agent= 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
# header = {'user-agent' : user_agent}
myurl = 'https://maoyan.com/films/1222268'
'https://maoyan.com/films/1217023',
header = {}
header ['user-agent'] = user_agent
response = requests.get(myurl,herders=harder)
selector = lxml.etree.HTML(response.text)
#电影名称
dy_name = selector.xpath(' //*[@id='movie-brief-container']/h1/text()')
print(f'电影名称:{dy_name}')
# response = requests.get(myurl, headers=header)
# # print (response.text)
# # print (f'返回状态码: {response.status.code}') | [
"1428834423@qq.com"
] | 1428834423@qq.com |
0726af48353fb1ef2669ff906b763eb57f032402 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/opflex/idepdfwconndeniedhist15min.py | 81cd175dacf6fa15d7fa2f312d3d2aad8596597e | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,599 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class IDEpDfwConnDeniedHist15min(Mo):
"""
A class that represents historical statistics for DFW connection denied statistics in a 15 minute sampling interval. This class updates every 5 minutes.
"""
meta = StatsClassMeta("cobra.model.opflex.IDEpDfwConnDeniedHist15min", "DFW connection denied statistics")
counter = CounterMeta("perPortLimit", CounterCategory.COUNTER, "connections", "denied per port limit connections")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "perPortLimitCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "perPortLimitPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "perPortLimitMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "perPortLimitMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "perPortLimitAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "perPortLimitSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "perPortLimitThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "perPortLimitTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "perPortLimitRate"
meta._counters.append(counter)
counter = CounterMeta("globalLimit", CounterCategory.COUNTER, "connections", "denied global limit connections")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "globalLimitCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "globalLimitPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "globalLimitMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "globalLimitMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "globalLimitAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "globalLimitSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "globalLimitThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "globalLimitTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "globalLimitRate"
meta._counters.append(counter)
meta.moClassName = "opflexIDEpDfwConnDeniedHist15min"
meta.rnFormat = "HDopflexIDEpDfwConnDenied15min-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical DFW connection denied statistics stats in 15 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.opflex.IDEp")
meta.superClasses.add("cobra.model.opflex.IDEpDfwConnDeniedHist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.rnPrefixes = [
('HDopflexIDEpDfwConnDenied15min-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "globalLimitAvg", "globalLimitAvg", 19231, PropCategory.IMPLICIT_AVG)
prop.label = "denied global limit connections average value"
prop.isOper = True
prop.isStats = True
meta.props.add("globalLimitAvg", prop)
prop = PropMeta("str", "globalLimitCum", "globalLimitCum", 19227, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "denied global limit connections cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("globalLimitCum", prop)
prop = PropMeta("str", "globalLimitMax", "globalLimitMax", 19230, PropCategory.IMPLICIT_MAX)
prop.label = "denied global limit connections maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("globalLimitMax", prop)
prop = PropMeta("str", "globalLimitMin", "globalLimitMin", 19229, PropCategory.IMPLICIT_MIN)
prop.label = "denied global limit connections minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("globalLimitMin", prop)
prop = PropMeta("str", "globalLimitPer", "globalLimitPer", 19228, PropCategory.IMPLICIT_PERIODIC)
prop.label = "denied global limit connections periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("globalLimitPer", prop)
prop = PropMeta("str", "globalLimitRate", "globalLimitRate", 19235, PropCategory.IMPLICIT_RATE)
prop.label = "denied global limit connections rate"
prop.isOper = True
prop.isStats = True
meta.props.add("globalLimitRate", prop)
prop = PropMeta("str", "globalLimitSpct", "globalLimitSpct", 19232, PropCategory.IMPLICIT_SUSPECT)
prop.label = "denied global limit connections suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("globalLimitSpct", prop)
prop = PropMeta("str", "globalLimitThr", "globalLimitThr", 19233, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "denied global limit connections thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("globalLimitThr", prop)
prop = PropMeta("str", "globalLimitTr", "globalLimitTr", 19234, PropCategory.IMPLICIT_TREND)
prop.label = "denied global limit connections trend"
prop.isOper = True
prop.isStats = True
meta.props.add("globalLimitTr", prop)
prop = PropMeta("str", "index", "index", 19128, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "perPortLimitAvg", "perPortLimitAvg", 19252, PropCategory.IMPLICIT_AVG)
prop.label = "denied per port limit connections average value"
prop.isOper = True
prop.isStats = True
meta.props.add("perPortLimitAvg", prop)
prop = PropMeta("str", "perPortLimitCum", "perPortLimitCum", 19248, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "denied per port limit connections cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("perPortLimitCum", prop)
prop = PropMeta("str", "perPortLimitMax", "perPortLimitMax", 19251, PropCategory.IMPLICIT_MAX)
prop.label = "denied per port limit connections maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("perPortLimitMax", prop)
prop = PropMeta("str", "perPortLimitMin", "perPortLimitMin", 19250, PropCategory.IMPLICIT_MIN)
prop.label = "denied per port limit connections minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("perPortLimitMin", prop)
prop = PropMeta("str", "perPortLimitPer", "perPortLimitPer", 19249, PropCategory.IMPLICIT_PERIODIC)
prop.label = "denied per port limit connections periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("perPortLimitPer", prop)
prop = PropMeta("str", "perPortLimitRate", "perPortLimitRate", 19256, PropCategory.IMPLICIT_RATE)
prop.label = "denied per port limit connections rate"
prop.isOper = True
prop.isStats = True
meta.props.add("perPortLimitRate", prop)
prop = PropMeta("str", "perPortLimitSpct", "perPortLimitSpct", 19253, PropCategory.IMPLICIT_SUSPECT)
prop.label = "denied per port limit connections suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("perPortLimitSpct", prop)
prop = PropMeta("str", "perPortLimitThr", "perPortLimitThr", 19254, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "denied per port limit connections thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("perPortLimitThr", prop)
prop = PropMeta("str", "perPortLimitTr", "perPortLimitTr", 19255, PropCategory.IMPLICIT_TREND)
prop.label = "denied per port limit connections trend"
prop.isOper = True
prop.isStats = True
meta.props.add("perPortLimitTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
055b1b4762a72fc2f85d6bbed013e839d6ab0320 | 096b0e98f698e7b8c46566cd73039ec66c87f525 | /Environmental/DRAXIS/Python/vice_env_buildings_once/consumer.py | 1cabe6b241c62390a5436d2c9e019254069e132d | [
"CC-BY-4.0"
] | permissive | georgiapant/DataCrawlers | 8ba90491255e1f86b92e0ca5f78bce931c3d9553 | 1f2d2d4ab04d34fba1c90efc4119a1d40c964921 | refs/heads/master | 2022-12-09T18:24:30.947686 | 2020-07-15T13:39:08 | 2020-09-01T12:22:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,432 | py | """
This code is open-sourced software licensed under the MIT license. (http://opensource.org/licenses/MIT)
Copyright 2020 Stergios Bampakis, DRAXIS ENVIRONMENTAL S.A.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
DISCLAIMER
This code is used to crawl/parse data from file Thessaloniki-boatstops_public.xlsx provided by DRAXIS ENVIRONMENTAL S.A.
described in D5.3.
By downloading this code, you agree to contact the corresponding data provider
and verify you are allowed to use (including, but not limited, crawl/parse/download/store/process)
all data obtained from the data source.
"""
import os
import json
from kafka import KafkaConsumer
from elastic import ElasticSearchClient, RequestError
from dotenv import load_dotenv
from constants import *
load_dotenv()
es = ElasticSearchClient(os.getenv('ES_HOST'), os.getenv('ES_PORT'),
use_ssl=os.getenv('ES_USE_SSL', False),
verify_certs=os.getenv('ES_VERIFY_CERTS', False),
http_auth=(os.getenv('ES_USER'), os.getenv('ES_PASSWORD')) if os.getenv('ES_USER') else None,
ca_certs=os.getenv('ES_CA_CERTS', None),
timeout=60)
geo_point_mapping = es.define_custom_geo_shape_mapping("geometry")
es.create_index(ELASTICSEARCH_INDEX, geo_point_mapping)
kafka_consumer = KafkaConsumer(KAFKA_TOPIC,
bootstrap_servers=["{}:{}".format(os.getenv('KAFKA_HOST'), os.getenv('KAFKA_PORT'))],
# auto_offset_reset='earliest',
security_protocol=os.getenv('KAFKA_SECURITY_PROTOCOL', 'PLAINTEXT'),
ssl_cafile=os.getenv('KAFKA_CA_FILE', None),
ssl_certfile=os.getenv('KAFKA_CERT_FILE', None),
ssl_keyfile=os.getenv('KAFKA_KEY_FILE', None),
group_id='group_' + KAFKA_TOPIC,
value_deserializer=lambda m: json.loads(m.decode('utf8')))
c = 0
denied_docs = 0
for msg in kafka_consumer:
c += 1
print("Consumed: {} messages".format(c))
# data are already processed in the appropriate way from producer, so just insert them to DB
try:
print(es.insert_doc(msg.value))
except RequestError as e:
denied_docs += 1
logger.error(e.info)
logger.error("Denied docs: {}".format(denied_docs))
continue
| [
"ababakis@draxis.gr"
] | ababakis@draxis.gr |
163344918b63b083811ad2b9058150401e41c853 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_045/ch68_2019_06_07_09_21_26_405440.py | 3fe982878107d4f1707a636956f08b24b66b2f0b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | def separa_trios(n):
l=[]
i=0
while i<len(n):
l.append(n[i:i+3])
i+=3
return l
| [
"you@example.com"
] | you@example.com |
7f90dfab2752f533bb230b138138aea3033928ff | f1a4b824faea817e3f33e477a9a6f9c7d522f72d | /tests/conftest.py | 65b4cf65266d9b03aa101c30a0b97aba88b47768 | [
"MIT"
] | permissive | singingwolfboy/flask-dance | ea94200492befe9b380c4521b084afd925b8458e | 27add7576da008a00867521bc84529d1e4a0fc1e | refs/heads/main | 2023-08-24T18:05:55.465730 | 2023-05-10T18:26:21 | 2023-05-10T18:26:21 | 23,831,381 | 928 | 211 | MIT | 2023-09-12T19:49:00 | 2014-09-09T11:53:06 | Python | UTF-8 | Python | false | false | 336 | py | import pytest
import responses as resp_module
@pytest.fixture
def responses(request):
"""
Set up the `responses` module for mocking HTTP requests
https://github.com/getsentry/responses
"""
resp_module.start()
def done():
resp_module.stop()
resp_module.reset()
request.addfinalizer(done)
| [
"david@davidbaumgold.com"
] | david@davidbaumgold.com |
15a3f81360b0d4f94f1a8cfbd7aa45d2d89ab447 | df16e0876aab8e16df5389290a1aa85928c070fc | /extraction/speaker_pitch_means.py | 21b51de667d116ca9462e2396869a4784ca4860a | [] | no_license | mlml/bestiary-set-up | da351b7557b25bd0529df354fc66a82be9247ab4 | dbb7c7d20f7ed7e2371022b1d57d2c75f08e27e4 | refs/heads/master | 2021-09-27T02:55:58.760230 | 2018-11-05T18:44:52 | 2018-11-05T18:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | t = {'suborn_1067': [197.07859924084633, 57.645448937625744], 'suborn_107': [184.06262996941945, 35.98054169716696],
'suborn_1141': [201.77963903865458, 60.52524463280524], 'suborn_1154': [176.9936643414031, 65.0082908967629],
'suborn_1224': [234.655061425061, 23.592033711730963], 'suborn_1243': [195.26758469259636, 54.4267716054063],
'suborn_1244': [209.7923118093934, 48.161415028134215], 'suborn_1253': [216.79141018669705, 50.050102034678176],
'suborn_1254': [134.16702796467655, 37.64854789893217], 'suborn_1256': [213.90947284726087, 52.9027446023989],
'suborn_1257': [225.804134062411, 43.521814920340965], 'suborn_1258': [196.38368448637362, 43.21776512587265],
'suborn_1259': [199.1905546320082, 32.4634013004697], 'suborn_1260': [156.41484992987353, 58.8126765551579],
'suborn_1262': [102.0846736596736, 60.95597900133222], 'suborn_1263': [166.62398486197674, 42.52517900384401],
'suborn_1264': [218.71514803033978, 50.32572822222848], 'suborn_1265': [177.34282249273232, 40.048718385457946],
'suborn_1266': [199.84003601749353, 43.01791371781912], 'suborn_1267': [172.87927204770006, 35.073632254137145],
'suborn_1268': [101.61216038300412, 36.71600642589693], 'suborn_1269': [105.69035276073583, 38.47387505033989],
'suborn_1273': [123.85672326981965, 28.845476165521667], 'suborn_1284': [127.87656396450248, 39.89596026682189],
'suborn_207': [174.47530274361404, 68.12563132880051], 'suborn_28': [162.2998307244255, 39.96853855227387],
'suborn_930': [228.04168718185988, 57.19089481512006], 'suborn_933': [203.1910681546438, 43.73575202915789],
'suborq_1067': [207.0641391249613, 56.72967370336273], 'suborq_107': [191.52707108680798, 33.3777145731099],
'suborq_1141': [212.92524594499244, 57.38306315803578], 'suborq_1154': [196.87068071099426, 60.37737511110263],
'suborq_1224': [236.4496654877692, 23.684412163580376], 'suborq_1229': [182.49715496506644, 47.93234702627208],
'suborq_1237': [189.11508799683693, 55.549985939324664], 'suborq_1240': [197.08180480247862, 41.40719046797095],
'suborq_1241': [200.04561835748865, 41.533951314292416], 'suborq_1243': [198.5856590629705, 46.40385489575409],
'suborq_1244': [237.47315698981578, 53.789978237361154], 'suborq_1253': [242.06114625368056, 61.41951224850626],
'suborq_1254': [137.08234928043566, 32.83111755137383], 'suborq_1256': [216.5163743574232, 44.09540343015144],
'suborq_1257': [222.4199925093639, 37.92664129669618], 'suborq_1258': [203.40582938796626, 48.2259926766474],
'suborq_1259': [205.2553963490876, 37.28276419173133], 'suborq_1260': [167.2287715076414, 48.67468486255113],
'suborq_1264': [230.0246087301588, 44.37433609659744], 'suborq_1265': [177.1661920260376, 38.36616151815163],
'suborq_1266': [206.46545382418265, 38.55046776609597], 'suborq_1267': [181.04969483183015, 30.03552954097632],
'suborq_1268': [119.32061151079131, 37.14940716495099], 'suborq_1269': [113.57714186369915, 47.89746778092613],
'suborq_1273': [128.48285239852387, 34.483561687879984], 'suborq_1284': [146.51708491228058, 48.56165561724346],
'suborq_207': [184.76936980830575, 60.08737932591383], 'suborq_28': [170.93739439888634, 40.71293929625588],
'suborq_793': [181.179645915092, 54.94643607550864], 'suborq_874': [200.55944940968737, 44.2790219346174],
'suborq_930': [251.83187792511777, 62.79384445560059], 'suborq_933': [216.55518478843058, 42.38925781718354]}
from statistics import median
print(median(x[0] for x in t.values()))
print(median(x[1] for x in t.values())) | [
"michael.e.mcauliffe@gmail.com"
] | michael.e.mcauliffe@gmail.com |
8415371bb92a7e51b7d22eadc0a43ab589c1fa2d | d33b768704278b6eba4f8d9c6e367cce594a457d | /generator.py | 84ac793b320dffe8d343a5ea870eade593062db8 | [] | no_license | ashishjsharda/PythonSamples | b71ecfe5768a86bbea4ac3ec683f83b633e85d97 | 433e14707ff1ce08d2e5110521caf08969db08ea | refs/heads/master | 2021-01-19T21:09:01.697894 | 2020-03-27T10:37:18 | 2020-03-27T10:37:18 | 88,614,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | '''
Created on Jul 31, 2019
Using Yield
@author: asharda
'''
def gen_nums():
n=0
while n<4:
yield n
n+=1
for num in gen_nums():
print(num)
| [
"noreply@github.com"
] | ashishjsharda.noreply@github.com |
a10b849dc80dd5b27d663b2a62652a1f7a072f47 | 94e7c790d17ba08e8a2a74077dd8b75e7ac120b0 | /chapter05/Exercise20b_05.py | dd6b8c148d72cebee8f387e7cd2addbe1a9bdb3b | [] | no_license | lutfar9427/Exercises_Solution_of_INTRODUCTION_TO_PROGRAMMING_USING_Python | 9632e515428685dcaa7d057cf52f0e191e9f7ae0 | d037475316e6c6b7c6a7a7023318ef4ab4ed3f8d | refs/heads/master | 2020-09-02T09:04:44.990668 | 2018-10-20T00:50:12 | 2018-10-20T00:50:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | '''
*5.20 (Display four patterns using loops) Use nested loops that display the following
patterns in four separate programs:
Pattern B
1 2 3 4 5 6
1 2 3 4 5
1 2 3 4
1 2 3
1 2
1
/**
* @author BASSAM FARAMAWI
* @email tiodaronzi3@yahoo.com
* @since 2018
*/
'''
print("Pattern B")
for n in range(6, 0, -1): # A for loop for printing the pattern
for k in range(6 - n + 1, 6 + 1):
print(k, end=" ")
print()
| [
"tiodaronzi3@yahoo.com"
] | tiodaronzi3@yahoo.com |
5d0829c8e3743612b1c4359ecc4e7b74619061bc | 385224b7699b8cf4358f93eea06518a92fe2d40b | /Data Representation/Linear-Models/wide_deep.py | 13169a6deeb07b52b95b42ecd09730c6d4110fea | [] | no_license | afcarl/tensorflow-machine-learning | 293ab4e513ff46a82e308dbce1fefba4831bdb18 | 60c6b62025a932948d6d96eaf611b35df5e39cda | refs/heads/master | 2020-08-21T20:49:30.237014 | 2018-02-02T06:45:17 | 2018-02-02T06:45:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,294 | py | """Example code for TensorFlow Wide & Deep Tutorial using tf.estimator API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tensorflow as tf
# Column names of the census CSV files, in file order. The final column,
# 'income_bracket', is the binary classification label.
_CSV_COLUMNS = [
    'age', 'workclass', 'fnlwgt', 'education', 'education_num',
    'marital_status', 'occupation', 'relationship', 'race', 'gender',
    'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
    'income_bracket'
]
# Per-column record defaults for tf.decode_csv; the type of each default
# ([0] -> int, [''] -> string) also tells the decoder how to parse the field.
_CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],
                        [0], [0], [0], [''], ['']]
# Command-line flags for the Wide & Deep census example. Defaults match the
# paths produced by the companion data_download.py script.
parser = argparse.ArgumentParser()

parser.add_argument(
    '--model_dir', type=str, default='/tmp/census_model',
    help='Base directory for the model.')

parser.add_argument(
    '--model_type', type=str, default='wide_deep',
    help="Valid model types: {'wide', 'deep', 'wide_deep'}.")

parser.add_argument(
    '--train_epochs', type=int, default=40, help='Number of training epochs.')

parser.add_argument(
    '--epochs_per_eval', type=int, default=2,
    help='The number of training epochs to run between evaluations.')

parser.add_argument(
    '--batch_size', type=int, default=40, help='Number of examples per batch.')

parser.add_argument(
    '--train_data', type=str, default='/tmp/census_data/adult.data',
    help='Path to the training data.')

parser.add_argument(
    '--test_data', type=str, default='/tmp/census_data/adult.test',
    help='Path to the test data.')
# Row counts of the Census Income dataset splits. The 'train' count is also
# used as the shuffle buffer size in input_fn so shuffling spans the whole
# training set.
_NUM_EXAMPLES = {
    'train': 32561,
    'validation': 16281,
}
def build_model_columns():
  """Construct the feature columns for the wide and deep parts of the model.

  Returns:
    A ``(wide_columns, deep_columns)`` pair: the wide (linear) part receives
    the sparse base and crossed columns, the deep (DNN) part receives the
    continuous, one-hot, and embedded inputs.
  """
  fc = tf.feature_column

  def vocab(name, values):
    # Shorthand for a categorical column with an explicit vocabulary.
    return fc.categorical_column_with_vocabulary_list(name, values)

  # Continuous (dense) inputs, consumed directly by the DNN.
  age = fc.numeric_column('age')
  education_num = fc.numeric_column('education_num')
  capital_gain = fc.numeric_column('capital_gain')
  capital_loss = fc.numeric_column('capital_loss')
  hours_per_week = fc.numeric_column('hours_per_week')

  # Categorical inputs with known, closed vocabularies.
  education = vocab('education', [
      'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',
      'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',
      '5th-6th', '10th', '1st-4th', 'Preschool', '12th'])
  marital_status = vocab('marital_status', [
      'Married-civ-spouse', 'Divorced', 'Married-spouse-absent',
      'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed'])
  relationship = vocab('relationship', [
      'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
      'Other-relative'])
  workclass = vocab('workclass', [
      'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov',
      'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked'])

  # Open-ended vocabulary: hash into a fixed number of buckets instead.
  occupation = fc.categorical_column_with_hash_bucket(
      'occupation', hash_bucket_size=1000)

  # Discretize age so the linear model can learn a weight per range.
  age_buckets = fc.bucketized_column(
      age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])

  wide_columns = [
      education, marital_status, relationship, workclass, occupation,
      age_buckets,
      # Crossed columns let the linear model capture feature interactions.
      fc.crossed_column(['education', 'occupation'], hash_bucket_size=1000),
      fc.crossed_column([age_buckets, 'education', 'occupation'],
                        hash_bucket_size=1000),
  ]

  deep_columns = [
      age,
      education_num,
      capital_gain,
      capital_loss,
      hours_per_week,
      # One-hot encode the small closed vocabularies for the DNN.
      fc.indicator_column(workclass),
      fc.indicator_column(education),
      fc.indicator_column(marital_status),
      fc.indicator_column(relationship),
      # Hashed occupation is embedded into a dense 8-dimensional vector.
      fc.embedding_column(occupation, dimension=8),
  ]

  return wide_columns, deep_columns
def build_estimator(model_dir, model_type):
    """Build an estimator appropriate for the given model type.

    Args:
        model_dir: Directory in which checkpoints and summaries are stored.
        model_type: 'wide' for a linear model, 'deep' for a DNN, anything
            else for the combined wide-and-deep model.

    Returns:
        A configured ``tf.estimator`` classifier.
    """
    wide_columns, deep_columns = build_model_columns()
    dnn_layer_sizes = [100, 75, 50, 25]

    # Pin the model to the CPU, which trains faster than GPU for this model.
    run_config = tf.estimator.RunConfig().replace(
        session_config=tf.ConfigProto(device_count={'GPU': 0}))

    if model_type == 'wide':
        return tf.estimator.LinearClassifier(
            model_dir=model_dir,
            feature_columns=wide_columns,
            config=run_config)

    if model_type == 'deep':
        return tf.estimator.DNNClassifier(
            model_dir=model_dir,
            feature_columns=deep_columns,
            hidden_units=dnn_layer_sizes,
            config=run_config)

    # Default: combined wide-and-deep model.
    return tf.estimator.DNNLinearCombinedClassifier(
        model_dir=model_dir,
        linear_feature_columns=wide_columns,
        dnn_feature_columns=deep_columns,
        dnn_hidden_units=dnn_layer_sizes,
        config=run_config)
def input_fn(data_file, num_epochs, shuffle, batch_size):
    """Generate an input function for the Estimator.

    Args:
        data_file: Path to a CSV file of census records.
        num_epochs: Number of times to repeat the dataset.
        shuffle: Whether to shuffle the examples.
        batch_size: Number of examples per batch.

    Returns:
        A ``(features, labels)`` pair of tensors for one batch.
    """
    assert tf.gfile.Exists(data_file), (
        '%s not found. Please make sure you have either run data_download.py or '
        'set both arguments --train_data and --test_data.' % data_file)

    def _parse_line(value):
        # Decode one CSV line into a feature dict and a boolean label
        # (True when income is above 50K).
        print('Parsing', data_file)
        columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)
        features = dict(zip(_CSV_COLUMNS, columns))
        labels = features.pop('income_bracket')
        return features, tf.equal(labels, '>50K')

    # Extract lines from the input file using the Dataset API.
    dataset = tf.data.TextLineDataset(data_file)
    if shuffle:
        dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])
    dataset = dataset.map(_parse_line, num_parallel_calls=5)

    # Repeat after shuffling, rather than before, to prevent separate
    # epochs from blending together.
    dataset = dataset.repeat(num_epochs).batch(batch_size)

    features, labels = dataset.make_one_shot_iterator().get_next()
    return features, labels
def main(unused_argv):
    """Train the model, evaluating every ``FLAGS.epochs_per_eval`` epochs."""
    # Clean up the model directory if present.
    shutil.rmtree(FLAGS.model_dir, ignore_errors=True)
    model = build_estimator(FLAGS.model_dir, FLAGS.model_type)

    num_cycles = FLAGS.train_epochs // FLAGS.epochs_per_eval
    for cycle in range(num_cycles):
        model.train(input_fn=lambda: input_fn(
            FLAGS.train_data, FLAGS.epochs_per_eval, True, FLAGS.batch_size))

        results = model.evaluate(input_fn=lambda: input_fn(
            FLAGS.test_data, 1, False, FLAGS.batch_size))

        # Display evaluation metrics.
        print('Results at epoch', (cycle + 1) * FLAGS.epochs_per_eval)
        print('-' * 60)
        for key in sorted(results):
            print('%s: %s' % (key, results[key]))
if __name__ == '__main__':
    # Enable INFO logging, parse the known CLI flags, and forward any
    # unparsed arguments through to tf.app.run.
    tf.logging.set_verbosity(tf.logging.INFO)
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"le_j6@denison.edu"
] | le_j6@denison.edu |
cf35d907cc4cd9d30e2eeeef7859e0dc51fd7629 | f2c58d0b254dd5586266df23f57265abe322dc8c | /other/views.py | c2b449fa20ab7daf09e1904192b04355b67633e1 | [] | no_license | Shubham101491/Big-Store | f01ff85193bb77040be82385498c66faa5ac619a | 96f842c7856d2b8f6559844e0272828e0b449c6a | refs/heads/master | 2023-02-08T01:28:50.005570 | 2021-01-03T14:34:54 | 2021-01-03T14:34:54 | 326,142,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | from django.shortcuts import render
from bigstore import settings
def shipping(request):
    """Render the shipping page."""
    context = {"BASE_URL": settings.BASE_URL}
    return render(request, 'other/shipping.html', context)
def offer(request):
    """Render the offers page."""
    context = {"BASE_URL": settings.BASE_URL}
    return render(request, 'other/offer.html', context)
def wishlist(request):
    """Render the wishlist page."""
    context = {"BASE_URL": settings.BASE_URL}
    return render(request, 'other/wishlist.html', context)
def single(request):
    """Render the single-item page."""
    context = {"BASE_URL": settings.BASE_URL}
    return render(request, 'other/single.html', context)
def terms(request):
    """Render the terms page."""
    context = {"BASE_URL": settings.BASE_URL}
    return render(request, 'other/terms.html', context)
def faqs(request):
    """Render the FAQs page."""
    context = {"BASE_URL": settings.BASE_URL}
    return render(request, 'other/faqs.html', context)
| [
"55647943+Shubham101491@users.noreply.github.com"
] | 55647943+Shubham101491@users.noreply.github.com |
735a17afc637ade1bc3e2b3f4087317f8f507f75 | 2228bdb924cb807b2ee546e5e5c93ef1b53518bb | /robocrys/condense/site.py | 584b2ae9ce987a1fa27bec971925a2df2ae566fb | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-hdf5",
"BSD-2-Clause"
] | permissive | kgmat/robocrystallographer | eb1ac5b7bb0d9a9037820a0bd63b136a919b4146 | 94291e31713e785fe83003c7568a3626d70054fe | refs/heads/master | 2022-11-06T22:18:43.281007 | 2020-07-24T16:46:44 | 2020-07-24T16:46:44 | 282,270,877 | 0 | 0 | NOASSERTION | 2020-07-24T16:43:33 | 2020-07-24T16:43:32 | null | UTF-8 | Python | false | false | 33,465 | py | """
This module provides a class to extract geometry and neighbor information.
TODO:
* distortion of geometry e.g. elongated along an axis
"""
from collections import defaultdict
from typing import Dict, Any, List, Union, Tuple, Optional
import numpy as np
from pymatgen import Composition
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import get_angle
from pymatgen.util.string import formula_double_format
from robocrys.condense.fingerprint import get_site_fingerprints
from robocrys.util import connected_geometries, get_el, defaultdict_to_dict
class SiteAnalyzer(object):
    """Class to extract information on site geometry and bonding.

    Attributes:
        symmetry_labels: A :obj:`dict` mapping the site indices to the symmetry
            label for that site. If two sites are symmetrically equivalent they
            share the same symmetry label. The numbering begins at 1 for each
            element in the structure.
        equivalent_sites: A :obj:`list` of indices mapping each site in
            the structure to a symmetrically or structurally equivalent site,
            depending on the value of ``use_symmetry_equivalent_sites``.

    Args:
        bonded_structure: A bonded structure with nearest neighbor data
            included. For example generated using
            :class:`pymatgen.analysis.local_env.CrystalNN` or
            :class:`pymatgen.analysis.local_env.VoronoiNN`.
        use_symmetry_equivalent_sites: Whether to use symmetry to determine if
            sites are inequivalent. If ``False``, the site geometry and (next)
            nearest neighbor information will be used.
        symprec: The tolerance used when determining the symmetry of
            the structure. The symmetry can used both to determine if multiple
            sites are symmetrically equivalent and to obtain the symmetry labels
            for each site.
        minimum_geometry_op: The minimum geometrical order parameter for a
            geometry match to be returned.
        use_iupac_formula (bool, optional): Whether to order formulas
            by the iupac "electronegativity" series, defined in
            Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
            Recommendations 2005)". This ordering effectively follows
            the groups and rows of the periodic table, except the
            Lanthanides, Actanides and hydrogen. If set to ``False``, the
            elements will be ordered according to the electronegativity values.
    """

    def __init__(self,
                 bonded_structure: StructureGraph,
                 use_symmetry_equivalent_sites: bool = False,
                 symprec: float = 0.01,
                 minimum_geometry_op: float = 0.4,
                 use_iupac_formula: bool = True):
        self.bonded_structure = bonded_structure
        self.use_iupac_formula = use_iupac_formula
        self.minimum_geometry_op = minimum_geometry_op

        # Pre-compute the order-parameter fingerprint for every site; used
        # both for geometry naming and for structural-equivalence checks.
        self.site_fingerprints = get_site_fingerprints(
            bonded_structure.structure)

        sga = SpacegroupAnalyzer(bonded_structure.structure,
                                 symprec=symprec)
        equivalent_sites = sga.get_symmetry_dataset()['equivalent_atoms']

        if use_symmetry_equivalent_sites:
            self.equivalent_sites = list(equivalent_sites)
        else:
            # Fall back to structural comparison (element + geometry +
            # nearest and next-nearest neighbor environments).
            self.equivalent_sites = self._calculate_equivalent_sites()

        self.symmetry_labels = self._calculate_symmetry_labels(equivalent_sites)
def get_site_geometry(self, site_index: int
) -> Dict[str, Union[str, float]]:
"""Gets the bonding geometry of a site.
For example, "octahedral" or "square-planar".
Args:
site_index: The site index (zero based).
Returns:
The site geometry information formatted at as::
{'type': geometry_type, 'likeness': order_parameter}
Where ``geometry_type`` is a :obj:`str` corresponding to the
geometry type (e.g. octahedral) and ``order_parameter`` is a
:obj:`float` indicating whether how close the geometry is to the
perfect geometry. If the largest geometrical order parameter falls
beneath :attr:`robocrys.site.SiteAnalyzer.minimum_geometry_op`, the
geometry type will be returned as "X-coordinate", where X is the
coordination number.
"""
# get fingerprint as a list of tuples, e.g. [("op name", val), ...]
site_fingerprint: List[Tuple[str, int]] = list(
self.site_fingerprints[site_index].items())
# get coordination number with largest weight, ignore op names with
# just the coordination number weight (e.g. containing "wt")
parameter = max(site_fingerprint,
key=lambda x: x[1] if "wt" not in x[0] else 0)
if parameter[1] < self.minimum_geometry_op:
cn = parameter[0].split()[-1].split('_')[-1]
geometry = "{}-coordinate".format(cn)
likeness = 1.
else:
# return the geometry type without the CN at the end, e.g.
# "square co-planar CN_4" -> "square co-planar"
geometry = " ".join(parameter[0].split()[:-1])
geometry = "single-bond" if geometry == "sgl_bd" else geometry
likeness = parameter[1]
return {'type': geometry, 'likeness': likeness}
def get_nearest_neighbors(self, site_index: int,
inc_inequivalent_site_index: bool = True
) -> List[Dict[str, Any]]:
"""Gets information about the bonded nearest neighbors.
Args:
site_index: The site index (zero based).
inc_inequivalent_site_index: Whether to include the inequivalent
site indices in the nearest neighbor information.
Returns:
For each site bonded to ``site_index``, returns a :obj:`dict`
with the format::
{'element': el, 'dist': distance}
If ``inc_inequivalent_site_index=True``, the data will have an
additional key ``'inequiv_index'`` corresponding to the inequivalent
site index. E.g. if two sites are structurally/symmetrically
equivalent (depending on the value of ``self.use_symmetry_equivalent_sites`` then
they will have the same ``inequiv_index``.
"""
nn_sites = self.bonded_structure.get_connected_sites(site_index)
if inc_inequivalent_site_index:
return [{'element': str(site.site.specie),
'inequiv_index': self.equivalent_sites[site.index],
'dist': site.dist} for site in nn_sites]
else:
return [{'element': str(site.site.specie),
'dist': site.dist} for site in nn_sites]
def get_next_nearest_neighbors(self, site_index: int,
                               inc_inequivalent_site_index: bool = True
                               ) -> List[Dict[str, Any]]:
    """Gets information about the bonded next nearest neighbors.

    Args:
        site_index: The site index (zero based).
        inc_inequivalent_site_index: Whether to include the inequivalent
            site indices.

    Returns:
        A list of the next nearest neighbor information. For each next
        nearest neighbor site, returns a :obj:`dict` with the format::

            {'element': el, 'connectivity': con, 'geometry': geom,
             'angles': angles, 'distance': distance}

        The ``connectivity`` property is the connectivity type to the
        next nearest neighbor, e.g. "face", "corner", or
        "edge". The ``geometry`` property gives the geometry of the
        next nearest neighbor site. See the ``get_site_geometry`` method
        for the format of this data. The ``angles`` property gives the
        bond angles between the site and the next nearest neighbour.
        Multiple bond angles are given when the two sites share more than
        one nearest neighbor (e.g. if they are face-sharing or
        edge-sharing). The ``distance`` property gives the distance
        between the site and the next nearest neighbor.

        If ``inc_inequivalent_site_index=True``, the data will have an
        additional key ``'inequiv_index'`` corresponding to the
        inequivalent site index. E.g. if two sites are
        structurally/symmetrically equivalent (depending on the value of
        ``self.use_symmetry_equivalent_sites``) then they will have the
        same ``inequiv_index``.
    """

    def get_coords(a_site_index, a_site_image):
        # Cartesian coordinates of a site in a specific periodic image.
        return np.asarray(
            self.bonded_structure.structure.lattice.get_cartesian_coords(
                self.bonded_structure.structure.frac_coords[a_site_index] +
                a_site_image))

    nn_sites = self.bonded_structure.get_connected_sites(site_index)

    # Sites bonded to the nearest neighbors, keeping the periodic image
    # (jimage) of each so the same atom in different cells is distinct.
    next_nn_sites = [site for nn_site in nn_sites for site in
                     self.bonded_structure.get_connected_sites(
                         nn_site.index, jimage=nn_site.jimage)]

    nn_sites_set = set((site.index, site.jimage) for site in nn_sites)

    seen_nnn_sites = set()
    next_nn_summary = []
    for nnn_site in next_nn_sites:
        if (nnn_site.index == site_index and nnn_site.jimage == (0, 0, 0)
                or (nnn_site.index, nnn_site.jimage) in seen_nnn_sites):
            # skip the nnn site if it is the original atom of interest
            # or has already been processed
            continue
        seen_nnn_sites.add((nnn_site.index, nnn_site.jimage))

        # Atoms bonded to both the original site and this nnn site; the
        # count determines the polyhedral connectivity type.
        sites = set((site.index, site.jimage) for site in
                    self.bonded_structure.get_connected_sites(
                        nnn_site.index, jimage=nnn_site.jimage))
        shared_sites = nn_sites_set.intersection(sites)
        n_shared_atoms = len(shared_sites)

        if n_shared_atoms == 1:
            connectivity = 'corner'
        elif n_shared_atoms == 2:
            connectivity = 'edge'
        else:
            connectivity = 'face'

        site_coords = get_coords(site_index, (0, 0, 0))
        nnn_site_coords = get_coords(nnn_site.index, nnn_site.jimage)
        nn_site_coords = [get_coords(nn_site_index, nn_site_image)
                          for nn_site_index, nn_site_image in shared_sites]

        # can't just use Structure.get_angles to calculate angles as it
        # doesn't take into account the site image
        angles = [get_angle(site_coords - x, nnn_site_coords - x)
                  for x in nn_site_coords]

        distance = np.linalg.norm(site_coords - nnn_site_coords)

        geometry = self.get_site_geometry(nnn_site.index)

        summary = {'element': str(nnn_site.site.specie),
                   'connectivity': connectivity,
                   'geometry': geometry,
                   'angles': angles,
                   'distance': distance}

        if inc_inequivalent_site_index:
            summary['inequiv_index'] = self.equivalent_sites[nnn_site.index]

        next_nn_summary.append(summary)

    return next_nn_summary
def get_site_summary(self, site_index: int) -> Dict[str, Any]:
    """Gets a summary of the site information.

    Args:
        site_index: The site index (zero based).

    Returns:
        A summary of the site information, formatted as::

            {
                'element': 'Mo4+',
                'geometry': {
                    'likeness': 0.5544,
                    'type': 'pentagonal pyramidal'
                },
                'nn': [2, 2, 2, 2, 2, 2],
                'nnn': {'edge': [0, 0, 0, 0, 0, 0]},
                'poly_formula': 'S6',
                'sym_labels': (1,)
            }

        Where ``element`` is the species string (if the species has
        oxidation states, these will be included in the string). The
        ``geometry`` key is the geometry information as produced by
        :meth:`SiteAnalyzer.get_site_geometry`. The `nn` key lists
        the site indices of the nearest neighbor bonding sites. Note the
        inequivalent site index is given for each site. The `nnn` key
        gives the next nearest neighbor information, broken up by the
        connectivity to that neighbor. The ``poly_formula`` key gives the
        formula of the bonded nearest neighbors. ``poly_formula`` will be
        ``None`` if the site geometry is not in
        :data:`robocrys.util.connected_geometries`. The ``sym_labels``
        key gives the symmetry labels of the site. If two sites are
        symmetrically equivalent they share the same symmetry label. The
        numbering begins at 1 for each element in the structure. If
        :attr:`SiteAnalyzer.use_symmetry_equivalent_sites` is ``False``,
        each site may have more than one symmetry label, as structural
        features have instead been used to determine the site
        equivalences.
    """
    element = str(self.bonded_structure.structure[site_index].specie)
    geometry = self.get_site_geometry(site_index)

    nn_sites = self.get_nearest_neighbors(
        site_index, inc_inequivalent_site_index=True)
    nn_indices = [nn_site['inequiv_index'] for nn_site in nn_sites]

    # Group next-nearest-neighbor site indices by connectivity type
    # (corner/edge/face).
    nnn_sites = self.get_next_nearest_neighbors(
        site_index, inc_inequivalent_site_index=True)
    nnn = defaultdict(list)
    for nnn_site in nnn_sites:
        nnn[nnn_site['connectivity']].append(nnn_site['inequiv_index'])
    nnn = dict(nnn)

    # All sites sharing this site's equivalence class; the union of their
    # symmetry labels describes this inequivalent site.
    equiv_sites = [i for i in range(len(self.equivalent_sites))
                   if self.equivalent_sites[i] == self.equivalent_sites[
                       site_index]]
    sym_labels = tuple(set([self.symmetry_labels[x] for x in equiv_sites]))

    poly_formula = self._get_poly_formula(geometry, nn_sites, nnn_sites)

    return {'element': element, 'geometry': geometry, 'nn': nn_indices,
            'nnn': nnn, 'poly_formula': poly_formula,
            'sym_labels': sym_labels}
def get_bond_distance_summary(self, site_index: int
                              ) -> Dict[int, List[float]]:
    """Gets the bond distance summary for a site.

    Args:
        site_index: The site index (zero based).

    Returns:
        The bonding data for the site, formatted as::

            {to_site: [dist_1, dist_2, dist_3, ...]}

        Where ``to_site`` is the inequivalent index of a nearest neighbor
        site and ``dist_1`` etc are the bond distances as :obj:`float`.
    """
    distances = defaultdict(list)
    for neighbor in self.get_nearest_neighbors(site_index):
        distances[neighbor['inequiv_index']].append(neighbor['dist'])
    return defaultdict_to_dict(distances)
def get_connectivity_angle_summary(self, site_index: int
                                   ) -> Dict[int, Dict[str, List[float]]]:
    """Gets the connectivity angle summary for a site.

    The connectivity angles are the angles between a site and its
    next nearest neighbors.

    Args:
        site_index: The site index (zero based).

    Returns:
        The connectivity angle data for the site, formatted as::

            {
                to_site: {
                    connectivity_a: [angle_1, angle_2, ...]
                    connectivity_b: [angle_1, angle_2, ...]
                }
            }

        Where ``to_site`` is the inequivalent index of a next nearest
        neighbor site, ``connectivity_a`` etc are the bonding
        connectivity types (e.g. ``'edge'`` or ``'corner'``), and
        ``angle_1`` etc are the bond angles as :obj:`float`.
    """
    angle_data = defaultdict(lambda: defaultdict(list))

    nnn_sites = self.get_next_nearest_neighbors(
        site_index, inc_inequivalent_site_index=True)
    for nnn_site in nnn_sites:
        inequiv_index = nnn_site['inequiv_index']
        connectivity = nnn_site['connectivity']
        angle_data[inequiv_index][connectivity].extend(nnn_site['angles'])

    return defaultdict_to_dict(angle_data)
def get_nnn_distance_summary(self, site_index: int
                             ) -> Dict[int, Dict[str, List[float]]]:
    """Gets the next nearest neighbor distance summary for a site.

    Args:
        site_index: The site index (zero based).

    Returns:
        The connectivity distance data for the site, formatted as::

            {
                to_site: {
                    connectivity_a: [distance_1, distance_2, ...]
                    connectivity_b: [distance_1, distance_2, ...]
                }
            }

        Where ``to_site`` is the inequivalent index of a next nearest
        neighbor site, ``connectivity_a`` etc are the bonding
        connectivity types (e.g. ``'edge'`` or ``'corner'``), and
        ``distance_1`` etc are the distances as :obj:`float`.
    """
    distance_data = defaultdict(lambda: defaultdict(list))

    nnn_sites = self.get_next_nearest_neighbors(
        site_index, inc_inequivalent_site_index=True)
    for nnn_site in nnn_sites:
        inequiv_index = nnn_site['inequiv_index']
        connectivity = nnn_site['connectivity']
        distance_data[inequiv_index][connectivity].append(
            nnn_site['distance'])

    return defaultdict_to_dict(distance_data)
def get_all_site_summaries(self):
"""Gets the site summaries for all sites.
Returns:
The site summaries for all sites, formatted as::
{
site_index: site_summary
}
Where ``site_summary`` has the same format as produced by
:meth:`SiteAnalyzer.get_site_summary`.
"""
return {site: self.get_site_summary(site)
for site in set(self.equivalent_sites)}
def get_all_bond_distance_summaries(self
) -> Dict[int, Dict[int, List[float]]]:
"""Gets the bond distance summaries for all sites.
Returns:
The bond distance summaries for all sites, formatted as::
{
from_site: {
to_site: distances
}
}
Where ``from_site`` and ``to_site`` are site indices and
``distances`` is a :obj:`list` of :obj:`float` of bond distances.
"""
return {from_site: self.get_bond_distance_summary(from_site)
for from_site in set(self.equivalent_sites)}
def get_all_connectivity_angle_summaries(self) -> Dict[
int, Dict[int, Dict[str, List[float]]]]:
"""Gets the connectivity angle summaries for all sites.
The connectivity angles are the angles between a site and its
next nearest neighbors.
Returns:
The connectivity angle summaries for all sites, formatted as::
{
from_site: {
to_site: {
connectivity: angles
}
}
}
Where ``from_site`` and ``to_site`` are the site indices of
two sites, ``connectivity`` is the connectivity type (e.g.
``'edge'`` or ``'face'``) and ``angles`` is a :obj:`list` of
:obj:`float` of connectivity angles.
"""
return {from_site: self.get_connectivity_angle_summary(from_site)
for from_site in set(self.equivalent_sites)}
def get_all_nnn_distance_summaries(
self
) -> Dict[int, Dict[int, Dict[str, List[float]]]]:
"""Gets the next nearest neighbor distance summaries for all sites.
Returns:
The next nearest neighbor distance summaries for all sites,
formatted as::
{
from_site: {
to_site: {
connectivity: distances
}
}
}
Where ``from_site`` and ``to_site`` are the site indices of
two sites, ``connectivity`` is the connectivity type (e.g.
``'edge'`` or ``'face'``) and ``distances`` is a :obj:`list` of
:obj:`float` of distances.
"""
return {from_site: self.get_nnn_distance_summary(from_site)
for from_site in set(self.equivalent_sites)}
def get_inequivalent_site_indices(self, site_indices: List[int]
) -> List[int]:
"""Gets the inequivalent site indices from a list of site indices.
Args:
site_indices: The site indices.
Returns:
The inequivalent site indices. For example, if a structure has 4
sites where the first two are equivalent and the last two are
inequivalent. If ``site_indices=[0, 1, 2, 3]`` the output will be::
[0, 0, 2, 3]
"""
return list(self.equivalent_sites[i] for i in site_indices)
def _calculate_equivalent_sites(self,
                                likeness_tol: float = 0.001,
                                bond_dist_tol: float = 0.01,
                                bond_angle_tol: float = 0.1
                                ) -> List[int]:
    """Determines the indices of the structurally inequivalent sites.

    Two sites are considered equivalent if they are the same element, and
    have the same geometry and (next) nearest neighbors.

    Args:
        likeness_tol: The tolerance used to determine if two likeness
            parameters are the same.
        bond_dist_tol: The tolerance used to determine if two bond
            lengths are the same.
        bond_angle_tol: The tolerance used to determine if two bond
            angles are the same.

    Returns:
        A :obj:`list` of indices mapping each site in the structure to a
        structurally equivalent site. For example, if the first two sites
        are equivalent and the last two are both inequivalent, the data
        will be formatted as::

            [0, 0, 2, 3]
    """
    # TODO: Use site fingerprint rather than geometry type.
    # inequiv_sites maps the representative site index of each distinct
    # environment found so far to its comparison data.
    inequiv_sites = {}
    equivalent_sites = []
    for site_index, site in enumerate(self.bonded_structure.structure):
        element = get_el_sp(site.specie)
        geometry = self.get_site_geometry(site_index)
        nn_sites = self.get_nearest_neighbors(
            site_index, inc_inequivalent_site_index=False)
        nnn_sites = self.get_next_nearest_neighbors(
            site_index, inc_inequivalent_site_index=False)

        # Compare against every distinct environment seen so far; all
        # four criteria must match for the sites to be equivalent.
        matched = False
        for inequiv_index, inequiv_site in inequiv_sites.items():
            elem_match = element == inequiv_site['element']
            geom_match = geometries_match(
                geometry, inequiv_site['geometry'],
                likeness_tol=likeness_tol)
            nn_match = nn_summaries_match(
                nn_sites, inequiv_site['nn_sites'],
                bond_dist_tol=bond_dist_tol)
            nnn_match = nnn_summaries_match(
                nnn_sites, inequiv_site['nnn_sites'],
                bond_angle_tol=bond_angle_tol)

            if elem_match and geom_match and nn_match and nnn_match:
                equivalent_sites.append(inequiv_index)
                matched = True
                break

        if not matched:
            # no matches therefore store original site index
            equivalent_sites.append(site_index)
            site_data = {'element': element,
                         'geometry': geometry,
                         'nn_sites': nn_sites,
                         'nnn_sites': nnn_sites}
            inequiv_sites[site_index] = site_data

    return equivalent_sites
def _calculate_symmetry_labels(self, sym_equivalent_atoms: List[int]
                               ) -> List[int]:
    """Calculates the symmetry labels for all sites in the structure.

    The symmetry labels number the sites in the structure. If two sites
    are symmetrically equivalent they share the same symmetry label. The
    numbering begins at 1 for each element in the structure.

    Args:
        sym_equivalent_atoms: A :obj:`list` of indices mapping each site
            in the structure to a symmetrically equivalent site. The data
            should be formatted as given by the ``equivalent_atoms`` key
            in :meth:`SpacegroupAnalyzer.get_symmetry_dataset()`.

    Returns:
        The symmetry label for each site, ordered by site index.
    """
    symmetry_labels = dict()

    # this way is a little long winded but works if the sites aren't
    # grouped together by element
    for specie in self.bonded_structure.structure.species:
        el_indices = self.bonded_structure.structure.indices_from_symbol(
            get_el(specie))
        equiv_indices = [sym_equivalent_atoms[x] for x in el_indices]

        # Assign labels 1, 2, ... to each new equivalence class seen for
        # this element; repeats reuse the existing label.
        count = 1
        equiv_index_to_sym_label = {}
        for el_index, equiv_index in zip(el_indices, equiv_indices):
            if equiv_index in equiv_index_to_sym_label:
                symmetry_labels[el_index] = (equiv_index_to_sym_label[
                    equiv_index])
            else:
                equiv_index_to_sym_label[equiv_index] = count
                symmetry_labels[el_index] = count
                count += 1

    return [symmetry_labels[i] for i in sorted(symmetry_labels.keys())]
def _get_poly_formula(self,
                      geometry: Dict[str, Any],
                      nn_sites: List[Dict[str, Any]],
                      nnn_sites: List[Dict[str, Any]]
                      ) -> Optional[str]:
    """Gets the polyhedra formula of the nearest neighbor atoms.

    The polyhedral formula is effectively the sorted nearest neighbor
    atoms in a reduced format. For example, if the nearest neighbors are
    3 I atoms, 2 Br atoms and 1 Cl atom, the polyhedral formula will be
    "I3Br2Cl". The polyhedral formula will be ``None`` if the site
    geometry is not in :data:`robocrys.util.connected_geometries`.

    Args:
        geometry: The site geometry as produced by
            :meth:`SiteAnalyzer.get_site_geometry`.
        nn_sites: The nearest neighbor sites as produced by
            :meth:`SiteAnalyzer.get_nearest_neighbors`.
        nnn_sites: The next nearest neighbor sites as produced by
            :meth:`SiteAnalyzer.get_next_nearest_neighbors`.

    Returns:
        The polyhedral formula if the site geometry is in
        :data:`robocrys.util.connected_geometries` else ``None``.
    """

    def order_elements(el):
        # Fix: the branches were previously inverted relative to the
        # documented contract (and to pymatgen's convention):
        # use_iupac_formula=True must sort by the IUPAC
        # "electronegativity series" (Element.iupac_ordering), while
        # False sorts by the actual electronegativity values (Element.X).
        if self.use_iupac_formula:
            return [get_el_sp(el).iupac_ordering, el]
        else:
            return [get_el_sp(el).X, el]

    nnn_geometries = [nnn_site['geometry'] for nnn_site in nnn_sites]

    poly_formula = None
    # Only produce a formula when both this site and at least one next
    # nearest neighbor have a "connected" polyhedral geometry.
    if (geometry['type'] in connected_geometries and
            any([nnn_geometry['type'] in connected_geometries
                 for nnn_geometry in nnn_geometries])):
        nn_els = [get_el(nn_site['element']) for nn_site in nn_sites]
        comp = Composition("".join(nn_els))
        el_amt_dict = comp.get_el_amt_dict()

        # Build e.g. "S6" or "I3Br2Cl"; formula_double_format drops
        # amounts of 1.
        poly_formula = ""
        for e in sorted(el_amt_dict.keys(), key=order_elements):
            poly_formula += e
            poly_formula += formula_double_format(el_amt_dict[e])

    return poly_formula
def geometries_match(geometry_a: Dict[str, Any],
                     geometry_b: Dict[str, Any],
                     likeness_tol: float = 0.001
                     ) -> bool:
    """Determine whether two site geometries match.

    Two geometries match when they have the same geometry type and their
    likeness parameters differ by less than ``likeness_tol``. Geometry
    data should be formatted the same as produced by
    :meth:`robocrys.site.SiteAnalyzer.get_site_geometry`.

    Args:
        geometry_a: The first set of geometry data.
        geometry_b: The second set of geometry data.
        likeness_tol: The tolerance used to determine if two likeness
            parameters are the same.

    Returns:
        Whether the two geometries are the same.
    """
    same_type = geometry_a['type'] == geometry_b['type']
    likeness_delta = abs(geometry_a['likeness'] - geometry_b['likeness'])
    return same_type and likeness_delta < likeness_tol
def nn_summaries_match(nn_sites_a: List[Dict[str, Union[int, str]]],
                       nn_sites_b: List[Dict[str, Union[int, str]]],
                       bond_dist_tol: float = 0.01,
                       match_bond_dists: bool = True) -> bool:
    """Determine whether two sets of nearest neighbors match.

    Nearest neighbor data should be formatted the same as produced by
    :meth:`robocrys.site.SiteAnalyzer.get_nearest_neighbors`. The two
    sets are compared pairwise after sorting by element then distance.

    Args:
        nn_sites_a: The first set of nearest neighbors.
        nn_sites_b: The second set of nearest neighbors.
        bond_dist_tol: The tolerance used to determine if two bond
            lengths are the same.
        match_bond_dists: Whether to consider bond distances when
            matching.

    Returns:
        Whether the two sets of nearest neighbors match.
    """
    if len(nn_sites_a) != len(nn_sites_b):
        return False

    def sort_key(nn_site):
        return [nn_site['element'], nn_site['dist']]

    pairs = zip(sorted(nn_sites_a, key=sort_key),
                sorted(nn_sites_b, key=sort_key))
    for site_a, site_b in pairs:
        if site_a['element'] != site_b['element']:
            return False
        if (match_bond_dists and
                abs(site_a['dist'] - site_b['dist']) >= bond_dist_tol):
            return False
    return True
def nnn_summaries_match(nnn_sites_a: List[Dict[str, Any]],
                        nnn_sites_b: List[Dict[str, Any]],
                        likeness_tol: float = 0.001,
                        bond_angle_tol: float = 0.1,
                        match_bond_angles: bool = True):
    """Determine whether two sets of next nearest neighbors match.

    Next nearest neighbor data should be formatted the same as produced
    by :meth:`robocrys.site.SiteAnalyzer.get_next_nearest_neighbors`. The
    two sets are compared pairwise after sorting by element, geometry
    type, connectivity and angles.

    Args:
        nnn_sites_a: The first set of next nearest neighbors.
        nnn_sites_b: The second set of next nearest neighbors.
        likeness_tol: The tolerance used to determine if two likeness
            parameters are the same.
        bond_angle_tol: The tolerance used to determine if two bond
            angles are the same.
        match_bond_angles: Whether to consider bond angles when matching.

    Returns:
        Whether the two sets of next nearest neighbors match.
    """
    if len(nnn_sites_a) != len(nnn_sites_b):
        return False

    def sort_key(nnn_site):
        return [nnn_site['element'], nnn_site['geometry']['type'],
                nnn_site['connectivity'], sorted(nnn_site['angles'])]

    pairs = zip(sorted(nnn_sites_a, key=sort_key),
                sorted(nnn_sites_b, key=sort_key))
    for site_a, site_b in pairs:
        if site_a['element'] != site_b['element']:
            return False
        if site_a['connectivity'] != site_b['connectivity']:
            return False
        if not geometries_match(site_a['geometry'], site_b['geometry'],
                                likeness_tol=likeness_tol):
            return False
        if match_bond_angles and any(
                abs(angle_a - angle_b) >= bond_angle_tol
                for angle_a, angle_b in zip(sorted(site_a['angles']),
                                            sorted(site_b['angles']))):
            return False
    return True
| [
"alexganose@googlemail.com"
] | alexganose@googlemail.com |
6c13b1510aa2d1e894f03e08801f3572e56b017a | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /PhysicsTools/PatExamples/test/producePatMcMatch_cfg.py | 5ab4881d7f71ebc06a1455a4c45f7d7d8ded42d0 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 1,632 | py | # Start with a skeleton process which gets imported with the following line
from PhysicsTools.PatAlgos.patTemplate_cfg import *
# Load the standard PAT config
process.load( "PhysicsTools.PatAlgos.patSequences_cff" )
# Load the exercise config
process.load( "PhysicsTools.PatExamples.mcMatch_cfi" ) # The file to modify!
# Modify the default config according to needed exercise settings
# You can comment these lines in order to run the default rather than
# your OWN MC matching from PhysicsTools/PatExamples/python/mcMatching_cfi
# CAUTION: Uncommented, this does NOT run out-of-the-box!
# Own muon match
# NOTE(review): the default matcher is removed and the custom one appended,
# then the producer itself is removed and re-appended — presumably so the
# producer runs after the new matcher within the sequence; confirm against
# CMSSW sequence-ordering semantics.
process.makeAllLayer1Muons.remove( process.muonMatch )
process.makeAllLayer1Muons += process.myMuonMatch
process.makeAllLayer1Muons.remove( process.allLayer1Muons )
process.makeAllLayer1Muons += process.allLayer1Muons
# Point the muon producer at the custom match and embed its result.
process.allLayer1Muons.genParticleMatch = "myMuonMatch"
process.allLayer1Muons.embedGenMatch = True
# Own jet match to MC jets
# Same remove/re-append pattern as for muons, but jets are matched to
# generator-level jets rather than generator particles.
process.makeAllLayer1Jets.remove( process.jetGenJetMatch )
process.makeAllLayer1Jets += process.myJetGenJetMatch
process.makeAllLayer1Jets.remove( process.allLayer1Jets )
process.makeAllLayer1Jets += process.allLayer1Jets
process.allLayer1Jets.genJetMatch = "myJetGenJetMatch"
# Define the path
process.p = cms.Path(
    process.patDefaultSequence
)
process.maxEvents.input = 1000 # Reduce number of events for testing.
process.out.fileName = 'edmPatMcMatch.root'
process.out.outputCommands += [ 'keep recoGenParticles_genParticles_*_*' ] # Keeps the MC objects for references
process.options.wantSummary = False # to suppress the long output at the end of the job
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
8954d8187ee611a1397e099108f1803f3fa5ff35 | b503e79ccfca67c8114f5bd7a215f5ae993a0ba4 | /airflow/providers/amazon/aws/operators/sagemaker_endpoint_config.py | bbf2be11441f1d4d1f2d7d7be3d6bc723ee9308a | [
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT"
] | permissive | github/incubator-airflow | df1d9780f862ea1df8261ea6015dd50a4583f983 | 73f70e00b9fd294057f8ca6b714a85622f6d5dd5 | refs/heads/gh-2.0.2 | 2023-07-29T18:08:43.140580 | 2022-09-14T18:23:42 | 2022-09-14T18:23:42 | 80,634,006 | 24 | 27 | Apache-2.0 | 2023-04-18T04:24:36 | 2017-02-01T15:34:55 | Python | UTF-8 | Python | false | false | 2,217 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.operators.sagemaker_base import SageMakerBaseOperator
from airflow.utils.decorators import apply_defaults
class SageMakerEndpointConfigOperator(SageMakerBaseOperator):
    """
    Create a SageMaker endpoint config.

    This operator returns The ARN of the endpoint config created in Amazon SageMaker

    :param config: The configuration necessary to create an endpoint config.
        For details of the configuration parameter see :py:meth:`SageMaker.Client.create_endpoint_config`
    :type config: dict
    :param aws_conn_id: The AWS connection ID to use.
    :type aws_conn_id: str
    """

    # Nested config fields that the base class casts to int before the
    # request is sent (templated values arrive as strings).
    integer_fields = [['ProductionVariants', 'InitialInstanceCount']]

    @apply_defaults
    def __init__(self, *, config: dict, **kwargs):
        super().__init__(config=config, **kwargs)
        self.config = config

    def execute(self, context) -> dict:
        """Create the endpoint config and return its description.

        :param context: the Airflow task context (unused here, required by
            the operator interface).
        :return: ``{'EndpointConfig': <describe_endpoint_config result>}``
        :raises AirflowException: if the create call does not return HTTP 200.
        """
        self.preprocess_config()
        # Hoist the repeated name lookup; the key is required by the API.
        endpoint_config_name = self.config['EndpointConfigName']
        self.log.info('Creating SageMaker Endpoint Config %s.', endpoint_config_name)
        response = self.hook.create_endpoint_config(self.config)
        if response['ResponseMetadata']['HTTPStatusCode'] != 200:
            # Surface the full response so the failure cause is visible in logs.
            raise AirflowException('Sagemaker endpoint config creation failed: %s' % response)
        # Guard clause above makes the `else` unnecessary: return the freshly
        # created config so downstream tasks can pull it via XCom.
        return {'EndpointConfig': self.hook.describe_endpoint_config(endpoint_config_name)}
| [
"noreply@github.com"
] | github.noreply@github.com |
fcb09819fc0640389e046c9fbd0577354ac1e6c1 | 7ad5582d0f59de00c68e1f1dec626be68ac6332e | /src/test/parser/pattern/nodes/test_one_or_more.py | 52252322db65c60fdc295a1cb4f70372188b9286 | [
"MIT"
] | permissive | ebegen/program-y | ddb5525cb992de7f80f793742876bc9285e27b2d | 4ceb6a31c5ead813faad1b454f0c432e93d6ef7d | refs/heads/master | 2021-01-11T14:01:42.053013 | 2017-06-15T20:39:37 | 2017-06-15T20:39:37 | 94,932,566 | 1 | 0 | null | 2017-06-20T20:32:34 | 2017-06-20T20:32:34 | null | UTF-8 | Python | false | false | 1,734 | py | from programy.parser.exceptions import ParserException
from test.parser.pattern.nodes.base import PatternTestBaseClass
from programy.parser.pattern.nodes.oneormore import PatternOneOrMoreWildCardNode
class PatternOneOrMoreWildCardNodeTests(PatternTestBaseClass):
    # Unit tests for PatternOneOrMoreWildCardNode: rejection of invalid
    # wildcard characters, and the "*" / "_" one-or-more wildcard variants.
    def test_invalid_wildcard(self):
        # Only "*" and "_" are valid one-or-more wildcards; anything else
        # must raise ParserException from the constructor.
        with self.assertRaises(ParserException) as raised:
            node = PatternOneOrMoreWildCardNode("X")
        # NOTE(review): if the constructor raised as expected, `node` was
        # never bound, so this line would hit NameError rather than perform
        # the check — confirm this assertion is actually reachable.
        self.assertIsNone(node)
    def test_star(self):
        # "*" wildcard: verify the node's type predicates, emptiness of its
        # child collection, and its string rendering.
        node = PatternOneOrMoreWildCardNode("*")
        self.assertIsNotNone(node)
        self.assertFalse(node.is_root())
        self.assertFalse(node.is_priority())
        self.assertFalse(node.is_zero_or_more())
        self.assertTrue(node.is_one_or_more())
        self.assertFalse(node.is_set())
        self.assertFalse(node.is_bot())
        self.assertFalse(node.is_template())
        self.assertFalse(node.is_that())
        self.assertFalse(node.is_topic())
        self.assertTrue(node.is_wildcard())
        self.assertIsNotNone(node.children)
        self.assertFalse(node.has_children())
        self.assertEqual(node.wildcard, "*")
        # equivalent() compares node identity/shape; equals() performs a
        # word match, which a wildcard node itself does not satisfy.
        self.assertTrue(node.equivalent(PatternOneOrMoreWildCardNode("*")))
        self.assertFalse(node.equals(self.bot, "testid", "*"))
        self.assertEqual(node.to_string(), "ONEORMORE [P(0)^(0)#(0)C(0)_(0)*(0)To(0)Th(0)Te(0)] wildcard=[*]")
    def test_underline(self):
        # "_" wildcard: same behaviour as "*" apart from the stored character.
        node = PatternOneOrMoreWildCardNode("_")
        self.assertIsNotNone(node)
        self.assertEqual(node.wildcard, "_")
        self.assertTrue(node.equivalent(PatternOneOrMoreWildCardNode("_")))
        self.assertEqual(node.to_string(), "ONEORMORE [P(0)^(0)#(0)C(0)_(0)*(0)To(0)Th(0)Te(0)] wildcard=[_]")
| [
"keith@keithsterling.com"
] | keith@keithsterling.com |
f40f42124108ad851319c5860eb45008d4a69137 | 5a281cb78335e06c631181720546f6876005d4e5 | /manila-8.0.0/manila/tests/share/drivers/test_helpers.py | 9894cf48346b73c3fcb620c4a4d48501384d5743 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 32,350 | py | # Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ddt
import mock
from oslo_config import cfg
from manila.common import constants as const
from manila import exception
import manila.share.configuration
from manila.share.drivers import helpers
from manila import test
from manila.tests import fake_compute
from manila.tests import fake_utils
from manila.tests.share.drivers import test_generic
CONF = cfg.CONF
@ddt.ddt
class NFSHelperTestCase(test.TestCase):
    """Test case for NFS helper."""
    def setUp(self):
        # Build an NFSHelper wired to mocked local/remote executors so no
        # real commands run; fake_utils stubs out utils.execute globally.
        super(NFSHelperTestCase, self).setUp()
        fake_utils.stub_out_utils_execute(self)
        self.fake_conf = manila.share.configuration.Configuration(None)
        self._ssh_exec = mock.Mock(return_value=('', ''))
        self._execute = mock.Mock(return_value=('', ''))
        self._helper = helpers.NFSHelper(self._execute, self._ssh_exec,
                                         self.fake_conf)
        ip = '10.254.0.3'
        self.server = fake_compute.FakeServer(
            ip=ip, public_address=ip, instance_id='fake_instance_id')
        self.share_name = 'fake_share_name'
    def test_init_helper(self):
        # A missing `exportfs` binary must be reported as ManilaException.
        # mocks
        self.mock_object(
            self._helper, '_ssh_exec',
            mock.Mock(side_effect=exception.ProcessExecutionError(
                stderr='command not found')))
        # run
        self.assertRaises(exception.ManilaException,
                          self._helper.init_helper, self.server)
        # asserts
        self._helper._ssh_exec.assert_called_once_with(
            self.server, ['sudo', 'exportfs'])
    def test_init_helper_log(self):
        # Any other exportfs failure is logged and swallowed, not raised.
        # mocks
        self.mock_object(
            self._helper, '_ssh_exec',
            mock.Mock(side_effect=exception.ProcessExecutionError(
                stderr='fake')))
        # run
        self._helper.init_helper(self.server)
        # asserts
        self._helper._ssh_exec.assert_called_once_with(
            self.server, ['sudo', 'exportfs'])
    @ddt.data(
        {"server": {"public_address": "1.2.3.4"}, "version": 4},
        {"server": {"public_address": "1001::1002"}, "version": 6},
        {"server": {"public_address": "1.2.3.4", "admin_ip": "5.6.7.8"},
         "version": 4},
        {"server": {"public_address": "1.2.3.4", "ip": "9.10.11.12"},
         "version": 4},
        {"server": {"public_address": "1001::1001", "ip": "1001::1002"},
         "version": 6},
        {"server": {"public_address": "1001::1002", "admin_ip": "1001::1002"},
         "version": 6},
        {"server": {"public_addresses": ["1001::1002"]}, "version": 6},
        {"server": {"public_addresses": ["1.2.3.4", "1001::1002"]},
         "version": {"1.2.3.4": 4, "1001::1002": 6}},
    )
    @ddt.unpack
    def test_create_exports(self, server, version):
        # Rebuilds the expected export-location dicts from the same server
        # data: public addresses are non-admin, admin_ip/ip become an
        # admin-only export, and IPv6 addresses are bracketed.
        result = self._helper.create_exports(server, self.share_name)
        expected_export_locations = []
        path = os.path.join(CONF.share_mount_path, self.share_name)
        # NOTE(review): this assignment is repeated below before use —
        # the first one appears to be dead code.
        service_address = server.get("admin_ip", server.get("ip"))
        version_copy = version
        def convert_address(address, version):
            # IPv6 export paths need the address wrapped in brackets.
            if version == 4:
                return address
            return "[%s]" % address
        if 'public_addresses' in server:
            pairs = list(map(lambda addr: (addr, False),
                             server['public_addresses']))
        else:
            pairs = [(server['public_address'], False)]
        service_address = server.get("admin_ip", server.get("ip"))
        if service_address:
            pairs.append((service_address, True))
        for ip, is_admin in pairs:
            # A dict-valued `version` maps each address to its IP version.
            if isinstance(version_copy, dict):
                version = version_copy.get(ip)
            expected_export_locations.append({
                "path": "%s:%s" % (convert_address(ip, version), path),
                "is_admin_only": is_admin,
                "metadata": {
                    "export_location_metadata_example": "example",
                },
            })
        self.assertEqual(expected_export_locations, result)
    @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO)
    def test_update_access(self, access_level):
        # Verifies the exportfs add/remove sequence: already-applied rules
        # are skipped, deleted rules are unexported ('*' for 0.0.0.0/0),
        # and new rules are exported with the expected mount options.
        expected_mount_options = '%s,no_subtree_check,no_root_squash'
        self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')
        local_path = os.path.join(CONF.share_mount_path, self.share_name)
        exec_result = ' '.join([local_path, '2.2.2.3'])
        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(return_value=(exec_result, '')))
        access_rules = [
            test_generic.get_fake_access_rule('1.1.1.1', access_level),
            test_generic.get_fake_access_rule('2.2.2.2', access_level),
            test_generic.get_fake_access_rule('2.2.2.3', access_level)]
        add_rules = [
            test_generic.get_fake_access_rule('2.2.2.2', access_level),
            test_generic.get_fake_access_rule('2.2.2.3', access_level),
            test_generic.get_fake_access_rule('5.5.5.0/24', access_level)]
        delete_rules = [
            test_generic.get_fake_access_rule('3.3.3.3', access_level),
            test_generic.get_fake_access_rule('4.4.4.4', access_level, 'user'),
            test_generic.get_fake_access_rule('0.0.0.0/0', access_level)]
        self._helper.update_access(self.server, self.share_name, access_rules,
                                   add_rules=add_rules,
                                   delete_rules=delete_rules)
        local_path = os.path.join(CONF.share_mount_path, self.share_name)
        self._helper._ssh_exec.assert_has_calls([
            mock.call(self.server, ['sudo', 'exportfs']),
            mock.call(self.server, ['sudo', 'exportfs', '-u',
                                    ':'.join(['3.3.3.3', local_path])]),
            mock.call(self.server, ['sudo', 'exportfs', '-u',
                                    ':'.join(['*',
                                              local_path])]),
            mock.call(self.server, ['sudo', 'exportfs', '-o',
                                    expected_mount_options % access_level,
                                    ':'.join(['2.2.2.2', local_path])]),
            mock.call(self.server, ['sudo', 'exportfs', '-o',
                                    expected_mount_options % access_level,
                                    ':'.join(['5.5.5.0/24',
                                              local_path])]),
        ])
        self._helper._sync_nfs_temp_and_perm_files.assert_has_calls([
            mock.call(self.server), mock.call(self.server)])
    @ddt.data({'access': '10.0.0.1', 'result': '10.0.0.1'},
              {'access': '10.0.0.1/32', 'result': '10.0.0.1'},
              {'access': '10.0.0.0/24', 'result': '10.0.0.0/24'},
              {'access': '1001::1001', 'result': '[1001::1001]'},
              {'access': '1001::1000/128', 'result': '[1001::1000]'},
              {'access': '1001::1000/124', 'result': '[1001::1000]/124'})
    @ddt.unpack
    def test__get_parsed_address_or_cidr(self, access, result):
        # Single-host CIDRs (/32, /128) collapse to plain addresses;
        # IPv6 addresses are bracketed.
        self.assertEqual(result,
                         self._helper._get_parsed_address_or_cidr(access))
    @ddt.data('10.0.0.265', '10.0.0.1/33', '1001::10069', '1001::1000/129')
    def test__get_parsed_address_or_cidr_with_invalid_access(self, access):
        # Malformed addresses and out-of-range prefixes raise ValueError.
        self.assertRaises(ValueError,
                          self._helper._get_parsed_address_or_cidr,
                          access)
    def test_update_access_invalid_type(self):
        # Only 'ip' access type is supported by the NFS helper.
        access_rules = [test_generic.get_fake_access_rule(
            '2.2.2.2', const.ACCESS_LEVEL_RW, access_type='fake'), ]
        self.assertRaises(
            exception.InvalidShareAccess,
            self._helper.update_access,
            self.server,
            self.share_name,
            access_rules,
            [],
            [])
    def test_update_access_invalid_level(self):
        # Only RW/RO access levels are supported.
        access_rules = [test_generic.get_fake_access_rule(
            '2.2.2.2', 'fake_level', access_type='ip'), ]
        self.assertRaises(
            exception.InvalidShareAccessLevel,
            self._helper.update_access,
            self.server,
            self.share_name,
            access_rules,
            [],
            [])
    def test_update_access_delete_invalid_rule(self):
        # Deleting a rule of an unsupported type must not raise; the helper
        # simply ignores it and still syncs the export files.
        delete_rules = [test_generic.get_fake_access_rule(
            'lala', 'fake_level', access_type='user'), ]
        self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')
        self._helper.update_access(self.server, self.share_name, [],
                                   [], delete_rules)
        self._helper._sync_nfs_temp_and_perm_files.assert_called_with(
            self.server)
    def test_get_host_list(self):
        # get_host_list filters exportfs output down to the hosts of one share.
        fake_exportfs = ('/shares/share-1\n\t\t20.0.0.3\n'
                         '/shares/share-1\n\t\t20.0.0.6\n'
                         '/shares/share-2\n\t\t10.0.0.2\n'
                         '/shares/share-2\n\t\t10.0.0.5\n'
                         '/shares/share-3\n\t\t30.0.0.4\n'
                         '/shares/share-3\n\t\t30.0.0.7\n')
        expected = ['20.0.0.3', '20.0.0.6']
        result = self._helper.get_host_list(fake_exportfs, '/shares/share-1')
        self.assertEqual(expected, result)
    @ddt.data({"level": const.ACCESS_LEVEL_RW, "ip": "1.1.1.1",
               "expected": "1.1.1.1"},
              {"level": const.ACCESS_LEVEL_RO, "ip": "1.1.1.1",
               "expected": "1.1.1.1"},
              {"level": const.ACCESS_LEVEL_RW, "ip": "fd12:abcd::10",
               "expected": "[fd12:abcd::10]"},
              {"level": const.ACCESS_LEVEL_RO, "ip": "fd12:abcd::10",
               "expected": "[fd12:abcd::10]"})
    @ddt.unpack
    def test_update_access_recovery_mode(self, level, ip, expected):
        # Recovery mode: a host already present in exportfs output is first
        # unexported, then re-exported with the requested options.
        expected_mount_options = '%s,no_subtree_check,no_root_squash'
        access_rules = [test_generic.get_fake_access_rule(
            ip, level), ]
        self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')
        self.mock_object(self._helper, 'get_host_list',
                         mock.Mock(return_value=[ip]))
        self._helper.update_access(self.server, self.share_name, access_rules,
                                   [], [])
        local_path = os.path.join(CONF.share_mount_path, self.share_name)
        self._ssh_exec.assert_has_calls([
            mock.call(self.server, ['sudo', 'exportfs']),
            mock.call(
                self.server, ['sudo', 'exportfs', '-u',
                              ':'.join([expected,
                                        local_path])]),
            mock.call(self.server, ['sudo', 'exportfs', '-o',
                                    expected_mount_options % level,
                                    ':'.join([expected, local_path])]),
        ])
        self._helper._sync_nfs_temp_and_perm_files.assert_called_with(
            self.server)
    def test_sync_nfs_temp_and_perm_files(self):
        # NOTE(review): `for i in range(1)` builds a single-call list —
        # equivalent to one plain mock.call; possibly a leftover.
        self._helper._sync_nfs_temp_and_perm_files(self.server)
        self._helper._ssh_exec.assert_has_calls(
            [mock.call(self.server, mock.ANY) for i in range(1)])
    @ddt.data('/foo/bar', '5.6.7.8:/bar/quuz', '5.6.7.9:/foo/quuz',
              '[1001::1001]:/foo/bar', '[1001::1000]/:124:/foo/bar')
    def test_get_exports_for_share_single_ip(self, export_location):
        # Any host prefix in the stored export location is replaced with the
        # server's public address; only the path component is kept.
        server = dict(public_address='1.2.3.4')
        result = self._helper.get_exports_for_share(server, export_location)
        path = export_location.split(':')[-1]
        expected_export_locations = [
            {"is_admin_only": False,
             "path": "%s:%s" % (server["public_address"], path),
             "metadata": {"export_location_metadata_example": "example"}}
        ]
        self.assertEqual(expected_export_locations, result)
    @ddt.data('/foo/bar', '5.6.7.8:/bar/quuz', '5.6.7.9:/foo/quuz')
    def test_get_exports_for_share_multi_ip(self, export_location):
        # With multiple public addresses, one export location per address.
        server = dict(public_addresses=['1.2.3.4', '1.2.3.5'])
        result = self._helper.get_exports_for_share(server, export_location)
        path = export_location.split(':')[-1]
        expected_export_locations = list(map(
            lambda addr: {
                "is_admin_only": False,
                "path": "%s:%s" % (addr, path),
                "metadata": {"export_location_metadata_example": "example"}
            },
            server['public_addresses'])
        )
        self.assertEqual(expected_export_locations, result)
    @ddt.data(
        {'public_address_with_suffix': 'foo'},
        {'with_prefix_public_address': 'bar'},
        {'with_prefix_public_address_and_with_suffix': 'quuz'}, {})
    def test_get_exports_for_share_with_error(self, server):
        # A server dict without a usable public address key must raise.
        export_location = '1.2.3.4:/foo/bar'
        self.assertRaises(
            exception.ManilaException,
            self._helper.get_exports_for_share, server, export_location)
    @ddt.data('/foo/bar', '5.6.7.8:/foo/bar', '5.6.7.88:fake:/foo/bar',
              '[1001::1002]:/foo/bar', '[1001::1000]/124:/foo/bar')
    def test_get_share_path_by_export_location(self, export_location):
        # The share path is always the last ':'-separated component.
        result = self._helper.get_share_path_by_export_location(
            dict(), export_location)
        self.assertEqual('/foo/bar', result)
    @ddt.data(
        ('/shares/fake_share1\n\t\t1.1.1.10\n'
         '/shares/fake_share2\n\t\t1.1.1.16\n'
         '/mnt/fake_share1 1.1.1.11', False),
        ('/shares/fake_share_name\n\t\t1.1.1.10\n'
         '/shares/fake_share_name\n\t\t1.1.1.16\n'
         '/mnt/fake_share1\n\t\t1.1.1.11', True),
        ('/mnt/fake_share_name\n\t\t1.1.1.11\n'
         '/shares/fake_share_name\n\t\t1.1.1.10\n'
         '/shares/fake_share_name\n\t\t1.1.1.16\n', True))
    @ddt.unpack
    def test_disable_access_for_maintenance(self, output, hosts_match):
        # `hosts_match` marks whether the faked exportfs output contains this
        # share's path, i.e. whether any hosts should get unexported.
        fake_maintenance_path = "fake.path"
        self._helper.configuration.share_mount_path = '/shares'
        local_path = os.path.join(self._helper.configuration.share_mount_path,
                                  self.share_name)
        def fake_ssh_exec(*args, **kwargs):
            # Return the canned listing only for the plain `exportfs` query,
            # not for the `exportfs -u` unexport calls.
            if 'exportfs' in args[1] and '-u' not in args[1]:
                return output, ''
            else:
                return '', ''
        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(side_effect=fake_ssh_exec))
        self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')
        self.mock_object(self._helper, '_get_maintenance_file_path',
                         mock.Mock(return_value=fake_maintenance_path))
        self._helper.disable_access_for_maintenance(
            self.server, self.share_name)
        # Current rules must be saved to the maintenance file first.
        self._helper._ssh_exec.assert_any_call(
            self.server,
            ['cat', const.NFS_EXPORTS_FILE,
             '|', 'grep', self.share_name,
             '|', 'sudo', 'tee', fake_maintenance_path]
        )
        self._helper._ssh_exec.assert_has_calls([
            mock.call(self.server, ['sudo', 'exportfs']),
        ])
        if hosts_match:
            self._helper._ssh_exec.assert_has_calls([
                mock.call(self.server, ['sudo', 'exportfs', '-u',
                                        ':'.join(['1.1.1.10', local_path])]),
                mock.call(self.server, ['sudo', 'exportfs', '-u',
                                        ':'.join(['1.1.1.16', local_path])]),
            ])
        self._helper._sync_nfs_temp_and_perm_files.assert_called_once_with(
            self.server
        )
    def test_restore_access_after_maintenance(self):
        # Restoring appends the saved rules back, re-exports everything and
        # removes the maintenance file, all as one shell pipeline.
        fake_maintenance_path = "fake.path"
        self.mock_object(self._helper, '_get_maintenance_file_path',
                         mock.Mock(return_value=fake_maintenance_path))
        self.mock_object(self._helper, '_ssh_exec')
        self._helper.restore_access_after_maintenance(
            self.server, self.share_name)
        self._helper._ssh_exec.assert_called_once_with(
            self.server,
            ['cat', fake_maintenance_path,
             '|', 'sudo', 'tee', '-a', const.NFS_EXPORTS_FILE,
             '&&', 'sudo', 'exportfs', '-r', '&&', 'sudo', 'rm', '-f',
             fake_maintenance_path]
        )
@ddt.ddt
class CIFSHelperIPAccessTestCase(test.TestCase):
    """Test case for CIFS helper with IP access."""
    def setUp(self):
        # Wire a CIFSHelperIPAccess to mocked executors; all `net conf`
        # commands are intercepted by self._ssh_exec.
        super(CIFSHelperIPAccessTestCase, self).setUp()
        self.server_details = {'instance_id': 'fake',
                               'public_address': '1.2.3.4', }
        self.share_name = 'fake_share_name'
        self.fake_conf = manila.share.configuration.Configuration(None)
        self._ssh_exec = mock.Mock(return_value=('', ''))
        self._execute = mock.Mock(return_value=('', ''))
        self._helper = helpers.CIFSHelperIPAccess(self._execute,
                                                  self._ssh_exec,
                                                  self.fake_conf)
        self.access = dict(
            access_level=const.ACCESS_LEVEL_RW,
            access_type='ip',
            access_to='1.1.1.1')
    def test_init_helper(self):
        # Initialization just probes Samba's registry config.
        self._helper.init_helper(self.server_details)
        self._helper._ssh_exec.assert_called_once_with(
            self.server_details,
            ['sudo', 'net', 'conf', 'list'],
        )
    def test_create_export_share_does_not_exist(self):
        # `showshare` failing means the share is absent, so `addshare`
        # must be issued and the export location returned.
        def fake_ssh_exec(*args, **kwargs):
            if 'showshare' in args[1]:
                raise exception.ProcessExecutionError()
            else:
                return '', ''
        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(side_effect=fake_ssh_exec))
        ret = self._helper.create_exports(self.server_details, self.share_name)
        expected_location = [{
            "is_admin_only": False,
            "path": "\\\\%s\\%s" % (
                self.server_details['public_address'], self.share_name),
            "metadata": {"export_location_metadata_example": "example"}
        }]
        self.assertEqual(expected_location, ret)
        share_path = os.path.join(
            self._helper.configuration.share_mount_path,
            self.share_name)
        self._helper._ssh_exec.assert_has_calls([
            mock.call(
                self.server_details,
                ['sudo', 'net', 'conf', 'showshare', self.share_name, ]
            ),
            mock.call(
                self.server_details,
                [
                    'sudo', 'net', 'conf', 'addshare', self.share_name,
                    share_path, 'writeable=y', 'guest_ok=y',
                ]
            ),
            mock.call(self.server_details, mock.ANY),
        ])
    def test_create_export_share_does_not_exist_exception(self):
        # If `addshare` also fails, the error surfaces as ManilaException.
        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(
                             side_effect=[exception.ProcessExecutionError(),
                                          Exception('')]
                         ))
        self.assertRaises(
            exception.ManilaException, self._helper.create_exports,
            self.server_details, self.share_name)
    def test_create_exports_share_exist_recreate_true(self):
        # With recreate=True an existing share is deleted and re-added.
        ret = self._helper.create_exports(
            self.server_details, self.share_name, recreate=True)
        expected_location = [{
            "is_admin_only": False,
            "path": "\\\\%s\\%s" % (
                self.server_details['public_address'], self.share_name),
            "metadata": {"export_location_metadata_example": "example"}
        }]
        self.assertEqual(expected_location, ret)
        share_path = os.path.join(
            self._helper.configuration.share_mount_path,
            self.share_name)
        self._helper._ssh_exec.assert_has_calls([
            mock.call(
                self.server_details,
                ['sudo', 'net', 'conf', 'showshare', self.share_name, ]
            ),
            mock.call(
                self.server_details,
                ['sudo', 'net', 'conf', 'delshare', self.share_name, ]
            ),
            mock.call(
                self.server_details,
                [
                    'sudo', 'net', 'conf', 'addshare', self.share_name,
                    share_path, 'writeable=y', 'guest_ok=y',
                ]
            ),
            mock.call(self.server_details, mock.ANY),
        ])
    def test_create_export_share_exist_recreate_false(self):
        # An existing share with recreate=False is a backend error.
        self.assertRaises(
            exception.ShareBackendException,
            self._helper.create_exports,
            self.server_details,
            self.share_name,
            recreate=False,
        )
        self._helper._ssh_exec.assert_has_calls([
            mock.call(
                self.server_details,
                ['sudo', 'net', 'conf', 'showshare', self.share_name, ]
            ),
        ])
    def test_remove_exports(self):
        self._helper.remove_exports(self.server_details, self.share_name)
        self._helper._ssh_exec.assert_called_once_with(
            self.server_details,
            ['sudo', 'net', 'conf', 'delshare', self.share_name],
        )
    def test_remove_export_forcibly(self):
        # When `delshare` fails, the helper falls back to forcing the share
        # closed via smbcontrol.
        delshare_command = ['sudo', 'net', 'conf', 'delshare', self.share_name]
        def fake_ssh_exec(*args, **kwargs):
            if delshare_command == args[1]:
                raise exception.ProcessExecutionError()
            else:
                return ('', '')
        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(side_effect=fake_ssh_exec))
        self._helper.remove_exports(self.server_details, self.share_name)
        self._helper._ssh_exec.assert_has_calls([
            mock.call(
                self.server_details,
                ['sudo', 'net', 'conf', 'delshare', self.share_name],
            ),
            mock.call(
                self.server_details,
                ['sudo', 'smbcontrol', 'all', 'close-share', self.share_name],
            ),
        ])
    def test_update_access_wrong_access_level(self):
        # The IP-access CIFS helper supports RW only; RO must be rejected.
        access_rules = [test_generic.get_fake_access_rule(
            '2.2.2.2', const.ACCESS_LEVEL_RO), ]
        self.assertRaises(
            exception.InvalidShareAccessLevel,
            self._helper.update_access,
            self.server_details,
            self.share_name,
            access_rules,
            [],
            [])
    def test_update_access_wrong_access_type(self):
        access_rules = [test_generic.get_fake_access_rule(
            '2.2.2.2', const.ACCESS_LEVEL_RW, access_type='fake'), ]
        self.assertRaises(
            exception.InvalidShareAccess,
            self._helper.update_access,
            self.server_details,
            self.share_name,
            access_rules,
            [],
            [])
    def test_update_access(self):
        # The full rule set is written in one `hosts allow` setparm call.
        access_rules = [test_generic.get_fake_access_rule(
            '1.1.1.1', const.ACCESS_LEVEL_RW), ]
        self._helper.update_access(self.server_details, self.share_name,
                                   access_rules, [], [])
        self._helper._ssh_exec.assert_called_once_with(
            self.server_details, ['sudo', 'net', 'conf', 'setparm',
                                  self.share_name, 'hosts allow',
                                  '1.1.1.1'])
    def test_get_allow_hosts(self):
        # `getparm ... 'hosts allow'` output is split into a host list.
        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(
                             return_value=('1.1.1.1 2.2.2.2 3.3.3.3', '')))
        expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
        result = self._helper._get_allow_hosts(
            self.server_details, self.share_name)
        self.assertEqual(expected, result)
        cmd = ['sudo', 'net', 'conf', 'getparm', self.share_name,
               'hosts allow']
        self._helper._ssh_exec.assert_called_once_with(
            self.server_details, cmd)
    @ddt.data(
        '', '1.2.3.4:/nfs/like/export', '/1.2.3.4/foo', '\\1.2.3.4\\foo',
        '//1.2.3.4\\mixed_slashes_and_backslashes_one',
        '\\\\1.2.3.4/mixed_slashes_and_backslashes_two')
    def test__get_share_group_name_from_export_location(self, export_location):
        # Anything that is not a well-formed //host/share or \\host\share
        # UNC path is rejected.
        self.assertRaises(
            exception.InvalidShare,
            self._helper._get_share_group_name_from_export_location,
            export_location)
    @ddt.data('//5.6.7.8/foo', '\\\\5.6.7.8\\foo')
    def test_get_exports_for_share(self, export_location):
        # The stored host is replaced by the server's public address while
        # the share group name ('foo') is preserved.
        server = dict(public_address='1.2.3.4')
        self.mock_object(
            self._helper, '_get_share_group_name_from_export_location',
            mock.Mock(side_effect=(
                self._helper._get_share_group_name_from_export_location)))
        result = self._helper.get_exports_for_share(server, export_location)
        expected_export_location = [{
            "is_admin_only": False,
            "path": "\\\\%s\\foo" % server['public_address'],
            "metadata": {"export_location_metadata_example": "example"}
        }]
        self.assertEqual(expected_export_location, result)
        (self._helper._get_share_group_name_from_export_location.
            assert_called_once_with(export_location))
    @ddt.data(
        {'public_address_with_suffix': 'foo'},
        {'with_prefix_public_address': 'bar'},
        {'with_prefix_public_address_and_with_suffix': 'quuz'}, {})
    def test_get_exports_for_share_with_exception(self, server):
        # Missing/unrecognized public address keys must raise.
        export_location = '1.2.3.4:/foo/bar'
        self.assertRaises(
            exception.ManilaException,
            self._helper.get_exports_for_share, server, export_location)
    @ddt.data('//5.6.7.8/foo', '\\\\5.6.7.8\\foo')
    def test_get_share_path_by_export_location(self, export_location):
        # The share path comes from `getparm <group> path`, stripped of
        # surrounding whitespace.
        fake_path = ' /bar/quuz\n '
        fake_server = dict()
        self.mock_object(
            self._helper, '_ssh_exec',
            mock.Mock(return_value=(fake_path, 'fake')))
        self.mock_object(
            self._helper, '_get_share_group_name_from_export_location',
            mock.Mock(side_effect=(
                self._helper._get_share_group_name_from_export_location)))
        result = self._helper.get_share_path_by_export_location(
            fake_server, export_location)
        self.assertEqual('/bar/quuz', result)
        self._helper._ssh_exec.assert_called_once_with(
            fake_server, ['sudo', 'net', 'conf', 'getparm', 'foo', 'path'])
        (self._helper._get_share_group_name_from_export_location.
            assert_called_once_with(export_location))
    def test_disable_access_for_maintenance(self):
        # Current allow-hosts are saved to the maintenance file and the
        # live list is emptied.
        allowed_hosts = ['test', 'test2']
        maintenance_path = os.path.join(
            self._helper.configuration.share_mount_path,
            "%s.maintenance" % self.share_name)
        self.mock_object(self._helper, '_set_allow_hosts')
        self.mock_object(self._helper, '_get_allow_hosts',
                         mock.Mock(return_value=allowed_hosts))
        self._helper.disable_access_for_maintenance(
            self.server_details, self.share_name)
        self._helper._get_allow_hosts.assert_called_once_with(
            self.server_details, self.share_name)
        self._helper._set_allow_hosts.assert_called_once_with(
            self.server_details, [], self.share_name)
        valid_cmd = ['echo', "'test test2'", '|', 'sudo', 'tee',
                     maintenance_path]
        self._helper._ssh_exec.assert_called_once_with(
            self.server_details, valid_cmd)
    def test_restore_access_after_maintenance(self):
        # The saved host list is read back, re-applied, and the maintenance
        # file removed.
        fake_maintenance_path = "test.path"
        self.mock_object(self._helper, '_set_allow_hosts')
        self.mock_object(self._helper, '_get_maintenance_file_path',
                         mock.Mock(return_value=fake_maintenance_path))
        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(side_effect=[("fake fake2", 0), "fake"]))
        self._helper.restore_access_after_maintenance(
            self.server_details, self.share_name)
        self._helper._set_allow_hosts.assert_called_once_with(
            self.server_details, ['fake', 'fake2'], self.share_name)
        self._helper._ssh_exec.assert_any_call(
            self.server_details, ['cat', fake_maintenance_path])
        self._helper._ssh_exec.assert_any_call(
            self.server_details, ['sudo', 'rm', '-f', fake_maintenance_path])
@ddt.ddt
class CIFSHelperUserAccessTestCase(test.TestCase):
    """Test case for CIFS helper with user access."""
    # Sample rule dicts kept for reference; the tests below build their
    # rules via test_generic.get_fake_access_rule instead.
    access_rw = dict(
        access_level=const.ACCESS_LEVEL_RW,
        access_type='user',
        access_to='manila-user')
    access_ro = dict(
        access_level=const.ACCESS_LEVEL_RO,
        access_type='user',
        access_to='manila-user')
    def setUp(self):
        # Wire a CIFSHelperUserAccess to mocked executors.
        super(CIFSHelperUserAccessTestCase, self).setUp()
        self.server_details = {'instance_id': 'fake',
                               'public_address': '1.2.3.4', }
        self.share_name = 'fake_share_name'
        self.fake_conf = manila.share.configuration.Configuration(None)
        self._ssh_exec = mock.Mock(return_value=('', ''))
        self._execute = mock.Mock(return_value=('', ''))
        self._helper = helpers.CIFSHelperUserAccess(
            self._execute, self._ssh_exec, self.fake_conf)
    def test_update_access_exception_type(self):
        # The user-access helper rejects 'ip'-type rules.
        access_rules = [test_generic.get_fake_access_rule(
            'user1', const.ACCESS_LEVEL_RW, access_type='ip')]
        self.assertRaises(exception.InvalidShareAccess,
                          self._helper.update_access, self.server_details,
                          self.share_name, access_rules, [], [])
    def test_update_access(self):
        # RW users land in 'valid users', RO users in 'read list'.
        access_list = [test_generic.get_fake_access_rule(
            'user1', const.ACCESS_LEVEL_RW, access_type='user'),
            test_generic.get_fake_access_rule(
                'user2', const.ACCESS_LEVEL_RO, access_type='user')]
        self._helper.update_access(self.server_details, self.share_name,
                                   access_list, [], [])
        self._helper._ssh_exec.assert_has_calls([
            mock.call(self.server_details,
                      ['sudo', 'net', 'conf', 'setparm', self.share_name,
                       'valid users', 'user1']),
            mock.call(self.server_details,
                      ['sudo', 'net', 'conf', 'setparm', self.share_name,
                       'read list', 'user2'])
        ])
    def test_update_access_exception_level(self):
        # Unknown access levels must be rejected.
        access_rules = [test_generic.get_fake_access_rule(
            'user1', 'fake_level', access_type='user'), ]
        self.assertRaises(
            exception.InvalidShareAccessLevel,
            self._helper.update_access,
            self.server_details,
            self.share_name,
            access_rules,
            [],
            [])
@ddt.ddt
class NFSSynchronizedTestCase(test.TestCase):
    # Tests for the @nfs_synchronized decorator: it must derive its external
    # lock name from the server's 'lock_name', falling back to 'instance_id'.
    @helpers.nfs_synchronized
    def wrapped_method(self, server, share_name):
        # Trivial body so the test can verify the call went through the lock.
        return server['instance_id'] + share_name
    @ddt.data(
        ({'lock_name': 'FOO', 'instance_id': 'QUUZ'}, 'nfs-FOO'),
        ({'instance_id': 'QUUZ'}, 'nfs-QUUZ'),
    )
    @ddt.unpack
    def test_with_lock_name(self, server, expected_lock_name):
        share_name = 'fake_share_name'
        # Spy on utils.synchronized while keeping its real behaviour.
        self.mock_object(
            helpers.utils, 'synchronized',
            mock.Mock(side_effect=helpers.utils.synchronized))
        result = self.wrapped_method(server, share_name)
        self.assertEqual(server['instance_id'] + share_name, result)
        helpers.utils.synchronized.assert_called_once_with(
            expected_lock_name, external=True)
| [
"Wayne Gong@minbgong-winvm.cisco.com"
] | Wayne Gong@minbgong-winvm.cisco.com |
6985bd54d85367c3737c150e198b75089cbd5908 | a0376b1b780488c7f97c324cbc6de51f029b21c9 | /GULBI GOURI SHANKER_4AL18EC018.py.py | 983f54cef68d3643fd05afa74f12d9b3a47eddd9 | [] | no_license | alvas-education-foundation/ECE-2year-Code-Challenge | 7af0b0e887ec3359f1546ba8d0c1f30d99e9d295 | 3ffe3e196890433c62b74263de4717795176d3da | refs/heads/master | 2022-10-14T02:01:39.165085 | 2020-06-13T10:29:07 | 2020-06-13T10:29:07 | 266,272,378 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | correct = True
# Console login with fixed demo credentials and capped password retries.
# (`correct` is initialized to True on the line above.)
# NOTE: credentials are hard-coded for this exercise; never do this for real.
USERNAME = "micheal"
PASSWORD = "e3$WT89x"
MAX_PASSWORD_ATTEMPTS = 3
while correct:
    name = input("what is the user_name?")
    if name.lower() != USERNAME:
        # Wrong user name: offer to retry the name, or give up.
        retry = input("username incorrect,would you like to try the username again?y/n: ")
        if retry.lower() == "y":
            continue
        print("thankyou for trying to login")
        break
    # User name matched: allow a limited number of password attempts.
    attempts = 0
    while attempts < MAX_PASSWORD_ATTEMPTS:
        password = input("please enter your password: ")
        if password == PASSWORD:
            print("you have successfully login.")
            correct = False  # terminate the outer loop
            break
        attempts += 1
        if attempts < MAX_PASSWORD_ATTEMPTS:
            print("invalid password.")
        else:
            # Too many wrong passwords: lock the account and stop.
            print("account blocked.")
            correct = False
# Code has syntax error
File "<string>", line 14
correct=0:
^
SyntaxError: invalid syntax
| [
"noreply@github.com"
] | alvas-education-foundation.noreply@github.com |
58213c5f3427d53904ddc1ebb3dc309ff9fde3a7 | 236dd1755695fd582b85381cfd40be1886b1459f | /about/migrations/0003_aboutindexpage_body_pua.py | 8bc184adeb760b89fc65b4f53879634b283ad730 | [] | no_license | davebshow/lenguapurhepecha | bbdb841085adbd5b0f42764a7e4bcafd202068c8 | c6ab3ada2b770a0bcca210fa53bb1281857e9168 | refs/heads/master | 2020-06-16T11:37:31.100321 | 2016-11-29T18:03:43 | 2016-11-29T18:03:43 | 75,107,817 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-29 01:54
from __future__ import unicode_literals
from django.db import migrations
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    """Auto-generated migration: add the optional rich-text ``body_pua``
    field (Purepecha-language body) to ``AboutIndexPage``."""
    # Must be applied after the previous 'about' migration.
    dependencies = [
        ('about', '0002_auto_20161129_0114'),
    ]
    operations = [
        migrations.AddField(
            model_name='aboutindexpage',
            name='body_pua',
            # blank=True makes the field optional in forms/admin.
            field=wagtail.wagtailcore.fields.RichTextField(blank=True),
        ),
    ]
| [
"davebshow@gmail.com"
] | davebshow@gmail.com |
678ab04dfd419260daa2c3e9c5de70af34821483 | 091a6200be74bf6577c86f623665bcc24e16b02b | /CircuitPython_Templates/mp3_multi_file_playback/code.py | 33f93bfe528b1f5f01e69dcdde308dc8596fa399 | [
"MIT"
] | permissive | adafruit/Adafruit_Learning_System_Guides | b5f7bce40a16da64e7a79d4b39de032f2cca41d4 | 5eaa7a15a437c533b89f359a25983e24bb6b5438 | refs/heads/main | 2023-09-05T18:31:41.621956 | 2023-09-05T15:36:09 | 2023-09-05T15:36:09 | 105,065,494 | 937 | 937 | MIT | 2023-09-12T18:48:53 | 2017-09-27T20:22:44 | C | UTF-8 | Python | false | false | 2,920 | py | # SPDX-FileCopyrightText: 2021 Kattni Rembor for Adafruit Industries
# SPDX-License-Identifier: MIT
"""
CircuitPython multiple MP3 playback example.
Plays two MP3 files consecutively, once time each.
Remove this line and all of the following docstring content before submitting to the Learn repo.
INCLUDE THE MP3 FILES IN THIS DIRECTORY IN A DIRECTORY WITH THE RESULTING CODE.PY FILE.
Choose the setup section appropriate for the board into which this template code is going. The
default is for SAMD51 boards.
If the setup is commented out, uncomment it. Regardless, ALWAYS delete the comment above the chosen
setup is commented out, uncomment it. Regardless, ALWAYS delete the comment above the chosen
imports/setup and all other setup options so that the example includes ONLY the appropriate list
of imports and the hardware setup. For example, a generic SAMD51 example should be:
import board
import audiomp3
import audioio
audio = audioio.AudioOut(board.A0)
mp3files = ["slow.mp3", "happy.mp3"]
with open(mp3files[0], "rb") as mp3:
decoder = audiomp3.MP3Decoder(mp3)
for filename in mp3files:
with open(filename, "rb") as decoder.file:
audio.play(decoder)
print("Playing:", filename)
while audio.playing:
pass
The example content, as above, should contain NO commented out code, NO setup comment labels, and
NO other commented out setup code.
"""
import board
import audiomp3
# For most SAMD51 boards
import audioio
audio = audioio.AudioOut(board.A0)
# For most RP2040 and nRF boards
# import audiopwmio
#
# audio = audiopwmio.PWMAudioOut(board.A0)
# For MacroPad, Circuit Playground Bluefruit, and any RP2040 or nRF boards with a built-in speaker
# and requiring you to enable the SPEAKER_ENABLE pin
# import audiopwmio
# import digitalio
#
# shutdown = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
# shutdown.switch_to_output(True)
# audio = audiopwmio.PWMAudioOut(board.SPEAKER)
# For any SAMD51 boards with a built in speaker and requiring you to enable the SPEAKER_ENABLE pin
# import audioio
# import digitalio
#
# shutdown = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
# shutdown.switch_to_output(True)
# audio = audioio.AudioOut(board.SPEAKER)
# For CLUE or nRF boards with built-in speaker and no SPEAKER_ENABLE pin
# import audiopwmio
#
# audio = audiopwmio.PWMAudioOut(board.SPEAKER)
# For any SAMD51 boards with a built in speaker and no SPEAKER_ENABLE pin
# import audioio
#
# audio = audioio.AudioOut(board.SPEAKER)
# Files are played in list order, one time each.
mp3files = ["slow.mp3", "happy.mp3"]
# Create a single decoder from the first file; the same decoder object is
# reused for every track by reassigning its .file below.
with open(mp3files[0], "rb") as mp3:
    decoder = audiomp3.MP3Decoder(mp3)
for filename in mp3files:
    with open(filename, "rb") as decoder.file:
        audio.play(decoder)
        print("Playing:", filename)
        # Busy-wait until the current file finishes before starting the next.
        while audio.playing:
            pass
print("Done playing!")
| [
"kattni@adafruit.com"
] | kattni@adafruit.com |
cfbd00fd91d4aaf5d1b858f999bbb011bd8c64b4 | 48603962470b1858984342cc649b6b5376db4b9e | /user/admin.py | 4e46d6e667974c16ec527eda8660e9918a20b743 | [] | no_license | hacking-mango/hackerthon_team9_backend | 7589c0217652c61f033c292feb0f31e62db1328b | 779c010c743ae71f6ff6cb6222b1811606416079 | refs/heads/main | 2023-06-16T15:14:05.181076 | 2021-07-11T03:29:13 | 2021-07-11T03:29:13 | 382,746,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | from django.contrib import admin
from .models import User
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = ("nickname", "email", "profile_image", "created_at", "position")
list_display_links = (
"nickname",
"email",
)
| [
"tjdntjr123@gmail.com"
] | tjdntjr123@gmail.com |
ded69c52bf5e9c754f8bbb682bd3910d27bf8512 | 1ed4e96c20da03fbd3aa4f18d4b004a59d8f89e5 | /Repo/venv/Lib/site-packages/torch/utils/cpp_extension.py | 3f6ec30448dff109ebdc5a552a6f5e31e08dd470 | [] | no_license | donhatkha/CS2225.CH1501 | eebc854864dc6fe72a3650f640787de11d4e82b7 | 19d4dd3b11f8c9560d0d0a93882298637cacdc80 | refs/heads/master | 2023-07-19T13:27:17.862158 | 2021-02-08T07:19:05 | 2021-02-08T07:19:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78,130 | py | import copy
import glob
import imp
import os
import re
import shlex
import setuptools
import subprocess
import sys
import sysconfig
import warnings
import collections
import torch
import torch._appdirs
from .file_baton import FileBaton
from ._cpp_extension_versioner import ExtensionVersioner
from .hipify import hipify_python
from .hipify.hipify_python import get_hip_file_path, GeneratedFileCleaner
from typing import List, Optional
from setuptools.command.build_ext import build_ext
IS_WINDOWS = sys.platform == 'win32'  # True when running on native Windows
# Taken directly from python stdlib < 3.9
# See https://github.com/pytorch/pytorch/issues/48617
def _nt_quote_args(args: Optional[List[str]]) -> List[str]:
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# Cover None-type
if not args:
return []
return [f'"{arg}"' if ' ' in arg else arg for arg in args]
def _find_cuda_home() -> Optional[str]:
    r'''Finds the CUDA install path.

    Resolution order: CUDA_HOME/CUDA_PATH environment variables, the
    location of ``nvcc`` on PATH, then a platform-default install location.
    Returns None when no plausible install directory exists on disk.
    '''
    # Guess #1: explicit environment variables.
    cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
    if cuda_home is None:
        # Guess #2: derive the root from the nvcc binary on PATH
        # (<root>/bin/nvcc -> <root>).
        try:
            which = 'where' if IS_WINDOWS else 'which'
            with open(os.devnull, 'w') as devnull:
                nvcc = subprocess.check_output([which, 'nvcc'],
                                               stderr=devnull).decode().rstrip('\r\n')
            cuda_home = os.path.dirname(os.path.dirname(nvcc))
        except Exception:
            # Guess #3: conventional default install locations.
            if IS_WINDOWS:
                cuda_homes = glob.glob(
                    'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')
                if len(cuda_homes) == 0:
                    cuda_home = ''
                else:
                    cuda_home = cuda_homes[0]
            else:
                cuda_home = '/usr/local/cuda'
            # Only trust the default if it actually exists.
            if not os.path.exists(cuda_home):
                cuda_home = None
    # A toolkit without a usable runtime is suspicious enough to mention.
    if cuda_home and not torch.cuda.is_available():
        print("No CUDA runtime is found, using CUDA_HOME='{}'".format(cuda_home))
    return cuda_home
def _find_rocm_home() -> Optional[str]:
    r'''Finds the ROCm install path.

    Resolution order: ROCM_HOME/ROCM_PATH environment variables, the
    location of ``hipcc`` on PATH, then the conventional /opt/rocm default.
    Returns None when no plausible install directory exists on disk.
    '''
    # Guess #1: explicit environment variables.
    rocm_home = os.environ.get('ROCM_HOME') or os.environ.get('ROCM_PATH')
    if rocm_home is None:
        # Guess #2: derive the root from the hipcc binary on PATH.
        try:
            hipcc = subprocess.check_output(
                ['which', 'hipcc'], stderr=subprocess.DEVNULL).decode().rstrip('\r\n')
            # this will be either <ROCM_HOME>/hip/bin/hipcc or <ROCM_HOME>/bin/hipcc
            rocm_home = os.path.dirname(os.path.dirname(hipcc))
            if os.path.basename(rocm_home) == 'hip':
                rocm_home = os.path.dirname(rocm_home)
        except Exception:
            # Guess #3: conventional default install location.
            rocm_home = '/opt/rocm'
            if not os.path.exists(rocm_home):
                rocm_home = None
    # A toolkit without a HIP-enabled torch build is suspicious enough to mention.
    if rocm_home and torch.version.hip is None:
        print("No ROCm runtime is found, using ROCM_HOME='{}'".format(rocm_home))
    return rocm_home
def _join_rocm_home(*paths) -> str:
    r'''
    Join path components onto ROCM_HOME.

    Raising here (rather than at import time) lets the missing-$ROCM_HOME
    error surface lazily, only when a ROCm-specific path is actually needed.
    '''
    # Guard clauses: no ROCm root configured, or unsupported platform.
    if ROCM_HOME is None:
        raise EnvironmentError('ROCM_HOME environment variable is not set. '
                               'Please set it to your ROCm install root.')
    if IS_WINDOWS:
        raise EnvironmentError('Building PyTorch extensions using '
                               'ROCm and Windows is not supported.')
    return os.path.join(ROCM_HOME, *paths)
# Oldest compiler versions whose ABI is accepted for official PyTorch binaries.
MINIMUM_GCC_VERSION = (5, 0, 0)
MINIMUM_MSVC_VERSION = (19, 0, 24215)
# User-facing warning texts (runtime strings; emitted via warnings.warn).
ABI_INCOMPATIBILITY_WARNING = '''
!! WARNING !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Your compiler ({}) may be ABI-incompatible with PyTorch!
Please use a compiler that is ABI-compatible with GCC 5.0 and above.
See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html.
See https://gist.github.com/goldsborough/d466f43e8ffc948ff92de7486c5216d6
for instructions on how to install GCC 5 or higher.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! WARNING !!
'''
WRONG_COMPILER_WARNING = '''
!! WARNING !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Your compiler ({user_compiler}) is not compatible with the compiler Pytorch was
built with for this platform, which is {pytorch_compiler} on {platform}. Please
use {pytorch_compiler} to to compile your extension. Alternatively, you may
compile PyTorch from source using {user_compiler}, and then you can also use
{user_compiler} to compile your extension.
See https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md for help
with compiling PyTorch from source.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! WARNING !!
'''
# Toolkit locations and versions, resolved once at import time.
ROCM_HOME = _find_rocm_home()
MIOPEN_HOME = _join_rocm_home('miopen') if ROCM_HOME else None
IS_HIP_EXTENSION = True if ((ROCM_HOME is not None) and (torch.version.hip is not None)) else False
ROCM_VERSION = None
if torch.version.hip is not None:
    # torch.version.hip looks like "major.minor[...]"; keep (major, minor).
    ROCM_VERSION = tuple(int(v) for v in torch.version.hip.split('.')[:2])
CUDA_HOME = _find_cuda_home()
CUDNN_HOME = os.environ.get('CUDNN_HOME') or os.environ.get('CUDNN_PATH')
# PyTorch releases have the version pattern major.minor.patch, whereas when
# PyTorch is built from source, we append the git commit hash, which gives
# it the below pattern.
BUILT_FROM_SOURCE_VERSION_PATTERN = re.compile(r'\d+\.\d+\.\d+\w+\+\w+')
# Baseline compiler flags, shared by the setuptools and JIT build paths.
COMMON_MSVC_FLAGS = ['/MD', '/wd4819', '/wd4251', '/wd4244', '/wd4267', '/wd4275', '/wd4018', '/wd4190', '/EHsc']
MSVC_IGNORE_CUDAFE_WARNINGS = [
    'base_class_has_different_dll_interface',
    'field_without_dll_interface',
    'dll_interface_conflict_none_assumed',
    'dll_interface_conflict_dllexport_assumed'
]
COMMON_NVCC_FLAGS = [
    '-D__CUDA_NO_HALF_OPERATORS__',
    '-D__CUDA_NO_HALF_CONVERSIONS__',
    '-D__CUDA_NO_HALF2_OPERATORS__',
    '--expt-relaxed-constexpr'
]
COMMON_HIPCC_FLAGS = [
    '-fPIC',
    '-D__HIP_PLATFORM_HCC__=1',
    '-DCUDA_HAS_FP16=1',
    '-D__HIP_NO_HALF_OPERATORS__=1',
    '-D__HIP_NO_HALF_CONVERSIONS__=1',
]
JIT_EXTENSION_VERSIONER = ExtensionVersioner()
# Maps a distutils platform name to the vcvarsall.bat architecture argument.
PLAT_TO_VCVARS = {
    'win32' : 'x86',
    'win-amd64' : 'x86_amd64',
}
def _is_binary_build() -> bool:
    """Return True when this torch is an official binary (not built from source).

    Source builds append "+<commit>" to the version string, which is what
    BUILT_FROM_SOURCE_VERSION_PATTERN recognizes.
    """
    return BUILT_FROM_SOURCE_VERSION_PATTERN.match(torch.version.__version__) is None
def _accepted_compilers_for_platform() -> List[str]:
# gnu-c++ and gnu-cc are the conda gcc compilers
return ['clang++', 'clang'] if sys.platform.startswith('darwin') else ['g++', 'gcc', 'gnu-c++', 'gnu-cc']
def get_default_build_root() -> str:
    r'''
    Returns the path to the root folder under which extensions will built.

    Each extension gets its own subfolder underneath this root: for root
    ``p`` and extension ``ext``, the build folder is ``p/ext``. The root is
    a per-user cache directory, so multiple users on one machine do not run
    into permission issues.
    '''
    cache_dir = torch._appdirs.user_cache_dir(appname='torch_extensions')
    return os.path.realpath(cache_dir)
def check_compiler_ok_for_platform(compiler: str) -> bool:
    r'''
    Verifies that the compiler is the expected one for the current platform.
    Arguments:
        compiler (str): The compiler executable to check.
    Returns:
        True if the compiler is gcc/g++ on Linux or clang/clang++ on macOS,
        and always True for Windows.
    '''
    if IS_WINDOWS:
        return True
    # Resolve the executable on PATH, then its real file behind any symlink.
    which = subprocess.check_output(['which', compiler], stderr=subprocess.STDOUT)
    # Use os.path.realpath to resolve any symlinks, in particular from 'c++' to e.g. 'g++'.
    compiler_path = os.path.realpath(which.decode().strip())
    # Check the compiler name
    if any(name in compiler_path for name in _accepted_compilers_for_platform()):
        return True
    # If ccache is used the compiler path is /usr/bin/ccache. Check by -v flag.
    version_string = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT).decode()
    if sys.platform.startswith('linux'):
        # Check for 'gcc' or 'g++': gcc's -v output reports the real compiler
        # in its COLLECT_GCC line even when invoked through ccache.
        pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE)
        results = re.findall(pattern, version_string)
        if len(results) != 1:
            return False
        compiler_path = os.path.realpath(results[0].strip())
        return any(name in compiler_path for name in _accepted_compilers_for_platform())
    if sys.platform.startswith('darwin'):
        # Check for 'clang' or 'clang++'
        return version_string.startswith("Apple clang")
    return False
def check_compiler_abi_compatibility(compiler) -> bool:
    r'''
    Verifies that the given compiler is ABI-compatible with PyTorch.
    Arguments:
        compiler (str): The compiler executable name to check (e.g. ``g++``).
            Must be executable in a shell process.
    Returns:
        False if the compiler is (likely) ABI-incompatible with PyTorch,
        else True.
    '''
    # Source builds and an explicit opt-out env var skip the check entirely.
    if not _is_binary_build():
        return True
    if os.environ.get('TORCH_DONT_CHECK_COMPILER_ABI') in ['ON', '1', 'YES', 'TRUE', 'Y']:
        return True
    # First check if the compiler is one of the expected ones for the particular platform.
    if not check_compiler_ok_for_platform(compiler):
        warnings.warn(WRONG_COMPILER_WARNING.format(
            user_compiler=compiler,
            pytorch_compiler=_accepted_compilers_for_platform()[0],
            platform=sys.platform))
        return False
    if sys.platform.startswith('darwin'):
        # There is no particular minimum version we need for clang, so we're good here.
        return True
    try:
        if sys.platform.startswith('linux'):
            minimum_required_version = MINIMUM_GCC_VERSION
            versionstr = subprocess.check_output([compiler, '-dumpfullversion', '-dumpversion'])
            version = versionstr.decode().strip().split('.')
        else:
            # Windows/MSVC: parse "major.minor.build" out of cl's banner.
            minimum_required_version = MINIMUM_MSVC_VERSION
            compiler_info = subprocess.check_output(compiler, stderr=subprocess.STDOUT)
            match = re.search(r'(\d+)\.(\d+)\.(\d+)', compiler_info.decode().strip())
            version = (0, 0, 0) if match is None else match.groups()
    except Exception:
        # Probing the compiler failed; warn and treat as incompatible.
        _, error, _ = sys.exc_info()
        warnings.warn('Error checking compiler version for {}: {}'.format(compiler, error))
        return False
    if tuple(map(int, version)) >= minimum_required_version:
        return True
    compiler = '{} {}'.format(compiler, ".".join(version))
    warnings.warn(ABI_INCOMPATIBILITY_WARNING.format(compiler))
    return False
# See below for why we inherit BuildExtension from object.
# https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj-when
class BuildExtension(build_ext, object):
    r'''
    A custom :mod:`setuptools` build extension .
    This :class:`setuptools.build_ext` subclass takes care of passing the
    minimum required compiler flags (e.g. ``-std=c++14``) as well as mixed
    C++/CUDA compilation (and support for CUDA files in general).
    When using :class:`BuildExtension`, it is allowed to supply a dictionary
    for ``extra_compile_args`` (rather than the usual list) that maps from
    languages (``cxx`` or ``nvcc``) to a list of additional compiler flags to
    supply to the compiler. This makes it possible to supply different flags to
    the C++ and CUDA compiler during mixed compilation.
    ``use_ninja`` (bool): If ``use_ninja`` is ``True`` (default), then we
    attempt to build using the Ninja backend. Ninja greatly speeds up
    compilation compared to the standard ``setuptools.build_ext``.
    Fallbacks to the standard distutils backend if Ninja is not available.
    .. note::
        By default, the Ninja backend uses #CPUS + 2 workers to build the
        extension. This may use up too many resources on some systems. One
        can control the number of workers by setting the `MAX_JOBS` environment
        variable to a non-negative number.
    '''
    @classmethod
    def with_options(cls, **options):
        r'''
        Returns a subclass with alternative constructor that extends any original keyword
        arguments to the original constructor with the given options.
        '''
        class cls_with_options(cls):  # type: ignore
            def __init__(self, *args, **kwargs):
                kwargs.update(options)
                super().__init__(*args, **kwargs)
        return cls_with_options
    def __init__(self, *args, **kwargs) -> None:
        super(BuildExtension, self).__init__(*args, **kwargs)
        self.no_python_abi_suffix = kwargs.get("no_python_abi_suffix", False)
        self.use_ninja = kwargs.get('use_ninja', True)
        if self.use_ninja:
            # Test if we can use ninja. Fallback otherwise.
            msg = ('Attempted to use ninja as the BuildExtension backend but '
                   '{}. Falling back to using the slow distutils backend.')
            if not is_ninja_available():
                warnings.warn(msg.format('we could not find ninja.'))
                self.use_ninja = False
    def finalize_options(self) -> None:
        super().finalize_options()
        # NOTE(review): with ninja enabled, builds are forced to re-run —
        # presumably ninja performs its own up-to-date checks; confirm.
        if self.use_ninja:
            self.force = True
    def build_extensions(self) -> None:
        """Add Torch-specific flags, then compile via ninja or patched distutils."""
        self._check_abi()
        for extension in self.extensions:
            self._add_compile_flag(extension, '-DTORCH_API_INCLUDE_EXTENSION_H')
            # See note [Pybind11 ABI constants]
            for name in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]:
                val = getattr(torch._C, f"_PYBIND11_{name}")
                if val is not None and not IS_WINDOWS:
                    self._add_compile_flag(extension, f'-DPYBIND11_{name}="{val}"')
            self._define_torch_extension_name(extension)
            self._add_gnu_cpp_abi_flag(extension)
        # Register .cu, .cuh and .hip as valid source extensions.
        self.compiler.src_extensions += ['.cu', '.cuh', '.hip']
        # Save the original _compile method for later.
        if self.compiler.compiler_type == 'msvc':
            self.compiler._cpp_extensions += ['.cu', '.cuh']
            original_compile = self.compiler.compile
            original_spawn = self.compiler.spawn
        else:
            original_compile = self.compiler._compile
        def append_std14_if_no_std_present(cflags) -> None:
            # NVCC does not allow multiple -std to be passed, so we avoid
            # overriding the option if the user explicitly passed it.
            cpp_format_prefix = '/{}:' if self.compiler.compiler_type == 'msvc' else '-{}='
            cpp_flag_prefix = cpp_format_prefix.format('std')
            cpp_flag = cpp_flag_prefix + 'c++14'
            if not any(flag.startswith(cpp_flag_prefix) for flag in cflags):
                cflags.append(cpp_flag)
        def unix_cuda_flags(cflags):
            # Assemble nvcc flags for unix, honoring a user-provided host
            # compiler from $CC via -ccbin.
            _ccbin = os.getenv("CC")
            return (COMMON_NVCC_FLAGS +
                    ['--compiler-options', "'-fPIC'"] +
                    cflags + _get_cuda_arch_flags(cflags) +
                    (['-ccbin', _ccbin] if _ccbin is not None else []))
        def convert_to_absolute_paths_inplace(paths):
            # Helper function. See Note [Absolute include_dirs]
            if paths is not None:
                for i in range(len(paths)):
                    if not os.path.isabs(paths[i]):
                        paths[i] = os.path.abspath(paths[i])
        def unix_wrap_single_compile(obj, src, ext, cc_args, extra_postargs, pp_opts) -> None:
            # Copy before we make any modifications.
            cflags = copy.deepcopy(extra_postargs)
            try:
                original_compiler = self.compiler.compiler_so
                if _is_cuda_file(src):
                    # Swap the executable to nvcc/hipcc just for this file.
                    nvcc = [_join_rocm_home('bin', 'hipcc') if IS_HIP_EXTENSION else _join_cuda_home('bin', 'nvcc')]
                    self.compiler.set_executable('compiler_so', nvcc)
                    if isinstance(cflags, dict):
                        cflags = cflags['nvcc']
                    if IS_HIP_EXTENSION:
                        cflags = cflags + _get_rocm_arch_flags(cflags)
                    else:
                        cflags = unix_cuda_flags(cflags)
                elif isinstance(cflags, dict):
                    cflags = cflags['cxx']
                if IS_HIP_EXTENSION:
                    cflags = cflags + COMMON_HIPCC_FLAGS
                append_std14_if_no_std_present(cflags)
                original_compile(obj, src, ext, cc_args, cflags, pp_opts)
            finally:
                # Put the original compiler back in place.
                self.compiler.set_executable('compiler_so', original_compiler)
        def unix_wrap_ninja_compile(sources,
                                    output_dir=None,
                                    macros=None,
                                    include_dirs=None,
                                    debug=0,
                                    extra_preargs=None,
                                    extra_postargs=None,
                                    depends=None):
            r"""Compiles sources by outputting a ninja file and running it."""
            # NB: I copied some lines from self.compiler (which is an instance
            # of distutils.UnixCCompiler). See the following link.
            # https://github.com/python/cpython/blob/f03a8f8d5001963ad5b5b28dbd95497e9cc15596/Lib/distutils/ccompiler.py#L564-L567
            # This can be fragile, but a lot of other repos also do this
            # (see https://github.com/search?q=_setup_compile&type=Code)
            # so it is probably OK; we'll also get CI signal if/when
            # we update our python version (which is when distutils can be
            # upgraded)
            # Use absolute path for output_dir so that the object file paths
            # (`objects`) get generated with absolute paths.
            output_dir = os.path.abspath(output_dir)
            # See Note [Absolute include_dirs]
            convert_to_absolute_paths_inplace(self.compiler.include_dirs)
            _, objects, extra_postargs, pp_opts, _ = \
                self.compiler._setup_compile(output_dir, macros,
                                             include_dirs, sources,
                                             depends, extra_postargs)
            common_cflags = self.compiler._get_cc_args(pp_opts, debug, extra_preargs)
            extra_cc_cflags = self.compiler.compiler_so[1:]
            with_cuda = any(map(_is_cuda_file, sources))
            # extra_postargs can be either:
            # - a dict mapping cxx/nvcc to extra flags
            # - a list of extra flags.
            if isinstance(extra_postargs, dict):
                post_cflags = extra_postargs['cxx']
            else:
                post_cflags = list(extra_postargs)
            if IS_HIP_EXTENSION:
                post_cflags += COMMON_HIPCC_FLAGS
            append_std14_if_no_std_present(post_cflags)
            cuda_post_cflags = None
            cuda_cflags = None
            if with_cuda:
                cuda_cflags = common_cflags
                if isinstance(extra_postargs, dict):
                    cuda_post_cflags = extra_postargs['nvcc']
                else:
                    cuda_post_cflags = list(extra_postargs)
                if IS_HIP_EXTENSION:
                    cuda_post_cflags = cuda_post_cflags + _get_rocm_arch_flags(cuda_post_cflags)
                    cuda_post_cflags = cuda_post_cflags + COMMON_HIPCC_FLAGS
                else:
                    cuda_post_cflags = unix_cuda_flags(cuda_post_cflags)
                append_std14_if_no_std_present(cuda_post_cflags)
                # Flags go through a shell when ninja runs them; quote each one.
                cuda_cflags = [shlex.quote(f) for f in cuda_cflags]
                cuda_post_cflags = [shlex.quote(f) for f in cuda_post_cflags]
            _write_ninja_file_and_compile_objects(
                sources=sources,
                objects=objects,
                cflags=[shlex.quote(f) for f in extra_cc_cflags + common_cflags],
                post_cflags=[shlex.quote(f) for f in post_cflags],
                cuda_cflags=cuda_cflags,
                cuda_post_cflags=cuda_post_cflags,
                build_directory=output_dir,
                verbose=True,
                with_cuda=with_cuda)
            # Return *all* object filenames, not just the ones we just built.
            return objects
        def win_cuda_flags(cflags):
            return (COMMON_NVCC_FLAGS +
                    cflags + _get_cuda_arch_flags(cflags))
        def win_wrap_single_compile(sources,
                                    output_dir=None,
                                    macros=None,
                                    include_dirs=None,
                                    debug=0,
                                    extra_preargs=None,
                                    extra_postargs=None,
                                    depends=None):
            self.cflags = copy.deepcopy(extra_postargs)
            extra_postargs = None
            def spawn(cmd):
                # Using regex to match src, obj and include files
                src_regex = re.compile('/T(p|c)(.*)')
                src_list = [
                    m.group(2) for m in (src_regex.match(elem) for elem in cmd)
                    if m
                ]
                obj_regex = re.compile('/Fo(.*)')
                obj_list = [
                    m.group(1) for m in (obj_regex.match(elem) for elem in cmd)
                    if m
                ]
                include_regex = re.compile(r'((\-|\/)I.*)')
                include_list = [
                    m.group(1)
                    for m in (include_regex.match(elem) for elem in cmd) if m
                ]
                if len(src_list) >= 1 and len(obj_list) >= 1:
                    src = src_list[0]
                    obj = obj_list[0]
                    if _is_cuda_file(src):
                        # Rewrite the MSVC command line into an nvcc invocation.
                        nvcc = _join_cuda_home('bin', 'nvcc')
                        if isinstance(self.cflags, dict):
                            cflags = self.cflags['nvcc']
                        elif isinstance(self.cflags, list):
                            cflags = self.cflags
                        else:
                            cflags = []
                        cflags = win_cuda_flags(cflags) + ['--use-local-env']
                        for flag in COMMON_MSVC_FLAGS:
                            cflags = ['-Xcompiler', flag] + cflags
                        for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS:
                            cflags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cflags
                        cmd = [nvcc, '-c', src, '-o', obj] + include_list + cflags
                    elif isinstance(self.cflags, dict):
                        cflags = COMMON_MSVC_FLAGS + self.cflags['cxx']
                        cmd += cflags
                    elif isinstance(self.cflags, list):
                        cflags = COMMON_MSVC_FLAGS + self.cflags
                        cmd += cflags
                return original_spawn(cmd)
            try:
                self.compiler.spawn = spawn
                return original_compile(sources, output_dir, macros,
                                        include_dirs, debug, extra_preargs,
                                        extra_postargs, depends)
            finally:
                self.compiler.spawn = original_spawn
        def win_wrap_ninja_compile(sources,
                                   output_dir=None,
                                   macros=None,
                                   include_dirs=None,
                                   debug=0,
                                   extra_preargs=None,
                                   extra_postargs=None,
                                   depends=None):
            if not self.compiler.initialized:
                self.compiler.initialize()
            output_dir = os.path.abspath(output_dir)
            # Note [Absolute include_dirs]
            # Convert relative path in self.compiler.include_dirs to absolute path if any,
            # For ninja build, the build location is not local, the build happens
            # in a in script created build folder, relative path lost their correctness.
            # To be consistent with jit extension, we allow user to enter relative include_dirs
            # in setuptools.setup, and we convert the relative path to absolute path here
            convert_to_absolute_paths_inplace(self.compiler.include_dirs)
            _, objects, extra_postargs, pp_opts, _ = \
                self.compiler._setup_compile(output_dir, macros,
                                             include_dirs, sources,
                                             depends, extra_postargs)
            common_cflags = extra_preargs or []
            cflags = []
            if debug:
                cflags.extend(self.compiler.compile_options_debug)
            else:
                cflags.extend(self.compiler.compile_options)
            common_cflags.extend(COMMON_MSVC_FLAGS)
            cflags = cflags + common_cflags + pp_opts
            with_cuda = any(map(_is_cuda_file, sources))
            # extra_postargs can be either:
            # - a dict mapping cxx/nvcc to extra flags
            # - a list of extra flags.
            if isinstance(extra_postargs, dict):
                post_cflags = extra_postargs['cxx']
            else:
                post_cflags = list(extra_postargs)
            append_std14_if_no_std_present(post_cflags)
            cuda_post_cflags = None
            cuda_cflags = None
            if with_cuda:
                cuda_cflags = ['--use-local-env']
                for common_cflag in common_cflags:
                    cuda_cflags.append('-Xcompiler')
                    cuda_cflags.append(common_cflag)
                for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS:
                    cuda_cflags.append('-Xcudafe')
                    cuda_cflags.append('--diag_suppress=' + ignore_warning)
                cuda_cflags.extend(pp_opts)
                if isinstance(extra_postargs, dict):
                    cuda_post_cflags = extra_postargs['nvcc']
                else:
                    cuda_post_cflags = list(extra_postargs)
                cuda_post_cflags = win_cuda_flags(cuda_post_cflags)
            # Windows shells need DOS-style quoting rather than shlex quoting.
            cflags = _nt_quote_args(cflags)
            post_cflags = _nt_quote_args(post_cflags)
            if with_cuda:
                cuda_cflags = _nt_quote_args(cuda_cflags)
                cuda_post_cflags = _nt_quote_args(cuda_post_cflags)
            _write_ninja_file_and_compile_objects(
                sources=sources,
                objects=objects,
                cflags=cflags,
                post_cflags=post_cflags,
                cuda_cflags=cuda_cflags,
                cuda_post_cflags=cuda_post_cflags,
                build_directory=output_dir,
                verbose=True,
                with_cuda=with_cuda)
            # Return *all* object filenames, not just the ones we just built.
            return objects
        # Monkey-patch the _compile or compile method.
        # https://github.com/python/cpython/blob/dc0284ee8f7a270b6005467f26d8e5773d76e959/Lib/distutils/ccompiler.py#L511
        if self.compiler.compiler_type == 'msvc':
            if self.use_ninja:
                self.compiler.compile = win_wrap_ninja_compile
            else:
                self.compiler.compile = win_wrap_single_compile
        else:
            if self.use_ninja:
                self.compiler.compile = unix_wrap_ninja_compile
            else:
                self.compiler._compile = unix_wrap_single_compile
        build_ext.build_extensions(self)
    def get_ext_filename(self, ext_name):
        # Get the original shared library name. For Python 3, this name will be
        # suffixed with "<SOABI>.so", where <SOABI> will be something like
        # cpython-37m-x86_64-linux-gnu.
        ext_filename = super(BuildExtension, self).get_ext_filename(ext_name)
        # If `no_python_abi_suffix` is `True`, we omit the Python 3 ABI
        # component. This makes building shared libraries with setuptools that
        # aren't Python modules nicer.
        if self.no_python_abi_suffix:
            # The parts will be e.g. ["my_extension", "cpython-37m-x86_64-linux-gnu", "so"].
            ext_filename_parts = ext_filename.split('.')
            # Omit the second to last element.
            without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:]
            ext_filename = '.'.join(without_abi)
        return ext_filename
    def _check_abi(self):
        """Warn if the configured C++ compiler is ABI-incompatible with torch."""
        # On some platforms, like Windows, compiler_cxx is not available.
        if hasattr(self.compiler, 'compiler_cxx'):
            compiler = self.compiler.compiler_cxx[0]
        elif IS_WINDOWS:
            compiler = os.environ.get('CXX', 'cl')
        else:
            compiler = os.environ.get('CXX', 'c++')
        check_compiler_abi_compatibility(compiler)
        # Warn user if VC env is activated but `DISTUILS_USE_SDK` is not set.
        if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' in os.environ and 'DISTUTILS_USE_SDK' not in os.environ:
            msg = ('It seems that the VC environment is activated but DISTUTILS_USE_SDK is not set.'
                   'This may lead to multiple activations of the VC env.'
                   'Please set `DISTUTILS_USE_SDK=1` and try again.')
            raise UserWarning(msg)
    def _add_compile_flag(self, extension, flag):
        """Append `flag` to the extension's extra_compile_args (list or dict form)."""
        extension.extra_compile_args = copy.deepcopy(extension.extra_compile_args)
        if isinstance(extension.extra_compile_args, dict):
            for args in extension.extra_compile_args.values():
                args.append(flag)
        else:
            extension.extra_compile_args.append(flag)
    def _define_torch_extension_name(self, extension):
        # pybind11 doesn't support dots in the names
        # so in order to support extensions in the packages
        # like torch._C, we take the last part of the string
        # as the library name
        names = extension.name.split('.')
        name = names[-1]
        define = '-DTORCH_EXTENSION_NAME={}'.format(name)
        self._add_compile_flag(extension, define)
    def _add_gnu_cpp_abi_flag(self, extension):
        # use the same CXX ABI as what PyTorch was compiled with
        self._add_compile_flag(extension, '-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI)))
def CppExtension(name, sources, *args, **kwargs):
    r'''
    Create a :class:`setuptools.Extension` configured for building C++ code
    against PyTorch.

    This is a convenience wrapper: it fills in the PyTorch include
    directories, library directories and link libraries, then forwards
    everything else to the :class:`setuptools.Extension` constructor.

    Example:
        >>> from setuptools import setup
        >>> from torch.utils.cpp_extension import BuildExtension, CppExtension
        >>> setup(
                name='extension',
                ext_modules=[
                    CppExtension(
                        name='extension',
                        sources=['extension.cpp'],
                        extra_compile_args=['-g']),
                ],
                cmdclass={
                    'build_ext': BuildExtension
                })
    '''
    # Extend (rather than replace) any user-supplied paths/libraries.
    include_dirs = kwargs.get('include_dirs', [])
    include_dirs.extend(include_paths())
    kwargs['include_dirs'] = include_dirs

    library_dirs = kwargs.get('library_dirs', [])
    library_dirs.extend(library_paths())
    kwargs['library_dirs'] = library_dirs

    libraries = kwargs.get('libraries', [])
    libraries.extend(['c10', 'torch', 'torch_cpu', 'torch_python'])
    kwargs['libraries'] = libraries

    kwargs['language'] = 'c++'
    return setuptools.Extension(name, sources, *args, **kwargs)
def CUDAExtension(name, sources, *args, **kwargs):
    r'''
    Create a :class:`setuptools.Extension` configured for building mixed
    CUDA/C++ code against PyTorch.

    This is a convenience wrapper: it fills in the CUDA (or, under HIP,
    ROCm) include paths, library paths and runtime link libraries, then
    forwards everything else to the :class:`setuptools.Extension`
    constructor.

    Example:
        >>> from setuptools import setup
        >>> from torch.utils.cpp_extension import BuildExtension, CUDAExtension
        >>> setup(
                name='cuda_extension',
                ext_modules=[
                    CUDAExtension(
                        name='cuda_extension',
                        sources=['extension.cpp', 'extension_kernel.cu'],
                        extra_compile_args={'cxx': ['-g'],
                                            'nvcc': ['-O2']})
                ],
                cmdclass={
                    'build_ext': BuildExtension
                })
    '''
    # Extend (rather than replace) any user-supplied paths/libraries.
    library_dirs = kwargs.get('library_dirs', [])
    library_dirs.extend(library_paths(cuda=True))
    kwargs['library_dirs'] = library_dirs

    libraries = kwargs.get('libraries', [])
    libraries.extend(['c10', 'torch', 'torch_cpu', 'torch_python'])
    if IS_HIP_EXTENSION:
        assert ROCM_VERSION is not None
        # hip_hcc was renamed to amdhip64 in ROCm 3.5.
        libraries.append('amdhip64' if ROCM_VERSION >= (3, 5) else 'hip_hcc')
        libraries.extend(['c10_hip', 'torch_hip'])
    else:
        libraries.extend(['cudart', 'c10_cuda', 'torch_cuda'])
    kwargs['libraries'] = libraries

    include_dirs = kwargs.get('include_dirs', [])
    include_dirs.extend(include_paths(cuda=True))
    kwargs['include_dirs'] = include_dirs

    kwargs['language'] = 'c++'
    return setuptools.Extension(name, sources, *args, **kwargs)
def include_paths(cuda: bool = False) -> List[str]:
    '''
    Get the include paths required to build a C++ or CUDA extension.

    Args:
        cuda: If `True`, includes CUDA-specific include paths.

    Returns:
        A list of include path strings.
    '''
    # torch/utils/ -> torch/ -> torch/include
    torch_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    lib_include = os.path.join(torch_dir, 'include')
    paths = [
        lib_include,
        # Remove this once torch/torch.h is officially no longer supported for C++ extensions.
        os.path.join(lib_include, 'torch', 'csrc', 'api', 'include'),
        # Some internal (old) Torch headers don't properly prefix their includes,
        # so we need to pass -Itorch/lib/include/TH as well.
        os.path.join(lib_include, 'TH'),
        os.path.join(lib_include, 'THC'),
    ]
    if cuda:
        if IS_HIP_EXTENSION:
            paths.append(os.path.join(lib_include, 'THH'))
            paths.append(_join_rocm_home('include'))
            if MIOPEN_HOME is not None:
                paths.append(os.path.join(MIOPEN_HOME, 'include'))
        else:
            cuda_home_include = _join_cuda_home('include')
            # if we have the Debian/Ubuntu packages for cuda, we get /usr as cuda home.
            # but gcc doesn't like having /usr/include passed explicitly
            if cuda_home_include != '/usr/include':
                paths.append(cuda_home_include)
            if CUDNN_HOME is not None:
                paths.append(os.path.join(CUDNN_HOME, 'include'))
    return paths
def library_paths(cuda: bool = False) -> List[str]:
    r'''
    Get the library paths required to build a C++ or CUDA extension.

    Args:
        cuda: If `True`, includes CUDA-specific library paths.

    Returns:
        A list of library path strings.
    '''
    # We need to link against libtorch.so, which lives in torch/lib.
    torch_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    paths = [os.path.join(torch_dir, 'lib')]
    if cuda and IS_HIP_EXTENSION:
        paths.append(_join_rocm_home('lib'))
    elif cuda:
        if IS_WINDOWS:
            lib_dir = 'lib/x64'
        else:
            lib_dir = 'lib64'
            if (not os.path.exists(_join_cuda_home(lib_dir)) and
                    os.path.exists(_join_cuda_home('lib'))):
                # 64-bit CUDA may be installed in 'lib' (see e.g. gh-16955)
                # Note that it's also possible both don't exist (see
                # _find_cuda_home) - in that case we stay with 'lib64'.
                lib_dir = 'lib'
        paths.append(_join_cuda_home(lib_dir))
        if CUDNN_HOME is not None:
            paths.append(os.path.join(CUDNN_HOME, lib_dir))
    return paths
def load(name,
         sources: List[str],
         extra_cflags=None,
         extra_cuda_cflags=None,
         extra_ldflags=None,
         extra_include_paths=None,
         build_directory=None,
         verbose=False,
         with_cuda: Optional[bool] = None,
         is_python_module=True,
         keep_intermediates=True):
    r'''
    Loads a PyTorch C++ extension just-in-time (JIT).
    To load an extension, a Ninja build file is emitted, which is used to
    compile the given sources into a dynamic library. This library is
    subsequently loaded into the current Python process as a module and
    returned from this function, ready for use.
    By default, the directory to which the build file is emitted and the
    resulting library compiled to is ``<tmp>/torch_extensions/<name>``, where
    ``<tmp>`` is the temporary folder on the current platform and ``<name>``
    the name of the extension. This location can be overridden in two ways.
    First, if the ``TORCH_EXTENSIONS_DIR`` environment variable is set, it
    replaces ``<tmp>/torch_extensions`` and all extensions will be compiled
    into subfolders of this directory. Second, if the ``build_directory``
    argument to this function is supplied, it overrides the entire path, i.e.
    the library will be compiled into that folder directly.
    To compile the sources, the default system compiler (``c++``) is used,
    which can be overridden by setting the ``CXX`` environment variable. To pass
    additional arguments to the compilation process, ``extra_cflags`` or
    ``extra_ldflags`` can be provided. For example, to compile your extension
    with optimizations, pass ``extra_cflags=['-O3']``. You can also use
    ``extra_cflags`` to pass further include directories.
    CUDA support with mixed compilation is provided. Simply pass CUDA source
    files (``.cu`` or ``.cuh``) along with other sources. Such files will be
    detected and compiled with nvcc rather than the C++ compiler. This includes
    passing the CUDA lib64 directory as a library directory, and linking
    ``cudart``. You can pass additional flags to nvcc via
    ``extra_cuda_cflags``, just like with ``extra_cflags`` for C++. Various
    heuristics for finding the CUDA install directory are used, which usually
    work fine. If not, setting the ``CUDA_HOME`` environment variable is the
    safest option.
    Args:
        name: The name of the extension to build. This MUST be the same as the
            name of the pybind11 module!
        sources: A list of relative or absolute paths to C++ source files.
        extra_cflags: optional list of compiler flags to forward to the build.
        extra_cuda_cflags: optional list of compiler flags to forward to nvcc
            when building CUDA sources.
        extra_ldflags: optional list of linker flags to forward to the build.
        extra_include_paths: optional list of include directories to forward
            to the build.
        build_directory: optional path to use as build workspace.
        verbose: If ``True``, turns on verbose logging of load steps.
        with_cuda: Determines whether CUDA headers and libraries are added to
            the build. If set to ``None`` (default), this value is
            automatically determined based on the existence of ``.cu`` or
            ``.cuh`` in ``sources``. Set it to ``True`` to force CUDA headers
            and libraries to be included.
        is_python_module: If ``True`` (default), imports the produced shared
            library as a Python module. If ``False``, loads it into the process
            as a plain dynamic library.
        keep_intermediates: If ``False``, intermediate files generated during
            the build are cleaned up after compilation.
    Returns:
        If ``is_python_module`` is ``True``, returns the loaded PyTorch
        extension as a Python module. If ``is_python_module`` is ``False``
        returns nothing (the shared library is loaded into the process as a side
        effect).
    Example:
        >>> from torch.utils.cpp_extension import load
        >>> module = load(
                name='extension',
                sources=['extension.cpp', 'extension_kernel.cu'],
                extra_cflags=['-O2'],
                verbose=True)
    '''
    # A single source path given as a string is normalized to a list; all
    # other work is delegated to the shared JIT-compilation pipeline.
    return _jit_compile(
        name,
        [sources] if isinstance(sources, str) else sources,
        extra_cflags,
        extra_cuda_cflags,
        extra_ldflags,
        extra_include_paths,
        build_directory or _get_build_directory(name, verbose),
        verbose,
        with_cuda,
        is_python_module,
        keep_intermediates=keep_intermediates)
def load_inline(name,
                cpp_sources,
                cuda_sources=None,
                functions=None,
                extra_cflags=None,
                extra_cuda_cflags=None,
                extra_ldflags=None,
                extra_include_paths=None,
                build_directory=None,
                verbose=False,
                with_cuda=None,
                is_python_module=True,
                with_pytorch_error_handling=True,
                keep_intermediates=True):
    r'''
    Loads a PyTorch C++ extension just-in-time (JIT) from string sources.
    This function behaves exactly like :func:`load`, but takes its sources as
    strings rather than filenames. These strings are stored to files in the
    build directory, after which the behavior of :func:`load_inline` is
    identical to :func:`load`.
    See `the
    tests <https://github.com/pytorch/pytorch/blob/master/test/test_cpp_extensions.py>`_
    for good examples of using this function.
    Sources may omit two required parts of a typical non-inline C++ extension:
    the necessary header includes, as well as the (pybind11) binding code. More
    precisely, strings passed to ``cpp_sources`` are first concatenated into a
    single ``.cpp`` file. This file is then prepended with ``#include
    <torch/extension.h>``.
    Furthermore, if the ``functions`` argument is supplied, bindings will be
    automatically generated for each function specified. ``functions`` can
    either be a list of function names, or a dictionary mapping from function
    names to docstrings. If a list is given, the name of each function is used
    as its docstring.
    The sources in ``cuda_sources`` are concatenated into a separate ``.cu``
    file and prepended with ``torch/types.h``, ``cuda.h`` and
    ``cuda_runtime.h`` includes. The ``.cpp`` and ``.cu`` files are compiled
    separately, but ultimately linked into a single library. Note that no
    bindings are generated for functions in ``cuda_sources`` per se. To bind
    to a CUDA kernel, you must create a C++ function that calls it, and either
    declare or define this C++ function in one of the ``cpp_sources`` (and
    include its name in ``functions``).
    See :func:`load` for a description of arguments omitted below.
    Args:
        cpp_sources: A string, or list of strings, containing C++ source code.
        cuda_sources: A string, or list of strings, containing CUDA source code.
        functions: A list of function names for which to generate function
            bindings. If a dictionary is given, it should map function names to
            docstrings (which are otherwise just the function names).
        with_cuda: Determines whether CUDA headers and libraries are added to
            the build. If set to ``None`` (default), this value is
            automatically determined based on whether ``cuda_sources`` is
            provided. Set it to ``True`` to force CUDA headers
            and libraries to be included.
        with_pytorch_error_handling: Determines whether pytorch error and
            warning macros are handled by pytorch instead of pybind. To do
            this, each function ``foo`` is called via an intermediary ``_safe_foo``
            function. This redirection might cause issues in obscure cases
            of cpp. This flag should be set to ``False`` when this redirect
            causes issues.
    Example:
        >>> from torch.utils.cpp_extension import load_inline
        >>> source = \'\'\'
        at::Tensor sin_add(at::Tensor x, at::Tensor y) {
          return x.sin() + y.sin();
        }
        \'\'\'
        >>> module = load_inline(name='inline_extension',
                                 cpp_sources=[source],
                                 functions=['sin_add'])
    .. note::
        By default, the Ninja backend uses #CPUS + 2 workers to build the
        extension. This may use up too many resources on some systems. One
        can control the number of workers by setting the `MAX_JOBS` environment
        variable to a non-negative number.
    '''
    build_directory = build_directory or _get_build_directory(name, verbose)
    # Normalize both source arguments to lists of strings.
    # NOTE(review): caller-provided lists are mutated below via insert();
    # pass copies if you need to reuse them.
    if isinstance(cpp_sources, str):
        cpp_sources = [cpp_sources]
    cuda_sources = cuda_sources or []
    if isinstance(cuda_sources, str):
        cuda_sources = [cuda_sources]
    # torch/extension.h provides the headers and pybind11 machinery the
    # generated bindings below rely on.
    cpp_sources.insert(0, '#include <torch/extension.h>')
    # If `functions` is supplied, we create the pybind11 bindings for the user.
    # Here, `functions` is (or becomes, after some processing) a map from
    # function names to function docstrings.
    if functions is not None:
        module_def = []
        module_def.append('PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {')
        if isinstance(functions, str):
            functions = [functions]
        if isinstance(functions, list):
            # Make the function docstring the same as the function name.
            functions = dict((f, f) for f in functions)
        elif not isinstance(functions, dict):
            raise ValueError(
                "Expected 'functions' to be a list or dict, but was {}".format(
                    type(functions)))
        for function_name, docstring in functions.items():
            if with_pytorch_error_handling:
                # Route calls through torch::wrap_pybind_function so PyTorch
                # error/warning macros are handled by PyTorch, not pybind.
                module_def.append(
                    'm.def("{0}", torch::wrap_pybind_function({0}), "{1}");'
                    .format(function_name, docstring))
            else:
                module_def.append('m.def("{0}", {0}, "{1}");'.format(function_name, docstring))
        module_def.append('}')
        cpp_sources += module_def
    # Materialize the concatenated C++ source into the build directory.
    cpp_source_path = os.path.join(build_directory, 'main.cpp')
    with open(cpp_source_path, 'w') as cpp_source_file:
        cpp_source_file.write('\n'.join(cpp_sources))
    sources = [cpp_source_path]
    if cuda_sources:
        # The .cu file gets its own minimal set of CUDA includes and is
        # compiled separately from the .cpp file.
        cuda_sources.insert(0, '#include <torch/types.h>')
        cuda_sources.insert(1, '#include <cuda.h>')
        cuda_sources.insert(2, '#include <cuda_runtime.h>')
        cuda_source_path = os.path.join(build_directory, 'cuda.cu')
        with open(cuda_source_path, 'w') as cuda_source_file:
            cuda_source_file.write('\n'.join(cuda_sources))
        sources.append(cuda_source_path)
    # From here on this is identical to load(): compile and import.
    return _jit_compile(
        name,
        sources,
        extra_cflags,
        extra_cuda_cflags,
        extra_ldflags,
        extra_include_paths,
        build_directory,
        verbose,
        with_cuda,
        is_python_module,
        keep_intermediates=keep_intermediates)
def _jit_compile(name,
                 sources,
                 extra_cflags,
                 extra_cuda_cflags,
                 extra_ldflags,
                 extra_include_paths,
                 build_directory: str,
                 verbose: bool,
                 with_cuda: Optional[bool],
                 is_python_module,
                 keep_intermediates=True):
    # Shared JIT pipeline behind load()/load_inline(): version the inputs,
    # build (under a file lock) only if something changed, then import the
    # resulting library.
    # Returns the loaded module when `is_python_module` is true, else None
    # (the previous `-> None` annotation was incorrect).
    if with_cuda is None:
        # Auto-detect CUDA from the source file extensions.
        with_cuda = any(map(_is_cuda_file, sources))
    with_cudnn = any(['cudnn' in f for f in extra_ldflags or []])
    # Bump the extension version if sources or build arguments changed, so a
    # stale cached build is never reused.
    old_version = JIT_EXTENSION_VERSIONER.get_version(name)
    version = JIT_EXTENSION_VERSIONER.bump_version_if_changed(
        name,
        sources,
        build_arguments=[extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths],
        build_directory=build_directory,
        with_cuda=with_cuda
    )
    if version > 0:
        if version != old_version and verbose:
            print('The input conditions for extension module {} have changed. '.format(name) +
                  'Bumping to version {0} and re-building as {1}_v{0}...'.format(version, name))
        name = '{}_v{}'.format(name, version)
    if version != old_version:
        # A file-based baton ensures only one process builds; others wait.
        baton = FileBaton(os.path.join(build_directory, 'lock'))
        if baton.try_acquire():
            try:
                with GeneratedFileCleaner(keep_intermediates=keep_intermediates) as clean_ctx:
                    if IS_HIP_EXTENSION and (with_cuda or with_cudnn):
                        # Translate CUDA sources to HIP before building.
                        hipify_python.hipify(
                            project_directory=build_directory,
                            output_directory=build_directory,
                            includes=os.path.join(build_directory, '*'),
                            extra_files=[os.path.abspath(s) for s in sources],
                            show_detailed=verbose,
                            is_pytorch_extension=True,
                            clean_ctx=clean_ctx
                        )
                    _write_ninja_file_and_build_library(
                        name=name,
                        sources=sources,
                        extra_cflags=extra_cflags or [],
                        extra_cuda_cflags=extra_cuda_cflags or [],
                        extra_ldflags=extra_ldflags or [],
                        extra_include_paths=extra_include_paths or [],
                        build_directory=build_directory,
                        verbose=verbose,
                        with_cuda=with_cuda)
            finally:
                baton.release()
        else:
            baton.wait()
    elif verbose:
        print('No modifications detected for re-loaded extension '
              'module {}, skipping build step...'.format(name))
    if verbose:
        print(f'Loading extension module {name}...')
    return _import_module_from_library(name, build_directory, is_python_module)
def _write_ninja_file_and_compile_objects(
        sources: List[str],
        objects,
        cflags,
        post_cflags,
        cuda_cflags,
        cuda_post_cflags,
        build_directory: str,
        verbose: bool,
        with_cuda: Optional[bool]) -> None:
    """Emit a ninja build file for *sources* and compile them into *objects*.

    No linking is performed (``library_target=None``); this only produces
    object files. Requires ninja and an ABI-compatible C++ compiler.
    """
    verify_ninja_availability()
    compiler = os.environ.get('CXX', 'cl' if IS_WINDOWS else 'c++')
    check_compiler_abi_compatibility(compiler)
    if with_cuda is None:
        # Auto-detect CUDA from the source file extensions.
        with_cuda = any(_is_cuda_file(source) for source in sources)
    build_file_path = os.path.join(build_directory, 'build.ninja')
    if verbose:
        print('Emitting ninja build file {}...'.format(build_file_path))
    _write_ninja_file(
        path=build_file_path,
        cflags=cflags,
        post_cflags=post_cflags,
        cuda_cflags=cuda_cflags,
        cuda_post_cflags=cuda_post_cflags,
        sources=sources,
        objects=objects,
        ldflags=None,
        library_target=None,
        with_cuda=with_cuda)
    if verbose:
        print('Compiling objects...')
    _run_ninja_build(
        build_directory,
        verbose,
        # It would be better if we could tell users the name of the extension
        # that failed to build but there isn't a good way to get it here.
        error_prefix='Error compiling objects for extension')
def _write_ninja_file_and_build_library(
        name,
        sources: List[str],
        extra_cflags,
        extra_cuda_cflags,
        extra_ldflags,
        extra_include_paths,
        build_directory: str,
        verbose: bool,
        with_cuda: Optional[bool]) -> None:
    """Emit a ninja build file for extension *name* and build the library.

    Requires ninja and an ABI-compatible C++ compiler; raises a RuntimeError
    with the ninja output if the build fails.
    """
    verify_ninja_availability()
    compiler = os.environ.get('CXX', 'cl' if IS_WINDOWS else 'c++')
    check_compiler_abi_compatibility(compiler)
    if with_cuda is None:
        # Auto-detect CUDA from the source file extensions.
        with_cuda = any(_is_cuda_file(source) for source in sources)
    extra_ldflags = _prepare_ldflags(extra_ldflags or [], with_cuda, verbose)
    build_file_path = os.path.join(build_directory, 'build.ninja')
    if verbose:
        print('Emitting ninja build file {}...'.format(build_file_path))
    # NOTE: Emitting a new ninja build file does not cause re-compilation if
    # the sources did not change, so it's ok to re-emit (and it's fast).
    _write_ninja_file_to_build_library(
        path=build_file_path,
        name=name,
        sources=sources,
        extra_cflags=extra_cflags or [],
        extra_cuda_cflags=extra_cuda_cflags or [],
        extra_ldflags=extra_ldflags or [],
        extra_include_paths=extra_include_paths or [],
        with_cuda=with_cuda)
    if verbose:
        print('Building extension module {}...'.format(name))
    _run_ninja_build(
        build_directory,
        verbose,
        error_prefix="Error building extension '{}'".format(name))
def is_ninja_available():
    r'''
    Returns ``True`` if the `ninja <https://ninja-build.org/>`_ build system is
    available on the system, ``False`` otherwise.
    '''
    try:
        # We only care whether the command runs; the version output is discarded.
        subprocess.check_output('ninja --version'.split())
    except (OSError, subprocess.CalledProcessError):
        # OSError: the ninja binary is missing. CalledProcessError: ninja
        # exists but exited with a non-zero status. The original code only
        # caught OSError, so a broken ninja install crashed this probe
        # instead of reporting unavailability.
        return False
    else:
        return True
def verify_ninja_availability():
    r'''
    Raises ``RuntimeError`` if `ninja <https://ninja-build.org/>`_ build system is not
    available on the system, does nothing otherwise.
    '''
    if is_ninja_available():
        return
    raise RuntimeError("Ninja is required to load C++ extensions")
def _prepare_ldflags(extra_ldflags, with_cuda, verbose):
    # Append the platform-appropriate PyTorch (and optionally CUDA/ROCm)
    # linker flags to `extra_ldflags` and return it. The list is mutated
    # in place; flag order is significant for linking and must be preserved.
    here = os.path.abspath(__file__)
    torch_path = os.path.dirname(os.path.dirname(here))
    lib_path = os.path.join(torch_path, 'lib')
    if IS_WINDOWS:
        # MSVC-style flags: bare .lib names plus /LIBPATH: search dirs.
        python_path = os.path.dirname(sys.executable)
        python_lib_path = os.path.join(python_path, 'libs')
        extra_ldflags.append('c10.lib')
        if with_cuda:
            extra_ldflags.append('c10_cuda.lib')
        extra_ldflags.append('torch_cpu.lib')
        if with_cuda:
            extra_ldflags.append('torch_cuda.lib')
            # /INCLUDE is used to ensure torch_cuda is linked against in a project that relies on it.
            # Related issue: https://github.com/pytorch/pytorch/issues/31611
            extra_ldflags.append('-INCLUDE:?warp_size@cuda@at@@YAHXZ')
        extra_ldflags.append('torch.lib')
        extra_ldflags.append('torch_python.lib')
        extra_ldflags.append('/LIBPATH:{}'.format(python_lib_path))
        extra_ldflags.append('/LIBPATH:{}'.format(lib_path))
    else:
        # GCC/Clang-style flags: -L search dirs and -l library names.
        extra_ldflags.append('-L{}'.format(lib_path))
        extra_ldflags.append('-lc10')
        if with_cuda:
            extra_ldflags.append('-lc10_hip' if IS_HIP_EXTENSION else '-lc10_cuda')
        extra_ldflags.append('-ltorch_cpu')
        if with_cuda:
            extra_ldflags.append('-ltorch_hip' if IS_HIP_EXTENSION else '-ltorch_cuda')
        extra_ldflags.append('-ltorch')
        extra_ldflags.append('-ltorch_python')
    if with_cuda:
        # Add the CUDA (or ROCm) runtime library and its search path.
        if verbose:
            print('Detected CUDA files, patching ldflags')
        if IS_WINDOWS:
            extra_ldflags.append('/LIBPATH:{}'.format(
                _join_cuda_home('lib/x64')))
            extra_ldflags.append('cudart.lib')
            if CUDNN_HOME is not None:
                extra_ldflags.append(os.path.join(CUDNN_HOME, 'lib/x64'))
        elif not IS_HIP_EXTENSION:
            extra_ldflags.append('-L{}'.format(_join_cuda_home('lib64')))
            extra_ldflags.append('-lcudart')
            if CUDNN_HOME is not None:
                extra_ldflags.append('-L{}'.format(os.path.join(CUDNN_HOME, 'lib64')))
        elif IS_HIP_EXTENSION:
            assert ROCM_VERSION is not None
            extra_ldflags.append('-L{}'.format(_join_rocm_home('lib')))
            # hip_hcc was renamed to amdhip64 in ROCm 3.5.
            extra_ldflags.append('-lamdhip64' if ROCM_VERSION >= (3, 5) else '-lhip_hcc')
    return extra_ldflags
def _get_cuda_arch_flags(cflags: Optional[List[str]] = None) -> List[str]:
r'''
Determine CUDA arch flags to use.
For an arch, say "6.1", the added compile flag will be
``-gencode=arch=compute_61,code=sm_61``.
For an added "+PTX", an additional
``-gencode=arch=compute_xx,code=compute_xx`` is added.
See select_compute_arch.cmake for corresponding named and supported arches
when building with CMake.
'''
# If cflags is given, there may already be user-provided arch flags in it
# (from `extra_compile_args`)
if cflags is not None:
for flag in cflags:
if 'arch' in flag:
return []
# Note: keep combined names ("arch1+arch2") above single names, otherwise
# string replacement may not do the right thing
named_arches = collections.OrderedDict([
('Kepler+Tesla', '3.7'),
('Kepler', '3.5+PTX'),
('Maxwell+Tegra', '5.3'),
('Maxwell', '5.0;5.2+PTX'),
('Pascal', '6.0;6.1+PTX'),
('Volta', '7.0+PTX'),
('Turing', '7.5+PTX'),
('Ampere', '8.0;8.6+PTX'),
])
supported_arches = ['3.5', '3.7', '5.0', '5.2', '5.3', '6.0', '6.1', '6.2',
'7.0', '7.2', '7.5', '8.0', '8.6']
valid_arch_strings = supported_arches + [s + "+PTX" for s in supported_arches]
# The default is sm_30 for CUDA 9.x and 10.x
# First check for an env var (same as used by the main setup.py)
# Can be one or more architectures, e.g. "6.1" or "3.5;5.2;6.0;6.1;7.0+PTX"
# See cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
_arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
# If not given, determine what's needed for the GPU that can be found
if not _arch_list:
capability = torch.cuda.get_device_capability()
arch_list = ['{}.{}'.format(capability[0], capability[1])]
else:
# Deal with lists that are ' ' separated (only deal with ';' after)
_arch_list = _arch_list.replace(' ', ';')
# Expand named arches
for named_arch, archval in named_arches.items():
_arch_list = _arch_list.replace(named_arch, archval)
arch_list = _arch_list.split(';')
flags = []
for arch in arch_list:
if arch not in valid_arch_strings:
raise ValueError("Unknown CUDA arch ({}) or GPU not supported".format(arch))
else:
num = arch[0] + arch[2]
flags.append('-gencode=arch=compute_{},code=sm_{}'.format(num, num))
if arch.endswith('+PTX'):
flags.append('-gencode=arch=compute_{},code=compute_{}'.format(num, num))
return list(set(flags))
def _get_rocm_arch_flags(cflags: Optional[List[str]] = None) -> List[str]:
# If cflags is given, there may already be user-provided arch flags in it
# (from `extra_compile_args`)
if cflags is not None:
for flag in cflags:
if 'amdgpu-target' in flag:
return ['-fno-gpu-rdc']
return [
'--amdgpu-target=gfx803',
'--amdgpu-target=gfx900',
'--amdgpu-target=gfx906',
'--amdgpu-target=gfx908',
'-fno-gpu-rdc'
]
def _get_build_directory(name: str, verbose: bool) -> str:
root_extensions_directory = os.environ.get('TORCH_EXTENSIONS_DIR')
if root_extensions_directory is None:
root_extensions_directory = get_default_build_root()
if verbose:
print('Using {} as PyTorch extensions root...'.format(
root_extensions_directory))
build_directory = os.path.join(root_extensions_directory, name)
if not os.path.exists(build_directory):
if verbose:
print(f'Creating extension directory {build_directory}...')
# This is like mkdir -p, i.e. will also create parent directories.
os.makedirs(build_directory, exist_ok=True)
return build_directory
def _get_num_workers(verbose: bool) -> Optional[int]:
max_jobs = os.environ.get('MAX_JOBS')
if max_jobs is not None and max_jobs.isdigit():
if verbose:
print('Using envvar MAX_JOBS ({}) as the number of workers...'.format(max_jobs))
return int(max_jobs)
if verbose:
print('Allowing ninja to set a default number of workers... '
'(overridable by setting the environment variable MAX_JOBS=N)')
return None
def _run_ninja_build(build_directory: str, verbose: bool, error_prefix: str) -> None:
    # Invoke `ninja -v` inside `build_directory`, raising a RuntimeError
    # prefixed with `error_prefix` (and including ninja's output) on failure.
    command = ['ninja', '-v']
    num_workers = _get_num_workers(verbose)
    if num_workers is not None:
        command.extend(['-j', str(num_workers)])
    env = os.environ.copy()
    # Try to activate the vc env for the users
    if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' not in env:
        from distutils.util import get_platform
        from distutils._msvccompiler import _get_vc_env
        plat_name = get_platform()
        plat_spec = PLAT_TO_VCVARS[plat_name]
        vc_env = _get_vc_env(plat_spec)
        # Merge, preferring the VC env's values; env var names are
        # case-insensitive on Windows, hence the upper-casing.
        vc_env = {k.upper(): v for k, v in vc_env.items()}
        for k, v in env.items():
            uk = k.upper()
            if uk not in vc_env:
                vc_env[uk] = v
        env = vc_env
    try:
        sys.stdout.flush()
        sys.stderr.flush()
        # NOTE(review): this version check is vestigial — PyTorch requires
        # Python 3.5+, so the subprocess.run branch is always taken.
        if sys.version_info >= (3, 5):
            # Warning: don't pass stdout=None to subprocess.run to get output.
            # subprocess.run assumes that sys.__stdout__ has not been modified and
            # attempts to write to it by default. However, when we call _run_ninja_build
            # from ahead-of-time cpp extensions, the following happens:
            # 1) If the stdout encoding is not utf-8, setuptools detachs __stdout__.
            #    https://github.com/pypa/setuptools/blob/7e97def47723303fafabe48b22168bbc11bb4821/setuptools/dist.py#L1110
            #    (it probably shouldn't do this)
            # 2) subprocess.run (on POSIX, with no stdout override) relies on
            #    __stdout__ not being detached:
            #    https://github.com/python/cpython/blob/c352e6c7446c894b13643f538db312092b351789/Lib/subprocess.py#L1214
            # To work around this, we pass in the fileno directly and hope that
            # it is valid.
            stdout_fileno = 1
            subprocess.run(
                command,
                stdout=stdout_fileno if verbose else subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd=build_directory,
                check=True,
                env=env)
        else:
            subprocess.check_output(
                command,
                stderr=subprocess.STDOUT,
                cwd=build_directory,
                env=env)
    except subprocess.CalledProcessError as e:
        # Python 2 and 3 compatible way of getting the error object.
        _, error, _ = sys.exc_info()
        # error.output contains the stdout and stderr of the build attempt.
        message = error_prefix
        # `error` is a CalledProcessError (which has an `output`) attribute, but
        # mypy thinks it's Optional[BaseException] and doesn't narrow
        if hasattr(error, 'output') and error.output:  # type: ignore
            message += ": {}".format(error.output.decode())  # type: ignore
        raise RuntimeError(message) from e
def _import_module_from_library(module_name, path, is_python_module):
    # Import the freshly built shared library: as a Python module when
    # `is_python_module` is true (the module object is returned), otherwise
    # as a plain dynamic library via torch.ops (returns None).
    # NOTE(review): the `imp` module is deprecated since Python 3.4; a
    # migration to importlib would subtly change loading behavior, so it is
    # deliberately left as-is here. TODO: migrate when dropping old Pythons.
    # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
    file, path, description = imp.find_module(module_name, [path])
    # Close the .so file after load.
    with file:
        if is_python_module:
            return imp.load_module(module_name, file, path, description)
        else:
            torch.ops.load_library(path)
def _write_ninja_file_to_build_library(path,
                                       name,
                                       sources,
                                       extra_cflags,
                                       extra_cuda_cflags,
                                       extra_ldflags,
                                       extra_include_paths,
                                       with_cuda) -> None:
    # Assemble the full compiler/linker flag sets for extension `name` and
    # delegate to _write_ninja_file to emit the ninja build file at `path`.
    # Flag construction order matters (later flags can override earlier ones),
    # so statements below should not be reordered.
    extra_cflags = [flag.strip() for flag in extra_cflags]
    extra_cuda_cflags = [flag.strip() for flag in extra_cuda_cflags]
    extra_ldflags = [flag.strip() for flag in extra_ldflags]
    extra_include_paths = [flag.strip() for flag in extra_include_paths]
    # Turn into absolute paths so we can emit them into the ninja build
    # file wherever it is.
    user_includes = [os.path.abspath(file) for file in extra_include_paths]
    # include_paths() gives us the location of torch/extension.h
    system_includes = include_paths(with_cuda)
    # sysconfig.get_paths()['include'] gives us the location of Python.h
    system_includes.append(sysconfig.get_paths()['include'])
    # Windows does not understand `-isystem`.
    if IS_WINDOWS:
        user_includes += system_includes
        system_includes.clear()
    common_cflags = ['-DTORCH_EXTENSION_NAME={}'.format(name)]
    common_cflags.append('-DTORCH_API_INCLUDE_EXTENSION_H')
    # Note [Pybind11 ABI constants]
    #
    # Pybind11 before 2.4 used to build an ABI strings using the following pattern:
    # f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_BUILD_TYPE}__"
    # Since 2.4 compiler type, stdlib and build abi parameters are also encoded like this:
    # f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_COMPILER_TYPE}{PYBIND11_STDLIB}{PYBIND11_BUILD_ABI}{PYBIND11_BUILD_TYPE}__"
    #
    # This was done in order to further narrow down the chances of compiler ABI incompatibility
    # that can cause a hard to debug segfaults.
    # For PyTorch extensions we want to relax those restrictions and pass compiler, stdlib and abi properties
    # captured during PyTorch native library compilation in torch/csrc/Module.cpp
    for pname in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]:
        pval = getattr(torch._C, f"_PYBIND11_{pname}")
        if pval is not None and not IS_WINDOWS:
            common_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
    common_cflags += [f'-I{include}' for include in user_includes]
    common_cflags += [f'-isystem {include}' for include in system_includes]
    common_cflags += ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
    if IS_WINDOWS:
        cflags = common_cflags + COMMON_MSVC_FLAGS + extra_cflags
        cflags = _nt_quote_args(cflags)
    else:
        cflags = common_cflags + ['-fPIC', '-std=c++14'] + extra_cflags
    if with_cuda and IS_HIP_EXTENSION:
        cuda_flags = ['-DWITH_HIP'] + cflags + COMMON_HIPCC_FLAGS
        cuda_flags += extra_cuda_cflags
        cuda_flags += _get_rocm_arch_flags(cuda_flags)
        # CUDA sources were hipified; point at the translated file paths.
        sources = [s if not _is_cuda_file(s) else
                   os.path.abspath(os.path.join(
                       path, get_hip_file_path(os.path.relpath(s, path))))
                   for s in sources]
    elif with_cuda:
        cuda_flags = common_cflags + COMMON_NVCC_FLAGS + _get_cuda_arch_flags()
        if IS_WINDOWS:
            # Host-compiler flags must be forwarded through nvcc's -Xcompiler.
            for flag in COMMON_MSVC_FLAGS:
                cuda_flags = ['-Xcompiler', flag] + cuda_flags
            for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS:
                cuda_flags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cuda_flags
            cuda_flags = _nt_quote_args(cuda_flags)
            cuda_flags += _nt_quote_args(extra_cuda_cflags)
        else:
            cuda_flags += ['--compiler-options', "'-fPIC'"]
            cuda_flags += extra_cuda_cflags
            if not any(flag.startswith('-std=') for flag in cuda_flags):
                cuda_flags.append('-std=c++14')
            if os.getenv("CC") is not None:
                # Honor a user-selected host compiler for nvcc.
                cuda_flags = ['-ccbin', os.getenv("CC")] + cuda_flags
    else:
        cuda_flags = None

    def object_file_path(source_file: str) -> str:
        # '/path/to/file.cpp' -> 'file'
        file_name = os.path.splitext(os.path.basename(source_file))[0]
        if _is_cuda_file(source_file) and with_cuda:
            # Use a different object filename in case a C++ and CUDA file have
            # the same filename but different extension (.cpp vs. .cu).
            target = '{}.cuda.o'.format(file_name)
        else:
            target = '{}.o'.format(file_name)
        return target

    objects = list(map(object_file_path, sources))
    if IS_WINDOWS:
        ldflags = ['/DLL'] + extra_ldflags
    else:
        ldflags = ['-shared'] + extra_ldflags
    # The darwin linker needs explicit consent to ignore unresolved symbols.
    if sys.platform.startswith('darwin'):
        ldflags.append('-undefined dynamic_lookup')
    elif IS_WINDOWS:
        ldflags = _nt_quote_args(ldflags)
    ext = 'pyd' if IS_WINDOWS else 'so'
    library_target = '{}.{}'.format(name, ext)
    _write_ninja_file(
        path=path,
        cflags=cflags,
        post_cflags=None,
        cuda_cflags=cuda_flags,
        cuda_post_cflags=None,
        sources=sources,
        objects=objects,
        ldflags=ldflags,
        library_target=library_target,
        with_cuda=with_cuda)
def _write_ninja_file(path,
                      cflags,
                      post_cflags,
                      cuda_cflags,
                      cuda_post_cflags,
                      sources,
                      objects,
                      ldflags,
                      library_target,
                      with_cuda) -> None:
    r"""Write a ninja file that does the desired compiling and linking.
    `path`: Where to write this file
    `cflags`: list of flags to pass to $cxx. Can be None.
    `post_cflags`: list of flags to append to the $cxx invocation. Can be None.
    `cuda_cflags`: list of flags to pass to $nvcc. Can be None.
    `cuda_post_cflags`: list of flags to append to the $nvcc invocation. Can be None.
    `sources`: list of paths to source files
    `objects`: list of desired paths to objects, one per source.
    `ldflags`: list of flags to pass to linker. Can be None.
    `library_target`: Name of the output library. Can be None; in that case,
                      we do no linking.
    `with_cuda`: If we should be compiling with CUDA.
    """
    def sanitize_flags(flags):
        # Normalise a possibly-None flag list: missing -> empty, and strip
        # stray whitespace so the flags join cleanly into one ninja line.
        if flags is None:
            return []
        else:
            return [flag.strip() for flag in flags]
    cflags = sanitize_flags(cflags)
    post_cflags = sanitize_flags(post_cflags)
    cuda_cflags = sanitize_flags(cuda_cflags)
    cuda_post_cflags = sanitize_flags(cuda_post_cflags)
    ldflags = sanitize_flags(ldflags)
    # Sanity checks...
    assert len(sources) == len(objects)
    assert len(sources) > 0
    # Honour $CXX if set; default to MSVC's ``cl`` on Windows, ``c++`` elsewhere.
    if IS_WINDOWS:
        compiler = os.environ.get('CXX', 'cl')
    else:
        compiler = os.environ.get('CXX', 'c++')
    # Version 1.3 is required for the `deps` directive.
    config = ['ninja_required_version = 1.3']
    config.append('cxx = {}'.format(compiler))
    if with_cuda:
        # Under a HIP build the "nvcc" ninja variable actually points at hipcc.
        if IS_HIP_EXTENSION:
            nvcc = _join_rocm_home('bin', 'hipcc')
        else:
            nvcc = _join_cuda_home('bin', 'nvcc')
        config.append('nvcc = {}'.format(nvcc))
    # One ninja variable per flag group; CUDA variables only when needed.
    flags = ['cflags = {}'.format(' '.join(cflags))]
    flags.append('post_cflags = {}'.format(' '.join(post_cflags)))
    if with_cuda:
        flags.append('cuda_cflags = {}'.format(' '.join(cuda_cflags)))
        flags.append('cuda_post_cflags = {}'.format(' '.join(cuda_post_cflags)))
    flags.append('ldflags = {}'.format(' '.join(ldflags)))
    # Turn into absolute paths so we can emit them into the ninja build
    # file wherever it is.
    sources = [os.path.abspath(file) for file in sources]
    # See https://ninja-build.org/build.ninja.html for reference.
    compile_rule = ['rule compile']
    if IS_WINDOWS:
        # /showIncludes plus ``deps = msvc`` lets ninja harvest header
        # dependencies straight from cl's output (no depfile needed).
        compile_rule.append(
            ' command = cl /showIncludes $cflags -c $in /Fo$out $post_cflags')
        compile_rule.append(' deps = msvc')
    else:
        # -MMD/-MF emit a Makefile-style depfile that ninja consumes.
        compile_rule.append(
            ' command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags')
        compile_rule.append(' depfile = $out.d')
        compile_rule.append(' deps = gcc')
    if with_cuda:
        cuda_compile_rule = ['rule cuda_compile']
        cuda_compile_rule.append(
            ' command = $nvcc $cuda_cflags -c $in -o $out $cuda_post_cflags')
    # Emit one build rule per source to enable incremental build.
    build = []
    for source_file, object_file in zip(sources, objects):
        is_cuda_source = _is_cuda_file(source_file) and with_cuda
        rule = 'cuda_compile' if is_cuda_source else 'compile'
        if IS_WINDOWS:
            # Ninja syntax requires ':' (drive letters) to be escaped.
            source_file = source_file.replace(':', '$:')
            object_file = object_file.replace(':', '$:')
        # Spaces in paths must be escaped on every platform.
        source_file = source_file.replace(" ", "$ ")
        object_file = object_file.replace(" ", "$ ")
        build.append('build {}: {} {}'.format(object_file, rule, source_file))
    if library_target is not None:
        link_rule = ['rule link']
        if IS_WINDOWS:
            # link.exe lives next to cl.exe, so locate cl and reuse its dir.
            cl_paths = subprocess.check_output(['where',
                                                'cl']).decode().split('\r\n')
            if len(cl_paths) >= 1:
                cl_path = os.path.dirname(cl_paths[0]).replace(':', '$:')
            else:
                raise RuntimeError("MSVC is required to load C++ extensions")
            link_rule.append(
                ' command = "{}/link.exe" $in /nologo $ldflags /out:$out'.format(
                    cl_path))
        else:
            link_rule.append(' command = $cxx $in $ldflags -o $out')
        link = ['build {}: link {}'.format(library_target, ' '.join(objects))]
        default = ['default {}'.format(library_target)]
    else:
        # No library target: emit only the compile rules, no link/default.
        link_rule, link, default = [], [], []
    # 'Blocks' should be separated by newlines, for visual benefit.
    blocks = [config, flags, compile_rule]
    if with_cuda:
        blocks.append(cuda_compile_rule)
    blocks += [link_rule, build, link, default]
    with open(path, 'w') as build_file:
        for block in blocks:
            lines = '\n'.join(block)
            build_file.write('{}\n\n'.format(lines))
def _join_cuda_home(*paths) -> str:
    r'''Join *paths* onto ``CUDA_HOME``.

    Raising only here — rather than at import time — means a missing
    $CUDA_HOME is reported lazily, the first time a CUDA-specific path
    is actually required.
    '''
    if CUDA_HOME is not None:
        return os.path.join(CUDA_HOME, *paths)
    raise EnvironmentError('CUDA_HOME environment variable is not set. '
                           'Please set it to your CUDA install root.')
def _is_cuda_file(path: str) -> bool:
    """Return True when *path* has a CUDA extension (plus .hip under HIP)."""
    extensions = {'.cu', '.cuh'}
    if IS_HIP_EXTENSION:
        extensions.add('.hip')
    return os.path.splitext(path)[1] in extensions
| [
"59596379+khado2359@users.noreply.github.com"
] | 59596379+khado2359@users.noreply.github.com |
1ef30fbace05797d49c902a596b908d39fd97ed2 | f889bc01147869459c0a516382e7b95221295a7b | /test/test_body_64.py | d35630adc76fa2b92c13394be8a3dbecbb91f624 | [] | no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.body_64 import Body64
class TestBody64(unittest.TestCase):
    """Unit-test stubs for the generated ``Body64`` model."""

    def setUp(self):
        """No fixtures are needed for these generated stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testBody64(self):
        """Placeholder test for Body64.

        FIXME: construct the object with mandatory attributes using example
        values, e.g. ``swagger_client.models.body_64.Body64()``.
        """
        pass
# Allow running this module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"sander@wildatheart.eu"
] | sander@wildatheart.eu |
60efe30010d22cb52db0a4c4d10f599a859c97e4 | bcf85b8dc5aa1c98af4a61c0404b0be7563ebfee | /ecommerc2-API/src/orders/migrations/0005_auto_20170124_1611.py | ca9e35c278df75c8f47761c1a3bba7a3ef69730f | [] | no_license | karolius/ecommerc2-api | 39bdf662d54cb4be8db27d346efd27a7fbc8be84 | efaf0cc5b7251e1b55fc08f68d7fab00974d25e7 | refs/heads/master | 2021-01-11T09:10:17.455407 | 2017-02-16T23:45:16 | 2017-02-16T23:45:16 | 81,380,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-24 15:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.2: alters the three foreign keys on
    # Order (billing_address, shipping_address, user) to allow NULL while
    # keeping CASCADE deletion. Applied migrations should not be hand-edited;
    # create a follow-up migration for further schema changes.

    dependencies = [
        ('orders', '0004_order'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='billing_address',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='billing_address', to='orders.UserAddress'),
        ),
        migrations.AlterField(
            model_name='order',
            name='shipping_address',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shipping_address', to='orders.UserAddress'),
        ),
        migrations.AlterField(
            model_name='order',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='orders.UserCheckout'),
        ),
    ]
| [
"karolius127@gmail.com"
] | karolius127@gmail.com |
f353000de3bc8694b405d19bd1acbf833e3fe217 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /115_testing/_exercises/_templates/temp/Github/_Level_2/python_test_api-master/test_update_issue.py | 34fc6833a45bee855026e38aae7fa03bdf8eda83 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,365 | py | ____ base_api ______ BaseApi
______ xmltodict
______ d_t_
______ requests
c_ TestUpdateIssue(BaseApi
___ test_update_issue
issue_id _ _create_issue() # obviously we need to create an issue before updating it
url _ base_url + '/issue/' + issue_id
current_time _ st.(d_t_.d_t_.now())[0:-7]
issue_summary _ 'Summary updated at ' + current_time
issue_description _ 'Description updated at ' + current_time
params _ {
'summary': issue_summary,
'description': issue_description
}
r _ request(url, 'post', params)
assert_for_status_code_and_content_type(r, 200, 'text/plain;charset=UTF-8')
url _ base_url + '/issue/' + issue_id
r _ request(url, 'get')
response_dict _ xmltodict.parse(r.text)
assert_for_status_code_and_content_type(r, 200)
aE..(response_dict['issue']['@id'], issue_id)
___ field __ response_dict['issue']['field']:
__ field['@name'] __ 'summary':
aE..(field['value'], issue_summary)
__ field['@name'] __ 'description':
aE..(field['value'], issue_description)
___ test_update_not_existing_issue
url _ base_url + '/issue/' + 'kjhfkaskafk'
current_time _ st.(d_t_.d_t_.now())[0:-7]
issue_summary _ 'Summary updated at ' + current_time
issue_description _ 'Description updated at ' + current_time
params _ {
'summary': issue_summary,
'description': issue_description
}
r _ request(url, 'post', params)
response_dict _ xmltodict.parse(r.text)
assert_for_status_code_and_content_type(r, 404)
aT..(response_dict['error'])
___ test_update_issue_without_credentials
issue_id _ _create_issue()
url _ base_url + '/issue/' + issue_id
current_time _ st.(d_t_.d_t_.now())[0:-7]
issue_summary _ 'Summary updated at ' + current_time
issue_description _ 'Description updated at ' + current_time
params _ {
'summary': issue_summary,
'description': issue_description
}
r _ requests.post(url, params)
assert_for_status_code_and_content_type(r, 401)
response_dict _ xmltodict.parse(r.text)
aT..(response_dict['error'])
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
b7b54593d0b42f343c2a103add3226a69f8d758e | 1b2369715f47c9276f3dd458541d0b62cf5ba237 | /models.py | 1014224267a3e98c3240a25bef788a2aecbf1fa0 | [] | no_license | Virucek/gb_framework | 5a68cdf4f09867db3704ec589e937ddbe68b27f0 | 50893554c80583243ed301ab52e4bc46875ad241 | refs/heads/main | 2023-02-13T14:01:57.808400 | 2021-01-04T22:20:07 | 2021-01-04T22:20:07 | 319,729,864 | 0 | 0 | null | 2021-01-04T22:20:20 | 2020-12-08T18:44:10 | Python | UTF-8 | Python | false | false | 5,503 | py | """ Модели проекта """
import json
from patterns.behavioral.observer import Observer, Subject
from patterns.creational.prototype import PrototypeMixin
from patterns.orm.unit_of_work import DomainObject
class User:
    """Minimal user record carrying only a display name."""

    def __init__(self, name):
        self.name = name
class Student(User, DomainObject):
    """A user who can enrol in courses."""

    def __init__(self, name):
        super().__init__(name)
        self.courses = []  # courses this student has joined
class Teacher(User, DomainObject):
    """A teaching user; no extra state beyond ``User``."""
    pass
class UserFactory:
    """Factory mapping a type name to the matching ``User`` subclass."""

    types = {
        'student': Student,
        'teacher': Teacher,
    }

    @classmethod
    def create(cls, type_name, name):
        """Instantiate the user class registered under *type_name*."""
        user_cls = cls.types[type_name]
        return user_cls(name)
# Категории Курсов
class Category(DomainObject):
    """A course category arranged in a tree via parent/child links.

    NOTE(review): ``id`` (used by lookups elsewhere) is not assigned here —
    presumably the persistence layer sets it; confirm in DomainObject.
    """

    def __init__(self, name, parent_category=None):
        self.name = name
        self.courses = []             # courses attached directly to this node
        self.parent_category = parent_category
        self.child_categories = []    # direct sub-categories

    def __getitem__(self, item):
        return self.courses[item]

    @property
    def course_count(self):
        """Total courses in this category and every descendant category."""
        total = len(self.courses)
        for child in self.child_categories:
            total += child.course_count
        return total

    def add_child(self, category):
        """Register *category* as a direct sub-category of this one."""
        self.child_categories.append(category)
# Курсы
class Course(PrototypeMixin, Subject):
    """A course that records its students and signals observers on enrolment."""

    def __init__(self, name, category):
        self.name = name
        self.category = category
        # Register this course on its owning category.
        self.category.courses.append(self)
        self.students = []
        super().__init__()

    def __getitem__(self, item):
        return self.students[item]

    def add_student(self, student):
        """Enrol *student*, fire observer notification, return the link object."""
        self.students.append(student)
        student.courses.append(self)
        # Expose the newcomer as the subject state, then notify observers.
        self._subject_state = student
        self._notify()
        return CourseStudent(self, student)

    @property
    def new_student(self):
        """The most recently enrolled student (observer payload)."""
        return self._subject_state
class OnlineCourse(Course, DomainObject):
    """Course delivered online."""
    pass
class OfflineCourse(Course, DomainObject):
    """Course delivered offline (in person)."""
    pass
class CourseFactory:
    """Factory mapping a delivery type to the matching ``Course`` subclass."""

    types = {
        'online': OnlineCourse,
        'offline': OfflineCourse,
    }

    @classmethod
    def create(cls, type_name, name, category):
        """Instantiate the course class registered under *type_name*."""
        course_cls = cls.types[type_name]
        return course_cls(name, category)
class SmsNotifier(Observer):
    """Observer announcing new enrolments via (simulated) SMS."""

    def update(self, subject):
        student = subject.new_student
        print(f'SMS: студент {student.name} присоединился к курсу {subject.name}')
class EmailNotifier(Observer):
    """Observer announcing new enrolments via (simulated) e-mail."""

    def update(self, subject):
        student = subject.new_student
        print(f'EMAIL: студент {student.name} присоединился к курсу {subject.name}')
class CourseStudent(DomainObject):  # course <-> student link, many-to-many association
    """Association object tying one Course to one Student."""

    def __init__(self, course, student):
        self.course = course
        self.student = student
class BaseSerializer:
    """Wraps a single value and converts it to/from JSON.

    Failures are reported by printing a message and returning ``None``
    rather than raising (best-effort style).
    """

    def __init__(self, object):  # keep historical parameter name for callers
        self.object = object

    def save(self):
        """Serialise the wrapped value to a JSON string; None on failure."""
        try:
            encoded = json.dumps(self.object)
        except TypeError as e:
            print(f'Problem trying to serialize object to json:\n {e}')
            return None
        return encoded

    def load(self):
        """Parse the wrapped JSON text into Python data; None on failure."""
        try:
            decoded = json.loads(self.object)
        except json.JSONDecodeError as e:
            print(f'Problem trying to deserialize object from json:\n {e}')
            return None
        return decoded
# Основной интерфейс
class MainInterface:
    """Facade aggregating the site's users, courses and categories."""

    def __init__(self):
        self.teachers = []
        self.students = []
        self.courses = []
        self.categories = []

    @staticmethod
    def create_user(type_name, name):
        """Create a user ('student' or 'teacher') via UserFactory."""
        return UserFactory.create(type_name, name)

    @staticmethod
    def create_category(name, parent_category=None):
        """Create a Category, attaching it under *parent_category* if given."""
        category = Category(name, parent_category)
        if parent_category is not None:
            parent_category.add_child(category)
        return category

    @staticmethod
    def create_course(type_name, name, category):
        """Create a course ('online' or 'offline') via CourseFactory."""
        return CourseFactory.create(type_name, name, category)

    def get_category_by_id(self, category_id):
        """Return the category whose ``id`` equals *category_id*.

        Raises:
            Exception: if no such category is registered.
        """
        for cat in self.categories:
            if cat.id == category_id:
                return cat
        # Bug fix: the message previously interpolated the builtin ``id``
        # function ({id}) instead of the requested ``category_id``.
        raise Exception(f'Категория с id {category_id} отсутствует')

    def get_courses_by_category(self, category_id):
        """Return the course list of the category with *category_id*.

        Prints a warning and returns ``None`` when *category_id* cannot be
        converted to ``int`` (preserved best-effort behaviour).
        """
        try:
            category_id = int(category_id)
        except ValueError:
            print('Category id должен быть числом!')
            return None
        return self.get_category_by_id(category_id).courses

    @staticmethod
    def get_course_types():
        """Return the registered course delivery type names."""
        return list(CourseFactory.types.keys())

    def get_course_by_name(self, name):
        """Return the course called *name*; raise if absent."""
        for course in self.courses:
            if course.name == name:
                return course
        raise Exception(f'Курс с именем {name} отсутствует')

    def get_category_tree(self):
        """Return the root categories (those without a parent)."""
        return [cat for cat in self.categories if cat.parent_category is None]

    def get_students_by_course(self, course):
        """Return the students enrolled in the course named *course*."""
        return self.get_course_by_name(course).students

    def get_student_by_name(self, name):
        """Return the student called *name*; raise if absent."""
        for student in self.students:
            if student.name == name:
                return student
        raise Exception(f'Студент с именем {name} отсутствует')
| [
"aykin.yakov@gmail.com"
] | aykin.yakov@gmail.com |
3b41186613bbe1df0198661313cd981b26eee414 | 8531bee5e42d82ffc09ef274ccb2333ca02f03cc | /python/boj/2566.py | 4a6668ddf97b156af575a8387cf04a1e04a2ed03 | [] | no_license | ChoHyoungSeo/Algorithm_prac | 969f193f9ba349d0df3c1d7645fe3f42ec7581c4 | 02327623b5ea2211f4618e60b0bdcc61e16e1c5a | refs/heads/master | 2023-08-17T11:26:55.159895 | 2023-08-17T10:52:34 | 2023-08-17T10:52:34 | 210,917,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | ans = 0
# BOJ 2566: scan a 9x9 grid of integers (``ans`` is initialised to 0 earlier)
# for the maximum value and report it with its 1-based row and column.
ans_idx01 = 0  # 0-based row index of the current maximum
ans_idx02 = 0  # 0-based column index of the current maximum
for i in range(9):
    tmp = list(map(int, input().split()))
    if max(tmp) > ans:
        ans_idx01 = i
        ans_idx02 = tmp.index(max(tmp))
        ans = max(tmp)
print(ans)
# Shift to the 1-based coordinates the judge expects. If every value is 0
# the trackers never move and (1, 1) is still a valid cell for that grid.
print(ans_idx01 + 1, ans_idx02+1)
"francesco.johs@gmail.com"
] | francesco.johs@gmail.com |
de8b7b31daba8b0d65d76e9630be8f028152e974 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/mnurolcay/2011/util/misc/q4wine/actions.py | 461a1ecbe7b16a54ce9395b37efa96f01b7fe94b | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import cmaketools
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
# Unpacked source directory; the "_0" marker in the PiSi version string is
# rewritten to upstream's "-r" release suffix.
WorkDir="q4wine-%s" % get.srcVERSION().replace("_0", "-r")

def setup():
    # Configure with CMake, enabling icoutils, winetricks, the Wine AppDB
    # browser and D-Bus support.
    cmaketools.configure("-DWITH_ICOUTILS=ON \
                          -DWITH_WINETRIKS=ON \
                          -DWITH_WINEAPPDB=ON \
                          -DWITH_DBUS=ON")

def build():
    # Plain make in the CMake build tree.
    cmaketools.make()

def install():
    # Stage into the package root and ship the documentation files.
    cmaketools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("ChangeLog", "copying", "LICENSE", "README")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
bc99649fa3b238fc810e29b01a44612be761c05b | 7ebc4e9ade9c0c0312c87d74f94929e5c3bf96a6 | /aaai/compute-results-transfer-pca.py | 647d34f500f58a76c7c7f9d996bed06f9acb9606 | [] | no_license | nipunbatra/transferable-energy-breakdown-old | 7fa02dd84b0eb37875c190c5a06bfc9d1a2a9218 | bc12de92d620d33e1ca4cf841af341eb3d4bcd76 | refs/heads/master | 2021-08-22T09:24:40.322431 | 2017-11-29T21:18:43 | 2017-11-29T21:18:43 | 78,712,453 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | from aaai18.common import compute_rmse_fraction
from create_matrix import *
appliance_index = {appliance: APPLIANCES_ORDER.index(appliance) for appliance in APPLIANCES_ORDER}
import os
import pickle
# Legacy Python 2 script (note the ``except Exception, e`` syntax below):
# aggregates RMSE fractions for PCA-transfer experiments from per-run pickle
# files and re-pickles the seed-averaged results.
# NOTE(review): ``sys`` and ``pd`` are not imported above — presumably pulled
# in via ``from create_matrix import *``; confirm.
source, target = sys.argv[1:]
cost = 'l21'
out = {}
for static_fac in ['None']:
    out[static_fac] = {}
    for lam in [0]:
        out[static_fac][lam] = {}
        for train_percentage in range(10, 110, 10):
            out[static_fac][lam][train_percentage] ={}
            for random_seed in range(5):
                out[static_fac][lam][train_percentage][random_seed] = {}
                # One pickle per (source, target, factor, lambda, seed, split) run.
                name = "{}-{}-{}-{}-{}-{}-{}".format(source, target, static_fac, lam, random_seed, train_percentage,
                                                     cost)
                directory = os.path.expanduser('~/aaai2017/pca-transfer_{}_{}_{}/'.format(source, target, cost))
                filename = os.path.join(directory, name + '.pkl')
                try:
                    pr = pickle.load(open(filename, 'r'))
                    pred = pr['Predictions']
                    for appliance in APPLIANCES_ORDER[1:]:
                        prediction = pred[appliance]
                        if appliance == "hvac":
                            # NOTE(review): keeps only labels 4..9 for hvac —
                            # presumably a seasonal month subset; confirm.
                            prediction = prediction[range(4, 10)]
                        out[static_fac][lam][train_percentage][random_seed][appliance]= \
                            compute_rmse_fraction(appliance, prediction, target)[2]
                    print("Computed for: {}".format(name))
                except Exception, e:
                    # Missing/partial result files are tolerated and logged.
                    print(e)
                    print("Exception")
            # Average the per-appliance errors over the five random seeds.
            out[static_fac][lam][train_percentage] = pd.DataFrame(out[static_fac][lam][train_percentage]).mean(axis=1)
pickle.dump(out, open("predictions/pca-{}-{}-transfer-cv.pkl".format(source, target),"w"))
| [
"nipunb@iiitd.ac.in"
] | nipunb@iiitd.ac.in |
483ce075ad6c47c5733a61979e6a25dc728bad6b | 81344c55ed60bf12818d1a0ec246f3c24c79cb4c | /力扣习题/459重复的子字符串/repeatedsubstringpattern.py | 00588e92c616933821de561132b5d6d05e859862 | [
"MIT"
] | permissive | lollipopnougat/AlgorithmLearning | 7d5c4a37bd5c814c5caea6963e81fbe0cb44b7b7 | cb13caa0159f0179d3c1bacfb1801d156c7d1344 | refs/heads/master | 2023-05-11T04:47:09.758889 | 2023-05-07T06:55:48 | 2023-05-07T06:55:48 | 194,078,151 | 7 | 2 | MIT | 2023-03-25T01:23:44 | 2019-06-27T10:53:08 | Python | UTF-8 | Python | false | false | 105 | py | class Solution:
def repeatedSubstringPattern(self, s: str) -> bool:
return s in (s + s)[1:-1] | [
"ab2defg145@gmail.com"
] | ab2defg145@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.