hexsha (string, length 40) | size (int64, 4-1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, length 4-209) | max_stars_repo_name (string, length 5-121) | max_stars_repo_head_hexsha (string, length 40) | max_stars_repo_licenses (list, length 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable) | max_issues_repo_path (string, length 4-209) | max_issues_repo_name (string, length 5-121) | max_issues_repo_head_hexsha (string, length 40) | max_issues_repo_licenses (list, length 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable) | max_forks_repo_path (string, length 4-209) | max_forks_repo_name (string, length 5-121) | max_forks_repo_head_hexsha (string, length 40) | max_forks_repo_licenses (list, length 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable) | content (string, length 4-1.02M) | avg_line_length (float64, 1.07-66.1k) | max_line_length (int64, 4-266k) | alphanum_fraction (float64, 0.01-1)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c2fef72932d1f5f8c2eaf7b25c542eee04452f11 | 269 | py | Python | user/encryption/__init__.py | sanjaymarison/tkinteruser | a858b32f5e83276430f8647550f7e535e8f44490 | [
"MIT"
] | null | null | null | user/encryption/__init__.py | sanjaymarison/tkinteruser | a858b32f5e83276430f8647550f7e535e8f44490 | [
"MIT"
] | null | null | null | user/encryption/__init__.py | sanjaymarison/tkinteruser | a858b32f5e83276430f8647550f7e535e8f44490 | [
"MIT"
] | null | null | null | from cryptography.fernet import Fernet
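# Note: cryptography's Fernet expects a 32-byte, url-safe base64-encoded key;
# the empty secret_key placeholders below must be filled in before these helpers will run.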
def decrypt(arg):
secret_key = ""
cipher_suite = Fernet(secret_key)
return cipher_suite.decrypt(arg)
def encrypt(arg):
secret_key = ""
cipher_suite = Fernet(secret_key)
return str(cipher_suite.encrypt(bytes(arg,"utf-8"))) | 22.416667 | 53 | 0.758364 |
455706e87d0bb1e48be6f1a10f635c6deb37c04a | 43,384 | py | Python | slaveNode.py | vinci-project/vinciShard | 1c4ce8fceeff802c750e263fd6587cc2d22ca323 | [
"MIT"
] | null | null | null | slaveNode.py | vinci-project/vinciShard | 1c4ce8fceeff802c750e263fd6587cc2d22ca323 | [
"MIT"
] | 5 | 2020-03-02T11:23:26.000Z | 2022-03-11T23:52:45.000Z | slaveNode.py | vinci-project/vinciShard | 1c4ce8fceeff802c750e263fd6587cc2d22ca323 | [
"MIT"
] | null | null | null | from PyQt5.QtCore import QCoreApplication
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import QMutex
from PyQt5.QtCore import QTimer
from PyQt5.QtCore import QThread
from vncCrypto import VncCrypto
from vncCrypto.mrkl import VncTree
from vncCrypto.signChecker import SignChecker
from netTools import NetEngine
from jsonMaker import JsonPackets
from datetime import datetime, date, time
import json, sys, os, redis, pymongo, math
from pymongo import MongoClient
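# slaveWorker runs as a QThread: incoming packets are queued by appendPacketToStack(),
# drained in run(), and dispatched by handler() according to the packet's "TT" field.
# Working state (balances, votes, raw/complete transactions) is kept in Redis; finished
# blocks and archived transactions are persisted to MongoDB.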
class slaveWorker(QThread):
floodPacket = pyqtSignal(str, str, int)
sendPacket = pyqtSignal(str, str, int)
setPurifyList = pyqtSignal(list)
def __init__(self, myAddress: str, privateKey: str):
super().__init__()
self.cryptor = VncCrypto()
self.transactionMemory = set()
self.precommitedBlockHash = None
self.signatureMemory = []
self.transactionsInPBlock = set()
self.stemAddress = None
self.stemPublicKey = "0320ab99dee836df538e5e09a7c692c0aef02d91a11ce711992b95835f28243242"
self.nodesCount = 0
self.myAddress = myAddress
self.packetStack = list()
self.priorPacketStack = list()
self.stackMutex = QMutex()
self.signChecker = SignChecker()
self.version = "0.1.0"
self.redis_host = os.getenv("REDIS_PORT_6379_TCP_ADDR") or 'localhost'
self.redis_port = os.getenv("REDIS_PORT_6379_TCP_PORT") or '6379'
self.mongo_host = os.getenv("MONGO_PORT_27017_TCP_ADDR") or 'localhost'
self.mongo_port = 27017
self.mongo_conn = MongoClient(self.mongo_host, self.mongo_port)
self.redis = redis.StrictRedis(self.redis_host, self.redis_port, db=1)
self.mongo = self.mongo_conn.vncsphere
self.mongo_conn.drop_database(self.mongo) # CLEAR MONGODB VNCSPHERE DATABASE
self.lastCheckBlockBalance = 0
self.currentTokenTypes = ["VINCI"]
self.blockReward = 10
self.blocks_in_genesis = 10000000
self.timeToGarbageCollector = 300*1000 #5 minutes
self.MAX_TRAN_COUNT_IN_BLOCK = 9000
self.MAX_TRAN_FOR_USER_IN_BLOCK = 9000
self.tempMoneySum = 0
self.updateBalanceStep = 1000
#----------------------------------------- TOKENOMICS BASIC
self.voteFeeArray = {"VT": 10, "UFT": 100, "DFT": 100}
self.allTokens = 23*1000*1000 #23 mln tokens
self.tokensToReward = self.allTokens * 0.26 #reward tokens for 4 years
#self.freezeToAT = 16*1000
self.freezeToAT = 16
self.freezeToVT = 1
self.fee = 0
self.freeZoneHeight = 12*60*24*365*4
#self.freeZoneHeight = 10
#-----------------------------------------
#self.cryptor.generateKeys()
print("PRIVATE KEY!", privateKey)
self.cryptor.setPrivateKey(privateKey)
print("PUBLIC KEY!", self.cryptor.getPublicKey())
self.redis.flushdb()
print(self.redis.set("VERSION", self.version))
##################### TEST
#self.redis.zadd("BALANCE:VINCI",10000, "0323f264fd64a684db1e36a2c97b58867e0625f797008206216576fea2114bdbca")
#self.redis.zadd("BALANCE:NBL",10000, "027426df275f98bb0f66bb4f1a92352296be233b115415f709540e8caa80d820f2")
#self.redis.zadd("RAW TRANSACTIONS", 1541777858, '{"TT":"ET","SENDER":"0323f264fd64a684db1e36a2c97b58867e0625f797008206216576fea2114bdbca","RECEIVER":"027426df275f98bb0f66bb4f1a92352296be233b115415f709540e8caa80d820f2","STT":"VINCI","STC":"100","RTT":"NBL","RTC":"10","TST":"1541777858","SIGNATURE":"3b229fe53c21b472e82d4eec2a9bbde9c340c243b80fbd7ee1897b065708603352df740f1493d4d7215802065be5fde34fd8e0cae65afa11b5d69dc0cfd0a01a00"}')
@staticmethod
def isfloat(value):
try:
float(value)
return True
except:
return False
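# Packets are queued under a mutex; consensus-critical types (SG, CT, BL, PURIFIER)
# are pushed to the front of the stack so they are handled before ordinary transactions.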
@pyqtSlot(str, str)
def appendPacketToStack(self, address: str, packet: str):
# if not self.signChecker.checkTran(packet):
# return
jpacket = json.loads(packet)
packetType = jpacket["TT"]
self.stackMutex.lock()
if packetType == "SG" or packetType == "CT" or packetType == "BL" or packetType == "PURIFIER":
self.packetStack.insert(0, (address, packet))
else:
self.packetStack.append((address, packet))
self.stackMutex.unlock()
def getAnyFee(self, token_type):
if token_type == "VINCI":
return self.fee
else:
return 0
def getAnyVoteFee(self, voteType):
if self.voteFeeArray.get(voteType) is None:
return 0
else:
return self.voteFeeArray.get(voteType)
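# handler() is the main dispatcher. Packet types handled below:
# SG (precommit-block signature), CBRH/CBR (commit results from the stem node),
# BL (block proposal), CT (request to assemble a new block), PURIFIER (node list update).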
def handler(self, address: str, packet: str):
jpacket = json.loads(packet)
print("RECIVE PACKET: ", jpacket["TT"])
if jpacket["TT"] == "SG":
if self.precommitedBlockHash == jpacket.get("HASH"):
self.signatureMemory.append(jpacket.get("SIGNPB"))
else:
print("Recive old block SG - failed and return")
return
signNeedCount = len(set(self.signatureMemory))
if signNeedCount >= int((2 / 3) * self.nodesCount):
# CLEAR TRAN LIST #
self.transactionMemory = self.transactionMemory - set(self.transactionsInPBlock)
# CLEAR TRAN LIST #
self.cttime = 0
print(self.signatureMemory)
self.sendPacket.emit(self.stemAddress, JsonPackets.createCommitBlock(self.cryptor, self.precommitedBlock, self.signatureMemory), 1)
self.transactionsInPBlock.clear()
self.signatureMemory.clear()
return
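# CBRH: commit-block result header from the stem node. Verify that the reported hash
# matches our precommitted block (re-requesting the full block on mismatch), attach the
# stem/node signatures, persist the block to MongoDB and update the Redis indices
# (complete transactions, applicants, votes) for every transaction it contains.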
if jpacket["TT"] == "CBRH":
print("CBRH KEEP!")
if jpacket["SENDER"] == self.stemPublicKey: #IF SENDER IS MASTER NODE
hash = jpacket["HASH"]
JTPCB = json.loads(self.precommitedBlock)
JTPCB.pop("SIGNATURE")
if VncTree.hash(json.dumps(JTPCB, separators=(',', ':'))) != hash:
print("BAD HASH!!! FROM CBRH:", VncTree.hash(self.precommitedBlock), "AND", hash)
# GIVE ME ALL SIZE BLOCK!
self.sendPacket.emit(self.stemAddress, JsonPackets.giveAllSizeBlock(self.cryptor, jpacket["BHEIGHT"]), 1)
return
block = json.loads(self.precommitedBlock)
block.update({"STEM_SIGNATURE": jpacket["STEM_SIGNATURE"]})
block.update({"NODE_SIGNATURES": jpacket["SIGNS"]})
self.mongo.vncchain.save(block)
#self.redis.zadd("VNCCHAIN", block["BHEIGHT"], json.dumps(block, separators=(',', ':')))
self.precommitedBlock = "" # CLEAR MEMORY AFTER SAVING BLOCK IN CHAIN
self.checkBalance()
# # DELETE AFTER ENDING MAIN BLOCKCHAIN EXPLORER
# money = self.redis.zscore("MONEY MOVE", datetime.now().strftime("%Y-%m-%d"))
#
# if money is None:
# money = 0
# else:
# if slaveWorker.isfloat(money):
# money = float(money)
# else:
# print("CLEAR MONEY BUF-BUF")
# money = 0
# # DELETE AFTER ENDING MAIN BLOCKCHAIN EXPLORER
for tran in block["TRANSACTIONS"]:
stran = json.dumps(tran, separators=(',', ':'))
print("ZREEEEEEEM:", stran, self.redis.zrem("RAW TRANSACTIONS", stran))
self.redis.zrem("RAW TRANSACTIONS", stran)
if tran["TT"] == "ET":
sender = tran["SENDER"]
receiver = tran["RECEIVER"]
sign = tran["SIGNATURE"]
self.redis.zadd("COMPLETE TRANSACTIONS", int(block["BHEIGHT"]), VncTree.hash(sign))
self.redis.set("TRANSACTIONS:" + VncTree.hash(sign), stran)
if tran["TT"] == "ST":
sender = tran["SENDER"]
receiver = tran["RECEIVER"]
sign = tran["SIGNATURE"]
self.redis.zadd("COMPLETE TRANSACTIONS", int(block["BHEIGHT"]), VncTree.hash(sign))
self.redis.set("TRANSACTIONS:" + VncTree.hash(sign), stran)
if tran["TT"] == "AT":
sender = tran["SENDER"]
sign = tran["SIGNATURE"]
self.redis.zadd("COMPLETE TRANSACTIONS", int(block["BHEIGHT"]), VncTree.hash(sign))
self.redis.zadd("APPLICANTS", int(block["BHEIGHT"]), sender)
self.redis.set("TRANSACTIONS:" + VncTree.hash(sign), stran)
if tran["TT"] == "UAT":
sender = tran["SENDER"]
sign = tran["SIGNATURE"]
self.redis.zadd("COMPLETE TRANSACTIONS", int(block["BHEIGHT"]), VncTree.hash(sign))
self.redis.zrem("APPLICANTS", sender)
self.redis.set("TRANSACTIONS:" + VncTree.hash(sign), stran)
if tran["TT"] == "VT":
sender = tran["SENDER"]
receiver = tran["RECEIVER"]
votes = int(tran["VOTES"])
sign = tran["SIGNATURE"]
self.redis.zadd("COMPLETE TRANSACTIONS", int(block["BHEIGHT"]), VncTree.hash(sign))
self.redis.set("TRANSACTIONS:" + VncTree.hash(sign), stran)
tempVotes = self.redis.zscore("VOTES:" + sender, receiver)
if tempVotes is None:
tempVotes = 0
self.redis.zadd("VOTES:" + receiver, tempVotes + votes, sender)
tempUntVotes = self.redis.zscore("UNTVOTES", sender)
if tempUntVotes is None:
tempUntVotes = 0
self.redis.zadd("UNTVOTES", tempUntVotes + votes, sender)
if tran["TT"] == "UFT": #UPFEETRAN
sender = tran["SENDER"]
receiver = tran["RECEIVER"]
votes = int(tran["VOTES"])
sign = tran["SIGNATURE"]
self.redis.zadd("COMPLETE TRANSACTIONS", int(block["BHEIGHT"]), VncTree.hash(sign))
self.redis.set("TRANSACTIONS:" + VncTree.hash(sign), stran)
tempVotes = self.redis.zscore("UFT-VOTES:" + sender, receiver)
if tempVotes is None:
tempVotes = 0
self.redis.zadd("UFT-VOTES:" + sender, tempVotes + votes, receiver)
tempUntVotes = self.redis.zscore("UFT-UNTVOTES", sender)
if tempUntVotes is None:
tempUntVotes = 0
self.redis.zadd("UFT-UNTVOTES", tempUntVotes + votes, sender)
if tran["TT"] == "DFT": #DOWNFEETRAN
sender = tran["SENDER"]
receiver = tran["RECEIVER"]
votes = int(tran["VOTES"])
sign = tran["SIGNATURE"]
self.redis.zadd("COMPLETE TRANSACTIONS", int(block["BHEIGHT"]), VncTree.hash(sign))
self.redis.set("TRANSACTIONS:" + VncTree.hash(sign), stran)
tempVotes = self.redis.zscore("DFT-VOTES:" + sender, receiver)
if tempVotes is None:
tempVotes = 0
self.redis.zadd("DFT-VOTES:" + sender, tempVotes + votes, receiver)
tempUntVotes = self.redis.zscore("DFT-UNTVOTES", sender)
if tempUntVotes is None:
tempUntVotes = 0
self.redis.zadd("DFT-UNTVOTES", tempUntVotes + votes, sender)
if tran["TT"] == "UVT":
sender = tran["SENDER"]
receiver = tran["RECEIVER"]
sign = tran["SIGNATURE"]
self.redis.zadd("COMPLETE TRANSACTIONS", int(block["BHEIGHT"]), VncTree.hash(sign))
self.redis.zrem("VOTES:" + sender, receiver)
self.redis.set("TRANSACTIONS:" + VncTree.hash(sign), stran)
#self.redis.zadd("MONEY MOVE", money, datetime.now().strftime("%Y-%m-%d"))
if jpacket["TT"] == "CBR":
print("CBR KEEP!")
if jpacket["SENDER"] == self.stemPublicKey: #IF SENDER IS MASTER NODE
block = jpacket["BLOCK"]
block.update({"STEM_SIGNATURE": jpacket["SIGNATURE"]})
self.mongo.vncchain.save(block)
#self.redis.zadd("VNCCHAIN", block["BHEIGHT"], json.dumps(block, separators=(',', ':')))
self.checkBalance()
for tran in block["TRANSACTIONS"]:
self.redis.zrem("RAW TRANSACTIONS", json.dumps(tran, separators=(',', ':')))
if tran["TT"] == "ST":
sender = tran["SENDER"]
receiver = tran["RECEIVER"]
sign = tran["SIGNATURE"]
#money += float(tran["CTOKEN"])
self.redis.zadd("COMPLETE TRANSACTIONS", int(block["BHEIGHT"]), str(sender) + str(receiver) + str(sign))
if tran["TT"] == "AT":
sender = tran["SENDER"]
self.redis.zadd("APPLICANTS", int(block["BHEIGHT"]), sender)
if tran["TT"] == "UAT":
sender = tran["SENDER"]
self.redis.zrem("APPLICANTS", sender)
if tran["TT"] == "VT":
sender = tran["SENDER"]
receiver = tran["RECEIVER"]
votes = tran["VOTES"]
self.redis.zadd("VOTES:" + sender, votes, receiver)
temp_votes = self.redis.zscore("UNTVOTES", sender)
if temp_votes is None:
temp_votes = 0
self.redis.zadd("UNTVOTES", temp_votes + votes, sender)
if tran["TT"] == "UVT":
sender = tran["SENDER"]
receiver = tran["RECEIVER"]
self.redis.zrem("VOTES:" + sender, receiver)
del_votes = self.redis.zscore("VOTES:" + sender, receiver)
if del_votes is None:
del_votes = 0
self.redis.zrem("VOTES:" + sender, receiver)
temp_votes = self.redis.zscore("UNTVOTES", sender)
if temp_votes is None:
temp_votes = 0
self.redis.zadd("UNTVOTES", temp_votes - del_votes, sender)
#self.redis.zadd("MONEY MOVE", money, datetime.now().strftime("%Y-%m-%d"))
# IF BLOCK FROM STEM NODE
if jpacket["TT"] == "BL":
# IF NEED SIGNATURE WORK
transactions = jpacket["TRANSACTIONS"]
trans = []
for tran in transactions:
trans.append(json.dumps(tran, separators=(',', ':')))
unionTran = set(trans) - self.transactionMemory
okSign = True
if len(unionTran) != 0: # Build block with another transactions
for newTran in unionTran:
jnewTran = json.loads(newTran)
signature = jnewTran.pop("SIGNATURE")
sender = jnewTran.get("SENDER")
if not self.cryptor.verifyMessage(signature, sender, json.dumps(jpacket, separators=(',', ':'))):
self.sendPacket.emit(address, JsonPackets.createСomplaint(self.cryptor, packet, newTran), 1)
okSign = False
if okSign: # Good block!
precommitedBlock = json.dumps(jpacket, separators=(',', ':'))
self.sendPacket.emit(address,JsonPackets.createSignaturePrecommitedBlock(self.cryptor, precommitedBlock, VncTree.hash(precommitedBlock)), 1)
return
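# CT: the stem node asks this node to assemble the next block. Pull pending entries from
# "RAW TRANSACTIONS", validate each one against the (frozen-token adjusted) balances per
# token type, build a precommit block, sign it and flood it to the other nodes.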
if jpacket["TT"] == "CT":
self.updateFee()
print("RECEIVE PACKET:", packet)
#blockchain_height = self.redis.zcard("VNCCHAIN")
blockchain_height = self.mongo.vncchain.find().count()
# pymongo_cursor = self.mongo.vncchain.find().sort("BHEIGHT", pymongo.ASCENDING).limit(1)
# blockchain_height = dict(pymongo_cursor).get("BHEIGHT")
if blockchain_height is None:
blockchain_height = 0
if blockchain_height != jpacket["NBH"]:
print("NBH not Supported in my version BlockChain!")
#print(blockchain_height, jpacket["NBH"])
self.sendPacket.emit(self.stemAddress, JsonPackets.badAnswer(self.cryptor), 1)
return False
txmas = []
txcount = self.redis.zcard("RAW TRANSACTIONS")
if txcount < self.MAX_TRAN_COUNT_IN_BLOCK:
txmas = self.redis.zrange("RAW TRANSACTIONS", 0, -1)
else:
txmas = self.redis.zrange("RAW TRANSACTIONS", 0, self.MAX_TRAN_COUNT_IN_BLOCK)
tranUserCount = {}
tempBalanceMemmory = {}
tempDictBalanceMemmory = {}
decodeTxmas = []
print("TXMAS_LEN:", len(txmas))
for tran in txmas:
try:
jtran = json.loads(tran)
except Exception:
print("000. GO TO HELL!", tran)
continue
tokenPrefix = "VINCI"
if jtran["TT"] == "ET":
print("STEP 1")
sender = jtran["SENDER"] # money sender
receiver = jtran["RECEIVER"] # money receiver
stt = jtran["STT"] # money sender token type
rtt = jtran["RTT"] # money receiver token type
stc = jtran["STC"] # money sender token count
rtc = jtran["RTC"] # money receiver token count
# if jtran.get("INIT_SIGNATURE") is not None:
# init_sign = jtran.pop("INIT_SIGNATURE")
# else:
# init_sign = None
#
# sign = jtran.pop("SIGNATURE")
#
# if self.cryptor.verifyMessage(sign, sender, json.dumps(tran, separators=(',', ':'))):
# tempBalance = tempBalanceMemmory.get(currentSender)
# CHECK DOUBLE MONEY SEND
tempSBalanceMemmory = tempDictBalanceMemmory.get(stt)
if tempSBalanceMemmory is None:
tempSBalanceMemmory = {}
tempSBalance = self.redis.zscore("BALANCE:" + stt, sender)
else:
tempSBalance = tempSBalanceMemmory.get(sender)
if tempSBalance is None:
tempSBalance = self.redis.zscore("BALANCE:" + stt, sender)
tempRBalanceMemmory = tempDictBalanceMemmory.get(rtt)
if tempRBalanceMemmory is None:
tempRBalanceMemmory = {}
tempRBalance = self.redis.zscore("BALANCE:" + rtt, receiver)
else:
tempRBalance = tempRBalanceMemmory.get(receiver)
if tempRBalance is None:
tempRBalance = self.redis.zscore("BALANCE:" + rtt, receiver)
if tempSBalance is None:
print("1. GO TO HELL ET!")
self.redis.zadd("FAILED TRANSACTIONS", 1, VncTree.hash(jtran["SIGNATURE"]))
self.redis.zrem("RAW TRANSACTIONS", tran)
continue
if tempRBalance is None:
print("1. GO TO HELL ET!")
self.redis.zadd("FAILED TRANSACTIONS", 1, VncTree.hash(jtran["SIGNATURE"]))
self.redis.zrem("RAW TRANSACTIONS", tran)
continue
if stt == "VINCI":
totalSBalance = tempSBalance - self.getFreezeTokens(sender)
else:
totalSBalance = tempSBalance
if totalSBalance < (float(stc) + self.getAnyFee(stt)):
self.redis.zrem("RAW TRANSACTIONS", tran)
self.redis.zadd("FAILED TRANSACTIONS", 2, VncTree.hash(jtran["SIGNATURE"]))
print("2. GO TO HELL ET!")
continue
else:
tempSBalance = tempSBalance - (float(stc) + self.getAnyFee(stt))
if stt == "VINCI":
totalRBalance = tempRBalance - self.getFreezeTokens(receiver)
else:
totalRBalance = tempRBalance
if totalRBalance < (float(stc) + self.getAnyFee(rtt)):
self.redis.zrem("RAW TRANSACTIONS", tran)
self.redis.zadd("FAILED TRANSACTIONS", 2, VncTree.hash(jtran["SIGNATURE"]))
print("2. GO TO HELL ET!")
continue
else:
tempRBalance = tempRBalance - (float(rtc) + self.getAnyFee(rtt))
tempSBalanceMemmory.update({sender: tempSBalance})
tempRBalanceMemmory.update({receiver: tempRBalance})
tempDictBalanceMemmory.update({stt: tempSBalanceMemmory})
tempDictBalanceMemmory.update({rtt: tempRBalanceMemmory})
decodeTxmas.append(tran)
if jtran["TT"] == "AT":
currentSender = jtran["SENDER"]
# CHECK DOUBLE MONEY SEND
tempBalanceMemmory = tempDictBalanceMemmory.get("VINCI")
if tempBalanceMemmory is None:
tempBalanceMemmory = {}
tempBalance = self.redis.zscore("BALANCE:VINCI", currentSender)
else:
tempBalance = tempBalanceMemmory.get(currentSender)
if tempBalance is None:
tempBalance = self.redis.zscore("BALANCE:VINCI", currentSender)
if tempBalance is None:
print("1. GO TO HELL AT!")
self.redis.zadd("FAILED TRANSACTIONS", 1, VncTree.hash(jtran["SIGNATURE"]))
self.redis.zrem("RAW TRANSACTIONS", tran)
continue
if (tempBalance - self.getFreezeTokens(currentSender)) < self.freezeToAT:
self.redis.zrem("RAW TRANSACTIONS", tran)
self.redis.zadd("FAILED TRANSACTIONS", 2, VncTree.hash(jtran["SIGNATURE"]))
print("2. GO TO HELL AT!")
continue
else:
tempBalance = tempBalance - self.freezeToAT
tempBalanceMemmory.update({currentSender: tempBalance})
tempDictBalanceMemmory.update({"VINCI": tempBalanceMemmory})
decodeTxmas.append(tran)
if jtran["TT"] == "VT" or jtran["TT"] == "UFT" or jtran["TT"] == "DFT": # VOTE, UPFEE, DOWNFEE
currentSender = jtran["SENDER"]
# CHECK DOUBLE MONEY SEND
tempBalanceMemmory = tempDictBalanceMemmory.get("VINCI")
if tempBalanceMemmory is None:
tempBalanceMemmory = {}
tempBalance = self.redis.zscore("BALANCE:VINCI", currentSender)
else:
tempBalance = tempBalanceMemmory.get(currentSender)
if tempBalance is None:
tempBalance = self.redis.zscore("BALANCE:VINCI", currentSender)
if tempBalance is None:
print("1. GO TO HELL VT!")
self.redis.zadd("FAILED TRANSACTIONS", 1, VncTree.hash(jtran["SIGNATURE"]))
self.redis.zrem("RAW TRANSACTIONS", tran)
continue
if (tempBalance - self.getFreezeTokens(currentSender)) < self.freezeToVT*int(jtran["VOTES"]):
self.redis.zrem("RAW TRANSACTIONS", tran)
self.redis.zadd("FAILED TRANSACTIONS", 2, VncTree.hash(jtran["SIGNATURE"]))
print("2. GO TO HELL VT!")
continue
else:
if jtran["TT"] == "VT":
tempBalance = tempBalance - self.getAnyVoteFee(jtran["TT"])*int(jtran["VOTES"])
if jtran["TT"] == "UFT":
tempBalance = tempBalance - self.getAnyVoteFee(jtran["TT"])*int(jtran["VOTES"])
if jtran["TT"] == "DFT":
tempBalance = tempBalance - self.getAnyVoteFee(jtran["TT"])*int(jtran["VOTES"])
tempBalanceMemmory.update({currentSender: tempBalance})
tempDictBalanceMemmory.update({"VINCI": tempBalanceMemmory})
decodeTxmas.append(tran)
if jtran["TT"] == "ST":
currentSender = jtran["SENDER"]
# CHECK 3 TRAN FROM ONE USER
count = tranUserCount.get(currentSender)
if count is None:
tranUserCount.update({currentSender:1})
else:
if count >= self.MAX_TRAN_FOR_USER_IN_BLOCK:
continue
else:
tranUserCount.update({currentSender:(count + 1)})
# CHECK DOUBLE MONEY SEND
tempBalanceMemmory = tempDictBalanceMemmory.get(jtran["TTOKEN"])
if tempBalanceMemmory is None:
tempBalanceMemmory = {}
tempBalance = self.redis.zscore("BALANCE:" + jtran["TTOKEN"], currentSender)
else:
tempBalance = tempBalanceMemmory.get(currentSender)
if tempBalance is None:
tempBalance = self.redis.zscore("BALANCE:" + jtran["TTOKEN"], currentSender)
if tempBalance is None:
print("1. GO TO HELL!")
self.redis.zadd("FAILED TRANSACTIONS", 1, VncTree.hash(jtran["SIGNATURE"]))
self.redis.zrem("RAW TRANSACTIONS", tran)
continue
if (tempBalance - self.getFreezeTokens(currentSender)) < (float(jtran["CTOKEN"]) + self.fee):
self.redis.zrem("RAW TRANSACTIONS", tran)
self.redis.zadd("FAILED TRANSACTIONS", 2, VncTree.hash(jtran["SIGNATURE"]))
print("2. GO TO HELL!")
continue
else:
tempBalance = tempBalance - (float(jtran["CTOKEN"]) + self.fee)
tempBalanceMemmory.update({currentSender: tempBalance})
tempDictBalanceMemmory.update({jtran["TTOKEN"]: tempBalanceMemmory})
decodeTxmas.append(tran)
self.transactionsInPBlock = decodeTxmas
precommitBlock = JsonPackets.createPrecommitBlock(self.cryptor, self.version, self.transactionsInPBlock, blockchain_height, self.fee)
# CLEAR ZONE #
self.signatureMemory.clear()
# CLEAR ZONE #
self.precommitedBlock = precommitBlock
self.precommitedBlockHash = VncTree.hash(precommitBlock)
signature = json.loads(precommitBlock).get("SIGNATURE")
print("APPEND MY SIGNATURE!", signature)
self.signatureMemory.append(signature)
signNeedCount = len(set(self.signatureMemory))
if signNeedCount >= math.ceil((2 / 3) * self.nodesCount): # !!!ONLY WORK IN TESTNET WITH ONE NODE!!!
print("SIGN COMPLETE")
#self.transactionMemory = self.transactionMemory - self.transactionsInPBlock # ADD SEARCH AND DELETION
# CLEAR TRAN LIST #
self.sendPacket.emit(self.stemAddress, JsonPackets.createCommitBlock(self.cryptor, self.precommitedBlock, self.signatureMemory), 1)
self.transactionsInPBlock.clear()
self.signatureMemory.clear()
else:
print("SIGN NOT COMPLETE")
#print ("ZONE 2")
self.floodPacket.emit(precommitBlock, str(), 1)
return
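# PURIFIER: node-list broadcast from the stem node. Store the validator list (and, at
# genesis boundaries, the purifier block itself) and pass the addresses to the net engine.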
if jpacket["TT"] == "PURIFIER":
#print("RECEIVE PACKET:", packet)
self.updateFee()
self.stemAddress = address
pymongo_cursor = self.mongo.vncchain.find().sort("BHEIGHT", pymongo.ASCENDING).limit(1)
first_block = next(pymongo_cursor, None)
blockchain_height = first_block.get("BHEIGHT") if first_block else None
if blockchain_height is None:
blockchain_height = 0
#blockchain_height = self.redis.zcard("VNCCHAIN")
tempList = jpacket["CLEAN_LIST"]
tempKeyList = jpacket["CLEAN_KEY_LIST"]
if len(tempList) != len(tempKeyList):
print("BAD PURIFIER BLOCK! STOP WORK!")
return
if blockchain_height == 0 or blockchain_height % self.blocks_in_genesis == 0:
self.mongo.vncchain.save(jpacket)
#self.redis.zadd("VNCCHAIN", jpacket["BHEIGHT"], json.dumps(jpacket, separators=(',', ':')))
NLIST = []
for temp in zip(tempList, tempKeyList):
NLIST.append({"ADDRESS": temp[0], "TYPE": "1", "PUBLICKEY": temp[1]})
self.redis.set("NODES LIST", json.dumps({"NLIST": NLIST}, separators=(',', ':')))
self.nodesCount = len(tempList)
self.setPurifyList.emit(tempList)
return
# if jpacket["TT"] == "BT":
# #print("RECEIVE BT")
# result = self.benchmark.benchmarkStart(jpacket["START"])
# packet = JsonPackets.myBenchmarkResult(self.cryptor, result, jpacket["START"])
# #print("SEND MBR")
# self.sendPacket.emit(address, packet, 1)
# return
# if jpacket["TT"] == "AT":
# print("RECEIVE PACKET:", packet)
# self.transactionMemory.add(packet)
# self.floodPacket.emit(packet, address, 0)
# return
# if jpacket["TT"] == "ST":
# #if not self.checkTranForBalance(jpacket):
# #return
# self.transactionMemory.add(packet)
# self.floodPacket.emit(packet, address, 0)
# return
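# Frozen balance = tokens locked by outstanding votes (freezeToVT per vote) plus the
# applicant deposit (freezeToAT) when the wallet is registered in "APPLICANTS".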
def getFreezeTokens(self, wallet, token_type = "VINCI"):
freezeForVotes = self.redis.zscore("UNTVOTES", wallet)
if freezeForVotes is None:
freezeForVotes = 0
freezeForVotes = freezeForVotes*self.freezeToVT
if self.redis.zscore("APPLICANTS", wallet) is None:
freezeForApplicant = 0
else:
freezeForApplicant = self.freezeToAT
return freezeForApplicant + freezeForVotes
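# Periodically archive completed transactions older than 12*60 blocks from Redis into
# MongoDB and drop half of the failed-transaction log.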
def garbageCollector(self):
bheight = self.mongo.vncchain.find().count()
if bheight is None:
bheight = 0
endBlock = bheight - 12*60
if endBlock < 0:
endBlock = 0
oldTxs = self.redis.zrangebyscore("COMPLETE TRANSACTIONS", '0', str(endBlock))
for tran in oldTxs:
print("SEARCH:", "TRANSACTIONS:" + tran.decode())
tranbody = self.redis.get("TRANSACTIONS:" + tran.decode())
if tranbody is None:
continue
jtranbody = json.loads(tranbody)
jtranbody.update({"_id": tran.decode()})
self.mongo.complete.save(jtranbody)
self.redis.delete("TRANSACTIONS:" + tran.decode())
self.redis.zrem("COMPLETE TRANSACTIONS", tran)
oldFTxs = self.redis.zrange("FAILED TRANSACTIONS", 0, -1)
for ftran in oldFTxs[0:int(len(oldFTxs)/2)]:
self.redis.zrem("FAILED TRANSACTIONS", ftran)
return True
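# Recompute the current fee: transactions stay free until the chain passes freeZoneHeight,
# after which the fee is stepped according to recent "COMPLETE TRANSACTIONS" volume.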
def updateFee(self):
#bheight = self.redis.zcard("VNCCHAIN")
bheight = self.mongo.vncchain.find().count()
if bheight is None:
bheight = 0
TrXcount = len(self.redis.zrange("COMPLETE TRANSACTIONS", bheight - 1000, bheight))/1000
if bheight > self.freeZoneHeight:
if TrXcount >= 10000:
self.fee = 0.0001
if TrXcount >= 5000 and TrXcount < 10000:
self.fee = 0.001
if TrXcount >= 3000 and TrXcount < 5000:
self.fee = 0.003
if TrXcount >= 100 and TrXcount < 3000:
self.fee = 0.005
if TrXcount < 100:
self.fee = 0.01
print("CURRENT FEE IN THIS TIME:", self.fee)
return
def beApplicant(self):
if self.stemAddress is None:
return False
packet = JsonPackets.wantBeApplicant(self.cryptor)
#print ("SEND BEAPP", self.stemAddress)
self.sendPacket.emit(self.stemAddress, packet, 1)
def checkTranForBalance(self, jtran:dict):
sender = jtran["SENDER"]
type = jtran["TTOKEN"]
count = jtran["CTOKEN"]
walletBalance = self.BALANCE.get(sender)
if count < 0:
return False
if type != self.currentTokenType:
return False
if walletBalance is None:
return False
if walletBalance < count:
return False
return True
def checkTranForBalanceAttrs(self, sender, type, count):
walletBalance = self.BALANCE.get(sender)
if count < 0:
return False
if type != self.currentTokenType:
return False
if walletBalance is None:
return False
if walletBalance < count:
return False
return True
def sliceFloat(self, money:float):
return "{0:.8f}".format(money)
def balanceMainWorkRedis(self, bchain:list):
for jblock in bchain:
#jblock = json.loads(block)
if jblock['TT'] == "BL":
trans = jblock['TRANSACTIONS']
if self.redis.zscore("BALANCE:VINCI", jblock['SENDER']) is None:
self.redis.zadd("BALANCE:VINCI", 0, jblock['SENDER'])
blockSenderBalance = self.redis.zscore("BALANCE:VINCI", jblock['SENDER'])
blockSenderBalance += self.blockReward
print("BALANCE_BHEIGHT", jblock['BHEIGHT'])
print("FREEZZE:", self.getFreezeTokens(jblock['SENDER']))
print("ADDING BALANCE VINCI TO VALIDATOR:", blockSenderBalance - self.getFreezeTokens(jblock['SENDER']), jblock['SENDER'])
self.redis.zadd("BALANCE:VINCI", blockSenderBalance, jblock['SENDER'])
for tran in trans:
if tran["TT"] == "ST" and self.currentTokenTypes.count(tran["TTOKEN"]) and float(tran["CTOKEN"]) > 0:
tokenPrefix = tran["TTOKEN"]
if tran['SENDER'] == tran['RECEIVER']:
continue
if self.redis.zscore("BALANCE:" + tokenPrefix, tran['SENDER']) is None:
self.redis.zadd("BALANCE:" + tokenPrefix, 0, tran['SENDER'])
if self.redis.zscore("BALANCE:" + tokenPrefix, tran['RECEIVER']) is None:
self.redis.zadd("BALANCE:" + tokenPrefix, 0, tran['RECEIVER'])
senderBalance = self.redis.zscore("BALANCE:" + tokenPrefix, tran['SENDER'])
receiverBalance = self.redis.zscore("BALANCE:" + tokenPrefix, tran["RECEIVER"])
senderBalance = senderBalance - float(tran["CTOKEN"])
receiverBalance = receiverBalance + float(tran["CTOKEN"])
self.redis.zadd("BALANCE:" + tokenPrefix, self.sliceFloat(senderBalance), tran["SENDER"])
self.redis.zadd("BALANCE:" + tokenPrefix, self.sliceFloat(receiverBalance), tran["RECEIVER"])
if tran["TT"] == "ET" and self.currentTokenTypes.count(tran["STT"]) and self.currentTokenTypes.count(tran["RTT"]) and float(tran["STC"]) > 0 and float(tran["RTC"]) > 0:
tokenSenderPrefix = tran["STT"]
tokenReceiverPrefix = tran["RTT"]
if tran['SENDER'] == tran['RECEIVER']:
continue
#MONEY SEND
if self.redis.zscore("BALANCE:" + tokenSenderPrefix, tran['SENDER']) is None:
self.redis.zadd("BALANCE:" + tokenSenderPrefix, 0, tran['SENDER'])
if self.redis.zscore("BALANCE:" + tokenSenderPrefix, tran['RECEIVER']) is None:
self.redis.zadd("BALANCE:" + tokenSenderPrefix, 0, tran['RECEIVER'])
senderBalance = self.redis.zscore("BALANCE:" + tokenSenderPrefix, tran['SENDER'])
receiverBalance = self.redis.zscore("BALANCE:" + tokenSenderPrefix, tran["RECEIVER"])
senderBalance = senderBalance - float(tran["STC"])
receiverBalance = receiverBalance + float(tran["STC"])
self.redis.zadd("BALANCE:" + tokenSenderPrefix, self.sliceFloat(senderBalance), tran["SENDER"])
self.redis.zadd("BALANCE:" + tokenSenderPrefix, self.sliceFloat(receiverBalance), tran["RECEIVER"])
#GOODS SEND
if self.redis.zscore("BALANCE:" + tokenReceiverPrefix, tran['RECEIVER']) is None:
self.redis.zadd("BALANCE:" + tokenReceiverPrefix, 0, tran['RECEIVER'])
if self.redis.zscore("BALANCE:" + tokenReceiverPrefix, tran['SENDER']) is None:
self.redis.zadd("BALANCE:" + tokenReceiverPrefix, 0, tran['SENDER'])
receiverBalance = self.redis.zscore("BALANCE:" + tokenReceiverPrefix, tran['RECEIVER'])
senderBalance = self.redis.zscore("BALANCE:" + tokenReceiverPrefix, tran["SENDER"])
receiverBalance = receiverBalance - float(tran["RTC"])
senderBalance = senderBalance + float(tran["RTC"])
self.redis.zadd("BALANCE:" + tokenReceiverPrefix, self.sliceFloat(receiverBalance), tran["RECEIVER"])
self.redis.zadd("BALANCE:" + tokenReceiverPrefix, self.sliceFloat(senderBalance), tran["SENDER"])
def checkBalance(self, updateLen = -1):
#bheight = self.redis.zcard("VNCCHAIN")
#pymongo_cursor = self.mongo.vncchain.find().sort("BHEIGHT", pymongo.ASCENDING).limit(1)
#bheight = dict(pymongo_cursor).get("BHEIGHT")
bheight = self.mongo.vncchain.find().count()
if updateLen == -1:
print("START:", self.lastCheckBlockBalance, "STOP:", bheight)
bchain = list(self.mongo.vncchain.find({"BHEIGHT": {"$gte": int(self.lastCheckBlockBalance)}}))
print("BCH", bchain)
#bchain = self.redis.zrange("VNCCHAIN", self.lastCheckBlockBalance, bheight)
self.lastCheckBlockBalance = bheight
self.balanceMainWorkRedis(bchain)
else: ### STEP MODE
step = self.lastCheckBlockBalance
while step < bheight:
nextStep = step + self.updateBalanceStep
if nextStep > bheight:
nextStep = bheight
#bchain = self.redis.zrange("VNCCHAIN", step, nextStep)
bchain = list(self.mongo.vncchain.find({"BHEIGHT": {"$gt": step, "$lt": nextStep}}))
step = nextStep
self.balanceMainWorkRedis(bchain)
self.lastCheckBlockBalance = nextStep
return
def run(self):
while True:
self.stackMutex.lock()
if len(self.packetStack):
packet = self.packetStack.pop(0)
else:
self.stackMutex.unlock()
continue
self.stackMutex.unlock()
address = packet[0]
packet = packet[1]
#if self.signChecker.checkTran(packet):  # Signature check!
self.handler(address, packet)
if __name__ == '__main__':
app = QCoreApplication(sys.argv)
import socket # To Retrieve our IP address for Test-net purposes
privateKey = "YOUR PRIVATE NODE KEY"
# for arg in sys.argv:
# if arg == "--privateKey":
# if (len(sys.argv) >= sys.argv.index(arg)):
# privateKey = sys.argv[sys.argv.index(arg) + 1]
# else:
# print("Value after --privateKey not found!")
# exit(1)
#
# if privateKey == None:
# print("Private key not found!")
# exit(2)
slave = slaveWorker(socket.gethostbyname(socket.gethostname()), privateKey)
netEngine = NetEngine()
app.aboutToQuit.connect(netEngine.onAboutToQuit)
netEngine.newDataPacket.connect(slave.appendPacketToStack)
slave.floodPacket.connect(netEngine.floodPacketSignal)
slave.sendPacket.connect(netEngine.sendPacketSignal)
slave.setPurifyList.connect(netEngine.setRemoteAddresses)
garbageCollectorTimer = QTimer()
garbageCollectorTimer.setInterval(slave.timeToGarbageCollector)
garbageCollectorTimer.timeout.connect(slave.garbageCollector)
garbageCollectorTimer.start()
slave.start()
netEngine.runReceiver.emit("0.0.0.0")
sys.exit(app.exec())
| 45.475891 | 442 | 0.523995 |
675e1044ca461ac0fedced82a66a52723f2ccf86 | 187 | py | Python | scripts/field/lightning_tuto_5_0.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/field/lightning_tuto_5_0.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/field/lightning_tuto_5_0.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # Created by MechAviv
# ID :: [927020090]
# Hidden Street : Black Mage's Temple
sm.reservedEffect("Effect/Direction8.img/lightningTutorial/Scene1")
sm.sendDelay(9000)
sm.warp(927020070) | 23.375 | 67 | 0.770053 |
86b8c0d968a15df70bfd8acdf5740a5c2a47bca2 | 1,221 | py | Python | profiles/migrations/0001_initial.py | jgarber623/openstates.org | 0c514c955f7ffbe079c77c3ec00345b20818ad04 | [
"MIT"
] | null | null | null | profiles/migrations/0001_initial.py | jgarber623/openstates.org | 0c514c955f7ffbe079c77c3ec00345b20818ad04 | [
"MIT"
] | null | null | null | profiles/migrations/0001_initial.py | jgarber623/openstates.org | 0c514c955f7ffbe079c77c3ec00345b20818ad04 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2020-01-21 17:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="Profile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("organization_name", models.CharField(max_length=100)),
("about", models.TextField()),
("feature_subscriptions", models.BooleanField(default=False)),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="profile",
to=settings.AUTH_USER_MODEL,
),
),
],
)
]
| 29.780488 | 78 | 0.473382 |
86d9bfe78a22d32d7cf7a24d5e04a0af0f52e606 | 515 | py | Python | ephyviewer/tests/test_eventlist.py | jpgill86/ephyviewer | 490c43c573c4d3473de6a9f3206eca2682e86816 | [
"MIT"
] | 30 | 2017-10-02T13:09:15.000Z | 2022-03-07T10:06:40.000Z | ephyviewer/tests/test_eventlist.py | samuelgarcia/ephyviewer | 469770eb7b5840523102b72e62f2d4fbe2bcc2ca | [
"MIT"
] | 106 | 2018-08-05T17:50:14.000Z | 2022-03-31T18:03:07.000Z | ephyviewer/tests/test_eventlist.py | samuelgarcia/ephyviewer | 469770eb7b5840523102b72e62f2d4fbe2bcc2ca | [
"MIT"
] | 6 | 2017-08-23T15:38:41.000Z | 2020-12-14T20:28:55.000Z | import ephyviewer
from ephyviewer.tests.testing_tools import make_fake_event_source
def test_eventlist(interactive=False):
source = make_fake_event_source()
app = ephyviewer.mkQApp()
view = ephyviewer.EventList(source=source, name='events')
win = ephyviewer.MainViewer(debug=True)
win.add_view(view)
if interactive:
win.show()
app.exec_()
else:
# close thread properly
win.close()
if __name__=='__main__':
test_eventlist(interactive=True)
| 18.392857 | 66 | 0.687379 |
4daaaf02a5bae5fcf78cb40387cec651ad253696 | 1,663 | py | Python | src/openprocurement/tender/openua/views/award_complaint.py | ProzorroUKR/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 10 | 2020-02-18T01:56:21.000Z | 2022-03-28T00:32:57.000Z | src/openprocurement/tender/openua/views/award_complaint.py | quintagroup/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 26 | 2018-07-16T09:30:44.000Z | 2021-02-02T17:51:30.000Z | src/openprocurement/tender/openua/views/award_complaint.py | ProzorroUKR/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 15 | 2019-08-08T10:50:47.000Z | 2022-02-05T14:13:36.000Z | # -*- coding: utf-8 -*-
from openprocurement.tender.core.views.award_complaint import (
BaseTenderAwardComplaintResource,
BaseTenderAwardClaimResource
)
from openprocurement.tender.core.views.complaint import (
BaseComplaintGetResource,
)
from openprocurement.tender.core.utils import optendersresource
@optendersresource(
name="aboveThresholdUA:Tender Award Complaints Get",
collection_path="/tenders/{tender_id}/awards/{award_id}/complaints",
path="/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}",
procurementMethodType="aboveThresholdUA",
request_method=["GET"],
description="Tender award complaints get",
)
class TenderUAAwardComplaintGetResource(BaseComplaintGetResource):
""""""
@optendersresource(
name="aboveThresholdUA:Tender Award Complaints",
collection_path="/tenders/{tender_id}/awards/{award_id}/complaints",
path="/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}",
procurementMethodType="aboveThresholdUA",
request_method=["POST", "PATCH"],
complaintType="complaint",
description="Tender award complaints",
)
class TenderUAAwardComplaintResource(BaseTenderAwardComplaintResource):
""" """
@optendersresource(
name="aboveThresholdUA:Tender Award Claims",
collection_path="/tenders/{tender_id}/awards/{award_id}/complaints",
path="/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}",
procurementMethodType="aboveThresholdUA",
request_method=["POST", "PATCH"],
complaintType="claim",
description="Tender award claims",
)
class TenderUAAwardClaimResource(BaseTenderAwardClaimResource):
""" """
| 34.645833 | 76 | 0.756464 |
5e92a878a8a2b6a2e5faa17cd19db2a31d8cd293 | 1,198 | py | Python | google/ads/googleads/v6/enums/types/user_list_prepopulation_status.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/enums/types/user_list_prepopulation_status.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/enums/types/user_list_prepopulation_status.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.enums",
marshal="google.ads.googleads.v6",
manifest={"UserListPrepopulationStatusEnum",},
)
class UserListPrepopulationStatusEnum(proto.Message):
r"""Indicates status of prepopulation based on the rule."""
class UserListPrepopulationStatus(proto.Enum):
r"""Enum describing possible user list prepopulation status."""
UNSPECIFIED = 0
UNKNOWN = 1
REQUESTED = 2
FINISHED = 3
FAILED = 4
__all__ = tuple(sorted(__protobuf__.manifest))
| 29.219512 | 74 | 0.716194 |
faa74f641da02dbdd7c833c7514b00ed9c514a52 | 7,816 | py | Python | adam/config_manager.py | moeyensj/adam_home | 7dbe661ed9a04e9621ec4f5c9a0a9682cc37c227 | [
"MIT"
] | 9 | 2019-05-24T06:01:18.000Z | 2020-12-12T22:05:38.000Z | adam/config_manager.py | moeyensj/adam_home | 7dbe661ed9a04e9621ec4f5c9a0a9682cc37c227 | [
"MIT"
] | 90 | 2018-11-09T22:33:17.000Z | 2021-04-28T05:04:09.000Z | adam/config_manager.py | moeyensj/adam_home | 7dbe661ed9a04e9621ec4f5c9a0a9682cc37c227 | [
"MIT"
] | 7 | 2019-03-22T04:41:31.000Z | 2021-03-31T23:29:33.000Z | """ADAM configuration manager"""
import os
import os.path
import xdg.BaseDirectory as xdgb
import yaml
# filename of the config file (w/o the full path)
ADAM_CONFIG_FN = "config"
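# Typical usage (a minimal sketch; the environment name "dev" and its keys are only
# examples of the nested "envs.<name>.*" layout documented on ConfigManager below):
#   cfg = ConfigManager()                 # read $ADAM_CONFIG or ~/.config/adam/config
#   token = cfg["envs.dev.token"]         # dotted access to nested keys
#   cfg["envs.dev.workspace"] = "my-workspace"
#   cfg.to_file()                         # atomically rewrite the config with 0o600 permissions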
def _load_raw_config(config_file=None):
"""Load ADAM config from default locations or ``config_file``
Locates and loads the configuration information for ADAM. If
``config_file`` is not None, loads it and returns the de-serialized
YAML. If ``config_file`` is None, follows the XDG Base Directory
specification to locate a file named ``$.../adam/config``, usually
``~/.config/adam/config``.
Parameters
----------
config_file : str
Path to config file to load, or None to search default locations.
Returns
-------
str
The filename of the config
dict
De-serialized configuration in the form of nested dictionaries.
"""
if config_file is None:
# see if location is overridden via the environment
config_file = os.environ.get("ADAM_CONFIG", None)
if config_file is None:
# get the default location (if exists)
config_dir = next(xdgb.load_config_paths("adam"), None)
if config_dir is not None:
def_config_file = os.path.join(config_dir, ADAM_CONFIG_FN)
if os.path.exists(def_config_file):
config_file = def_config_file
if config_file is None:
return "", {'envs': {}}
# Load the config file (if we have it)
with open(config_file) as fp:
return config_file, yaml.safe_load(fp)
def _store_raw_config(data, config_file=None):
"""Save ADAM config to default location or ``config_file``
Saves the configuration in ``data`` (nested dicts) to ``config_file``
(if given) or to the default user-writable location given by the
XDG Base Directory specification (typically ``~/.config/adam/config``).
If the file already exists, atomically replaces it with the new data.
Permissions on the saved file are set to 0o0600.
Parameters
----------
data : dict
Configuration data to save
config_file : str
Path to config file to save, or None to save to default location.
"""
# get the place to write to from the environment
if config_file is None:
# see if location is overridden via the environment
config_file = os.environ.get("ADAM_CONFIG", None)
# get place to write from XDG spec
if config_file is None:
config_dir = xdgb.save_config_path("adam")
config_file = os.path.join(config_dir, ADAM_CONFIG_FN)
# atomically replace the old file (if any) with the new one
# also ensure permissions are restrictive (since this file holds secrets)
config_file_tmp = config_file + "." + str(os.getpid())
fd = os.open(config_file_tmp, os.O_CREAT | os.O_WRONLY, mode=0o600)
with open(fd, "w") as fp:
yaml.dump(data, fp, indent=2)
try:
os.rename(config_file_tmp, config_file)
except OSError:
os.remove(config_file)
os.rename(config_file_tmp, config_file)
return config_file
class ConfigManager(object):
"""Configuration object for ADAM client
A dict-like object holding the loaded ADAM configuration file.
Individual items can be get/set/deleted via the ``[]`` notation.
The keys must be fully-qualified dot-separated names, such as::
conf["envs.dev.workspace"] = " .... "
When a key does not refer to a leaf node, returns a nested dict
of the key's children, i.e.::
conf["envs.dev"] = " .... "
returns ``dict(url=..., workspace=..., token=....)``.
"""
def __init__(self, file_name=None, raw_config=None):
"""Load the ADAM configuration
Loads ADAM configuration from ``file_name``, or default config file
if ``file_name==None``. If ``raw_config`` is given, uses its
contents to load the configuration (``file_name`` is ignored in that
case). The typical use is to instantiate this class with
``file_name`` and ``raw_config`` set to None (i.e., read from the
default config file).
Parameters
----------
file_name : str
Path to config file to load, or None to search default locations.
raw_config : dict
dict() of values to interpret as configuration data.
"""
if raw_config is not None:
self._source_filename, self._dest_filename, self._config = "", "", raw_config
else:
self._source_filename, self._config = _load_raw_config(file_name)
self._dest_filename = file_name
def __delitem__(self, key):
*parents, key = key.split('.')
c = self._config
for k in parents:
c = c[k]
del c[key]
def __getitem__(self, key):
c = self._config
for k in key.split('.'):
c = c[k]
return c
def __setitem__(self, key, value):
*parents, key = key.split('.')
cfg = self._config
for k in parents:
try:
cfg = cfg[k]
except KeyError:
cfg = cfg[k] = {}
cfg[key] = value
def get_config(self, environment=None):
"""Get configuration of an ADAM environment
If ``environment`` is given, equivalent to calling::
self[f"envs.{environment}"]
If ``environment`` is None, and ``self["default_env"]`` is set,
equivalent to calling::
self[f"envs.{self['default_env']}"]
If ``environment`` is None, and ``self["default_env"]`` is not set,
returns the first key in the ``self[envs]`` dict.
Parameters
----------
environment : str
environment name (e.g., ``prod`` or ``dev``)
Raises
------
KeyError
If the requested environment isn't found.
"""
# raises KeyError if environment not present, or
# a default environment is requested but not set
if environment is None:
# explicit default environment
environment = self._config.get('default_env', None)
if environment is None:
# first environment listed in the file
environment = next(iter(self._config['envs'].keys()))
return self._config['envs'][environment]
def set_config(self, name, cfg):
"""Set configuration of an ADAM environment
Equivalent to calling::
self[f"envs.{name}"] = cfg
Parameters
----------
name : str
environment name (e.g., ``prod`` or ``dev``)
cfg : dict
environment data
"""
if 'envs' not in self._config:
self._config['envs'] = {}
self._config['envs'][name] = cfg
def get_default_env(self):
return self._config.get('default_env', None)
def set_default_env(self, env_name):
if env_name in self._config['envs'].keys():
self._config['default_env'] = env_name
self.to_file()
def to_file(self, file_name=None):
"""Save configuration to ``file_name`` or the default location
Saves to location proscribed by XDG spec (typically ``~/.config/adam/config``)
or to ``file_name``, if it's not set to ``None``.
Parameters
----------
file_name : str
Path to config file to save, or None to save to default location.
"""
if file_name is None:
file_name = self._dest_filename
_store_raw_config(self._config, file_name)
def __str__(self):
ret = "# original config source: {}\n".format(self._source_filename)
ret += yaml.dump(self._config, indent=2)
return ret
| 31.516129 | 89 | 0.606704 |
81ead31f53ab2b94397401bdfca2895b5461cb84 | 62,930 | py | Python | venv/Lib/site-packages/h5py/tests/test_dataset.py | masterrey/SmartMachines | e48aff314b1171a13a39c3a41230d900bf090a1f | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/h5py/tests/test_dataset.py | masterrey/SmartMachines | e48aff314b1171a13a39c3a41230d900bf090a1f | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/h5py/tests/test_dataset.py | masterrey/SmartMachines | e48aff314b1171a13a39c3a41230d900bf090a1f | [
"Apache-2.0"
] | null | null | null | # This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Dataset testing operations.
Tests all dataset operations, including creation, with the exception of:
1. Slicing operations for read and write, handled by module test_slicing
2. Type conversion for read and write (currently untested)
"""
import pathlib
import sys
import numpy as np
import platform
import pytest
from .common import ut, TestCase
from .data_files import get_data_file_path
from h5py import File, Group, Dataset
from h5py._hl.base import is_empty_dataspace
from h5py import h5f, h5t
import h5py
import h5py._hl.selections as sel
class BaseDataset(TestCase):
def setUp(self):
self.f = File(self.mktemp(), 'w')
def tearDown(self):
if self.f:
self.f.close()
class TestRepr(BaseDataset):
"""
Feature: repr(Dataset) behaves sensibly
"""
def test_repr_open(self):
""" repr() works on live and dead datasets """
ds = self.f.create_dataset('foo', (4,))
self.assertIsInstance(repr(ds), str)
self.f.close()
self.assertIsInstance(repr(ds), str)
class TestCreateShape(BaseDataset):
"""
Feature: Datasets can be created from a shape only
"""
def test_create_scalar(self):
""" Create a scalar dataset """
dset = self.f.create_dataset('foo', ())
self.assertEqual(dset.shape, ())
def test_create_simple(self):
""" Create a size-1 dataset """
dset = self.f.create_dataset('foo', (1,))
self.assertEqual(dset.shape, (1,))
def test_create_integer(self):
""" Create a size-1 dataset with integer shape"""
dset = self.f.create_dataset('foo', 1)
self.assertEqual(dset.shape, (1,))
def test_create_extended(self):
""" Create an extended dataset """
dset = self.f.create_dataset('foo', (63,))
self.assertEqual(dset.shape, (63,))
self.assertEqual(dset.size, 63)
dset = self.f.create_dataset('bar', (6, 10))
self.assertEqual(dset.shape, (6, 10))
self.assertEqual(dset.size, (60))
def test_create_integer_extended(self):
""" Create an extended dataset """
dset = self.f.create_dataset('foo', 63)
self.assertEqual(dset.shape, (63,))
self.assertEqual(dset.size, 63)
dset = self.f.create_dataset('bar', (6, 10))
self.assertEqual(dset.shape, (6, 10))
self.assertEqual(dset.size, (60))
def test_default_dtype(self):
""" Confirm that the default dtype is float """
dset = self.f.create_dataset('foo', (63,))
self.assertEqual(dset.dtype, np.dtype('=f4'))
def test_missing_shape(self):
""" Missing shape raises TypeError """
with self.assertRaises(TypeError):
self.f.create_dataset('foo')
def test_long_double(self):
""" Confirm that the default dtype is float """
dset = self.f.create_dataset('foo', (63,), dtype=np.longdouble)
if platform.machine() in ['ppc64le']:
pytest.xfail("Storage of long double deactivated on %s" % platform.machine())
self.assertEqual(dset.dtype, np.longdouble)
@ut.skipIf(not hasattr(np, "complex256"), "No support for complex256")
def test_complex256(self):
""" Confirm that the default dtype is float """
dset = self.f.create_dataset('foo', (63,),
dtype=np.dtype('complex256'))
self.assertEqual(dset.dtype, np.dtype('complex256'))
def test_name_bytes(self):
dset = self.f.create_dataset(b'foo', (1,))
self.assertEqual(dset.shape, (1,))
dset2 = self.f.create_dataset(b'bar/baz', (2,))
self.assertEqual(dset2.shape, (2,))
class TestCreateData(BaseDataset):
"""
Feature: Datasets can be created from existing data
"""
def test_create_scalar(self):
""" Create a scalar dataset from existing array """
data = np.ones((), 'f')
dset = self.f.create_dataset('foo', data=data)
self.assertEqual(dset.shape, data.shape)
def test_create_extended(self):
""" Create an extended dataset from existing data """
data = np.ones((63,), 'f')
dset = self.f.create_dataset('foo', data=data)
self.assertEqual(dset.shape, data.shape)
def test_dataset_intermediate_group(self):
""" Create dataset with missing intermediate groups """
ds = self.f.create_dataset("/foo/bar/baz", shape=(10, 10), dtype='<i4')
self.assertIsInstance(ds, h5py.Dataset)
self.assertTrue("/foo/bar/baz" in self.f)
def test_reshape(self):
""" Create from existing data, and make it fit a new shape """
data = np.arange(30, dtype='f')
dset = self.f.create_dataset('foo', shape=(10, 3), data=data)
self.assertEqual(dset.shape, (10, 3))
self.assertArrayEqual(dset[...], data.reshape((10, 3)))
def test_appropriate_low_level_id(self):
" Binding Dataset to a non-DatasetID identifier fails with ValueError "
with self.assertRaises(ValueError):
Dataset(self.f['/'].id)
def check_h5_string(self, dset, cset, length):
tid = dset.id.get_type()
assert isinstance(tid, h5t.TypeStringID)
assert tid.get_cset() == cset
if length is None:
assert tid.is_variable_str()
else:
assert not tid.is_variable_str()
assert tid.get_size() == length
def test_create_bytestring(self):
""" Creating dataset with byte string yields vlen ASCII dataset """
def check_vlen_ascii(dset):
self.check_h5_string(dset, h5t.CSET_ASCII, length=None)
check_vlen_ascii(self.f.create_dataset('a', data=b'abc'))
check_vlen_ascii(self.f.create_dataset('b', data=[b'abc', b'def']))
check_vlen_ascii(self.f.create_dataset('c', data=[[b'abc'], [b'def']]))
check_vlen_ascii(self.f.create_dataset(
'd', data=np.array([b'abc', b'def'], dtype=object)
))
def test_create_np_s(self):
dset = self.f.create_dataset('a', data=np.array([b'abc', b'def'], dtype='S3'))
self.check_h5_string(dset, h5t.CSET_ASCII, length=3)
def test_create_strings(self):
def check_vlen_utf8(dset):
self.check_h5_string(dset, h5t.CSET_UTF8, length=None)
check_vlen_utf8(self.f.create_dataset('a', data='abc'))
check_vlen_utf8(self.f.create_dataset('b', data=['abc', 'def']))
check_vlen_utf8(self.f.create_dataset('c', data=[['abc'], ['def']]))
check_vlen_utf8(self.f.create_dataset(
'd', data=np.array(['abc', 'def'], dtype=object)
))
def test_create_np_u(self):
with self.assertRaises(TypeError):
self.f.create_dataset('a', data=np.array([b'abc', b'def'], dtype='U3'))
def test_empty_create_via_None_shape(self):
self.f.create_dataset('foo', dtype='f')
self.assertTrue(is_empty_dataspace(self.f['foo'].id))
def test_empty_create_via_Empty_class(self):
self.f.create_dataset('foo', data=h5py.Empty(dtype='f'))
self.assertTrue(is_empty_dataspace(self.f['foo'].id))
def test_create_incompatible_data(self):
# Shape tuple is incompatible with data
with self.assertRaises(ValueError):
self.f.create_dataset('bar', shape=4, data= np.arange(3))
class TestReadDirectly:
"""
Feature: Read data directly from Dataset into a Numpy array
"""
@pytest.mark.parametrize(
'source_shape,dest_shape,source_sel,dest_sel',
[
((100,), (100,), np.s_[0:10], np.s_[50:60]),
((70,), (100,), np.s_[50:60], np.s_[90:]),
((30, 10), (20, 20), np.s_[:20, :], np.s_[:, :10]),
((5, 7, 9), (6,), np.s_[2, :6, 3], np.s_[:]),
])
def test_read_direct(self, writable_file, source_shape, dest_shape, source_sel, dest_sel):
source_values = np.arange(np.product(source_shape), dtype="int64").reshape(source_shape)
dset = writable_file.create_dataset("dset", source_shape, data=source_values)
arr = np.full(dest_shape, -1, dtype="int64")
expected = arr.copy()
expected[dest_sel] = source_values[source_sel]
dset.read_direct(arr, source_sel, dest_sel)
np.testing.assert_array_equal(arr, expected)
def test_no_sel(self, writable_file):
dset = writable_file.create_dataset("dset", (10,), data=np.arange(10, dtype="int64"))
arr = np.ones((10,), dtype="int64")
dset.read_direct(arr)
np.testing.assert_array_equal(arr, np.arange(10, dtype="int64"))
def test_empty(self, writable_file):
empty_dset = writable_file.create_dataset("edset", dtype='int64')
arr = np.ones((100,), 'int64')
with pytest.raises(TypeError):
empty_dset.read_direct(arr, np.s_[0:10], np.s_[50:60])
def test_wrong_shape(self, writable_file):
dset = writable_file.create_dataset("dset", (100,), dtype='int64')
arr = np.ones((200,))
with pytest.raises(TypeError):
dset.read_direct(arr)
def test_not_c_contiguous(self, writable_file):
dset = writable_file.create_dataset("dset", (10, 10), dtype='int64')
arr = np.ones((10, 10), order='F')
with pytest.raises(TypeError):
dset.read_direct(arr)
class TestWriteDirectly:
"""
Feature: Write Numpy array directly into Dataset
"""
@pytest.mark.parametrize(
'source_shape,dest_shape,source_sel,dest_sel',
[
((100,), (100,), np.s_[0:10], np.s_[50:60]),
((70,), (100,), np.s_[50:60], np.s_[90:]),
((30, 10), (20, 20), np.s_[:20, :], np.s_[:, :10]),
((5, 7, 9), (6,), np.s_[2, :6, 3], np.s_[:]),
])
def test_write_direct(self, writable_file, source_shape, dest_shape, source_sel, dest_sel):
dset = writable_file.create_dataset('dset', dest_shape, dtype='int32', fillvalue=-1)
arr = np.arange(np.product(source_shape)).reshape(source_shape)
expected = np.full(dest_shape, -1, dtype='int32')
expected[dest_sel] = arr[source_sel]
dset.write_direct(arr, source_sel, dest_sel)
np.testing.assert_array_equal(dset[:], expected)
def test_empty(self, writable_file):
empty_dset = writable_file.create_dataset("edset", dtype='int64')
with pytest.raises(TypeError):
empty_dset.write_direct(np.ones((100,)), np.s_[0:10], np.s_[50:60])
def test_wrong_shape(self, writable_file):
dset = writable_file.create_dataset("dset", (100,), dtype='int64')
arr = np.ones((200,))
with pytest.raises(TypeError):
dset.write_direct(arr)
def test_not_c_contiguous(self, writable_file):
dset = writable_file.create_dataset("dset", (10, 10), dtype='int64')
arr = np.ones((10, 10), order='F')
with pytest.raises(TypeError):
dset.write_direct(arr)
class TestCreateRequire(BaseDataset):
"""
Feature: Datasets can be created only if they don't exist in the file
"""
def test_create(self):
""" Create new dataset with no conflicts """
dset = self.f.require_dataset('foo', (10, 3), 'f')
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.shape, (10, 3))
def test_create_existing(self):
""" require_dataset yields existing dataset """
dset = self.f.require_dataset('foo', (10, 3), 'f')
dset2 = self.f.require_dataset('foo', (10, 3), 'f')
self.assertEqual(dset, dset2)
def test_create_1D(self):
""" require_dataset with integer shape yields existing dataset"""
dset = self.f.require_dataset('foo', 10, 'f')
dset2 = self.f.require_dataset('foo', 10, 'f')
self.assertEqual(dset, dset2)
dset = self.f.require_dataset('bar', (10,), 'f')
dset2 = self.f.require_dataset('bar', 10, 'f')
self.assertEqual(dset, dset2)
dset = self.f.require_dataset('baz', 10, 'f')
dset2 = self.f.require_dataset(b'baz', (10,), 'f')
self.assertEqual(dset, dset2)
def test_shape_conflict(self):
""" require_dataset with shape conflict yields TypeError """
self.f.create_dataset('foo', (10, 3), 'f')
with self.assertRaises(TypeError):
self.f.require_dataset('foo', (10, 4), 'f')
def test_type_conflict(self):
""" require_dataset with object type conflict yields TypeError """
self.f.create_group('foo')
with self.assertRaises(TypeError):
self.f.require_dataset('foo', (10, 3), 'f')
def test_dtype_conflict(self):
""" require_dataset with dtype conflict (strict mode) yields TypeError
"""
dset = self.f.create_dataset('foo', (10, 3), 'f')
with self.assertRaises(TypeError):
self.f.require_dataset('foo', (10, 3), 'S10')
def test_dtype_exact(self):
""" require_dataset with exactly dtype match """
dset = self.f.create_dataset('foo', (10, 3), 'f')
dset2 = self.f.require_dataset('foo', (10, 3), 'f', exact=True)
self.assertEqual(dset, dset2)
def test_dtype_close(self):
""" require_dataset with convertible type succeeds (non-strict mode)
"""
dset = self.f.create_dataset('foo', (10, 3), 'i4')
dset2 = self.f.require_dataset('foo', (10, 3), 'i2', exact=False)
self.assertEqual(dset, dset2)
self.assertEqual(dset2.dtype, np.dtype('i4'))
class TestCreateChunked(BaseDataset):
"""
Feature: Datasets can be created by manually specifying chunks
"""
def test_create_chunks(self):
""" Create via chunks tuple """
dset = self.f.create_dataset('foo', shape=(100,), chunks=(10,))
self.assertEqual(dset.chunks, (10,))
def test_create_chunks_integer(self):
""" Create via chunks integer """
dset = self.f.create_dataset('foo', shape=(100,), chunks=10)
self.assertEqual(dset.chunks, (10,))
def test_chunks_mismatch(self):
""" Illegal chunk size raises ValueError """
with self.assertRaises(ValueError):
self.f.create_dataset('foo', shape=(100,), chunks=(200,))
def test_chunks_false(self):
""" Chunked format required for given storage options """
with self.assertRaises(ValueError):
self.f.create_dataset('foo', shape=(10,), maxshape=100, chunks=False)
def test_chunks_scalar(self):
""" Attempting to create chunked scalar dataset raises TypeError """
with self.assertRaises(TypeError):
self.f.create_dataset('foo', shape=(), chunks=(50,))
def test_auto_chunks(self):
""" Auto-chunking of datasets """
dset = self.f.create_dataset('foo', shape=(20, 100), chunks=True)
self.assertIsInstance(dset.chunks, tuple)
self.assertEqual(len(dset.chunks), 2)
def test_auto_chunks_abuse(self):
""" Auto-chunking with pathologically large element sizes """
dset = self.f.create_dataset('foo', shape=(3,), dtype='S100000000', chunks=True)
self.assertEqual(dset.chunks, (1,))
def test_scalar_assignment(self):
""" Test scalar assignment of chunked dataset """
dset = self.f.create_dataset('foo', shape=(3, 50, 50),
dtype=np.int32, chunks=(1, 50, 50))
# test assignment of selection smaller than chunk size
dset[1, :, 40] = 10
self.assertTrue(np.all(dset[1, :, 40] == 10))
# test assignment of selection equal to chunk size
dset[1] = 11
self.assertTrue(np.all(dset[1] == 11))
# test assignment of selection bigger than chunk size
dset[0:2] = 12
self.assertTrue(np.all(dset[0:2] == 12))
def test_auto_chunks_no_shape(self):
""" Auto-chunking of empty datasets not allowed"""
with pytest.raises(TypeError, match='Empty') as err:
self.f.create_dataset('foo', dtype='S100', chunks=True)
with pytest.raises(TypeError, match='Empty') as err:
self.f.create_dataset('foo', dtype='S100', maxshape=20)
class TestCreateFillvalue(BaseDataset):
"""
Feature: Datasets can be created with fill value
"""
def test_create_fillval(self):
""" Fill value is reflected in dataset contents """
dset = self.f.create_dataset('foo', (10,), fillvalue=4.0)
self.assertEqual(dset[0], 4.0)
self.assertEqual(dset[7], 4.0)
def test_property(self):
""" Fill value is recoverable via property """
dset = self.f.create_dataset('foo', (10,), fillvalue=3.0)
self.assertEqual(dset.fillvalue, 3.0)
self.assertNotIsInstance(dset.fillvalue, np.ndarray)
def test_property_none(self):
""" .fillvalue property works correctly if not set """
dset = self.f.create_dataset('foo', (10,))
self.assertEqual(dset.fillvalue, 0)
def test_compound(self):
""" Fill value works with compound types """
dt = np.dtype([('a', 'f4'), ('b', 'i8')])
v = np.ones((1,), dtype=dt)[0]
dset = self.f.create_dataset('foo', (10,), dtype=dt, fillvalue=v)
self.assertEqual(dset.fillvalue, v)
self.assertAlmostEqual(dset[4], v)
def test_exc(self):
""" Bogus fill value raises ValueError """
with self.assertRaises(ValueError):
dset = self.f.create_dataset('foo', (10,),
dtype=[('a', 'i'), ('b', 'f')], fillvalue=42)
class TestCreateNamedType(BaseDataset):
"""
Feature: Datasets created from an existing named type
"""
def test_named(self):
""" Named type object works and links the dataset to type """
self.f['type'] = np.dtype('f8')
dset = self.f.create_dataset('x', (100,), dtype=self.f['type'])
self.assertEqual(dset.dtype, np.dtype('f8'))
self.assertEqual(dset.id.get_type(), self.f['type'].id)
self.assertTrue(dset.id.get_type().committed())
@ut.skipIf('gzip' not in h5py.filters.encode, "DEFLATE is not installed")
class TestCreateGzip(BaseDataset):
"""
Feature: Datasets created with gzip compression
"""
def test_gzip(self):
""" Create with explicit gzip options """
dset = self.f.create_dataset('foo', (20, 30), compression='gzip',
compression_opts=9)
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 9)
def test_gzip_implicit(self):
""" Create with implicit gzip level (level 4) """
dset = self.f.create_dataset('foo', (20, 30), compression='gzip')
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 4)
def test_gzip_number(self):
""" Create with gzip level by specifying integer """
dset = self.f.create_dataset('foo', (20, 30), compression=7)
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 7)
original_compression_vals = h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS
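# with the legacy "integer means gzip level" mapping disabled, a bare integer compression value must be rejected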
try:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = tuple()
with self.assertRaises(ValueError):
dset = self.f.create_dataset('foo', (20, 30), compression=7)
finally:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = original_compression_vals
def test_gzip_exc(self):
""" Illegal gzip level (explicit or implicit) raises ValueError """
with self.assertRaises((ValueError, RuntimeError)):
self.f.create_dataset('foo', (20, 30), compression=14)
with self.assertRaises(ValueError):
self.f.create_dataset('foo', (20, 30), compression=-4)
with self.assertRaises(ValueError):
self.f.create_dataset('foo', (20, 30), compression='gzip',
compression_opts=14)
@ut.skipIf('gzip' not in h5py.filters.encode, "DEFLATE is not installed")
class TestCreateCompressionNumber(BaseDataset):
"""
Feature: Datasets created with a compression code
"""
def test_compression_number(self):
""" Create with compression number of gzip (h5py.h5z.FILTER_DEFLATE) and a compression level of 7"""
original_compression_vals = h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS
try:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = tuple()
dset = self.f.create_dataset('foo', (20, 30), compression=h5py.h5z.FILTER_DEFLATE, compression_opts=(7,))
finally:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = original_compression_vals
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 7)
def test_compression_number_invalid(self):
""" Create with invalid compression numbers """
with self.assertRaises(ValueError) as e:
self.f.create_dataset('foo', (20, 30), compression=-999)
self.assertIn("Invalid filter", str(e.exception))
with self.assertRaises(ValueError) as e:
self.f.create_dataset('foo', (20, 30), compression=100)
self.assertIn("Unknown compression", str(e.exception))
original_compression_vals = h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS
try:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = tuple()
# Using gzip compression requires a compression level specified in compression_opts
with self.assertRaises(IndexError):
self.f.create_dataset('foo', (20, 30), compression=h5py.h5z.FILTER_DEFLATE)
finally:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = original_compression_vals
@ut.skipIf('lzf' not in h5py.filters.encode, "LZF is not installed")
class TestCreateLZF(BaseDataset):
"""
Feature: Datasets created with LZF compression
"""
def test_lzf(self):
""" Create with explicit lzf """
dset = self.f.create_dataset('foo', (20, 30), compression='lzf')
self.assertEqual(dset.compression, 'lzf')
self.assertEqual(dset.compression_opts, None)
testdata = np.arange(100)
dset = self.f.create_dataset('bar', data=testdata, compression='lzf')
self.assertEqual(dset.compression, 'lzf')
self.assertEqual(dset.compression_opts, None)
self.f.flush() # Actually write to file
readdata = self.f['bar'][()]
self.assertArrayEqual(readdata, testdata)
def test_lzf_exc(self):
""" Giving lzf options raises ValueError """
with self.assertRaises(ValueError):
self.f.create_dataset('foo', (20, 30), compression='lzf',
compression_opts=4)
@ut.skipIf('szip' not in h5py.filters.encode, "SZIP is not installed")
class TestCreateSZIP(BaseDataset):
"""
Feature: Datasets created with SZIP compression
"""
def test_szip(self):
""" Create with explicit szip """
dset = self.f.create_dataset('foo', (20, 30), compression='szip',
compression_opts=('ec', 16))
@ut.skipIf('shuffle' not in h5py.filters.encode, "SHUFFLE is not installed")
class TestCreateShuffle(BaseDataset):
"""
Feature: Datasets can use shuffling filter
"""
def test_shuffle(self):
""" Enable shuffle filter """
dset = self.f.create_dataset('foo', (20, 30), shuffle=True)
self.assertTrue(dset.shuffle)
@ut.skipIf('fletcher32' not in h5py.filters.encode, "FLETCHER32 is not installed")
class TestCreateFletcher32(BaseDataset):
"""
Feature: Datasets can use the fletcher32 filter
"""
def test_fletcher32(self):
""" Enable fletcher32 filter """
dset = self.f.create_dataset('foo', (20, 30), fletcher32=True)
self.assertTrue(dset.fletcher32)
@ut.skipIf('scaleoffset' not in h5py.filters.encode, "SCALEOFFSET is not installed")
class TestCreateScaleOffset(BaseDataset):
"""
Feature: Datasets can use the scale/offset filter
"""
def test_float_fails_without_options(self):
""" Ensure that a scale factor is required for scaleoffset compression of floating point data """
with self.assertRaises(ValueError):
dset = self.f.create_dataset('foo', (20, 30), dtype=float, scaleoffset=True)
def test_non_integer(self):
""" Check when scaleoffset is negetive"""
with self.assertRaises(ValueError):
dset = self.f.create_dataset('foo', (20, 30), dtype=float, scaleoffset=-0.1)
def test_unsupport_dtype(self):
""" Check when dtype is unsupported type"""
with self.assertRaises(TypeError):
dset = self.f.create_dataset('foo', (20, 30), dtype=bool, scaleoffset=True)
def test_float(self):
""" Scaleoffset filter works for floating point data """
scalefac = 4
shape = (100, 300)
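# values spread over +/- 10 * 10**scalefac; the scale/offset filter keeps scalefac decimal digits,
# so the round trip matches to 10**-scalefac precision but is not bit-exact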
range = 20 * 10 ** scalefac
testdata = (np.random.rand(*shape) - 0.5) * range
dset = self.f.create_dataset('foo', shape, dtype=float, scaleoffset=scalefac)
# Dataset reports that scaleoffset is in use
assert dset.scaleoffset is not None
# Dataset round-trips
dset[...] = testdata
filename = self.f.filename
self.f.close()
self.f = h5py.File(filename, 'r')
readdata = self.f['foo'][...]
# Test that data round-trips to requested precision
self.assertArrayEqual(readdata, testdata, precision=10 ** (-scalefac))
# Test that the filter is actually active (i.e. compression is lossy)
assert not (readdata == testdata).all()
def test_int(self):
""" Scaleoffset filter works for integer data with default precision """
nbits = 12
shape = (100, 300)
testdata = np.random.randint(0, 2 ** nbits - 1, size=shape)
# Create dataset; note omission of nbits (for library-determined precision)
dset = self.f.create_dataset('foo', shape, dtype=int, scaleoffset=True)
# Dataset reports scaleoffset enabled
assert dset.scaleoffset is not None
# Data round-trips correctly and identically
dset[...] = testdata
filename = self.f.filename
self.f.close()
self.f = h5py.File(filename, 'r')
readdata = self.f['foo'][...]
self.assertArrayEqual(readdata, testdata)
def test_int_with_minbits(self):
""" Scaleoffset filter works for integer data with specified precision """
nbits = 12
shape = (100, 300)
testdata = np.random.randint(0, 2 ** nbits, size=shape)
dset = self.f.create_dataset('foo', shape, dtype=int, scaleoffset=nbits)
# Dataset reports scaleoffset enabled with correct precision
self.assertTrue(dset.scaleoffset == 12)
# Data round-trips correctly
dset[...] = testdata
filename = self.f.filename
self.f.close()
self.f = h5py.File(filename, 'r')
readdata = self.f['foo'][...]
self.assertArrayEqual(readdata, testdata)
def test_int_with_minbits_lossy(self):
""" Scaleoffset filter works for integer data with specified precision """
nbits = 12
shape = (100, 300)
testdata = np.random.randint(0, 2 ** (nbits + 1) - 1, size=shape)
dset = self.f.create_dataset('foo', shape, dtype=int, scaleoffset=nbits)
# Dataset reports scaleoffset enabled with correct precision
self.assertTrue(dset.scaleoffset == 12)
# Data can be written and read
dset[...] = testdata
filename = self.f.filename
self.f.close()
self.f = h5py.File(filename, 'r')
readdata = self.f['foo'][...]
# Compression is lossy
assert not (readdata == testdata).all()
class TestExternal(BaseDataset):
"""
Feature: Datasets with the external storage property
"""
def test_contents(self):
""" Create and access an external dataset """
shape = (6, 100)
testdata = np.random.random(shape)
# create a dataset in an external file and set it
ext_file = self.mktemp()
external = [(ext_file, 0, h5f.UNLIMITED)]
dset = self.f.create_dataset('foo', shape, dtype=testdata.dtype, external=external)
dset[...] = testdata
assert dset.external is not None
# verify file's existence, size, and contents
with open(ext_file, 'rb') as fid:
contents = fid.read()
assert contents == testdata.tobytes()
def test_name_str(self):
""" External argument may be a file name str only """
self.f.create_dataset('foo', (6, 100), external=self.mktemp())
def test_name_path(self):
""" External argument may be a file name path only """
self.f.create_dataset('foo', (6, 100),
external=pathlib.Path(self.mktemp()))
def test_iter_multi(self):
""" External argument may be an iterable of multiple tuples """
ext_file = self.mktemp()
N = 100
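# each external entry is a (file name, byte offset, size) tuple; here the storage is split into
# N contiguous 1000-byte segments at increasing offsets within the same file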
external = iter((ext_file, x * 1000, 1000) for x in range(N))
dset = self.f.create_dataset('poo', (6, 100), external=external)
assert len(dset.external) == N
def test_invalid(self):
""" Test with invalid external lists """
shape = (6, 100)
ext_file = self.mktemp()
for exc_type, external in [
(TypeError, [ext_file]),
(TypeError, [ext_file, 0]),
(TypeError, [ext_file, 0, h5f.UNLIMITED]),
(ValueError, [(ext_file,)]),
(ValueError, [(ext_file, 0)]),
(ValueError, [(ext_file, 0, h5f.UNLIMITED, 0)]),
(TypeError, [(ext_file, 0, "h5f.UNLIMITED")]),
]:
with self.assertRaises(exc_type):
self.f.create_dataset('foo', shape, external=external)
class TestAutoCreate(BaseDataset):
"""
Feature: Datasets auto-created from data produce the correct types
"""
def assert_string_type(self, ds, cset, variable=True):
tid = ds.id.get_type()
self.assertEqual(type(tid), h5py.h5t.TypeStringID)
self.assertEqual(tid.get_cset(), cset)
if variable:
assert tid.is_variable_str()
def test_vlen_bytes(self):
"""Assigning byte strings produces a vlen string ASCII dataset """
self.f['x'] = b"Hello there"
self.assert_string_type(self.f['x'], h5py.h5t.CSET_ASCII)
self.f['y'] = [b"a", b"bc"]
self.assert_string_type(self.f['y'], h5py.h5t.CSET_ASCII)
self.f['z'] = np.array([b"a", b"bc"], dtype=np.object_)
self.assert_string_type(self.f['z'], h5py.h5t.CSET_ASCII)
def test_vlen_unicode(self):
"""Assigning unicode strings produces a vlen string UTF-8 dataset """
self.f['x'] = "Hello there" + chr(0x2034)
self.assert_string_type(self.f['x'], h5py.h5t.CSET_UTF8)
self.f['y'] = ["a", "bc"]
self.assert_string_type(self.f['y'], h5py.h5t.CSET_UTF8)
# 2D array; this only works with an array, not nested lists
self.f['z'] = np.array([["a", "bc"]], dtype=np.object_)
self.assert_string_type(self.f['z'], h5py.h5t.CSET_UTF8)
def test_string_fixed(self):
""" Assignment of fixed-length byte string produces a fixed-length
ascii dataset """
self.f['x'] = np.string_("Hello there")
ds = self.f['x']
self.assert_string_type(ds, h5py.h5t.CSET_ASCII, variable=False)
self.assertEqual(ds.id.get_type().get_size(), 11)
class TestCreateLike(BaseDataset):
def test_no_chunks(self):
self.f['lol'] = np.arange(25).reshape(5, 5)
self.f.create_dataset_like('like_lol', self.f['lol'])
dslike = self.f['like_lol']
self.assertEqual(dslike.shape, (5, 5))
self.assertIs(dslike.chunks, None)
def test_track_times(self):
orig = self.f.create_dataset('honda', data=np.arange(12),
track_times=True)
self.assertNotEqual(0, h5py.h5g.get_objinfo(orig._id).mtime)
similar = self.f.create_dataset_like('hyundai', orig)
self.assertNotEqual(0, h5py.h5g.get_objinfo(similar._id).mtime)
orig = self.f.create_dataset('ibm', data=np.arange(12),
track_times=False)
self.assertEqual(0, h5py.h5g.get_objinfo(orig._id).mtime)
similar = self.f.create_dataset_like('lenovo', orig)
self.assertEqual(0, h5py.h5g.get_objinfo(similar._id).mtime)
def test_maxshape(self):
""" Test when other.maxshape != other.shape """
other = self.f.create_dataset('other', (10,), maxshape=20)
similar = self.f.create_dataset_like('sim', other)
self.assertEqual(similar.shape, (10,))
self.assertEqual(similar.maxshape, (20,))
class TestChunkIterator(BaseDataset):
def test_no_chunks(self):
dset = self.f.create_dataset("foo", ())
with self.assertRaises(TypeError):
dset.iter_chunks()
def test_1d(self):
dset = self.f.create_dataset("foo", (100,), chunks=(32,))
expected = ((slice(0,32,1),), (slice(32,64,1),), (slice(64,96,1),),
(slice(96,100,1),))
self.assertEqual(list(dset.iter_chunks()), list(expected))
expected = ((slice(50,64,1),), (slice(64,96,1),), (slice(96,97,1),))
self.assertEqual(list(dset.iter_chunks(np.s_[50:97])), list(expected))
def test_2d(self):
dset = self.f.create_dataset("foo", (100,100), chunks=(32,64))
expected = ((slice(0, 32, 1), slice(0, 64, 1)), (slice(0, 32, 1),
slice(64, 100, 1)), (slice(32, 64, 1), slice(0, 64, 1)),
(slice(32, 64, 1), slice(64, 100, 1)), (slice(64, 96, 1),
slice(0, 64, 1)), (slice(64, 96, 1), slice(64, 100, 1)),
(slice(96, 100, 1), slice(0, 64, 1)), (slice(96, 100, 1),
slice(64, 100, 1)))
self.assertEqual(list(dset.iter_chunks()), list(expected))
expected = ((slice(48, 52, 1), slice(40, 50, 1)),)
self.assertEqual(list(dset.iter_chunks(np.s_[48:52,40:50])), list(expected))
class TestResize(BaseDataset):
"""
Feature: Datasets created with "maxshape" may be resized
"""
def test_create(self):
""" Create dataset with "maxshape" """
dset = self.f.create_dataset('foo', (20, 30), maxshape=(20, 60))
self.assertIsNot(dset.chunks, None)
self.assertEqual(dset.maxshape, (20, 60))
def test_create_1D(self):
""" Create dataset with "maxshape" using integer maxshape"""
dset = self.f.create_dataset('foo', (20,), maxshape=20)
self.assertIsNot(dset.chunks, None)
self.assertEqual(dset.maxshape, (20,))
dset = self.f.create_dataset('bar', 20, maxshape=20)
self.assertEqual(dset.maxshape, (20,))
def test_resize(self):
""" Datasets may be resized up to maxshape """
dset = self.f.create_dataset('foo', (20, 30), maxshape=(20, 60))
self.assertEqual(dset.shape, (20, 30))
dset.resize((20, 50))
self.assertEqual(dset.shape, (20, 50))
dset.resize((20, 60))
self.assertEqual(dset.shape, (20, 60))
def test_resize_1D(self):
""" Datasets may be resized up to maxshape using integer maxshape"""
dset = self.f.create_dataset('foo', 20, maxshape=40)
self.assertEqual(dset.shape, (20,))
dset.resize((30,))
self.assertEqual(dset.shape, (30,))
def test_resize_over(self):
""" Resizing past maxshape triggers an exception """
dset = self.f.create_dataset('foo', (20, 30), maxshape=(20, 60))
with self.assertRaises(Exception):
dset.resize((20, 70))
def test_resize_nonchunked(self):
""" Resizing non-chunked dataset raises TypeError """
dset = self.f.create_dataset("foo", (20, 30))
with self.assertRaises(TypeError):
dset.resize((20, 60))
def test_resize_axis(self):
""" Resize specified axis """
dset = self.f.create_dataset('foo', (20, 30), maxshape=(20, 60))
dset.resize(50, axis=1)
self.assertEqual(dset.shape, (20, 50))
def test_axis_exc(self):
""" Illegal axis raises ValueError """
dset = self.f.create_dataset('foo', (20, 30), maxshape=(20, 60))
with self.assertRaises(ValueError):
dset.resize(50, axis=2)
def test_zero_dim(self):
""" Allow zero-length initial dims for unlimited axes (issue 111) """
dset = self.f.create_dataset('foo', (15, 0), maxshape=(15, None))
self.assertEqual(dset.shape, (15, 0))
self.assertEqual(dset.maxshape, (15, None))
class TestDtype(BaseDataset):
"""
Feature: Dataset dtype is available as .dtype property
"""
def test_dtype(self):
""" Retrieve dtype from dataset """
dset = self.f.create_dataset('foo', (5,), '|S10')
self.assertEqual(dset.dtype, np.dtype('|S10'))
class TestLen(BaseDataset):
"""
Feature: Size of first axis is available via Python's len
"""
def test_len(self):
""" Python len() (under 32 bits) """
dset = self.f.create_dataset('foo', (312, 15))
self.assertEqual(len(dset), 312)
def test_len_big(self):
""" Python len() vs Dataset.len() """
dset = self.f.create_dataset('foo', (2 ** 33, 15))
self.assertEqual(dset.shape, (2 ** 33, 15))
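# on a 32-bit build len() cannot return 2**33 (it overflows Py_ssize_t), so OverflowError is expected;
# Dataset.len() works regardless of build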
if sys.maxsize == 2 ** 31 - 1:
with self.assertRaises(OverflowError):
len(dset)
else:
self.assertEqual(len(dset), 2 ** 33)
self.assertEqual(dset.len(), 2 ** 33)
class TestIter(BaseDataset):
"""
Feature: Iterating over a dataset yields rows
"""
def test_iter(self):
""" Iterating over a dataset yields rows """
data = np.arange(30, dtype='f').reshape((10, 3))
dset = self.f.create_dataset('foo', data=data)
for x, y in zip(dset, data):
self.assertEqual(len(x), 3)
self.assertArrayEqual(x, y)
def test_iter_scalar(self):
""" Iterating over scalar dataset raises TypeError """
dset = self.f.create_dataset('foo', shape=())
with self.assertRaises(TypeError):
[x for x in dset]
class TestStrings(BaseDataset):
"""
Feature: Datasets created with vlen and fixed datatypes correctly
translate to and from HDF5
"""
def test_vlen_bytes(self):
""" Vlen bytes dataset maps to vlen ascii in the file """
dt = h5py.string_dtype(encoding='ascii')
ds = self.f.create_dataset('x', (100,), dtype=dt)
tid = ds.id.get_type()
self.assertEqual(type(tid), h5py.h5t.TypeStringID)
self.assertEqual(tid.get_cset(), h5py.h5t.CSET_ASCII)
string_info = h5py.check_string_dtype(ds.dtype)
self.assertEqual(string_info.encoding, 'ascii')
def test_vlen_unicode(self):
""" Vlen unicode dataset maps to vlen utf-8 in the file """
dt = h5py.string_dtype()
ds = self.f.create_dataset('x', (100,), dtype=dt)
tid = ds.id.get_type()
self.assertEqual(type(tid), h5py.h5t.TypeStringID)
self.assertEqual(tid.get_cset(), h5py.h5t.CSET_UTF8)
string_info = h5py.check_string_dtype(ds.dtype)
self.assertEqual(string_info.encoding, 'utf-8')
def test_fixed_ascii(self):
""" Fixed-length bytes dataset maps to fixed-length ascii in the file
"""
dt = np.dtype("|S10")
ds = self.f.create_dataset('x', (100,), dtype=dt)
tid = ds.id.get_type()
self.assertEqual(type(tid), h5py.h5t.TypeStringID)
self.assertFalse(tid.is_variable_str())
self.assertEqual(tid.get_size(), 10)
self.assertEqual(tid.get_cset(), h5py.h5t.CSET_ASCII)
string_info = h5py.check_string_dtype(ds.dtype)
self.assertEqual(string_info.encoding, 'ascii')
self.assertEqual(string_info.length, 10)
def test_fixed_utf8(self):
dt = h5py.string_dtype(encoding='utf-8', length=5)
ds = self.f.create_dataset('x', (100,), dtype=dt)
tid = ds.id.get_type()
self.assertEqual(tid.get_cset(), h5py.h5t.CSET_UTF8)
s = 'cù'
ds[0] = s.encode('utf-8')
ds[1] = s
ds[2:4] = [s, s]
ds[4:6] = np.array([s, s], dtype=object)
ds[6:8] = np.array([s.encode('utf-8')] * 2, dtype=dt)
with self.assertRaises(TypeError):
ds[8:10] = np.array([s, s], dtype='U')
np.testing.assert_array_equal(ds[:8], np.array([s.encode('utf-8')] * 8, dtype='S'))
def test_fixed_unicode(self):
""" Fixed-length unicode datasets are unsupported (raise TypeError) """
dt = np.dtype("|U10")
with self.assertRaises(TypeError):
ds = self.f.create_dataset('x', (100,), dtype=dt)
def test_roundtrip_vlen_bytes(self):
""" writing and reading to vlen bytes dataset preserves type and content
"""
dt = h5py.string_dtype(encoding='ascii')
ds = self.f.create_dataset('x', (100,), dtype=dt)
data = b"Hello\xef"
ds[0] = data
out = ds[0]
self.assertEqual(type(out), bytes)
self.assertEqual(out, data)
def test_roundtrip_fixed_bytes(self):
""" Writing to and reading from fixed-length bytes dataset preserves
type and content """
dt = np.dtype("|S10")
ds = self.f.create_dataset('x', (100,), dtype=dt)
data = b"Hello\xef"
ds[0] = data
out = ds[0]
self.assertEqual(type(out), np.string_)
self.assertEqual(out, data)
def test_retrieve_vlen_unicode(self):
dt = h5py.string_dtype()
ds = self.f.create_dataset('x', (10,), dtype=dt)
data = "fàilte"
ds[0] = data
self.assertIsInstance(ds[0], bytes)
out = ds.asstr()[0]
self.assertIsInstance(out, str)
self.assertEqual(out, data)
def test_asstr(self):
ds = self.f.create_dataset('x', (10,), dtype=h5py.string_dtype())
data = "fàilte"
ds[0] = data
strwrap1 = ds.asstr('ascii')
with self.assertRaises(UnicodeDecodeError):
out = strwrap1[0]
# Different errors parameter
self.assertEqual(ds.asstr('ascii', 'ignore')[0], 'filte')
# latin-1 will decode it but give the wrong text
self.assertNotEqual(ds.asstr('latin-1')[0], data)
# len of ds
self.assertEqual(10, len(ds.asstr()))
# Array output
np.testing.assert_array_equal(
ds.asstr()[:1], np.array([data], dtype=object)
)
def test_asstr_fixed(self):
dt = h5py.string_dtype(length=5)
ds = self.f.create_dataset('x', (10,), dtype=dt)
data = 'cù'
ds[0] = np.array(data.encode('utf-8'), dtype=dt)
self.assertIsInstance(ds[0], np.bytes_)
out = ds.asstr()[0]
self.assertIsInstance(out, str)
self.assertEqual(out, data)
# Different errors parameter
self.assertEqual(ds.asstr('ascii', 'ignore')[0], 'c')
# latin-1 will decode it but give the wrong text
self.assertNotEqual(ds.asstr('latin-1')[0], data)
# Array output
np.testing.assert_array_equal(
ds.asstr()[:1], np.array([data], dtype=object)
)
def test_unicode_write_error(self):
"""Encoding error when writing a non-ASCII string to an ASCII vlen dataset"""
dt = h5py.string_dtype('ascii')
ds = self.f.create_dataset('x', (100,), dtype=dt)
data = "fàilte"
with self.assertRaises(UnicodeEncodeError):
ds[0] = data
def test_unicode_write_bytes(self):
""" Writing valid utf-8 byte strings to a unicode vlen dataset is OK
"""
dt = h5py.string_dtype()
ds = self.f.create_dataset('x', (100,), dtype=dt)
data = (u"Hello there" + chr(0x2034)).encode('utf8')
ds[0] = data
out = ds[0]
self.assertEqual(type(out), bytes)
self.assertEqual(out, data)
def test_vlen_bytes_write_ascii_str(self):
""" Writing an ascii str to ascii vlen dataset is OK
"""
dt = h5py.string_dtype('ascii')
ds = self.f.create_dataset('x', (100,), dtype=dt)
data = "ASCII string"
ds[0] = data
out = ds[0]
self.assertEqual(type(out), bytes)
self.assertEqual(out, data.encode('ascii'))
class TestCompound(BaseDataset):
"""
Feature: Compound types correctly round-trip
"""
def test_rt(self):
""" Compound types are read back in correct order (issue 236)"""
dt = np.dtype([ ('weight', np.float64),
('cputime', np.float64),
('walltime', np.float64),
('parents_offset', np.uint32),
('n_parents', np.uint32),
('status', np.uint8),
('endpoint_type', np.uint8), ])
testdata = np.ndarray((16,), dtype=dt)
for key in dt.fields:
testdata[key] = np.random.random((16,)) * 100
self.f['test'] = testdata
outdata = self.f['test'][...]
self.assertTrue(np.all(outdata == testdata))
self.assertEqual(outdata.dtype, testdata.dtype)
def test_assign(self):
dt = np.dtype([ ('weight', (np.float64, 3)),
('endpoint_type', np.uint8), ])
testdata = np.ndarray((16,), dtype=dt)
for key in dt.fields:
testdata[key] = np.random.random(size=testdata[key].shape) * 100
ds = self.f.create_dataset('test', (16,), dtype=dt)
for key in dt.fields:
ds[key] = testdata[key]
outdata = self.f['test'][...]
self.assertTrue(np.all(outdata == testdata))
self.assertEqual(outdata.dtype, testdata.dtype)
def test_fields(self):
dt = np.dtype([
('x', np.float64),
('y', np.float64),
('z', np.float64),
])
testdata = np.ndarray((16,), dtype=dt)
for key in dt.fields:
testdata[key] = np.random.random((16,)) * 100
self.f['test'] = testdata
# Extract multiple fields
np.testing.assert_array_equal(
self.f['test'].fields(['x', 'y'])[:], testdata[['x', 'y']]
)
# Extract single field
np.testing.assert_array_equal(
self.f['test'].fields('x')[:], testdata['x']
)
# Check len() on fields wrapper
assert len(self.f['test'].fields('x')) == 16
class TestSubarray(BaseDataset):
def test_write_list(self):
ds = self.f.create_dataset("a", (1,), dtype="3int8")
ds[0] = [1, 2, 3]
np.testing.assert_array_equal(ds[:], [[1, 2, 3]])
ds[:] = [[4, 5, 6]]
np.testing.assert_array_equal(ds[:], [[4, 5, 6]])
def test_write_array(self):
ds = self.f.create_dataset("a", (1,), dtype="3int8")
ds[0] = np.array([1, 2, 3])
np.testing.assert_array_equal(ds[:], [[1, 2, 3]])
ds[:] = np.array([[4, 5, 6]])
np.testing.assert_array_equal(ds[:], [[4, 5, 6]])
class TestEnum(BaseDataset):
"""
Feature: Enum datatype info is preserved, read/write as integer
"""
EDICT = {'RED': 0, 'GREEN': 1, 'BLUE': 42}
def test_create(self):
""" Enum datasets can be created and type correctly round-trips """
dt = h5py.enum_dtype(self.EDICT, basetype='i')
ds = self.f.create_dataset('x', (100, 100), dtype=dt)
dt2 = ds.dtype
dict2 = h5py.check_enum_dtype(dt2)
self.assertEqual(dict2, self.EDICT)
def test_readwrite(self):
""" Enum datasets can be read/written as integers """
dt = h5py.enum_dtype(self.EDICT, basetype='i4')
ds = self.f.create_dataset('x', (100, 100), dtype=dt)
ds[35, 37] = 42
ds[1, :] = 1
self.assertEqual(ds[35, 37], 42)
self.assertArrayEqual(ds[1, :], np.array((1,) * 100, dtype='i4'))
class TestFloats(BaseDataset):
"""
Test support for mini and extended-precision floats
"""
def _exectest(self, dt):
dset = self.f.create_dataset('x', (100,), dtype=dt)
self.assertEqual(dset.dtype, dt)
data = np.ones((100,), dtype=dt)
dset[...] = data
self.assertArrayEqual(dset[...], data)
@ut.skipUnless(hasattr(np, 'float16'), "NumPy float16 support required")
def test_mini(self):
""" Mini-floats round trip """
self._exectest(np.dtype('float16'))
# TODO: move these tests to test_h5t
def test_mini_mapping(self):
""" Test mapping for float16 """
if hasattr(np, 'float16'):
self.assertEqual(h5t.IEEE_F16LE.dtype, np.dtype('<f2'))
else:
self.assertEqual(h5t.IEEE_F16LE.dtype, np.dtype('<f4'))
class TestTrackTimes(BaseDataset):
"""
Feature: track_times
"""
def test_disable_track_times(self):
""" check that when track_times=False, the time stamp=0 (Jan 1, 1970) """
ds = self.f.create_dataset('foo', (4,), track_times=False)
ds_mtime = h5py.h5g.get_objinfo(ds._id).mtime
self.assertEqual(0, ds_mtime)
def test_invalid_track_times(self):
""" check that when give track_times an invalid value """
with self.assertRaises(TypeError):
self.f.create_dataset('foo', (4,), track_times='null')
class TestZeroShape(BaseDataset):
"""
Features of datasets with (0,)-shape axes
"""
def test_array_conversion(self):
""" Empty datasets can be converted to NumPy arrays """
ds = self.f.create_dataset('x', 0, maxshape=None)
self.assertEqual(ds.shape, np.array(ds).shape)
ds = self.f.create_dataset('y', (0,), maxshape=(None,))
self.assertEqual(ds.shape, np.array(ds).shape)
ds = self.f.create_dataset('z', (0, 0), maxshape=(None, None))
self.assertEqual(ds.shape, np.array(ds).shape)
def test_reading(self):
""" Slicing into empty datasets works correctly """
dt = [('a', 'f'), ('b', 'i')]
ds = self.f.create_dataset('x', (0,), dtype=dt, maxshape=(None,))
arr = np.empty((0,), dtype=dt)
self.assertEqual(ds[...].shape, arr.shape)
self.assertEqual(ds[...].dtype, arr.dtype)
self.assertEqual(ds[()].shape, arr.shape)
self.assertEqual(ds[()].dtype, arr.dtype)
# https://github.com/h5py/h5py/issues/1492
empty_regionref_xfail = pytest.mark.xfail(
h5py.version.hdf5_version_tuple == (1, 10, 6),
reason="Issue with empty region refs in HDF5 1.10.6",
)
class TestRegionRefs(BaseDataset):
"""
Various features of region references
"""
def setUp(self):
BaseDataset.setUp(self)
self.data = np.arange(100 * 100).reshape((100, 100))
self.dset = self.f.create_dataset('x', data=self.data)
self.dset[...] = self.data
def test_create_ref(self):
""" Region references can be used as slicing arguments """
slic = np.s_[25:35, 10:100:5]
ref = self.dset.regionref[slic]
self.assertArrayEqual(self.dset[ref], self.data[slic])
@empty_regionref_xfail
def test_empty_region(self):
ref = self.dset.regionref[:0]
out = self.dset[ref]
assert out.size == 0
# Ideally we should preserve shape (0, 100), but it seems this is lost.
@empty_regionref_xfail
def test_scalar_dataset(self):
ds = self.f.create_dataset("scalar", data=1.0, dtype='f4')
sid = h5py.h5s.create(h5py.h5s.SCALAR)
# Deselected
sid.select_none()
ref = h5py.h5r.create(ds.id, b'.', h5py.h5r.DATASET_REGION, sid)
assert ds[ref] == h5py.Empty(np.dtype('f4'))
# Selected
sid.select_all()
ref = h5py.h5r.create(ds.id, b'.', h5py.h5r.DATASET_REGION, sid)
assert ds[ref] == ds[()]
def test_ref_shape(self):
""" Region reference shape and selection shape """
slic = np.s_[25:35, 10:100:5]
ref = self.dset.regionref[slic]
self.assertEqual(self.dset.regionref.shape(ref), self.dset.shape)
self.assertEqual(self.dset.regionref.selection(ref), (10, 18))
class TestAstype(BaseDataset):
""".astype() wrapper & context manager
"""
def test_astype_ctx(self):
dset = self.f.create_dataset('x', (100,), dtype='i2')
dset[...] = np.arange(100)
with dset.astype('f8'):
self.assertArrayEqual(dset[...], np.arange(100, dtype='f8'))
with dset.astype('f4') as f4ds:
self.assertArrayEqual(f4ds[...], np.arange(100, dtype='f4'))
def test_astype_wrapper(self):
dset = self.f.create_dataset('x', (100,), dtype='i2')
dset[...] = np.arange(100)
arr = dset.astype('f4')[:]
self.assertArrayEqual(arr, np.arange(100, dtype='f4'))
def test_astype_wrapper_len(self):
dset = self.f.create_dataset('x', (100,), dtype='i2')
dset[...] = np.arange(100)
self.assertEqual(100, len(dset.astype('f4')))
class TestScalarCompound(BaseDataset):
"""
Retrieval of a single field from a scalar compound dataset should
strip the field info
"""
def test_scalar_compound(self):
dt = np.dtype([('a', 'i')])
dset = self.f.create_dataset('x', (), dtype=dt)
self.assertEqual(dset['a'].dtype, np.dtype('i'))
class TestVlen(BaseDataset):
def test_int(self):
dt = h5py.vlen_dtype(int)
ds = self.f.create_dataset('vlen', (4,), dtype=dt)
ds[0] = np.arange(3)
ds[1] = np.arange(0)
ds[2] = [1, 2, 3]
ds[3] = np.arange(1)
self.assertArrayEqual(ds[0], np.arange(3))
self.assertArrayEqual(ds[1], np.arange(0))
self.assertArrayEqual(ds[2], np.array([1, 2, 3]))
self.assertArrayEqual(ds[1], np.arange(0))
ds[0:2] = np.array([np.arange(5), np.arange(4)], dtype=object)
self.assertArrayEqual(ds[0], np.arange(5))
self.assertArrayEqual(ds[1], np.arange(4))
ds[0:2] = np.array([np.arange(3), np.arange(3)])
self.assertArrayEqual(ds[0], np.arange(3))
self.assertArrayEqual(ds[1], np.arange(3))
def test_reuse_from_other(self):
dt = h5py.vlen_dtype(int)
ds = self.f.create_dataset('vlen', (1,), dtype=dt)
self.f.create_dataset('vlen2', (1,), ds[()].dtype)
def test_reuse_struct_from_other(self):
dt = [('a', int), ('b', h5py.vlen_dtype(int))]
ds = self.f.create_dataset('vlen', (1,), dtype=dt)
fname = self.f.filename
self.f.close()
self.f = h5py.File(fname, 'a')
self.f.create_dataset('vlen2', (1,), self.f['vlen']['b'][()].dtype)
def test_convert(self):
dt = h5py.vlen_dtype(int)
ds = self.f.create_dataset('vlen', (3,), dtype=dt)
ds[0] = np.array([1.4, 1.2])
ds[1] = np.array([1.2])
ds[2] = [1.2, 2, 3]
self.assertArrayEqual(ds[0], np.array([1, 1]))
self.assertArrayEqual(ds[1], np.array([1]))
self.assertArrayEqual(ds[2], np.array([1, 2, 3]))
ds[0:2] = np.array([[0.1, 1.1, 2.1, 3.1, 4], np.arange(4)], dtype=object)
self.assertArrayEqual(ds[0], np.arange(5))
self.assertArrayEqual(ds[1], np.arange(4))
ds[0:2] = np.array([np.array([0.1, 1.2, 2.2]),
np.array([0.2, 1.2, 2.2])])
self.assertArrayEqual(ds[0], np.arange(3))
self.assertArrayEqual(ds[1], np.arange(3))
def test_multidim(self):
dt = h5py.vlen_dtype(int)
ds = self.f.create_dataset('vlen', (2, 2), dtype=dt)
ds[0, 0] = np.arange(1)
ds[:, :] = np.array([[np.arange(3), np.arange(2)],
[np.arange(1), np.arange(2)]], dtype=object)
ds[:, :] = np.array([[np.arange(2), np.arange(2)],
[np.arange(2), np.arange(2)]])
def _help_float_testing(self, np_dt, dataset_name='vlen'):
"""
Helper for testing various vlen numpy data types.
:param np_dt: Numpy datatype to test
:param dataset_name: String name of the dataset to create for testing.
"""
dt = h5py.vlen_dtype(np_dt)
ds = self.f.create_dataset(dataset_name, (5,), dtype=dt)
# Create some arrays, and assign them to the dataset
array_0 = np.array([1., 2., 30.], dtype=np_dt)
array_1 = np.array([100.3, 200.4, 98.1, -10.5, -300.0], dtype=np_dt)
# Test that a numpy array of different type gets cast correctly
array_2 = np.array([1, 2, 8], dtype=np.dtype('int32'))
casted_array_2 = array_2.astype(np_dt)
# Test that we can set a list of floats.
list_3 = [1., 2., 900., 0., -0.5]
list_array_3 = np.array(list_3, dtype=np_dt)
# Test that a list of integers gets cast correctly
list_4 = [-1, -100, 0, 1, 9999, 70]
list_array_4 = np.array(list_4, dtype=np_dt)
ds[0] = array_0
ds[1] = array_1
ds[2] = array_2
ds[3] = list_3
ds[4] = list_4
self.assertArrayEqual(array_0, ds[0])
self.assertArrayEqual(array_1, ds[1])
self.assertArrayEqual(casted_array_2, ds[2])
self.assertArrayEqual(list_array_3, ds[3])
self.assertArrayEqual(list_array_4, ds[4])
# Test that we can reassign arrays in the dataset
list_array_3 = np.array([0.3, 2.2], dtype=np_dt)
ds[0] = list_array_3[:]
self.assertArrayEqual(list_array_3, ds[0])
# Make sure we can close the file.
self.f.flush()
self.f.close()
def test_numpy_float16(self):
np_dt = np.dtype('float16')
self._help_float_testing(np_dt)
def test_numpy_float32(self):
np_dt = np.dtype('float32')
self._help_float_testing(np_dt)
def test_numpy_float64_from_dtype(self):
np_dt = np.dtype('float64')
self._help_float_testing(np_dt)
def test_numpy_float64_2(self):
np_dt = np.float64
self._help_float_testing(np_dt)
def test_non_contiguous_arrays(self):
"""Test that non-contiguous arrays are stored correctly"""
self.f.create_dataset('nc', (10,), dtype=h5py.vlen_dtype('bool'))
x = np.array([True, False, True, True, False, False, False])
self.f['nc'][0] = x[::2]
assert all(self.f['nc'][0] == x[::2]), f"{self.f['nc'][0]} != {x[::2]}"
self.f.create_dataset('nc2', (10,), dtype=h5py.vlen_dtype('int8'))
y = np.array([2, 4, 1, 5, -1, 3, 7])
self.f['nc2'][0] = y[::2]
assert all(self.f['nc2'][0] == y[::2]), f"{self.f['nc2'][0]} != {y[::2]}"
class TestLowOpen(BaseDataset):
def test_get_access_list(self):
""" Test H5Dget_access_plist """
ds = self.f.create_dataset('foo', (4,))
p_list = ds.id.get_access_plist()
def test_dapl(self):
""" Test the dapl keyword to h5d.open """
dapl = h5py.h5p.create(h5py.h5p.DATASET_ACCESS)
dset = self.f.create_dataset('x', (100,))
del dset
dsid = h5py.h5d.open(self.f.id, b'x', dapl)
self.assertIsInstance(dsid, h5py.h5d.DatasetID)
@ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 10, 5),
"chunk info requires HDF5 >= 1.10.5")
def test_get_chunk_details():
from io import BytesIO
buf = BytesIO()
with h5py.File(buf, 'w') as fout:
fout.create_dataset('test', shape=(100, 100), chunks=(10, 10), dtype='i4')
fout['test'][:] = 1
buf.seek(0)
with h5py.File(buf, 'r') as fin:
ds = fin['test'].id
assert ds.get_num_chunks() == 100
for j in range(100):
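# map the linear chunk index j to its (row, col) position in the 10x10 chunk grid,
# then scale by the chunk edge of 10 elements to get the element offset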
offset = tuple(np.array(np.unravel_index(j, (10, 10))) * 10)
si = ds.get_chunk_info(j)
assert si.chunk_offset == offset
assert si.filter_mask == 0
assert si.byte_offset is not None
assert si.size > 0
si = ds.get_chunk_info_by_coord((0, 0))
assert si.chunk_offset == (0, 0)
assert si.filter_mask == 0
assert si.byte_offset is not None
assert si.size > 0
def test_empty_shape(writable_file):
ds = writable_file.create_dataset('empty', dtype='int32')
assert ds.shape is None
assert ds.maxshape is None
def test_zero_storage_size():
# https://github.com/h5py/h5py/issues/1475
from io import BytesIO
buf = BytesIO()
with h5py.File(buf, 'w') as fout:
fout.create_dataset('empty', dtype='uint8')
buf.seek(0)
with h5py.File(buf, 'r') as fin:
assert fin['empty'].chunks is None
assert fin['empty'].id.get_offset() is None
assert fin['empty'].id.get_storage_size() == 0
def test_python_int_uint64(writable_file):
# https://github.com/h5py/h5py/issues/1547
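# int64 max and int64 max + 1: the second value is only representable as uint64,
# so plain Python ints must be accepted unchanged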
data = [np.iinfo(np.int64).max, np.iinfo(np.int64).max + 1]
# Check creating a new dataset
ds = writable_file.create_dataset('x', data=data, dtype=np.uint64)
assert ds.dtype == np.dtype(np.uint64)
np.testing.assert_array_equal(ds[:], np.array(data, dtype=np.uint64))
# Check writing to an existing dataset
ds[:] = data
np.testing.assert_array_equal(ds[:], np.array(data, dtype=np.uint64))
def test_setitem_fancy_indexing(writable_file):
# https://github.com/h5py/h5py/issues/1593
arr = writable_file.create_dataset('data', (5, 1000, 2), dtype=np.uint8)
block = np.random.randint(255, size=(5, 3, 2))
arr[:, [0, 2, 4], ...] = block
def test_vlen_spacepad():
with File(get_data_file_path("vlen_string_dset.h5")) as f:
assert f["DS1"][0] == b"Parting"
def test_vlen_nullterm():
with File(get_data_file_path("vlen_string_dset_utc.h5")) as f:
assert f["ds1"][0] == b"2009-12-20T10:16:18.662409Z"
@pytest.mark.skipif(
h5py.version.hdf5_version_tuple < (1, 10, 3),
reason="Appears you cannot pass an unknown filter id for HDF5 < 1.10.3"
)
def test_allow_unknown_filter(writable_file):
# apparently 256-511 are reserved for testing purposes
fake_filter_id = 256
ds = writable_file.create_dataset(
'data', shape=(10, 10), dtype=np.uint8, compression=fake_filter_id,
allow_unknown_filter=True
)
assert str(fake_filter_id) in ds._filters
| 36.145893 | 117 | 0.602384 |
3c8d5cb12bdc6e04fdd795cc200e91ac00b9eb56 | 418 | py | Python | python/testData/debug/stepping/test_smart_step_into_decorator2.py | Sajaki/intellij-community | 6748af2c40567839d11fd652ec77ba263c074aad | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/debug/stepping/test_smart_step_into_decorator2.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2022-02-19T09:45:05.000Z | 2022-02-27T20:32:55.000Z | python/testData/debug/stepping/test_smart_step_into_decorator2.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def foo(i):
return i
def generate_power(exponent):
def decorator(f):
def inner(*args):
result = f(*args)
return exponent ** result
return inner
return decorator
@generate_power(foo(foo(3))) # breakpoint
@generate_power(foo(foo(5)))
def raise_three(n):
return n
@generate_power(2)
def raise_two(n):
return n
raise_three(raise_two(2)) # breakpoint
| 14.928571 | 42 | 0.624402 |
e7fcaf37b6a57ed7c08ef5f98c365a4fb2a75df7 | 12,075 | py | Python | cdpr/sdf/gen_cdpr.py | siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling | 4e8d991d55ae7da91b3c90773c679f3369a4dafa | [
"MIT"
] | 9 | 2021-06-01T12:19:58.000Z | 2022-02-28T12:30:09.000Z | src/cdpr_gazebo/sdf/gen_cdpr.py | balazs-bamer/cdpr-simulation | d8b6fa54eb4c150b2903e9c8ef629655d4739d7e | [
"MIT"
] | 1 | 2021-09-27T12:24:50.000Z | 2021-09-27T12:24:50.000Z | src/cdpr_gazebo/sdf/gen_cdpr.py | balazs-bamer/cdpr-simulation | d8b6fa54eb4c150b2903e9c8ef629655d4739d7e | [
"MIT"
] | 4 | 2020-12-03T03:01:15.000Z | 2022-03-10T01:51:59.000Z | #!/usr/bin/python
from mod_create import *
import yaml
import sys
import numpy as np
from math import *
import transformations as tr
from os.path import exists
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Give a yaml file')
sys.exit(0)
model = sys.argv[1]
if not exists(model):
for ext in ['.yaml', '.yml', 'yaml','yml']:
if exists(model + ext):
model += ext
break
if not exists(model):
print(model + ' not found')
sys.exit(0)
d_config = yaml.load(file(model))
sim_cables = True
if 'sim_cables' in d_config:
sim_cables = d_config['sim_cables']
# check point values are all doubles for C++ parser
for i in xrange(len(d_config['points'])):
for j in xrange(3):
if sim_cables:
d_config['points'][i]['frame'][j] = float(d_config['points'][i]['frame'][j])
d_config['points'][i]['platform'][j] = float(d_config['points'][i]['platform'][j])
# same check for inertia matrix
for i in xrange(6):
d_config['platform']['inertia'][i] = float(d_config['platform']['inertia'][i])
# re-write config
with open(model,'w') as f:
yaml.dump(d_config, f)
config = DictsToNamespace(d_config)
config.frame.upper = [float(v) for v in config.frame.upper]
config.frame.lower = [float(v) for v in config.frame.lower]
name = model.split('.')[0]
# SDF building
sdf = etree.Element('sdf', version= '1.4')
model = etree.SubElement(sdf, 'model', name=name)
# frame
model.insert(2, etree.Comment('Definition of the robot frame'))
base_link = etree.SubElement(model, 'link', name= 'frame')
CreateNested(base_link, 'pose', '0 0 0 0 0 0')
BuildInertial(base_link, 100000)
# frame visual
if config.frame.type == 'box':
# default visual: cubic frame
# find corner points
points = []
lx,ly,lz = [config.frame.upper[i] - config.frame.lower[i] for i in xrange(3)]
for dx in [0,1]:
for dy in [0,1]:
for dz in [0,1]:
dxyz = [dx*lx, dy*ly, dz*lz]
points.append([config.frame.lower[i]+dxyz[i] for i in xrange(3)])
# create segments
ident = 0
for i,p1 in enumerate(points[:-1]):
for p2 in points[i+1:]:
dp = [p2[i]-p1[i] for i in xrange(3)]
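# exactly two zero components means the corners differ along a single axis,
# i.e. the pair forms an edge of the frame box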
if dp.count(0) == 2:
# middle of segment
pose = [p1[i]+dp[i]/2. for i in xrange(3)] + [0,0,0]
# find orientation
if dp[0] != 0:
pose[4] = pi/2
elif dp[1] != 0:
pose[3] = pi/2
# create link
ident += 1
CreateVisualCollision(base_link,'%s/geometry/cylinder/radius' % ident, config.frame.radius, color=config.frame.color, pose='%f %f %f %f %f %f' % tuple(pose), collision=True)
CreateNested(base_link, 'visual%s/geometry/cylinder/length' % ident, str(np.linalg.norm(dp)))
# create platform
model.insert(2, etree.Comment('Definition of the robot platform'))
link = etree.SubElement(model, 'link', name= 'platform')
CreateNested(link, 'pose', '%f %f %f %f %f %f' % tuple(config.platform.position.xyz + config.platform.position.rpy))
if config.platform.type == 'box':
pose = config.platform.position.xyz + config.platform.position.rpy
CreateVisualCollision(link, 'pf/geometry/box/size', '%f %f %f' % tuple(config.platform.size), collision=True, color=config.platform.color, mass = config.platform.mass, inertia=config.platform.inertia)
# platform translation and rotation
pf_t = np.array(config.platform.position.xyz).reshape(3,1)
pf_R = tr.euler_matrix(config.platform.position.rpy[0], config.platform.position.rpy[1], config.platform.position.rpy[2])[:3,:3]
# maximum length
l = np.linalg.norm([config.frame.upper[i] - config.frame.lower[i] for i in xrange(3)])
# create cables
if sim_cables:
model.insert(2, etree.Comment('Definition of the robot cables'))
z = [0,0,1]
for i, cbl in enumerate(config.points):
fp = np.array(cbl.frame).reshape(3,1) # frame attach point
# express platform attach point in world frame
pp = pf_t + np.dot(pf_R, np.array(cbl.platform).reshape(3,1))
# cable orientation
u = (pp - fp).reshape(3)
u = list(u/np.linalg.norm(u))
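# rotation taking the cylinder's local z axis onto the cable direction u:
# angle between z and u, about the axis z x u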
R = tr.rotation_matrix(np.arctan2(np.linalg.norm(np.cross(z,u)), np.dot(u,z)), np.cross(z,u))
# to RPY
rpy = list(tr.euler_from_matrix(R))
# rpy describing the orientation of the cable (z) axis
# cable position to stick to the platform
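# a = (l/2) / |pp - fp|, so cp = pp - (l/2) * u: the cylinder centre sits half a cable
# length back from the platform point, leaving one end exactly at pp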
a = l/(2.*np.linalg.norm(pp-fp))
cp = list((pp - a*(pp-fp)).reshape(3))
# create cable
link = etree.SubElement(model, 'link', name= 'cable%i' % i)
CreateNested(link, 'pose', '%f %f %f %f %f %f' % tuple(cp + rpy))
CreateVisualCollision(link,'/geometry/cylinder/radius', config.cable.radius, color='Black', collision=False, mass = 0.001)
CreateNested(link, 'visual/geometry/cylinder/length', str(l))
'''
sph_link = etree.SubElement(model, 'link', name= 'sph%i' % i)
CreateNested(sph_link, 'pose', '%f %f %f 0 0 0' % tuple(cp))
CreateVisualCollision(sph_link,'sph%i/geometry/sphere/radius' % i, .015, color='Blue', collision=True)
'''
# virtual link around X
link = etree.SubElement(model, 'link', name= 'virt_X%i' % i)
BuildInertial(link, 0.001)
CreateNested(link, 'pose', '%f %f %f %f %f %f' % tuple(cbl.frame + rpy))
#CreateVisualCollision(link,'/geometry/cylinder/radius', .03, color='Red', collision=False)
#CreateNested(link, 'visual/geometry/cylinder/length', 0.3)
# revolute joint around X
joint = etree.SubElement(model, 'joint', name= 'rev_X%i' % i)
joint.set("type", "revolute")
CreateNested(joint, 'pose', '0 0 0 %f %f %f' % tuple(rpy))
CreateNested(joint, 'parent', 'frame')
CreateNested(joint, 'child', 'virt_X%i' % i)
CreateNested(joint, 'axis/xyz', '%f %f %f' % tuple(R[:3,0]))
CreateNested(joint, 'axis/limit/effort', config.joints.passive.effort)
CreateNested(joint, 'axis/limit/velocity', config.joints.passive.velocity)
CreateNested(joint, 'axis/dynamics/damping', config.joints.passive.damping)
# virtual link around Y
link = etree.SubElement(model, 'link', name= 'virt_Y%i' % i)
BuildInertial(link, 0.001)
CreateNested(link, 'pose', '%f %f %f %f %f %f' % tuple(cbl.frame + rpy))
#CreateVisualCollision(link,'/geometry/cylinder/radius', .05, color='Green', collision=False)
#CreateNested(link, 'visual/geometry/cylinder/length', 0.2)
# revolute joint around Y
joint = etree.SubElement(model, 'joint', name= 'rev_Y%i' % i)
joint.set("type", "revolute")
CreateNested(joint, 'pose', '0 0 0 %f %f %f' % tuple(rpy))
CreateNested(joint, 'parent', 'virt_X%i' % i)
CreateNested(joint, 'child', 'virt_Y%i' % i)
CreateNested(joint, 'axis/xyz', '%f %f %f' % tuple(R[:3,1]))
CreateNested(joint, 'axis/limit/effort', config.joints.passive.effort)
CreateNested(joint, 'axis/limit/velocity', config.joints.passive.velocity)
CreateNested(joint, 'axis/dynamics/damping', config.joints.passive.damping)
# prismatic joint
joint = etree.SubElement(model, 'joint', name= 'cable%i' % i)
joint.set("type", "prismatic")
#CreateNested(joint, 'pose', '0 0 0 %f %f %f' % tuple(rpy) )
CreateNested(joint, 'pose', '0 0 %f %f %f %f' % tuple([(a-1.)*l/2] + rpy) )
CreateNested(joint, 'parent', 'virt_Y%i' % i)
CreateNested(joint, 'child', 'cable%i' % i)
CreateNested(joint, 'axis/xyz', '%f %f %f' % tuple(-R[:3,2]))
CreateNested(joint, 'axis/limit/lower', -0.5*l)
CreateNested(joint, 'axis/limit/upper', 0.5*l)
CreateNested(joint, 'axis/limit/effort', config.joints.actuated.effort)
CreateNested(joint, 'axis/limit/velocity', config.joints.actuated.velocity)
CreateNested(joint, 'axis/dynamics/damping', config.joints.actuated.damping)
# rotation cable/pf X
link = etree.SubElement(model, 'link', name= 'virt_Xpf%i' % i)
BuildInertial(link, 0.001)
CreateNested(link, 'pose', '%f %f %f %f %f %f' % tuple(list(pp.reshape(3)) + rpy))
#CreateVisualCollision(link,'/geometry/cylinder/radius', .03, color='Red', collision=False)
#CreateNested(link, 'visual/geometry/cylinder/length', 0.3)
# revolute joint around X
joint = etree.SubElement(model, 'joint', name= 'rev_Xpf%i' % i)
joint.set("type", "revolute")
CreateNested(joint, 'pose', '0 0 0 %f %f %f' % tuple(rpy))
CreateNested(joint, 'parent', 'platform')
CreateNested(joint, 'child', 'virt_Xpf%i' % i)
CreateNested(joint, 'axis/xyz', '1 0 0')
CreateNested(joint, 'axis/limit/effort', config.joints.passive.effort)
CreateNested(joint, 'axis/limit/velocity', config.joints.passive.velocity)
CreateNested(joint, 'axis/dynamics/damping', config.joints.passive.damping)
# rotation cable/pf Y
link = etree.SubElement(model, 'link', name= 'virt_Ypf%i' % i)
BuildInertial(link, 0.001)
CreateNested(link, 'pose', '%f %f %f %f %f %f' % tuple(list(pp.reshape(3)) + rpy))
#CreateVisualCollision(link,'/geometry/cylinder/radius', .03, color='Red', collision=False)
#CreateNested(link, 'visual/geometry/cylinder/length', 0.3)
# revolute joint around Y
joint = etree.SubElement(model, 'joint', name= 'rev_Ypf%i' % i)
joint.set("type", "revolute")
CreateNested(joint, 'pose', '0 0 0 %f %f %f' % tuple(rpy))
CreateNested(joint, 'parent', 'virt_Xpf%i' % i)
CreateNested(joint, 'child', 'virt_Ypf%i' % i)
CreateNested(joint, 'axis/xyz', '0 1 0')
CreateNested(joint, 'axis/limit/effort', config.joints.passive.effort)
CreateNested(joint, 'axis/limit/velocity', config.joints.passive.velocity)
CreateNested(joint, 'axis/dynamics/damping', config.joints.passive.damping)
# rotation cable/pf Z
# revolute joint around Z
joint = etree.SubElement(model, 'joint', name= 'rev_Zpf%i' % i)
joint.set("type", "revolute")
CreateNested(joint, 'pose', '0 0 0 %f %f %f' % tuple(rpy))
CreateNested(joint, 'child', 'virt_Ypf%i' % i)
CreateNested(joint, 'parent', 'cable%i' % i)
CreateNested(joint, 'axis/xyz', '0 0 1')
CreateNested(joint, 'axis/limit/effort', config.joints.passive.effort)
CreateNested(joint, 'axis/limit/velocity', config.joints.passive.velocity)
CreateNested(joint, 'axis/dynamics/damping', config.joints.passive.damping)
print 'Simulating {} cables'.format(len(config.points))
else:
print 'Model does not simulate cables'
# control plugin
plug = etree.SubElement(model, 'plugin', name='cdpr_plugin', filename='libcdpr_plugin.so')
# write file
WriteSDF(sdf, name+'.sdf')
| 49.896694 | 208 | 0.564803 |
60312c6e3b6d73de8d7c1421db0ef703646aea47 | 76,944 | py | Python | pandas/core/indexing.py | mproszewska/pandas | 0afb1b14c359eece44f3885d5f20b40e07a9ccb6 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexing.py | mproszewska/pandas | 0afb1b14c359eece44f3885d5f20b40e07a9ccb6 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexing.py | mproszewska/pandas | 0afb1b14c359eece44f3885d5f20b40e07a9ccb6 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | from typing import TYPE_CHECKING, Hashable, List, Tuple, Union
import numpy as np
from pandas._libs.indexing import _NDFrameIndexerBase
from pandas._libs.lib import item_from_zerodim
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_array_like,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sequence,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCDataFrame, ABCMultiIndex, ABCSeries
from pandas.core.dtypes.missing import _infer_fill_value, isna
import pandas.core.common as com
from pandas.core.construction import array as pd_array
from pandas.core.indexers import (
check_array_indexer,
is_list_like_indexer,
length_of_indexer,
)
from pandas.core.indexes.api import Index, InvalidIndexError
if TYPE_CHECKING:
from pandas import DataFrame # noqa:F401
# "null slice"
_NS = slice(None, None)
# the public IndexSlicerMaker
class _IndexSlice:
"""
Create an object to more easily perform multi-index slicing.
See Also
--------
MultiIndex.remove_unused_levels : New MultiIndex with no unused levels.
Notes
-----
See :ref:`Defined Levels <advanced.shown_levels>`
for further info on slicing a MultiIndex.
Examples
--------
>>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']])
>>> columns = ['foo', 'bar']
>>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),
... index=midx, columns=columns)
Using the default slice command:
>>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
Using the IndexSlice class for a more intuitive command:
>>> idx = pd.IndexSlice
>>> dfmi.loc[idx[:, 'B0':'B1'], :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
"""
def __getitem__(self, arg):
return arg
IndexSlice = _IndexSlice()
class IndexingError(Exception):
pass
class IndexingMixin:
"""
Mixin for adding .loc/.iloc/.at/.iat to Dataframes and Series.
"""
@property
def iloc(self) -> "_iLocIndexer":
"""
Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a boolean
array.
Allowed inputs are:
- An integer, e.g. ``5``.
- A list or array of integers, e.g. ``[4, 3, 0]``.
- A slice object with ints, e.g. ``1:7``.
- A boolean array.
- A ``callable`` function with one argument (the calling Series or
DataFrame) and that returns valid output for indexing (one of the above).
This is useful in method chains, when you don't have a reference to the
calling object, but would like to base your selection on some value.
``.iloc`` will raise ``IndexError`` if a requested indexer is
out-of-bounds, except *slice* indexers which allow out-of-bounds
indexing (this conforms with python/numpy *slice* semantics).
See more at :ref:`Selection by Position <indexing.integer>`.
See Also
--------
DataFrame.iat : Fast integer location scalar accessor.
DataFrame.loc : Purely label-location based indexer for selection by label.
Series.iloc : Purely integer-location based indexing for
selection by position.
Examples
--------
>>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},
... {'a': 100, 'b': 200, 'c': 300, 'd': 400},
... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]
>>> df = pd.DataFrame(mydict)
>>> df
a b c d
0 1 2 3 4
1 100 200 300 400
2 1000 2000 3000 4000
**Indexing just the rows**
With a scalar integer.
>>> type(df.iloc[0])
<class 'pandas.core.series.Series'>
>>> df.iloc[0]
a 1
b 2
c 3
d 4
Name: 0, dtype: int64
With a list of integers.
>>> df.iloc[[0]]
a b c d
0 1 2 3 4
>>> type(df.iloc[[0]])
<class 'pandas.core.frame.DataFrame'>
>>> df.iloc[[0, 1]]
a b c d
0 1 2 3 4
1 100 200 300 400
With a `slice` object.
>>> df.iloc[:3]
a b c d
0 1 2 3 4
1 100 200 300 400
2 1000 2000 3000 4000
With a boolean mask the same length as the index.
>>> df.iloc[[True, False, True]]
a b c d
0 1 2 3 4
2 1000 2000 3000 4000
With a callable, useful in method chains. The `x` passed
to the ``lambda`` is the DataFrame being sliced. This selects
the rows whose index label even.
>>> df.iloc[lambda x: x.index % 2 == 0]
a b c d
0 1 2 3 4
2 1000 2000 3000 4000
**Indexing both axes**
You can mix the indexer types for the index and columns. Use ``:`` to
select the entire axis.
With scalar integers.
>>> df.iloc[0, 1]
2
With lists of integers.
>>> df.iloc[[0, 2], [1, 3]]
b d
0 2 4
2 2000 4000
With `slice` objects.
>>> df.iloc[1:3, 0:3]
a b c
1 100 200 300
2 1000 2000 3000
With a boolean array whose length matches the columns.
>>> df.iloc[:, [True, False, True, False]]
a c
0 1 3
1 100 300
2 1000 3000
With a callable function that expects the Series or DataFrame.
>>> df.iloc[:, lambda df: [0, 2]]
a c
0 1 3
1 100 300
2 1000 3000
"""
return _iLocIndexer("iloc", self)
@property
def loc(self) -> "_LocIndexer":
"""
Access a group of rows and columns by label(s) or a boolean array.
``.loc[]`` is primarily label based, but may also be used with a
boolean array.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index).
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'``.
.. warning:: Note that contrary to usual python slices, **both** the
start and the stop are included
- A boolean array of the same length as the axis being sliced,
e.g. ``[True, False, True]``.
- A ``callable`` function with one argument (the calling Series or
DataFrame) and that returns valid output for indexing (one of the above)
See more at :ref:`Selection by Label <indexing.label>`
Raises
------
KeyError
If any items are not found.
See Also
--------
DataFrame.at : Access a single value for a row/column label pair.
DataFrame.iloc : Access group of rows and columns by integer position(s).
DataFrame.xs : Returns a cross-section (row(s) or column(s)) from the
Series/DataFrame.
Series.loc : Access group of values using labels.
Examples
--------
**Getting values**
>>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
Single label. Note this returns the row as a Series.
>>> df.loc['viper']
max_speed 4
shield 5
Name: viper, dtype: int64
List of labels. Note using ``[[]]`` returns a DataFrame.
>>> df.loc[['viper', 'sidewinder']]
max_speed shield
viper 4 5
sidewinder 7 8
Single label for row and column
>>> df.loc['cobra', 'shield']
2
Slice with labels for row and single label for column. As mentioned
above, note that both the start and stop of the slice are included.
>>> df.loc['cobra':'viper', 'max_speed']
cobra 1
viper 4
Name: max_speed, dtype: int64
Boolean list with the same length as the row axis
>>> df.loc[[False, False, True]]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series
>>> df.loc[df['shield'] > 6]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series with column labels specified
>>> df.loc[df['shield'] > 6, ['max_speed']]
max_speed
sidewinder 7
Callable that returns a boolean Series
>>> df.loc[lambda df: df['shield'] == 8]
max_speed shield
sidewinder 7 8
**Setting values**
Set value for all items matching the list of labels
>>> df.loc[['viper', 'sidewinder'], ['shield']] = 50
>>> df
max_speed shield
cobra 1 2
viper 4 50
sidewinder 7 50
Set value for an entire row
>>> df.loc['cobra'] = 10
>>> df
max_speed shield
cobra 10 10
viper 4 50
sidewinder 7 50
Set value for an entire column
>>> df.loc[:, 'max_speed'] = 30
>>> df
max_speed shield
cobra 30 10
viper 30 50
sidewinder 30 50
Set value for rows matching callable condition
>>> df.loc[df['shield'] > 35] = 0
>>> df
max_speed shield
cobra 30 10
viper 0 0
sidewinder 0 0
**Getting values on a DataFrame with an index that has integer labels**
Another example using integers for the index
>>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=[7, 8, 9], columns=['max_speed', 'shield'])
>>> df
max_speed shield
7 1 2
8 4 5
9 7 8
Slice with integer labels for rows. As mentioned above, note that both
the start and stop of the slice are included.
>>> df.loc[7:9]
max_speed shield
7 1 2
8 4 5
9 7 8
**Getting values with a MultiIndex**
A number of examples using a DataFrame with a MultiIndex
>>> tuples = [
... ('cobra', 'mark i'), ('cobra', 'mark ii'),
... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'),
... ('viper', 'mark ii'), ('viper', 'mark iii')
... ]
>>> index = pd.MultiIndex.from_tuples(tuples)
>>> values = [[12, 2], [0, 4], [10, 20],
... [1, 4], [7, 1], [16, 36]]
>>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index)
>>> df
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
mark iii 16 36
Single label. Note this returns a DataFrame with a single index.
>>> df.loc['cobra']
max_speed shield
mark i 12 2
mark ii 0 4
Single index tuple. Note this returns a Series.
>>> df.loc[('cobra', 'mark ii')]
max_speed 0
shield 4
Name: (cobra, mark ii), dtype: int64
Single label for row and column. Similar to passing in a tuple, this
returns a Series.
>>> df.loc['cobra', 'mark i']
max_speed 12
shield 2
Name: (cobra, mark i), dtype: int64
Single tuple. Note using ``[[]]`` returns a DataFrame.
>>> df.loc[[('cobra', 'mark ii')]]
max_speed shield
cobra mark ii 0 4
Single tuple for the index with a single label for the column
>>> df.loc[('cobra', 'mark i'), 'shield']
2
Slice from index tuple to single label
>>> df.loc[('cobra', 'mark i'):'viper']
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
mark iii 16 36
Slice from index tuple to index tuple
>>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')]
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
"""
return _LocIndexer("loc", self)
@property
def at(self) -> "_AtIndexer":
"""
Access a single value for a row/column label pair.
Similar to ``loc``, in that both provide label-based lookups. Use
``at`` if you only need to get or set a single value in a DataFrame
or Series.
Raises
------
KeyError
If 'label' does not exist in DataFrame.
See Also
--------
DataFrame.iat : Access a single value for a row/column pair by integer
position.
DataFrame.loc : Access a group of rows and columns by label(s).
Series.at : Access a single value using a label.
Examples
--------
>>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
... index=[4, 5, 6], columns=['A', 'B', 'C'])
>>> df
A B C
4 0 2 3
5 0 4 1
6 10 20 30
Get value at specified row/column pair
>>> df.at[4, 'B']
2
Set value at specified row/column pair
>>> df.at[4, 'B'] = 10
>>> df.at[4, 'B']
10
Get value within a Series
>>> df.loc[5].at['B']
4
"""
return _AtIndexer("at", self)
@property
def iat(self) -> "_iAtIndexer":
"""
Access a single value for a row/column pair by integer position.
Similar to ``iloc``, in that both provide integer-based lookups. Use
``iat`` if you only need to get or set a single value in a DataFrame
or Series.
Raises
------
IndexError
When integer position is out of bounds.
See Also
--------
DataFrame.at : Access a single value for a row/column label pair.
DataFrame.loc : Access a group of rows and columns by label(s).
DataFrame.iloc : Access a group of rows and columns by integer position(s).
Examples
--------
>>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 0 2 3
1 0 4 1
2 10 20 30
Get value at specified row/column pair
>>> df.iat[1, 2]
1
Set value at specified row/column pair
>>> df.iat[1, 2] = 10
>>> df.iat[1, 2]
10
Get value within a series
>>> df.loc[0].iat[1]
2
"""
return _iAtIndexer("iat", self)
class _LocationIndexer(_NDFrameIndexerBase):
_valid_types: str
axis = None
def __call__(self, axis=None):
# we need to return a copy of ourselves
new_self = type(self)(self.name, self.obj)
if axis is not None:
axis = self.obj._get_axis_number(axis)
new_self.axis = axis
return new_self
def _get_setitem_indexer(self, key):
"""
Convert a potentially-label-based key into a positional indexer.
"""
if self.name == "loc":
self._ensure_listlike_indexer(key)
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
ax = self.obj._get_axis(0)
if isinstance(ax, ABCMultiIndex) and self.name != "iloc":
try:
return ax.get_loc(key)
except (TypeError, KeyError, InvalidIndexError):
# TypeError e.g. passed a bool
pass
if isinstance(key, tuple):
try:
return self._convert_tuple(key, is_setter=True)
except IndexingError:
pass
if isinstance(key, range):
return list(key)
try:
return self._convert_to_indexer(key, axis=0, is_setter=True)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
if "cannot do" in str(e):
raise
elif "unhashable type" in str(e):
raise
raise IndexingError(key) from e
def _ensure_listlike_indexer(self, key, axis=None):
"""
Ensure that a list-like of column labels are all present by adding them if
they do not already exist.
Parameters
----------
key : list-like of column labels
Target labels.
axis : key axis if known
"""
column_axis = 1
# column only exists in 2-dimensional DataFrame
if self.ndim != 2:
return
if isinstance(key, tuple):
# key may be a tuple if we are .loc
# in that case, set key to the column part of key
key = key[column_axis]
axis = column_axis
if (
axis == column_axis
and not isinstance(self.obj.columns, ABCMultiIndex)
and is_list_like_indexer(key)
and not com.is_bool_indexer(key)
and all(is_hashable(k) for k in key)
):
for k in key:
if k not in self.obj:
self.obj[k] = np.nan
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
else:
key = com.apply_if_callable(key, self.obj)
indexer = self._get_setitem_indexer(key)
self._has_valid_setitem_indexer(key)
iloc = self if self.name == "iloc" else self.obj.iloc
iloc._setitem_with_indexer(indexer, value)
def _validate_key(self, key, axis: int):
"""
Ensure that key is valid for current indexer.
Parameters
----------
key : scalar, slice or list-like
Key requested.
axis : int
Dimension on which the indexing is being made.
Raises
------
TypeError
If the key (or some element of it) has wrong type.
IndexError
If the key (or some element of it) is out of bounds.
KeyError
If the key was not found.
"""
raise AbstractMethodError(self)
def _has_valid_tuple(self, key: Tuple):
"""
Check the key for valid keys across my indexer.
"""
for i, k in enumerate(key):
if i >= self.ndim:
raise IndexingError("Too many indexers")
try:
self._validate_key(k, i)
except ValueError as err:
raise ValueError(
"Location based indexing can only have "
f"[{self._valid_types}] types"
) from err
def _is_nested_tuple_indexer(self, tup: Tuple) -> bool:
"""
Returns
-------
bool
"""
if any(isinstance(ax, ABCMultiIndex) for ax in self.obj.axes):
return any(is_nested_tuple(tup, ax) for ax in self.obj.axes)
return False
def _convert_tuple(self, key, is_setter: bool = False):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
keyidx.append(
self._convert_to_indexer(key, axis=axis, is_setter=is_setter)
)
else:
keyidx.append(slice(None))
else:
for i, k in enumerate(key):
if i >= self.ndim:
raise IndexingError("Too many indexers")
idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
def _getitem_tuple_same_dim(self, tup: Tuple):
"""
Index with indexers that should return an object of the same dimension
as self.obj.
This is only called after a failed call to _getitem_lowerdim.
"""
retval = self.obj
for i, key in enumerate(tup):
if com.is_null_slice(key):
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
# We should never have retval.ndim < self.ndim, as that should
# be handled by the _getitem_lowerdim call above.
assert retval.ndim == self.ndim
return retval
def _getitem_lowerdim(self, tup: Tuple):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
return self._getitem_nested_tuple(tup)
# we maybe be using a tuple to represent multiple dimensions here
ax0 = self.obj._get_axis(0)
# ...but iloc should handle the tuple as simple integer-location
# instead of checking it as multiindex representation (GH 13797)
if isinstance(ax0, ABCMultiIndex) and self.name != "iloc":
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
if len(tup) > self.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
for i, key in enumerate(tup):
if is_label_like(key):
# We don't need to check for tuples here because those are
# caught by the _is_nested_tuple_indexer check above.
section = self._getitem_axis(key, axis=i)
# We should never have a scalar section here, because
# _getitem_lowerdim is only called after a check for
# is_scalar_access, which that would be.
if section.ndim == self.ndim:
# we're in the middle of slicing through a MultiIndex
# revise the key wrt to `section` by inserting an _NS
new_key = tup[:i] + (_NS,) + tup[i + 1 :]
else:
# Note: the section.ndim == self.ndim check above
# rules out having DataFrame here, so we dont need to worry
# about transposing.
new_key = tup[:i] + tup[i + 1 :]
if len(new_key) == 1:
new_key = new_key[0]
# Slices should return views, but calling iloc/loc with a null
# slice returns a new object.
if com.is_null_slice(new_key):
return section
# This is an elided recursive call to iloc/loc
return getattr(section, self.name)[new_key]
raise IndexingError("not applicable")
def _getitem_nested_tuple(self, tup: Tuple):
# we have a nested tuple so have at least 1 multi-index level
# we should be able to match up the dimensionality here
# we have too many indexers for our dim, but have at least 1
# multi-index dimension, try to see if we have something like
# a tuple passed to a series with a multi-index
if len(tup) > self.ndim:
if self.name != "loc":
# This should never be reached, but lets be explicit about it
raise ValueError("Too many indices")
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
# this is a series with a multi-index specified a tuple of
# selectors
axis = self.axis or 0
return self._getitem_axis(tup, axis=axis)
# handle the multi-axis by taking sections and reducing
# this is iterative
obj = self.obj
axis = 0
for key in tup:
if com.is_null_slice(key):
axis += 1
continue
current_ndim = obj.ndim
obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
axis += 1
# if we have a scalar, we are done
if is_scalar(obj) or not hasattr(obj, "ndim"):
break
# has the dim of the obj changed?
# GH 7199
if obj.ndim < current_ndim:
axis -= 1
return obj
def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
raise AbstractMethodError(self)
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
if self._is_scalar_access(key):
try:
return self.obj._get_value(*key, takeable=self._takeable)
except (KeyError, IndexError, AttributeError):
# AttributeError for IntervalTree get_value
pass
return self._getitem_tuple(key)
else:
# we by definition only have the 0th axis
axis = self.axis or 0
maybe_callable = com.apply_if_callable(key, self.obj)
return self._getitem_axis(maybe_callable, axis=axis)
def _is_scalar_access(self, key: Tuple):
raise NotImplementedError()
def _getitem_tuple(self, tup: Tuple):
raise AbstractMethodError(self)
def _getitem_axis(self, key, axis: int):
raise NotImplementedError()
def _has_valid_setitem_indexer(self, indexer) -> bool:
raise AbstractMethodError(self)
def _getbool_axis(self, key, axis: int):
# caller is responsible for ensuring non-None axis
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
inds = key.nonzero()[0]
return self.obj._take_with_is_copy(inds, axis=axis)
@doc(IndexingMixin.loc)
class _LocIndexer(_LocationIndexer):
_takeable: bool = False
_valid_types = (
"labels (MUST BE IN THE INDEX), slices of labels (BOTH "
"endpoints included! Can be slices of integers if the "
"index is integers), listlike of labels, boolean"
)
# -------------------------------------------------------------------
# Key Checks
@doc(_LocationIndexer._validate_key)
def _validate_key(self, key, axis: int):
# valid for a collection of labels (we check their presence later)
# slice of labels (where start-end in labels)
# slice of integers (only if in the labels)
# boolean
pass
def _has_valid_setitem_indexer(self, indexer) -> bool:
return True
def _is_scalar_access(self, key: Tuple) -> bool:
"""
Returns
-------
bool
"""
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_scalar(k):
return False
ax = self.obj.axes[i]
if isinstance(ax, ABCMultiIndex):
return False
if isinstance(k, str) and ax._supports_partial_string_indexing:
# partial string indexing, df.loc['2000', 'A']
# should not be considered scalar
return False
if not ax.is_unique:
return False
return True
# -------------------------------------------------------------------
# MultiIndex Handling
def _multi_take_opportunity(self, tup: Tuple) -> bool:
"""
Check whether there is the possibility to use ``_multi_take``.
        Currently the limit is that all axes being indexed must be indexed with
        list-likes.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis.
Returns
-------
bool
            Whether the current indexing can be passed through `_multi_take`.
"""
if not all(is_list_like_indexer(x) for x in tup):
return False
# just too complicated
if any(com.is_bool_indexer(x) for x in tup):
return False
return True
def _multi_take(self, tup: Tuple):
"""
        Create the indexers for the passed tuple of keys, and execute the take
        operation. This allows the take operation to be executed all at once,
        rather than once for each dimension, improving efficiency.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis.
Returns
-------
values: same type as the object being indexed
"""
# GH 836
d = {
axis: self._get_listlike_indexer(key, axis)
for (key, axis) in zip(tup, self.obj._AXIS_ORDERS)
}
return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True)
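    # Illustrative example: ``df.loc[["r1", "r2"], ["c1"]]`` reaches ``_multi_take``
    # because every axis is indexed with a list-like, while
    # ``df.loc[["r1", "r2"], "c1"]`` does not (the scalar column label is handled
    # by ``_getitem_lowerdim`` instead).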
# -------------------------------------------------------------------
def _getitem_iterable(self, key, axis: int):
"""
        Index the current object with an iterable collection of keys.
Parameters
----------
key : iterable
Targeted labels.
axis: int
Dimension on which the indexing is being made.
Raises
------
KeyError
If no key was found. Will change in the future to raise if not all
keys were found.
Returns
-------
scalar, DataFrame, or Series: indexed value(s).
"""
# we assume that not com.is_bool_indexer(key), as that is
# handled before we get here.
self._validate_key(key, axis)
# A collection of keys
keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
return self.obj._reindex_with_indexers(
{axis: [keyarr, indexer]}, copy=True, allow_dups=True
)
def _getitem_tuple(self, tup: Tuple):
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
# no multi-index, so validate all of the indexers
self._has_valid_tuple(tup)
# ugly hack for GH #836
if self._multi_take_opportunity(tup):
return self._multi_take(tup)
return self._getitem_tuple_same_dim(tup)
def _get_label(self, label, axis: int):
# GH#5667 this will fail if the label is not present in the axis.
return self.obj.xs(label, axis=axis)
def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
# we have an axis0 multi-index, handle or raise
axis = self.axis or 0
try:
# fast path for series or for tup devoid of slices
return self._get_label(tup, axis=axis)
except TypeError:
# slices are unhashable
pass
except KeyError as ek:
# raise KeyError if number of indexers match
# else IndexingError will be raised
if len(tup) <= self.obj.index.nlevels and len(tup) > self.ndim:
raise ek
return None
def _getitem_axis(self, key, axis: int):
key = item_from_zerodim(key)
if is_iterator(key):
key = list(key)
labels = self.obj._get_axis(axis)
key = labels._get_partial_string_timestamp_match_key(key)
if isinstance(key, slice):
self._validate_key(key, axis)
return self._get_slice_axis(key, axis=axis)
elif com.is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, ABCMultiIndex)):
if hasattr(key, "ndim") and key.ndim > 1:
raise ValueError("Cannot index with multidimensional key")
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._validate_key(key, axis)
return self._get_label(key, axis=axis)
def _get_slice_axis(self, slice_obj: slice, axis: int):
"""
This is pretty simple as we just have to deal with labels.
"""
# caller is responsible for ensuring non-None axis
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(
slice_obj.start, slice_obj.stop, slice_obj.step, kind="loc"
)
if isinstance(indexer, slice):
return self.obj._slice(indexer, axis=axis)
else:
# DatetimeIndex overrides Index.slice_indexer and may
# return a DatetimeIndex instead of a slice object.
return self.obj.take(indexer, axis=axis)
def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on a ndarray.
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return labels._convert_slice_indexer(key, kind="loc")
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(key) and not is_int_index
if is_scalar(key) or isinstance(labels, ABCMultiIndex):
# Otherwise get_loc will raise InvalidIndexError
# if we are a label return me
try:
return labels.get_loc(key)
except LookupError:
if isinstance(key, tuple) and isinstance(labels, ABCMultiIndex):
if len(key) == labels.nlevels:
return {"key": key}
raise
except TypeError:
pass
except ValueError:
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
# always valid
return {"key": key}
if is_nested_tuple(key, labels):
return labels.get_locs(key)
elif is_list_like_indexer(key):
if com.is_bool_indexer(key):
key = check_bool_indexer(labels, key)
(inds,) = key.nonzero()
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
return self._get_listlike_indexer(key, axis, raise_missing=True)[1]
else:
try:
return labels.get_loc(key)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(key):
return {"key": key}
raise
def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
"""
Transform a list-like of keys into a new index and an indexer.
Parameters
----------
key : list-like
Targeted labels.
axis: int
Dimension on which the indexing is being made.
raise_missing: bool, default False
Whether to raise a KeyError if some labels were not found.
Will be removed in the future, and then this method will always behave as
if ``raise_missing=True``.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
Returns
-------
keyarr: Index
New index (coinciding with 'key' if the axis is unique).
values : array-like
Indexer for the return object, -1 denotes keys not found.
"""
ax = self.obj._get_axis(axis)
# Have the index compute an indexer or return None
# if it cannot handle:
indexer, keyarr = ax._convert_listlike_indexer(key)
# We only act on all found values:
if indexer is not None and (indexer != -1).all():
self._validate_read_indexer(
keyarr, indexer, axis, raise_missing=raise_missing
)
return ax[indexer], indexer
if ax.is_unique and not getattr(ax, "is_overlapping", False):
indexer = ax.get_indexer_for(keyarr)
keyarr = ax.reindex(keyarr)[0]
else:
keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
return keyarr, indexer
def _validate_read_indexer(
self, key, indexer, axis: int, raise_missing: bool = False
):
"""
Check that indexer can be used to return a result.
e.g. at least one element was found,
unless the list of keys was actually empty.
Parameters
----------
key : list-like
Targeted labels (only used to show correct error message).
indexer: array-like of booleans
Indices corresponding to the key,
(with -1 indicating not found).
axis: int
Dimension on which the indexing is being made.
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
"""
ax = self.obj._get_axis(axis)
if len(key) == 0:
return
# Count missing values:
missing = (indexer < 0).sum()
if missing:
if missing == len(indexer):
axis_name = self.obj._get_axis_name(axis)
raise KeyError(f"None of [{key}] are in the [{axis_name}]")
# We (temporarily) allow for some missing keys with .loc, except in
# some cases (e.g. setting) in which "raise_missing" will be False
if raise_missing:
not_found = list(set(key) - set(ax))
raise KeyError(f"{not_found} not in index")
# we skip the warning on Categorical
# as this check is actually done (check for
# non-missing values), but a bit later in the
# code, so we want to avoid warning & then
# just raising
if not ax.is_categorical():
raise KeyError(
"Passing list-likes to .loc or [] with any missing labels "
"is no longer supported, see "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
)
@doc(IndexingMixin.iloc)
class _iLocIndexer(_LocationIndexer):
_valid_types = (
"integer, integer slice (START point is INCLUDED, END "
"point is EXCLUDED), listlike of integers, boolean array"
)
_takeable = True
# -------------------------------------------------------------------
# Key Checks
def _validate_key(self, key, axis: int):
if com.is_bool_indexer(key):
if hasattr(key, "index") and isinstance(key.index, Index):
if key.index.inferred_type == "integer":
raise NotImplementedError(
"iLocation based boolean "
"indexing on an integer type "
"is not available"
)
raise ValueError(
"iLocation based boolean indexing cannot use "
"an indexable as a mask"
)
return
if isinstance(key, slice):
return
elif is_integer(key):
self._validate_integer(key, axis)
elif isinstance(key, tuple):
# a tuple should already have been caught by this point
# so don't treat a tuple as a valid indexer
raise IndexingError("Too many indexers")
elif is_list_like_indexer(key):
arr = np.array(key)
len_axis = len(self.obj._get_axis(axis))
# check that the key has a numeric dtype
if not is_numeric_dtype(arr.dtype):
raise IndexError(f".iloc requires numeric indexers, got {arr}")
# check that the key does not exceed the maximum size of the index
if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
raise IndexError("positional indexers are out-of-bounds")
else:
raise ValueError(f"Can only index by location with a [{self._valid_types}]")
def _has_valid_setitem_indexer(self, indexer) -> bool:
"""
        Validate that a positional indexer cannot enlarge its target;
        will raise if needed and does not modify the indexer externally.
Returns
-------
bool
"""
if isinstance(indexer, dict):
raise IndexError("iloc cannot enlarge its target object")
else:
if not isinstance(indexer, tuple):
indexer = _tuplify(self.ndim, indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("iloc cannot enlarge its target object")
elif isinstance(i, dict):
raise IndexError("iloc cannot enlarge its target object")
return True
def _is_scalar_access(self, key: Tuple) -> bool:
"""
Returns
-------
bool
"""
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if len(key) != self.ndim:
return False
for k in key:
if not is_integer(k):
return False
return True
def _validate_integer(self, key: int, axis: int) -> None:
"""
Check that 'key' is a valid position in the desired axis.
Parameters
----------
key : int
Requested position.
axis : int
Desired axis.
Raises
------
IndexError
If 'key' is not a valid position in axis 'axis'.
"""
len_axis = len(self.obj._get_axis(axis))
if key >= len_axis or key < -len_axis:
raise IndexError("single positional indexer is out-of-bounds")
# -------------------------------------------------------------------
def _getitem_tuple(self, tup: Tuple):
self._has_valid_tuple(tup)
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
return self._getitem_tuple_same_dim(tup)
def _get_list_axis(self, key, axis: int):
"""
Return Series values by list or array of integers.
Parameters
----------
key : list-like positional indexer
axis : int
Returns
-------
Series object
Notes
-----
`axis` can only be zero.
"""
try:
return self.obj._take_with_is_copy(key, axis=axis)
except IndexError as err:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds") from err
def _getitem_axis(self, key, axis: int):
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
if isinstance(key, list):
key = np.asarray(key)
if com.is_bool_indexer(key):
self._validate_key(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
elif is_list_like_indexer(key):
return self._get_list_axis(key, axis=axis)
# a single integer
else:
key = item_from_zerodim(key)
if not is_integer(key):
raise TypeError("Cannot index by location index with a non-integer key")
# validate the location
self._validate_integer(key, axis)
return self.obj._ixs(key, axis=axis)
def _get_slice_axis(self, slice_obj: slice, axis: int):
# caller is responsible for ensuring non-None axis
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
labels._validate_positional_slice(slice_obj)
return self.obj._slice(slice_obj, axis=axis)
def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
"""
Much simpler as we only have to deal with our valid types.
"""
return key
def _get_setitem_indexer(self, key):
# GH#32257 Fall through to let numpy do validation
return key
# -------------------------------------------------------------------
def _setitem_with_indexer(self, indexer, value):
"""
_setitem_with_indexer is for setting values on a Series/DataFrame
using positional indexers.
If the relevant keys are not present, the Series/DataFrame may be
expanded.
This method is currently broken when dealing with non-unique Indexes,
since it goes from positional indexers back to labels when calling
BlockManager methods, see GH#12991, GH#22046, GH#15686.
"""
# also has the side effect of consolidating in-place
from pandas import Series
info_axis = self.obj._info_axis_number
# maybe partial set
take_split_path = self.obj._is_mixed_type
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._mgr.blocks:
(blk,) = self.obj._mgr.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value, dict) else value
take_split_path = not blk._can_hold_element(val)
# if we have any multi-indexes that have non-trivial slices
# (not null slices) then we must take the split path, xref
# GH 10360, GH 27841
if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
for i, ax in zip(indexer, self.obj.axes):
if isinstance(ax, ABCMultiIndex) and not (
is_integer(i) or com.is_null_slice(i)
):
take_split_path = True
break
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key, _ = convert_missing_indexer(idx)
# if this is the items axes, then take the main missing
# path first
# this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed
# to possibly be modified
if self.ndim > 1 and i == info_axis:
# add the new item, and set the value
# must have all defined axes if we have a scalar
# or a list-like on the non-info axes if we have a
# list-like
len_non_info_axes = (
len(_ax) for _i, _ax in enumerate(self.obj.axes) if _i != i
)
if any(not l for l in len_non_info_axes):
if not is_list_like_indexer(value):
raise ValueError(
"cannot set a frame with no "
"defined index and a scalar"
)
self.obj[key] = value
return
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes
)
self._setitem_with_indexer(new_indexer, value)
return
# reindex the axis
# make sure to clear the cache because we are
# just replacing the block manager here
# so the object is the same
index = self.obj._get_axis(i)
labels = index.insert(len(index), key)
self.obj._mgr = self.obj.reindex(labels, axis=i)._mgr
self.obj._maybe_update_cacher(clear=True)
self.obj._is_copy = None
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = convert_missing_indexer(indexer)
if missing:
self._setitem_with_indexer_missing(indexer, value)
return
# set
item_labels = self.obj._get_axis(info_axis)
# align and set the values
if take_split_path:
# Above we only set take_split_path to True for 2D cases
assert self.ndim == 2
assert info_axis == 1
if not isinstance(indexer, tuple):
indexer = _tuplify(self.ndim, indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# Ensure we have something we can iterate over
ilocs = info_idx
if isinstance(info_idx, slice):
ri = Index(range(len(self.obj.columns)))
ilocs = ri[info_idx]
plane_indexer = indexer[:1]
lplane_indexer = length_of_indexer(plane_indexer[0], self.obj.index)
# lplane_indexer gives the expected length of obj[indexer[0]]
if len(labels) == 1:
# We can operate on a single column
# require that we are setting the right number of values that
# we are indexing
if is_list_like_indexer(value) and 0 != lplane_indexer != len(value):
# Exclude zero-len for e.g. boolean masking that is all-false
raise ValueError(
"cannot set using a multi-index "
"selection indexer with a different "
"length than the value"
)
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
def isetter(loc, v):
# positional setting on column loc
ser = self.obj._ixs(loc, axis=1)
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
# which means essentially reassign to the columns of a
# multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if isinstance(pi, tuple) and all(
com.is_null_slice(idx) or com.is_full_slice(idx, len(self.obj))
for idx in pi
):
ser = v
else:
# set the item, possibly having a dtype change
ser._consolidate_inplace()
ser = ser.copy()
ser._mgr = ser._mgr.setitem(indexer=pi, value=v)
ser._maybe_update_cacher(clear=True)
# reset the sliced object if unique
self.obj._iset_item(loc, ser)
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
# we have an equal len Frame
if isinstance(value, ABCDataFrame):
sub_indexer = list(indexer)
multiindex_indexer = isinstance(labels, ABCMultiIndex)
# TODO: we are implicitly assuming value.columns is unique
for loc in ilocs:
item = item_labels[loc]
if item in value:
sub_indexer[info_axis] = item
v = self._align_series(
tuple(sub_indexer), value[item], multiindex_indexer
)
else:
v = np.nan
isetter(loc, v)
# we have an equal len ndarray/convertible to our labels
# hasattr first, to avoid coercing to ndarray without reason.
# But we may be relying on the ndarray coercion to check ndim.
# Why not just convert to an ndarray earlier on if needed?
elif np.ndim(value) == 2:
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value, dtype=object)
if len(ilocs) != value.shape[1]:
raise ValueError(
"Must have equal len keys and value "
"when setting with an ndarray"
)
for i, loc in enumerate(ilocs):
# setting with a list, re-coerces
isetter(loc, value[:, i].tolist())
elif (
len(labels) == 1
and lplane_indexer == len(value)
and not is_scalar(plane_indexer[0])
):
# we have an equal len list/ndarray
# We only get here with len(labels) == len(ilocs) == 1
isetter(ilocs[0], value)
elif lplane_indexer == 0 and len(value) == len(self.obj.index):
# We get here in one case via .loc with a all-False mask
pass
else:
# per-label values
if len(ilocs) != len(value):
raise ValueError(
"Must have equal len keys and value "
"when setting with an iterable"
)
for loc, v in zip(ilocs, value):
isetter(loc, v)
else:
# scalar value
for loc in ilocs:
isetter(loc, value)
else:
if isinstance(indexer, tuple):
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (
len(indexer) > info_axis
and is_integer(indexer[info_axis])
and all(
com.is_null_slice(idx)
for i, idx in enumerate(indexer)
if i != info_axis
)
and item_labels.is_unique
):
self.obj[item_labels[indexer[info_axis]]] = value
return
indexer = maybe_convert_ix(*indexer)
if isinstance(value, (ABCSeries, dict)):
# TODO(EA): ExtensionBlock.setitem this causes issues with
# setting for extensionarrays that store dicts. Need to decide
# if it's worth supporting that.
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value)
self.obj._maybe_update_cacher(clear=True)
def _setitem_with_indexer_missing(self, indexer, value):
"""
Insert new row(s) or column(s) into the Series or DataFrame.
"""
from pandas import Series
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index), indexer)
# we have a coerced indexer, e.g. a float
# that matches in an Int64Index, so
# we will not create a duplicate index, rather
# index to that element
# e.g. 0.0 -> 0
# GH#12246
if index.is_unique:
new_indexer = index.get_indexer([new_index[-1]])
if (new_indexer != -1).any():
return self._setitem_with_indexer(new_indexer, value)
# this preserves dtype of the value
new_values = Series([value])._values
if len(self.obj._values):
# GH#22717 handle casting compatibility that np.concatenate
# does incorrectly
new_values = concat_compat([self.obj._values, new_values])
self.obj._mgr = self.obj._constructor(
new_values, index=new_index, name=self.obj.name
)._mgr
self.obj._maybe_update_cacher(clear=True)
elif self.ndim == 2:
if not len(self.obj.columns):
# no columns and scalar
raise ValueError("cannot set a frame with no defined columns")
if isinstance(value, ABCSeries):
# append a Series
value = value.reindex(index=self.obj.columns, copy=True)
value.name = indexer
else:
# a list-list
if is_list_like_indexer(value):
# must have conforming columns
if len(value) != len(self.obj.columns):
raise ValueError("cannot set a row with mismatched columns")
value = Series(value, index=self.obj.columns, name=indexer)
self.obj._mgr = self.obj.append(value)._mgr
self.obj._maybe_update_cacher(clear=True)
def _align_series(self, indexer, ser: ABCSeries, multiindex_indexer: bool = False):
"""
Parameters
----------
indexer : tuple, slice, scalar
Indexer used to get the locations that will be set to `ser`.
ser : pd.Series
Values to assign to the locations specified by `indexer`.
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
Returns
-------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = tuple([indexer])
if isinstance(indexer, tuple):
# flatten np.ndarray indexers
def ravel(i):
return i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
aligners = [not com.is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.ndim == 2
obj = self.obj
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
        # rather than find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
len_indexer = len(indexer[1])
ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T
return ser
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if is_sequence(idx) or isinstance(idx, slice):
if single_aligner and com.is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
new_ix = Index([new_ix])
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
return ser._values.copy()
return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
return ser._values.copy()
return ser.reindex(ax)._values
elif is_scalar(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values
raise ValueError("Incompatible indexer with Series")
def _align_frame(self, indexer, df: ABCDataFrame):
is_frame = self.ndim == 2
if isinstance(indexer, tuple):
idx, cols = None, None
sindexers = []
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
if is_sequence(ix) or isinstance(ix, slice):
if isinstance(ix, np.ndarray):
ix = ix.ravel()
if idx is None:
idx = ax[ix]
elif cols is None:
cols = ax[ix]
else:
break
else:
sindexers.append(i)
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
val = df.copy()._values
else:
val = df.reindex(idx, columns=cols)._values
return val
elif (isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame:
ax = self.obj.index[indexer]
if df.index.equals(ax):
val = df.copy()._values
else:
# we have a multi-index and are trying to align
# with a particular, level GH3738
if (
isinstance(ax, ABCMultiIndex)
and isinstance(df.index, ABCMultiIndex)
and ax.nlevels != df.index.nlevels
):
raise TypeError(
"cannot align on a multi-index with out "
"specifying the join levels"
)
val = df.reindex(index=ax)._values
return val
raise ValueError("Incompatible indexer with DataFrame")
class _ScalarAccessIndexer(_NDFrameIndexerBase):
"""
Access scalars quickly.
"""
def _convert_key(self, key, is_setter: bool = False):
raise AbstractMethodError(self)
def __getitem__(self, key):
if not isinstance(key, tuple):
# we could have a convertible item here (e.g. Timestamp)
if not is_list_like_indexer(key):
key = tuple([key])
else:
raise ValueError("Invalid call for scalar access (getting)!")
key = self._convert_key(key)
return self.obj._get_value(*key, takeable=self._takeable)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com.apply_if_callable(x, self.obj) for x in key)
else:
# scalar callable may return tuple
key = com.apply_if_callable(key, self.obj)
if not isinstance(key, tuple):
key = _tuplify(self.ndim, key)
key = list(self._convert_key(key, is_setter=True))
if len(key) != self.ndim:
raise ValueError("Not enough indexers for scalar access (setting)!")
self.obj._set_value(*key, value=value, takeable=self._takeable)
@doc(IndexingMixin.at)
class _AtIndexer(_ScalarAccessIndexer):
_takeable = False
def _convert_key(self, key, is_setter: bool = False):
"""
        Require the keys to be the same type as the index (so we don't
        fall back).
"""
# GH 26989
# For series, unpacking key needs to result in the label.
# This is already the case for len(key) == 1; e.g. (1,)
if self.ndim == 1 and len(key) > 1:
key = (key,)
# allow arbitrary setting
if is_setter:
return list(key)
return key
@property
def _axes_are_unique(self) -> bool:
# Only relevant for self.ndim == 2
assert self.ndim == 2
return self.obj.index.is_unique and self.obj.columns.is_unique
def __getitem__(self, key):
if self.ndim == 2 and not self._axes_are_unique:
# GH#33041 fall back to .loc
if not isinstance(key, tuple) or not all(is_scalar(x) for x in key):
raise ValueError("Invalid call for scalar access (getting)!")
return self.obj.loc[key]
return super().__getitem__(key)
def __setitem__(self, key, value):
if self.ndim == 2 and not self._axes_are_unique:
# GH#33041 fall back to .loc
if not isinstance(key, tuple) or not all(is_scalar(x) for x in key):
raise ValueError("Invalid call for scalar access (setting)!")
self.obj.loc[key] = value
return
return super().__setitem__(key, value)
@doc(IndexingMixin.iat)
class _iAtIndexer(_ScalarAccessIndexer):
_takeable = True
def _convert_key(self, key, is_setter: bool = False):
"""
Require integer args. (and convert to label arguments)
"""
for a, i in zip(self.obj.axes, key):
if not is_integer(i):
raise ValueError("iAt based indexing can only have integer indexers")
return key
def _tuplify(ndim: int, loc: Hashable) -> Tuple[Union[Hashable, slice], ...]:
"""
Given an indexer for the first dimension, create an equivalent tuple
for indexing over all dimensions.
Parameters
----------
ndim : int
loc : object
Returns
-------
tuple
"""
_tup: List[Union[Hashable, slice]]
_tup = [slice(None, None) for _ in range(ndim)]
_tup[0] = loc
return tuple(_tup)
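# Example: _tuplify expands a first-axis indexer into a full-rank key, e.g.
#   _tuplify(2, 0)   -> (0, slice(None, None))
#   _tuplify(3, "a") -> ("a", slice(None, None), slice(None, None))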
def convert_to_index_sliceable(obj: "DataFrame", key):
"""
If we are index sliceable, then return my slicer, otherwise return None.
"""
idx = obj.index
if isinstance(key, slice):
return idx._convert_slice_indexer(key, kind="getitem")
elif isinstance(key, str):
# we are an actual column
if key in obj.columns:
return None
# We might have a datetimelike string that we can translate to a
# slice here via partial string indexing
if idx._supports_partial_string_indexing:
try:
return idx._get_string_slice(key)
except (KeyError, ValueError, NotImplementedError):
return None
return None
def check_bool_indexer(index: Index, key) -> np.ndarray:
"""
Check if key is a valid boolean indexer for an object with such index and
perform reindexing or conversion if needed.
This function assumes that is_bool_indexer(key) == True.
Parameters
----------
index : Index
Index of the object on which the indexing is done.
key : list-like
Boolean indexer to check.
Returns
-------
np.array
Resulting key.
Raises
------
IndexError
If the key does not have the same length as index.
IndexingError
If the index of the key is unalignable to index.
"""
result = key
if isinstance(key, ABCSeries) and not key.index.equals(index):
result = result.reindex(index)
mask = isna(result._values)
if mask.any():
raise IndexingError(
"Unalignable boolean Series provided as "
"indexer (index of the boolean Series and of "
"the indexed object do not match)."
)
return result.astype(bool)._values
if is_object_dtype(key):
# key might be object-dtype bool, check_array_indexer needs bool array
result = np.asarray(result, dtype=bool)
elif not is_array_like(result):
# GH 33924
# key may contain nan elements, check_array_indexer needs bool array
result = pd_array(result, dtype=bool)
return check_array_indexer(index, result)
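# Example: a boolean Series indexer is first aligned to ``index``, so
#   check_bool_indexer(Index(["a", "b"]), Series([True, False], index=["b", "a"]))
# returns array([False, True]); labels missing from the Series raise IndexingError.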
def convert_missing_indexer(indexer):
"""
Reverse convert a missing indexer, which is a dict
return the scalar indexer and a boolean indicating if we converted
"""
if isinstance(indexer, dict):
# a missing key (but not a tuple indexer)
indexer = indexer["key"]
if isinstance(indexer, bool):
raise KeyError("cannot use a single bool to index into setitem")
return indexer, True
return indexer, False
def convert_from_missing_indexer_tuple(indexer, axes):
"""
Create a filtered indexer that doesn't have any missing indexers.
"""
def get_indexer(_i, _idx):
return axes[_i].get_loc(_idx["key"]) if isinstance(_idx, dict) else _idx
return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer))
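# Example: with axes = (Index(["a", "b"]), Index(["x"])) and
# indexer = ({"key": "b"}, 0), the result is (1, 0).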
def maybe_convert_ix(*args):
"""
We likely want to take the cross-product.
"""
ixify = True
for arg in args:
if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
ixify = False
if ixify:
return np.ix_(*args)
else:
return args
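# Example: maybe_convert_ix([0, 2], [1]) -> np.ix_([0, 2], [1]), an open mesh for
# cross-product indexing; if any argument is a scalar or slice, the arguments are
# returned unchanged.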
def is_nested_tuple(tup, labels) -> bool:
"""
Returns
-------
bool
"""
# check for a compatible nested tuple and multiindexes among the axes
if not isinstance(tup, tuple):
return False
for k in tup:
if is_list_like(k) or isinstance(k, slice):
return isinstance(labels, ABCMultiIndex)
return False
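# Example: is_nested_tuple((slice(None), ["a", "b"]), labels) is True when ``labels``
# is a MultiIndex and False for a flat Index.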
def is_label_like(key) -> bool:
"""
Returns
-------
bool
"""
# select a label or row
return not isinstance(key, slice) and not is_list_like_indexer(key)
def need_slice(obj) -> bool:
"""
Returns
-------
bool
"""
return (
obj.start is not None
or obj.stop is not None
or (obj.step is not None and obj.step != 1)
)
def _non_reducing_slice(slice_):
"""
Ensure that a slice doesn't reduce to a Series or Scalar.
Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
# ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
kinds = (ABCSeries, np.ndarray, Index, list, str)
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part) -> bool:
"""
Returns
-------
bool
True if slice does *not* reduce,
False if `part` is a tuple.
"""
# true when slice does *not* reduce, False when part is a tuple,
# i.e. MultiIndex slice
return (isinstance(part, slice) or is_list_like(part)) and not isinstance(
part, tuple
)
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
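# Example: _non_reducing_slice("A") -> (slice(None, None, None), ["A"]), i.e. all rows
# of column "A", kept two-dimensional so the result stays a DataFrame.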
def _maybe_numeric_slice(df, slice_, include_bool=False):
"""
Want nice defaults for background_gradient that don't break
with non-numeric data. But if slice_ is passed go with that.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
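# Example: with slice_=None and a DataFrame whose columns are numeric "x", "y" plus an
# object-dtype "label", this selects only the numeric columns, i.e.
# IndexSlice[:, ["x", "y"]]; bool columns are added only when include_bool=True.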
| 33.222798 | 135 | 0.53331 |
4cf83e07370d4ca48fff182a2f7f45d15fd2781c | 4,071 | py | Python | get_qas_e.py | billyang98/UNITER | c7f0833f14aa9dcb1e251a986c72e49edde1bdd4 | [
"MIT"
] | null | null | null | get_qas_e.py | billyang98/UNITER | c7f0833f14aa9dcb1e251a986c72e49edde1bdd4 | [
"MIT"
] | null | null | null | get_qas_e.py | billyang98/UNITER | c7f0833f14aa9dcb1e251a986c72e49edde1bdd4 | [
"MIT"
] | null | null | null | import json
import lmdb
from pytorch_pretrained_bert import BertTokenizer
import sys
import msgpack
from lz4.frame import compress, decompress
from tqdm import tqdm
import random
class TxtLmdb(object):
def __init__(self, db_dir, readonly=True):
self.readonly = readonly
if readonly:
# training
self.env = lmdb.open(db_dir,
readonly=True, create=False)
self.txn = self.env.begin(buffers=True)
self.write_cnt = None
else:
# prepro
self.env = lmdb.open(db_dir, readonly=False, create=True,
map_size=4 * 1024**4)
self.txn = self.env.begin(write=True)
self.write_cnt = 0
def __del__(self):
if self.write_cnt:
self.txn.commit()
self.env.close()
def __getitem__(self, key):
return msgpack.loads(decompress(self.txn.get(key.encode('utf-8'))),
raw=False)
def __setitem__(self, key, value):
# NOTE: not thread safe
if self.readonly:
raise ValueError('readonly text DB')
ret = self.txn.put(key.encode('utf-8'),
compress(msgpack.dumps(value, use_bin_type=True)))
self.write_cnt += 1
if self.write_cnt % 1000 == 0:
self.txn.commit()
self.txn = self.env.begin(write=True)
self.write_cnt = 0
return ret
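# Minimal usage sketch (path and key are illustrative): read one question record by id.
#   db = TxtLmdb('/path/to/questions.db')   # read-only by default
#   record = db['12345']                    # msgpack-decoded dict with fields such as
#                                           # 'toked_question' and 'img_fname'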
random.seed(0)
bl_db_path = '/scratch/cluster/billyang/vqa_dataset/txt_db/oov_datasets/oov_test_set.db'
baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_trained/results_test_normal_test/results_3000_all.json'))
bl_ans = {o['question_id']: o['answer'] for o in baseline_ans_list}
mexp_ans_list = json.load(open('vqa_txt_data/experiment_answers/results_test_synonyms_mask_2_ensemble_all.json'))
mexp_ans = {o['question_id']: o['answer'] for o in mexp_ans_list}
# placeholder path -- point this at the mask-ensemble txt_db read by get_qas() below
mexp_db_path = '/path/to/mask_ensemble_test_set.db'
ensemble_answers = {}
for ensemble in tqdm(range(1, 11)):
exp_name = 'results_test_synonyms_mask_2_ensemble_{}'.format(ensemble)
exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_trained/{}/results_3000_all.json'.format(exp_name)
exp_ans_list = json.load(open(exp_ans_file))
exp_ans = {o['question_id']: o['answer'] for o in exp_ans_list}
for qid, answer in exp_ans.items():
if qid not in ensemble_answers:
ensemble_answers[qid] = [answer]
else:
ensemble_answers[qid].append(answer)
def get_qas(name, exp_db_path, exp_ans_path, exp_comp_path, sample=-1):
exp_ans_list = json.load(open(exp_ans_path))
exp_ans = {o['question_id']: o['answer'] for o in exp_ans_list}
exp_comp = json.load(open(exp_comp_path))
rtw_list = exp_comp['rtw']
wtr_list = exp_comp['wtr']
if sample >= 0:
rtw_list = random.sample(rtw_list, min(sample, len(rtw_list)))
wtr_list = random.sample(wtr_list, min(sample, len(wtr_list)))
bl_db = TxtLmdb(bl_db_path)
exp_db = TxtLmdb(exp_db_path)
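    # NOTE: mexp_db_path is not defined anywhere in this script; it has to be
    # set (e.g. at module level next to bl_db_path) before get_qas() is called.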
mexp_db = TxtLmdb(mexp_db_path)
qas_dict = {'rtw': {}, 'wtr': {}}
for list_name, q_list in [('rtw', rtw_list), ('wtr', wtr_list)]:
for qid in q_list:
bl_value = bl_db[qid]
exp_value = exp_db[qid]
mexp_value = mexp_db[qid]
bl_a = bl_ans[qid]
exp_a = exp_ans[qid]
bl_q = bl_value['toked_question']
exp_q = exp_value['toked_question']
img_fname = bl_value['img_fname']
            qas_dict[list_name][qid] = {
                'bl_q': bl_q, 'exp_q': exp_q,
                'mexp_q': mexp_value['toked_question'],
                'bl_ans': bl_a, 'exp_ans': exp_a,
                'mexp_ans': mexp_ans[qid], 'img_fname': img_fname,
            }
print("dumping")
json.dump(qas_dict, open('qas_{}.json'.format(name), 'w'))
# get_qas("")  # stray call commented out: get_qas() requires the db/answer/comparison paths supplied in __main__ below
if __name__ == '__main__':
if len(sys.argv) == 6:
sample = int(sys.argv[5])
else:
sample = -1
get_qas(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sample)
| 35.4 | 142 | 0.621223 |
350225d149dc1b7f5583cad894ebec4a1bbe054a | 502 | py | Python | vizit/__init__.py | DCRichards/VizIt | d2fa241ccf46ea83995e57b87d288417d86df593 | [
"CNRI-Python"
] | 1 | 2015-12-07T21:59:48.000Z | 2015-12-07T21:59:48.000Z | vizit/__init__.py | DCRichards/VizIt | d2fa241ccf46ea83995e57b87d288417d86df593 | [
"CNRI-Python"
] | 2 | 2015-11-04T09:48:35.000Z | 2015-11-29T14:04:00.000Z | vizit/__init__.py | DCRichards/vizit | d2fa241ccf46ea83995e57b87d288417d86df593 | [
"CNRI-Python"
] | null | null | null | """
# # # ###### # #####
# # # # # #
# # # # # #
# # # # # #
# # # # # #
## # ###### # #
Simple dependency visualisation
by DCRichards
"""
import os
import parser
import graphs
import cli
import languages
def _create_graph(directory, lang, js):
graphs.generate(parser.parse(directory, lang), js)
def main():
cli_args = cli.get_args()
_create_graph(cli_args[0], languages.get(cli_args[1]), cli_args[2])
main() | 17.310345 | 71 | 0.498008 |
45e5bf796fb08d21cfcd037198d7e9606ef20a12 | 7,130 | py | Python | chemdataextractor/nlp/lexicon.py | edbeard/chemdataextractor-uvvis2018 | a5750d5313a250468e29d244cd4aeafdfc3250da | [
"MIT"
] | 6 | 2019-12-05T17:10:19.000Z | 2021-08-10T15:15:10.000Z | chemdataextractor/nlp/lexicon.py | edbeard/chemdataextractor-uvvis2018 | a5750d5313a250468e29d244cd4aeafdfc3250da | [
"MIT"
] | null | null | null | chemdataextractor/nlp/lexicon.py | edbeard/chemdataextractor-uvvis2018 | a5750d5313a250468e29d244cd4aeafdfc3250da | [
"MIT"
] | 2 | 2020-06-29T06:58:53.000Z | 2021-03-21T08:39:36.000Z | # -*- coding: utf-8 -*-
"""
chemdataextractor.nlp.lexicon
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2016 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import six
from ..data import load_model
from ..text import word_shape, is_ascii, is_punct, like_url, like_number
from ..text.normalize import Normalizer, ChemNormalizer
from ..utils import Singleton
log = logging.getLogger(__name__)
class Lexeme(object):
""""""
__slots__ = ('text', 'normalized', 'lower', 'first', 'suffix', 'shape', 'length', 'upper_count', 'lower_count',
'digit_count', 'is_alpha', 'is_ascii', 'is_digit', 'is_lower', 'is_upper', 'is_title', 'is_punct',
'is_hyphenated', 'like_url', 'like_number', 'cluster')
def __init__(self, text, normalized, lower, first, suffix, shape, length, upper_count, lower_count, digit_count,
is_alpha, is_ascii, is_digit, is_lower, is_upper, is_title, is_punct, is_hyphenated, like_url,
like_number, cluster):
#: Original Lexeme text.
self.text = text
#: The Brown Word Cluster for this Lexeme.
self.cluster = cluster
#: Normalized text, using the Lexicon Normalizer.
self.normalized = normalized
#: Lowercase text.
self.lower = lower
#: First character.
self.first = first
#: Three-character suffix
self.suffix = suffix
#: Word shape. Derived by replacing every number with `d', every greek letter with `g', and every latin letter with `X' or `x' for uppercase and lowercase respectively.
self.shape = shape
#: Lexeme length.
self.length = length
#: Count of uppercase characters.
self.upper_count = upper_count
#: Count of lowercase characters.
self.lower_count = lower_count
#: Count of digits.
self.digit_count = digit_count
#: Whether the text is entirely alphabetical characters.
self.is_alpha = is_alpha
#: Whether the text is entirely ASCII characters.
self.is_ascii = is_ascii
#: Whether the text is entirely digits.
self.is_digit = is_digit
#: Whether the text is entirely lowercase.
self.is_lower = is_lower
#: Whether the text is entirely uppercase.
self.is_upper = is_upper
#: Whether the text is title cased.
self.is_title = is_title
#: Whether the text is entirely punctuation characters.
self.is_punct = is_punct
#: Whether the text is hyphenated.
self.is_hyphenated = is_hyphenated
#: Whether the text looks like a URL.
self.like_url = like_url
#: Whether the text looks like a number.
self.like_number = like_number
class Lexicon(six.with_metaclass(Singleton)):
""""""
#: The Normalizer for this Lexicon.
normalizer = Normalizer()
#: Path to the Brown clusters model file for this Lexicon.
clusters_path = None
def __init__(self):
""""""
self.lexemes = {}
self.clusters = {}
self._loaded_clusters = False
def __len__(self):
"""The current number of lexemes stored."""
return len(self.lexemes)
def add(self, text):
"""Add text to the lexicon.
:param string text: The text to add.
"""
# logging.debug('Adding to lexicon: %s' % text)
if text not in self.lexemes:
normalized = self.normalized(text)
self.lexemes[text] = Lexeme(
text=text,
normalized=normalized,
lower=self.lower(normalized),
first=self.first(normalized),
suffix=self.suffix(normalized),
shape=self.shape(normalized),
length=self.length(normalized),
upper_count=self.upper_count(normalized),
lower_count=self.lower_count(normalized),
digit_count=self.digit_count(normalized),
is_alpha=self.is_alpha(normalized),
is_ascii=self.is_ascii(normalized),
is_digit=self.is_digit(normalized),
is_lower=self.is_lower(normalized),
is_upper=self.is_upper(normalized),
is_title=self.is_title(normalized),
is_punct=self.is_punct(normalized),
is_hyphenated=self.is_hyphenated(normalized),
like_url=self.like_url(normalized),
like_number=self.like_number(normalized),
cluster=self.cluster(normalized)
)
def __getitem__(self, text):
"""Return the requested lexeme from the Lexicon.
:param string text: Text of the lexeme to retrieve.
:rtype: Lexeme
:returns: The requested Lexeme.
"""
self.add(text)
return self.lexemes[text]
def cluster(self, text):
""""""
if not self._loaded_clusters and self.clusters_path:
self.clusters = load_model(self.clusters_path)
self._loaded_clusters = True
return self.clusters.get(text, None)
def normalized(self, text):
""""""
return self.normalizer(text)
def lower(self, text):
""""""
return text.lower()
def first(self, text):
""""""
return text[:1]
def suffix(self, text):
""""""
return text[-3:]
def shape(self, text):
""""""
return word_shape(text)
def length(self, text):
""""""
return len(text)
def digit_count(self, text):
""""""
return sum(c.isdigit() for c in text)
def upper_count(self, text):
""""""
return sum(c.isupper() for c in text)
def lower_count(self, text):
""""""
return sum(c.islower() for c in text)
def is_alpha(self, text):
""""""
return text.isalpha()
def is_ascii(self, text):
""""""
return is_ascii(text)
def is_digit(self, text):
""""""
return text.isdigit()
def is_lower(self, text):
""""""
return text.islower()
def is_upper(self, text):
""""""
return text.isupper()
def is_title(self, text):
""""""
return text.istitle()
def is_punct(self, text):
""""""
return is_punct(text)
def is_hyphenated(self, text):
""""""
# TODO: What about '--'?
return '-' in text and not text == '-'
def like_url(self, text):
""""""
return like_url(text)
def like_number(self, text):
""""""
return like_number(text)
class ChemLexicon(Lexicon):
"""A Lexicon that is pre-configured with a Chemistry-aware Normalizer and Brown word clusters derived from a
chemistry corpus."""
normalizer = ChemNormalizer()
clusters_path = 'models/clusters_chem1500-1.0.pickle'
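# Illustrative usage sketch (added comment, not part of the original module;
# assumes the ChemNormalizer leaves a plain ASCII token unchanged):
#
#   lexicon = ChemLexicon()               # Singleton: repeated calls share the cache
#   lex = lexicon['Benzene']              # Lexeme is computed once, then cached
#   lex.lower, lex.length, lex.is_title   # -> 'benzene', 7, True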
| 30.470085 | 176 | 0.591304 |
fe963bc5b7e8ba3e08ae755f51de6ca7952e8159 | 45,914 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/website_sale/controllers/main.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/website_sale/controllers/main.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/website_sale/controllers/main.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import logging
from werkzeug.exceptions import Forbidden
from odoo import http, tools, _
from odoo.http import request
from odoo.addons.base.ir.ir_qweb.fields import nl2br
from odoo.addons.website.models.website import slug
from odoo.addons.website.controllers.main import QueryURL
from odoo.exceptions import ValidationError
from odoo.addons.website_form.controllers.main import WebsiteForm
_logger = logging.getLogger(__name__)
PPG = 20 # Products Per Page
PPR = 4 # Products Per Row
class TableCompute(object):
def __init__(self):
self.table = {}
def _check_place(self, posx, posy, sizex, sizey):
res = True
for y in range(sizey):
for x in range(sizex):
if posx + x >= PPR:
res = False
break
row = self.table.setdefault(posy + y, {})
if row.setdefault(posx + x) is not None:
res = False
break
for x in range(PPR):
self.table[posy + y].setdefault(x, None)
return res
def process(self, products, ppg=PPG):
# Compute products positions on the grid
minpos = 0
index = 0
maxy = 0
for p in products:
x = min(max(p.website_size_x, 1), PPR)
y = min(max(p.website_size_y, 1), PPR)
if index >= ppg:
x = y = 1
pos = minpos
while not self._check_place(pos % PPR, pos / PPR, x, y):
pos += 1
            # if the 21st product (index 20) is reached and the last line is full (PPR products in it), break
# (pos + 1.0) / PPR is the line where the product would be inserted
# maxy is the number of existing lines
# + 1.0 is because pos begins at 0, thus pos 20 is actually the 21st block
# and to force python to not round the division operation
if index >= ppg and ((pos + 1.0) / PPR) > maxy:
break
if x == 1 and y == 1: # simple heuristic for CPU optimization
minpos = pos / PPR
for y2 in range(y):
for x2 in range(x):
self.table[(pos / PPR) + y2][(pos % PPR) + x2] = False
self.table[pos / PPR][pos % PPR] = {
'product': p, 'x': x, 'y': y,
'class': " ".join(map(lambda x: x.html_class or '', p.website_style_ids))
}
if index <= ppg:
maxy = max(maxy, y + (pos / PPR))
index += 1
# Format table according to HTML needs
rows = self.table.items()
rows.sort()
rows = map(lambda x: x[1], rows)
for col in range(len(rows)):
cols = rows[col].items()
cols.sort()
x += len(cols)
rows[col] = [c for c in map(lambda x: x[1], cols) if c]
return rows
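    # Illustrative shape of the value returned by process() (added comment; the
    # products are hypothetical): with PPR = 4, a 2x1 product followed by two
    # 1x1 products fills row 0 as
    #   [[{'product': p1, 'x': 2, 'y': 1, 'class': ''},
    #     {'product': p2, 'x': 1, 'y': 1, 'class': ''},
    #     {'product': p3, 'x': 1, 'y': 1, 'class': ''}]]
    # -- the cell hidden under p1's extra column is stored as False and dropped
    # by the final `if c` filter, so the template only iterates real cells.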
# TODO keep with input type hidden
class WebsiteSaleForm(WebsiteForm):
@http.route('/website_form/shop.sale.order', type='http', auth="public", methods=['POST'], website=True)
def website_form_saleorder(self, **kwargs):
model_record = request.env.ref('sale.model_sale_order')
try:
data = self.extract_data(model_record, kwargs)
except ValidationError, e:
return json.dumps({'error_fields': e.args[0]})
order = request.website.sale_get_order()
if data['record']:
order.write(data['record'])
if data['custom']:
values = {
'body': nl2br(data['custom']),
'model': 'sale.order',
'message_type': 'comment',
'no_auto_thread': False,
'res_id': order.id,
}
request.env['mail.message'].sudo().create(values)
if data['attachments']:
self.insert_attachment(model_record, order.id, data['attachments'])
return json.dumps({'id': order.id})
class WebsiteSale(http.Controller):
def get_attribute_value_ids(self, product):
""" list of selectable attributes of a product
:return: list of product variant description
(variant id, [visible attribute ids], variant price, variant sale price)
"""
# product attributes with at least two choices
product = product.with_context(quantity=1)
visible_attrs_ids = product.attribute_line_ids.filtered(lambda l: len(l.value_ids) > 1).mapped('attribute_id').ids
to_currency = request.website.get_current_pricelist().currency_id
attribute_value_ids = []
for variant in product.product_variant_ids:
if to_currency != product.currency_id:
price = variant.currency_id.compute(variant.website_public_price, to_currency)
else:
price = variant.website_public_price
visible_attribute_ids = [v.id for v in variant.attribute_value_ids if v.attribute_id.id in visible_attrs_ids]
attribute_value_ids.append([variant.id, visible_attribute_ids, variant.website_price, price])
return attribute_value_ids
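    # Illustrative return value (values are made up): for a template with two
    # variants that differ only by one attribute value this gives
    #   [[12, [3], 20.0, 25.0], [13, [4], 20.0, 25.0]]
    # i.e. [variant id, visible attribute value ids, pricelist price,
    #       public price converted to the website pricelist currency].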
def _get_search_order(self, post):
# OrderBy will be parsed in orm and so no direct sql injection
# id is added to be sure that order is a unique sort key
return 'website_published desc,%s , id desc' % post.get('order', 'website_sequence desc')
def _get_search_domain(self, search, category, attrib_values):
domain = request.website.sale_product_domain()
if search:
for srch in search.split(" "):
domain += [
'|', '|', '|', ('name', 'ilike', srch), ('description', 'ilike', srch),
('description_sale', 'ilike', srch), ('product_variant_ids.default_code', 'ilike', srch)]
if category:
domain += [('public_categ_ids', 'child_of', int(category))]
if attrib_values:
attrib = None
ids = []
for value in attrib_values:
if not attrib:
attrib = value[0]
ids.append(value[1])
elif value[0] == attrib:
ids.append(value[1])
else:
domain += [('attribute_line_ids.value_ids', 'in', ids)]
attrib = value[0]
ids = [value[1]]
if attrib:
domain += [('attribute_line_ids.value_ids', 'in', ids)]
return domain
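    # Illustrative result (made-up values): search="chair", category=4 and an
    # attribute filter "2-7" extend the base website domain roughly as
    #   [..., '|', '|', '|', ('name', 'ilike', 'chair'),
    #    ('description', 'ilike', 'chair'), ('description_sale', 'ilike', 'chair'),
    #    ('product_variant_ids.default_code', 'ilike', 'chair'),
    #    ('public_categ_ids', 'child_of', 4),
    #    ('attribute_line_ids.value_ids', 'in', [7])]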
@http.route([
'/shop',
'/shop/page/<int:page>',
'/shop/category/<model("product.public.category"):category>',
'/shop/category/<model("product.public.category"):category>/page/<int:page>'
], type='http', auth="public", website=True)
def shop(self, page=0, category=None, search='', ppg=False, **post):
if ppg:
try:
ppg = int(ppg)
except ValueError:
ppg = PPG
post["ppg"] = ppg
else:
ppg = PPG
attrib_list = request.httprequest.args.getlist('attrib')
attrib_values = [map(int, v.split("-")) for v in attrib_list if v]
attributes_ids = set([v[0] for v in attrib_values])
attrib_set = set([v[1] for v in attrib_values])
domain = self._get_search_domain(search, category, attrib_values)
keep = QueryURL('/shop', category=category and int(category), search=search, attrib=attrib_list, order=post.get('order'))
pricelist_context = dict(request.env.context)
if not pricelist_context.get('pricelist'):
pricelist = request.website.get_current_pricelist()
pricelist_context['pricelist'] = pricelist.id
else:
pricelist = request.env['product.pricelist'].browse(pricelist_context['pricelist'])
request.context = dict(request.context, pricelist=pricelist.id, partner=request.env.user.partner_id)
url = "/shop"
if search:
post["search"] = search
if category:
category = request.env['product.public.category'].browse(int(category))
url = "/shop/category/%s" % slug(category)
if attrib_list:
post['attrib'] = attrib_list
categs = request.env['product.public.category'].search([('parent_id', '=', False)])
Product = request.env['product.template']
parent_category_ids = []
if category:
parent_category_ids = [category.id]
current_category = category
while current_category.parent_id:
parent_category_ids.append(current_category.parent_id.id)
current_category = current_category.parent_id
product_count = Product.search_count(domain)
pager = request.website.pager(url=url, total=product_count, page=page, step=ppg, scope=7, url_args=post)
products = Product.search(domain, limit=ppg, offset=pager['offset'], order=self._get_search_order(post))
ProductAttribute = request.env['product.attribute']
if products:
# get all products without limit
selected_products = Product.search(domain, limit=False)
attributes = ProductAttribute.search([('attribute_line_ids.product_tmpl_id', 'in', selected_products.ids)])
else:
attributes = ProductAttribute.browse(attributes_ids)
from_currency = request.env.user.company_id.currency_id
to_currency = pricelist.currency_id
compute_currency = lambda price: from_currency.compute(price, to_currency)
values = {
'search': search,
'category': category,
'attrib_values': attrib_values,
'attrib_set': attrib_set,
'pager': pager,
'pricelist': pricelist,
'products': products,
'search_count': product_count, # common for all searchbox
'bins': TableCompute().process(products, ppg),
'rows': PPR,
'categories': categs,
'attributes': attributes,
'compute_currency': compute_currency,
'keep': keep,
'parent_category_ids': parent_category_ids,
}
if category:
values['main_object'] = category
return request.render("website_sale.products", values)
@http.route(['/shop/product/<model("product.template"):product>'], type='http', auth="public", website=True)
def product(self, product, category='', search='', **kwargs):
product_context = dict(request.env.context, active_id=product.id)
ProductCategory = request.env['product.public.category']
Rating = request.env['rating.rating']
if category:
category = ProductCategory.browse(int(category)).exists()
attrib_list = request.httprequest.args.getlist('attrib')
attrib_values = [map(int, v.split("-")) for v in attrib_list if v]
attrib_set = set([v[1] for v in attrib_values])
keep = QueryURL('/shop', category=category and category.id, search=search, attrib=attrib_list)
categs = ProductCategory.search([('parent_id', '=', False)])
pricelist = request.website.get_current_pricelist()
from_currency = request.env.user.company_id.currency_id
to_currency = pricelist.currency_id
compute_currency = lambda price: from_currency.compute(price, to_currency)
# get the rating attached to a mail.message, and the rating stats of the product
ratings = Rating.search([('message_id', 'in', product.website_message_ids.ids)])
rating_message_values = dict([(record.message_id.id, record.rating) for record in ratings])
rating_product = product.rating_get_stats([('website_published', '=', True)])
if not product_context.get('pricelist'):
product_context['pricelist'] = pricelist.id
product = product.with_context(product_context)
values = {
'search': search,
'category': category,
'pricelist': pricelist,
'attrib_values': attrib_values,
'compute_currency': compute_currency,
'attrib_set': attrib_set,
'keep': keep,
'categories': categs,
'main_object': product,
'product': product,
'get_attribute_value_ids': self.get_attribute_value_ids,
'rating_message_values': rating_message_values,
'rating_product': rating_product
}
return request.render("website_sale.product", values)
@http.route(['/shop/change_pricelist/<model("product.pricelist"):pl_id>'], type='http', auth="public", website=True)
def pricelist_change(self, pl_id, **post):
if (pl_id.selectable or pl_id == request.env.user.partner_id.property_product_pricelist) \
and request.website.is_pricelist_available(pl_id.id):
request.session['website_sale_current_pl'] = pl_id.id
request.website.sale_get_order(force_pricelist=pl_id.id)
return request.redirect(request.httprequest.referrer or '/shop')
@http.route(['/shop/pricelist'], type='http', auth="public", website=True)
def pricelist(self, promo, **post):
pricelist = request.env['product.pricelist'].sudo().search([('code', '=', promo)], limit=1)
if pricelist and not request.website.is_pricelist_available(pricelist.id):
return request.redirect("/shop/cart?code_not_available=1")
request.website.sale_get_order(code=promo)
return request.redirect("/shop/cart")
@http.route(['/shop/cart'], type='http', auth="public", website=True)
def cart(self, **post):
order = request.website.sale_get_order()
if order:
from_currency = order.company_id.currency_id
to_currency = order.pricelist_id.currency_id
compute_currency = lambda price: from_currency.compute(price, to_currency)
else:
compute_currency = lambda price: price
values = {
'website_sale_order': order,
'compute_currency': compute_currency,
'suggested_products': [],
}
if order:
_order = order
if not request.env.context.get('pricelist'):
_order = order.with_context(pricelist=order.pricelist_id.id)
values['suggested_products'] = _order._cart_accessories()
if post.get('type') == 'popover':
return request.render("website_sale.cart_popover", values)
if post.get('code_not_available'):
values['code_not_available'] = post.get('code_not_available')
return request.render("website_sale.cart", values)
@http.route(['/shop/cart/update'], type='http', auth="public", methods=['POST'], website=True, csrf=False)
def cart_update(self, product_id, add_qty=1, set_qty=0, **kw):
request.website.sale_get_order(force_create=1)._cart_update(
product_id=int(product_id),
add_qty=float(add_qty),
set_qty=float(set_qty),
attributes=self._filter_attributes(**kw),
)
return request.redirect("/shop/cart")
def _filter_attributes(self, **kw):
return {k: v for k, v in kw.items() if "attribute" in k}
@http.route(['/shop/cart/update_json'], type='json', auth="public", methods=['POST'], website=True, csrf=False)
def cart_update_json(self, product_id, line_id=None, add_qty=None, set_qty=None, display=True):
order = request.website.sale_get_order(force_create=1)
if order.state != 'draft':
request.website.sale_reset()
return {}
value = order._cart_update(product_id=product_id, line_id=line_id, add_qty=add_qty, set_qty=set_qty)
if not order.cart_quantity:
request.website.sale_reset()
return {}
if not display:
return None
order = request.website.sale_get_order()
value['cart_quantity'] = order.cart_quantity
from_currency = order.company_id.currency_id
to_currency = order.pricelist_id.currency_id
value['website_sale.cart_lines'] = request.env['ir.ui.view'].render_template("website_sale.cart_lines", {
'website_sale_order': order,
'compute_currency': lambda price: from_currency.compute(price, to_currency),
'suggested_products': order._cart_accessories()
})
return value
# ------------------------------------------------------
# Checkout
# ------------------------------------------------------
def checkout_redirection(self, order):
# must have a draft sale order with lines at this point, otherwise reset
if not order or order.state != 'draft':
request.session['sale_order_id'] = None
request.session['sale_transaction_id'] = None
return request.redirect('/shop')
# if transaction pending / done: redirect to confirmation
tx = request.env.context.get('website_sale_transaction')
if tx and tx.state != 'draft':
return request.redirect('/shop/payment/confirmation/%s' % order.id)
def checkout_values(self, **kw):
order = request.website.sale_get_order(force_create=1)
shippings = []
if order.partner_id != request.website.user_id.sudo().partner_id:
Partner = order.partner_id.with_context(show_address=1).sudo()
shippings = Partner.search([
("id", "child_of", order.partner_id.commercial_partner_id.ids),
'|', ("type", "=", "delivery"), ("id", "=", order.partner_id.commercial_partner_id.id)
], order='id desc')
if shippings:
if kw.get('partner_id') or 'use_billing' in kw:
if 'use_billing' in kw:
partner_id = order.partner_id.id
else:
partner_id = int(kw.get('partner_id'))
if partner_id in shippings.mapped('id'):
order.partner_shipping_id = partner_id
elif not order.partner_shipping_id:
last_order = request.env['sale.order'].sudo().search([("partner_id", "=", order.partner_id.id)], order='id desc', limit=1)
order.partner_shipping_id.id = last_order and last_order.id
values = {
'order': order,
'shippings': shippings,
'only_services': order and order.only_services or False
}
return values
def _get_mandatory_billing_fields(self):
return ["name", "email", "street", "city", "country_id"]
def _get_mandatory_shipping_fields(self):
return ["name", "street", "city", "country_id"]
def checkout_form_validate(self, mode, all_form_values, data):
# mode: tuple ('new|edit', 'billing|shipping')
# all_form_values: all values before preprocess
# data: values after preprocess
error = dict()
error_message = []
# Required fields from form
required_fields = filter(None, (all_form_values.get('field_required') or '').split(','))
# Required fields from mandatory field function
required_fields += mode[1] == 'shipping' and self._get_mandatory_shipping_fields() or self._get_mandatory_billing_fields()
# Check if state required
if data.get('country_id'):
country = request.env['res.country'].browse(int(data.get('country_id')))
if 'state_code' in country.get_address_fields() and country.state_ids:
required_fields += ['state_id']
# error message for empty required fields
for field_name in required_fields:
if not data.get(field_name):
error[field_name] = 'missing'
# email validation
if data.get('email') and not tools.single_email_re.match(data.get('email')):
error["email"] = 'error'
error_message.append(_('Invalid Email! Please enter a valid email address.'))
# vat validation
Partner = request.env['res.partner']
if data.get("vat") and hasattr(Partner, "check_vat"):
check_func = request.website.company_id.vat_check_vies and Partner.vies_vat_check or Partner.simple_vat_check
vat_country, vat_number = Partner._split_vat(data.get("vat"))
if not check_func(vat_country, vat_number):
error["vat"] = 'error'
if [err for err in error.values() if err == 'missing']:
error_message.append(_('Some required fields are empty.'))
return error, error_message
def _checkout_form_save(self, mode, checkout, all_values):
Partner = request.env['res.partner']
if mode[0] == 'new':
partner_id = Partner.sudo().create(checkout)
elif mode[0] == 'edit':
partner_id = int(all_values.get('partner_id', 0))
if partner_id:
# double check
order = request.website.sale_get_order()
shippings = Partner.sudo().search([("id", "child_of", order.partner_id.commercial_partner_id.ids)])
if partner_id not in shippings.mapped('id') and partner_id != order.partner_id.id:
return Forbidden()
Partner.browse(partner_id).sudo().write(checkout)
return partner_id
def values_preprocess(self, order, mode, values):
return values
def values_postprocess(self, order, mode, values, errors, error_msg):
new_values = {}
authorized_fields = request.env['ir.model'].sudo().search([('model', '=', 'res.partner')])._get_form_writable_fields()
for k, v in values.items():
# don't drop empty value, it could be a field to reset
if k in authorized_fields and v is not None:
new_values[k] = v
else: # DEBUG ONLY
if k not in ('field_required', 'partner_id', 'callback', 'submitted'): # classic case
_logger.debug("website_sale postprocess: %s value has been dropped (empty or not writable)" % k)
new_values['customer'] = True
new_values['team_id'] = request.website.salesteam_id and request.website.salesteam_id.id
lang = request.lang if request.lang in request.website.mapped('language_ids.code') else None
if lang:
new_values['lang'] = lang
if mode == ('edit', 'billing') and order.partner_id.type == 'contact':
new_values['type'] = 'other'
if mode[1] == 'shipping':
new_values['parent_id'] = order.partner_id.commercial_partner_id.id
new_values['type'] = 'delivery'
return new_values, errors, error_msg
@http.route(['/shop/address'], type='http', methods=['GET', 'POST'], auth="public", website=True)
def address(self, **kw):
Partner = request.env['res.partner'].with_context(show_address=1).sudo()
order = request.website.sale_get_order()
redirection = self.checkout_redirection(order)
if redirection:
return redirection
mode = (False, False)
def_country_id = order.partner_id.country_id
values, errors = {}, {}
partner_id = int(kw.get('partner_id', -1))
# IF PUBLIC ORDER
if order.partner_id.id == request.website.user_id.sudo().partner_id.id:
mode = ('new', 'billing')
country_code = request.session['geoip'].get('country_code')
if country_code:
def_country_id = request.env['res.country'].search([('code', '=', country_code)], limit=1)
else:
def_country_id = request.website.user_id.sudo().country_id
# IF ORDER LINKED TO A PARTNER
else:
if partner_id > 0:
if partner_id == order.partner_id.id:
mode = ('edit', 'billing')
else:
shippings = Partner.search([('id', 'child_of', order.partner_id.commercial_partner_id.ids)])
if partner_id in shippings.mapped('id'):
mode = ('edit', 'shipping')
else:
return Forbidden()
if mode:
values = Partner.browse(partner_id)
elif partner_id == -1:
mode = ('new', 'shipping')
else: # no mode - refresh without post?
return request.redirect('/shop/checkout')
# IF POSTED
if 'submitted' in kw:
pre_values = self.values_preprocess(order, mode, kw)
errors, error_msg = self.checkout_form_validate(mode, kw, pre_values)
post, errors, error_msg = self.values_postprocess(order, mode, pre_values, errors, error_msg)
if errors:
errors['error_message'] = error_msg
values = kw
else:
partner_id = self._checkout_form_save(mode, post, kw)
if mode[1] == 'billing':
order.partner_id = partner_id
order.onchange_partner_id()
elif mode[1] == 'shipping':
order.partner_shipping_id = partner_id
order.message_partner_ids = [(4, partner_id), (3, request.website.partner_id.id)]
if not errors:
return request.redirect(kw.get('callback') or '/shop/checkout')
country = 'country_id' in values and values['country_id'] != '' and request.env['res.country'].browse(int(values['country_id']))
country = country and country.exists() or def_country_id
render_values = {
'partner_id': partner_id,
'mode': mode,
'checkout': values,
'country': country,
'countries': country.get_website_sale_countries(mode=mode[1]),
"states": country.get_website_sale_states(mode=mode[1]),
'error': errors,
'callback': kw.get('callback'),
}
return request.render("website_sale.address", render_values)
@http.route(['/shop/checkout'], type='http', auth="public", website=True)
def checkout(self, **post):
order = request.website.sale_get_order()
redirection = self.checkout_redirection(order)
if redirection:
return redirection
if order.partner_id.id == request.website.user_id.sudo().partner_id.id:
return request.redirect('/shop/address')
for f in self._get_mandatory_billing_fields():
if not order.partner_id[f]:
return request.redirect('/shop/address?partner_id=%d' % order.partner_id.id)
values = self.checkout_values(**post)
# Avoid useless rendering if called in ajax
if post.get('xhr'):
return 'ok'
return request.render("website_sale.checkout", values)
@http.route(['/shop/confirm_order'], type='http', auth="public", website=True)
def confirm_order(self, **post):
order = request.website.sale_get_order()
redirection = self.checkout_redirection(order)
if redirection:
return redirection
order.onchange_partner_shipping_id()
order.order_line._compute_tax_id()
request.session['sale_last_order_id'] = order.id
request.website.sale_get_order(update_pricelist=True)
extra_step = request.env.ref('website_sale.extra_info_option')
if extra_step.active:
return request.redirect("/shop/extra_info")
return request.redirect("/shop/payment")
# ------------------------------------------------------
# Extra step
# ------------------------------------------------------
@http.route(['/shop/extra_info'], type='http', auth="public", website=True)
def extra_info(self, **post):
# Check that this option is activated
extra_step = request.env.ref('website_sale.extra_info_option')
if not extra_step.active:
return request.redirect("/shop/payment")
# check that cart is valid
order = request.website.sale_get_order()
redirection = self.checkout_redirection(order)
if redirection:
return redirection
# if form posted
if 'post_values' in post:
values = {}
for field_name, field_value in post.items():
if field_name in request.env['sale.order']._fields and field_name.startswith('x_'):
values[field_name] = field_value
if values:
order.write(values)
return request.redirect("/shop/payment")
values = {
'website_sale_order': order,
'post': post,
'escape': lambda x: x.replace("'", r"\'")
}
values.update(request.env['sale.order']._get_website_data(order))
return request.render("website_sale.extra_info", values)
# ------------------------------------------------------
# Payment
# ------------------------------------------------------
@http.route(['/shop/payment'], type='http', auth="public", website=True)
def payment(self, **post):
""" Payment step. This page proposes several payment means based on available
payment.acquirer. State at this point :
- a draft sale order with lines; otherwise, clean context / session and
back to the shop
- no transaction in context / session, or only a draft one, if the customer
did go to a payment.acquirer website but closed the tab without
paying / canceling
"""
SaleOrder = request.env['sale.order']
order = request.website.sale_get_order()
redirection = self.checkout_redirection(order)
if redirection:
return redirection
shipping_partner_id = False
if order:
if order.partner_shipping_id.id:
shipping_partner_id = order.partner_shipping_id.id
else:
shipping_partner_id = order.partner_invoice_id.id
values = {
'website_sale_order': order
}
values['errors'] = SaleOrder._get_errors(order)
values.update(SaleOrder._get_website_data(order))
if not values['errors']:
acquirers = request.env['payment.acquirer'].search(
[('website_published', '=', True), ('company_id', '=', order.company_id.id)]
)
values['acquirers'] = []
for acquirer in acquirers:
acquirer_button = acquirer.with_context(submit_class='btn btn-primary', submit_txt=_('Pay Now')).sudo().render(
'/',
order.amount_total,
order.pricelist_id.currency_id.id,
values={
'return_url': '/shop/payment/validate',
'partner_id': shipping_partner_id,
'billing_partner_id': order.partner_invoice_id.id,
}
)
acquirer.button = acquirer_button
values['acquirers'].append(acquirer)
values['tokens'] = request.env['payment.token'].search([('partner_id', '=', order.partner_id.id), ('acquirer_id', 'in', acquirers.ids)])
return request.render("website_sale.payment", values)
@http.route(['/shop/payment/transaction_token/confirm'], type='json', auth="public", website=True)
def payment_transaction_token_confirm(self, tx, **kwargs):
tx = request.env['payment.transaction'].sudo().browse(int(tx))
if (tx and request.website.sale_get_transaction() and
tx.id == request.website.sale_get_transaction().id and
tx.payment_token_id and
tx.partner_id == tx.sale_order_id.partner_id):
try:
s2s_result = tx.s2s_do_transaction()
valid_state = 'authorized' if tx.acquirer_id.auto_confirm == 'authorize' else 'done'
if not s2s_result or tx.state != valid_state:
return dict(success=False, error=_("Payment transaction failed (%s)") % tx.state_message)
else:
# Auto-confirm SO if necessary
tx._confirm_so()
return dict(success=True, url='/shop/payment/validate')
except Exception, e:
_logger.warning(_("Payment transaction (%s) failed : <%s>") % (tx.id, str(e)))
return dict(success=False, error=_("Payment transaction failed (Contact Administrator)"))
return dict(success=False, error='Tx missmatch')
@http.route(['/shop/payment/transaction_token'], type='http', methods=['POST'], auth="public", website=True)
def payment_transaction_token(self, tx_id, **kwargs):
tx = request.env['payment.transaction'].sudo().browse(int(tx_id))
if (tx and request.website.sale_get_transaction() and
tx.id == request.website.sale_get_transaction().id and
tx.payment_token_id and
tx.partner_id == tx.sale_order_id.partner_id):
return request.render("website_sale.payment_token_form_confirm", dict(tx=tx))
else:
return request.redirect("/shop/payment?error=no_token_or_missmatch_tx")
@http.route(['/shop/payment/transaction/<int:acquirer_id>'], type='json', auth="public", website=True)
def payment_transaction(self, acquirer_id, tx_type='form', token=None, **kwargs):
""" Json method that creates a payment.transaction, used to create a
transaction when the user clicks on 'pay now' button. After having
created the transaction, the event continues and the user is redirected
to the acquirer website.
:param int acquirer_id: id of a payment.acquirer record. If not set the
user is redirected to the checkout page
"""
Transaction = request.env['payment.transaction'].sudo()
# In case the route is called directly from the JS (as done in Stripe payment method)
so_id = kwargs.get('so_id')
so_token = kwargs.get('so_token')
if so_id and so_token:
order = request.env['sale.order'].sudo().search([('id', '=', so_id), ('access_token', '=', so_token)])
elif so_id:
order = request.env['sale.order'].search([('id', '=', so_id)])
else:
order = request.website.sale_get_order()
if not order or not order.order_line or acquirer_id is None:
return request.redirect("/shop/checkout")
assert order.partner_id.id != request.website.partner_id.id
# find an already existing transaction
tx = request.website.sale_get_transaction()
if tx:
if tx.sale_order_id.id != order.id or tx.state in ['error', 'cancel'] or tx.acquirer_id.id != acquirer_id:
tx = False
elif token and tx.payment_token_id and token != tx.payment_token_id.id:
# new or distinct token
tx = False
elif tx.state == 'draft': # button cliked but no more info -> rewrite on tx or create a new one ?
tx.write(dict(Transaction.on_change_partner_id(order.partner_id.id).get('value', {}), amount=order.amount_total, type=tx_type))
if not tx:
tx_values = {
'acquirer_id': acquirer_id,
'type': tx_type,
'amount': order.amount_total,
'currency_id': order.pricelist_id.currency_id.id,
'partner_id': order.partner_id.id,
'partner_country_id': order.partner_id.country_id.id,
'reference': Transaction.get_next_reference(order.name),
'sale_order_id': order.id,
}
if token and request.env['payment.token'].sudo().browse(int(token)).partner_id == order.partner_id:
tx_values['payment_token_id'] = token
tx = Transaction.create(tx_values)
request.session['sale_transaction_id'] = tx.id
# update quotation
order.write({
'payment_acquirer_id': acquirer_id,
'payment_tx_id': request.session['sale_transaction_id']
})
if token:
return request.env.ref('website_sale.payment_token_form').render(dict(tx=tx), engine='ir.qweb')
return tx.acquirer_id.with_context(submit_class='btn btn-primary', submit_txt=_('Pay Now')).sudo().render(
tx.reference,
order.amount_total,
order.pricelist_id.currency_id.id,
values={
'return_url': '/shop/payment/validate',
'partner_id': order.partner_shipping_id.id or order.partner_invoice_id.id,
'billing_partner_id': order.partner_invoice_id.id,
},
)
@http.route('/shop/payment/get_status/<int:sale_order_id>', type='json', auth="public", website=True)
def payment_get_status(self, sale_order_id, **post):
order = request.env['sale.order'].sudo().browse(sale_order_id)
assert order.id == request.session.get('sale_last_order_id')
values = {}
flag = False
if not order:
values.update({'not_order': True, 'state': 'error'})
else:
tx = request.env['payment.transaction'].sudo().search(
['|', ('sale_order_id', '=', order.id), ('reference', '=', order.name)], limit=1
)
if not tx:
if order.amount_total:
values.update({'tx_ids': False, 'state': 'error'})
else:
values.update({'tx_ids': False, 'state': 'done', 'validation': None})
else:
state = tx.state
flag = state == 'pending'
values.update({
'tx_ids': True,
'state': state,
'acquirer_id': tx.acquirer_id,
'validation': tx.acquirer_id.auto_confirm == 'none',
'tx_post_msg': tx.acquirer_id.post_msg or None
})
return {'recall': flag, 'message': request.env['ir.ui.view'].render_template("website_sale.order_state_message", values)}
@http.route('/shop/payment/validate', type='http', auth="public", website=True)
def payment_validate(self, transaction_id=None, sale_order_id=None, **post):
""" Method that should be called by the server when receiving an update
for a transaction. State at this point :
        - UPDATE ME
"""
if transaction_id is None:
tx = request.website.sale_get_transaction()
else:
tx = request.env['payment.transaction'].browse(transaction_id)
if sale_order_id is None:
order = request.website.sale_get_order()
else:
order = request.env['sale.order'].sudo().browse(sale_order_id)
assert order.id == request.session.get('sale_last_order_id')
if not order or (order.amount_total and not tx):
return request.redirect('/shop')
if (not order.amount_total and not tx) or tx.state in ['pending', 'done', 'authorized']:
if (not order.amount_total and not tx):
# Orders are confirmed by payment transactions, but there is none for free orders,
# (e.g. free events), so confirm immediately
order.with_context(send_email=True).action_confirm()
elif tx and tx.state == 'cancel':
# cancel the quotation
order.action_cancel()
# clean context and session, then redirect to the confirmation page
request.website.sale_reset()
if tx and tx.state == 'draft':
return request.redirect('/shop')
return request.redirect('/shop/confirmation')
@http.route(['/shop/terms'], type='http', auth="public", website=True)
def terms(self, **kw):
return request.render("website_sale.terms")
@http.route(['/shop/confirmation'], type='http', auth="public", website=True)
    def payment_confirmation(self, **post):
        """ End of checkout process controller. Confirmation is basically seeing
the status of a sale.order. State at this point :
- should not have any context / session info: clean them
- take a sale.order id, because we request a sale.order and are not
session dependant anymore
"""
sale_order_id = request.session.get('sale_last_order_id')
if sale_order_id:
order = request.env['sale.order'].sudo().browse(sale_order_id)
return request.render("website_sale.confirmation", {'order': order})
else:
return request.redirect('/shop')
@http.route(['/shop/print'], type='http', auth="public", website=True)
def print_saleorder(self):
sale_order_id = request.session.get('sale_last_order_id')
if sale_order_id:
pdf = request.env['report'].sudo().get_pdf([sale_order_id], 'sale.report_saleorder', data=None)
pdfhttpheaders = [('Content-Type', 'application/pdf'), ('Content-Length', len(pdf))]
return request.make_response(pdf, headers=pdfhttpheaders)
else:
return request.redirect('/shop')
@http.route(['/shop/tracking_last_order'], type='json', auth="public")
def tracking_cart(self, **post):
""" return data about order in JSON needed for google analytics"""
ret = {}
sale_order_id = request.session.get('sale_last_order_id')
if sale_order_id:
order = request.env['sale.order'].sudo().browse(sale_order_id)
ret = self.order_2_return_dict(order)
return ret
@http.route(['/shop/get_unit_price'], type='json', auth="public", methods=['POST'], website=True)
def get_unit_price(self, product_ids, add_qty, **kw):
products = request.env['product.product'].with_context({'quantity': add_qty}).browse(product_ids)
return {product.id: product.website_price / add_qty for product in products}
# ------------------------------------------------------
# Edit
# ------------------------------------------------------
@http.route(['/shop/add_product'], type='http', auth="user", methods=['POST'], website=True)
def add_product(self, name=None, category=0, **post):
product = request.env['product.product'].create({
'name': name or _("New Product"),
'public_categ_ids': category
})
return request.redirect("/shop/product/%s?enable_editor=1" % slug(product.product_tmpl_id))
@http.route(['/shop/change_styles'], type='json', auth="public")
def change_styles(self, id, style_id):
product = request.env['product.template'].browse(id)
remove = []
active = False
style_id = int(style_id)
for style in product.website_style_ids:
if style.id == style_id:
remove.append(style.id)
active = True
break
style = request.env['product.style'].browse(style_id)
if remove:
product.write({'website_style_ids': [(3, rid) for rid in remove]})
if not active:
product.write({'website_style_ids': [(4, style.id)]})
return not active
@http.route(['/shop/change_sequence'], type='json', auth="public")
def change_sequence(self, id, sequence):
product_tmpl = request.env['product.template'].browse(id)
if sequence == "top":
product_tmpl.set_sequence_top()
elif sequence == "bottom":
product_tmpl.set_sequence_bottom()
elif sequence == "up":
product_tmpl.set_sequence_up()
elif sequence == "down":
product_tmpl.set_sequence_down()
@http.route(['/shop/change_size'], type='json', auth="public")
def change_size(self, id, x, y):
product = request.env['product.template'].browse(id)
return product.write({'website_size_x': x, 'website_size_y': y})
def order_lines_2_google_api(self, order_lines):
""" Transforms a list of order lines into a dict for google analytics """
ret = []
for line in order_lines:
product = line.product_id
ret.append({
'id': line.order_id.id,
'sku': product.barcode or product.id,
'name': product.name or '-',
'category': product.categ_id.name or '-',
'price': line.price_unit,
'quantity': line.product_uom_qty,
})
return ret
    def order_2_return_dict(self, order):
        """ Returns the tracking_cart dict of the order for Google Analytics; defined here mainly so it can be inherited """
return {
'transaction': {
'id': order.id,
'affiliation': order.company_id.name,
'revenue': order.amount_total,
'tax': order.amount_tax,
'currency': order.currency_id.name
},
'lines': self.order_lines_2_google_api(order.order_line)
}
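    # Illustrative payload produced above (values are hypothetical):
    #   {'transaction': {'id': 7, 'affiliation': 'My Company', 'revenue': 120.0,
    #                    'tax': 20.0, 'currency': 'EUR'},
    #    'lines': [{'id': 7, 'sku': '1234567890123', 'name': 'Chair',
    #               'category': 'Furniture', 'price': 50.0, 'quantity': 2.0}]}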
@http.route(['/shop/country_infos/<model("res.country"):country>'], type='json', auth="public", methods=['POST'], website=True)
def country_infos(self, country, mode, **kw):
return dict(
fields=country.get_address_fields(),
states=[(st.id, st.name, st.code) for st in country.get_website_sale_states(mode=mode)],
phone_code=country.phone_code
)
| 43.56167 | 148 | 0.591301 |
952a2a1e798c42052130ac3f1573e1ce9354dfdc | 124,179 | py | Python | tensorflow/python/ops/math_ops.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | 1 | 2018-11-21T03:07:45.000Z | 2018-11-21T03:07:45.000Z | tensorflow/python/ops/math_ops.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/math_ops.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic arithmetic operators.
See the [python/math_ops](python/math_ops) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
linspace = gen_math_ops.lin_space
arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max) # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min) # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(arg_max)
tf_export(v1=["arg_min"])(arg_min)
# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops
_resource_variable_type = None
def _set_doc(doc):
def _decorator(func):
func.__doc__ = doc
return func
return _decorator
# pylint: disable=redefined-builtin
@tf_export(v1=["math.argmax", "argmax"])
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"dimension")
@_set_doc(
gen_math_ops.arg_max.__doc__.replace("dimensions", "axes").replace(
"dimension", "axis"))
def argmax(input,
axis=None,
name=None,
dimension=None,
output_type=dtypes.int64):
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "dimension", dimension)
if axis is None:
axis = 0
return argmax_v2(input, axis, output_type, name)
@tf_export("math.argmax", "argmax", v1=[])
def argmax_v2(input,
axis=None,
output_type=dtypes.int64,
name=None):
"""Returns the index with the largest value across axes of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,
`qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which axis of the input Tensor to reduce across. For vectors,
use axis = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`.
"""
return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)
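# Illustrative use of argmax (a sketch; the tensor values are assumptions, not
# taken from this module):
#
#   x = tf.constant([[1.0, 9.0, 3.0],
#                    [4.0, 2.0, 8.0]])
#   tf.math.argmax(x, axis=1)  # -> [1, 2], int64 unless output_type is changed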
@tf_export(v1=["math.argmin", "argmin"])
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"dimension")
@_set_doc(
gen_math_ops.arg_min.__doc__.replace("dimensions", "axes").replace(
"dimension", "axis"))
def argmin(input,
axis=None,
name=None,
dimension=None,
output_type=dtypes.int64):
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "dimension", dimension)
if axis is None:
axis = 0
return argmin_v2(input, axis, output_type, name)
@tf_export("math.argmin", "argmin", v1=[])
def argmin_v2(input,
axis=None,
output_type=dtypes.int64,
name=None):
"""Returns the index with the smallest value across axes of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`,
`qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which axis of the input Tensor to reduce across. For vectors,
use axis = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`.
"""
return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("math.abs", "abs")
def abs(x, name=None): # pylint: disable=redefined-builtin
r"""Computes the absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float32` or `float64` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The
absolute value is computed as \\( \sqrt{a^2 + b^2}\\). For example:
```python
x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
tf.abs(x) # [5.25594902, 6.60492229]
```
Args:
x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
`int32`, `int64`, `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
values.
Note, for `complex64` or `complex128` input, the returned `Tensor` will be
of type `float32` or `float64`, respectively.
"""
with ops.name_scope(name, "Abs", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
if x.values.dtype.is_complex:
x_abs = gen_math_ops.complex_abs(
x.values, Tout=x.values.dtype.real_dtype, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_abs, dense_shape=x.dense_shape)
x_abs = gen_math_ops._abs(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_abs, dense_shape=x.dense_shape)
else:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex:
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)
# pylint: enable=redefined-builtin
class DivideDelegateWithName(object):
"""Use Python2/Python3 division delegation to implement divide for tensors."""
def __init__(self, x, name):
"""Construct DivideDelegateWithName.
Args:
x: Tensor to use as left operand in operator overloads
name: The name that is preferred for the op created.
"""
self.x = x
self.name = name
def __truediv__(self, y):
return _truediv_python3(self.x, y, self.name)
def __floordiv__(self, y):
return floordiv(self.x, y, self.name)
def __div__(self, y):
return _div_python2(self.x, y, self.name)
@tf_export("math.divide", "divide")
def divide(x, y, name=None):
"""Computes Python style division of `x` by `y`."""
if name is not None:
# Cannot use tensors operator overload, because it has no way to track
# override names. Use a dummy class to track the runtime division behavior
return DivideDelegateWithName(x, name) / y
else:
return x / y
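# Illustrative behaviour (a sketch, not from this module's tests): tf.divide
# always applies Python-3-style true division, and a `name` argument is routed
# through DivideDelegateWithName so the created division op carries that name.
#
#   tf.divide(tf.constant(6), tf.constant(4))                # -> 1.5
#   tf.divide(tf.constant(6.0), tf.constant(4.0), name="q")  # division op named "q"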
@tf_export("math.multiply", "multiply")
def multiply(x, y, name=None):
return gen_math_ops.mul(x, y, name)
multiply.__doc__ = gen_math_ops.mul.__doc__.replace("Multiply", "`tf.multiply`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
"2016-12-30",
"`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
return gen_math_ops.mul(x, y, name)
_mul.__doc__ = (
gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))
@tf_export("math.subtract", "subtract")
def subtract(x, y, name=None):
return gen_math_ops.sub(x, y, name)
subtract.__doc__ = gen_math_ops.sub.__doc__.replace("`Sub`", "`tf.subtract`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
"2016-12-30",
"`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
return gen_math_ops.sub(x, y, name)
_sub.__doc__ = (
gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__))
# pylint: disable=g-docstring-has-escape
@tf_export("math.negative", "negative")
def negative(x, name=None):
"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Neg", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_neg = gen_math_ops.neg(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_neg, dense_shape=x.dense_shape)
else:
return gen_math_ops.neg(x, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
"2016-12-30",
"`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
return negative(x, name)
# pylint: enable=g-docstring-has-escape
@tf_export("math.sign", "sign")
def sign(x, name=None):
"""Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1` if `x < 0`; 0 if `x == 0` or `tf.is_nan(x)`; 1 if `x > 0`.
Zero is returned for NaN inputs.
For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
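For example (an illustrative sketch; printed values assume eager evaluation):
```python
x = tf.constant([-5.0, 0.0, 3.0])
tf.math.sign(x)  # [-1., 0., 1.]
```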
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(numpy)
Equivalent to numpy.sign except for the behavior for input values of NaN.
@end_compatibility
"""
with ops.name_scope(name, "Sign", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_sign = gen_math_ops.sign(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_sign, dense_shape=x.dense_shape)
else:
return gen_math_ops.sign(x, name=name)
@tf_export("math.square", "square")
def square(x, name=None):
r"""Computes square of x element-wise.
I.e., \\(y = x * x = x^2\\).
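For example (an illustrative sketch; printed values assume eager evaluation):
```python
tf.math.square(tf.constant([-2.0, 3.0]))  # [4., 9.]
```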
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Square", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_square = gen_math_ops.square(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_square, dense_shape=x.dense_shape)
else:
return gen_math_ops.square(x, name=name)
@tf_export("math.sqrt", "sqrt")
def sqrt(x, name=None):
r"""Computes square root of x element-wise.
I.e., \\(y = \sqrt{x} = x^{1/2}\\).
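For example (an illustrative sketch; printed values assume eager evaluation):
```python
tf.math.sqrt(tf.constant([4.0, 9.0]))  # [2., 3.]
```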
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sqrt", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_sqrt = gen_math_ops.sqrt(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_sqrt, dense_shape=x.dense_shape)
else:
return gen_math_ops.sqrt(x, name=name)
@tf_export("math.erf", v1=["math.erf", "erf"])
@deprecation.deprecated_endpoints("erf")
def erf(x, name=None):
"""Computes the Gauss error function of `x` element-wise.
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Erf", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_erf = gen_math_ops.erf(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_erf, dense_shape=x.dense_shape)
else:
return gen_math_ops.erf(x, name=name)
@tf_export("math.scalar_mul", "scalar_mul")
def scalar_mul(scalar, x):
"""Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
Intended for use in gradient code which might deal with `IndexedSlices`
objects, which are easy to multiply by a scalar but more expensive to
multiply with arbitrary tensors.
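For example (an illustrative sketch; printed values assume eager evaluation):
```python
x = tf.constant([1.0, 2.0, 3.0])
tf.math.scalar_mul(2.0, x)  # [2., 4., 6.]
```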
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
ValueError: if scalar is not a 0-D `scalar`.
"""
scalar = ops.convert_to_tensor(
scalar, dtype=x.dtype.base_dtype, name="scalar")
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
else:
return scalar * x
else:
raise ValueError("Only scalar multiply works, got shape %s" % shape)
@tf_export("math.pow", "pow")
def pow(x, y, name=None): # pylint: disable=redefined-builtin
r"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
corresponding elements in `x` and `y`. For example:
```python
x = tf.constant([[2, 2], [3, 3]])
y = tf.constant([[8, 16], [2, 3]])
tf.pow(x, y) # [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.name_scope(name, "Pow", [x]) as name:
return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
@tf_export("dtypes.complex", "complex")
def complex(real, imag, name=None):
r"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```python
real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])
tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`,
`float64`.
imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64` or `complex128`.
"""
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
with ops.name_scope(name, "Complex", [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if input_types == (dtypes.float64, dtypes.float64):
Tout = dtypes.complex128
elif input_types == (dtypes.float32, dtypes.float32):
Tout = dtypes.complex64
else:
raise TypeError("real and imag have incorrect types: "
"{} {}".format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
@tf_export("math.real", v1=["math.real", "real"])
@deprecation.deprecated_endpoints("real")
def real(input, name=None):
r"""Returns the real part of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the real part of each element in `input` considered as a complex number.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.real(x) # [-2.25, 3.25]
```
If `input` is already real, it is returned unchanged.
Args:
input: A `Tensor`. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Real", [input]) as name:
if input.dtype.is_complex:
real_dtype = input.dtype.real_dtype
return gen_math_ops.real(input, Tout=real_dtype, name=name)
else:
return input
@tf_export("math.imag", v1=["math.imag", "imag"])
@deprecation.deprecated_endpoints("imag")
def imag(input, name=None):
r"""Returns the imaginary part of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the imaginary part of each element in `input` considered as a complex
number. If `input` is real, a tensor of all zeros is returned.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.imag(x) # [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `float`, `double`,
`complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Imag", [input]) as name:
if input.dtype.is_complex:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.zeros_like(input)
@tf_export("math.angle", v1=["math.angle", "angle"])
@deprecation.deprecated_endpoints("angle")
def angle(input, name=None):
r"""Returns the element-wise argument of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the argument of each element in `input` considered as a complex number.
The elements in `input` are considered to be complex numbers of the form
\\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
If `input` is real then *b* is zero by definition.
The argument returned by this function is of the form \\(atan2(b, a)\\).
If `input` is real, a tensor of all zeros is returned.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.angle(input) ==> [2.0132, 1.056]
```
Args:
input: A `Tensor`. Must be one of the following types: `float`, `double`,
`complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Angle", [input]) as name:
if input.dtype.is_complex:
return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.zeros_like(input)
# pylint: enable=redefined-outer-name,redefined-builtin
@tf_export("math.round", "round")
def round(x, name=None): # pylint: disable=redefined-builtin
"""Rounds the values of a tensor to the nearest integer, element-wise.
Rounds half to even. Also known as banker's rounding. If you want to round
according to the current system rounding mode, use tf::cint.
For example:
```python
x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
return gen_math_ops.round(x, name=name)
@tf_export("dtypes.cast", "cast")
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor` or `IndexedSlices`) to `dtype`.
For example:
```python
x = tf.constant([1.8, 2.2], dtype=tf.float32)
tf.cast(x, tf.int32) # [1, 2], dtype=tf.int32
```
The operation supports data types (for `x` and `dtype`) of
`uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
`float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
In case of casting from complex types (`complex64`, `complex128`) to real
types, only the real part of `x` is returned. In case of casting from real
types to complex types (`complex64`, `complex128`), the imaginary part of the
returned value is set to `0`. The handling of complex types here matches the
behavior of numpy.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
`int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
`bfloat16`.
dtype: The destination type. The list of supported dtypes is the same as
`x`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
same type as `dtype`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
"""
base_type = dtypes.as_dtype(dtype).base_dtype
if isinstance(x,
(ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
return x
with ops.name_scope(name, "Cast", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
values_cast = cast(x.values, base_type, name=name)
x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
elif isinstance(x, ops.IndexedSlices):
values_cast = cast(x.values, base_type, name=name)
x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)
else:
# TODO(josh11b): If x is not already a Tensor, we could return
# ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype != base_type:
x = gen_math_ops.cast(x, base_type, name=name)
if x.dtype.is_complex and base_type.is_floating:
logging.warn("Casting complex to real discards imaginary part.")
return x
@tf_export("dtypes.saturate_cast", "saturate_cast")
def saturate_cast(value, dtype, name=None):
"""Performs a safe saturating cast of `value` to `dtype`.
This function casts the input to `dtype` without applying any scaling. If
there is a danger that values would over or underflow in the cast, this op
applies the appropriate clamping before the cast.
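For example (an illustrative sketch; printed values assume eager evaluation):
```python
x = tf.constant([-1.0, 128.5, 300.0])
tf.saturate_cast(x, tf.uint8)  # [0, 128, 255]
```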
Args:
value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
Returns:
`value` safely cast to `dtype`.
"""
# When casting to a type with smaller representable range, clamp.
# Note that this covers casting to unsigned types as well.
with ops.name_scope(name, "saturate_cast", [value]) as name:
value = ops.convert_to_tensor(value, name="value")
dtype = dtypes.as_dtype(dtype).base_dtype
if value.dtype.min < dtype.min:
value = gen_math_ops.maximum(value,
ops.convert_to_tensor(
dtype.min, dtype=value.dtype,
name="min"))
if value.dtype.max > dtype.max:
value = gen_math_ops.minimum(value,
ops.convert_to_tensor(
dtype.max, dtype=value.dtype,
name="max"))
return cast(value, dtype, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_float"])
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, dtypes.float32, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_double"])
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, dtypes.float64, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_int32"])
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, dtypes.int32, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_int64"])
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, dtypes.int64, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_bfloat16"])
def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to the `bfloat16`.
"""
return cast(x, dtypes.bfloat16, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_complex64"])
def to_complex64(x, name="ToComplex64"):
"""Casts a tensor to type `complex64`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `complex64`.
Raises:
TypeError: If `x` cannot be cast to the `complex64`.
"""
return cast(x, dtypes.complex64, name=name)
@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_complex128"])
def to_complex128(x, name="ToComplex128"):
"""Casts a tensor to type `complex128`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `complex128`.
Raises:
TypeError: If `x` cannot be cast to the `complex128`.
"""
return cast(x, dtypes.complex128, name=name)
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
"""Register operators with different tensor and scalar versions.
If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.
Args:
func: the operator
op_name: name of the operator being overridden
clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
"""
def binary_op_wrapper(x, y):
with ops.name_scope(None, op_name, [x, y]) as name:
if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):
return func(x, y, name=name)
elif not isinstance(y, sparse_tensor.SparseTensor):
try:
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
except TypeError:
# If the RHS is not a tensor, it might be a tensor aware object
# that can implement the operator with knowledge of itself
# and the tensor.
if hasattr(type(y), "__r%s__" % op_name):
return NotImplemented
else:
raise
return func(x, y, name=name)
def binary_op_wrapper_sparse(sp_x, y):
with ops.name_scope(None, op_name, [sp_x, y]) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
return sparse_tensor.SparseTensor(sp_x.indices,
func(
sp_x.indices,
sp_x.values,
sp_x.dense_shape,
y,
name=name), sp_x.dense_shape)
def r_binary_op_wrapper(y, x):
with ops.name_scope(None, op_name, [x, y]) as name:
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
# Propagate func.__doc__ to the wrappers
try:
doc = func.__doc__
except AttributeError:
doc = None
binary_op_wrapper.__doc__ = doc
r_binary_op_wrapper.__doc__ = doc
binary_op_wrapper_sparse.__doc__ = doc
if clazz_object is ops.Tensor:
clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
else:
clazz_object._override_operator("__%s__" % op_name,
binary_op_wrapper_sparse)
del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
dtypes.uint8: dtypes.float32,
dtypes.int8: dtypes.float32,
dtypes.uint16: dtypes.float32,
dtypes.int16: dtypes.float32,
dtypes.int32: dtypes.float64,
dtypes.int64: dtypes.float64,
dtypes.bfloat16: None,
dtypes.float16: None,
dtypes.float32: None,
dtypes.float64: None,
dtypes.complex64: None,
dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
"""Internal helper function for 'sp_t / dense_t'."""
with ops.name_scope(name, "truediv",
[sp_indices, sp_values, sp_shape, y]) as name:
sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
y = ops.convert_to_tensor(y, name="y")
x_dtype = sp_values.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
sp_values = cast(sp_values, dtype)
y = cast(y, dtype)
return gen_sparse_ops.sparse_dense_cwise_div(
sp_indices, sp_values, sp_shape, y, name=name)
def _truediv_python3(x, y, name=None):
with ops.name_scope(name, "truediv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return gen_math_ops.real_div(x, y, name=name)
def _div_python2(x, y, name=None):
"""Divide two values using Python 2 semantics. Used for Tensor.__div__.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` returns the quotient of x and y.
"""
with ops.name_scope(name, "div", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
if x_dtype.is_floating or x_dtype.is_complex:
return gen_math_ops.real_div(x, y, name=name)
else:
return gen_math_ops.floor_div(x, y, name=name)
@tf_export("math.truediv", "truediv")
def truediv(x, y, name=None):
"""Divides x / y elementwise (using Python 3 division operator semantics).
NOTE: Prefer using the Tensor operator or tf.divide which obey Python
division operator semantics.
This function forces Python 3 division operator semantics where all integer
arguments are cast to floating types first. This op is generated by normal
`x / y` division in Python 3 and in Python 2.7 with
`from __future__ import division`. If you want integer division that rounds
down, use `x // y` or `tf.math.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
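For example (an illustrative sketch; printed values assume eager evaluation):
```python
x = tf.constant([1, 2, 3])
y = tf.constant([2, 2, 2])
tf.truediv(x, y)  # [0.5, 1., 1.5], dtype=float64
```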
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
return _truediv_python3(x, y, name)
@deprecation.deprecated(
date=None,
instructions="Deprecated in favor of operator or tf.math.divide.")
@tf_export(v1=["div"])
def div(x, y, name=None):
"""Divides x / y elementwise (using Python 2 division operator semantics).
NOTE: Prefer using the Tensor division operator or tf.divide which obey Python
division operator semantics.
This function divides `x` and `y`, forcing Python 2.7 semantics. That is,
if one of `x` or `y` is a float, then the result will be a float.
Otherwise, the output will be an integer type. Flooring semantics are used
for integer division.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` returns the quotient of x and y.
"""
return _div_python2(x, y, name)
@tf_export("div_no_nan")
def div_no_nan(x, y, name=None):
"""Computes an unsafe divide which returns 0 if the y is zero.
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
y: A `Tensor` whose dtype is compatible with `x`.
name: A name for the operation (optional).
Returns:
The element-wise value of the x divided by y.
"""
with ops.name_scope(name, "div_no_nan", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
return gen_math_ops.div_no_nan(x, y, name=name)
# TODO(aselle): This should be removed
mod = gen_math_ops.floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding toward the most negative integer.
The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
floating point arguments so that the result is always an integer (though
possibly an integer represented as floating point). This op is generated by
`x // y` floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
`x` and `y` must have the same type, and the result will have the same type
as well.
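For example (an illustrative sketch; printed values assume eager evaluation):
```python
tf.math.floordiv(tf.constant([7, -7]), tf.constant(2))  # [3, -4]
tf.math.floordiv(tf.constant(7.0), tf.constant(2.0))    # 3.0
```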
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down.
Raises:
TypeError: If the inputs are complex.
"""
with ops.name_scope(name, "floordiv", [x, y]) as name:
return gen_math_ops.floor_div(x, y, name=name)
realdiv = gen_math_ops.real_div
tf_export("realdiv")(realdiv)
truncatediv = gen_math_ops.truncate_div
tf_export("truncatediv")(truncatediv)
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops.floor_div
tf_export("floor_div")(floor_div)
truncatemod = gen_math_ops.truncate_mod
tf_export("truncatemod")(truncatemod)
floormod = gen_math_ops.floor_mod
tf_export("floormod", "mod")(floormod)
def _mul_dispatch(x, y, name=None):
"""Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
is_tensor_y = isinstance(y, ops.Tensor)
if is_tensor_y:
return gen_math_ops.mul(x, y, name=name)
else:
assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse.
new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
y.dense_shape, x, name)
return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(_div_python2, "div")
_OverrideBinaryOperatorHelper(_truediv_python3, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return gen_math_ops.logical_and(
gen_math_ops.logical_or(x, y),
gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
name=name)
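# A minimal illustrative sketch of `logical_xor` (assumes boolean inputs and
# eager evaluation):
#   a = constant_op.constant([True, True, False, False])
#   b = constant_op.constant([True, False, True, False])
#   logical_xor(a, b)  # [False, True, True, False]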
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
@tf_export("range")
def range(start, limit=None, delta=1, dtype=None, name="range"): # pylint: disable=redefined-builtin
"""Creates a sequence of numbers.
Creates a sequence of numbers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
The dtype of the resulting tensor is inferred from the inputs unless
it is provided explicitly.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
```python
start = 3
limit = 18
delta = 3
tf.range(start, limit, delta) # [3, 6, 9, 12, 15]
start = 3
limit = 1
delta = -0.5
tf.range(start, limit, delta) # [3, 2.5, 2, 1.5]
limit = 5
tf.range(limit) # [0, 1, 2, 3, 4]
```
Args:
start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
`limit` is not None; otherwise, acts as range limit and first entry
defaults to 0.
limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
exclusive. If None, defaults to the value of `start` while the first
entry of the range defaults to 0.
delta: A 0-D `Tensor` (scalar). Number that increments
`start`. Defaults to 1.
dtype: The type of the elements of the resulting tensor.
name: A name for the operation. Defaults to "range".
Returns:
An 1-D `Tensor` of type `dtype`.
@compatibility(numpy)
Equivalent to np.arange
@end_compatibility
"""
if limit is None:
start, limit = 0, start
with ops.name_scope(name, "Range", [start, limit, delta]) as name:
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
# infer dtype if not explicitly provided
if dtype is None:
dtype_hierarchy = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
]
assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
inferred_dtype = max(
[arg.dtype for arg in [start, limit, delta]],
key=dtype_hierarchy.index)
start = cast(start, inferred_dtype)
limit = cast(limit, inferred_dtype)
delta = cast(delta, inferred_dtype)
return gen_math_ops._range(start, limit, delta, name=name)
# Reduction operations
def _ReductionDims(x, axis, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
rank = common_shapes.rank(x)
if rank is not None:
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.shape.is_fully_defined()):
rank = x.dense_shape.shape.dims[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def _may_reduce_to_scalar(keepdims, axis, reduction_indices, output):
"""Set a reduction's output shape to be a scalar if we are certain."""
if not common_shapes.has_fully_defined_shape(output) and (not keepdims) and (
axis is None) and (reduction_indices is None):
output.set_shape(())
return output
@tf_export("math.reduce_sum", "reduce_sum")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_sum(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1, 1, 1], [1, 1, 1]])
tf.reduce_sum(x) # 6
tf.reduce_sum(x, 0) # [2, 2, 2]
tf.reduce_sum(x, 1) # [3, 3]
tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]]
tf.reduce_sum(x, [0, 1]) # 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32 to
int64 while tensorflow returns the same dtype as the input.
@end_compatibility
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
gen_math_ops._sum(
input_tensor,
_ReductionDims(input_tensor, axis,
reduction_indices),
keepdims,
name=name))
@tf_export(v1=["math.count_nonzero", "count_nonzero"])
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def count_nonzero(input_tensor,
axis=None,
keepdims=None,
dtype=dtypes.int64,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
x = tf.constant([[0, 1, 0], [1, 1, 0]])
tf.count_nonzero(x) # 3
tf.count_nonzero(x, 0) # [1, 2, 0]
tf.count_nonzero(x, 1) # [1, 2]
tf.count_nonzero(x, 1, keepdims=True) # [[1], [2]]
tf.count_nonzero(x, [0, 1]) # 3
```
**NOTE** Strings are compared against zero-length empty string `""`. Any
string with a size greater than zero is considered nonzero.
For example:
```python
x = tf.constant(["", "a", " ", "b", ""])
tf.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings.
```
Args:
input_tensor: The tensor to reduce. Should be of numeric type, `bool`,
or `string`.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor (number of nonzero values).
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
axis = deprecation.deprecated_argument_lookup(
"axis", axis,
"reduction_indices", reduction_indices
)
if keepdims is None:
keepdims = False
return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)
@tf_export("math.count_nonzero", v1=[])
def count_nonzero_v2(input, # pylint: disable=redefined-builtin
axis=None,
keepdims=None,
dtype=dtypes.int64,
name=None):
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
x = tf.constant([[0, 1, 0], [1, 1, 0]])
tf.count_nonzero(x) # 3
tf.count_nonzero(x, 0) # [1, 2, 0]
tf.count_nonzero(x, 1) # [1, 2]
tf.count_nonzero(x, 1, keepdims=True) # [[1], [2]]
tf.count_nonzero(x, [0, 1]) # 3
```
**NOTE** Strings are compared against zero-length empty string `""`. Any
string with a size greater than zero is considered nonzero.
For example:
```python
x = tf.constant(["", "a", " ", "b", ""])
tf.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings.
```
Args:
input: The tensor to reduce. Should be of numeric type, `bool`,
or `string`.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input), rank(input))`.
keepdims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
The reduced tensor (number of nonzero values).
"""
with ops.name_scope(name, "count_nonzero", [input]):
input = ops.convert_to_tensor(input, name="input")
# A scalar of 'zero' is enough as `not_equal` will broadcast.
zero = array_ops.zeros([], dtype=input.dtype)
return cast(
reduce_sum(
# int64 reduction happens on GPU
to_int64(gen_math_ops.not_equal(input, zero)),
axis=axis,
keepdims=keepdims),
dtype=dtype)
@tf_export("math.reduce_mean", "reduce_mean")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_mean(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1., 1.], [2., 2.]])
tf.reduce_mean(x) # 1.5
tf.reduce_mean(x, 0) # [1.5, 1.5]
tf.reduce_mean(x, 1) # [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.mean
Please note that `np.mean` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_mean` has aggressive type inference from `input_tensor`,
for example:
```python
x = tf.constant([1, 0, 1, 0])
tf.reduce_mean(x) # 0
y = tf.constant([1., 0., 1., 0.])
tf.reduce_mean(y) # 0.5
```
@end_compatibility
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
gen_math_ops.mean(
input_tensor,
_ReductionDims(input_tensor, axis,
reduction_indices),
keepdims,
name=name))
@tf_export("math.reduce_variance")
def reduce_variance(input_tensor, axis=None, keepdims=None, name=None):
"""Computes the variance of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_variance(x) # 1.25
tf.reduce_variance(x, 0) # [1., 1.]
tf.reduce_variance(x, 1) # [0.25, 0.25]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name scope for the associated operations (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
Equivalent to np.var
Please note that `np.var` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_variance` has aggressive type inference from
`input_tensor`.
@end_compatibility
"""
name = name if name else "reduce_variance"
with ops.name_scope(name):
means = reduce_mean(input_tensor, axis=axis, keepdims=True)
squared_deviations = square(input_tensor - means)
return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)
@tf_export("math.reduce_std")
def reduce_std(input_tensor, axis=None, keepdims=None, name=None):
"""Computes the standard deviation of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_std(x) # 1.1180339887498949
tf.reduce_std(x, 0) # [1., 1.]
tf.reduce_std(x, 1) # [0.5, 0.5]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name scope for the associated operations (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
Equivalent to np.std
Please note that `np.std` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_std` has aggressive type inference from `input_tensor`.
@end_compatibility
"""
name = name if name else "reduce_std"
with ops.name_scope(name):
variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
return sqrt(variance)
@tf_export("math.reduce_prod", "reduce_prod")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_prod(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
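For example (an illustrative sketch; printed values assume eager evaluation):
```python
x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_prod(x)     # 24.
tf.reduce_prod(x, 0)  # [3., 8.]
tf.reduce_prod(x, 1)  # [2., 12.]
```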
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
gen_math_ops.prod(
input_tensor,
_ReductionDims(input_tensor, axis,
reduction_indices),
keepdims,
name=name))
@tf_export("math.reduce_min", "reduce_min")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_min(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
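For example (an illustrative sketch; printed values assume eager evaluation):
```python
x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_min(x)     # 1.
tf.reduce_min(x, 0)  # [1., 2.]
tf.reduce_min(x, 1)  # [1., 3.]
```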
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
gen_math_ops._min(
input_tensor,
_ReductionDims(input_tensor, axis,
reduction_indices),
keepdims,
name=name))
@tf_export("math.reduce_max", "reduce_max")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_max(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
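For example (an illustrative sketch; printed values assume eager evaluation):
```python
x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_max(x)     # 4.
tf.reduce_max(x, 0)  # [3., 4.]
tf.reduce_max(x, 1)  # [2., 4.]
```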
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.max
@end_compatibility
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
gen_math_ops._max(
input_tensor,
_ReductionDims(input_tensor, axis,
reduction_indices),
keepdims,
name=name))
@tf_export("math.reduce_all", "reduce_all")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_all(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_all(x) # False
tf.reduce_all(x, 0) # [False, False]
tf.reduce_all(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
gen_math_ops._all(
input_tensor,
_ReductionDims(input_tensor, axis,
reduction_indices),
keepdims,
name=name))
@tf_export("math.reduce_any", "reduce_any")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_any(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_any(x) # True
tf.reduce_any(x, 0) # [True, True]
tf.reduce_any(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
gen_math_ops._any(
input_tensor,
_ReductionDims(input_tensor, axis,
reduction_indices),
keepdims,
name=name))
@tf_export("math.reduce_logsumexp", "reduce_logsumexp")
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_logsumexp(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x) # log(6)
tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) # log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
if keepdims is None:
keepdims = False
input_tensor = ops.convert_to_tensor(input_tensor)
with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
raw_max = reduce_max(
input_tensor,
axis=axis,
reduction_indices=reduction_indices,
keepdims=True)
my_max = array_ops.stop_gradient(
array_ops.where(
gen_math_ops.is_finite(raw_max), raw_max,
array_ops.zeros_like(raw_max)))
result = gen_math_ops.log(
reduce_sum(
gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),
axis,
keepdims=keepdims,
reduction_indices=reduction_indices))
if not keepdims:
my_max = array_ops.reshape(my_max, array_ops.shape(result))
result = gen_math_ops.add(result, my_max)
return _may_reduce_to_scalar(keepdims, axis, reduction_indices, result)
@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
@deprecation.deprecated_endpoints("trace")
def trace(x, name=None):
"""Compute the trace of a tensor `x`.
`trace(x)` returns the sum along the main diagonal of each inner-most matrix
in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
`output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
For example:
```python
x = tf.constant([[1, 2], [3, 4]])
tf.linalg.trace(x) # 5
x = tf.constant([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
tf.linalg.trace(x) # 15
x = tf.constant([[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[-1, -2, -3],
[-4, -5, -6],
[-7, -8, -9]]])
tf.linalg.trace(x) # [15, -15]
```
Args:
x: tensor.
name: A name for the operation (optional).
Returns:
The trace of input tensor.
"""
with ops.name_scope(name, "Trace", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
@tf_export("linalg.matmul", "matmul")
def matmul(a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must, following any transpositions, be tensors of rank >= 2
where the inner 2 dimensions specify valid matrix multiplication arguments,
and any further outer dimensions match.
Both matrices must be of the same type. The supported types are:
`float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
Either matrix can be transposed or adjointed (conjugated and transposed) on
the fly by setting one of the corresponding flags to `True`. These are `False`
by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
This optimization is only available for plain matrices (rank-2 tensors) with
datatypes `bfloat16` or `float32`.
For example:
```python
# 2-D tensor `a`
# [[1, 2, 3],
# [4, 5, 6]]
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
# 2-D tensor `b`
# [[ 7, 8],
# [ 9, 10],
# [11, 12]]
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
# `a` * `b`
# [[ 58, 64],
# [139, 154]]
c = tf.matmul(a, b)
# 3-D tensor `a`
# [[[ 1, 2, 3],
# [ 4, 5, 6]],
# [[ 7, 8, 9],
# [10, 11, 12]]]
a = tf.constant(np.arange(1, 13, dtype=np.int32),
shape=[2, 2, 3])
# 3-D tensor `b`
# [[[13, 14],
# [15, 16],
# [17, 18]],
# [[19, 20],
# [21, 22],
# [23, 24]]]
b = tf.constant(np.arange(13, 25, dtype=np.int32),
shape=[2, 3, 2])
# `a` * `b`
# [[[ 94, 100],
# [229, 244]],
# [[508, 532],
# [697, 730]]]
c = tf.matmul(a, b)
# Since Python >= 3.5 the @ operator is supported (see PEP 465).
# In TensorFlow, it simply calls the `tf.matmul()` function, so the
# following lines are equivalent:
d = a @ b @ [[10.], [11.]]
d = tf.matmul(tf.matmul(a, b), [[10.], [11.]])
```
Args:
a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
`complex128` and rank > 1.
b: `Tensor` with same type and rank as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
adjoint_a: If `True`, `a` is conjugated and transposed before
multiplication.
adjoint_b: If `True`, `b` is conjugated and transposed before
multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a` and `b` where each inner-most matrix is
the product of the corresponding matrices in `a` and `b`, e.g. if all
transpose or adjoint attributes are `False`:
`output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),
for all indices i, j.
Note: This is matrix product, not element-wise product.
Raises:
ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b
are both set to True.
"""
with ops.name_scope(name, "MatMul", [a, b]) as name:
if transpose_a and adjoint_a:
raise ValueError("Only one of transpose_a and adjoint_a can be True.")
if transpose_b and adjoint_b:
raise ValueError("Only one of transpose_b and adjoint_b can be True.")
if context.executing_eagerly():
if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
a = ops.convert_to_tensor(a, name="a")
if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
b = ops.convert_to_tensor(b, name="b")
else:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
# TODO(apassos) remove _shape_tuple here when it is not needed.
a_shape = a._shape_tuple() # pylint: disable=protected-access
b_shape = b._shape_tuple() # pylint: disable=protected-access
if (not a_is_sparse and
not b_is_sparse) and ((a_shape is None or len(a_shape) > 2) and
(b_shape is None or len(b_shape) > 2)):
# BatchMatmul does not support transpose, so we conjugate the matrix and
# use adjoint instead. Conj() is a noop for real matrices.
if transpose_a:
a = conj(a)
adjoint_a = True
if transpose_b:
b = conj(b)
adjoint_b = True
return gen_math_ops.batch_mat_mul(
a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
# Neither matmul nor sparse_matmul support adjoint, so we conjugate
# the matrix and use transpose instead. Conj() is a noop for real
# matrices.
if adjoint_a:
a = conj(a)
transpose_a = True
if adjoint_b:
b = conj(b)
transpose_b = True
use_sparse_matmul = False
if a_is_sparse or b_is_sparse:
sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
use_sparse_matmul = (
a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
if ((a.dtype == dtypes.bfloat16 or b.dtype == dtypes.bfloat16) and
a.dtype != b.dtype):
# matmul currently doesn't handle mixed-precision inputs.
use_sparse_matmul = True
if use_sparse_matmul:
ret = sparse_matmul(
a,
b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
# sparse_matmul always returns float32, even with
# bfloat16 inputs. This prevents us from configuring bfloat16 training.
# casting to bfloat16 also matches non-sparse matmul behavior better.
if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
ret = cast(ret, dtypes.bfloat16)
return ret
else:
return gen_math_ops.mat_mul(
a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
@tf_export("linalg.matvec")
def matvec(a,
b,
transpose_a=False,
adjoint_a=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by vector `b`, producing `a` * `b`.
The matrix `a` must, following any transpositions, be a tensor of rank >= 2,
and we must have `shape(b) = shape(a)[:-2] + [shape(a)[-1]]`.
Both `a` and `b` must be of the same type. The supported types are:
`float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
Matrix `a` can be transposed or adjointed (conjugated and transposed) on
the fly by setting one of the corresponding flags to `True`. These are `False`
by default.
If one or both of the inputs contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
This optimization is only available for plain matrices/vectors (rank-2/1
tensors) with datatypes `bfloat16` or `float32`.
For example:
```python
# 2-D tensor `a`
# [[1, 2, 3],
# [4, 5, 6]]
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
# 1-D tensor `b`
# [7, 9, 11]
b = tf.constant([7, 9, 11], shape=[3])
# `a` * `b`
  # [ 58, 139]
  c = tf.linalg.matvec(a, b)
# 3-D tensor `a`
# [[[ 1, 2, 3],
# [ 4, 5, 6]],
# [[ 7, 8, 9],
# [10, 11, 12]]]
a = tf.constant(np.arange(1, 13, dtype=np.int32),
shape=[2, 2, 3])
# 2-D tensor `b`
# [[13, 14, 15],
# [16, 17, 18]]
b = tf.constant(np.arange(13, 19, dtype=np.int32),
shape=[2, 3])
# `a` * `b`
# [[ 86, 212],
# [410, 563]]
  c = tf.linalg.matvec(a, b)
```
Args:
a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
`complex128` and rank > 1.
b: `Tensor` with same type and rank = `rank(a) - 1`.
transpose_a: If `True`, `a` is transposed before multiplication.
adjoint_a: If `True`, `a` is conjugated and transposed before
multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a` and `b` where each inner-most vector is
the product of the corresponding matrices in `a` and vectors in `b`, e.g. if
all transpose or adjoint attributes are `False`:
`output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.
Note: This is matrix-vector product, not element-wise product.
Raises:
ValueError: If transpose_a and adjoint_a are both set to True.
"""
with ops.name_scope(name, "MatVec", [a, b]) as name:
output = matmul(
a,
array_ops.expand_dims(b, axis=-1),
transpose_a=transpose_a,
adjoint_a=adjoint_a,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse)
return array_ops.squeeze(output, axis=-1)
_OverrideBinaryOperatorHelper(matmul, "matmul")
sparse_matmul = gen_math_ops.sparse_mat_mul
tf_export(v1=["sparse_matmul"])(sparse_matmul)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for MatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[0])
else:
k = int(a_shape[1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))
def _as_indexed_slices(x, optimize=True):
"""Convert 'x' to IndexedSlices.
Convert a dense Tensor to a block-sparse IndexedSlices.
Args:
x: Either a Tensor object, or an IndexedSlices object.
optimize: if true, attempt to optimize the conversion of 'x'.
Returns:
An IndexedSlices object.
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape_internal(x, optimize=optimize)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
optimize: if true, attempt to optimize the conversion of each input.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
with_int32_index = [
o.indices for o in outputs if o.indices.dtype == dtypes.int32
]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
@tf_export("math.add_n", "add_n")
def add_n(inputs, name=None):
"""Adds all input tensors element-wise.
Args:
inputs: A list of `Tensor` or `IndexedSlices` objects, each with same shape
and type.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one"
"Tensor/IndexedSlices with the same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs):
raise ValueError("inputs must be a list of at least one"
"Tensor/IndexedSlices with the same dtype and shape")
if len(inputs) == 1:
if isinstance(inputs[0], ops.IndexedSlices):
values = inputs[0].values
else:
values = inputs[0]
if name:
return array_ops.identity(values, name=name)
return values
return gen_math_ops.add_n(inputs, name=name)
@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
@deprecation.deprecated_endpoints("accumulate_n")
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
`tf.math.accumulate_n` performs the same operation as `tf.add_n`, but does not
wait for all of its inputs to be ready before beginning to sum. This can
save memory if inputs are ready at different times, since minimum temporary
storage is proportional to the output size rather than the inputs size.
  `accumulate_n` is differentiable (but was not prior to TensorFlow 1.7).
For example:
```python
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 0], [0, 6]])
tf.math.accumulate_n([a, b, a]) # [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
# [[7, 4],
# [6, 14]]
```
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
def _input_error():
return ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not inputs or not isinstance(inputs, (list, tuple)):
raise _input_error()
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise _input_error()
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise _input_error()
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
# tensor_dtype is for safety only; operator's output type computed in C++
if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
raise TypeError("tensor_dtype is {}, but input is of type {}".format(
tensor_dtype, inputs[0].dtype))
if len(inputs) == 1 and name is None:
return inputs[0]
elif len(inputs) == 1 and name is not None:
return array_ops.identity(inputs[0], name=name)
elif context.executing_eagerly():
# TemporaryVariable not currently supported in eager mode; fall back
# onto AddN for now.
# TODO(frreiss) remove this once the lifetime of eager variables gets
# addressed
return add_n(inputs, name=name)
else:
return gen_math_ops.accumulate_nv2(inputs, name=name, shape=shape) # pylint: disable=protected-access
@ops.RegisterGradient("AccumulateNV2")
def _accumulate_n_grad(op, grad):
"""Same as gradient for AddN. Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
def sigmoid(x, name=None):
"""Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
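  For example (an illustrative sketch; the values follow directly from the
  formula above):
  ```python
  x = tf.constant([-1000.0, 0.0, 1000.0])
  tf.math.sigmoid(x)  # approximately [0.0, 0.5, 1.0]
  ```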
Args:
x: A Tensor with type `float16`, `float32`, `float64`, `complex64`,
or `complex128`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x`.
@compatibility(scipy)
Equivalent to scipy.special.expit
@end_compatibility
"""
with ops.name_scope(name, "Sigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.sigmoid(x, name=name)
@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
@deprecation.deprecated_endpoints("log_sigmoid")
def log_sigmoid(x, name=None):
"""Computes log sigmoid of `x` element-wise.
Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability,
we use `y = -tf.nn.softplus(-x)`.
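  For example (a minimal sketch illustrating why the softplus form is used;
  the values are approximate):
  ```python
  x = tf.constant([-100.0, 0.0])
  tf.math.log_sigmoid(x)  # approximately [-100.0, -0.6931]
  # A naive log(1 / (1 + exp(-x))) would overflow exp(100) in float32 for the
  # first entry, whereas -softplus(-x) stays finite.
  ```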
Args:
x: A Tensor with type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x`.
"""
with ops.name_scope(name, "LogSigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)
@tf_export("math.tanh", "nn.tanh", "tanh")
def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
Args:
x: A Tensor or SparseTensor with type `float16`, `float32`, `double`,
`complex64`, or `complex128`.
name: A name for the operation (optional).
Returns:
A Tensor or SparseTensor respectively with the same type as `x`.
"""
with ops.name_scope(name, "Tanh", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_tanh = gen_math_ops.tanh(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_tanh, dense_shape=x.dense_shape)
else:
return gen_math_ops.tanh(x, name=name)
@tf_export("math.bincount", v1=["math.bincount", "bincount"])
@deprecation.deprecated_endpoints("bincount")
def bincount(arr,
weights=None,
minlength=None,
maxlength=None,
dtype=dtypes.int32):
"""Counts the number of occurrences of each value in an integer array.
If `minlength` and `maxlength` are not given, returns a vector with length
`tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.
If `weights` are non-None, then index `i` of the output stores the sum of the
value in `weights` at each index where the corresponding value in `arr` is
`i`.
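  For example (an illustrative sketch; the counts are worked out by hand):
  ```python
  arr = tf.constant([1, 1, 2, 3, 3, 3])
  tf.math.bincount(arr)  # [0, 2, 1, 3]: 0 occurs 0 times, 1 twice, 2 once, 3 thrice
  weights = tf.constant([1., 1., 2., 0.5, 0.5, 0.5])
  tf.math.bincount(arr, weights=weights)  # [0.0, 2.0, 2.0, 1.5]
  ```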
Args:
arr: An int32 tensor of non-negative values.
weights: If non-None, must be the same shape as arr. For each value in
`arr`, the bin will be incremented by the corresponding weight instead
of 1.
minlength: If given, ensures the output has length at least `minlength`,
padding with zeros at the end if necessary.
    maxlength: If given, skips values in `arr` that are equal to or greater than
`maxlength`, ensuring that the output has length at most `maxlength`.
dtype: If `weights` is None, determines the type of the output bins.
Returns:
A vector with the same dtype as `weights` or the given `dtype`. The bin
values.
"""
arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32)
array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0
output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)
if minlength is not None:
minlength = ops.convert_to_tensor(
minlength, name="minlength", dtype=dtypes.int32)
output_size = gen_math_ops.maximum(minlength, output_size)
if maxlength is not None:
maxlength = ops.convert_to_tensor(
maxlength, name="maxlength", dtype=dtypes.int32)
output_size = gen_math_ops.minimum(maxlength, output_size)
if weights is not None:
weights = ops.convert_to_tensor(weights, name="weights")
return gen_math_ops.unsorted_segment_sum(weights, arr, output_size)
weights = constant_op.constant([], dtype)
return gen_math_ops.bincount(arr, output_size, weights)
@tf_export("math.cumsum", "cumsum")
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative sum of the tensor `x` along `axis`.
By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output:
```python
tf.cumsum([a, b, c]) # [a, a + b, a + b + c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
instead:
```python
tf.cumsum([a, b, c], exclusive=True) # [0, a, a + b]
```
By setting the `reverse` kwarg to `True`, the cumsum is performed in the
opposite direction:
```python
tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python
tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.
exclusive: If `True`, perform exclusive cumsum.
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumsum", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumsum(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
@deprecation.deprecated_endpoints("cumprod")
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative product of the tensor `x` along `axis`.
By default, this op performs an inclusive cumprod, which means that the
first element of the input is identical to the first element of the output:
```python
tf.math.cumprod([a, b, c]) # [a, a * b, a * b * c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
performed
instead:
```python
tf.math.cumprod([a, b, c], exclusive=True) # [1, a, a * b]
```
By setting the `reverse` kwarg to `True`, the cumprod is performed in the
opposite direction:
```python
tf.math.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python
tf.math.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.
exclusive: If `True`, perform exclusive cumprod.
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumprod", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumprod(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
@tf_export("math.conj", v1=["math.conj", "conj"])
@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
r"""Returns the complex conjugate of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in `input`. The
complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
real part and *b* is the imaginary part.
The complex conjugate returned by this operation is of the form \\(a - bj\\).
For example:
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.math.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
If `x` is real, it is returned unchanged.
Args:
x: `Tensor` to conjugate. Must have numeric or variant type.
name: A name for the operation (optional).
Returns:
A `Tensor` that is the conjugate of `x` (with the same type).
Raises:
TypeError: If `x` is not a numeric tensor.
"""
if isinstance(x, ops.Tensor):
dt = x.dtype
if dt.is_floating or dt.is_integer:
return x
with ops.name_scope(name, "Conj", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex or x.dtype == dtypes.variant:
return gen_math_ops.conj(x, name=name)
elif x.dtype.is_floating or x.dtype.is_integer:
return x
else:
raise TypeError(
"Expected numeric or variant tensor, got dtype %r" % x.dtype)
def _BroadcastShape(op):
"""Common shape function for binary operators that broadcast their inputs."""
return [
common_shapes.broadcast_shape(op.inputs[0].get_shape(),
op.inputs[1].get_shape())
]
def reduced_shape(input_shape, axes):
"""Helper function for reduction ops.
Args:
input_shape: 1-D Tensor, the shape of the Tensor being reduced.
axes: 1-D Tensor, the reduction axes.
Returns:
A 1-D Tensor, the output shape as if keepdims were set to True.
"""
# Example:
# cast needed for SparseTensor reductions
if context.executing_eagerly():
input_shape = input_shape.numpy()
axes = axes.numpy()
input_shape[axes] = 1
return input_shape
input_shape = to_int32(input_shape) # [2, 3, 5, 7]
axes = to_int32(axes) # [1, 2]
input_rank = array_ops.size(input_shape) # 4
axes = (axes + input_rank) % input_rank
axes_shape = array_ops.shape(axes) # [2]
return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]
[
range(input_rank), # [0, 1, 2, 3]
axes
], # [1, 2]
[
input_shape, # [2, 3, 5, 7]
array_ops.fill(axes_shape, 1)
]) # [1, 1]
def _unsorted_segment_N(data, segment_ids, num_segments):
""" Helper function for unsorted_segment_mean/_sqrtN. Computes the number
of segment entries with 0-entries set to 1 to allow division by N.
"""
# bincount doesn't support negative indices so we use unsorted_segment_sum
segment_ids_shape = array_ops.shape_internal(segment_ids)
ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
N = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)
# add dimensions for all non-reduced axes
ndims_output = data.shape.ndims - segment_ids.shape.ndims
broadcast_shape = [num_segments] + [1] * ndims_output
N = array_ops.reshape(N, broadcast_shape)
return gen_math_ops.maximum(N, 1)
@tf_export(
"math.unsorted_segment_mean",
v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
@deprecation.deprecated_endpoints("unsorted_segment_mean")
def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
r"""Computes the mean along segments of a tensor.
Read [the section on
segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
for an explanation of segments.
This operator is similar to the unsorted segment sum operator found
[here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the mean of all
entries belonging to a segment such that:
\\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
  `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the number of
  occurrences of id \\(i\\).
If there is no entry for a given segment ID `i`, it outputs 0.
If the given segment ID `i` is negative, the value is dropped and will not
be added to the sum of the segment.
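  For example (a small illustrative sketch; the segment layout and results are
  assumed for demonstration):
  ```python
  data = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
  segment_ids = tf.constant([0, 0, 1])
  tf.math.unsorted_segment_mean(data, segment_ids, num_segments=2)
  # => [[2.0, 3.0],
  #     [5.0, 6.0]]
  ```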
Args:
data: A `Tensor` with floating point or complex dtype.
segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
num_segments: An integer scalar `Tensor`. The number of distinct
segment IDs.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.
"""
with ops.name_scope(name, "UnsortedSegmentMean"):
data = ops.convert_to_tensor(data)
segment_ids = ops.convert_to_tensor(segment_ids)
N = _unsorted_segment_N(data, segment_ids, num_segments)
summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
return summed / N
@tf_export(
"math.unsorted_segment_sqrt_n",
v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
r"""Computes the sum along segments of a tensor divided by the sqrt(N).
Read [the section on
segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
for an explanation of segments.
This operator is similar to the unsorted segment sum operator found
[here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Additionally to computing the sum over segments, it divides the results by
sqrt(N).
\\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over
  tuples `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the
  number of occurrences of id \\(i\\).
If there is no entry for a given segment ID `i`, it outputs 0.
Note that this op only supports floating point and complex dtypes,
due to tf.sqrt only supporting these types.
If the given segment ID `i` is negative, the value is dropped and will not
be added to the sum of the segment.
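  For example (an illustrative sketch mirroring `unsorted_segment_mean` above;
  the results are approximate):
  ```python
  data = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
  segment_ids = tf.constant([0, 0, 1])
  tf.math.unsorted_segment_sqrt_n(data, segment_ids, num_segments=2)
  # => approximately [[2.83, 4.24],   # [4, 6] / sqrt(2)
  #                   [5.0, 6.0]]     # [5, 6] / sqrt(1)
  ```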
Args:
data: A `Tensor` with floating point or complex dtype.
segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
num_segments: An integer scalar `Tensor`. The number of distinct
segment IDs.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.
"""
with ops.name_scope(name, "UnsortedSegmentSqrtN"):
data = ops.convert_to_tensor(data)
segment_ids = ops.convert_to_tensor(segment_ids)
N = _unsorted_segment_N(data, segment_ids, num_segments)
summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
return summed / gen_math_ops.sqrt(N)
@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
@deprecation.deprecated_endpoints("sparse_segment_sum")
def sparse_segment_sum(data, indices, segment_ids, name=None,
num_segments=None):
r"""Computes the sum along sparse segments of a tensor.
Read [the section on
segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
for an explanation of segments.
Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.
`segment_ids` is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases `num_segments` is used to determine
the size of the output.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
# Select two rows, one segment.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]
  # Select two rows, two segments.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1 2 3 4]
# [-1 -2 -3 -4]]
# With missing segment ids.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
num_segments=4)
# => [[ 1 2 3 4]
# [ 0 0 0 0]
# [-1 -2 -3 -4]
# [ 0 0 0 0]]
# Select all rows, two segments.
tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
# [5 6 7 8]]
# Which is equivalent to:
tf.segment_sum(c, tf.constant([0, 0, 1]))
```
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.
Values should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_sum_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_sum(
data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse.segment_sum", v1=[])
def sparse_segment_sum_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
  return sparse_segment_sum(
      data, indices, segment_ids, name=name, num_segments=num_segments)
@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
@deprecation.deprecated_endpoints("sparse_segment_mean")
def sparse_segment_mean(data,
indices,
segment_ids,
name=None,
num_segments=None):
r"""Computes the mean along sparse segments of a tensor.
Read [the section on
segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
for an explanation of segments.
Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.
`segment_ids` is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases `num_segments` is used to determine
the size of the output.
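  For example (an illustrative sketch mirroring the `sparse_segment_sum`
  example above; the row means are worked out by hand):
  ```python
  c = tf.constant([[1.0, 2.0, 3.0, 4.0],
                   [-1.0, -2.0, -3.0, -4.0],
                   [6.0, 8.0, 10.0, 12.0]])
  # Mean of rows 0 and 2, as a single segment.
  tf.sparse.segment_mean(c, tf.constant([0, 2]), tf.constant([0, 0]))
  # => [[3.5, 5.0, 6.5, 8.0]]
  ```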
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.
Values should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_mean_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_mean(
data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse.segment_mean", v1=[])
def sparse_segment_mean_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
r"""Computes the mean along sparse segments of a tensor.
Read [the section on
segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
for an explanation of segments.
Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.
`segment_ids` is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases `num_segments` is used to determine
the size of the output.
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
should be sorted and can be repeated.
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
name: A name for the operation (optional).
Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
return sparse_segment_mean(
data, indices, segment_ids, name=name, num_segments=num_segments)
@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
def sparse_segment_sqrt_n(data,
indices,
segment_ids,
name=None,
num_segments=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
`N` is the size of the segment being reduced.
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`.
Values should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_sqrt_n(
data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse.segment_sqrt_n", v1=[])
def sparse_segment_sqrt_n_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
`N` is the size of the segment being reduced.
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
should be sorted and can be repeated.
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
name: A name for the operation (optional).
Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
return sparse_segment_sqrt_n(
data, indices, segment_ids, name=name, num_segments=num_segments)
@tf_export("tensordot", "linalg.tensordot")
def tensordot(a, b, axes, name=None):
r"""Tensor contraction of a and b along specified axes.
Tensordot (also known as tensor contraction) sums the product of elements
from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
The lists `a_axes` and `b_axes` specify those pairs of axes along which to
contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
`a_axes` and `b_axes` must have identical length and consist of unique
integers that specify valid axes for each of the tensors.
This operation corresponds to `numpy.tensordot(a, b, axes)`.
Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
is equivalent to matrix multiplication.
Example 2: When `a` and `b` are matrices (order 2), the case
`axes = [[1], [0]]` is equivalent to matrix multiplication.
Example 3: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
\\(c_{jklm}\\) whose entry
corresponding to the indices \\((j,k,l,m)\\) is given by:
\\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).
In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.
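  For example (a minimal sketch of Examples 1 and 2 above; the sample matrices
  are assumed and the result is ordinary matrix multiplication):
  ```python
  a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  b = tf.constant([[5.0, 6.0], [7.0, 8.0]])
  tf.tensordot(a, b, axes=1)           # same as tf.matmul(a, b)
  # => [[19., 22.],
  #     [43., 50.]]
  tf.tensordot(a, b, axes=[[1], [0]])  # equivalent axes specification
  ```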
Args:
a: `Tensor` of type `float32` or `float64`.
b: `Tensor` with the same type as `a`.
axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
If axes is a scalar, sum over the last N axes of a and the first N axes of
b in order. If axes is a list or `Tensor` the first and second row contain
the set of unique integers specifying axes along which the contraction is
computed, for `a` and `b`, respectively. The number of axes for `a` and
`b` must be equal.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `a`.
Raises:
ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
IndexError: If the values in axes exceed the rank of the corresponding
tensor.
"""
def _tensordot_reshape(a, axes, flipped=False):
"""Helper method to perform transpose and reshape for contraction op.
This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
using `array_ops.transpose` and `array_ops.reshape`. The method takes a
tensor and performs the correct transpose and reshape operation for a given
set of indices. It returns the reshaped tensor as well as a list of indices
necessary to reshape the tensor again after matrix multiplication.
Args:
a: `Tensor`.
axes: List or `int32` `Tensor` of unique indices specifying valid axes of
`a`.
flipped: An optional `bool`. Defaults to `False`. If `True`, the method
assumes that `a` is the second argument in the contraction operation.
Returns:
A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
either a list of integers or an `int32` `Tensor`, depending on whether
the shape of a is fully specified, and free_dims_static is either a list
of integers and None values, or None, representing the inferred
static shape of the free dimensions
"""
if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
shape_a = a.get_shape().as_list()
axes = [i if i >= 0 else i + len(shape_a) for i in axes]
free = [i for i in xrange(len(shape_a)) if i not in axes]
free_dims = [shape_a[i] for i in free]
prod_free = int(np.prod([shape_a[i] for i in free]))
prod_axes = int(np.prod([shape_a[i] for i in axes]))
perm = list(axes) + free if flipped else free + list(axes)
new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
return reshaped_a, free_dims, free_dims
else:
if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
shape_a = a.get_shape().as_list()
axes = [i if i >= 0 else i + len(shape_a) for i in axes]
free = [i for i in xrange(len(shape_a)) if i not in axes]
axes_dims = [shape_a[i] for i in axes]
free_dims = [shape_a[i] for i in free]
free_dims_static = free_dims
axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
shape_a = array_ops.shape(a)
else:
free_dims_static = None
shape_a = array_ops.shape(a)
rank_a = array_ops.rank(a)
axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
axes = array_ops.where(axes >= 0, axes, axes + rank_a)
free, _ = array_ops.setdiff1d(range(rank_a), axes)
free_dims = array_ops.gather(shape_a, free)
axes_dims = array_ops.gather(shape_a, axes)
prod_free_dims = reduce_prod(free_dims)
prod_axes_dims = reduce_prod(axes_dims)
if flipped:
perm = array_ops.concat([axes, free], 0)
new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
else:
perm = array_ops.concat([free, axes], 0)
new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
return reshaped_a, free_dims, free_dims_static
def _tensordot_axes(a, axes):
"""Generates two sets of contraction axes for the two tensor arguments."""
a_shape = a.get_shape()
if isinstance(axes, compat.integral_types):
if axes < 0:
raise ValueError("'axes' must be at least 0.")
if a_shape.ndims is not None:
if axes > a_shape.ndims:
raise ValueError("'axes' must not be larger than the number of "
"dimensions of tensor %s." % a)
return (list(xrange(a_shape.ndims - axes, a_shape.ndims)),
list(xrange(axes)))
else:
rank = array_ops.rank(a)
return (range(rank - axes, rank, dtype=dtypes.int32),
range(axes, dtype=dtypes.int32))
elif isinstance(axes, (list, tuple)):
if len(axes) != 2:
raise ValueError("'axes' must be an integer or have length 2.")
a_axes = axes[0]
b_axes = axes[1]
if isinstance(a_axes, compat.integral_types) and \
isinstance(b_axes, compat.integral_types):
a_axes = [a_axes]
b_axes = [b_axes]
if len(a_axes) != len(b_axes):
raise ValueError(
"Different number of contraction axes 'a' and 'b', %s != %s." %
(len(a_axes), len(b_axes)))
return a_axes, b_axes
else:
axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
return axes[0], axes[1]
with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
a_axes, b_axes = _tensordot_axes(a, axes)
a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
b, b_axes, True)
ab_matmul = matmul(a_reshape, b_reshape)
if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name)
else:
a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
product = array_ops.reshape(
ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
if a_free_dims_static is not None and b_free_dims_static is not None:
product.set_shape(a_free_dims_static + b_free_dims_static)
return product
@tf_export("math.polyval")
def polyval(coeffs, x, name=None):
r"""Computes the elementwise value of a polynomial.
  If `x` is a tensor and `coeffs` is a list of n + 1 tensors, this function
  returns the value of the n-th order polynomial
     p(x) = coeffs[n] + coeffs[n-1] * x + ... + coeffs[0] * x**n
  evaluated using Horner's method, i.e.
     p(x) = coeffs[n] + x * (coeffs[n-1] + ... + x * (coeffs[1] +
            x * coeffs[0]))
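  For example (a small illustrative sketch; the coefficients and hand-computed
  value are chosen for demonstration):
  ```python
  coeffs = [1.0, 2.0, 3.0]    # p(x) = 3 + 2*x + 1*x**2
  x = tf.constant(2.0)
  tf.math.polyval(coeffs, x)  # => 11.0
  ```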
Args:
coeffs: A list of `Tensor` representing the coefficients of the polynomial.
x: A `Tensor` representing the variable of the polynomial.
name: A name for the operation (optional).
Returns:
    A `Tensor` with the same shape as the expression p(x), with the usual
    broadcasting rules for element-wise addition and multiplication applied.
@compatibility(numpy)
Equivalent to numpy.polyval.
@end_compatibility
"""
with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if len(coeffs) < 1:
return array_ops.zeros_like(x, name=name)
coeffs = [
ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
for index, coeff in enumerate(coeffs)
]
p = coeffs[0]
for c in coeffs[1:]:
p = c + p * x
return p
@tf_export("math.bessel_i0e")
def bessel_i0e(x, name=None):
"""Computes the Bessel i0e function of `x` element-wise.
Exponentially scaled modified Bessel function of order 0 defined as
`bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.
This function is faster and numerically stabler than `bessel_i0(x)`.
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(scipy)
Equivalent to scipy.special.i0e
@end_compatibility
"""
with ops.name_scope(name, "bessel_i0e", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_i0e = gen_math_ops.bessel_i0e(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_i0e, dense_shape=x.dense_shape)
else:
return gen_math_ops.bessel_i0e(x, name=name)
@tf_export("math.bessel_i1e")
def bessel_i1e(x, name=None):
"""Computes the Bessel i1e function of `x` element-wise.
Exponentially scaled modified Bessel function of order 1 defined as
`bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.
This function is faster and numerically stabler than `bessel_i1(x)`.
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(scipy)
Equivalent to scipy.special.i1e
@end_compatibility
"""
with ops.name_scope(name, "bessel_i1e", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_i1e = gen_math_ops.bessel_i1e(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_i1e, dense_shape=x.dense_shape)
else:
return gen_math_ops.bessel_i1e(x, name=name)
| 35.581375 | 121 | 0.658904 |
9741191579b1e4d68dc779e2701f1654cb2b0c9a | 84 | py | Python | tests/test_clean.py | imnetdb/imnetdb | f7e1ef41be2f3f2089c666dbf602ea99112bf516 | [
"Apache-2.0"
] | 5 | 2019-03-13T20:34:36.000Z | 2020-04-18T15:35:13.000Z | tests/test_clean.py | imnetdb/imnetdb | f7e1ef41be2f3f2089c666dbf602ea99112bf516 | [
"Apache-2.0"
] | 2 | 2019-02-24T18:22:10.000Z | 2019-05-07T10:35:32.000Z | tests/test_clean.py | imnetdb/imnetdb | f7e1ef41be2f3f2089c666dbf602ea99112bf516 | [
"Apache-2.0"
] | 2 | 2019-04-17T23:42:55.000Z | 2019-04-20T15:55:43.000Z |
def test_clean(imnetdb):
imnetdb.wipe_database()
imnetdb.ensure_database()
| 16.8 | 29 | 0.738095 |
33c4ebfc49fc5b9c64364db5016766cb048f0d4f | 46,573 | py | Python | pyNastran/op2/dev/pyyeti/op4.py | jtran10/pyNastran | 4aed8e05b91576c2b50ee835f0497a9aad1d2cb0 | [
"BSD-3-Clause"
] | null | null | null | pyNastran/op2/dev/pyyeti/op4.py | jtran10/pyNastran | 4aed8e05b91576c2b50ee835f0497a9aad1d2cb0 | [
"BSD-3-Clause"
] | null | null | null | pyNastran/op2/dev/pyyeti/op4.py | jtran10/pyNastran | 4aed8e05b91576c2b50ee835f0497a9aad1d2cb0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Python tools for reading/writing Nastran .op4 files. Can read and
write all formats (as far as I know) with the restriction that the
output files created by this class are always double precision. The
binary files can be in big or little endian format.
Currently, all matrices are read into dense (non-sparse format)
matrices even if they were written in a sparse format.
@author: Tim Widrick
"""
import struct
import warnings
import sys
import itertools as it
import numpy as np
class OP4(object):
"""
Class for reading/writing Nastran output4 (.op4) files.
See demo below and refer to the help on these functions for more
information: :func:`write`, :func:`dctload`, :func:`listload`,
:func:`dir`.
Examples
--------
Instantiate the class and create matrices for demo:
>>> import op4
>>> o4 = op4.OP4()
>>> import numpy as np
>>> r = np.random.randn(3, 5)
>>> c = 1j*np.random.randn(3, 5) + r
Write binary op4 file, with 'r' first:
>>> o4.write('testbin.op4', ['r', 'c'], [r, c])
Write ascii op4 file without caring about order:
>>> o4.write('testascii.op4', dict(r=r, c=c), binary=False)
To read an op4 file into a dictionary (indexed by the name in
lower case):
>>> dct = o4.dctload('testbin.op4')
Or, to read into a list:
>>> names, mats, forms, mtypes = o4.listload('testascii.op4')
Check some results:
>>> print(np.all(r == dct['r'][0]))
True
>>> if names[0] == 'c':
... print(np.all(c == mats[0]))
... else:
... print(np.all(c == mats[1]))
True
To print a 'directory' of an op4 file:
>>> d = o4.dir('testbin.op4')
r , 3 x 5 , form=2, mtype=2
c , 3 x 5 , form=2, mtype=4
Clean up:
>>> import os
>>> os.remove('testbin.op4')
>>> os.remove('testascii.op4')
"""
def __init__(self):
self._fileh = None
string = '%.1E' % 1.2
self._expdigits = len(string) - (string.find('E') + 2)
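        # For example (an illustrative note): on most platforms '%.1E' % 1.2
        # gives '1.2E+00', so _expdigits ends up as 2 -- the number of digits
        # printed in the exponent.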
self._rows4bigmat = 65536
# Tunable value ... if number of values exceeds this, read
# with numpy.fromfile instead of struct.unpack.
self._rowsCutoff = 3000
def __del__(self):
if self._fileh:
self._fileh.close()
self._fileh = None
def _op4close(self):
if self._fileh:
self._fileh.close()
self._fileh = None
def _op4open_read(self, filename):
"""
Open binary or ascii op4 file for reading.
Sets these class variables:
_fileh : file handle
Value returned by open(). File is opened in 'r' mode if
ascii, 'rb' mode if binary.
_ascii : bool
True if file is ascii.
_dformat : bool
True if an ascii file uses 'D' instead of 'E' (eg, 1.4D3
instead of 1.4E3)
_bit64 : True or False
True if 'key' integers are 64-bit in binary op4 files.
_endian : string
Will be '' if no byte-swapping is required; otherwise,
either '>' or '<' for big-endian and little-endian,
respectively. Only used for binary files.
_Str_i4 : struct.Struct object
Precompiled for reading 4 byte integers
_Str_i : struct.Struct object
Precompiled for reading 4 or 8 byte integers
_bytes_i : integer
Either 4 or 8, to go with Str_i.
_str_sr : string
Either self._endian + b'%df' or self._endian + b'%dd',
depending on self._bit64; for reading single precision
reals.
_bytes_sr : integer
Number of bytes in single real.
_str_dr : string
self._endian + b'%dd', for reading double precision reals.
_wordsperdouble : integer
Either 1 or 2; 2 if self._bit64 is False.
"""
self._fileh = open(filename, 'rb')
header_bytes = self._fileh.read(16)
self._endian = b''
self._uendian = ''
self._dformat = False
# Assuming binary, check for a zero byte in the 'type' field;
# will have one at front or back if binary:
if header_bytes[12] == 0 or header_bytes[15] == 0:
self._ascii = False
if sys.byteorder == 'little':
if header_bytes[12] == 0:
self._endian = b'>'
self._uendian = '>'
else:
if header_bytes[12] != 0:
self._endian = b'<'
self._uendian = '<'
self._Str_i4 = struct.Struct(self._endian + b'i')
reclen = self._Str_i4.unpack(header_bytes[:4])[0]
if reclen == 48:
self._bit64 = True
self._Str_i = struct.Struct(self._endian + b'q')
self._bytes_i = 8
self._Str_ii = struct.Struct(self._endian + b'qq')
self._bytes_ii = 16
self._Str_iii = struct.Struct(self._endian + b'3q')
self._bytes_iii = 24
self._Str_iiii = struct.Struct(self._endian + b'4q')
self._bytes_iiii = 32
self._str_sr = self._endian + b'%dd'
self._str_sr_fromfile = np.dtype(self._uendian + 'f8')
self._bytes_sr = 8
self._wordsperdouble = 1
else:
self._bit64 = False
self._Str_i = self._Str_i4
self._bytes_i = 4
self._Str_ii = struct.Struct(self._endian + b'ii')
self._bytes_ii = 8
self._Str_iii = struct.Struct(self._endian + b'3i')
self._bytes_iii = 12
self._Str_iiii = struct.Struct(self._endian + b'4i')
self._bytes_iiii = 16
self._str_sr = self._endian + b'%df'
self._str_sr_fromfile = np.dtype(self._uendian + 'f4')
self._bytes_sr = 4
self._wordsperdouble = 2
self._str_dr = self._endian + b'%dd'
self._str_dr_fromfile = np.dtype(self._uendian + 'f8')
self._fileh.seek(0)
else:
self._ascii = True
self._fileh.readline()
self._fileh.readline()
line = self._fileh.readline().decode()
# sparse formats have integer header line:
if line.find('.') == -1:
line = self._fileh.readline().decode()
if line.find('D') > -1:
self._dformat = True
self._fileh.close()
self._fileh = open(filename, 'r')
def _skipop4_ascii(self, perline, rows, cols, mtype, numlen):
"""
Skip an op4 matrix - ascii.
Parameters
----------
perline : integer
Number of elements per line in the file.
rows : integer
Number of rows in matrix.
cols : integer
Number of columns in matrix.
mtype : integer
Nastran matrix type.
numlen : integer
String length for each number.
Returns
-------
None
On entry, file is positioned after the title line, but before
the first column is printed. On exit, the file is positioned
so the next readline will get the next title line.
"""
# read until next matrix:
if rows < 0 or rows >= self._rows4bigmat:
bigmat = True
else:
bigmat = False
if mtype & 1:
wper = 1
else:
wper = 2
line = self._fileh.readline()
c = int(line[:8]) - 1
r = int(line[8:16])
if r > 0:
while c < cols:
elems = int(line[16:24])
nlines = (elems + perline - 1) // perline
for _ in it.repeat(None, nlines):
self._fileh.readline()
line = self._fileh.readline()
c = int(line[:8]) - 1
elif bigmat:
while c < cols:
elems = int(line[16:24])
while elems > 0:
line = self._fileh.readline()
L = int(line[:8])-1 # L
elems -= L + 2
L //= wper
# read column as a long string
nlines = (L + perline - 1) // perline
for _ in it.repeat(None, nlines):
self._fileh.readline()
line = self._fileh.readline()
c = int(line[:8]) - 1
else:
while c < cols:
elems = int(line[16:24])
while elems > 0:
line = self._fileh.readline()
IS = int(line[:8])
L = (IS >> 16) - 1 # L
elems -= L + 1
L //= wper
# read column as a long string
nlines = (L + perline - 1) // perline
for _ in it.repeat(None, nlines):
self._fileh.readline()
line = self._fileh.readline()
c = int(line[:8]) - 1
self._fileh.readline()
def _check_name(self, name):
"""
Check name read from op4 file and put '_' on front if needed.
Returns new name (usually the same as the input name).
"""
if not (name[0].isalpha() or name[0] == '_'):
oldname, name = name, '_'+name
warnings.warn('Output4 file has matrix name: {0}. '
'Changing to {1}.'.format(oldname, name),
RuntimeWarning)
return name
def _loadop4_ascii(self, patternlist=None, listonly=False):
"""
Reads next matching matrix or returns information on the next
matrix in the ascii op4 file.
Parameters
----------
patternlist : list
List of string patterns; each matrix name is matched
against this list: if it matches any of the patterns, it
is read in.
listonly : bool
True if only reading name.
Returns
-------
tuple: (name, matrix, form, mtype)
name : string
Lower-case name of matrix.
matrix : 2d ndarray
The matrix.
form : integer
Nastran form of matrix.
mtype : integer
Nastran matrix type.
.. note:: All outputs will be None if reached EOF.
.. note:: The `matrix` output will be [rows, cols] of the
matrix if the matrix is skipped.
"""
while 1:
line = self._fileh.readline()
line = line.rstrip()
if line == '':
return None, None, None, None
cols = int(line[:8])
rows = int(line[8:16])
form = int(line[16:24])
mtype = int(line[24:32])
length = len(line)
if length > 40:
name = line[32:40].strip().lower()
else:
name = line[32:].lower()
name = self._check_name(name)
perline = 5
numlen = 16
if length > 44:
# 1P,3E24.16 <-- starts at position 40
numformat = line[43:]
p = numformat.find('E')
if p < 0:
p = numformat.find('D')
if p > 0:
perline = int(numformat[:p])
numlen = int(numformat[p+1:].split('.')[0])
if patternlist and name not in patternlist:
skip = 1
else:
skip = 0
if listonly or skip:
self._skipop4_ascii(perline, rows, cols,
mtype, numlen)
if listonly:
return name, (abs(rows), cols), form, mtype
else:
break
if rows < 0 or rows >= self._rows4bigmat:
rows = abs(rows)
bigmat = True
else:
bigmat = False
if mtype > 2:
            # complex matrix ... just read as if it's real, but with twice
            # the rows (must also use fortran ordering for this to work)
multiplier = 2
else:
multiplier = 1
# real matrix
X = np.zeros((rows*multiplier, cols), dtype=float, order='F')
if mtype & 1:
wper = 1
else:
wper = 2
line = self._fileh.readline()
linelen = perline * numlen
c = int(line[:8]) - 1
r = int(line[8:16])
if r > 0:
while c < cols:
elems = int(line[16:24])
r = (r-1)*multiplier
# read column as a long string
nlines = (elems - 1) // perline
blocklist = [ln[:linelen]
for ln in it.islice(self._fileh,
nlines)]
s = ''.join(blocklist) + self._fileh.readline()
if self._dformat:
s = s.replace('D', 'E')
a = 0
for i in range(elems):
b = a + numlen
X[r+i, c] = s[a:b]
a = b
line = self._fileh.readline()
c = int(line[:8]) - 1
r = int(line[8:16])
elif bigmat:
while c < cols:
elems = int(line[16:24])
while elems > 0:
line = self._fileh.readline()
L = int(line[:8])-1 # L
r = int(line[8:16])-1 # irow-1
elems -= L + 2
r *= multiplier
L //= wper
nlines = (L - 1) // perline
blocklist = [ln[:linelen]
for ln in it.islice(self._fileh,
nlines)]
s = ''.join(blocklist) + self._fileh.readline()
if self._dformat:
s = s.replace('D', 'E')
a = 0
for i in range(L):
b = a + numlen
X[r+i, c] = s[a:b]
a = b
line = self._fileh.readline()
c = int(line[:8]) - 1
r = int(line[8:16])
else:
while c < cols:
elems = int(line[16:24])
while elems > 0:
line = self._fileh.readline()
IS = int(line) # [:8])
L = (IS >> 16) - 1 # L
r = IS - ((L+1) << 16)-1 # irow-1
elems -= L + 1
r *= multiplier
L //= wper
nlines = (L - 1) // perline
blocklist = [ln[:linelen]
for ln in it.islice(self._fileh,
nlines)]
s = ''.join(blocklist) + self._fileh.readline()
if self._dformat:
s = s.replace('D', 'E')
a = 0
for i in range(L):
b = a + numlen
X[r+i, c] = s[a:b]
a = b
line = self._fileh.readline()
c = int(line[:8]) - 1
r = int(line[8:16])
self._fileh.readline()
if mtype > 2:
X.dtype = complex
return name, X, form, mtype
def _skipop4_binary(self, cols):
"""
Skip a binary op4 matrix.
Parameters
----------
cols : integer
Number of columns in matrix.
"""
# Scan matrix by column
icol = 1
bi = self._bytes_i
delta = 4 - bi
while icol <= cols:
# Read record length at start of record:
reclen = self._Str_i4.unpack(self._fileh.read(4))[0]
# Read column header
icol = self._Str_i.unpack(self._fileh.read(bi))[0]
self._fileh.seek(reclen + delta, 1)
def _loadop4_binary(self, patternlist=None, listonly=False):
"""
Reads next matching matrix or returns information on the next
matrix in the binary op4 file.
Parameters
----------
patternlist : list
List of string patterns; each matrix name is matched
against this list: if it matches any of the patterns, it
is read in.
listonly : bool
True if only reading name.
Returns
-------
tuple: (name, matrix, form, mtype)
name : string
Lower-case name of matrix.
matrix : 2d ndarray
The matrix.
form : integer
Nastran form of matrix.
mtype : integer
Nastran matrix type.
.. note:: All outputs will be None if reached EOF.
.. note:: The `matrix` output will be [rows, cols] of the matrix
if the matrix is skipped.
"""
fp = self._fileh
while 1:
if len(fp.read(4)) == 0:
return None, None, None, None
cols, rows, form, mtype =\
self._Str_iiii.unpack(fp.read(self._bytes_iiii))
# Read ascii name of matrix:
if self._bit64:
name = fp.read(16).decode()
else:
name = fp.read(8).decode()
name = self._check_name(name.lower().strip())
fp.read(4)
if patternlist and name not in patternlist:
skip = 1
else:
skip = 0
if listonly or skip:
self._skipop4_binary(cols)
if listonly:
return name, (abs(rows), cols), form, mtype
else:
break
if rows < 0 or rows >= self._rows4bigmat:
rows = abs(rows)
bigmat = True
else:
bigmat = False
if mtype > 2:
            # complex matrix ... just read as if it's real, but with twice
            # the rows (must also use fortran ordering for this to work)
multiplier = 2
else:
multiplier = 1
# real matrix
X = np.zeros((rows*multiplier, cols), dtype=float, order='F')
if mtype & 1:
numform = self._str_sr
numform2 = self._str_sr_fromfile
bytesreal = self._bytes_sr
wper = 1
else:
numform = self._str_dr
numform2 = self._str_dr_fromfile
bytesreal = 8
wper = self._wordsperdouble
reclen = self._Str_i4.unpack(fp.read(4))[0]
c, r, nwords =\
self._Str_iii.unpack(fp.read(self._bytes_iii))
c -= 1
if r > 0: # non sparse format
while c < cols:
r = (r-1)*multiplier
nwords //= wper
if nwords < self._rowsCutoff:
X[r:r+nwords, c] =\
struct.unpack(numform % nwords,
fp.read(bytesreal*nwords))
else:
X[r:r+nwords, c] = np.fromfile(fp, numform2,
nwords)
fp.read(4)
reclen = self._Str_i4.unpack(fp.read(4))[0]
c, r, nwords =\
self._Str_iii.unpack(fp.read(self._bytes_iii))
c -= 1
elif bigmat:
while c < cols:
# bigmat sparse format
# Read column data, one string of numbers at a time
# (strings of zeros are skipped)
while nwords > 0:
L, r = self._Str_ii.unpack(fp.read(self._bytes_ii))
nwords -= L + 1
L = (L-1) // wper
r = (r-1) * multiplier
if L < self._rowsCutoff:
X[r:r+L, c] =\
struct.unpack(numform % L,
fp.read(bytesreal*L))
else:
X[r:r+L, c] = np.fromfile(fp, numform2, L)
fp.read(4)
reclen = self._Str_i4.unpack(fp.read(4))[0]
c, r, nwords =\
self._Str_iii.unpack(fp.read(self._bytes_iii))
c -= 1
else:
while c < cols:
# non-bigmat sparse format
# Read column data, one string of numbers at a time
# (strings of zeros are skipped)
while nwords > 0:
IS = self._Str_i.unpack(fp.read(self._bytes_i))[0]
L = (IS >> 16) - 1 # L
r = IS - ((L+1) << 16) - 1 # irow-1
nwords -= L + 1 # words left
L //= wper
r *= multiplier
if L < self._rowsCutoff:
X[r:r+L, c] =\
struct.unpack(numform % L,
fp.read(bytesreal*L))
else:
X[r:r+L, c] = np.fromfile(fp, numform2, L)
fp.read(4)
reclen = self._Str_i4.unpack(fp.read(4))[0]
c, r, nwords =\
self._Str_iii.unpack(fp.read(self._bytes_iii))
c -= 1
# read final bytes of record and record marker
fp.read(reclen-3*self._bytes_i+4)
if mtype > 2:
X.dtype = complex
return name, X, form, mtype
def _sparse_col_stats(self, v):
"""
Returns locations of non-zero values and length of each
series.
Parameters
----------
v : ndarray
1d ndarray (the column of the matrix).
Returns
-------
ind : ndarray
m x 2 ndarray. m is number of non-zero sequences in v.
First column contains the indices to the start of each
sequence and the second column contains the length of
the sequence.
For example, if v is:
::
v = [ 0., 0., 0., 7., 5., 0., 6., 0., 2., 3.]
Then, ind will be:
::
ind = [[3 2]
[6 1]
[8 2]]
"""
pv = np.nonzero(v)[0]
dpv = np.diff(pv)
starts = np.nonzero(dpv != 1)[0] + 1
nrows = len(starts)+1
ind = np.zeros((nrows, 2), int)
ind[0, 0] = pv[0]
if nrows > 1:
ind[1:, 0] = pv[starts]
ind[0, 1] = starts[0] - 1
ind[1:-1, 1] = np.diff(starts) - 1
ind[-1, 1] = len(dpv) - len(starts) - sum(ind[:, 1])
ind[:, 1] += 1
return ind
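    # A minimal sketch of how the sparse writers below consume `ind` (reusing
    # the docstring example above; `op4` stands in for an OP4 instance):
    #
    #   v = np.array([0., 0., 0., 7., 5., 0., 6., 0., 2., 3.])
    #   ind = op4._sparse_col_stats(v)      # -> [[3, 2], [6, 1], [8, 2]]
    #   for r, length in ind:               # one run of consecutive non-zeros
    #       string = v[r:r+length]          # per row of `ind`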
def _write_ascii_header(self, op4_file, name, matrix, digits, bigmat=False):
"""
Utility routine that writes the header for ascii matrices.
Parameters
----------
op4_file : file handle
Output of open() using binary mode.
name : string
Name of matrix.
matrix : matrix
Matrix to write.
digits : integer
Number of significant digits after the decimal to include
in the ascii output.
bigmat : bool
If true, matrix is to be written in 'bigmat' format.
Returns
-------
tuple: (cols, multiplier, perline, numlen, numform)
cols : integer
Number of columns in matrix.
multiplier : integer
2 for complex, 1 for real.
perline : integer
Number of values written per row.
numlen : integer
Number of characters per value.
numform : string
Format string for numbers, eg: '%16.9E'.
"""
numlen = digits + 5 + self._expdigits # -1.digitsE-009
perline = 80 // numlen
rows, cols = matrix.shape
if rows == cols:
if np.allclose(matrix.T, matrix):
form = 6
else:
form = 1
else:
form = 2
if np.iscomplexobj(matrix):
mtype = 4
multiplier = 2
else:
mtype = 2
multiplier = 1
if bigmat:
if rows < self._rows4bigmat:
rows = -rows
op4_file.write('{0:8}{1:8}{2:8}{3:8}{4:8s}1P,{5}E{6}.{7}\n'.format(
cols, rows, form, mtype, name.upper(),
perline, numlen, digits))
numform = '%{0}.{1}E'.format(numlen, digits)
return cols, multiplier, perline, numlen, numform
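    # Worked example of the sizing above: with digits=9 and self._expdigits == 2
    # (matching the '%16.9E' example in the docstring), numlen = 9 + 5 + 2 = 16,
    # perline = 80 // 16 = 5, and numform == '%16.9E', so five numbers fit on
    # each 80-character line.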
def _write_ascii(self, op4_file, name, matrix, digits):
"""
Write a matrix to a file in ascii, non-sparse format.
Parameters
----------
op4_file : file handle
Output of open() using text mode.
name : string
Name of matrix.
matrix : matrix
Matrix to write.
digits : integer
Number of significant digits after the decimal to include
in the ascii output.
"""
(cols, multiplier, perline, unused_numlen, numform) = self._write_ascii_header(
op4_file, name, matrix, digits, bigmat=False)
for c in range(cols):
v = matrix[:, c]
if np.any(v):
pv = np.nonzero(v)[0]
s = pv[0, 0]
e = pv[0, -1]
elems = (e - s + 1) * multiplier
op4_file.write('{0:8}{1:8}{2:8}\n'.format(c+1, s+1, elems))
v = np.asarray(v[s:e+1]).flatten()
v.dtype = float
neven = ((elems - 1) // perline) * perline
for i in range(0, neven, perline):
for j in range(perline):
op4_file.write(numform % v[i+j])
op4_file.write('\n')
for i in range(neven, elems):
op4_file.write(numform % v[i])
op4_file.write('\n')
op4_file.write('{0:8}{1:8}{2:8}\n'.format(cols+1, 1, 1))
op4_file.write(numform % 2**.5)
op4_file.write('\n')
def _write_ascii_sparse_nonbigmat(self, op4_file, name, matrix, digits):
"""
Write a matrix to a file in ascii, non-bigmat sparse format.
Parameters
----------
op4_file : file handle
Output of open() using binary mode.
name : string
Name of matrix.
matrix : matrix
Matrix to write.
digits : integer
Number of significant digits after the decimal to include
in the ascii output.
.. note:: if rows > 65535, bigmat is turned on and the
:func:`write_ascii_sparse_bigmat` function is
called ...that's a Nastran rule.
"""
rows, cols = matrix.shape
if rows >= self._rows4bigmat:
self._write_ascii_sparse_bigmat(op4_file, name, matrix, digits)
return
        (cols, multiplier, perline, unused_numlen, numform) = self._write_ascii_header(
            op4_file, name, matrix, digits, bigmat=False)
for c in range(cols):
v = matrix[:, c]
if np.any(v):
v = np.asarray(v).flatten()
ind = self._sparse_col_stats(v)
nwords = ind.shape[0] + 2*sum(ind[:, 1])*multiplier
op4_file.write('{0:8}{1:8}{2:8}\n'.format(c+1, 0, nwords))
for row in ind:
r = row[0]
L = row[1]*2*multiplier
IS = (r+1) + ((L+1) << 16)
op4_file.write('{0:12}\n'.format(IS))
string = v[r:r+row[1]]
string.dtype = float
elems = L // 2
neven = ((elems - 1) // perline) * perline
for i in range(0, neven, perline):
for j in range(perline):
op4_file.write(numform % string[i+j])
op4_file.write('\n')
for i in range(neven, elems):
op4_file.write(numform % string[i])
op4_file.write('\n')
op4_file.write('{0:8}{1:8}{2:8}\n'.format(cols+1, 1, 1))
op4_file.write(numform % 2**.5)
op4_file.write('\n')
def _write_ascii_sparse_bigmat(self, op4_file, name, matrix, digits):
"""
Write a matrix to a file in ascii, bigmat sparse format.
Parameters
----------
op4_file : file handle
Output of open() using binary mode.
name : string
Name of matrix.
matrix : matrix
Matrix to write.
digits : integer
Number of significant digits after the decimal to include
in the ascii output.
"""
(cols, multiplier, perline, unused_numlen, numform) = self._write_ascii_header(
op4_file, name, matrix, digits, bigmat=True)
for c in range(cols):
v = matrix[:, c]
if np.any(v):
v = np.asarray(v).flatten()
ind = self._sparse_col_stats(v)
nwords = 2*ind.shape[0] + 2*sum(ind[:, 1])*multiplier
op4_file.write('{0:8}{1:8}{2:8}\n'.format(c+1, 0, nwords))
for row in ind:
r = row[0]
L = row[1]*2*multiplier
op4_file.write('{0:8}{1:8}\n'.format(L+1, r+1))
string = v[r:r+row[1]]
string.dtype = float
elems = L // 2
neven = ((elems - 1) // perline) * perline
for i in range(0, neven, perline):
for j in range(perline):
op4_file.write(numform % string[i+j])
op4_file.write('\n')
for i in range(neven, elems):
op4_file.write(numform % string[i])
op4_file.write('\n')
op4_file.write('{0:8}{1:8}{2:8}\n'.format(cols+1, 1, 1))
op4_file.write(numform % 2**.5)
op4_file.write('\n')
def _write_binary_header(self, op4_file, name, matrix,
endian, bigmat=False):
"""
Utility routine that writes the header for binary matrices.
Parameters
----------
op4_file : file handle
Output of open() using binary mode.
name : string
Name of matrix.
matrix : matrix
Matrix to write.
endian : string
Endian setting for binary output: '' for native, '>' for
big-endian and '<' for little-endian.
bigmat : bool
If true, matrix is to be written in 'bigmat' format.
Returns
-------
tuple: (cols, multiplier)
cols : integer
Number of columns in matrix.
multiplier : integer
2 for complex, 1 for real.
"""
rows, cols = matrix.shape
if rows == cols:
if np.allclose(matrix.T, matrix):
form = 6
else:
form = 1
else:
form = 2
if np.iscomplexobj(matrix):
mtype = 4
multiplier = 2
else:
mtype = 2
multiplier = 1
# write 1st record (24 bytes: 4 4-byte ints, 1 8-byte string)
name = ('{0:<8}'.format(name.upper())).encode()
if bigmat:
if rows < self._rows4bigmat:
rows = -rows
op4_file.write(struct.pack(endian+'5i8si', 24, cols, rows,
form, mtype, name, 24))
return cols, multiplier
def _write_binary(self, op4_file, name, matrix, endian):
"""
Write a matrix to a file in double precision binary format.
Parameters
----------
op4_file : file handle
Output of open() using binary mode.
name : string
Name of matrix.
matrix : matrix
Matrix to write.
endian : string
Endian setting for binary output: '' for native, '>' for
big-endian and '<' for little-endian.
"""
cols, multiplier = self._write_binary_header(
op4_file, name, matrix, endian)
col_header = struct.Struct(endian+'4i')
col_trailer = struct.Struct(endian+'i')
for c in range(cols):
v = matrix[:, c]
if np.any(v):
pv = np.nonzero(v)[0]
s = pv[0, 0]
e = pv[0, -1]
elems = (e - s + 1) * multiplier
reclen = 3*4 + elems*8
op4_file.write(col_header.pack(reclen, c+1, s+1, 2*elems))
v = np.asarray(v[s:e+1]).flatten()
v.dtype = float
op4_file.write(struct.pack(endian+('%dd' % elems), *v))
op4_file.write(col_trailer.pack(reclen))
reclen = 3*4 + 8
op4_file.write(col_header.pack(reclen, cols+1, 1, 2))
op4_file.write(struct.pack(endian+'d', 2**.5))
op4_file.write(col_trailer.pack(reclen))
def _write_binary_sparse_nonbigmat(self, op4_file, name, matrix, endian):
"""
Write a matrix to a file in double precision binary, non-bigmat
sparse format.
Parameters
----------
op4_file : file handle
Output of open() using binary mode.
name : string
Name of matrix.
matrix : matrix
Matrix to write.
endian : string
Endian setting for binary output: '' for native, '>' for
big-endian and '<' for little-endian.
.. note:: if rows > 65535, bigmat is turned on and the
:func:`write_binary_sparse_bigmat` function is
called ...that's a Nastran rule.
"""
rows, cols = matrix.shape
if rows >= self._rows4bigmat:
self._write_binary_sparse_bigmat(op4_file, name, matrix, endian)
return
cols, multiplier = self._write_binary_header(
op4_file, name, matrix, endian)
col_header = struct.Struct(endian+'4i')
col_trailer = struct.Struct(endian+'i')
for c in range(cols):
v = matrix[:, c]
if np.any(v):
v = np.asarray(v).flatten()
ind = self._sparse_col_stats(v)
nwords = ind.shape[0] + 2*sum(ind[:, 1])*multiplier
reclen = (3 + nwords)*4
op4_file.write(col_header.pack(reclen, c+1, 0, nwords))
for row in ind:
r = row[0]
L = row[1]*2*multiplier
IS = (r+1) + ((L+1) << 16)
op4_file.write(col_trailer.pack(IS))
string = v[r:r+row[1]]
string.dtype = float
op4_file.write(struct.pack(endian+('%dd' % len(string)), *string))
op4_file.write(col_trailer.pack(reclen))
reclen = 3*4 + 8
op4_file.write(col_header.pack(reclen, cols+1, 1, 2))
op4_file.write(struct.pack(endian+'d', 2**.5))
op4_file.write(col_trailer.pack(reclen))
def _write_binary_sparse_bigmat(self, op4_file, name, matrix, endian):
"""
Write a matrix to a file in double precision binary, bigmat
sparse format.
Parameters
----------
op4_file : file handle
Output of open() using binary mode.
name : string
Name of matrix.
matrix : matrix
Matrix to write.
endian : string
Endian setting for binary output: '' for native, '>' for
big-endian and '<' for little-endian.
"""
cols, multiplier = self._write_binary_header(
op4_file, name, matrix, endian, True)
colHeader = struct.Struct(endian+'4i')
colTrailer = struct.Struct(endian+'i')
LrStruct = struct.Struct(endian+'ii')
for c in range(cols):
v = matrix[:, c]
if np.any(v):
v = np.asarray(v).flatten()
ind = self._sparse_col_stats(v)
nwords = 2*ind.shape[0] + 2*sum(ind[:, 1])*multiplier
reclen = (3 + nwords)*4
op4_file.write(colHeader.pack(reclen, c+1, 0, nwords))
for row in ind:
r = row[0]
L = row[1]*2*multiplier
op4_file.write(LrStruct.pack(L+1, r+1))
string = v[r:r+row[1]]
string.dtype = float
op4_file.write(struct.pack(endian+('%dd' % len(string)), *string))
op4_file.write(colTrailer.pack(reclen))
reclen = 3*4 + 8
op4_file.write(colHeader.pack(reclen, cols+1, 1, 2))
op4_file.write(struct.pack(endian+'d', 2**.5))
op4_file.write(colTrailer.pack(reclen))
def dctload(self, filename, namelist=None):
"""
Read all matching matrices from op4 file into dictionary.
Parameters
----------
filename : string
Name of op4 file to read.
namelist : list, string, or None
List of variable names to read in, or string with name of
the single variable to read in, or None. If None, all
matrices are read in.
Returns
-------
dct : dictionary
Keys are the lower-case matrix names and the values are a
tuple of: (matrix, form, mtype).
See also :func:`listload`, :func:`write`, :func:`dir`.
"""
if isinstance(namelist, str):
namelist = [namelist]
self._op4open_read(filename)
dct = {}
try:
if self._ascii:
loadfunc = self._loadop4_ascii
else:
loadfunc = self._loadop4_binary
while 1:
name, X, form, mtype =\
loadfunc(patternlist=namelist)
if not name:
break
dct[name] = X, form, mtype
finally:
self._op4close()
return dct
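    # Minimal usage sketch (the file and matrix names are only illustrative):
    #
    #   o4 = OP4()
    #   dct = o4.dctload('matrices.op4', ['kaa', 'maa'])
    #   kaa, form, mtype = dct['kaa']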
def listload(self, filename, namelist=None):
"""
Read all matching matrices from op4 file into a list; useful
if op4 file has duplicate names.
Parameters
----------
filename : string
Name of op4 file to read.
namelist : list, string, or None
List of variable names to read in, or string with name of
the single variable to read in, or None. If None, all
matrices are read in.
Returns
-------
tuple: (names, matrices, forms, mtypes)
names : list
Lower-case list of matrix names in order as read.
matrices : list
List of matrices in order as read.
forms : list
List of integers specifying the Nastran form of each
matrix.
mtypes : list
List of integers specifying the Nastran type of each
matrix.
See also :func:`dctload`, :func:`write`, :func:`dir`.
"""
if isinstance(namelist, str):
namelist = [namelist]
self._op4open_read(filename)
names = []
matrices = []
forms = []
mtypes = []
try:
if self._ascii:
loadfunc = self._loadop4_ascii
else:
loadfunc = self._loadop4_binary
while 1:
name, X, form, mtype =\
loadfunc(patternlist=namelist)
if not name:
break
names.append(name)
matrices.append(X)
forms.append(form)
mtypes.append(mtype)
finally:
self._op4close()
return names, matrices, forms, mtypes
def dir(self, filename, verbose=True):
"""
Directory of all matrices in op4 file.
Parameters
----------
filename : string
Name of op4 file to read.
verbose : bool
If true, directory will be printed to screen.
Returns
-------
tuple: (names, sizes, forms, mtypes)
names : list
Lower-case list of matrix names in order as read.
sizes : list
List of sizes [(r1, c1), (r2, c2), ...], for each
matrix.
forms : list
List of integers specifying the Nastran form of each
matrix.
mtypes : list
List of integers specifying the Nastran type of each
matrix.
See also :func:`dctload`, :func:`listload`, :func:`write`.
"""
self._op4open_read(filename)
names = []
sizes = []
forms = []
mtypes = []
try:
if self._ascii:
loadfunc = self._loadop4_ascii
else:
loadfunc = self._loadop4_binary
while 1:
name, X, form, mtype =\
loadfunc(listonly=True)
if not name:
break
names.append(name)
sizes.append(X)
forms.append(form)
mtypes.append(mtype)
if verbose:
for n, s, f, m in zip(names, sizes, forms, mtypes):
print('{0:8}, {1:6} x {2:<6}, form={3}, mtype={4}'
.format(n, s[0], s[1], f, m))
finally:
self._op4close()
return names, sizes, forms, mtypes
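    # Hedged usage sketch: OP4().dir('matrices.op4') prints one line per
    # matrix, roughly "kaa     ,    100 x 100   , form=6, mtype=2", and returns
    # the same information as the four lists described above (the matrix name
    # and sizes here are only illustrative).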
def write(self, filename, names, matrices=None,
binary=True, digits=16, endian='',
sparse=''):
"""
Write op4 file.
Parameters
----------
filename : string
Name of file.
names : string or list or dictionary
Matrix name or list of matrix names or dictionary indexed
by the names.
matrices : array or list
2d ndarray or list of 2d ndarrays. Ignored if `names` is
a dictionary.
binary : bool
If true, a double precision binary file is written;
otherwise an ascii file is created.
digits : integer
Number of significant digits after the decimal to include
in the ascii output. Ignored for binary files.
endian : string
Endian setting for binary output: '' for native, '>' for
big-endian and '<' for little-endian.
sparse : string
Empty or 'bigmat' or 'nonbigmat'. If set to 'bigmat' or
'nonbigmat', that sparse format is selected. Note that if
the number of rows is > 65535, then both the 'bigmat' and
'nonbigmat' options become 'bigmat'.
Returns
-------
None.
.. note:: To write multiple matrices that have the same name
or to write the matrices in a specific order, `names`
must be a list, not a dictionary. If a dictionary,
the matrices are written in alphabetical order.
See also :func:`dctload`, :func:`listload`, :func:`dir`.
Examples
--------
To write m, k, b, in that order to an ascii file::
import numpy as np
import op4
o4 = op4.OP4()
m = np.array([1, 2])
k = np.array([3, 5])
b = np.array([4, 6])
names = ['m', 'k', 'b']
values = [eval(v) for v in names]
o4.write('mkb.op4', names, values, False, 9)
Or, if you don't care about the order, you could create the
dictionary input:
::
o4.write('mkb.op4', dict(m=m, k=k, b=b),
binary=False, digits=9)
"""
if isinstance(names, dict):
k = sorted(names.keys())
matrices = [names[j] for j in k]
names = k
else:
if not isinstance(names, list):
names = [names]
if not isinstance(matrices, list):
matrices = [matrices]
# ensure double precision 2d arrays:
def ensure_2d_dp(m):
"""Ensures 2d double precision array"""
m = np.asmatrix(m)
if np.iscomplexobj(m):
if m.dtype != np.complex128:
return m.astype(np.complex128)
elif m.dtype != np.float64:
return m.astype(np.float64)
return m
matrices = [ensure_2d_dp(m) for m in matrices]
if binary:
if sparse == '':
wrtfunc = self._write_binary
elif sparse == 'bigmat':
wrtfunc = self._write_binary_sparse_bigmat
elif sparse == 'nonbigmat':
wrtfunc = self._write_binary_sparse_nonbigmat
else:
raise ValueError('invalid sparse option')
with open(filename, 'wb') as op4_file:
for name, matrix in zip(names, matrices):
wrtfunc(op4_file, name, matrix, endian)
else:
if sparse == '':
wrtfunc = self._write_ascii
elif sparse == 'bigmat':
wrtfunc = self._write_ascii_sparse_bigmat
elif sparse == 'nonbigmat':
wrtfunc = self._write_ascii_sparse_nonbigmat
else:
raise ValueError('invalid sparse option')
with open(filename, 'w') as op4_file:
for name, matrix in zip(names, matrices):
wrtfunc(op4_file, name, matrix, digits)
| 35.742901 | 87 | 0.4733 |
fe6e9e3a490463a17c1c6cccef23170355a5fe39 | 4,977 | py | Python | desktop/libs/notebook/src/notebook/sql_utils.py | maulikjs/hue | 59ac879b55bb6fb26ecb4e85f4c70836fc21173f | [
"Apache-2.0"
] | 1 | 2020-05-17T06:40:33.000Z | 2020-05-17T06:40:33.000Z | desktop/libs/notebook/src/notebook/sql_utils.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 4 | 2021-03-11T04:02:00.000Z | 2022-03-27T08:31:56.000Z | desktop/libs/notebook/src/notebook/sql_utils.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1 | 2017-11-09T09:31:28.000Z | 2017-11-09T09:31:28.000Z | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
import re
import StringIO
from desktop.lib.i18n import smart_str
# Note: Might be replaceable by sqlparse.split
def get_statements(hql_query):
hql_query = strip_trailing_semicolon(hql_query)
hql_query_sio = StringIO.StringIO(hql_query)
statements = []
for (start_row, start_col), (end_row, end_col), statement in split_statements(hql_query_sio.read()):
statements.append({
'start': {
'row': start_row,
'column': start_col
},
'end': {
'row': end_row,
'column': end_col
},
'statement': strip_trailing_semicolon(statement.rstrip())
})
return statements
def get_current_statement(snippet):
# Multiquery, if not first statement or arrived to the last query
should_close = False
handle = snippet['result'].get('handle', {})
statement_id = handle.get('statement_id', 0)
statements_count = handle.get('statements_count', 1)
statements = get_statements(snippet['statement'])
statement_id = min(statement_id, len(statements) - 1) # In case of removal of statements
previous_statement_hash = compute_statement_hash(statements[statement_id]['statement'])
non_edited_statement = previous_statement_hash == handle.get('previous_statement_hash') or not handle.get('previous_statement_hash')
if handle.get('has_more_statements'):
should_close = True
if non_edited_statement:
statement_id += 1
else:
if non_edited_statement:
statement_id = 0
if statements_count != len(statements):
statement_id = min(statement_id, len(statements) - 1)
resp = {
'statement_id': statement_id,
'has_more_statements': statement_id < len(statements) - 1,
'statements_count': len(statements),
'previous_statement_hash': compute_statement_hash(statements[statement_id]['statement'])
}
resp.update(statements[statement_id])
return should_close, resp
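# Note on get_current_statement above: statement_id advances to the next
# statement only when the statement at the saved index still hashes to the
# handle's 'previous_statement_hash' (or no hash was recorded yet); once the
# last statement finishes unedited it wraps back to 0, and an edited statement
# is re-run at its current index. should_close is True whenever the previous
# handle still had statements pending, so the caller can close it first.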
def compute_statement_hash(statement):
return hashlib.sha224(smart_str(statement)).hexdigest()
def split_statements(hql):
"""
Split statements at semicolons ignoring the ones inside quotes and comments.
The comment symbols that come inside quotes should be ignored.
"""
statements = []
current = ''
prev = ''
between_quotes = None
is_comment = None
start_row = 0
start_col = 0
end_row = 0
end_col = len(hql) - 1
if hql.find(';') in (-1, len(hql) - 1):
return [((start_row, start_col), (end_row, end_col), hql)]
lines = hql.splitlines()
for row, line in enumerate(lines):
end_col = 0
end_row = row
if start_row == row and line.strip() == '': # ignore leading whitespace rows
start_row += 1
elif current.strip() == '': # reset start_row
start_row = row
start_col = 0
for col, c in enumerate(line):
current += c
if c in ('"', "'") and prev != '\\' and is_comment is None:
if between_quotes == c:
between_quotes = None
elif between_quotes is None:
between_quotes = c
elif c == '-' and prev == '-' and between_quotes is None and is_comment is None:
is_comment = True
elif c == ';':
if between_quotes is None and is_comment is None:
current = current.strip()
# Strip off the trailing semicolon
current = current[:-1]
if len(current) > 1:
statements.append(((start_row, start_col), (row, col + 1), current))
start_col = col + 1
current = ''
# This character holds no significance if it was escaped within a string
if prev == '\\' and between_quotes is not None:
c = ''
prev = c
end_col = col
is_comment = None
prev = os.linesep
if current != '':
current += os.linesep
if current and current != ';':
current = current.strip()
statements.append(((start_row, start_col), (end_row, end_col+1), current))
return statements
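# Worked example for split_statements above (row/column positions are
# 0-indexed; semicolons inside quotes or '--' comments do not end a statement):
#
#   split_statements("SELECT 1;\nSELECT ';' FROM t;")
#   # -> [((0, 0), (0, 9), 'SELECT 1'),
#   #     ((1, 0), (1, 18), "SELECT ';' FROM t")]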
_SEMICOLON_WHITESPACE = re.compile(";\s*$")
def strip_trailing_semicolon(query):
"""As a convenience, we remove trailing semicolons from queries."""
s = _SEMICOLON_WHITESPACE.split(query, 2)
if len(s) > 1:
assert len(s) == 2
assert s[1] == ''
return s[0] | 31.700637 | 134 | 0.677316 |
b20a20e72db03d18c674aae1701945a468f6ff96 | 16,903 | py | Python | Assets/Python/Contrib/CvStrategyOverlay.py | macaurther/DOCUSA | 40586727c351d1b1130c05c2d4648cca3a8bacf5 | [
"MIT"
] | 93 | 2015-11-20T04:13:36.000Z | 2022-03-24T00:03:08.000Z | Assets/Python/Contrib/CvStrategyOverlay.py | macaurther/DOCUSA | 40586727c351d1b1130c05c2d4648cca3a8bacf5 | [
"MIT"
] | 206 | 2015-11-09T00:27:15.000Z | 2021-12-04T19:05:18.000Z | Assets/Python/Contrib/CvStrategyOverlay.py | dguenms/Dawn-of-Civilization | 1c4f510af97a869637cddb4c0859759158cea5ce | [
"MIT"
] | 117 | 2015-11-08T02:43:46.000Z | 2022-02-12T06:29:00.000Z | #-------------------------------------------------------------------------------
# Name: CvStrategyOverlay.py
# Purpose: Draws the strategy overlay itself.
# CvOverlayScreen.py does the editing.
# Contains:
# -Dot Mapper
# -Categorized signs
#
# Author: Del69, EmperorFool
#
# Created: 11/12/2008
#-------------------------------------------------------------------------------
from CvPythonExtensions import *
import BugCore
import BugPath
import BugUtil
import CvOverlayScreenUtils
import PlayerUtil
import SdToolKit
COLOR_KEYS = None
PALETTE_WIDTH = None
gc = CyGlobalContext()
StratLayerOpt = BugCore.game.StrategyOverlay
g_layers = {}
def init(paletteWidth=3, paletteColors=None):
global COLOR_KEYS, PALETTE_WIDTH
# setup palette width
if paletteWidth:
PALETTE_WIDTH = paletteWidth
else:
PALETTE_WIDTH = 10
# setup palette colors
if paletteColors:
COLOR_KEYS = paletteColors
else:
PALETTE_WIDTH = 10 # override because it has 127 colors
COLOR_KEYS = []
try:
for index in range(200):
info = gc.getColorInfo(index)
COLOR_KEYS.append(info.getType())
except:
pass
# create layers
DotMapLayer()
def getLayer(id):
return g_layers[id]
def callEachLayer(func, *args):
for layer in g_layers.itervalues():
func(layer, *args)
## Event Handlers
def onGameStart(argsList):
def callReset(layer):
layer.reset()
callEachLayer(callReset)
def onLoad(argsList):
def callRead(layer):
layer.read()
callEachLayer(callRead)
if StratLayerOpt.isShowDotMap():
getDotMap().redrawCities()
def onPreSave(argsList):
def callWrite(layer):
layer.write()
callEachLayer(callWrite)
def onBeginActivePlayerTurn(args):
def callBeginActivePlayerTurn(layer, ePlayer):
layer.onBeginActivePlayerTurn(ePlayer)
callEachLayer(callBeginActivePlayerTurn, args[0])
def onSwitchHotSeatPlayer(args):
def callSwitchHotSeatPlayer(layer, ePlayer):
layer.onSwitchHotSeatPlayer(ePlayer)
callEachLayer(callSwitchHotSeatPlayer, args[0])
MSG_ADD_CITY = 500
MSG_REMOVE_CITY = 501
def onModNetMessage(args):
iData1, iData2, iData3, iData4, iData5 = args
if iData1 == MSG_ADD_CITY:
getDotMap().addCityMessage(iData2, iData3, iData4, iData5)
elif iData1 == MSG_REMOVE_CITY:
getDotMap().removeCityMessage(iData2, iData3)
else:
return 0
return 1
def onEnabledOptionChanged(option, value):
pass
## Base Strategy Layer Class
class StrategyLayer(object):
"""
Provides common functionality for all of the strategy layers.
"""
def __init__(self, id):
self.MOD_SAVE_ID = "StrategyOverlay"
self.INVISIBLE_COLOR = NiColorA(0, 0, 0, 0)
self.id = id
self.visible = False
self.editing = False
self.dirty = False
g_layers[id] = self
self.reset()
def reset(self):
"""
Resets the data to a blank state and clears the dirty flag.
"""
self.dirty = False
def read(self):
"""
Reads the data from the game and clears the dirty flag.
"""
self.dirty = False
def write(self):
"""
Writes the data to the game and clears the dirty flag.
"""
self.dirty = False
def toggleVisibility(self):
if self.visible:
self.hide()
else:
self.show()
def show(self):
if not self.visible:
self.visible = True
return True
return False
def hide(self):
if self.visible:
self.freeze()
self.visible = False
return True
return False
def toggleEditing(self):
if not self.editing:
self.edit()
else:
self.freeze()
def edit(self):
if not self.editing:
self.show()
self.editing = True
return True
return False
def freeze(self):
if self.editing:
self.editing = False
return True
return False
def onBeginActivePlayerTurn(self, ePlayer):
pass
def onSwitchHotSeatPlayer(self, ePlayer):
pass
## ----------------------------------------------------------------------
## DOT MAP
## ----------------------------------------------------------------------
DOTMAP_LAYER = "DotMap"
X, Y = 0, 1 # used in point tuples instead of creating a new class
g_DotMap = None
class City:
"""
Holds the data for a single dot-mapped city.
"""
def __init__(self, point, color, layer):
self.point = point
self.color = color
self.layer = layer
def __eq__(self, other):
return self.point == other.point and self.color == other.color
def __str__(self):
return "(%d,%d) on %d" % (self.point[X], self.point[Y], self.layer)
def isAt(self, point):
return self.point == point
def samePoint(self, other):
return self.point == other.point
def sameColor(self, other):
return self.color == other.color
def sameLayer(self, other):
return self.layer == other.layer
def getDotMap():
global g_DotMap
if g_DotMap is None:
BugUtil.error("CvStrategyOverlay has not been initialized")
return g_DotMap
def hideDotMap(args=None):
getDotMap().hide()
StratLayerOpt.setShowDotMap(False)
def toggleDotMapVisibility(args=None):
getDotMap().toggleVisibility()
StratLayerOpt.setShowDotMap(getDotMap().visible)
def toggleDotMapEditMode(args=None):
getDotMap().toggleEditing()
if not getDotMap().editing and not StratLayerOpt.isShowDotMap():
getDotMap().hide()
def onShowDotMapOptionChanged(option, value):
if value:
getDotMap().show()
else:
getDotMap().hide()
def onDotMapOptionChanged(option, value):
getDotMap().optionChanged(option, value)
class DotMapLayer(StrategyLayer):
"""
Draws city crosses of different colors so the user can create a dot-map.
"""
def __init__(self):
super(DotMapLayer, self).__init__(DOTMAP_LAYER)
global g_DotMap
g_DotMap = self
# constants
self.CITY_SAVE_ID = "CityDataDict"
self.HIGHLIGHT_CROSS_LAYER = 8
self.FIRST_CROSS_LAYER = 9
self.NUM_CROSS_LAYERS = 36 #len(COLOR_KEYS)
self.DOT_LAYER = PlotLandscapeLayers.PLOT_LANDSCAPE_LAYER_NUMPAD_HELP
self.NO_DOT_STYLE = PlotStyles.PLOT_STYLE_NONE
self.MAX_DOT_STYLE = PlotStyles.PLOT_STYLE_WAVES
self.BFC_OFFSETS = []
for x in range(-2, 3):
for y in range(-2, 3):
if abs(x) != 2 or abs(y) != 2:
self.BFC_OFFSETS.append((x, y))
# default options
self.CROSS_ALPHA = 50.0
self.DOT_ALPHA = 50.0
self.HIGHLIGHT_CROSS_ALPHA = 100.0
self.HIGHLIGHT_DOT_ALPHA = 100.0
self.DRAW_DOTS = True
self.DOT_STYLE = PlotStyles.PLOT_STYLE_DOT_TARGET
self.readOptions()
# state
self.highlightedCity = None
def reset(self):
self.cities = {}
self.dirty = False
def read(self):
data = SdToolKit.sdGetGlobal(self.MOD_SAVE_ID, self.CITY_SAVE_ID)
self.clearCityLayers()
if data is not None:
self.cities = self.updateData(data)
self.dirty = False
else:
self.reset()
def updateData(self, data):
"""
Upgrade previous data formats to latest format.
"""
if len(data) == 0:
# empty, don't care
return data
for key, value in data.iteritems():
if isinstance(key, int):
# data in latest format
return data
else:
# old format, convert below
break
# find first living, human player and assign all data to them
# if none found, assign to player 0
for player in PlayerUtil.players(alive=True, human=True):
ePlayer = player.getID()
break
else:
ePlayer = 0
newData = {}
cities = {}
newData[ePlayer] = cities
for point, (color, layer) in data.iteritems():
# use new point-based layer scheme
grid = 6
layer = (point[X] % grid) * grid + (point[Y] % grid)
cities[point] = City(point, color, layer)
return newData
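    # Sketch of the conversion above (values are illustrative): pre-player saves
    # keyed cities by plot only, e.g. {(34, 7): (iColor, iLayer)}, and they are
    # migrated to {ePlayer: {(34, 7): City((34, 7), iColor, newLayer)}}, where
    # newLayer is recomputed from the plot coordinates modulo the 6x6 grid.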
def write(self):
if self.dirty:
SdToolKit.sdSetGlobal(self.MOD_SAVE_ID, self.CITY_SAVE_ID, self.cities)
self.dirty = False
def show(self):
if super(DotMapLayer, self).show():
self.redrawCities()
def hide(self):
if super(DotMapLayer, self).hide():
self.clearCityLayers()
def edit(self):
if super(DotMapLayer, self).edit():
CvOverlayScreenUtils.showOverlayScreen()
def freeze(self):
if super(DotMapLayer, self).freeze():
self.unhighlightCity()
CvOverlayScreenUtils.hideOverlayScreen()
def onBeginActivePlayerTurn(self, ePlayer):
if StratLayerOpt.isShowDotMap():
self.show()
def onSwitchHotSeatPlayer(self, ePlayer):
self.hide()
def hasCities(self, ePlayer):
return ePlayer in self.cities
def hasCity(self, ePlayer, point):
return self.hasCities(ePlayer) and point in self.cities[ePlayer]
def getCities(self, ePlayer):
if self.hasCities(ePlayer):
return self.cities[ePlayer]
cities = {}
self.cities[ePlayer] = cities
return cities
def getCity(self, ePlayer, point):
if self.hasCities(ePlayer):
cities = self.cities[ePlayer]
if point in cities:
return cities[point]
return None
def iterCities(self, ePlayer):
"""
Iterates over the player's cities.
"""
if self.hasCities(ePlayer):
for city in self.getCities(ePlayer).itervalues():
yield city
def addCityAt(self, point, color, layer):
"""
Sends a message to add a city for the active player at the given point.
"""
CyMessageControl().sendModNetMessage(MSG_ADD_CITY, PlayerUtil.getActivePlayerID(), point[X] * 1000 + point[Y], color, layer)
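    # The plot is packed into a single int as x * 1000 + y (e.g. plot (34, 7)
    # travels as 34007) because sendModNetMessage only carries integers;
    # addCityMessage below recovers it with xy / 1000 and xy % 1000, which
    # assumes map coordinates stay below 1000.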
def addCityMessage(self, ePlayer, xy, color, layer):
"""
Processes a message to add a city.
"""
x = xy / 1000
y = xy % 1000
city = City((x, y), color, layer)
self.addCity(ePlayer, city)
def addCity(self, ePlayer, city):
"""
Adds the city to the data set and draws its dot and cross.
"""
if self.hasCity(ePlayer, city.point):
oldCity = self.getCity(ePlayer, city.point)
if city == oldCity:
return
BugUtil.debug("DotMap - replacing city at %s", city.point)
self.removeCity(ePlayer, oldCity)
BugUtil.debug("DotMap - adding city %s", city)
self.getCities(ePlayer)[city.point] = city
self.dirty = True
if ePlayer == PlayerUtil.getActivePlayerID():
self.drawCity(city, self.CROSS_ALPHA, self.DOT_ALPHA)
def removeCityAt(self, point):
"""
Sends a message to remove the active player's city at the given point.
"""
ePlayer = PlayerUtil.getActivePlayerID()
if self.hasCity(ePlayer, point):
CyMessageControl().sendModNetMessage(MSG_REMOVE_CITY, ePlayer, point[X] * 1000 + point[Y], -1, -1)
else:
self.freeze()
def removeCityMessage(self, ePlayer, xy):
"""
Processes a message to remove a city.
"""
x = xy / 1000
y = xy % 1000
self.removeCity(ePlayer, self.getCity(ePlayer, (x, y)))
def removeCity(self, ePlayer, city):
"""
Removes the city from the data set and erases its dot and cross.
"""
if city:
BugUtil.debug("DotMap - removing city %s", city)
del self.getCities(ePlayer)[city.point]
self.dirty = True
if ePlayer == PlayerUtil.getActivePlayerID():
self.redrawCrosses(city.layer)
self.eraseDot(city, self.DOT_ALPHA)
else:
BugUtil.warn("City doesn't exist")
def highlightCity(self, point, color):
"""
Highlights the given city location by drawing it using the given color on the highlight layer.
Unhighlights the currently highlighted city if there is one.
If there is no city there (N), the new city is drawn (C).
If the city is on the same layer (S), nothing is done (N). --> WC
If the city is on a different layer (D), the city's layer is redrawn without it (W) and the new city is drawn (C).
"""
city = City(point, color, self.HIGHLIGHT_CROSS_LAYER)
if self.highlightedCity:
if self.highlightedCity == city:
return
else:
self.unhighlightCity()
self.highlightedCity = city
ePlayer = PlayerUtil.getActivePlayerID()
existingCity = self.getCity(ePlayer, point)
if existingCity is not None:
self.redrawCrosses(existingCity.layer, point)
self.eraseDot(existingCity, self.DOT_ALPHA)
self.drawCross(city, self.HIGHLIGHT_CROSS_ALPHA)
def unhighlightCity(self):
"""
Removes the highlight from the existing city location if there is one.
If there is no city there (N), the current layer is redrawn (L) and the dot is erased (d).
If the city is on the same layer (S), nothing is done (N). --> LC
If the city is on a different layer (D), the current layer is redrawn (L) and the city is drawn (C).
"""
if self.highlightedCity:
point = self.highlightedCity.point
self.clearHighlightCrossLayer()
ePlayer = PlayerUtil.getActivePlayerID()
city = self.getCity(ePlayer, point)
if city is not None:
self.drawCity(city, self.CROSS_ALPHA, self.DOT_ALPHA)
self.highlightedCity = None
def redrawCities(self):
"""
Erases all city layers and draws all of the cities.
"""
self.clearCityLayers()
self.drawCities()
def redrawCrosses(self, layer, skip=None):
"""
Erases the given layer and draws all city crosses in that layer.
"""
self.clearCrossLayer(layer)
self.drawCrosses(layer, skip)
def redrawDots(self):
"""
Erases and redraws all city dots as they are all in the same layer.
"""
self.clearDotLayer()
self.drawDots()
def drawCities(self, skip=None):
"""
Draws all of the cities except skip, if given.
"""
crossAlpha = self.CROSS_ALPHA
dotAlpha = self.DOT_ALPHA
for city in self.iterCities(PlayerUtil.getActivePlayerID()):
if not city.isAt(skip):
self.drawCity(city, crossAlpha, dotAlpha)
def drawCrosses(self, layer=None, skip=None):
"""
Draws the cross for every city in the given layer.
"""
crossAlpha = self.CROSS_ALPHA
for city in self.iterCities(PlayerUtil.getActivePlayerID()):
if not city.isAt(skip):
if layer is None or layer == city.layer:
self.drawCross(city, crossAlpha)
def drawDots(self, skip=None):
"""
Draws the dot for every city.
"""
dotAlpha = self.DOT_ALPHA
for city in self.iterCities(PlayerUtil.getActivePlayerID()):
if not city.isAt(skip):
self.drawDot(city, dotAlpha)
def drawCity(self, city, crossAlpha, dotAlpha):
"""
Draws the cross and dot for a single city.
"""
self.drawCross(city, crossAlpha)
self.drawDot(city, dotAlpha)
def drawCross(self, city, alpha):
"""
Draws the cross for a single city.
"""
x, y = city.point
color = gc.getColorInfo(city.color).getType()
layer = city.layer
for dx, dy in self.BFC_OFFSETS:
CyEngine().fillAreaBorderPlotAlt(x + dx, y + dy, layer, color, alpha)
def drawDot(self, city, alpha):
"""
Draws the dot for a single city.
"""
if self.DRAW_DOTS:
x, y = city.point
colorInfo = gc.getColorInfo(city.color)
if BugPath.isMac():
color = colorInfo.getColor()
CyEngine().addColoredPlot(x, y, NiColorA(color.r, color.g, color.b, alpha), self.DOT_LAYER)
else:
CyEngine().addColoredPlotAlt(x, y, self.DOT_STYLE, self.DOT_LAYER, colorInfo.getType(), alpha)
def eraseDot(self, city, alpha):
"""
Erases the dot for a single city.
"""
if self.DRAW_DOTS:
x, y = city.point
if BugPath.isMac():
CyEngine().addColoredPlot(x, y, self.INVISIBLE_COLOR, self.DOT_LAYER)
else:
CyEngine().addColoredPlotAlt(x, y, self.NO_DOT_STYLE, self.DOT_LAYER, "COLOR_BLACK", alpha)
def clearCityLayers(self):
"""
Erases all city crosses and dots.
"""
self.clearHighlightCrossLayer()
for index in range(self.NUM_CROSS_LAYERS):
self.clearCrossLayer(index + self.FIRST_CROSS_LAYER)
self.clearDotLayer()
def clearHighlightCrossLayer(self):
"""
Clears the indexed border layer.
"""
self.clearCrossLayer(self.HIGHLIGHT_CROSS_LAYER)
def clearCrossLayer(self, layer):
"""
Clears the indexed border layer.
"""
CyEngine().clearAreaBorderPlots(layer)
def clearDotLayer(self):
"""
Clears all the dots from screen.
"""
CyEngine().clearColoredPlots(self.DOT_LAYER)
def percentToAlpha(self, percent):
return min(100, max(0, percent)) / 100.0
def readOptions(self):
self.CROSS_ALPHA = self.percentToAlpha(StratLayerOpt.getDotMapBrightness())
self.DOT_ALPHA = self.percentToAlpha(StratLayerOpt.getDotMapBrightness())
self.HIGHLIGHT_CROSS_ALPHA = self.percentToAlpha(StratLayerOpt.getDotMapHighlightBrightness())
self.HIGHLIGHT_DOT_ALPHA = self.percentToAlpha(StratLayerOpt.getDotMapHighlightBrightness())
self.DRAW_DOTS = StratLayerOpt.isDotMapDrawDots()
self.DOT_STYLE = min(self.MAX_DOT_STYLE, max(0, StratLayerOpt.getDotMapDotIcon()))
def optionChanged(self, option, value):
"""
Redraws the layer if it is currently visible.
"""
self.unhighlightCity()
self.readOptions()
if self.visible:
self.redrawCities()
| 26.830159 | 127 | 0.666391 |
63cea0211c2733193283698cf6f102f466590638 | 1,936 | py | Python | aliyun-python-sdk-dds/aliyunsdkdds/request/v20151201/DescribeRegionsRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-dds/aliyunsdkdds/request/v20151201/DescribeRegionsRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-dds/aliyunsdkdds/request/v20151201/DescribeRegionsRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeRegionsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dds', '2015-12-01', 'DescribeRegions','Dds')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | 35.851852 | 74 | 0.775826 |
776195424714bc7f96f031bb5460be553bdfedea | 2,142 | py | Python | google/colab/__init__.py | NIP-Data-Computation/colabtools | 75a58cb540bbdc9e8ce5a5fd71698868486012bf | [
"Apache-2.0"
] | null | null | null | google/colab/__init__.py | NIP-Data-Computation/colabtools | 75a58cb540bbdc9e8ce5a5fd71698868486012bf | [
"Apache-2.0"
] | null | null | null | google/colab/__init__.py | NIP-Data-Computation/colabtools | 75a58cb540bbdc9e8ce5a5fd71698868486012bf | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Colab Python APIs."""
from __future__ import absolute_import as _
from __future__ import division as _
from __future__ import print_function as _
from google.colab import _import_hooks
from google.colab import _installation_commands
from google.colab import _reprs
from google.colab import _shell_customizations
from google.colab import _system_commands
from google.colab import _tensorflow_magics
from google.colab import auth
from google.colab import data_table
from google.colab import drive
from google.colab import files
from google.colab import output
from google.colab import snippets
from google.colab import widgets
__all__ = [
'auth', 'data_table', 'drive', 'files', 'output', 'snippets', 'widgets'
]
__version__ = '0.0.1a2'
def _jupyter_nbextension_paths():
# See:
# http://testnb.readthedocs.io/en/latest/examples/Notebook/Distributing%20Jupyter%20Extensions%20as%20Python%20Packages.html#Defining-the-server-extension-and-nbextension
return [{
'dest': 'google.colab',
'section': 'notebook',
'src': 'resources',
}]
def load_ipython_extension(ipython):
"""Called by IPython when this module is loaded as an IPython extension."""
_shell_customizations.initialize()
_system_commands._register_magics(ipython) # pylint:disable=protected-access
_installation_commands._register_magics(ipython) # pylint:disable=protected-access
_import_hooks._register_hooks() # pylint:disable=protected-access
_tensorflow_magics._register_magics(ipython) # pylint:disable=protected-access
_reprs.enable_string_repr()
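# Hedged usage note: IPython calls load_ipython_extension() automatically when
# this package is loaded as an extension, e.g. via `%load_ext google.colab`.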
| 36.305085 | 172 | 0.782446 |
1473b6402fa73109c27b73ec080948f4f758576f | 1,075 | py | Python | klumpenplot.py | wklumpen/klumpenplot | 1605470208a8e12f7faf71424b2a5a2286c86a30 | [
"MIT"
] | null | null | null | klumpenplot.py | wklumpen/klumpenplot | 1605470208a8e12f7faf71424b2a5a2286c86a30 | [
"MIT"
] | null | null | null | klumpenplot.py | wklumpen/klumpenplot | 1605470208a8e12f7faf71424b2a5a2286c86a30 | [
"MIT"
] | null | null | null | import sys
import time
import PyQt5.QtWidgets as W
import PyQt5.QtGui as G
import PyQt5.QtSql as S
import PyQt5.QtCore as C
import PyQt5.Qt as Q
from gui.MCRouteUI import Ui_MainWindow
class MCRouteMainWindow(W.QMainWindow, Ui_MainWindow):
def __init__(self, app, parent=None):
super(MCRouteMainWindow, self).__init__(parent)
self.app = app
self.setupUi(self)
self.statusbar.showMessage("Welcome to MCRoute")
if __name__ == "__main__":
app = W.QApplication(sys.argv)
mw = MCRouteMainWindow(app)
raleway = G.QFont("Raleway", 10)
app.setFont(raleway)
# Create and display the splash screen
splash_pix = G.QPixmap('resources/images/mcroute-splash.png')
splash = W.QSplashScreen(splash_pix, C.Qt.WindowStaysOnTopHint)
splash.setMask(splash_pix.mask())
# splash.show()
app.processEvents()
icon = G.QIcon()
icon.addPixmap(G.QPixmap("resources/images/favicon.ico"), G.QIcon.Normal, G.QIcon.Off)
mw.setWindowIcon(icon)
mw.showMaximized()
splash.finish(mw)
sys.exit(app.exec_()) | 29.054054 | 90 | 0.706047 |
77e5df053a5b4994031bfdb6c4ab428e58f5e078 | 5,073 | py | Python | main/migrations/0020_auto_20211119_1542.py | uktrade/contractor-approval | 316ba7b2321f5aeea6dc83dcdaaadda887275f4d | [
"MIT"
] | null | null | null | main/migrations/0020_auto_20211119_1542.py | uktrade/contractor-approval | 316ba7b2321f5aeea6dc83dcdaaadda887275f4d | [
"MIT"
] | 1 | 2022-02-18T09:17:41.000Z | 2022-02-18T09:17:41.000Z | main/migrations/0020_auto_20211119_1542.py | uktrade/resourcing-approval | 316ba7b2321f5aeea6dc83dcdaaadda887275f4d | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-19 15:42
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("chartofaccount", "0001_initial"),
("main", "0019_auto_20211119_1257"),
]
operations = [
migrations.RemoveField(
model_name="interimrequest",
name="cost_centre_code",
),
migrations.RemoveField(
model_name="interimrequest",
name="end_date",
),
migrations.RemoveField(
model_name="interimrequest",
name="name_of_contractor",
),
migrations.RemoveField(
model_name="interimrequest",
name="new_requirement",
),
migrations.RemoveField(
model_name="interimrequest",
name="project_name_role_title",
),
migrations.RemoveField(
model_name="interimrequest",
name="start_date",
),
migrations.RemoveField(
model_name="resourcingrequest",
name="days_required",
),
migrations.RemoveField(
model_name="resourcingrequest",
name="max_day_rate",
),
migrations.RemoveField(
model_name="resourcingrequest",
name="min_day_rate",
),
migrations.RemoveField(
model_name="resourcingrequest",
name="project_fees",
),
migrations.CreateModel(
name="FinancialInformation",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"area_of_work",
models.CharField(
choices=[
("investment", "Investment"),
("trade", "Trade"),
("ddat", "DDaT"),
("corporate", "Corporate"),
],
max_length=255,
verbose_name="Area of work for VAT reclaim",
),
),
(
"total_budget",
models.IntegerField(
verbose_name="Total Budget, including sourcing fees, expenses and interim labour cost"
),
),
(
"timesheet_and_expenses_validator",
models.CharField(
max_length=255,
verbose_name="Name of the Timesheet & Expenses Validator",
),
),
(
"min_day_rate",
models.IntegerField(
blank=True,
null=True,
verbose_name="Minimum anticipated day rate",
),
),
(
"max_day_rate",
models.IntegerField(
blank=True,
null=True,
verbose_name="Maximum anticipated day rate",
),
),
(
"days_required",
models.IntegerField(
blank=True,
null=True,
verbose_name="Total number of days required",
),
),
(
"project_fees",
models.IntegerField(
blank=True,
null=True,
verbose_name="Total project fees (exclude VAT)",
),
),
(
"cost_centre_code",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="chartofaccount.costcentre",
verbose_name="Cost Centre/Team",
),
),
(
"programme_code",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="chartofaccount.programmecode",
),
),
(
"resourcing_request",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="financial_information",
to="main.resourcingrequest",
),
),
],
),
]
| 33.156863 | 110 | 0.400946 |
a74465a816903d865cde9944e6d1b56272bbd9a9 | 2,629 | py | Python | tagging/backend/taggingbai.py | unlimitedaki/sudaNLP-Olympic-Winter-Games-taggingSystem-backend | dcc0bbc1046c8509f20477c81ae03cf0367d6c04 | [
"MIT"
] | 2 | 2019-07-29T02:43:44.000Z | 2020-08-31T02:16:20.000Z | tagging/backend/taggingbai.py | unlimitedaki/sudaNLP-Olympic-Winter-Games-taggingSystem-backend | dcc0bbc1046c8509f20477c81ae03cf0367d6c04 | [
"MIT"
] | null | null | null | tagging/backend/taggingbai.py | unlimitedaki/sudaNLP-Olympic-Winter-Games-taggingSystem-backend | dcc0bbc1046c8509f20477c81ae03cf0367d6c04 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
import json
from django.views.decorators.csrf import csrf_exempt
from . import models
import hashlib
import os
dirDataLabeled = 'data/labeled/'
fnres = 'Bai.txt'
dirDataRaw = 'data/raw/'
@csrf_exempt
def rawdata(request):
returnMessage = {}
if request.method == "POST":
logfile = open('log.txt','w',encoding = 'utf-8')
datafile = request.FILES.get("datafile")
# task = request.POST.get('task')
if datafile:
des = open(dirDataRaw+datafile.name,'wb+')
for chunk in datafile.chunks():
des.write(chunk)
des.close()
des = open(dirDataRaw+datafile.name,'r',encoding = "utf-8")
for line in des:
try:
                    # qa = line.replace('\n','').split('\t')
data = models.DataBai(sentence = line)
data.save()
except Exception as ex:
logfile.write(line)
des.close()
returnMessage['detail'] = "写入成功"
return HttpResponse(json.dumps(returnMessage),200)
def getUnlabeled():
data = models.DataBai.objects.filter(status = 0).first()
if not data:
data = models.DataBai.objects.filter(status = 1).first()
return data
def saveResult(data,result):
f = open(dirDataLabeled+fnres,'a',encoding = 'utf-8')
res = {}
res['sentence'] = data.sentence
res['results'] = result
f.write(json.dumps(res,ensure_ascii=False)+"\n")
@csrf_exempt
def readtext(request):
returnMessage ={}
returnStatus = 401
response = HttpResponse()
if request.method == "GET":
data = getUnlabeled()
if not data:
returnMessage['detail'] = "数据已标注完"
returnStatus = 401
else:
returnMessage['sentence'] = data.sentence
print(returnMessage)
data.status = 1
data.save()
response.set_cookie("id",data.id)
returnStatus= 200
elif request.method == "POST":
id = request.COOKIES['id']
data = models.DataBai.objects.get(id = id)
if data.status == 2:
returnMessage['detail'] = "数据被标注"
returnStatus = 401
else:
result = json.loads(request.body)['results']
data.status = 2
data.save()
saveResult(data,result)
returnMessage['detail'] = "标注成功"
returnStatus = 200
response.content = json.dumps(returnMessage)
response.status_code = returnStatus
return response
| 32.060976 | 71 | 0.574363 |
81942a0671f11b02a60e6be7f09e74ce56ade78c | 21,299 | py | Python | tensorflow_transform/saved/saved_transform_io_v2_test.py | Saiprasad16/transform | 774458bf0c296f8275fedf3ace303427654dace7 | [
"Apache-2.0"
] | 1 | 2021-05-10T10:52:06.000Z | 2021-05-10T10:52:06.000Z | tensorflow_transform/saved/saved_transform_io_v2_test.py | Saiprasad16/transform | 774458bf0c296f8275fedf3ace303427654dace7 | [
"Apache-2.0"
] | null | null | null | tensorflow_transform/saved/saved_transform_io_v2_test.py | Saiprasad16/transform | 774458bf0c296f8275fedf3ace303427654dace7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for saved_transform_io_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
# GOOGLE-INITIALIZATION
import numpy as np
import six
import tensorflow as tf
from tensorflow_transform import impl_helper
from tensorflow_transform import tf_utils
from tensorflow_transform import test_case
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.saved import saved_transform_io_v2
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.lib.io import file_io
# pylint: enable=g-direct-tensorflow-import
_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES = [
dict(testcase_name='_exported_in_tf1', exported_in_tf1=True),
dict(testcase_name='_exported_in_tf2', exported_in_tf1=False)
]
def _get_preprocessing_fn_asset_table(asset_file):
def construct_table(asset_path):
initializer = tf.lookup.TextFileInitializer(
asset_path,
key_dtype=tf.string,
key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
value_dtype=tf.int64,
value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
return tf.lookup.StaticHashTable(initializer, default_value=-1)
def preprocessing_fn(inputs):
unused_table, output = tf_utils.construct_and_lookup_table(
construct_table, asset_file, inputs['input'])
return {'output': output}
return preprocessing_fn
def _get_preprocessing_fn_non_asset_table(asset_file):
del asset_file
def preprocessing_fn(inputs):
initializer = tf.lookup.KeyValueTensorInitializer(
keys=['foo', 'bar', 'baz'],
values=tf.cast(tf.range(3), tf.int64),
key_dtype=tf.string,
value_dtype=tf.int64)
table = tf.lookup.StaticHashTable(initializer, default_value=12)
return {
'output': table.lookup(inputs['input']),
}
return preprocessing_fn
_RE_EXPORT_TF2_TO_TF1_TEST_CASES = [
dict(
testcase_name='_asset_table',
preprocessing_fn_getter=_get_preprocessing_fn_asset_table,
expected_output=2,
test_input='baz',
asset_file_contents='foo\nbar\nbaz\n'),
dict(
testcase_name='_non_asset_table',
preprocessing_fn_getter=_get_preprocessing_fn_non_asset_table,
expected_output=2,
test_input='baz'),
]
# TODO(b/123241798): Find an open-source compatible way to access
# FLAGS.test_tmpdir.
def _create_test_saved_model(export_in_tf1,
input_specs,
preprocessing_fn,
export_path_suffix=None,
base_dir=None):
if not export_path_suffix:
export_path = os.path.join(tempfile.mkdtemp(dir=base_dir), 'export')
else:
export_path = os.path.join(
tempfile.mkdtemp(dir=base_dir), export_path_suffix)
if export_in_tf1:
with tf.compat.v1.Graph().as_default():
with tf.compat.v1.Session().as_default() as session:
inputs = {}
for key in six.iterkeys(input_specs):
tensor_spec = input_specs[key]
if isinstance(tensor_spec, tf.TensorSpec):
inputs[key] = tf.compat.v1.placeholder(
tensor_spec.dtype, shape=tensor_spec.shape)
elif isinstance(tensor_spec, tf.SparseTensorSpec):
inputs[key] = tf.compat.v1.sparse_placeholder(
tensor_spec.dtype, shape=tensor_spec.shape)
elif isinstance(tensor_spec, tf.RaggedTensorSpec):
inputs[key] = tf.compat.v1.ragged.placeholder(
tensor_spec._dtype, tensor_spec._ragged_rank, [])
else:
raise ValueError(
'TypeSpecs specified should be one of `tf.TensorSpec`, '
'`tf.SparseTensorSpec`, `tf.RaggedTensorSpec`')
outputs = preprocessing_fn(inputs)
# show that unrelated & unmapped placeholders do not interfere
tf.compat.v1.placeholder(tf.int64)
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
else:
transform_fn = impl_helper.get_traced_transform_fn(
preprocessing_fn=preprocessing_fn,
input_signature=input_specs,
base_temp_dir=None,
tensor_replacement_map=None,
output_keys_to_name_map=None)
saved_transform_io_v2.write_v2_saved_model(transform_fn, 'transform_fn',
export_path)
return export_path
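# A minimal sketch of how the helper above is exercised (it mirrors setUpClass
# in the test class below; the feature names and preprocessing_fn are only
# illustrative):
#
#   specs = {'x': tf.TensorSpec([None], tf.float32)}
#   path = _create_test_saved_model(False, specs,
#                                   lambda inputs: {'y': inputs['x'] + 1.0})
#   loader = saved_transform_io_v2.SavedModelLoader(path)
#   outputs = loader.apply_transform_model({'x': tf.constant([1.0])})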
class SavedTransformIOV2Test(test_case.TransformTestCase):
@classmethod
def setUpClass(cls):
test_case.skip_if_not_tf2('Tensorflow 2.x required.')
input_specs = {
'x': tf.TensorSpec([
None,
], dtype=tf.float32)
}
def preprocessing_fn(inputs):
output = (inputs['x'] - 2.0) / 5.0
return {'x_scaled': output}
cls._saved_model_path_v1 = _create_test_saved_model(True, input_specs,
preprocessing_fn,
'export_v1')
cls._saved_model_path_v2 = _create_test_saved_model(False, input_specs,
preprocessing_fn,
'export_v2')
def _get_saved_model_loader(self, exported_in_tf1):
if exported_in_tf1:
return saved_transform_io_v2.SavedModelLoader(self._saved_model_path_v1)
return saved_transform_io_v2.SavedModelLoader(self._saved_model_path_v2)
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_apply_saved_transform(self, exported_in_tf1):
input_floats = tf.constant([1237.0]) # tf.float32
input_features = {'x': input_floats}
transformed_features = (
self._get_saved_model_loader(exported_in_tf1).apply_transform_model(
input_features))
self.assertEqual(['x_scaled'], list(transformed_features))
result_tensor = transformed_features['x_scaled']
self.assertIsInstance(result_tensor, tf.Tensor)
self.assertAllEqual(result_tensor.numpy(), [247.0])
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_apply_saved_transform_dataset_map(self, exported_in_tf1):
ds = tf.data.Dataset.from_tensor_slices({'x': [[1237.0]]})
model_loader = self._get_saved_model_loader(exported_in_tf1)
def map_fn(inputs):
result = model_loader.apply_transform_model(inputs)
self.assertEqual(['x_scaled'], list(result))
result_tensor = result['x_scaled']
self.assertIsInstance(result_tensor, tf.Tensor)
self.assertEqual(result_tensor.shape.as_list(), [1])
return result
result_ds = ds.map(map_fn)
self.assertAllEqual(
list(result_ds.as_numpy_iterator()), [{
'x_scaled': [247.0]
}])
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_apply_transform_extra_features_no_passthrough(self, exported_in_tf1):
with self.assertRaises(ValueError):
input_floats = tf.constant([1237.0]) # tf.float32
input_features = {
'x': input_floats,
'extra_1': tf.constant('1'),
'extra_2': tf.constant('2')
}
self._get_saved_model_loader(exported_in_tf1).apply_transform_model(
input_features)
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_apply_transform_type_mismatch(self, exported_in_tf1):
with self.assertRaises(tf.errors.InvalidArgumentError):
input_strings = tf.constant(['bogus']) # tf.string
input_features = {'x': input_strings}
self._get_saved_model_loader(exported_in_tf1).apply_transform_model(
input_features)
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_apply_transform_shape_mismatch(self, exported_in_tf1):
with self.assertRaises(ValueError):
input_floats = tf.constant(1237.0) # tf.float32
input_features = {'x': input_floats}
self._get_saved_model_loader(exported_in_tf1).apply_transform_model(
input_features)
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_apply_saved_transform_to_tensor_inside_scope(self, exported_in_tf1):
with tf.compat.v1.name_scope('my_scope'):
input_floats = tf.constant([1237.0]) # tf.float32
input_features = {'x': input_floats}
transformed_features = (
self._get_saved_model_loader(exported_in_tf1).apply_transform_model(
input_features))
self.assertEqual(['x_scaled'], list(transformed_features))
result_tensor = transformed_features['x_scaled']
self.assertIsInstance(result_tensor, tf.Tensor)
self.assertAllEqual(result_tensor.numpy(), [247.0])
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_apply_saved_transform_to_tensor_outside_scope(self, exported_in_tf1):
input_floats = tf.constant([1237.0]) # tf.float32
with tf.compat.v1.name_scope('my_scope'):
input_features = {'x': input_floats}
transformed_features = (
self._get_saved_model_loader(exported_in_tf1).apply_transform_model(
input_features))
self.assertEqual(['x_scaled'], list(transformed_features))
result_tensor = transformed_features['x_scaled']
self.assertIsInstance(result_tensor, tf.Tensor)
self.assertAllEqual(result_tensor.numpy(), [247.0])
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_dense_roundtrip(self, exported_in_tf1):
input_specs = {'input': tf.TensorSpec([], dtype=tf.float32)}
def preprocessing_fn(inputs):
return {'output': inputs['input'] / 5.0}
export_path = _create_test_saved_model(
exported_in_tf1,
input_specs,
preprocessing_fn,
base_dir=self.get_temp_dir())
# Using a computed input gives confidence that the graphs are fused.
input_float = tf.constant(25.0) * 2
inputs = {'input': input_float}
saved_model_loader = saved_transform_io_v2.SavedModelLoader(export_path)
outputs = saved_model_loader.apply_transform_model(inputs)
# (25 * 2) / 5 = 10
self.assertEqual(10.0, outputs['output'].numpy())
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_table_roundtrip(self, exported_in_tf1):
input_specs = {'input': tf.TensorSpec([], dtype=tf.string)}
def preprocessing_fn(inputs):
table_keys = ['cat', 'dog', 'giraffe']
initializer = tf.lookup.KeyValueTensorInitializer(
keys=table_keys,
values=tf.cast(tf.range(len(table_keys)), tf.int64),
key_dtype=tf.string,
value_dtype=tf.int64)
table = tf.lookup.StaticHashTable(initializer, default_value=-1)
return {'output': table.lookup(inputs['input'])}
export_path = _create_test_saved_model(
exported_in_tf1,
input_specs,
preprocessing_fn,
base_dir=self.get_temp_dir())
# Using a computed input gives confidence that the graphs are fused.
input_string = tf.constant('dog')
inputs = {'input': input_string}
saved_model_loader = saved_transform_io_v2.SavedModelLoader(export_path)
outputs = saved_model_loader.apply_transform_model(inputs)
self.assertEqual(1, outputs['output'].numpy())
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_sparse_roundtrip(self, exported_in_tf1):
input_specs = {
'input': tf.SparseTensorSpec([None, None, None], dtype=tf.float32)
}
def preprocessing_fn(inputs):
return {'output': inputs['input'] / 5.0}
export_path = _create_test_saved_model(
exported_in_tf1,
input_specs,
preprocessing_fn,
base_dir=self.get_temp_dir())
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
input_sparse = tf.SparseTensor(
indices=indices, values=values, dense_shape=shape)
# Using a computed input gives confidence that the graphs are fused
inputs = {'input': input_sparse * 10}
saved_model_loader = saved_transform_io_v2.SavedModelLoader(export_path)
outputs = saved_model_loader.apply_transform_model(inputs)
result = outputs['output']
self.assertIsInstance(result, tf.SparseTensor)
# indices and shape unchanged; values multiplied by 10 and divided by 5
self.assertEqual(indices.tolist(), result.indices.numpy().tolist())
self.assertEqual([2.0, 4.0], result.values.numpy().tolist())
self.assertEqual(shape.tolist(), result.dense_shape.numpy().tolist())
@test_case.named_parameters(*_TRANFORM_FN_EXPORT_TF_VERSION_TEST_CASES)
def test_ragged_roundtrip(self, exported_in_tf1):
if not hasattr(meta_graph_pb2.TensorInfo, 'CompositeTensor'):
self.skipTest('This version of TensorFlow does not support '
'CompositeTensors in TensorInfo.')
input_specs = {
'input':
tf.RaggedTensorSpec(
shape=[None, None],
dtype=tf.float32,
ragged_rank=1,
row_splits_dtype=tf.int64)
}
def preprocessing_fn(inputs):
return {'output': inputs['input'] / 2.0}
export_path = _create_test_saved_model(
exported_in_tf1,
input_specs,
preprocessing_fn,
base_dir=self.get_temp_dir())
splits = np.array([0, 2, 3], dtype=np.int64)
values = np.array([1.0, 2.0, 4.0], dtype=np.float32)
input_ragged = tf.RaggedTensor.from_row_splits(values, splits)
# Using a computed input gives confidence that the graphs are fused
inputs = {'input': input_ragged * 10}
saved_model_loader = saved_transform_io_v2.SavedModelLoader(export_path)
outputs = saved_model_loader.apply_transform_model(inputs)
result = outputs['output']
self.assertIsInstance(result, tf.RaggedTensor)
# indices and shape unchanged; values multiplied by 10 and divided by 2
self.assertAllEqual(splits, result.row_splits)
self.assertEqual([5.0, 10.0, 20.0], result.values.numpy().tolist())
@test_case.named_parameters(*_RE_EXPORT_TF2_TO_TF1_TEST_CASES)
def test_re_export_tf2_saved_model_to_tf1(self,
preprocessing_fn_getter,
expected_output,
test_input,
asset_file_contents=None):
asset_file = None
if asset_file_contents is not None:
asset_file_path = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir()), 'asset')
file_io.write_string_to_file(asset_file_path, asset_file_contents)
asset_file = tf.constant(asset_file_path)
input_specs = {'input': tf.TensorSpec([], dtype=tf.string)}
export_path = _create_test_saved_model(
False,
input_specs,
preprocessing_fn_getter(asset_file),
base_dir=self.get_temp_dir())
if asset_file is not None:
os.remove(asset_file.numpy())
new_export_path = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir()), 'export_v1')
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(
new_export_path)
# TODO(b/175844561): Investigate why the variable names need to be different
# for the two graph and session contexts below.
with tf.compat.v1.Graph().as_default() as g1:
saved_model_loader = saved_transform_io_v2.SavedModelLoader(export_path)
if asset_file_contents is not None:
self.assertEqual(
1, len(g1.get_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS)))
with tf.compat.v1.Session().as_default() as s1:
inputs = {'input': tf.compat.v1.placeholder(tf.string)}
outputs = saved_model_loader.apply_transform_model(inputs)
predict_signature_def = (
tf.compat.v1.saved_model.signature_def_utils.predict_signature_def(
inputs, outputs))
builder.add_meta_graph_and_variables(
s1, ['graph_tag'],
signature_def_map={'graph_signature': predict_signature_def},
assets_collection=tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.ASSET_FILEPATHS),
main_op=tf.compat.v1.tables_initializer())
builder.save()
shutil.rmtree(export_path)
with tf.compat.v1.Graph().as_default() as g2:
with tf.compat.v1.Session().as_default() as s2:
meta_graph_def = tf.compat.v1.saved_model.loader.load(
s2, ['graph_tag'], new_export_path)
signature = meta_graph_def.signature_def['graph_signature']
output = s2.run(
g2.get_tensor_by_name(signature.outputs['output'].name),
feed_dict={
g2.get_tensor_by_name(signature.inputs['input'].name):
test_input
})
self.assertEqual(expected_output, output)
if asset_file_contents is not None:
self.assertEqual(
1, len(g2.get_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS)))
def test_stale_asset_collections_are_cleaned(self):
vocabulary_file = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir()), 'asset')
file_io.write_string_to_file(vocabulary_file, 'foo bar baz')
input_specs = {'input': tf.TensorSpec([], dtype=tf.string)}
def preprocessing_fn(inputs):
initializer = tf.lookup.TextFileInitializer(
vocabulary_file,
key_dtype=tf.string,
key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
value_dtype=tf.int64,
value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
table = tf.lookup.StaticHashTable(initializer, default_value=12)
return {'output': table.lookup(inputs['input'])}
export_path = _create_test_saved_model(
False, input_specs, preprocessing_fn, base_dir=self.get_temp_dir())
# Load it and save it again repeatedly, verifying that the asset collections
# remain valid.
for it in [1, 2, 3]:
input_string = tf.constant('dog')
inputs = {'input': input_string}
saved_model_loader = saved_transform_io_v2.SavedModelLoader(export_path)
outputs = saved_model_loader.apply_transform_model(inputs)
self.assertEqual(12, outputs['output'])
new_export_path = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir()), 'export_' + str(it))
tf.saved_model.save(saved_model_loader._imported, new_export_path)
shutil.rmtree(export_path)
export_path = new_export_path
def test_finalize(self):
input_keys = ['x']
output_keys = ['x_scaled']
input_specs = {
'x': tf.TensorSpec([
None,
], dtype=tf.float32),
'y': tf.TensorSpec([
None,
], dtype=tf.float32)
}
def preprocessing_fn(inputs):
output = (inputs['x'] - 2.0) / 5.0
return {'x_scaled': output, 'x_in': inputs['x'], 'y': inputs['y'] + 1}
export_path = _create_test_saved_model(
False, input_specs, preprocessing_fn, base_dir=self.get_temp_dir())
saved_model_loader = saved_transform_io_v2.SavedModelLoader(export_path)
input_features = {'x': tf.constant([1237.0])} # tf.float32
transformed_features = (
saved_model_loader.apply_transform_model(input_features))
self.assertCountEqual(['x_in', 'x_scaled'], list(transformed_features))
self.assertAllEqual(transformed_features['x_scaled'].numpy(), [247.0])
self.assertAllEqual(transformed_features['x_in'].numpy(), [1237.0])
# Since `finalize` is not thread-safe it is not recommended to call it after
# `apply_transform_model` has already been invoked. This is only for unit
# testing behavior differences.
saved_model_loader.finalize(input_keys, output_keys)
transformed_features = (
saved_model_loader.apply_transform_model(input_features))
self.assertEqual(['x_scaled'], list(transformed_features))
self.assertAllEqual(transformed_features['x_scaled'].numpy(), [247.0])
def test_optimize_concrete_function(self):
@tf.function(input_signature=[tf.TensorSpec([], dtype=tf.int64)])
def func(x):
_ = x + 1
z = x + 2
return z
concrete_function = func.get_concrete_function()
optimized_function = saved_transform_io_v2.optimize_concrete_function(
concrete_function)
self.assertLess(
len(optimized_function.graph.as_graph_def().node),
len(concrete_function.graph.as_graph_def().node))
if __name__ == '__main__':
test_case.main()
| 39.885768 | 80 | 0.690455 |
f34a673e78782f12f78dc42f99c83e93d9d3a39a | 2,060 | py | Python | tests/fractalmusic/test_fm_directions.py | alexgorji/musurgia | 81d37afbf1ac70348002a93299db228b5ed4a591 | [
"MIT"
] | null | null | null | tests/fractalmusic/test_fm_directions.py | alexgorji/musurgia | 81d37afbf1ac70348002a93299db228b5ed4a591 | [
"MIT"
] | 45 | 2020-02-24T19:37:00.000Z | 2021-04-06T16:13:56.000Z | tests/fractalmusic/test_fm_directions.py | alexgorji/musurgia | 81d37afbf1ac70348002a93299db228b5ed4a591 | [
"MIT"
] | null | null | null | from unittest import TestCase
from musurgia.fractaltree.fractalmusic import FractalMusic
import os
path = os.path.abspath(__file__).split(".")[0]
class Test(TestCase):
def setUp(self) -> None:
self.fm = FractalMusic(proportions=[1, 2, 3], tree_permutation_order=(3, 1, 2), duration=10)
def test_1(self):
self.fm.midi_generator.set_directions(1, -1, -1)
self.assertEqual([1, -1, -1], self.fm.midi_generator.directions)
self.assertEqual([-1, -1, 1], self.fm.tree_directions)
self.fm.add_layer()
# for node in self.fm.get_children():
# print(node.name)
# print(node.multi)
# print(node.permutation_order)
#
# text_path = path + '_test_1.txt'
#
# self.fm.write_infos(text_path)
directions = [leaf.midi_generator.directions for leaf in self.fm.traverse_leaves()]
self.assertEqual([[-1, -1, 1], [1, -1, -1], [-1, 1, -1]], directions)
def test_2(self):
self.fm.tree_directions = [1, 1, -1]
self.fm.midi_generator.midi_range = [60, 70]
self.fm.add_layer()
directions = [leaf.midi_generator.directions for leaf in self.fm.traverse_leaves()]
self.fm.add_layer()
midis = [leaf.midi_value for leaf in self.fm.traverse_leaves()]
self.assertEqual([[1, 1, -1], [-1, 1, 1], [1, -1, 1]], directions)
self.assertEqual([60.0, 63.0, 70.0, 63.0, 60.0, 61.0, 65.0, 70.0, 63.0], midis)
def test_3(self):
self.fm.tree_directions = [1, 1, -1]
self.fm.midi_generator.midi_range = [60, 70]
self.fm.permute_directions = False
self.fm.add_layer()
self.fm.add_layer()
midi_ranges = [[leaf.midi_generator.midi_range for leaf in child.get_leaves()] for child in
self.fm.get_children()]
result = [[[60.0, 63.0], [63.0, 70.0], [70.0, 60.0]], [[60.0, 62.0], [62.0, 63.0], [63.0, 61.0]],
[[63.0, 66.0], [66.0, 70.0], [70.0, 69.0]]]
self.assertEqual(result, midi_ranges)
| 39.615385 | 105 | 0.588835 |
3814a3d252b5b1ebfa5595280ad3300e3c1308e9 | 15,024 | py | Python | env/lib/python3.5/site-packages/pyasn1_modules/rfc6402.py | creekhead/RPI_google_asst | 65dc7b08bb8333f8977488f37f7d3ec652489a44 | [
"Apache-2.0"
] | 3 | 2019-04-10T20:36:57.000Z | 2019-04-11T04:10:09.000Z | env/lib/python3.5/site-packages/pyasn1_modules/rfc6402.py | creekhead/RPI_google_asst | 65dc7b08bb8333f8977488f37f7d3ec652489a44 | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | env/lib/python3.5/site-packages/pyasn1_modules/rfc6402.py | creekhead/RPI_google_asst | 65dc7b08bb8333f8977488f37f7d3ec652489a44 | [
"Apache-2.0"
] | 4 | 2020-05-04T18:53:09.000Z | 2021-09-17T15:02:19.000Z | # coding: utf-8
#
# This file is part of pyasn1-modules software.
#
# Created by Stanisław Pitucha with asn1ate tool.
# Copyright (c) 2005-2018, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
# Certificate Management over CMS (CMC) Updates
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc6402.txt
#
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc4211
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc5652
MAX = float('inf')
def _buildOid(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
class ChangeSubjectName(univ.Sequence):
pass
ChangeSubjectName.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('subject', rfc5280.Name()),
namedtype.OptionalNamedType('subjectAlt', rfc5280.GeneralNames())
)
class AttributeValue(univ.Any):
pass
class CMCStatus(univ.Integer):
pass
CMCStatus.namedValues = namedval.NamedValues(
('success', 0),
('failed', 2),
('pending', 3),
('noSupport', 4),
('confirmRequired', 5),
('popRequired', 6),
('partial', 7)
)
class PendInfo(univ.Sequence):
pass
PendInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('pendToken', univ.OctetString()),
namedtype.NamedType('pendTime', useful.GeneralizedTime())
)
bodyIdMax = univ.Integer(4294967295)
class BodyPartID(univ.Integer):
pass
BodyPartID.subtypeSpec = constraint.ValueRangeConstraint(0, bodyIdMax)
class BodyPartPath(univ.SequenceOf):
pass
BodyPartPath.componentType = BodyPartID()
BodyPartPath.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class BodyPartReference(univ.Choice):
pass
BodyPartReference.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('bodyPartPath', BodyPartPath())
)
class CMCFailInfo(univ.Integer):
pass
CMCFailInfo.namedValues = namedval.NamedValues(
('badAlg', 0),
('badMessageCheck', 1),
('badRequest', 2),
('badTime', 3),
('badCertId', 4),
('unsupportedExt', 5),
('mustArchiveKeys', 6),
('badIdentity', 7),
('popRequired', 8),
('popFailed', 9),
('noKeyReuse', 10),
('internalCAError', 11),
('tryLater', 12),
('authDataFail', 13)
)
class CMCStatusInfoV2(univ.Sequence):
pass
CMCStatusInfoV2.componentType = namedtype.NamedTypes(
namedtype.NamedType('cMCStatus', CMCStatus()),
namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference())),
namedtype.OptionalNamedType('statusString', char.UTF8String()),
namedtype.OptionalNamedType(
'otherInfo', univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('failInfo', CMCFailInfo()),
namedtype.NamedType('pendInfo', PendInfo()),
namedtype.NamedType(
'extendedFailInfo', univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('failInfoOID', univ.ObjectIdentifier()),
namedtype.NamedType('failInfoValue', AttributeValue()))
)
)
)
)
)
)
class GetCRL(univ.Sequence):
pass
GetCRL.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerName', rfc5280.Name()),
namedtype.OptionalNamedType('cRLName', rfc5280.GeneralName()),
namedtype.OptionalNamedType('time', useful.GeneralizedTime()),
namedtype.OptionalNamedType('reasons', rfc5280.ReasonFlags())
)
id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
id_cmc = _buildOid(id_pkix, 7)
id_cmc_batchResponses = _buildOid(id_cmc, 29)
id_cmc_popLinkWitness = _buildOid(id_cmc, 23)
class PopLinkWitnessV2(univ.Sequence):
pass
PopLinkWitnessV2.componentType = namedtype.NamedTypes(
namedtype.NamedType('keyGenAlgorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('macAlgorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('witness', univ.OctetString())
)
id_cmc_popLinkWitnessV2 = _buildOid(id_cmc, 33)
id_cmc_identityProofV2 = _buildOid(id_cmc, 34)
id_cmc_revokeRequest = _buildOid(id_cmc, 17)
id_cmc_recipientNonce = _buildOid(id_cmc, 7)
class ControlsProcessed(univ.Sequence):
pass
ControlsProcessed.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference()))
)
class CertificationRequest(univ.Sequence):
pass
CertificationRequest.componentType = namedtype.NamedTypes(
namedtype.NamedType(
'certificationRequestInfo', univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer()),
namedtype.NamedType('subject', rfc5280.Name()),
namedtype.NamedType(
'subjectPublicKeyInfo', univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('subjectPublicKey', univ.BitString())
)
)
),
namedtype.NamedType(
'attributes', univ.SetOf(
componentType=rfc5652.Attribute()).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
)
)
)
),
namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
class TaggedCertificationRequest(univ.Sequence):
pass
TaggedCertificationRequest.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('certificationRequest', CertificationRequest())
)
class TaggedRequest(univ.Choice):
pass
TaggedRequest.componentType = namedtype.NamedTypes(
namedtype.NamedType('tcr', TaggedCertificationRequest().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('crm',
rfc4211.CertReqMsg().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('orm', univ.Sequence(componentType=namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('requestMessageType', univ.ObjectIdentifier()),
namedtype.NamedType('requestMessageValue', univ.Any())
))
.subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
id_cmc_popLinkRandom = _buildOid(id_cmc, 22)
id_cmc_statusInfo = _buildOid(id_cmc, 1)
id_cmc_trustedAnchors = _buildOid(id_cmc, 26)
id_cmc_transactionId = _buildOid(id_cmc, 5)
id_cmc_encryptedPOP = _buildOid(id_cmc, 9)
class PublishTrustAnchors(univ.Sequence):
pass
PublishTrustAnchors.componentType = namedtype.NamedTypes(
namedtype.NamedType('seqNumber', univ.Integer()),
namedtype.NamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('anchorHashes', univ.SequenceOf(componentType=univ.OctetString()))
)
class RevokeRequest(univ.Sequence):
pass
RevokeRequest.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerName', rfc5280.Name()),
namedtype.NamedType('serialNumber', univ.Integer()),
namedtype.NamedType('reason', rfc5280.CRLReason()),
namedtype.OptionalNamedType('invalidityDate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('passphrase', univ.OctetString()),
namedtype.OptionalNamedType('comment', char.UTF8String())
)
id_cmc_senderNonce = _buildOid(id_cmc, 6)
id_cmc_authData = _buildOid(id_cmc, 27)
class TaggedContentInfo(univ.Sequence):
pass
TaggedContentInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('contentInfo', rfc5652.ContentInfo())
)
class IdentifyProofV2(univ.Sequence):
pass
IdentifyProofV2.componentType = namedtype.NamedTypes(
namedtype.NamedType('proofAlgID', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('macAlgId', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('witness', univ.OctetString())
)
class CMCPublicationInfo(univ.Sequence):
pass
CMCPublicationInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('certHashes', univ.SequenceOf(componentType=univ.OctetString())),
namedtype.NamedType('pubInfo', rfc4211.PKIPublicationInfo())
)
id_kp_cmcCA = _buildOid(rfc5280.id_kp, 27)
id_cmc_confirmCertAcceptance = _buildOid(id_cmc, 24)
id_cmc_raIdentityWitness = _buildOid(id_cmc, 35)
id_ExtensionReq = _buildOid(1, 2, 840, 113549, 1, 9, 14)
id_cct = _buildOid(id_pkix, 12)
id_cct_PKIData = _buildOid(id_cct, 2)
id_kp_cmcRA = _buildOid(rfc5280.id_kp, 28)
class CMCStatusInfo(univ.Sequence):
pass
CMCStatusInfo.componentType = namedtype.NamedTypes(
namedtype.NamedType('cMCStatus', CMCStatus()),
namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartID())),
namedtype.OptionalNamedType('statusString', char.UTF8String()),
namedtype.OptionalNamedType(
'otherInfo', univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('failInfo', CMCFailInfo()),
namedtype.NamedType('pendInfo', PendInfo())
)
)
)
)
class DecryptedPOP(univ.Sequence):
pass
DecryptedPOP.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('thePOP', univ.OctetString())
)
id_cmc_addExtensions = _buildOid(id_cmc, 8)
id_cmc_modCertTemplate = _buildOid(id_cmc, 31)
class TaggedAttribute(univ.Sequence):
pass
TaggedAttribute.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('attrType', univ.ObjectIdentifier()),
namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()))
)
class OtherMsg(univ.Sequence):
pass
OtherMsg.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartID', BodyPartID()),
namedtype.NamedType('otherMsgType', univ.ObjectIdentifier()),
namedtype.NamedType('otherMsgValue', univ.Any())
)
class PKIData(univ.Sequence):
pass
PKIData.componentType = namedtype.NamedTypes(
namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
namedtype.NamedType('reqSequence', univ.SequenceOf(componentType=TaggedRequest())),
namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
)
class BodyPartList(univ.SequenceOf):
pass
BodyPartList.componentType = BodyPartID()
BodyPartList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
id_cmc_responseBody = _buildOid(id_cmc, 37)
class AuthPublish(BodyPartID):
pass
class CMCUnsignedData(univ.Sequence):
pass
CMCUnsignedData.componentType = namedtype.NamedTypes(
namedtype.NamedType('bodyPartPath', BodyPartPath()),
namedtype.NamedType('identifier', univ.ObjectIdentifier()),
namedtype.NamedType('content', univ.Any())
)
class CMCCertId(rfc5652.IssuerAndSerialNumber):
pass
class PKIResponse(univ.Sequence):
pass
PKIResponse.componentType = namedtype.NamedTypes(
namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
)
class ResponseBody(PKIResponse):
pass
id_cmc_statusInfoV2 = _buildOid(id_cmc, 25)
id_cmc_lraPOPWitness = _buildOid(id_cmc, 11)
class ModCertTemplate(univ.Sequence):
pass
ModCertTemplate.componentType = namedtype.NamedTypes(
namedtype.NamedType('pkiDataReference', BodyPartPath()),
namedtype.NamedType('certReferences', BodyPartList()),
namedtype.DefaultedNamedType('replace', univ.Boolean().subtype(value=1)),
namedtype.NamedType('certTemplate', rfc4211.CertTemplate())
)
id_cmc_regInfo = _buildOid(id_cmc, 18)
id_cmc_identityProof = _buildOid(id_cmc, 3)
class ExtensionReq(univ.SequenceOf):
pass
ExtensionReq.componentType = rfc5280.Extension()
ExtensionReq.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
id_kp_cmcArchive = _buildOid(rfc5280.id_kp, 28)
id_cmc_publishCert = _buildOid(id_cmc, 30)
id_cmc_dataReturn = _buildOid(id_cmc, 4)
class LraPopWitness(univ.Sequence):
pass
LraPopWitness.componentType = namedtype.NamedTypes(
namedtype.NamedType('pkiDataBodyid', BodyPartID()),
namedtype.NamedType('bodyIds', univ.SequenceOf(componentType=BodyPartID()))
)
id_aa = _buildOid(1, 2, 840, 113549, 1, 9, 16, 2)
id_aa_cmc_unsignedData = _buildOid(id_aa, 34)
id_cmc_getCert = _buildOid(id_cmc, 15)
id_cmc_batchRequests = _buildOid(id_cmc, 28)
id_cmc_decryptedPOP = _buildOid(id_cmc, 10)
id_cmc_responseInfo = _buildOid(id_cmc, 19)
id_cmc_changeSubjectName = _buildOid(id_cmc, 36)
class GetCert(univ.Sequence):
pass
GetCert.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerName', rfc5280.GeneralName()),
namedtype.NamedType('serialNumber', univ.Integer())
)
id_cmc_identification = _buildOid(id_cmc, 2)
id_cmc_queryPending = _buildOid(id_cmc, 21)
class AddExtensions(univ.Sequence):
pass
AddExtensions.componentType = namedtype.NamedTypes(
namedtype.NamedType('pkiDataReference', BodyPartID()),
namedtype.NamedType('certReferences', univ.SequenceOf(componentType=BodyPartID())),
namedtype.NamedType('extensions', univ.SequenceOf(componentType=rfc5280.Extension()))
)
class EncryptedPOP(univ.Sequence):
pass
EncryptedPOP.componentType = namedtype.NamedTypes(
namedtype.NamedType('request', TaggedRequest()),
namedtype.NamedType('cms', rfc5652.ContentInfo()),
namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('witnessAlgID', rfc5280.AlgorithmIdentifier()),
namedtype.NamedType('witness', univ.OctetString())
)
id_cmc_getCRL = _buildOid(id_cmc, 16)
id_cct_PKIResponse = _buildOid(id_cct, 3)
id_cmc_controlProcessed = _buildOid(id_cmc, 32)
class NoSignatureValue(univ.OctetString):
pass
id_ad_cmc = _buildOid(rfc5280.id_ad, 12)
id_alg_noSignature = _buildOid(id_pkix, 6, 2)
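if __name__ == '__main__':
    # Illustrative round trip (not part of the generated RFC 6402 module):
    # DER-encode one of the simple types declared above and decode it back,
    # showing how these pyasn1 classes are typically exercised.
    from pyasn1.codec.der import decoder, encoder

    encoded = encoder.encode(BodyPartID(42))
    decoded, _ = decoder.decode(encoded, asn1Spec=BodyPartID())
    assert int(decoded) == 42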
| 26.450704 | 120 | 0.721379 |
8706641e074997f38796f38414de17a58ba0c4f7 | 10,429 | py | Python | app/editor/level_editor/region_painter_menu.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | [
"MIT"
] | null | null | null | app/editor/level_editor/region_painter_menu.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | [
"MIT"
] | null | null | null | app/editor/level_editor/region_painter_menu.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QPushButton, QLineEdit, \
QWidget, QVBoxLayout, QMessageBox, QCheckBox
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon, QColor, QPixmap
from app.data.database import DB
from app.utilities import utils, str_utils
from app.utilities.data import Data
from app.extensions.custom_gui import PropertyBox, PropertyCheckBox, ComboBox, RightClickListView
from app.editor.base_database_gui import DragDropCollectionModel
from app.editor.custom_widgets import SkillBox
from app.events import regions
from app.editor import timer
class RegionMenu(QWidget):
def __init__(self, state_manager, map_view):
super().__init__()
self.state_manager = state_manager
self.map_view = map_view
self.current_level = DB.levels.get(
self.state_manager.state.selected_level)
if self.current_level:
self._data = self.current_level.regions
else:
self._data = Data()
grid = QVBoxLayout()
self.setLayout(grid)
def duplicate_func(model, index):
return False
self.view = RightClickListView(
(None, None, None), parent=self)
self.view.currentChanged = self.on_item_changed
self.model = RegionModel(self._data, self)
self.view.setModel(self.model)
grid.addWidget(self.view)
self.create_button = QPushButton("Create Region...")
self.create_button.clicked.connect(self.create_region)
grid.addWidget(self.create_button)
self.modify_region_widget = ModifyRegionWidget(self._data, self)
grid.addWidget(self.modify_region_widget)
if not len(self._data):
self.modify_region_widget.setEnabled(False)
self.last_touched_region = None
self.display = self.modify_region_widget
self.state_manager.subscribe_to_key(
RegionMenu.__name__, 'selected_level', self.set_current_level)
self.state_manager.subscribe_to_key(
RegionMenu.__name__, 'ui_refresh_signal', self._refresh_view)
timer.get_timer().tick_elapsed.connect(self.tick)
def tick(self):
status_box = self.modify_region_widget.status_box
status_box.model.layoutChanged.emit()
def _refresh_view(self, _=None):
self.model.layoutChanged.emit()
def update_list(self):
self.state_manager.change_and_broadcast('ui_refresh_signal', None)
def set_current_level(self, level_nid):
level = DB.levels.get(level_nid)
self.current_level = level
self._data = self.current_level.regions
self.model._data = self._data
self.model.update()
self.modify_region_widget._data = self._data
if len(self._data):
self.modify_region_widget.setEnabled(True)
reg = self._data[0]
if reg.position:
self.map_view.center_on_pos(reg.center)
self.modify_region_widget.set_current(reg)
else:
self.modify_region_widget.setEnabled(False)
def select(self, idx):
index = self.model.index(idx)
self.view.setCurrentIndex(index)
def deselect(self):
self.view.clearSelection()
def on_item_changed(self, curr, prev):
if self._data:
reg = self._data[curr.row()]
if reg.position:
self.map_view.center_on_pos(reg.center)
self.modify_region_widget.set_current(reg)
def get_current(self):
for index in self.view.selectionModel().selectedIndexes():
idx = index.row()
if len(self._data) > 0 and idx < len(self._data):
return self._data[idx]
return None
def create_region(self, example=None):
nid = str_utils.get_next_name('New Region', self._data.keys())
created_region = regions.Region(nid)
self._data.append(created_region)
self.modify_region_widget.setEnabled(True)
self.model.update()
# Select the region
idx = self._data.index(created_region.nid)
index = self.model.index(idx)
self.view.setCurrentIndex(index)
self.state_manager.change_and_broadcast('ui_refresh_signal', None)
return created_region
class RegionModel(DragDropCollectionModel):
def data(self, index, role):
if not index.isValid():
return None
if role == Qt.DisplayRole:
reg = self._data[index.row()]
text = reg.nid + ': ' + reg.region_type
if reg.region_type == 'status':
if reg.sub_nid:
text += ' ' + reg.sub_nid
elif reg.region_type == 'event':
if reg.sub_nid:
text += ' ' + reg.sub_nid
if reg.condition:
text += '\n' + reg.condition
return text
elif role == Qt.DecorationRole:
reg = self._data[index.row()]
color = utils.hash_to_color(utils.strhash(reg.nid))
pixmap = QPixmap(32, 32)
pixmap.fill(QColor(*color))
return QIcon(pixmap)
return None
def new(self, idx):
ok = self.window.create_region()
if ok:
self._data.move_index(len(self._data) - 1, idx + 1)
self.layoutChanged.emit()
def duplicate(self, idx):
view = self.window.view
obj = self._data[idx]
new_nid = str_utils.get_next_name(obj.nid, self._data.keys())
serialized_obj = obj.save()
new_obj = regions.Region.restore(serialized_obj)
new_obj.nid = new_nid
self._data.insert(idx + 1, new_obj)
self.layoutChanged.emit()
new_index = self.index(idx + 1)
view.setCurrentIndex(new_index)
return new_index
class ModifyRegionWidget(QWidget):
def __init__(self, data, parent=None, current=None):
super().__init__(parent)
self.window = parent
self._data = data
layout = QVBoxLayout()
self.setLayout(layout)
self.current = current
self.nid_box = PropertyBox("Unique ID", QLineEdit, self)
self.nid_box.edit.textChanged.connect(self.nid_changed)
self.nid_box.edit.editingFinished.connect(self.nid_done_editing)
layout.addWidget(self.nid_box)
self.region_type_box = PropertyBox("Region Type", ComboBox, self)
self.region_type_box.edit.addItems(regions.region_types)
# self.region_type_box.edit.setValue(self.current.region_type)
self.region_type_box.edit.currentIndexChanged.connect(
self.region_type_changed)
layout.addWidget(self.region_type_box)
self.sub_nid_box = PropertyBox("Event Name", QLineEdit, self)
# if self.current.sub_nid and self.current.region_type == 'Event':
# self.sub_nid_box.edit.setText(self.current.sub_nid)
self.sub_nid_box.edit.textChanged.connect(self.sub_nid_changed)
layout.addWidget(self.sub_nid_box)
self.condition_box = PropertyBox("Condition", QLineEdit, self)
# self.condition_box.edit.setText(self.current.condition)
self.condition_box.edit.textChanged.connect(self.condition_changed)
layout.addWidget(self.condition_box)
self.only_once_box = PropertyCheckBox("Only once?", QCheckBox, self)
self.only_once_box.edit.stateChanged.connect(self.only_once_changed)
layout.addWidget(self.only_once_box)
self.status_box = SkillBox(self)
# if self.current.sub_nid and self.current.region_type == 'Status':
# self.status_box.edit.setText(self.current.sub_nid)
self.status_box.edit.currentIndexChanged.connect(self.status_changed)
layout.addWidget(self.status_box)
self.sub_nid_box.hide()
self.condition_box.hide()
self.only_once_box.hide()
self.status_box.hide()
def nid_changed(self, text):
if self.current:
self.current.nid = text
self.window.update_list()
def nid_done_editing(self):
if not self.current:
return
# Check validity of nid!
other_nids = [d.nid for d in self._data.values()
if d is not self.current]
if self.current.nid in other_nids:
QMessageBox.warning(self.window, 'Warning',
'Region ID %s already in use' % self.current.nid)
self.current.nid = str_utils.get_next_name(
self.current.nid, other_nids)
self._data.update_nid(self.current, self.current.nid)
self.window.update_list()
def region_type_changed(self, index):
if not self.current:
return
self.current.region_type = self.region_type_box.edit.currentText().lower()
if self.current.region_type in ('normal', 'formation'):
self.sub_nid_box.hide()
self.condition_box.hide()
self.only_once_box.hide()
self.status_box.hide()
elif self.current.region_type == 'status':
self.sub_nid_box.hide()
self.condition_box.hide()
self.only_once_box.hide()
self.status_box.show()
elif self.current.region_type == 'event':
self.sub_nid_box.show()
self.condition_box.show()
self.only_once_box.show()
self.status_box.hide()
def sub_nid_changed(self, text):
self.current.sub_nid = text
self.window.update_list()
def condition_changed(self, text):
self.current.condition = text
self.window.update_list()
def only_once_changed(self, state):
self.current.only_once = bool(state)
def status_changed(self, index):
self.current.sub_nid = self.status_box.edit.currentText()
self.window.update_list()
def set_current(self, current):
self.current = current
self.nid_box.edit.setText(current.nid)
self.region_type_box.edit.setValue(current.region_type)
self.condition_box.edit.setText(current.condition)
self.only_once_box.edit.setChecked(bool(current.only_once))
if current.region_type == 'status':
self.status_box.edit.setValue(current.sub_nid)
elif current.region_type == 'event':
self.sub_nid_box.edit.setText(current.sub_nid)
else:
self.sub_nid_box.edit.setText('')
| 36.721831 | 97 | 0.641097 |
cee0523c9503447b318b408b8c5d2a9bd496ce4b | 770 | py | Python | items/models.py | stanwood/traidoo-api | 83e8599f2eb54352988bac27e2d4acd30734816d | [
"MIT"
] | 3 | 2020-05-05T12:12:09.000Z | 2020-05-08T08:48:16.000Z | items/models.py | stanwood/traidoo-api | 83e8599f2eb54352988bac27e2d4acd30734816d | [
"MIT"
] | 160 | 2020-05-19T13:03:43.000Z | 2022-03-12T00:35:28.000Z | items/models.py | stanwood/traidoo-api | 83e8599f2eb54352988bac27e2d4acd30734816d | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.translation import gettext_lazy as _
from core.db.base import BaseAbstractModel
from products.models import Product
class Item(BaseAbstractModel):
product = models.ForeignKey(
Product,
related_name="items",
on_delete=models.CASCADE,
verbose_name=_("Product"),
)
latest_delivery_date = models.DateField(verbose_name=_("Latest delivery date"))
quantity = models.PositiveIntegerField(verbose_name=_("Quantity"))
valid_from = models.DateField(
null=True, blank=True, verbose_name=_("Date valid from")
)
class Meta:
unique_together = (("product", "latest_delivery_date"),)
verbose_name = _("Item")
verbose_name_plural = _("Items")
| 29.615385 | 83 | 0.696104 |
00468c18085a1f68c5f754c8b5206e7597379500 | 1,458 | py | Python | liionpack/protocols.py | Saransh-cpp/liionpack | 82ab00ad257ccb2bc8dbcb71bc08baa30fa9ed43 | [
"MIT"
] | 23 | 2021-09-28T15:48:48.000Z | 2022-03-15T10:34:35.000Z | liionpack/protocols.py | Saransh-cpp/liionpack | 82ab00ad257ccb2bc8dbcb71bc08baa30fa9ed43 | [
"MIT"
] | 131 | 2021-09-29T09:18:50.000Z | 2022-03-03T06:09:52.000Z | liionpack/protocols.py | Saransh-cpp/liionpack | 82ab00ad257ccb2bc8dbcb71bc08baa30fa9ed43 | [
"MIT"
] | 17 | 2021-09-29T13:14:00.000Z | 2022-03-24T11:01:19.000Z | #
# Experimental protocol
#
import numpy as np
def generate_protocol_from_experiment(experiment, flatten=True):
"""
Args:
experiment (pybamm.Experiment):
The experiment to generate the protocol from.
flatten (bool):
Default is True: return all steps in one list otherwise return a
list of lists for each operating command.
Returns:
list:
a sequence of terminal currents to apply at each timestep
"""
protocol = []
for i, op in enumerate(experiment.operating_conditions):
proto = []
t = op["time"]
dt = op["period"]
if t % dt != 0:
raise ValueError("Time must be an integer multiple of the period")
I, typ = op["electric"]
if typ != "A":
raise ValueError("Only constant current operations are supported")
if I.__class__ is str:
# drive cycle
dc_data = op["dc_data"]
proto.extend(dc_data[:, 1].tolist())
elif I.__class__ is np.ndarray:
# drive cycle old
proto.extend(I[:, 1].tolist())
else:
proto.extend([I] * int(t / dt))
if i == 0:
# Include initial state when not a drive cycle for first op
proto = [proto[0]] + proto
if flatten:
protocol.extend(proto)
else:
protocol.append(proto)
return protocol
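if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). It assumes a
    # pybamm installation whose Experiment class accepts instruction strings
    # and a `period` keyword; the instruction text below is illustrative only.
    import pybamm

    experiment = pybamm.Experiment(
        ["Discharge at 1 A for 60 seconds"], period="10 seconds"
    )
    # Flattens to a list of terminal currents, one entry per 10 s step.
    print(generate_protocol_from_experiment(experiment))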
| 28.588235 | 78 | 0.553498 |
22b3113470984777d0625ed95f14518e1241345f | 9,902 | py | Python | chemreac/symbolic.py | bjodah/chemreac | dbe38a10cf6b88e66192bcc998721b61aabbd9dc | [
"BSD-2-Clause"
] | 14 | 2015-03-11T21:46:15.000Z | 2020-06-06T16:01:38.000Z | chemreac/symbolic.py | bjodah/chemreac | dbe38a10cf6b88e66192bcc998721b61aabbd9dc | [
"BSD-2-Clause"
] | 20 | 2015-01-21T16:11:36.000Z | 2020-01-06T10:30:46.000Z | chemreac/symbolic.py | chemreac/chemreac | dbe38a10cf6b88e66192bcc998721b61aabbd9dc | [
"BSD-2-Clause"
] | 3 | 2015-08-13T12:06:17.000Z | 2021-12-17T01:12:20.000Z | # -*- coding: utf-8 -*-
"""
This module is used for verification of mathematical correctness in the
implementation (used by the test suite). It uses SymPy to do derivations
symbolically. Its performance is therefore too poor for use in integration
of large systems in general.
"""
from __future__ import print_function, division, absolute_import
from functools import reduce
import inspect
from itertools import product
from math import exp
from operator import add
import os
import sympy as sp
from .core import ReactionDiffusionBase
from .util.grid import padded_centers, pxci_to_bi, stencil_pxci_lbounds
FLAT, CYLINDRICAL, SPHERICAL = 'f', 'c', 's'
class SymRD(ReactionDiffusionBase):
@classmethod
def from_rd(cls, rd, **kwargs):
return cls(*tuple(kwargs.get(attr, getattr(rd, attr)) for attr in
inspect.getargspec(cls.__init__).args[1:]))
def expb(self, x):
if self.use_log2:
return 2**x
else:
return exp(x)
def __init__(self, n, stoich_active, stoich_prod, k, N=0, D=None,
z_chg=None, mobility=None, x=None, stoich_inact=None,
geom=FLAT, logy=False, logt=False, logx=False, nstencil=None,
lrefl=True, rrefl=True, auto_efield=False,
surf_chg=(0.0, 0.0), eps_rel=1.0, g_values=None,
g_value_parents=None, fields=None, modulated_rxns=None,
modulation=None, n_jac_diags=-1, use_log2=False, **kwargs):
# Save args
self.n = n
self.stoich_active = stoich_active
self.stoich_prod = stoich_prod
self.k = k
self.D = D if D is not None else [0]*n
self.z_chg = z_chg if z_chg is not None else [0]*n
self.mobility = mobility if mobility is not None else [0]*n
self.x = x if x is not None else [0, 1]
self.N = len(self.x) - 1
if N not in [None, 0]:
assert self.N == N
self.stoich_inact = stoich_inact or [[]*len(stoich_active)]
self.geom = geom
self.logy = logy
self.logt = logt
self.logx = logx
self.nstencil = nstencil or 3
self.lrefl = lrefl
self.rrefl = rrefl
self.auto_efield = auto_efield
self.surf_chg = surf_chg
self.eps_rel = eps_rel
self.g_values = g_values or []
self.g_value_parents = g_value_parents or []
self.fields = [] if fields is None else fields
self.modulated_rxns = [] if modulated_rxns is None else modulated_rxns
self.modulation = [] if modulation is None else modulation
_minus_one = -1
self.n_jac_diags = (int(os.environ.get('CHEMREAC_N_JAC_DIAGS', 1)) if
n_jac_diags is _minus_one else n_jac_diags)
self.use_log2 = use_log2
if kwargs:
raise KeyError("Don't know what to do with:", kwargs)
# Set attributes used later
self._t = sp.Symbol('t')
self._nsidep = (self.nstencil-1) // 2
if self.n_jac_diags == 0:
self.n_jac_diags = self._nsidep
self._y = sp.symbols('y:'+str(self.n*self.N))
self._xc = padded_centers(self.x, self._nsidep)
self._lb = stencil_pxci_lbounds(self.nstencil, self.N,
self.lrefl, self.rrefl)
self._xc_bi_map = pxci_to_bi(self.nstencil, self.N)
self._f = [0]*self.n*self.N
self.efield = [0]*self.N
# Reactions
for ri, (k, sactv, sinact, sprod) in enumerate(zip(
self.k, self.stoich_active, self.stoich_inact,
self.stoich_prod)):
c_actv = map(sactv.count, range(self.n))
c_inact = map(sinact.count, range(self.n))
c_prod = map(sprod.count, range(self.n))
c_totl = [nprd - nactv - ninact for nactv, ninact, nprd in zip(
c_actv, c_inact, c_prod)]
for bi in range(self.N):
r = k
if ri in self.modulated_rxns:
r *= self.modulation[self.modulated_rxns.index(ri)][bi]
for si in sactv:
r *= self.y(bi, si)
for si in range(self.n):
self._f[bi*self.n + si] += c_totl[si]*r
for fi, fld in enumerate(self.fields):
for bi in range(self.N):
if self.g_value_parents[fi] == -1:
gfact = 1
else:
gfact = self.y(bi, self.g_value_parents[fi])
for si in range(self.n):
self._f[bi*self.n + si] += sp.S(
fld[bi])*self.g_values[fi][si]*gfact
if self.N > 1:
# Diffusion
self.D_wghts = []
self.A_wghts = []
for bi in range(self.N):
local_x_serie = self._xc[
self._lb[bi]:self._lb[bi]+self.nstencil]
l_x_rnd = self._xc[bi+self._nsidep]
w = sp.finite_diff_weights(2, local_x_serie, l_x_rnd)
self.D_wghts.append(w[-1][-1])
self.A_wghts.append(w[-2][-1])
for wi in range(self.nstencil):
if self.logx:
if geom == FLAT:
self.D_wghts[bi][wi] -= w[-2][-1][wi]
elif geom == CYLINDRICAL:
self.A_wghts[bi][wi] += w[-3][-1][wi]
elif geom == SPHERICAL:
self.D_wghts[bi][wi] += w[-2][-1][wi]
self.A_wghts[bi][wi] += 2*w[-3][-1][wi]
else:
raise ValueError("Unknown geom: %s" % geom)
self.D_wghts[bi][wi] *= self.expb(-2*l_x_rnd)
self.A_wghts[bi][wi] *= self.expb(-l_x_rnd)
else:
if geom == FLAT:
pass
elif geom == CYLINDRICAL:
self.D_wghts[bi][wi] += w[-2][-1][wi]/l_x_rnd
self.A_wghts[bi][wi] += w[-3][-1][wi]/l_x_rnd
elif geom == SPHERICAL:
self.D_wghts[bi][wi] += 2*w[-2][-1][wi]/l_x_rnd
self.A_wghts[bi][wi] += 2*w[-3][-1][wi]/l_x_rnd
else:
raise ValueError("Unknown geom: %s" % geom)
for bi, (dw, aw) in enumerate(zip(self.D_wghts, self.A_wghts)):
for si in range(self.n):
d_terms = [dw[k]*self.y(
self._xc_bi_map[self._lb[bi]+k], si
) for k in range(self.nstencil)]
self._f[bi*self.n + si] += self.D[si]*reduce(
add, d_terms)
a_terms = [aw[k]*self.y(
self._xc_bi_map[self._lb[bi]+k], si
) for k in range(self.nstencil)]
self._f[bi*self.n + si] += (
self.mobility[si]*self.efield[bi]*reduce(
add, a_terms))
if self.logy or self.logt:
logbfactor = sp.log(2) if use_log2 else 1
for bi in range(self.N):
for si in range(self.n):
if self.logy:
self._f[bi*self.n+si] /= self.y(bi, si)
if not self.logt:
self._f[bi*self.n+si] /= logbfactor
if self.logt:
self._f[bi*self.n+si] *= (2**self._t) if self.use_log2 else sp.exp(self._t)
if not self.logy:
self._f[bi*self.n+si] *= logbfactor
def y(self, bi, si):
if self.logy:
return (2**self._y[bi*self.n+si]) if self.use_log2 else sp.exp(self._y[bi*self.n+si])
else:
return self._y[bi*self.n+si]
def f(self, t, y, fout):
subsd = dict(zip(self._y, y))
subsd[self._t] = t
fout[:] = [expr.xreplace(subsd) for expr in self._f]
@property
def jacobian(self):
try:
return self._jacobian
except AttributeError:
fmat = sp.Matrix(1, self.n*self.N, lambda q, i: self._f[i])
self._jacobian = fmat.jacobian(self._y)
return self._jacobian
def dense_jac(self, t, y, Jout):
subsd = dict(zip(self._y, y))
subsd[self._t] = t
for ri, row in enumerate(self.jacobian.tolist()):
for ci in range(max(0,
ri - self.n*self.n_jac_diags),
min(self.n*self.N,
ri + self.n*self.n_jac_diags + 1)):
Jout[ri, ci] = row[ci].xreplace(subsd)
# Jout[:, :] = [[expr for expr in row]
# for row in ]
dense_jac_rmaj = dense_jac_cmaj = dense_jac
def banded_jac(self, t, y, Jout):
from sym.util import banded_jacobian
jac = banded_jacobian(self._f, self._y,
self.n*self.n_jac_diags,
self.n*self.n_jac_diags)
width = 2*self.n*self.n_jac_diags + 1
if Jout.shape[0] == width:
pad = 0
elif Jout.shape[0] == 3*self.n*self.n_jac_diags + 1:
pad = self.n*self.n_jac_diags
else:
raise ValueError("Ill-shaped Jout")
subsd = dict(zip(self._y, y))
for ri, ci in product(range(width), range(self.n*self.N)):
Jout[ri + pad, ci] = sp.S(jac[ri, ci]).xreplace(subsd)
def compressed_jac(self, t, y, Jout):
from block_diag_ilu import get_compressed
J = self.alloc_jout(banded=False)
self.dense_jac(t, y, J)
Jout[:] = get_compressed(J, self.N, self.n, self.n_jac_diags)
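if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): a single
    # irreversible reaction A -> B with unit rate constant in one bin.
    # SymRD assembles the symbolic right-hand side, which can then be
    # evaluated through the ordinary f() interface.
    rd = SymRD(2, [[0]], [[1]], [1.0])
    print(rd._f)               # expected to resemble [-1.0*y0, 1.0*y0]
    fout = [0, 0]
    rd.f(0.0, [1.0, 0.0], fout)
    print(fout)                # the rhs evaluated at y = [1, 0], t = 0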
| 41.087137 | 99 | 0.507574 |
cb777c8b43456298154441d19ab8645ee8594817 | 512 | py | Python | data-process/orchestration/airflow/src/dags/test_dag.py | dimastatz/video-streaming-analytics | 09ff799049920f81c6766a87286948c3eb09f65d | [
"MIT"
] | null | null | null | data-process/orchestration/airflow/src/dags/test_dag.py | dimastatz/video-streaming-analytics | 09ff799049920f81c6766a87286948c3eb09f65d | [
"MIT"
] | null | null | null | data-process/orchestration/airflow/src/dags/test_dag.py | dimastatz/video-streaming-analytics | 09ff799049920f81c6766a87286948c3eb09f65d | [
"MIT"
] | 1 | 2022-02-04T10:20:10.000Z | 2022-02-04T10:20:10.000Z | from airflow import DAG
from datetime import datetime
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
def print_hello():
return 'Hello world from first Airflow DAG!'
dag = DAG('hello_world', description='Hello World DAG',
schedule_interval='0 12 * * *',
start_date=datetime(2017, 3, 20), catchup=False)
hello_operator = PythonOperator(task_id='hello_task', python_callable=print_hello, dag=dag)
hello_operator | 28.444444 | 91 | 0.765625 |
b0341733da7a53a516b8ea90b336c172851fca45 | 2,525 | py | Python | online_pharmacy/customer/migrations/0001_initial.py | geekyJock8/online_pharmacy | 892852857786ec17259b71f2a178896cd6d12e60 | [
"Apache-2.0"
] | 5 | 2020-09-09T13:59:17.000Z | 2021-09-30T07:20:55.000Z | online_pharmacy/customer/migrations/0001_initial.py | geekyJock8/online_pharmacy | 892852857786ec17259b71f2a178896cd6d12e60 | [
"Apache-2.0"
] | 10 | 2017-09-03T06:13:31.000Z | 2017-10-10T15:22:30.000Z | online_pharmacy/customer/migrations/0001_initial.py | geekyJock8/Online-Pharmacy | 892852857786ec17259b71f2a178896cd6d12e60 | [
"Apache-2.0"
] | 9 | 2017-09-03T04:59:18.000Z | 2019-10-17T11:33:18.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-25 14:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='address_list',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address_street', models.TextField()),
('address_city', models.CharField(max_length=20)),
('address_state', models.CharField(max_length=20)),
('address_pincode', models.IntegerField()),
],
),
migrations.CreateModel(
name='contact_customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('contact_no', models.IntegerField()),
],
),
migrations.CreateModel(
name='customer',
fields=[
('username', models.CharField(max_length=20, primary_key=True, serialize=False)),
('password', models.CharField(max_length=20)),
('fname', models.CharField(max_length=100)),
('lname', models.CharField(max_length=100)),
('age', models.IntegerField()),
('email', models.EmailField(max_length=254)),
('cart_id', models.IntegerField()),
],
),
migrations.CreateModel(
name='notifications',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_of_arrival', models.DateTimeField(auto_now_add=True)),
('content', models.TextField()),
('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customer.customer')),
],
),
migrations.AddField(
model_name='contact_customer',
name='username',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customer.customer'),
),
migrations.AddField(
model_name='address_list',
name='username',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customer.customer'),
),
]
| 38.257576 | 117 | 0.569505 |
12c3ee719c86f387ec2e75b8b36fc1796f340947 | 957 | py | Python | stock/base_io/mysql_helper.py | WZYStudio/QuantJob | a3716533b054f923a5249b6902cc4a1ae031910b | [
"BSD-2-Clause"
] | null | null | null | stock/base_io/mysql_helper.py | WZYStudio/QuantJob | a3716533b054f923a5249b6902cc4a1ae031910b | [
"BSD-2-Clause"
] | null | null | null | stock/base_io/mysql_helper.py | WZYStudio/QuantJob | a3716533b054f923a5249b6902cc4a1ae031910b | [
"BSD-2-Clause"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
from sqlalchemy.orm import sessionmaker
_mysql_engine = None
_mysql_session = None
def get_db_url(db_name):
return "mysql+pymysql://wzy:123@192.168.50.100/" + db_name + "?charset=utf8"
def get_db_engine_by_name(db_name, echo=True):
url = get_db_url(db_name)
global _mysql_engine
if not _mysql_engine:
_mysql_engine = create_engine(url, echo=echo)
if not database_exists(url):
create_database(url)
# Awkward database names (e.g. containing hyphens) must be wrapped in backticks, otherwise the SQL statement fails
use_db_dialect = 'USE ' + "`" + db_name + "`"
_mysql_engine.execute(use_db_dialect)
return _mysql_engine
def get_db_session_by_name(db_name):
global _mysql_session
if _mysql_session is None:
get_db_engine_by_name(db_name)
session_cls = sessionmaker(bind=_mysql_engine)
_mysql_session = session_cls()
return _mysql_session
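if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): open a session against a
    # hypothetical database name and run a trivial query. "demo_db" is an
    # assumption for demonstration, not a name used elsewhere in this project.
    from sqlalchemy import text

    session = get_db_session_by_name("demo_db")
    print(session.execute(text("SELECT 1")).scalar())
    session.close()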
| 23.925 | 80 | 0.730408 |
b1683235ce3eed40d85e1af45e98887c5ebc3ee3 | 6,506 | py | Python | calc/EPIC2Datetime.py | NOAA-PMEL/EcoFOCI_eofs | 18e5f1b3e2b76d5427a6227b266ae85c91a73289 | [
"MIT"
] | 1 | 2018-06-14T19:48:13.000Z | 2018-06-14T19:48:13.000Z | calc/EPIC2Datetime.py | NOAA-PMEL/EcoFOCI_eofs | 18e5f1b3e2b76d5427a6227b266ae85c91a73289 | [
"MIT"
] | 32 | 2020-08-28T03:24:00.000Z | 2022-03-28T14:15:11.000Z | calc/EPIC2Datetime.py | NOAA-PMEL/EcoFOCI_eofs | 18e5f1b3e2b76d5427a6227b266ae85c91a73289 | [
"MIT"
] | null | null | null | # filename: EPIC2Datetime.py
r'''Module to convert PMEL-EPIC timeword to a python datetime
Modifications
-------------
2018-6-12 : SBELL - python3 print statements
2016-11-14: SBELL - create routine to add datetime offset
'''
import datetime
from netCDF4 import date2num
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2016, 7, 21)
__modified__ = datetime.datetime(2016, 7, 21)
__version__ = "0.1.0"
__status__ = "Development"
def EPIC2Datetime( timeword_1, timeword_2):
r'''
PMEL-EPIC time stored in NetCDF files is defined as two timewords: time, time2.
The first (time) represents "True Julian Day",
the second (time2) is "msec since 0:00 GMT"
This allowed for integer representation of millisecond resolution
(without fear of floating point rounding issues) but is rarely encountered
in data that has one-minute or sparser resolution.
The usage of two timewords for dimensions is not advised
(example above, time2 is a periodic value not well-suited for a dimension)
It is not supported by current CF convention which specifies using numeric values to represent:
"seconds since 2001-1-1 0:0:0" or "days since 2001-1-1 0:0:0" or "{units} since {initial date}"
as there is no reference date explicitly stated nor is there a calendar mentioned.
This routine will convert an EPIC two-time word value to a python datetime in order to ease
the usage with NetCDF date2num utility (for CF and COARDS conventions)
Parameters
----------
timeword_1 : array_like
first EPIC timeword (time)
timeword_2 : array-like
second EPIC timeword (time2)
Returns
-------
Outputs : array_like
Python datetime structure representing the EPIC datetime
Notes
-----
TODO
Examples
--------
TODO
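One worked case (added as an illustration; the values follow directly from the
1968-05-23 reference date used in the implementation below):
>>> EPIC2Datetime([2440000], [43200000])
[datetime.datetime(1968, 5, 23, 12, 0)]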
References
----------
ftp://ftp.unidata.ucar.edu/pub/netcdf/Conventions/PMEL-EPIC/Conventions
As long as this document survives. It defines the conventions of the PMEL-EPIC standard.
A misprint exists that specifies a reference date of 1968-05-23 as 2400000 when it should be 2440000
http://aa.usno.navy.mil/data/docs/JulianDate.php
Converts the "true julian date" and confirms the misprint in the documentation.
(Note: the US Naval site above uses seconds since 12:00 UTC whereas EPIC uses seconds since 00:00 UTC)
'''
# We can use the date defined in the conventions
#1968-05-23 => 2440000
#4713-01-01 BCE => 0 (be aware that this uses the julian calendar not the gregorian or mixed
# and may result in a 10 day error if the calendar is not appropriately identified)
#
#Using a more modern reference date skips this problem and is sufficient if the dates of all data
# are after 1582.
ref_time_dt = datetime.datetime(1968, 5, 23)
ref_time_epic = 2440000
delta_days = [int(x) - ref_time_epic for x in timeword_1]
delta_seconds = [int(x)/1000 for x in timeword_2]
epic_dt = [ref_time_dt + datetime.timedelta(a,c) for a,c in zip(delta_days,delta_seconds)]
return(epic_dt)
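# Worked example of the conversion above:
#   timeword_1 = 2440000, timeword_2 = 43200000 gives
#   delta_days    = 2440000 - 2440000 = 0
#   delta_seconds = 43200000 / 1000   = 43200 s (12 hours)
#   so EPIC2Datetime([2440000], [43200000]) -> [datetime.datetime(1968, 5, 23, 12, 0)]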
def get_UDUNITS(epic_dt,time_since_str='days since 1900-1-1'):
'''Using netCDF4.date2num (also available in matplotlib) to convert a datetime to a time since reference date.
{units} since {yyyy-mm-dd}
Parameters
----------
epic_dt : array_like
list of EPIC times in datetime structure
time_since_str : str
string to represent {units} since {reference date}: eg days since 1981-08-31
Returns
-------
Outputs : array_like
numerical value of date since reference time in units specified
Notes
-----
    See netCDF4.date2num for full examples. This function is just a wrapper that supplies
    a default reference date string when none is provided.
'''
udnum = date2num(epic_dt, time_since_str)
return(udnum)
def Datetime2EPIC(epic_dt):
r'''
Convert a datetime object into a PMEL-EPIC two word time value.
PMEL-EPIC time stored in NetCDF files is defined as two timewords: time, time2.
The first (time) represents "True Julian Day",
the second (time2) is "msec since 0:00 GMT"
Parameters
----------
epic_dt : array of datetime objects
Python datetime structure representing the EPIC datetime
Returns
-------
Outputs : array_like (time, time1)
time: array of integer values representing true julian day
time1: array of integer values representing milliseconds since 00:00 UTC
'''
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
if not isinstance(epic_dt,list):
time = offset + epic_dt.toordinal()
time1 = int((epic_dt.hour * (60. * 60. * 1000.)) + \
(epic_dt.minute * (60. * 1000.)) + \
(epic_dt.second * (1000.)))
else:
time = [offset + x.toordinal() for x in epic_dt]
time1 = [int((x.hour * (60. * 60. * 1000.)) + \
(x.minute * (60. * 1000.)) + \
(x.second * (1000.))) for x in epic_dt]
return(time, time1 )
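# Worked example of the reverse conversion for a single datetime:
#   Datetime2EPIC(datetime.datetime(1968, 5, 23, 12, 0)) -> (2440000, 43200000)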
"""------------------------------------------------------------------------------------------------"""
def main():
pass
def test_1d():
testdate = EPIC2Datetime([2440000,],[43200000+3600*1000,])
print("\n{0}\n".format(testdate))
for time_format in ['days','hours','seconds']:
time_since_str = time_format + ' since 1900-1-1'
print("{0}:value \n{1}:units\n".format(get_UDUNITS(testdate,time_since_str),time_since_str))
def test_2d():
testdate = EPIC2Datetime([2440000,2450000],[43200000,0])
print("\n{0}\n".format(testdate))
for time_format in ['days','hours','seconds']:
time_since_str = time_format + ' since 1900-1-1'
print("{0}:value \n{1}:units\n".format(get_UDUNITS(testdate,time_since_str),time_since_str))
def test_1d_EPIC():
testdate = EPIC2Datetime([2440000,],[43200000+3600*1000,])
print(testdate)
testdate1 = Datetime2EPIC(testdate)
print(testdate1)
def test_2d_EPIC():
testdate = EPIC2Datetime([2440000,2450000],[43200000,0])
print(testdate)
testdate1 = Datetime2EPIC(testdate)
print(testdate1)
if __name__ == "__main__":
main()
| 33.709845 | 114 | 0.647249 |
aa3620de1576db31bc13b4b4fb067491584a8475 | 16,345 | py | Python | tests/test_sockets.py | AIEdX/emscripten | 5bcd268db583d0ac7de4aedcd29e439d69553176 | [
"MIT"
] | null | null | null | tests/test_sockets.py | AIEdX/emscripten | 5bcd268db583d0ac7de4aedcd29e439d69553176 | [
"MIT"
] | 1 | 2022-01-05T07:26:51.000Z | 2022-01-05T07:26:51.000Z | tests/test_sockets.py | AIEdX/emscripten | 5bcd268db583d0ac7de4aedcd29e439d69553176 | [
"MIT"
] | null | null | null | # Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import multiprocessing
import os
import socket
import shutil
import sys
import time
from subprocess import Popen
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner sockets')
import clang_native
import common
from common import BrowserCore, no_windows, create_file, test_file, read_file
from common import parameterized, requires_native_clang, PYTHON
from tools import shared, config, utils
from tools.shared import EMCC, path_from_root, run_process, CLANG_CC
npm_checked = False
def clean_processes(processes):
for p in processes:
if getattr(p, 'exitcode', None) is None and getattr(p, 'returncode', None) is None:
# ask nicely (to try and catch the children)
try:
p.terminate() # SIGTERM
except OSError:
pass
time.sleep(1)
      # send the terminate signal again immediately afterwards. If the process did not die before, this should clean it.
      try:
        p.terminate() # SIGTERM again
except OSError:
pass
class WebsockifyServerHarness():
def __init__(self, filename, args, listen_port, do_server_check=True):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.target_port = listen_port - 1
self.args = args or []
self.do_server_check = do_server_check
def __enter__(self):
# compile the server
# NOTE empty filename support is a hack to support
# the current test_enet
if self.filename:
cmd = [CLANG_CC, test_file(self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + clang_native.get_clang_native_args() + self.args
print(cmd)
run_process(cmd, env=clang_native.get_clang_native_env())
process = Popen([os.path.abspath('server')])
self.processes.append(process)
import websockify
# start the websocket proxy
print('running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port), file=sys.stderr)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
self.websockify = multiprocessing.Process(target=wsp.start_server)
self.websockify.start()
self.processes.append(self.websockify)
# Make sure both the actual server and the websocket proxy are running
for i in range(10):
try:
if self.do_server_check:
server_sock = socket.create_connection(('localhost', self.target_port), timeout=1)
server_sock.close()
proxy_sock = socket.create_connection(('localhost', self.listen_port), timeout=1)
proxy_sock.close()
break
except IOError:
time.sleep(1)
else:
clean_processes(self.processes)
raise Exception('[Websockify failed to start up in a timely manner]')
print('[Websockify on process %s]' % str(self.processes[-2:]))
return self
def __exit__(self, *args, **kwargs):
# try to kill the websockify proxy gracefully
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# clean up any processes we started
clean_processes(self.processes)
class CompiledServerHarness():
def __init__(self, filename, args, listen_port):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.args = args or []
def __enter__(self):
# assuming this is only used for WebSocket tests at the moment, validate that
# the ws module is installed
global npm_checked
if not npm_checked:
child = run_process(config.NODE_JS + ['-e', 'require("ws");'], check=False)
assert child.returncode == 0, '"ws" node module not found. you may need to run npm install'
npm_checked = True
# compile the server
proc = run_process([EMCC, '-Werror', test_file(self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen(config.NODE_JS + ['server.js'])
self.processes.append(process)
return self
def __exit__(self, *args, **kwargs):
# clean up any processes we started
clean_processes(self.processes)
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
# Executes a native executable server process
class BackgroundServerProcess():
def __init__(self, args):
self.processes = []
self.args = args
def __enter__(self):
print('Running background server: ' + str(self.args))
process = Popen(self.args)
self.processes.append(process)
return self
def __exit__(self, *args, **kwargs):
clean_processes(self.processes)
def NodeJsWebSocketEchoServerProcess():
return BackgroundServerProcess(config.NODE_JS + [test_file('websocket/nodejs_websocket_echo_server.js')])
def PythonTcpEchoServerProcess(port):
return BackgroundServerProcess([PYTHON, test_file('websocket/tcp_echo_server.py'), port])
class sockets(BrowserCore):
emcc_args = []
@classmethod
def setUpClass(cls):
super().setUpClass()
print()
print('Running the socket tests. Make sure the browser allows popups from localhost.')
print()
# Use emscripten root for node module lookup. This is needed because the unit tests each
# run with CWD set to a temporary directory outside the emscripten tree.
print('Setting NODE_PATH=' + path_from_root('node_modules'))
os.environ['NODE_PATH'] = path_from_root('node_modules')
# Note: in the WebsockifyServerHarness and CompiledServerHarness tests below, explicitly use
# consecutive server listen ports, because server teardown might not occur deterministically
# (python dtor time) and is a bit racy.
# WebsockifyServerHarness uses two port numbers, x and x-1, so increment it by two.
# CompiledServerHarness only uses one. Start with 49160 & 49159 as the first server port
# addresses. If adding new tests, increment the used port addresses below.
@parameterized({
'websockify': [WebsockifyServerHarness, 49160, ['-DTEST_DGRAM=0']],
'tcp': [CompiledServerHarness, 49161, ['-DTEST_DGRAM=0']],
'udp': [CompiledServerHarness, 49162, ['-DTEST_DGRAM=1']],
# The following forces non-NULL addr and addlen parameters for the accept call
'accept_addr': [CompiledServerHarness, 49163, ['-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1']],
})
def test_sockets_echo(self, harness_class, port, args):
if harness_class == WebsockifyServerHarness and common.EMTEST_LACKS_NATIVE_CLANG:
self.skipTest('requires native clang')
with harness_class(test_file('sockets/test_sockets_echo_server.c'), args, port) as harness:
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-DSOCKK=%d' % harness.listen_port] + args)
def test_sockets_echo_pthreads(self):
with CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 49161) as harness:
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-DSOCKK=%d' % harness.listen_port])
def test_sdl2_sockets_echo(self):
with CompiledServerHarness('sdl2_net_server.c', ['-sUSE_SDL=2', '-sUSE_SDL_NET=2'], 49164) as harness:
self.btest_exit('sdl2_net_client.c', args=['-sUSE_SDL=2', '-sUSE_SDL_NET=2', '-DSOCKK=%d' % harness.listen_port])
@parameterized({
'websockify': [WebsockifyServerHarness, 49166, ['-DTEST_DGRAM=0']],
'tcp': [CompiledServerHarness, 49167, ['-DTEST_DGRAM=0']],
'udp': [CompiledServerHarness, 49168, ['-DTEST_DGRAM=1']],
# The following forces non-NULL addr and addlen parameters for the accept call
'accept_addr': [CompiledServerHarness, 49169, ['-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1']],
})
def test_sockets_async_echo(self, harness_class, port, args):
if harness_class == WebsockifyServerHarness and common.EMTEST_LACKS_NATIVE_CLANG:
self.skipTest('requires native clang')
args.append('-DTEST_ASYNC=1')
with harness_class(test_file('sockets/test_sockets_echo_server.c'), args, port) as harness:
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-DSOCKK=%d' % harness.listen_port] + args)
def test_sockets_async_bad_port(self):
# Deliberately attempt a connection on a port that will fail to test the error callback and
# getsockopt
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-DSOCKK=49169', '-DTEST_ASYNC=1'])
@parameterized({
'websockify': [WebsockifyServerHarness, 49171, ['-DTEST_DGRAM=0']],
'tcp': [CompiledServerHarness, 49172, ['-DTEST_DGRAM=0']],
'udp': [CompiledServerHarness, 49173, ['-DTEST_DGRAM=1']],
})
def test_sockets_echo_bigdata(self, harness_class, port, args):
if harness_class == WebsockifyServerHarness and common.EMTEST_LACKS_NATIVE_CLANG:
self.skipTest('requires native clang')
sockets_include = '-I' + test_file('sockets')
# generate a large string literal to use as our message
message = ''
for i in range(256 * 256 * 2):
message += str(chr(ord('a') + (i % 26)))
# re-write the client test with this literal (it's too big to pass via command line)
src = read_file(test_file('sockets/test_sockets_echo_client.c'))
create_file('test_sockets_echo_bigdata.c', src.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message))
with harness_class(test_file('sockets/test_sockets_echo_server.c'), args, port) as harness:
self.btest_exit('test_sockets_echo_bigdata.c', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port] + args)
@no_windows('This test is Unix-specific.')
def test_sockets_partial(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_partial_server.c'), [], 49180),
CompiledServerHarness(test_file('sockets/test_sockets_partial_server.c'), [], 49181)
]:
with harness:
self.btest_exit(test_file('sockets/test_sockets_partial_client.c'), assert_returncode=165, args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_down(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_select_server_down_server.c'), [], 49190, do_server_check=False),
CompiledServerHarness(test_file('sockets/test_sockets_select_server_down_server.c'), [], 49191)
]:
with harness:
self.btest_exit(test_file('sockets/test_sockets_select_server_down_client.c'), args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_closes_connection_rw(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
]:
with harness:
self.btest_exit(test_file('sockets/test_sockets_select_server_closes_connection_client_rw.c'), args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test uses Unix-specific build architecture.')
def test_enet(self):
# this is also a good test of raw usage of emconfigure and emmake
shared.try_delete('enet')
shutil.copytree(test_file('third_party', 'enet'), 'enet')
with utils.chdir('enet'):
self.run_process([path_from_root('emconfigure'), './configure', '--disable-shared'])
self.run_process([path_from_root('emmake'), 'make'])
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + self.in_dir('enet', 'include')]
with CompiledServerHarness(test_file('sockets/test_enet_server.c'), enet, 49210) as harness:
self.btest_exit(test_file('sockets/test_enet_client.c'), args=enet + ['-DSOCKK=%d' % harness.listen_port])
@parameterized({
'native': [WebsockifyServerHarness, 59160, ['-DTEST_DGRAM=0']],
'tcp': [CompiledServerHarness, 59162, ['-DTEST_DGRAM=0']],
'udp': [CompiledServerHarness, 59164, ['-DTEST_DGRAM=1']],
})
def test_nodejs_sockets_echo(self, harness_class, port, args):
if harness_class == WebsockifyServerHarness and common.EMTEST_LACKS_NATIVE_CLANG:
self.skipTest('requires native clang')
# Basic test of node client against both a Websockified and compiled echo server.
with harness_class(test_file('sockets/test_sockets_echo_server.c'), args, port) as harness:
expected = 'do_msg_read: read 14 bytes'
self.do_runf(test_file('sockets/test_sockets_echo_client.c'), expected, emcc_args=['-DSOCKK=%d' % harness.listen_port] + args)
@requires_native_clang
def test_nodejs_sockets_echo_subprotocol(self):
# Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
# server because as long as the subprotocol list contains binary it will configure itself to accept binary
# This test also checks that the connect url contains the correct subprotocols.
with WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 59166):
self.run_process([EMCC, '-Werror', test_file('sockets/test_sockets_echo_client.c'), '-o', 'client.js', '-sSOCKET_DEBUG', '-sWEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'])
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
# Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
# In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
# the connection would fail without us specifying a valid WebSocket URL in the configuration.
print("\nTesting runtime WebSocket configuration.\n")
create_file('websocket_pre.js', '''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
with WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 59168):
self.run_process([EMCC, '-Werror', test_file('sockets/test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js=websocket_pre.js', '-sSOCKET_DEBUG', '-DSOCKK=12345'])
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
# Test Emscripten WebSockets API to send and receive text and binary messages against an echo server.
# N.B. running this test requires 'npm install ws' in Emscripten root directory
def test_websocket_send(self):
with NodeJsWebSocketEchoServerProcess():
self.btest_exit(test_file('websocket/test_websocket_send.c'), args=['-lwebsocket', '-sNO_EXIT_RUNTIME', '-sWEBSOCKET_DEBUG'])
# Test that native POSIX sockets API can be used by proxying calls to an intermediate WebSockets
# -> POSIX sockets bridge server
def test_posix_proxy_sockets(self):
# Build the websocket bridge server
self.run_process(['cmake', path_from_root('tools/websocket_to_posix_proxy')])
self.run_process(['cmake', '--build', '.'])
if os.name == 'nt': # This is not quite exact, instead of "isWindows()" this should be "If CMake defaults to building with Visual Studio", but there is no good check for that, so assume Windows==VS.
proxy_server = os.path.join(self.get_dir(), 'Debug', 'websocket_to_posix_proxy.exe')
else:
proxy_server = os.path.join(self.get_dir(), 'websocket_to_posix_proxy')
with BackgroundServerProcess([proxy_server, '8080']):
with PythonTcpEchoServerProcess('7777'):
# Build and run the TCP echo client program with Emscripten
self.btest_exit(test_file('websocket/tcp_echo_client.c'), args=['-lwebsocket', '-sPROXY_POSIX_SOCKETS', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
| 47.239884 | 202 | 0.719241 |
877d196fa42001decc4355f72625a92a199ec827 | 354 | py | Python | compmatscipy/data/elemental_gibbs_energies.py | YunyeongChoi/compmatscipy | 44d0fe9f4d3806a21ee3bfcbca24b42120d91193 | [
"MIT"
] | 5 | 2019-08-19T14:48:31.000Z | 2022-03-24T20:08:31.000Z | compmatscipy/data/elemental_gibbs_energies.py | YunyeongChoi/compmatscipy | 44d0fe9f4d3806a21ee3bfcbca24b42120d91193 | [
"MIT"
] | 1 | 2019-08-24T16:51:29.000Z | 2019-08-24T16:51:29.000Z | compmatscipy/data/elemental_gibbs_energies.py | YunyeongChoi/compmatscipy | 44d0fe9f4d3806a21ee3bfcbca24b42120d91193 | [
"MIT"
] | 5 | 2019-07-16T19:15:28.000Z | 2021-03-29T04:49:54.000Z | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import os, json
this_dir, this_filename = os.path.split(__file__)
DATA_PATH = os.path.join(this_dir, "data", "elemental_gibbs_energies.json")
def elemental_gibbs_energies_data():
with open(DATA_PATH) as f:
return json.load(f) | 22.125 | 76 | 0.680791 |
e8e95e24e3a49c568248586a9fffa395190ad476 | 182 | py | Python | conftest.py | AleksandrSmoliak/mts_auto | c343141c847b9f44755db0a6a36eb17053615570 | [
"Apache-2.0"
] | null | null | null | conftest.py | AleksandrSmoliak/mts_auto | c343141c847b9f44755db0a6a36eb17053615570 | [
"Apache-2.0"
] | null | null | null | conftest.py | AleksandrSmoliak/mts_auto | c343141c847b9f44755db0a6a36eb17053615570 | [
"Apache-2.0"
] | null | null | null | from fixture.Application import Application
import pytest
@pytest.fixture
def app(request):
fixture = Application()
request.addfinalizer(fixture.destroy)
return fixture
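# A hypothetical test that consumes the fixture simply names it as a parameter;
# pytest injects the Application instance and calls destroy() after the test:
#
#     def test_app_fixture(app):
#         assert app is not None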
| 20.222222 | 43 | 0.774725 |
38691d4cc3c74467092aa89dc0e9ed3ad6e97796 | 75 | py | Python | cv/version.py | skull8944/BachelorCapFilter | 363679d7c20798290cc567705ed73d05ec039649 | [
"bzip2-1.0.6"
] | 2 | 2021-07-08T13:20:47.000Z | 2022-01-16T13:04:34.000Z | cv/version.py | skull8944/BachelorCapFilter | 363679d7c20798290cc567705ed73d05ec039649 | [
"bzip2-1.0.6"
] | null | null | null | cv/version.py | skull8944/BachelorCapFilter | 363679d7c20798290cc567705ed73d05ec039649 | [
"bzip2-1.0.6"
] | null | null | null | opencv_version = "4.5.2.54"
contrib = True
headless = False
ci_build = True | 18.75 | 27 | 0.733333 |
1063e182c8416dcbdebfbd60b0b4025784b567d0 | 19,350 | gyp | Python | node.gyp | strongloop-forks/io.js | 4e58211bb7f638b689e5e8e407b70b3c29be952f | [
"Artistic-2.0"
] | 2 | 2021-02-13T08:39:17.000Z | 2021-02-19T13:44:11.000Z | node.gyp | strongloop-forks/io.js | 4e58211bb7f638b689e5e8e407b70b3c29be952f | [
"Artistic-2.0"
] | null | null | null | node.gyp | strongloop-forks/io.js | 4e58211bb7f638b689e5e8e407b70b3c29be952f | [
"Artistic-2.0"
] | null | null | null | {
'variables': {
'v8_use_snapshot%': 'true',
'node_use_dtrace%': 'false',
'node_use_etw%': 'false',
'node_use_perfctr%': 'false',
'node_has_winsdk%': 'false',
'node_shared_v8%': 'false',
'node_shared_zlib%': 'false',
'node_shared_http_parser%': 'false',
'node_shared_libuv%': 'false',
'node_use_openssl%': 'true',
'node_shared_openssl%': 'false',
'node_use_mdb%': 'false',
'node_v8_options%': '',
'library_files': [
'src/node.js',
'lib/_debug_agent.js',
'lib/_debugger.js',
'lib/_linklist.js',
'lib/assert.js',
'lib/buffer.js',
'lib/child_process.js',
'lib/console.js',
'lib/constants.js',
'lib/crypto.js',
'lib/cluster.js',
'lib/dgram.js',
'lib/dns.js',
'lib/domain.js',
'lib/events.js',
'lib/freelist.js',
'lib/fs.js',
'lib/http.js',
'lib/_http_agent.js',
'lib/_http_client.js',
'lib/_http_common.js',
'lib/_http_incoming.js',
'lib/_http_outgoing.js',
'lib/_http_server.js',
'lib/https.js',
'lib/module.js',
'lib/net.js',
'lib/os.js',
'lib/path.js',
'lib/process.js',
'lib/punycode.js',
'lib/querystring.js',
'lib/readline.js',
'lib/repl.js',
'lib/smalloc.js',
'lib/stream.js',
'lib/_stream_readable.js',
'lib/_stream_writable.js',
'lib/_stream_duplex.js',
'lib/_stream_transform.js',
'lib/_stream_passthrough.js',
'lib/string_decoder.js',
'lib/sys.js',
'lib/timers.js',
'lib/tls.js',
'lib/_tls_common.js',
'lib/_tls_legacy.js',
'lib/_tls_wrap.js',
'lib/tty.js',
'lib/url.js',
'lib/util.js',
'lib/v8.js',
'lib/vm.js',
'lib/zlib.js',
],
},
'targets': [
{
'target_name': 'iojs',
'type': 'executable',
'dependencies': [
'node_js2c#host',
'deps/cares/cares.gyp:cares'
],
'include_dirs': [
'src',
'tools/msvs/genfiles',
'deps/uv/src/ares',
'<(SHARED_INTERMEDIATE_DIR)' # for node_natives.h
],
'sources': [
'src/debug-agent.cc',
'src/async-wrap.cc',
'src/fs_event_wrap.cc',
'src/cares_wrap.cc',
'src/handle_wrap.cc',
'src/node.cc',
'src/node_buffer.cc',
'src/node_constants.cc',
'src/node_contextify.cc',
'src/node_file.cc',
'src/node_http_parser.cc',
'src/node_javascript.cc',
'src/node_main.cc',
'src/node_os.cc',
'src/node_v8.cc',
'src/node_v8_platform.cc',
'src/node_stat_watcher.cc',
'src/node_watchdog.cc',
'src/node_zlib.cc',
'src/node_i18n.cc',
'src/pipe_wrap.cc',
'src/signal_wrap.cc',
'src/smalloc.cc',
'src/spawn_sync.cc',
'src/string_bytes.cc',
'src/stream_wrap.cc',
'src/tcp_wrap.cc',
'src/timer_wrap.cc',
'src/tty_wrap.cc',
'src/process_wrap.cc',
'src/udp_wrap.cc',
'src/uv.cc',
# headers to make for a more pleasant IDE experience
'src/async-wrap.h',
'src/async-wrap-inl.h',
'src/base-object.h',
'src/base-object-inl.h',
'src/debug-agent.h',
'src/env.h',
'src/env-inl.h',
'src/handle_wrap.h',
'src/node.h',
'src/node_buffer.h',
'src/node_constants.h',
'src/node_file.h',
'src/node_http_parser.h',
'src/node_internals.h',
'src/node_javascript.h',
'src/node_root_certs.h',
'src/node_version.h',
'src/node_watchdog.h',
'src/node_wrap.h',
'src/node_i18n.h',
'src/pipe_wrap.h',
'src/queue.h',
'src/smalloc.h',
'src/tty_wrap.h',
'src/tcp_wrap.h',
'src/udp_wrap.h',
'src/req_wrap.h',
'src/string_bytes.h',
'src/stream_wrap.h',
'src/tree.h',
'src/util.h',
'src/util-inl.h',
'src/util.cc',
'deps/http_parser/http_parser.h',
'<(SHARED_INTERMEDIATE_DIR)/node_natives.h',
# javascript files to make for an even more pleasant IDE experience
'<@(library_files)',
# node.gyp is added to the project by default.
'common.gypi',
],
'defines': [
'NODE_ARCH="<(target_arch)"',
'NODE_PLATFORM="<(OS)"',
'NODE_TAG="<(node_tag)"',
'NODE_V8_OPTIONS="<(node_v8_options)"',
'NODE_WANT_INTERNALS=1',
],
'conditions': [
[ 'v8_enable_i18n_support==1', {
'defines': [ 'NODE_HAVE_I18N_SUPPORT=1' ],
'dependencies': [
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
],
'conditions': [
[ 'icu_small=="true"', {
'defines': [ 'NODE_HAVE_SMALL_ICU=1' ],
}]],
}],
[ 'node_use_openssl=="true"', {
'defines': [ 'HAVE_OPENSSL=1' ],
'sources': [
'src/node_crypto.cc',
'src/node_crypto_bio.cc',
'src/node_crypto_clienthello.cc',
'src/node_crypto.h',
'src/node_crypto_bio.h',
'src/node_crypto_clienthello.h',
'src/tls_wrap.cc',
'src/tls_wrap.h'
],
'conditions': [
[ 'node_shared_openssl=="false"', {
'dependencies': [
'./deps/openssl/openssl.gyp:openssl',
# For tests
'./deps/openssl/openssl.gyp:openssl-cli',
],
# Do not let unused OpenSSL symbols to slip away
'xcode_settings': {
'OTHER_LDFLAGS': [
'-Wl,-force_load,<(PRODUCT_DIR)/libopenssl.a',
],
},
'conditions': [
['OS in "linux freebsd"', {
'ldflags': [
'-Wl,--whole-archive <(PRODUCT_DIR)/libopenssl.a -Wl,--no-whole-archive',
],
}],
],
}]]
}, {
'defines': [ 'HAVE_OPENSSL=0' ]
}],
[ 'node_use_dtrace=="true"', {
'defines': [ 'HAVE_DTRACE=1' ],
'dependencies': [
'node_dtrace_header',
'specialize_node_d',
],
'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)' ],
#
# DTrace is supported on linux, solaris, mac, and bsd. There are
# three object files associated with DTrace support, but they're
# not all used all the time:
#
# node_dtrace.o all configurations
# node_dtrace_ustack.o not supported on mac and linux
# node_dtrace_provider.o All except OS X. "dtrace -G" is not
# used on OS X.
#
# Note that node_dtrace_provider.cc and node_dtrace_ustack.cc do not
# actually exist. They're listed here to trick GYP into linking the
# corresponding object files into the final "node" executable. These
# object files are generated by "dtrace -G" using custom actions
# below, and the GYP-generated Makefiles will properly build them when
# needed.
#
'sources': [ 'src/node_dtrace.cc' ],
'conditions': [
[ 'OS=="linux"', {
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/node_dtrace_provider.o'
],
}],
[ 'OS!="mac" and OS!="linux"', {
'sources': [
'src/node_dtrace_ustack.cc',
'src/node_dtrace_provider.cc',
]
}
] ]
} ],
[ 'node_use_mdb=="true"', {
'dependencies': [ 'node_mdb' ],
'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)' ],
'sources': [
'src/node_mdb.cc',
],
} ],
[ 'node_use_etw=="true"', {
'defines': [ 'HAVE_ETW=1' ],
'dependencies': [ 'node_etw' ],
'sources': [
'src/node_win32_etw_provider.h',
'src/node_win32_etw_provider-inl.h',
'src/node_win32_etw_provider.cc',
'src/node_dtrace.cc',
'tools/msvs/genfiles/node_etw_provider.h',
'tools/msvs/genfiles/node_etw_provider.rc',
]
} ],
[ 'node_use_perfctr=="true"', {
'defines': [ 'HAVE_PERFCTR=1' ],
'dependencies': [ 'node_perfctr' ],
'sources': [
'src/node_win32_perfctr_provider.h',
'src/node_win32_perfctr_provider.cc',
'src/node_counters.cc',
'src/node_counters.h',
'tools/msvs/genfiles/node_perfctr_provider.rc',
]
} ],
[ 'v8_postmortem_support=="true"', {
'dependencies': [ 'deps/v8/tools/gyp/v8.gyp:postmortem-metadata' ],
'xcode_settings': {
'OTHER_LDFLAGS': [
'-Wl,-force_load,<(V8_BASE)',
],
},
}],
[ 'node_shared_v8=="false"', {
'sources': [
'deps/v8/include/v8.h',
'deps/v8/include/v8-debug.h',
],
'dependencies': [ 'deps/v8/tools/gyp/v8.gyp:v8' ],
}],
[ 'node_shared_zlib=="false"', {
'dependencies': [ 'deps/zlib/zlib.gyp:zlib' ],
}],
[ 'node_shared_http_parser=="false"', {
'dependencies': [ 'deps/http_parser/http_parser.gyp:http_parser' ],
}],
[ 'node_shared_libuv=="false"', {
'dependencies': [ 'deps/uv/uv.gyp:libuv' ],
}],
[ 'OS=="win"', {
'sources': [
'src/res/node.rc',
],
'defines': [
'FD_SETSIZE=1024',
# we need to use node's preferred "win32" rather than gyp's preferred "win"
'NODE_PLATFORM="win32"',
'_UNICODE=1',
],
'libraries': [ '-lpsapi.lib' ]
}, { # POSIX
'defines': [ '__POSIX__' ],
}],
[ 'OS=="mac"', {
# linking Corefoundation is needed since certain OSX debugging tools
# like Instruments require it for some features
'libraries': [ '-framework CoreFoundation' ],
'defines!': [
'NODE_PLATFORM="mac"',
],
'defines': [
# we need to use node's preferred "darwin" rather than gyp's preferred "mac"
'NODE_PLATFORM="darwin"',
],
}],
[ 'OS=="freebsd"', {
'libraries': [
'-lutil',
'-lkvm',
],
}],
[ 'OS=="solaris"', {
'libraries': [
'-lkstat',
'-lumem',
],
'defines!': [
'NODE_PLATFORM="solaris"',
],
'defines': [
# we need to use node's preferred "sunos"
# rather than gyp's preferred "solaris"
'NODE_PLATFORM="sunos"',
],
}],
[ 'OS=="freebsd" or OS=="linux"', {
'ldflags': [ '-Wl,-z,noexecstack' ],
}],
[ 'OS=="sunos"', {
'ldflags': [ '-Wl,-M,/usr/lib/ld/map.noexstk' ],
}],
[
'OS in "linux freebsd" and node_shared_v8=="false"', {
'ldflags': [
'-Wl,--whole-archive <(V8_BASE) -Wl,--no-whole-archive',
],
}],
],
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': 1, # /subsystem:console
},
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': 'src/res/node.exe.extra.manifest'
}
},
},
# generate ETW header and resource files
{
'target_name': 'node_etw',
'type': 'none',
'conditions': [
[ 'node_use_etw=="true" and node_has_winsdk=="true"', {
'actions': [
{
'action_name': 'node_etw',
'inputs': [ 'src/res/node_etw_provider.man' ],
'outputs': [
'tools/msvs/genfiles/node_etw_provider.rc',
'tools/msvs/genfiles/node_etw_provider.h',
'tools/msvs/genfiles/node_etw_providerTEMP.BIN',
],
'action': [ 'mc <@(_inputs) -h tools/msvs/genfiles -r tools/msvs/genfiles' ]
}
]
} ]
]
},
# generate perf counter header and resource files
{
'target_name': 'node_perfctr',
'type': 'none',
'conditions': [
[ 'node_use_perfctr=="true" and node_has_winsdk=="true"', {
'actions': [
{
'action_name': 'node_perfctr_man',
'inputs': [ 'src/res/node_perfctr_provider.man' ],
'outputs': [
'tools/msvs/genfiles/node_perfctr_provider.h',
'tools/msvs/genfiles/node_perfctr_provider.rc',
'tools/msvs/genfiles/MSG00001.BIN',
],
'action': [ 'ctrpp <@(_inputs) '
'-o tools/msvs/genfiles/node_perfctr_provider.h '
'-rc tools/msvs/genfiles/node_perfctr_provider.rc'
]
},
],
} ]
]
},
{
'target_name': 'node_js2c',
'type': 'none',
'toolsets': ['host'],
'actions': [
{
'action_name': 'node_js2c',
'inputs': [
'<@(library_files)',
'./config.gypi',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/node_natives.h',
],
'conditions': [
[ 'node_use_dtrace=="false" and node_use_etw=="false"', {
'inputs': [ 'src/notrace_macros.py' ]
}],
[ 'node_use_perfctr=="false"', {
'inputs': [ 'src/perfctr_macros.py' ]
}]
],
'action': [
'<(python)',
'tools/js2c.py',
'<@(_outputs)',
'<@(_inputs)',
],
},
],
}, # end node_js2c
{
'target_name': 'node_dtrace_header',
'type': 'none',
'conditions': [
[ 'node_use_dtrace=="true" and OS!="linux"', {
'actions': [
{
'action_name': 'node_dtrace_header',
'inputs': [ 'src/node_provider.d' ],
'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/node_provider.h' ],
'action': [ 'dtrace', '-h', '-xnolibs', '-s', '<@(_inputs)',
'-o', '<@(_outputs)' ]
}
]
} ],
[ 'node_use_dtrace=="true" and OS=="linux"', {
'actions': [
{
'action_name': 'node_dtrace_header',
'inputs': [ 'src/node_provider.d' ],
'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/node_provider.h' ],
'action': [ 'dtrace', '-h', '-s', '<@(_inputs)',
'-o', '<@(_outputs)' ]
}
]
} ],
]
},
{
'target_name': 'node_mdb',
'type': 'none',
'conditions': [
[ 'node_use_mdb=="true"',
{
'dependencies': [ 'deps/mdb_v8/mdb_v8.gyp:mdb_v8' ],
'actions': [
{
'action_name': 'node_mdb',
'inputs': [ '<(PRODUCT_DIR)/obj.target/deps/mdb_v8/mdb_v8.so' ],
'outputs': [ '<(PRODUCT_DIR)/obj.target/node/src/node_mdb.o' ],
'conditions': [
[ 'target_arch=="ia32"', {
'action': [ 'elfwrap', '-o', '<@(_outputs)', '<@(_inputs)' ],
} ],
[ 'target_arch=="x64"', {
'action': [ 'elfwrap', '-64', '-o', '<@(_outputs)', '<@(_inputs)' ],
} ],
],
},
],
},
],
],
},
{
'target_name': 'node_dtrace_provider',
'type': 'none',
'conditions': [
[ 'node_use_dtrace=="true" and OS!="mac" and OS!="linux"', {
'actions': [
{
'action_name': 'node_dtrace_provider_o',
'inputs': [
'<(OBJ_DIR)/node/src/node_dtrace.o',
],
'outputs': [
'<(OBJ_DIR)/node/src/node_dtrace_provider.o'
],
'action': [ 'dtrace', '-G', '-xnolibs', '-s', 'src/node_provider.d',
'<@(_inputs)', '-o', '<@(_outputs)' ]
}
]
}],
[ 'node_use_dtrace=="true" and OS=="linux"', {
'actions': [
{
'action_name': 'node_dtrace_provider_o',
'inputs': [ 'src/node_provider.d' ],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/node_dtrace_provider.o'
],
'action': [
'dtrace', '-C', '-G', '-s', '<@(_inputs)', '-o', '<@(_outputs)'
],
}
],
}],
]
},
{
'target_name': 'node_dtrace_ustack',
'type': 'none',
'conditions': [
[ 'node_use_dtrace=="true" and OS!="mac" and OS!="linux"', {
'actions': [
{
'action_name': 'node_dtrace_ustack_constants',
'inputs': [
'<(V8_BASE)'
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/v8constants.h'
],
'action': [
'tools/genv8constants.py',
'<@(_outputs)',
'<@(_inputs)'
]
},
{
'action_name': 'node_dtrace_ustack',
'inputs': [
'src/v8ustack.d',
'<(SHARED_INTERMEDIATE_DIR)/v8constants.h'
],
'outputs': [
'<(OBJ_DIR)/node/src/node_dtrace_ustack.o'
],
'conditions': [
[ 'target_arch=="ia32"', {
'action': [
'dtrace', '-32', '-I<(SHARED_INTERMEDIATE_DIR)', '-Isrc',
'-C', '-G', '-s', 'src/v8ustack.d', '-o', '<@(_outputs)',
]
} ],
[ 'target_arch=="x64"', {
'action': [
'dtrace', '-64', '-I<(SHARED_INTERMEDIATE_DIR)', '-Isrc',
'-C', '-G', '-s', 'src/v8ustack.d', '-o', '<@(_outputs)',
]
} ],
]
},
]
} ],
]
},
{
'target_name': 'specialize_node_d',
'type': 'none',
'conditions': [
[ 'node_use_dtrace=="true"', {
'actions': [
{
'action_name': 'specialize_node_d',
'inputs': [
'src/node.d'
],
'outputs': [
'<(PRODUCT_DIR)/node.d',
],
'action': [
'tools/specialize_node_d.py',
'<@(_outputs)',
'<@(_inputs)',
'<@(OS)',
'<@(target_arch)',
],
},
],
} ],
]
}
] # end targets
}
| 30.66561 | 93 | 0.443152 |
c8bb947b9c010fbfbc4b7582471d9f0f028a6deb | 12,332 | py | Python | qa/rpc-tests/p2p-acceptblock.py | IluminumProject/iluminum | 9685edf64161c66205c89ee72d711b0144733735 | [
"MIT"
] | 1 | 2018-04-14T16:47:00.000Z | 2018-04-14T16:47:00.000Z | qa/rpc-tests/p2p-acceptblock.py | IluminumProject/iluminum | 9685edf64161c66205c89ee72d711b0144733735 | [
"MIT"
] | null | null | null | qa/rpc-tests/p2p-acceptblock.py | IluminumProject/iluminum | 9685edf64161c66205c89ee72d711b0144733735 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b. Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("ILUMD", "iluminumd"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print "Unrequested block too far-ahead not processed"
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
| 42.378007 | 107 | 0.651395 |
d72208940cdfe9ab63f750dc5bb2246949d41043 | 615 | py | Python | pysachi/renderers/__init__.py | Nauja/pyrabbit | 43d4b958d7f0afd34d5eb2451086d0d9dc7f288a | [
"MIT"
] | 1 | 2020-11-29T19:13:56.000Z | 2020-11-29T19:13:56.000Z | pysachi/renderers/__init__.py | Nauja/pysachi | 43d4b958d7f0afd34d5eb2451086d0d9dc7f288a | [
"MIT"
] | null | null | null | pysachi/renderers/__init__.py | Nauja/pysachi | 43d4b958d7f0afd34d5eb2451086d0d9dc7f288a | [
"MIT"
] | null | null | null | __all__ = ["load", "html", "raw"]
from . import html
from . import raw
import pkg_resources
from typing import Any
def load(name: str) -> Any:
"""Get the first renderer identified by `name`.
The renderer must have been registered as an entry point for
`pysachi.renderers` via setuptools.
.. code-block:: python
renderer = pysachi.renderers.load("raw")
:param name: Renderer's name.
:returns: Found renderer or None.
"""
for entry_point in pkg_resources.iter_entry_points("pysachi.renderers"):
if entry_point.name == name:
return entry_point.load()
return None
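# A renderer becomes discoverable by load() once it is registered under the
# "pysachi.renderers" entry-point group; a hypothetical setup.py sketch:
#
#     setup(
#         ...,
#         entry_points={
#             "pysachi.renderers": ["raw = pysachi.renderers.raw"],
#         },
#     )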
| 24.6 | 76 | 0.686179 |
0cc3db77d08cfe534e8c97e0fc0811374d03bdbb | 2,337 | py | Python | tests/test_check_equal_real.py | cmarqu/checkv | ead86d930a2e6fba823f3f875cc8f6a051d8c6c8 | [
"BSD-3-Clause"
] | 1 | 2019-02-24T17:57:05.000Z | 2019-02-24T17:57:05.000Z | tests/test_check_equal_real.py | cmarqu/checkv | ead86d930a2e6fba823f3f875cc8f6a051d8c6c8 | [
"BSD-3-Clause"
] | 2 | 2019-02-24T17:42:08.000Z | 2019-03-01T09:23:29.000Z | tests/test_check_equal_real.py | cmarqu/checkv | ead86d930a2e6fba823f3f875cc8f6a051d8c6c8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""Tests for ``check_equal()`` for floating point values."""
# Copyright checkv contributors
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
import checkv
def test_check_equal_real_0_0_pass(caplog):
checkv.check_equal(0.0, 0.0)
for record in caplog.records:
assert record.levelname == "INFO"
assert "Equality check passed - Got abs (0.0 - 0.0) <= 0.0." in caplog.text
def test_check_equal_real_0_1_pass(caplog):
checkv.check_equal(0.0, 1.0, max_diff=1.0)
for record in caplog.records:
assert record.levelname == "INFO"
assert "Equality check passed - Got abs (0.0 - 1.0) <= 1.0." in caplog.text
def test_check_equal_real_0_1_fail(caplog):
checkv.check_equal(0.0, 1.0)
for record in caplog.records:
assert record.levelname == "ERROR"
assert "Equality check failed - Got abs (0.0 - 1.0) > 0.0." in caplog.text
def test_check_equal_real_0_1_fail2(caplog):
checkv.check_equal(0.0, 1.0, max_diff=0.9)
for record in caplog.records:
assert record.levelname == "ERROR"
assert "Equality check failed - Got abs (0.0 - 1.0) > 0.9." in caplog.text
my_checker = "FIXME"
def test_check_equal_real_my_checker__0_0_pass(caplog):
checkv.check_equal(0.0, 0.0, checker=my_checker)
for record in caplog.records:
assert record.levelname == "INFO"
assert "Equality check passed - Got abs (0.0 - 0.0) <= 0.0." in caplog.text
def test_check_equal_real_my_checker__0_1_pass(caplog):
checkv.check_equal(0.0, 1.0, max_diff=1.0, checker=my_checker)
for record in caplog.records:
assert record.levelname == "INFO"
assert "Equality check passed - Got abs (0.0 - 1.0) <= 1.0." in caplog.text
def test_check_equal_real_my_checker__0_1_fail(caplog):
checkv.check_equal(0.0, 1.0, checker=my_checker)
for record in caplog.records:
assert record.levelname == "ERROR"
assert "Equality check failed - Got abs (0.0 - 1.0) > 0.0." in caplog.text
def test_check_equal_real_my_checker__0_1_fail2(caplog):
checkv.check_equal(0.0, 1.0, max_diff=0.9, checker=my_checker)
for record in caplog.records:
assert record.levelname == "ERROR"
assert "Equality check failed - Got abs (0.0 - 1.0) > 0.9." in caplog.text
| 30.350649 | 79 | 0.693624 |
b7cbdc73afe523dc0d7edb211797f494c20cb8e8 | 5,627 | py | Python | pydevices/RfxDevices/SPIDER.py | fluffynukeit/mdsplus | a204d2e9d26554bb035945595210f2a57d187250 | [
"BSD-2-Clause"
] | 53 | 2015-01-05T08:55:13.000Z | 2022-03-30T07:43:41.000Z | pydevices/RfxDevices/SPIDER.py | fluffynukeit/mdsplus | a204d2e9d26554bb035945595210f2a57d187250 | [
"BSD-2-Clause"
] | 1,231 | 2015-02-02T18:54:02.000Z | 2022-03-30T08:27:45.000Z | pydevices/RfxDevices/SPIDER.py | fluffynukeit/mdsplus | a204d2e9d26554bb035945595210f2a57d187250 | [
"BSD-2-Clause"
] | 44 | 2015-05-24T20:18:06.000Z | 2022-02-07T13:51:04.000Z | #
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from MDSplus import mdsExceptions, Device
class SPIDER(Device):
"""MARTe configuration"""
parts = [{'path': ':COMMENT', 'type': 'text'},
{'path': ':CAMERA_FREQ', 'type': 'numeric', 'value': 10},
{'path': ':CAMERA_START', 'type': 'numeric', 'value': 0},
{'path': ':CAMERA_DURAT', 'type': 'numeric', 'value': 60},
{'path': ':CAEN_FREQ', 'type': 'numeric', 'value': 2},
{'path': ':CAEN_START', 'type': 'numeric', 'value': 0},
{'path': ':CAEN_DURAT', 'type': 'numeric', 'value': 30},
{'path': ':NI_FREQ', 'type': 'numeric', 'value': 10000},
{'path': ':NI_START', 'type': 'numeric', 'value': 0},
{'path': ':NI_DURAT', 'type': 'numeric', 'value': 60},
{'path': ':NI6368_FREQ', 'type': 'numeric', 'value': 10000},
{'path': ':NI6368_START', 'type': 'numeric', 'value': 0},
{'path': ':NI6368_DURAT', 'type': 'numeric', 'value': 60},
{'path': ':BREAK_DEAD', 'type': 'numeric', 'value': 10},
{'path': ':BREAK_REC', 'type': 'numeric', 'value': 0},
{'path': '.WAVE_1', 'type': 'structure'},
{'path': '.WAVE_1:WAVE', 'type': 'signal'},
{'path': '.WAVE_1:MIN_X', 'type': 'numeric'},
{'path': '.WAVE_1:MAX_X', 'type': 'numeric'},
{'path': '.WAVE_1:MIN_Y', 'type': 'numeric'},
{'path': '.WAVE_1:MAX_Y', 'type': 'numeric'},
{'path': '.WAVE_2', 'type': 'structure'},
{'path': '.WAVE_2:WAVE', 'type': 'signal'},
{'path': '.WAVE_2:MIN_X', 'type': 'numeric'},
{'path': '.WAVE_2:MAX_X', 'type': 'numeric'},
{'path': '.WAVE_2:MIN_Y', 'type': 'numeric'},
{'path': '.WAVE_2:MAX_Y', 'type': 'numeric'},
{'path': '.WAVE_3', 'type': 'structure'},
{'path': '.WAVE_3:WAVE', 'type': 'signal'},
{'path': '.WAVE_3:MIN_X', 'type': 'numeric'},
{'path': '.WAVE_3:MAX_X', 'type': 'numeric'},
{'path': '.WAVE_3:MIN_Y', 'type': 'numeric'},
{'path': '.WAVE_3:MAX_Y', 'type': 'numeric'},
{'path': '.WAVE_4', 'type': 'structure'},
{'path': '.WAVE_4:WAVE', 'type': 'signal'},
{'path': '.WAVE_4:MIN_X', 'type': 'numeric'},
{'path': '.WAVE_4:MAX_X', 'type': 'numeric'},
{'path': '.WAVE_4:MIN_Y', 'type': 'numeric'},
{'path': '.WAVE_4:MAX_Y', 'type': 'numeric'},
{'path': '.WAVE_5', 'type': 'structure'},
{'path': '.WAVE_5:WAVE', 'type': 'signal'},
{'path': '.WAVE_5:MIN_X', 'type': 'numeric'},
{'path': '.WAVE_5:MAX_X', 'type': 'numeric'},
{'path': '.WAVE_5:MIN_Y', 'type': 'numeric'},
{'path': '.WAVE_5:MAX_Y', 'type': 'numeric'},
{'path': '.WAVE_6', 'type': 'structure'},
{'path': '.WAVE_6:WAVE', 'type': 'signal'},
{'path': '.WAVE_6:MIN_X', 'type': 'numeric'},
{'path': '.WAVE_6:MAX_X', 'type': 'numeric'},
{'path': '.WAVE_6:MIN_Y', 'type': 'numeric'},
{'path': '.WAVE_6:MAX_Y', 'type': 'numeric'},
{'path': '.WAVE_7', 'type': 'structure'},
{'path': '.WAVE_7:WAVE', 'type': 'signal'},
{'path': '.WAVE_7:MIN_X', 'type': 'numeric'},
{'path': '.WAVE_7:MAX_X', 'type': 'numeric'},
{'path': '.WAVE_7:MIN_Y', 'type': 'numeric'},
{'path': '.WAVE_7:MAX_Y', 'type': 'numeric'},
{'path': '.WAVE_8', 'type': 'structure'},
{'path': '.WAVE_8:WAVE', 'type': 'signal'},
{'path': '.WAVE_8:MIN_X', 'type': 'numeric'},
{'path': '.WAVE_8:MAX_X', 'type': 'numeric'},
{'path': '.WAVE_8:MIN_Y', 'type': 'numeric'},
{'path': '.WAVE_8:MAX_Y', 'type': 'numeric'},
{'path': '.WAVE_REC', 'type': 'structure'},
{'path': '.WAVE_REC:WAVE', 'type': 'signal'},
{'path': '.WAVE_REC:MIN_X', 'type': 'numeric'},
{'path': '.WAVE_REC:MAX_X', 'type': 'numeric'},
{'path': '.WAVE_REC:MIN_Y', 'type': 'numeric'},
{'path': '.WAVE_REC:MAX_Y', 'type': 'numeric'}]
| 56.27 | 80 | 0.531367 |
91964eb7c7d9afb61c06e1799d47815f036e45f1 | 2,086 | py | Python | tests/test_tab.py | FadedCoder/SwagLyrics-For-Spotify | c62794dca0c029b5be0f5eade3030d3c00787fe4 | [
"MIT"
] | 295 | 2019-03-16T05:00:21.000Z | 2022-03-25T02:07:24.000Z | tests/test_tab.py | FadedCoder/SwagLyrics-For-Spotify | c62794dca0c029b5be0f5eade3030d3c00787fe4 | [
"MIT"
] | 1,920 | 2019-03-13T10:46:30.000Z | 2022-03-31T04:17:28.000Z | tests/test_tab.py | FadedCoder/SwagLyrics-For-Spotify | c62794dca0c029b5be0f5eade3030d3c00787fe4 | [
"MIT"
] | 48 | 2019-03-15T08:57:58.000Z | 2022-02-06T05:34:11.000Z | """
Contains unit tests for tab.py
"""
import flask_testing
from SwSpotify import SpotifyNotRunning
from mock import patch
from swaglyrics.tab import app
class Tests(flask_testing.TestCase):
"""
Unit tests
"""
def setup(self):
pass
def create_app(self):
return app
@patch('SwSpotify.spotify.current', return_value=("Blank Space", "Taylor Swift"))
def test_lyrics_are_shown_in_tab(self, mock_song):
"""
        test that tab.py is working
"""
with self.app.test_client() as c:
response = c.get('/')
self.assert_template_used("lyrics.html")
@patch('SwSpotify.spotify.current', side_effect=SpotifyNotRunning)
def test_no_lyrics_are_shown_in_tab(self, mock_song):
"""
        test that tab.py is working when no song is playing
"""
with self.app.test_client() as c:
response = c.get('/')
self.assertIn(b'Nothing playing at the moment.', response.data)
@patch('SwSpotify.spotify.current', side_effect=SpotifyNotRunning)
def test_songchanged_returns_no(self, mock_current):
"""
        test that songChanged can return no
"""
with self.app.test_client() as c:
response = c.get('/songChanged')
self.assertEqual(response.data, b'no')
@patch('SwSpotify.spotify.current', return_value=("Blank Space", "Taylor Swift"))
def test_songchanged_can_raise_songplaying(self, mock_current):
"""
        test that songChanged can raise SongPlaying
"""
with self.app.test_client() as c:
response = c.get('/songChanged')
self.assertEqual(response.data, b'no')
@patch('SwSpotify.spotify.current', return_value=('Rodeo', 'Lil Nas X'))
def test_songchanged_returns_yes(self, mock_current):
"""
Test that songChanged can return yes
"""
with self.app.test_client() as c:
response = c.get('/songChanged')
self.assertEqual(response.data, b'yes')
if __name__ == '__main__':
pass
| 29.8 | 85 | 0.627517 |
39fb719eeca4cd58f120e82d05e72fece024b3d1 | 382 | py | Python | Email-Sender/email-sender.py | ragreenburg/Amazing-Python-Scripts | d91d0ddd312eb82ac307b58d6d09e6ca19384ea8 | [
"MIT"
] | 3 | 2021-01-14T13:54:22.000Z | 2021-11-15T11:26:51.000Z | Email-Sender/email-sender.py | Tanujcbe/Amazing-Python-Scripts | 4904df3b40e06f750b11ba807cf243eac60ebba3 | [
"MIT"
] | 1 | 2021-02-24T02:06:21.000Z | 2021-02-24T02:06:21.000Z | Email-Sender/email-sender.py | Tanujcbe/Amazing-Python-Scripts | 4904df3b40e06f750b11ba807cf243eac60ebba3 | [
"MIT"
] | 1 | 2021-01-20T08:01:54.000Z | 2021-01-20T08:01:54.000Z | import smtplib
to = input("Enter the Email of recipent:\n")
content = input("Enter the Content for E-Mail:\n")
def sendEmail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)  # 587 is the STARTTLS submission port
server.ehlo()
server.starttls()
server.login('senderemail@gmail.com','12345678')
server.sendmail('senderemail@gmail.com',to,content)
server.close()
sendEmail(to, content) | 27.285714 | 55 | 0.696335 |
2501038750e578086074bc868a99009ee60a7d7c | 831 | py | Python | TTS.py | MayukhGautam/Microbit-Python-Interface | 56ee9e4f33a871a7ad631d2e31b3bfdb0c93f6cc | [
"MIT"
] | null | null | null | TTS.py | MayukhGautam/Microbit-Python-Interface | 56ee9e4f33a871a7ad631d2e31b3bfdb0c93f6cc | [
"MIT"
] | null | null | null | TTS.py | MayukhGautam/Microbit-Python-Interface | 56ee9e4f33a871a7ad631d2e31b3bfdb0c93f6cc | [
"MIT"
] | 2 | 2019-09-13T18:21:46.000Z | 2019-09-16T01:02:35.000Z | from microbit import * #importing packages
import radio, speech
radio.config(queue=5, length=32, channel=7, power=6, address=0x75626974, group=0, data_rate=radio.RATE_1MBIT) #setting radio's settings
radio.on() #radio is on
while True: #pretty much while microbit is on
speech.say("A for send B for receive", speed=100)
display.scroll("A for send B for receive", delay=200, wait=True, monospace=False) #display info
if button_a.was_pressed(): #tests if button A was pressed and if it is sends a string to other radio's in same channel
radio.send("Yasir is TOO ALPHA")
if button_b.was_pressed(): #tests if button B was pressed and displays the msg received
msg = radio.receive()
speech.say(msg, speed=100)
display.scroll(msg, delay=200, wait=True, monospace=False)
| 39.571429 | 135 | 0.706378 |
615d8b6403daffd7cdabbca42fd4e195c34229d6 | 2,805 | py | Python | annotation-model/tools/make_tests.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 8 | 2019-04-09T21:13:05.000Z | 2021-11-23T17:25:18.000Z | annotation-model/tools/make_tests.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 49 | 2021-03-01T07:28:51.000Z | 2022-03-28T16:06:03.000Z | annotation-model/tools/make_tests.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 11 | 2019-04-12T01:20:16.000Z | 2021-11-23T17:25:02.000Z | # Copyright (c) 2016 W3C
# Released under the W3C Test Suite License: see LICENSE.txt
# This tool creates .html test files for the WPT harness from corresponding .test
# files that it finds in the tree for this test collection.
from __future__ import print_function
import re
import time
import json
import fnmatch
import os
import shutil
import sys
import argparse
TESTTREE = '..'
DEFDIR = '../definitions'
MANUAL_TEMPLATE = 'template_manual'
JS_TEMPLATE = 'template_js'
parser = argparse.ArgumentParser()
parser.add_argument('--examples', action="store_const", const=1)
args = parser.parse_args()
# pull in the template
manualTemplate = open(MANUAL_TEMPLATE, "r").read()
autoTemplate = open(JS_TEMPLATE, "r").read()
defList = []
defNames = ""  # must match the name used below, otherwise an empty defList leaves it undefined
# find all of the definitions
for curdir, subdirList, fileList in os.walk(DEFDIR, topdown=True):
for file in fnmatch.filter(fileList, "*.json"):
theFile = os.path.join(curdir, file)
try:
testJSON = json.load(open(theFile, "r"))
except ValueError as e:
print("parse of " + theFile + " failed: " + e[0])
else:
theFile = re.sub("\.\./", "", theFile)
defList.append(theFile)
if (len(defList)):
defNames = '"' + '",\n "'.join(defList) + '"'
# iterate over the folders looking for .test files
for curdir, subdirList, fileList in os.walk(TESTTREE, topdown=True):
# skip the definitions directory
subdirList[:] = [d for d in subdirList if d != "definitions"]
# skip the examples directory
if args.examples != 1:
subdirList[:] = [d for d in subdirList if d != "examples"]
for file in fnmatch.filter(fileList, "*.test"):
# for each .test file, create a corresponding .html file using the appropriate
# template
theFile = os.path.join(curdir, file)
try:
testJSON = json.load(open(theFile, "r"))
except ValueError as e:
print("parse of " + theFile + " failed: " + e[0])
else:
try:
testType = testJSON['testType']
except:
testType = "manual"
templateFile = manualTemplate
suffix = "-manual.html"
if testType == "automated":
templateFile = autoTemplate
suffix = ".html"
rfile = re.sub("\.\./", "", file)
# interesting pattern is {{TESTFILE}}
tcopy = re.sub("{{TESTFILE}}", rfile, templateFile)
tcopy = re.sub("{{SCHEMADEFS}}", defNames, tcopy)
try:
title = testJSON['name']
except:
title = file
tcopy = re.sub("{{TESTTITLE}}", title, tcopy)
# target file is basename of theFile + '-manual.html'
target = re.sub("\.test",suffix, theFile)
try:
out = open(target, "w")
out.write(tcopy)
out.close()
except:
print("Failed to create "+target)
else:
print("Created " + target)
| 26.462264 | 81 | 0.635651 |
9698708be9c82024aa4ca2061463de50c19bfbbd | 513 | py | Python | 24frame.py | yramsan/MCB-185-Homework | 6c3550aca0397b033d7e4403edda4b99b31fe192 | [
"MIT"
] | null | null | null | 24frame.py | yramsan/MCB-185-Homework | 6c3550aca0397b033d7e4403edda4b99b31fe192 | [
"MIT"
] | null | null | null | 24frame.py | yramsan/MCB-185-Homework | 6c3550aca0397b033d7e4403edda4b99b31fe192 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Write a program that prints out the position, frame, and letter of the DNA
# Try coding this with a single loop
# Try coding this with nested loops
# Credit to my new friend Hongtao for helping with this code!
dna = 'ATGGCCTTT'
# Single Loop
for i in range(len(dna)):
print(i, i % 3, dna[i])
print()
# Nested Loop
for i in range(0, len(dna)-2,3):
for j in range(3):
print(i+j, j, dna[i+j])
"""
python3 24frame.py
0 0 A
1 1 T
2 2 G
3 0 G
4 1 C
5 2 C
6 0 T
7 1 T
8 2 T
"""
| 15.545455 | 76 | 0.649123 |
bcc04f42ad1322d61d3147008dbdc90d39de21c5 | 4,672 | py | Python | home_app/feeds.py | Ymirrp/Home-page | 6ac9b5b76cc2b08298086c7e784685dad802c9d6 | [
"MIT"
] | null | null | null | home_app/feeds.py | Ymirrp/Home-page | 6ac9b5b76cc2b08298086c7e784685dad802c9d6 | [
"MIT"
] | 7 | 2020-04-18T04:54:05.000Z | 2020-04-29T14:49:46.000Z | home_app/feeds.py | Ymirrp/Home-page | 6ac9b5b76cc2b08298086c7e784685dad802c9d6 | [
"MIT"
] | null | null | null | import feedparser
import operator
import requests
from datetime import datetime, timedelta
def get_feed():
visir_feeds = feedparser.parse("https://www.visir.is/rss/frettir/")
mblinnlent_feeds = feedparser.parse("https://www.mbl.is/feeds/innlent/")
mblerlent_feeds = feedparser.parse("https://www.mbl.is/feeds/erlent/")
ruvinnlent_feeds = feedparser.parse("https://www.ruv.is/rss/innlent")
ruverlent_feeds = feedparser.parse("https://www.ruv.is/rss/erlent")
parsed_feed = parse_feed(
visir_feeds['entries'],
mblinnlent_feeds['entries'],
mblerlent_feeds['entries'],
ruvinnlent_feeds['entries'],
ruverlent_feeds['entries']
)
return parsed_feed
def parse_feed(visir_f, mbli_f, mble_f, ruvi_f, ruve_f):
news_lst = []
for f in visir_f:
parsed_v = parse_date('Visir', f)
news_lst.append(parsed_v)
for f in mbli_f:
parsed_m = parse_date('Mbl', f)
news_lst.append(parsed_m)
for f in mble_f:
parsed_m = parse_date('Mbl', f)
news_lst.append(parsed_m)
for f in ruvi_f:
parsed_m = parse_date('Ruv', f)
news_lst.append(parsed_m)
for m in ruve_f:
parsed_m = parse_date('Ruv', m)
news_lst.append(parsed_m)
news_lst.sort(key=operator.itemgetter('time'), reverse=True)
return news_lst
def parse_date(site, entry):
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
entry['site'] = site
p = entry['published']
day = p[5:7]
month = months.index(p[8:11]) + 1
month = '0' + str(month) if month < 10 else str(month)
year = p[12:16]
h = p[17:19]
m = p[20:22]
s = p[23:25]
entry['time'] = '{}{}{}{}{}{}'.format(year, month, day, h, m, s)
today = datetime.now()
then = datetime(int(year), int(month), int(day), int(h), int(m), int(s))
entry['time_diff'] = today - then
time_diff = entry['time_diff']
time_diff_seconds = int(time_diff.total_seconds())
m, s = divmod(time_diff_seconds, 60)
h, m = divmod(m, 60)
if h > 23:
if h // 24 == 1:
entry['time_passed'] = str(h // 24) + " degi síðan"
else:
entry['time_passed'] = str(h // 24) + " dögum síðan"
elif h > 0:
if h == 1:
entry['time_passed'] = str(h) + " klukkutíma síðan"
else:
entry['time_passed'] = str(h) + " klukkutímum síðan"
elif m > 0:
if m == 1:
entry['time_passed'] = str(m) + " mínutu síðan"
else:
entry['time_passed'] = str(m) + " mínutum síðan"
else:
if s == 1:
entry['time_passed'] = str(s) + " sekúndu síðan"
else:
entry['time_passed'] = str(s) + " sekúndum síðan"
return entry
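# parse_date's string slicing assumes the usual RFC-822 style date found in RSS feeds,
# e.g. (hypothetical value) 'Mon, 02 Sep 2019 14:05:33 GMT':
#     p[5:7]   -> '02'   (day)       p[8:11]  -> 'Sep'  (month)
#     p[12:16] -> '2019' (year)      p[17:19] -> '14'   (hour)
#     p[20:22] -> '05'   (minute)    p[23:25] -> '33'   (second)
# A feed publishing a different date format would break these offsets.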
def get_weather(lat, lon):
api_url = "http://api.openweathermap.org/data/2.5/weather?lat=" + lat + \
"&lon=" + lon + "&units=metric&appid=c5d83ac177a5989b9d9ee9f886892237"
print(api_url)
res = requests.get(api_url)
return parse_weather(res.json())
def parse_weather(w):
translation = {
"Thunderstorm": "Þrumuveður",
"Drizzle": "Skúrir",
"Rain": "Rigning",
"Snow": "Snjókoma",
"Clear": "Heiðskýrt",
"Clouds": "Skýjað"
}
city = w['name']
weather = translation[w['weather'][0]['main']]
wind = w['wind']['speed']
temp = int(round(w['main']['temp'], 0))
degree = get_degree(int(w['wind']['deg']))
present = datetime.now()
# w['dt'] is a unix timestamp; measure how many minutes old the observation is
time_diff = present - datetime.fromtimestamp(w['dt'])
time = int(time_diff.total_seconds() // 60)
img = "http://openweathermap.org/img/wn/" + w['weather'][0]['icon'] + '.png'
return {
"city": city,
"weather": weather,
"temp": temp,
"wind": wind,
"deg": degree,
"time": time,
"icon": img
}
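# parse_weather only touches a handful of fields from the OpenWeatherMap response.
# A minimal illustrative payload (all values made up) that it would accept:
#
#     example_response = {
#         "name": "Reykjavik",
#         "weather": [{"main": "Clouds", "icon": "04d"}],
#         "main": {"temp": 3.2},
#         "wind": {"speed": 5.1, "deg": 90},
#         "dt": 1577836800,
#     }
#
# parse_weather(example_response) then yields the dict consumed by the template.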
def get_degree(deg):
ret = ''
if deg != 0:
# normalize 0-360 degrees onto the 0-32 scale used by the thresholds below (360 / 32 = 11.25)
deg = deg / 11.25
if deg < 3 or deg >= 31:
# ret = 'N'
ret = '↑'
elif 3 <= deg < 8:
# ret = 'N/A'
ret = '↗'
elif 8 <= deg < 11:
# ret = 'A'
ret = '→'
elif 11 <= deg < 16:
# ret = 'S/A'
ret = '↘'
elif 16 <= deg < 19:
# ret = 'S'
ret = '↓'
elif 19 <= deg < 24:
# ret = 'S/V'
ret = '↙'
elif 24 <= deg < 27:
# ret = 'V'
ret = '←'
elif 27 <= deg < 30:
# ret = 'N/V'
ret = '↖'
return ret
| 30.337662 | 98 | 0.52012 |
acfc9999ff6ab8bab908d85ee5910be1baa6607a | 2,801 | py | Python | evaluate/eval_cylib/test_cython.py | mondrasovic/reid_baseline_syncbn | 3d21a786fb1a0519caaa0572c649f750036689b5 | [
"MIT"
] | 1 | 2022-01-05T15:42:44.000Z | 2022-01-05T15:42:44.000Z | evaluate/eval_cylib/test_cython.py | mondrasovic/reid_baseline_syncbn | 3d21a786fb1a0519caaa0572c649f750036689b5 | [
"MIT"
] | null | null | null | evaluate/eval_cylib/test_cython.py | mondrasovic/reid_baseline_syncbn | 3d21a786fb1a0519caaa0572c649f750036689b5 | [
"MIT"
] | null | null | null | from __future__ import print_function
import sys
import os.path as osp
import timeit
import numpy as np
sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../..')
from torchreid.eval_metrics import evaluate
"""
Test the speed of cython-based evaluation code. The speed improvements
can be much bigger when using the real reid data, which contains a larger
amount of query and gallery images.
Note: you might encounter the following error:
'AssertionError: Error: all query identities do not appear in gallery'.
This is normal because the inputs are random numbers. Just try again.
"""
print('*** Compare running time ***')
setup = '''
import sys
import os.path as osp
import numpy as np
sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../..')
from torchreid.eval_metrics import evaluate
num_q = 30
num_g = 300
max_rank = 5
distmat = np.random.rand(num_q, num_g) * 20
q_pids = np.random.randint(0, num_q, size=num_q)
g_pids = np.random.randint(0, num_g, size=num_g)
q_camids = np.random.randint(0, 5, size=num_q)
g_camids = np.random.randint(0, 5, size=num_g)
'''
print('=> Using market1501\'s metric')
pytime = timeit.timeit(
'evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)',
setup=setup,
number=20
)
cytime = timeit.timeit(
'evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)',
setup=setup,
number=20
)
print('Python time: {} s'.format(pytime))
print('Cython time: {} s'.format(cytime))
print('Cython is {} times faster than python\n'.format(pytime / cytime))
print('=> Using cuhk03\'s metric')
pytime = timeit.timeit(
'evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=False)',
setup=setup,
number=20
)
cytime = timeit.timeit(
'evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=True)',
setup=setup,
number=20
)
print('Python time: {} s'.format(pytime))
print('Cython time: {} s'.format(cytime))
print('Cython is {} times faster than python\n'.format(pytime / cytime))
"""
print("=> Check precision")
num_q = 30
num_g = 300
max_rank = 5
distmat = np.random.rand(num_q, num_g) * 20
q_pids = np.random.randint(0, num_q, size=num_q)
g_pids = np.random.randint(0, num_g, size=num_g)
q_camids = np.random.randint(0, 5, size=num_q)
g_camids = np.random.randint(0, 5, size=num_g)
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)
print("Python:\nmAP = {} \ncmc = {}\n".format(mAP, cmc))
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)
print("Cython:\nmAP = {} \ncmc = {}\n".format(mAP, cmc))
"""
| 32.952941 | 113 | 0.695466 |
fed9a11709bbf79b6057ecc2f19bc0b49f583180 | 6,262 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/network/linkset.py | mahabs/nitro | be74e1e177f5c205c16126bc9b023f2348788409 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/network/linkset.py | mahabs/nitro | be74e1e177f5c205c16126bc9b023f2348788409 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/network/linkset.py | mahabs/nitro | be74e1e177f5c205c16126bc9b023f2348788409 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class linkset(base_resource) :
""" Configuration for link set resource. """
def __init__(self) :
self._id = ""
self._ifnum = ""
self.___count = 0
@property
def id(self) :
"""Unique identifier for the linkset. Must be of the form LS/x, where x can be an integer from 1 to 32.
"""
try :
return self._id
except Exception as e:
raise e
@id.setter
def id(self, id) :
"""Unique identifier for the linkset. Must be of the form LS/x, where x can be an integer from 1 to 32.
"""
try :
self._id = id
except Exception as e:
raise e
@property
def ifnum(self) :
"""The interfaces to be bound to the linkset.
"""
try :
return self._ifnum
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(linkset_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.linkset
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.id) :
return str(self.id)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add linkset.
"""
try :
if type(resource) is not list :
addresource = linkset()
addresource.id = resource.id
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ linkset() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].id = resource[i].id
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete linkset.
"""
try :
if type(resource) is not list :
deleteresource = linkset()
if type(resource) != type(deleteresource):
deleteresource.id = resource
else :
deleteresource.id = resource.id
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ linkset() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].id = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ linkset() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].id = resource[i].id
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the linkset resources that are configured on netscaler.
"""
try :
if not name :
obj = linkset()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = linkset()
obj.id = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [linkset() for _ in range(len(name))]
obj = [linkset() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = linkset()
obj[i].id = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of linkset resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = linkset()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the linkset resources configured on NetScaler.
"""
try :
obj = linkset()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of linkset resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = linkset()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class linkset_response(base_response) :
def __init__(self, length=1) :
self.linkset = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.linkset = [linkset() for _ in range(length)]
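# Hedged usage sketch (not part of the generated SDK module): the classmethods above are
# normally driven through an authenticated nitro session. The nitro_service import path,
# constructor arguments and credentials below are assumptions based on the wider Nitro
# SDK and may differ between releases.
#
#     from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#     client = nitro_service("ns.example.com", "http")   # hypothetical appliance address
#     client.login("nsroot", "password")
#     ls = linkset()
#     ls.id = "LS/1"
#     linkset.add(client, ls)        # create the linkset
#     print(linkset.count(client))   # number of linksets configured on the appliance
#     client.logout()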
| 28.724771 | 109 | 0.6771 |
edc4d46516eece9e14a8692a5eb5e71e76cd3e6d | 204 | py | Python | commands/deviot_remove_library.py | Rod-O/Deviot | 45f59052a6f6f5d462d8dcddbf5836ef3b9f3eec | [
"Apache-2.0"
] | 327 | 2015-12-12T05:09:09.000Z | 2022-03-31T21:36:59.000Z | commands/deviot_remove_library.py | Rod-O/Deviot | 45f59052a6f6f5d462d8dcddbf5836ef3b9f3eec | [
"Apache-2.0"
] | 326 | 2016-01-03T14:03:00.000Z | 2022-03-02T09:35:47.000Z | commands/deviot_remove_library.py | Rod-O/Deviot | 45f59052a6f6f5d462d8dcddbf5836ef3b9f3eec | [
"Apache-2.0"
] | 87 | 2015-12-12T17:31:02.000Z | 2022-03-31T21:37:00.000Z | from sublime_plugin import WindowCommand
from ..libraries.libraries import Libraries
class DeviotRemoveLibraryCommand(WindowCommand):
def run(self):
Libraries().get_installed_list('remove')
| 25.5 | 48 | 0.789216 |
c34b3a9f076ec5bb1db008dd4fabfc85f1672820 | 26,364 | py | Python | tests/func/test_checkout.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | tests/func/test_checkout.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | tests/func/test_checkout.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | import collections
import filecmp
import logging
import os
import shutil
import pytest
from mock import patch
from dvc.dvcfile import DVC_FILE_SUFFIX, PIPELINE_FILE, Dvcfile
from dvc.exceptions import (
CheckoutError,
CheckoutErrorSuggestGit,
ConfirmRemoveError,
DvcException,
NoOutputOrStageError,
)
from dvc.main import main
from dvc.remote.base import CloudCache, Remote
from dvc.remote.local import LocalRemoteTree
from dvc.remote.s3 import S3RemoteTree
from dvc.repo import Repo as DvcRepo
from dvc.stage import Stage
from dvc.stage.exceptions import StageFileDoesNotExistError
from dvc.system import System
from dvc.utils import relpath
from dvc.utils.fs import walk_files
from dvc.utils.yaml import dump_yaml, load_yaml
from tests.basic_env import TestDvc, TestDvcGit
from tests.func.test_repro import TestRepro
from tests.remotes import S3
logger = logging.getLogger("dvc")
class TestCheckout(TestRepro):
def setUp(self):
super().setUp()
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
self.data_dir_stage = stages[0]
self.assertTrue(self.data_dir_stage is not None)
self.orig = "orig"
shutil.copy(self.FOO, self.orig)
os.unlink(self.FOO)
self.orig_dir = "orig_dir"
shutil.copytree(self.DATA_DIR, self.orig_dir)
shutil.rmtree(self.DATA_DIR)
def test(self):
self.dvc.checkout(force=True)
self._test_checkout()
def _test_checkout(self):
self.assertTrue(os.path.isfile(self.FOO))
self.assertTrue(filecmp.cmp(self.FOO, self.orig, shallow=False))
class TestCheckoutSingleStage(TestCheckout):
def test(self):
ret = main(["checkout", "--force", self.foo_stage.path])
self.assertEqual(ret, 0)
ret = main(["checkout", "--force", self.data_dir_stage.path])
self.assertEqual(ret, 0)
self._test_checkout()
class TestCheckoutCorruptedCacheFile(TestRepro):
def test(self):
cache = self.foo_stage.outs[0].cache_path
os.chmod(cache, 0o644)
with open(cache, "a") as fd:
fd.write("1")
with pytest.raises(CheckoutError):
self.dvc.checkout(force=True)
self.assertFalse(os.path.isfile(self.FOO))
self.assertFalse(os.path.isfile(cache))
class TestCheckoutCorruptedCacheDir(TestDvc):
def test(self):
# NOTE: using 'copy' so that cache and link don't have same inode
ret = main(["config", "cache.type", "copy"])
self.assertEqual(ret, 0)
self.dvc = DvcRepo(".")
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
self.assertEqual(len(stages[0].outs), 1)
out = stages[0].outs[0]
# NOTE: modifying cache file for one of the files inside the directory
# to check if dvc will detect that the cache is corrupted.
entry = self.dvc.cache.local.load_dir_cache(out.checksum)[0]
entry_hash = entry[self.dvc.cache.local.tree.PARAM_CHECKSUM]
cache = os.fspath(self.dvc.cache.local.hash_to_path_info(entry_hash))
os.chmod(cache, 0o644)
with open(cache, "w+") as fobj:
fobj.write("1")
with pytest.raises(CheckoutError):
self.dvc.checkout(force=True)
self.assertFalse(os.path.exists(cache))
class TestCmdCheckout(TestCheckout):
def test(self):
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
self._test_checkout()
class CheckoutBase(TestDvcGit):
GIT_IGNORE = ".gitignore"
def commit_data_file(self, fname, content="random text"):
with open(fname, "w") as fd:
fd.write(content)
stages = self.dvc.add(fname)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
self.dvc.scm.add([fname + ".dvc", ".gitignore"])
self.dvc.scm.commit("adding " + fname)
def read_ignored(self):
with open(self.GIT_IGNORE) as f:
return [s.strip("\n") for s in f.readlines()]
def outs_info(self, stage):
FileInfo = collections.namedtuple("FileInfo", "path inode")
paths = [
path
for output in stage["outs"]
for path in self.dvc.tree.walk_files(output["path"])
]
return [
FileInfo(path=path, inode=System.inode(path)) for path in paths
]
class TestRemoveFilesWhenCheckout(CheckoutBase):
def test(self):
fname = "file_in_a_branch"
branch_master = "master"
branch_1 = "b1"
self.dvc.scm.add(self.dvc.scm.untracked_files())
self.dvc.scm.commit("add all files")
# add the file into a separate branch
self.dvc.scm.checkout(branch_1, True)
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
self.commit_data_file(fname)
# Checkout back in master
self.dvc.scm.checkout(branch_master)
self.assertTrue(os.path.exists(fname))
# Make sure `dvc checkout` removes the file
# self.dvc.checkout()
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
self.assertFalse(os.path.exists(fname))
class TestCheckoutCleanWorkingDir(CheckoutBase):
@patch("dvc.prompt.confirm", return_value=True)
def test(self, mock_prompt):
mock_prompt.return_value = True
stages = self.dvc.add(self.DATA_DIR)
stage = stages[0]
working_dir_change = os.path.join(self.DATA_DIR, "not_cached.txt")
with open(working_dir_change, "w") as f:
f.write("not_cached")
ret = main(["checkout", stage.relpath])
self.assertEqual(ret, 0)
self.assertFalse(os.path.exists(working_dir_change))
@patch("dvc.prompt.confirm", return_value=False)
def test_force(self, mock_prompt):
mock_prompt.return_value = False
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
stage = stages[0]
working_dir_change = os.path.join(self.DATA_DIR, "not_cached.txt")
with open(working_dir_change, "w") as f:
f.write("not_cached")
ret = main(["checkout", stage.relpath])
self.assertNotEqual(ret, 0)
mock_prompt.assert_called()
self.assertNotEqual(ret, 0)
self.assertRaises(DvcException)
class TestCheckoutSelectiveRemove(CheckoutBase):
def test(self):
# Use copy to test for changes in the inodes
ret = main(["config", "cache.type", "copy"])
self.assertEqual(ret, 0)
ret = main(["add", self.DATA_DIR])
self.assertEqual(0, ret)
stage_path = self.DATA_DIR + DVC_FILE_SUFFIX
stage = load_yaml(stage_path)
staged_files = self.outs_info(stage)
# move instead of remove, to lock inode assigned to stage_files[0].path
# if we were to use remove, we might end up with same inode assigned to
# newly checked out file
shutil.move(staged_files[0].path, "random_name")
ret = main(["checkout", "--force", stage_path])
self.assertEqual(ret, 0)
checkedout_files = self.outs_info(stage)
self.assertEqual(len(staged_files), len(checkedout_files))
self.assertEqual(staged_files[0].path, checkedout_files[0].path)
self.assertNotEqual(staged_files[0].inode, checkedout_files[0].inode)
self.assertEqual(staged_files[1].inode, checkedout_files[1].inode)
class TestGitIgnoreBasic(CheckoutBase):
def test(self):
fname1 = "file_1"
fname2 = "file_2"
fname3 = "file_3"
self.dvc.scm.add(self.dvc.scm.untracked_files())
self.dvc.scm.commit("add all files")
self.assertFalse(os.path.exists(self.GIT_IGNORE))
self.commit_data_file(fname1)
self.commit_data_file(fname2)
self.dvc.run(
single_stage=True,
cmd=f"python {self.CODE} {self.FOO} {fname3}",
deps=[self.CODE, self.FOO],
outs_no_cache=[fname3],
)
self.assertTrue(os.path.exists(self.GIT_IGNORE))
ignored = self.read_ignored()
self.assertEqual(len(ignored), 2)
self.assertIn("/" + fname1, ignored)
self.assertIn("/" + fname2, ignored)
class TestGitIgnoreWhenCheckout(CheckoutBase):
def test(self):
fname_master = "file_in_a_master"
branch_master = "master"
fname_branch = "file_in_a_branch"
branch_1 = "b1"
self.dvc.scm.add(self.dvc.scm.untracked_files())
self.dvc.scm.commit("add all files")
self.commit_data_file(fname_master)
self.dvc.scm.checkout(branch_1, True)
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
self.commit_data_file(fname_branch)
self.dvc.scm.checkout(branch_master)
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
ignored = self.read_ignored()
self.assertEqual(len(ignored), 1)
self.assertIn("/" + fname_master, ignored)
self.dvc.scm.checkout(branch_1)
ret = main(["checkout", "--force"])
self.assertEqual(ret, 0)
ignored = self.read_ignored()
self.assertIn("/" + fname_branch, ignored)
class TestCheckoutMissingMd5InStageFile(TestRepro):
def test(self):
d = load_yaml(self.file1_stage)
del d[Stage.PARAM_OUTS][0][LocalRemoteTree.PARAM_CHECKSUM]
del d[Stage.PARAM_DEPS][0][LocalRemoteTree.PARAM_CHECKSUM]
dump_yaml(self.file1_stage, d)
with pytest.raises(CheckoutError):
self.dvc.checkout(force=True)
class TestCheckoutEmptyDir(TestDvc):
def test(self):
dname = "empty_dir"
os.mkdir(dname)
stages = self.dvc.add(dname)
self.assertEqual(len(stages), 1)
stage = stages[0]
self.assertTrue(stage is not None)
self.assertEqual(len(stage.outs), 1)
stage.outs[0].remove()
self.assertFalse(os.path.exists(dname))
stats = self.dvc.checkout(force=True)
assert stats["added"] == [dname + os.sep]
self.assertTrue(os.path.isdir(dname))
self.assertEqual(len(os.listdir(dname)), 0)
class TestCheckoutNotCachedFile(TestDvc):
def test(self):
cmd = "python {} {} {}".format(self.CODE, self.FOO, "out")
self.dvc.add(self.FOO)
stage = self.dvc.run(
cmd=cmd,
deps=[self.FOO, self.CODE],
outs_no_cache=["out"],
single_stage=True,
)
self.assertTrue(stage is not None)
stats = self.dvc.checkout(force=True)
assert not any(stats.values())
class TestCheckoutWithDeps(TestRepro):
def test(self):
os.unlink(self.FOO)
os.unlink(self.file1)
self.assertFalse(os.path.exists(self.FOO))
self.assertFalse(os.path.exists(self.file1))
ret = main(["checkout", "--force", self.file1_stage, "--with-deps"])
self.assertEqual(ret, 0)
self.assertTrue(os.path.exists(self.FOO))
self.assertTrue(os.path.exists(self.file1))
class TestCheckoutDirectory(TestRepro):
def test(self):
stage = self.dvc.add(self.DATA_DIR)[0]
shutil.rmtree(self.DATA_DIR)
self.assertFalse(os.path.exists(self.DATA_DIR))
ret = main(["checkout", stage.path])
self.assertEqual(ret, 0)
self.assertTrue(os.path.exists(self.DATA_DIR))
class TestCheckoutHook(TestDvc):
@patch("sys.stdout.isatty", return_value=True)
@patch("dvc.prompt.input", side_effect=EOFError)
def test(self, mock_input, mock_isatty):
""" Test that dvc checkout handles EOFError gracefully, which is what
it will experience when running in a git hook.
"""
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
stage = stages[0]
self.assertNotEqual(stage, None)
self.create(os.path.join(self.DATA_DIR, "test"), "test")
with self.assertRaises(ConfirmRemoveError):
self.dvc.checkout()
class TestCheckoutSuggestGit(TestRepro):
def test(self):
try:
self.dvc.checkout(targets="gitbranch")
except DvcException as exc:
self.assertIsInstance(exc, CheckoutErrorSuggestGit)
self.assertIsInstance(exc.__cause__, NoOutputOrStageError)
self.assertIsInstance(
exc.__cause__.__cause__, StageFileDoesNotExistError
)
try:
self.dvc.checkout(targets=self.FOO)
except DvcException as exc:
self.assertIsInstance(exc, CheckoutErrorSuggestGit)
self.assertIsInstance(exc.__cause__, NoOutputOrStageError)
self.assertIsNone(exc.__cause__.__cause__)
try:
self.dvc.checkout(targets="looks-like-dvcfile.dvc")
except DvcException as exc:
self.assertIsInstance(exc, CheckoutErrorSuggestGit)
self.assertIsInstance(exc.__cause__, StageFileDoesNotExistError)
self.assertIsNone(exc.__cause__.__cause__)
class TestCheckoutTargetRecursiveShouldNotRemoveOtherUsedFiles(TestDvc):
def test(self):
ret = main(["add", self.DATA_DIR, self.FOO, self.BAR])
self.assertEqual(0, ret)
ret = main(["checkout", "-R", self.DATA_DIR])
self.assertEqual(0, ret)
self.assertTrue(os.path.exists(self.FOO))
self.assertTrue(os.path.exists(self.BAR))
class TestCheckoutRecursiveNotDirectory(TestDvc):
def test(self):
ret = main(["add", self.FOO])
self.assertEqual(0, ret)
stats = self.dvc.checkout(targets=[self.FOO + ".dvc"], recursive=True)
assert stats == {"added": [], "modified": [], "deleted": []}
class TestCheckoutMovedCacheDirWithSymlinks(TestDvc):
def test(self):
ret = main(["config", "cache.type", "symlink"])
self.assertEqual(ret, 0)
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
ret = main(["add", self.DATA_DIR])
self.assertEqual(ret, 0)
self.assertTrue(System.is_symlink(self.FOO))
old_foo_link = os.path.realpath(self.FOO)
self.assertTrue(System.is_symlink(self.DATA))
old_data_link = os.path.realpath(self.DATA)
old_cache_dir = self.dvc.cache.local.cache_dir
new_cache_dir = old_cache_dir + "_new"
os.rename(old_cache_dir, new_cache_dir)
ret = main(["cache", "dir", new_cache_dir])
self.assertEqual(ret, 0)
ret = main(["checkout", "-f"])
self.assertEqual(ret, 0)
self.assertTrue(System.is_symlink(self.FOO))
new_foo_link = os.path.realpath(self.FOO)
self.assertTrue(System.is_symlink(self.DATA))
new_data_link = os.path.realpath(self.DATA)
self.assertEqual(
relpath(old_foo_link, old_cache_dir),
relpath(new_foo_link, new_cache_dir),
)
self.assertEqual(
relpath(old_data_link, old_cache_dir),
relpath(new_data_link, new_cache_dir),
)
def test_checkout_no_checksum(tmp_dir, dvc):
tmp_dir.gen("file", "file content")
stage = dvc.run(
outs=["file"], no_exec=True, cmd="somecmd", single_stage=True
)
with pytest.raises(CheckoutError):
dvc.checkout([stage.path], force=True)
assert not os.path.exists("file")
@pytest.mark.parametrize(
"link, link_test_func",
[("hardlink", System.is_hardlink), ("symlink", System.is_symlink)],
)
def test_checkout_relink(tmp_dir, dvc, link, link_test_func):
dvc.cache.local.cache_types = [link]
tmp_dir.dvc_gen({"dir": {"data": "text"}})
dvc.unprotect("dir/data")
assert not link_test_func("dir/data")
stats = dvc.checkout(["dir.dvc"], relink=True)
assert stats == empty_checkout
assert link_test_func("dir/data")
@pytest.mark.parametrize("link", ["hardlink", "symlink", "copy"])
def test_checkout_relink_protected(tmp_dir, dvc, link):
dvc.cache.local.cache_types = [link]
tmp_dir.dvc_gen("foo", "foo")
dvc.unprotect("foo")
assert os.access("foo", os.W_OK)
stats = dvc.checkout(["foo.dvc"], relink=True)
assert stats == empty_checkout
# NOTE: Windows symlink perms don't propagate to the target
if link == "copy" or (link == "symlink" and os.name == "nt"):
assert os.access("foo", os.W_OK)
else:
assert not os.access("foo", os.W_OK)
@pytest.mark.parametrize(
"target",
[os.path.join("dir", "subdir"), os.path.join("dir", "subdir", "file")],
)
def test_partial_checkout(tmp_dir, dvc, target):
tmp_dir.dvc_gen({"dir": {"subdir": {"file": "file"}, "other": "other"}})
shutil.rmtree("dir")
stats = dvc.checkout([target])
assert stats["added"] == ["dir" + os.sep]
assert list(walk_files("dir")) == [os.path.join("dir", "subdir", "file")]
empty_checkout = {"added": [], "deleted": [], "modified": []}
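# Baseline stats dict returned by dvc.checkout() when nothing was added, deleted or
# modified; it is reused as the expected value throughout this module's tests.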
def test_stats_on_empty_checkout(tmp_dir, dvc, scm):
assert dvc.checkout() == empty_checkout
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}, "other": "other"}},
commit="initial",
)
assert dvc.checkout() == empty_checkout
def test_stats_on_checkout(tmp_dir, dvc, scm):
tmp_dir.dvc_gen(
{
"dir": {"subdir": {"file": "file"}, "other": "other"},
"foo": "foo",
"bar": "bar",
},
commit="initial",
)
scm.checkout("HEAD~")
stats = dvc.checkout()
assert set(stats["deleted"]) == {"dir" + os.sep, "foo", "bar"}
scm.checkout("-")
stats = dvc.checkout()
assert set(stats["added"]) == {"bar", "dir" + os.sep, "foo"}
tmp_dir.gen({"lorem": "lorem", "bar": "new bar", "dir2": {"file": "file"}})
(tmp_dir / "foo").unlink()
scm.repo.git.rm("foo.dvc")
tmp_dir.dvc_add(["bar", "lorem", "dir2"], commit="second")
scm.checkout("HEAD~")
stats = dvc.checkout()
assert set(stats["modified"]) == {"bar"}
assert set(stats["added"]) == {"foo"}
assert set(stats["deleted"]) == {"lorem", "dir2" + os.sep}
scm.checkout("-")
stats = dvc.checkout()
assert set(stats["modified"]) == {"bar"}
assert set(stats["added"]) == {"dir2" + os.sep, "lorem"}
assert set(stats["deleted"]) == {"foo"}
def test_checkout_stats_on_failure(tmp_dir, dvc, scm):
tmp_dir.dvc_gen(
{"foo": "foo", "dir": {"subdir": {"file": "file"}}, "other": "other"},
commit="initial",
)
stage = Dvcfile(dvc, "foo.dvc").stage
tmp_dir.dvc_gen({"foo": "foobar", "other": "other other"}, commit="second")
# corrupt cache
cache = stage.outs[0].cache_path
os.chmod(cache, 0o644)
with open(cache, "a") as fd:
fd.write("destroy cache")
scm.checkout("HEAD~")
with pytest.raises(CheckoutError) as exc:
dvc.checkout(force=True)
assert exc.value.stats == {
**empty_checkout,
"failed": ["foo"],
"modified": ["other"],
}
def test_stats_on_added_file_from_tracked_dir(tmp_dir, dvc, scm):
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}, "other": "other"}},
commit="initial",
)
tmp_dir.gen("dir/subdir/newfile", "newfile")
tmp_dir.dvc_add("dir", commit="add newfile")
scm.checkout("HEAD~")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
scm.checkout("-")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
def test_stats_on_updated_file_from_tracked_dir(tmp_dir, dvc, scm):
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}, "other": "other"}},
commit="initial",
)
tmp_dir.gen("dir/subdir/file", "what file?")
tmp_dir.dvc_add("dir", commit="update file")
scm.checkout("HEAD~")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
scm.checkout("-")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
def test_stats_on_removed_file_from_tracked_dir(tmp_dir, dvc, scm):
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}, "other": "other"}},
commit="initial",
)
(tmp_dir / "dir" / "subdir" / "file").unlink()
tmp_dir.dvc_add("dir", commit="removed file from subdir")
scm.checkout("HEAD~")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
scm.checkout("-")
assert dvc.checkout() == {**empty_checkout, "modified": ["dir" + os.sep]}
assert dvc.checkout() == empty_checkout
def test_stats_on_show_changes_does_not_show_summary(
tmp_dir, dvc, scm, caplog
):
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}}, "other": "other"},
commit="initial",
)
scm.checkout("HEAD~")
with caplog.at_level(logging.INFO, logger="dvc"):
caplog.clear()
assert main(["checkout"]) == 0
for out in ["D\tdir" + os.sep, "D\tother"]:
assert out in caplog.text
assert "modified" not in caplog.text
assert "deleted" not in caplog.text
assert "added" not in caplog.text
def test_stats_does_not_show_changes_by_default(tmp_dir, dvc, scm, caplog):
tmp_dir.dvc_gen(
{"dir": {"subdir": {"file": "file"}}, "other": "other"},
commit="initial",
)
scm.checkout("HEAD~")
with caplog.at_level(logging.INFO, logger="dvc"):
caplog.clear()
assert main(["checkout", "--summary"]) == 0
assert "2 files deleted" in caplog.text
assert "dir" not in caplog.text
assert "other" not in caplog.text
@pytest.mark.parametrize("link", ["hardlink", "symlink", "copy"])
def test_checkout_with_relink_existing(tmp_dir, dvc, link):
tmp_dir.dvc_gen("foo", "foo")
(tmp_dir / "foo").unlink()
tmp_dir.dvc_gen("bar", "bar")
dvc.cache.local.cache_types = [link]
stats = dvc.checkout(relink=True)
assert stats == {**empty_checkout, "added": ["foo"]}
def test_checkout_with_deps(tmp_dir, dvc):
tmp_dir.dvc_gen({"foo": "foo"})
dvc.run(
fname="copy_file.dvc",
cmd="echo foo > bar",
outs=["bar"],
deps=["foo"],
single_stage=True,
)
(tmp_dir / "bar").unlink()
(tmp_dir / "foo").unlink()
stats = dvc.checkout(["copy_file.dvc"], with_deps=False)
assert stats == {**empty_checkout, "added": ["bar"]}
(tmp_dir / "bar").unlink()
stats = dvc.checkout(["copy_file.dvc"], with_deps=True)
assert set(stats["added"]) == {"foo", "bar"}
def test_checkout_recursive(tmp_dir, dvc):
tmp_dir.gen({"dir": {"foo": "foo", "bar": "bar"}})
dvc.add("dir", recursive=True)
(tmp_dir / "dir" / "foo").unlink()
(tmp_dir / "dir" / "bar").unlink()
stats = dvc.checkout(["dir"], recursive=True)
assert set(stats["added"]) == {
os.path.join("dir", "foo"),
os.path.join("dir", "bar"),
}
@pytest.mark.skipif(
not S3.should_test(), reason="Only run with S3 credentials"
)
def test_checkout_for_external_outputs(tmp_dir, dvc):
dvc.cache.s3 = CloudCache(S3RemoteTree(dvc, {"url": S3.get_url()}))
remote = Remote(S3RemoteTree(dvc, {"url": S3.get_url()}))
file_path = remote.path_info / "foo"
remote.tree.s3.put_object(
Bucket=remote.path_info.bucket, Key=file_path.path, Body="foo"
)
dvc.add(str(remote.path_info / "foo"), external=True)
remote.tree.remove(file_path)
stats = dvc.checkout(force=True)
assert stats == {**empty_checkout, "added": [str(file_path)]}
assert remote.tree.exists(file_path)
remote.tree.s3.put_object(
Bucket=remote.path_info.bucket, Key=file_path.path, Body="foo\nfoo"
)
stats = dvc.checkout(force=True)
assert stats == {**empty_checkout, "modified": [str(file_path)]}
def test_checkouts_with_different_addressing(tmp_dir, dvc, run_copy):
tmp_dir.gen({"foo": "foo", "lorem": "lorem"})
run_copy("foo", "bar", name="copy-foo-bar")
run_copy("lorem", "ipsum", name="copy-lorem-ipsum")
(tmp_dir / "bar").unlink()
(tmp_dir / "ipsum").unlink()
assert set(dvc.checkout(PIPELINE_FILE)["added"]) == {"bar", "ipsum"}
(tmp_dir / "bar").unlink()
(tmp_dir / "ipsum").unlink()
assert set(dvc.checkout(":")["added"]) == {"bar", "ipsum"}
(tmp_dir / "bar").unlink()
assert dvc.checkout("copy-foo-bar")["added"] == ["bar"]
(tmp_dir / "bar").unlink()
assert dvc.checkout("dvc.yaml:copy-foo-bar")["added"] == ["bar"]
(tmp_dir / "bar").unlink()
assert dvc.checkout(":copy-foo-bar")["added"] == ["bar"]
(tmp_dir / "bar").unlink()
(tmp_dir / "data").mkdir()
with (tmp_dir / "data").chdir():
assert dvc.checkout(relpath(tmp_dir / "dvc.yaml") + ":copy-foo-bar")[
"added"
] == [relpath(tmp_dir / "bar")]
(tmp_dir / "bar").unlink()
assert dvc.checkout("bar")["added"] == ["bar"]
def test_checkouts_on_same_stage_name_and_output_name(tmp_dir, dvc, run_copy):
tmp_dir.gen("foo", "foo")
run_copy("foo", "bar", name="copy-foo-bar")
run_copy("foo", "copy-foo-bar", name="make_collision")
(tmp_dir / "bar").unlink()
(tmp_dir / "copy-foo-bar").unlink()
assert dvc.checkout("copy-foo-bar")["added"] == ["bar"]
assert dvc.checkout("./copy-foo-bar")["added"] == ["copy-foo-bar"]
def test_checkouts_for_pipeline_tracked_outs(tmp_dir, dvc, scm, run_copy):
tmp_dir.gen("foo", "foo")
stage1 = run_copy("foo", "bar", name="copy-foo-bar")
tmp_dir.gen("lorem", "lorem")
stage2 = run_copy("lorem", "ipsum", name="copy-lorem-ipsum")
for out in ["bar", "ipsum"]:
(tmp_dir / out).unlink()
assert dvc.checkout(["bar"])["added"] == ["bar"]
(tmp_dir / "bar").unlink()
assert set(dvc.checkout([PIPELINE_FILE])["added"]) == {"bar", "ipsum"}
for out in ["bar", "ipsum"]:
(tmp_dir / out).unlink()
assert set(dvc.checkout([stage1.addressing])["added"]) == {"bar"}
(tmp_dir / "bar").unlink()
assert set(dvc.checkout([stage2.addressing])["added"]) == {"ipsum"}
(tmp_dir / "ipsum").unlink()
assert set(dvc.checkout()["added"]) == {"bar", "ipsum"}
| 31.126328 | 79 | 0.620885 |
c4e2838ed3f41668f9aec4223d764bb3f93f241a | 1,417 | py | Python | src/camps/utils.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | src/camps/utils.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | src/camps/utils.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | from camps.models import Camp
from django.utils import timezone
from django.contrib import admin
def get_current_camp():
try:
return Camp.objects.get(camp__contains=timezone.now())
except Camp.DoesNotExist:
return False
class CampPropertyListFilter(admin.SimpleListFilter):
"""
SimpleListFilter to filter models by camp when camp is
a property and not a real model field.
"""
title = 'Camp'
parameter_name = 'camp'
def lookups(self, request, model_admin):
# get the current queryset
qs = model_admin.get_queryset(request)
# get a list of the unique camps in the current queryset
unique_camps = set([item.camp for item in qs])
# loop over camps and yield each as a tuple
for camp in unique_camps:
yield (camp.slug, camp.title)
def queryset(self, request, queryset):
# if self.value() is None return everything
if not self.value():
return queryset
# ok, get the Camp
try:
camp = Camp.objects.get(slug=self.value())
except Camp.DoesNotExist:
# camp not found, return nothing
return queryset.model.objects.none()
# filter out items related to other camps
for item in queryset:
if item.camp != camp:
queryset = queryset.exclude(pk=item.pk)
return queryset
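# Hedged usage sketch (not in the original module): the filter above is meant to be
# plugged into a ModelAdmin's list_filter, just like Django's built-in filters. The
# model name below is hypothetical.
#
#     from django.contrib import admin
#
#     @admin.register(SomeCampRelatedModel)
#     class SomeCampRelatedModelAdmin(admin.ModelAdmin):
#         list_filter = (CampPropertyListFilter,)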
| 28.918367 | 64 | 0.631616 |
4fc1a3524deda8840f81b9ba9ba2c5642e25895b | 15,371 | py | Python | sagemaker/GENIE3-sagemaker.py | cs205-genie3-parallel/genie3-parallel | 6a1348036859e35f1d9bf68083a993ebe601717b | [
"MIT"
] | null | null | null | sagemaker/GENIE3-sagemaker.py | cs205-genie3-parallel/genie3-parallel | 6a1348036859e35f1d9bf68083a993ebe601717b | [
"MIT"
] | null | null | null | sagemaker/GENIE3-sagemaker.py | cs205-genie3-parallel/genie3-parallel | 6a1348036859e35f1d9bf68083a993ebe601717b | [
"MIT"
] | 1 | 2021-04-29T14:59:33.000Z | 2021-04-29T14:59:33.000Z | import argparse
import os
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from numpy import *
import time
from operator import itemgetter
from multiprocessing import Pool
import pandas as pd
import boto3
def compute_feature_importances(estimator):
importances = [e.tree_.compute_feature_importances(normalize=False)
for e in estimator.estimators_]
importances = asarray(importances)
# average the per-tree (unnormalized) importances over all trees in the ensemble
return sum(importances,axis=0) / len(estimator)
def get_link_list(VIM,gene_names=None,regulators='all',maxcount='all',file_name=None):
"""Gets the ranked list of (directed) regulatory links.
Parameters
----------
VIM: numpy array
Array as returned by the function GENIE3(), in which the element (i,j) is the score of the edge directed from the i-th gene to the j-th gene.
gene_names: list of strings, optional
List of length p, where p is the number of rows/columns in VIM, containing the names of the genes. The i-th item of gene_names must correspond to the i-th row/column of VIM. When the gene names are not provided, the i-th gene is named Gi.
default: None
regulators: list of strings, optional
List containing the names of the candidate regulators. When a list of regulators is provided, the names of all the genes must be provided (in gene_names), and the returned list contains only edges directed from the candidate regulators. When regulators is set to 'all', any gene can be a candidate regulator.
default: 'all'
maxcount: 'all' or positive integer, optional
Writes only the first maxcount regulatory links of the ranked list. When maxcount is set to 'all', all the regulatory links are written.
default: 'all'
file_name: string, optional
Writes the ranked list of regulatory links to the file file_name.
default: None
Returns
-------
The list of regulatory links, ordered according to the edge score. Auto-regulations do not appear in the list. Regulatory links with a score equal to zero are randomly permuted. In the ranked list of edges, each line has format:
regulator target gene score of edge
"""
# Check input arguments
# if not isinstance(VIM,ndarray):
# raise ValueError('VIM must be a square array')
# elif VIM.shape[0] != VIM.shape[1]:
# raise ValueError('VIM must be a square array')
ngenes = VIM.shape[0]
# if gene_names is None:
# gene_names = []
# else:
# if not isinstance(gene_names,(list,tuple,ndarray)):
# raise ValueError('input argument gene_names must be a list of gene names')
# elif len(gene_names) != ngenes:
# raise ValueError('input argument gene_names must be a list of length p, where p is the number of columns/genes in the expression data')
if regulators != 'all':
if not isinstance(regulators,(list,tuple)):
raise ValueError('input argument regulators must be a list of gene names')
if gene_names is None:
raise ValueError('the gene names must be specified (in input argument gene_names)')
else:
sIntersection = set(gene_names).intersection(set(regulators))
if not sIntersection:
raise ValueError('The genes must contain at least one candidate regulator')
if maxcount != 'all' and not isinstance(maxcount,int):
raise ValueError('input argument maxcount must be "all" or a positive integer')
if file_name and not isinstance(file_name,str):
raise ValueError('input argument file_name must be a string')
# Get the indices of the candidate regulators
if regulators == 'all':
input_idx = range(ngenes)
else:
input_idx = [i for i, gene in enumerate(gene_names) if gene in regulators]
# Get the non-ranked list of regulatory links
vInter = [(i,j,score) for (i,j),score in ndenumerate(VIM) if i in input_idx and i!=j]
# Rank the list according to the weights of the edges
vInter_sort = sorted(vInter,key=itemgetter(2),reverse=True)
nInter = len(vInter_sort)
# Random permutation of edges with score equal to 0
flag = 1
i = 0
while flag and i < nInter:
# print(f'nInter = {i}')
(TF_idx,target_idx,score) = vInter_sort[i]
if score == 0:
flag = 0
else:
i += 1
if not flag:
items_perm = vInter_sort[i:]
items_perm = random.permutation(items_perm)
vInter_sort[i:] = items_perm
# Write the ranked list of edges
nToWrite = nInter
if isinstance(maxcount,int) and maxcount >= 0 and maxcount < nInter:
nToWrite = maxcount
if file_name:
outfile = open(file_name,'w')
for i in range(nToWrite):
(TF_idx,target_idx,score) = vInter_sort[i]
TF_idx = int(TF_idx)
target_idx = int(target_idx)
outfile.write('%s\t%s\t%.6f\n' % (gene_names[TF_idx],gene_names[target_idx],score))
if gene_names is None:
for i in range(nToWrite):
(TF_idx,target_idx,score) = vInter_sort[i]
TF_idx = int(TF_idx)
target_idx = int(target_idx)
outfile.write('G%d\tG%d\t%.6f\n' % (TF_idx+1,target_idx+1,score))
outfile.close()
else:
if gene_names is None:
for i in range(nToWrite):
(TF_idx,target_idx,score) = vInter_sort[i]
TF_idx = int(TF_idx)
target_idx = int(target_idx)
print('G%d\tG%d\t%.6f' % (TF_idx+1,target_idx+1,score))
else:
for i in range(nToWrite):
(TF_idx,target_idx,score) = vInter_sort[i]
TF_idx = int(TF_idx)
target_idx = int(target_idx)
print('%s\t%s\t%.6f' % (gene_names[TF_idx],gene_names[target_idx],score))
def GENIE3(expr_data,gene_names=None,start_idx=None,stop_idx=None,regulators='all',tree_method='RF',K='sqrt',ntrees=1000,nthreads=1,n_jobs=1):
'''Computation of tree-based scores for all putative regulatory links.
Parameters
----------
expr_data: numpy array
Array containing gene expression values. Each row corresponds to a condition and each column corresponds to a gene.
gene_names: list of strings, optional
List of length p, where p is the number of columns in expr_data, containing the names of the genes. The i-th item of gene_names must correspond to the i-th column of expr_data.
default: None
regulators: list of strings, optional
List containing the names of the candidate regulators. When a list of regulators is provided, the names of all the genes must be provided (in gene_names). When regulators is set to 'all', any gene can be a candidate regulator.
default: 'all'
tree-method: 'RF' or 'ET', optional
Specifies which tree-based procedure is used: either Random Forest ('RF') or Extra-Trees ('ET')
default: 'RF'
K: 'sqrt', 'all' or a positive integer, optional
Specifies the number of selected attributes at each node of one tree: either the square root of the number of candidate regulators ('sqrt'), the total number of candidate regulators ('all'), or any positive integer.
default: 'sqrt'
ntrees: positive integer, optional
Specifies the number of trees grown in an ensemble.
default: 1000
nthreads: positive integer, optional
Number of threads used for parallel computing
default: 1
Returns
-------
An array in which the element (i,j) is the score of the edge directed from the i-th gene to the j-th gene. All diagonal elements are set to zero (auto-regulations are not considered). When a list of candidate regulators is provided, the scores of all the edges directed from a gene that is not a candidate regulator are set to zero.
Informal complexity and output notes: the columns are genes G1, G2, ..., GN, so
N = number of genes (columns), M = number of samples (rows), K = sqrt(N) candidate
regulators tried per split, ntrees = 1000.
Growing one tree for a single target gene costs roughly N*K*log(K); one random forest
for that gene therefore costs ntrees * N*K*log(K), and running a forest for every one
of the N target genes costs ntrees * N^2 * K*log(K) overall.
For N = 30,000 genes there are N^2 = 900,000,000 candidate regulator->target edges.
The ranked edge scores can then be thresholded (e.g. at 0.3) to keep only the strongest
links, turning weighted rows such as
G1 G5 0.0342 1
G1 G2 0.0324 1
G2 G13 0.0274 0
into a binary above/below-threshold indicator (last column).
'''
time_start = time.time()
# Check input arguments
if not isinstance(expr_data,ndarray):
raise ValueError('expr_data must be an array in which each row corresponds to a condition/sample and each column corresponds to a gene')
ngenes = expr_data.shape[1]
if gene_names is None:
gene_names = []
else:
if not isinstance(gene_names,(list,tuple,ndarray)):
raise ValueError('input argument gene_names must be a list of gene names')
elif len(gene_names) != ngenes:
raise ValueError('input argument gene_names must be a list of length p, where p is the number of columns/genes in the expr_data')
if regulators != 'all':
if not isinstance(regulators,(list,tuple)):
raise ValueError('input argument regulators must be a list of gene names')
if gene_names is None:
raise ValueError('the gene names must be specified (in input argument gene_names)')
else:
sIntersection = set(gene_names).intersection(set(regulators))
if not sIntersection:
raise ValueError('the genes must contain at least one candidate regulator')
if tree_method != 'RF' and tree_method != 'ET':
raise ValueError('input argument tree_method must be "RF" (Random Forests) or "ET" (Extra-Trees)')
if K != 'sqrt' and K != 'all' and not isinstance(K,int):
raise ValueError('input argument K must be "sqrt", "all" or a strictly positive integer')
if isinstance(K,int) and K <= 0:
raise ValueError('input argument K must be "sqrt", "all" or a strictly positive integer')
if not isinstance(ntrees,int):
raise ValueError('input argument ntrees must be a strictly positive integer')
elif ntrees <= 0:
raise ValueError('input argument ntrees must be a strictly positive integer')
print('Tree method: ' + str(tree_method))
print('K: ' + str(K))
print('Number of trees: ' + str(ntrees))
print('\n')
# Get the indices of the candidate regulators
if regulators == 'all':
input_idx = list(range(ngenes))
else:
input_idx = [i for i, gene in enumerate(gene_names) if gene in regulators]
# Learn an ensemble of trees for each target gene, and compute scores for candidate regulators
# VIM = zeros((ngenes,ngenes))
VIM = zeros((stop_idx-start_idx,ngenes))
if nthreads > 1:
print('running jobs on %d threads' % nthreads)
# list of parameter lists, one per target gene in [start_idx, stop_idx)
input_data = list()
for i in range(stop_idx-start_idx):
# Parameters of GENIE3 function, i refers to output_idx
input_data.append( [expr_data,i+start_idx,input_idx,tree_method,K,ntrees,n_jobs] )
# PARALLEL process targeting each gene
pool = Pool(nthreads)
alloutput = pool.map(wr_GENIE3_single, input_data)
# len(alloutput) is ngenes
for (i,vi) in alloutput:
VIM[i,:] = vi
else:
print('running single threaded jobs')
for i in range(stop_idx-start_idx):
print('Gene %d/%d...' % (i+start_idx,stop_idx))
vi = GENIE3_single(expr_data,i+start_idx,input_idx,tree_method,K,ntrees,n_jobs)
VIM[i,:] = vi
VIM = transpose(VIM)
time_end = time.time()
print("Elapsed time: %.2f seconds" % (time_end - time_start))
return VIM
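# Hedged usage sketch (file and variable names are illustrative): with this modified
# signature, start_idx/stop_idx select the slice of target genes a worker computes, so
# a single worker covering every gene would look roughly like:
#
#     data, gene_names = preprocess_data("healthy.tsv")
#     VIM = GENIE3(data, gene_names=gene_names, start_idx=0, stop_idx=data.shape[1],
#                  nthreads=4, n_jobs=1)
#     get_link_list(VIM, gene_names=gene_names, file_name="output_ranking.txt")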
# function for single thread
def wr_GENIE3_single(args):
return([args[1], GENIE3_single(args[0], args[1], args[2], args[3], args[4], args[5], args[6])])
# In parallel, split up the output_idx
def GENIE3_single(expr_data,output_idx,input_idx,tree_method,K,ntrees,n_jobs):
ngenes = expr_data.shape[1]
# Expression of target gene, select column
output = expr_data[:,output_idx]
# Normalize output data
output = output / std(output)
# Remove target gene from candidate regulators
input_idx = input_idx[:]
if output_idx in input_idx:
input_idx.remove(output_idx)
expr_data_input = expr_data[:,input_idx]
# Parameter K of the tree-based method
if (K == 'all') or (isinstance(K,int) and K >= len(input_idx)):
max_features = "auto"
else:
max_features = K
if tree_method == 'RF':
treeEstimator = RandomForestRegressor(n_estimators=ntrees,max_features=max_features,n_jobs=n_jobs)
elif tree_method == 'ET':
treeEstimator = ExtraTreesRegressor(n_estimators=ntrees,max_features=max_features,n_jobs=n_jobs)
# Learn ensemble of trees
treeEstimator.fit(expr_data_input,output)
# Compute importance scores
feature_importances = compute_feature_importances(treeEstimator)
vi = zeros(ngenes)
# for each target, all the other genes
vi[input_idx] = feature_importances
return vi
def preprocess_data(uri):
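    # Read a tab-separated expression table (one row per gene), drop the 'Gene ID' column,
    # index by 'Gene Name', transpose so rows become samples/conditions and columns become genes,
    # and replace missing values with 0.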
df = pd.read_csv(uri, sep='\t')
gene_names = df['Gene Name'].values
df = df.drop(['Gene ID'], axis=1)
df = df.set_index('Gene Name', drop=True)
df_T = df.T
df_T = df_T.fillna(0)
data = df_T.values
return data, gene_names
def upload_file_to_s3(s3_path, local_path, folder):
    bucket = s3_path.split('/')[2]  # bucket is element 2 after splitting, since paths look like s3://bucket/...
file_path = '/'.join(s3_path.split('/')[3:])
response = s3.Object(bucket, os.path.join(file_path,folder)).upload_file(local_path)
return response
if __name__ =='__main__':
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script.
parser.add_argument('--start_idx', type=int, default=0)
parser.add_argument('--stop_idx', type=int, default=10)
parser.add_argument('--nthreads', type=int, default=1)
parser.add_argument('--n_jobs', type=int, default=1)
parser.add_argument('--fname', type=str, default='output_ranking.txt')
# Data, model, and output directories
# opt/ml/output
parser.add_argument('--output-data-dir', type=str, default=os.environ.get('SM_OUTPUT_DATA_DIR'))
# parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
args, _ = parser.parse_known_args()
bucket_name = 'cs205-final'
s3 = boto3.resource('s3')
output_path = f"s3://{bucket_name}/output/"
data, gene_names = preprocess_data(os.path.join(args.train, "healthy.tsv"))
print(args.start_idx)
print(args.stop_idx)
VIM = GENIE3(data, gene_names=gene_names, start_idx=args.start_idx, stop_idx=args.stop_idx,nthreads=args.nthreads,n_jobs=args.n_jobs)
output_fname = os.path.join('/opt/ml/output/data', args.fname)
get_link_list(VIM, gene_names=gene_names, file_name=output_fname)
response = upload_file_to_s3(output_path, output_fname, args.fname)
print(response)
| 37.490244 | 336 | 0.663457 |
09bb6c945f223835c43a690b4d7339211d2e8936 | 232 | py | Python | app_init/service_trigger/args.py | kdeltared/tcex | 818c0d09256764f871e42d9ca5916f92d941d882 | [
"Apache-2.0"
] | 18 | 2017-01-09T22:17:49.000Z | 2022-01-24T20:46:42.000Z | app_init/service_trigger/args.py | kdeltared/tcex | 818c0d09256764f871e42d9ca5916f92d941d882 | [
"Apache-2.0"
] | 84 | 2017-04-11T13:47:49.000Z | 2022-03-21T20:12:57.000Z | app_init/service_trigger/args.py | kdeltared/tcex | 818c0d09256764f871e42d9ca5916f92d941d882 | [
"Apache-2.0"
] | 43 | 2017-01-05T20:40:26.000Z | 2022-03-31T19:18:02.000Z | """Service Args"""
from argparse import ArgumentParser
class Args:
"""Playbook Args"""
def __init__(self, parser: ArgumentParser):
"""Initialize class properties."""
parser.add_argument('--example_input')
| 21.090909 | 47 | 0.668103 |
b261e078c46924794044de24a0b52e13a4b3afdb | 37,690 | py | Python | synapse/http/matrixfederationclient.py | littlebenlittle/synapse | 0eccf531466d762ede0dd365284a8465bfb18d0f | [
"Apache-2.0"
] | null | null | null | synapse/http/matrixfederationclient.py | littlebenlittle/synapse | 0eccf531466d762ede0dd365284a8465bfb18d0f | [
"Apache-2.0"
] | null | null | null | synapse/http/matrixfederationclient.py | littlebenlittle/synapse | 0eccf531466d762ede0dd365284a8465bfb18d0f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import logging
import random
import sys
import urllib.parse
from io import BytesIO
from typing import Callable, Dict, List, Optional, Tuple, Union
import attr
import treq
from canonicaljson import encode_canonical_json
from prometheus_client import Counter
from signedjson.sign import sign_json
from twisted.internet import defer
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IReactorTime
from twisted.internet.task import _EPSILON, Cooperator
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer, IResponse
import synapse.metrics
import synapse.util.retryutils
from synapse.api.errors import (
Codes,
FederationDeniedError,
HttpResponseException,
RequestSendFailed,
SynapseError,
)
from synapse.http import QuieterFileBodyProducer
from synapse.http.client import (
BlacklistingAgentWrapper,
BlacklistingReactorWrapper,
BodyExceededMaxSize,
encode_query_args,
read_body_with_max_size,
)
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import (
inject_active_span_byte_dict,
set_tag,
start_active_span,
tags,
)
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
outgoing_requests_counter = Counter(
"synapse_http_matrixfederationclient_requests", "", ["method"]
)
incoming_responses_counter = Counter(
"synapse_http_matrixfederationclient_responses", "", ["method", "code"]
)
MAX_LONG_RETRIES = 10
MAX_SHORT_RETRIES = 3
MAXINT = sys.maxsize
_next_id = 1
QueryArgs = Dict[str, Union[str, List[str]]]
@attr.s(slots=True, frozen=True)
class MatrixFederationRequest:
method = attr.ib(type=str)
"""HTTP method
"""
path = attr.ib(type=str)
"""HTTP path
"""
destination = attr.ib(type=str)
"""The remote server to send the HTTP request to.
"""
json = attr.ib(default=None, type=Optional[JsonDict])
"""JSON to send in the body.
"""
json_callback = attr.ib(default=None, type=Optional[Callable[[], JsonDict]])
"""A callback to generate the JSON.
"""
query = attr.ib(default=None, type=Optional[dict])
"""Query arguments.
"""
txn_id = attr.ib(default=None, type=Optional[str])
"""Unique ID for this request (for logging)
"""
uri = attr.ib(init=False, type=bytes)
"""The URI of this request
"""
def __attrs_post_init__(self) -> None:
global _next_id
txn_id = "%s-O-%s" % (self.method, _next_id)
_next_id = (_next_id + 1) % (MAXINT - 1)
object.__setattr__(self, "txn_id", txn_id)
destination_bytes = self.destination.encode("ascii")
path_bytes = self.path.encode("ascii")
if self.query:
query_bytes = encode_query_args(self.query)
else:
query_bytes = b""
# The object is frozen so we can pre-compute this.
uri = urllib.parse.urlunparse(
(b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
)
object.__setattr__(self, "uri", uri)
def get_json(self) -> Optional[JsonDict]:
if self.json_callback:
return self.json_callback()
return self.json
async def _handle_json_response(
reactor: IReactorTime,
timeout_sec: float,
request: MatrixFederationRequest,
response: IResponse,
start_ms: int,
) -> JsonDict:
"""
Reads the JSON body of a response, with a timeout
Args:
reactor: twisted reactor, for the timeout
timeout_sec: number of seconds to wait for response to complete
request: the request that triggered the response
response: response to the request
start_ms: Timestamp when request was made
Returns:
The parsed JSON response
"""
try:
check_content_type_is_json(response.headers)
# Use the custom JSON decoder (partially re-implements treq.json_content).
d = treq.text_content(response, encoding="utf-8")
d.addCallback(json_decoder.decode)
d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor)
body = await make_deferred_yieldable(d)
except defer.TimeoutError as e:
logger.warning(
"{%s} [%s] Timed out reading response - %s %s",
request.txn_id,
request.destination,
request.method,
request.uri.decode("ascii"),
)
raise RequestSendFailed(e, can_retry=True) from e
except Exception as e:
logger.warning(
"{%s} [%s] Error reading response %s %s: %s",
request.txn_id,
request.destination,
request.method,
request.uri.decode("ascii"),
e,
)
raise
time_taken_secs = reactor.seconds() - start_ms / 1000
logger.info(
"{%s} [%s] Completed request: %d %s in %.2f secs - %s %s",
request.txn_id,
request.destination,
response.code,
response.phrase.decode("ascii", errors="replace"),
time_taken_secs,
request.method,
request.uri.decode("ascii"),
)
return body
class MatrixFederationHttpClient:
"""HTTP client used to talk to other homeservers over the federation
protocol. Send client certificates and signs requests.
Attributes:
agent (twisted.web.client.Agent): The twisted Agent used to send the
requests.
"""
def __init__(self, hs, tls_client_options_factory):
self.hs = hs
self.signing_key = hs.signing_key
self.server_name = hs.hostname
# We need to use a DNS resolver which filters out blacklisted IP
# addresses, to prevent DNS rebinding.
self.reactor = BlacklistingReactorWrapper(
hs.get_reactor(), None, hs.config.federation_ip_range_blacklist
)
user_agent = hs.version_string
if hs.config.user_agent_suffix:
user_agent = "%s %s" % (user_agent, hs.config.user_agent_suffix)
user_agent = user_agent.encode("ascii")
self.agent = MatrixFederationAgent(
self.reactor,
tls_client_options_factory,
user_agent,
hs.config.federation_ip_range_blacklist,
)
# Use a BlacklistingAgentWrapper to prevent circumventing the IP
# blacklist via IP literals in server names
self.agent = BlacklistingAgentWrapper(
self.agent, ip_blacklist=hs.config.federation_ip_range_blacklist,
)
self.clock = hs.get_clock()
self._store = hs.get_datastore()
self.version_string_bytes = hs.version_string.encode("ascii")
self.default_timeout = 60
def schedule(x):
self.reactor.callLater(_EPSILON, x)
self._cooperator = Cooperator(scheduler=schedule)
async def _send_request_with_optional_trailing_slash(
self,
request: MatrixFederationRequest,
try_trailing_slash_on_400: bool = False,
**send_request_args
) -> IResponse:
"""Wrapper for _send_request which can optionally retry the request
upon receiving a combination of a 400 HTTP response code and a
'M_UNRECOGNIZED' errcode. This is a workaround for Synapse <= v0.99.3
due to #3622.
Args:
request: details of request to be sent
try_trailing_slash_on_400: Whether on receiving a 400
'M_UNRECOGNIZED' from the server to retry the request with a
trailing slash appended to the request path.
send_request_args: A dictionary of arguments to pass to `_send_request()`.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
Returns:
Parsed JSON response body.
"""
try:
response = await self._send_request(request, **send_request_args)
except HttpResponseException as e:
# Received an HTTP error > 300. Check if it meets the requirements
# to retry with a trailing slash
if not try_trailing_slash_on_400:
raise
if e.code != 400 or e.to_synapse_error().errcode != "M_UNRECOGNIZED":
raise
# Retry with a trailing slash if we received a 400 with
# 'M_UNRECOGNIZED' which some endpoints can return when omitting a
# trailing slash on Synapse <= v0.99.3.
logger.info("Retrying request with trailing slash")
# Request is frozen so we create a new instance
request = attr.evolve(request, path=request.path + "/")
response = await self._send_request(request, **send_request_args)
return response
async def _send_request(
self,
request: MatrixFederationRequest,
retry_on_dns_fail: bool = True,
timeout: Optional[int] = None,
long_retries: bool = False,
ignore_backoff: bool = False,
backoff_on_404: bool = False,
) -> IResponse:
"""
Sends a request to the given server.
Args:
request: details of request to be sent
            retry_on_dns_fail: true if the request should be retried on DNS failures
timeout: number of milliseconds to wait for the response headers
(including connecting to the server), *for each attempt*.
60s by default.
long_retries: whether to use the long retry algorithm.
The regular retry algorithm makes 4 attempts, with intervals
[0.5s, 1s, 2s].
The long retry algorithm makes 11 attempts, with intervals
[4s, 16s, 60s, 60s, ...]
Both algorithms add -20%/+40% jitter to the retry intervals.
Note that the above intervals are *in addition* to the time spent
waiting for the request to complete (up to `timeout` ms).
NB: the long retry algorithm takes over 20 minutes to complete, with
a default timeout of 60s!
ignore_backoff: true to ignore the historical backoff data
and try the request anyway.
backoff_on_404: Back off if we get a 404
Returns:
Resolves with the HTTP response object on success.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
if timeout:
_sec_timeout = timeout / 1000
else:
_sec_timeout = self.default_timeout
if (
self.hs.config.federation_domain_whitelist is not None
and request.destination not in self.hs.config.federation_domain_whitelist
):
raise FederationDeniedError(request.destination)
limiter = await synapse.util.retryutils.get_retry_limiter(
request.destination,
self.clock,
self._store,
backoff_on_404=backoff_on_404,
ignore_backoff=ignore_backoff,
)
method_bytes = request.method.encode("ascii")
destination_bytes = request.destination.encode("ascii")
path_bytes = request.path.encode("ascii")
if request.query:
query_bytes = encode_query_args(request.query)
else:
query_bytes = b""
scope = start_active_span(
"outgoing-federation-request",
tags={
tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
tags.PEER_ADDRESS: request.destination,
tags.HTTP_METHOD: request.method,
tags.HTTP_URL: request.path,
},
finish_on_close=True,
)
# Inject the span into the headers
headers_dict = {} # type: Dict[bytes, List[bytes]]
inject_active_span_byte_dict(headers_dict, request.destination)
headers_dict[b"User-Agent"] = [self.version_string_bytes]
with limiter, scope:
# XXX: Would be much nicer to retry only at the transaction-layer
# (once we have reliable transactions in place)
if long_retries:
retries_left = MAX_LONG_RETRIES
else:
retries_left = MAX_SHORT_RETRIES
url_bytes = request.uri
url_str = url_bytes.decode("ascii")
url_to_sign_bytes = urllib.parse.urlunparse(
(b"", b"", path_bytes, None, query_bytes, b"")
)
while True:
try:
json = request.get_json()
if json:
headers_dict[b"Content-Type"] = [b"application/json"]
auth_headers = self.build_auth_headers(
destination_bytes, method_bytes, url_to_sign_bytes, json
)
data = encode_canonical_json(json)
producer = QuieterFileBodyProducer(
BytesIO(data), cooperator=self._cooperator
) # type: Optional[IBodyProducer]
else:
producer = None
auth_headers = self.build_auth_headers(
destination_bytes, method_bytes, url_to_sign_bytes
)
headers_dict[b"Authorization"] = auth_headers
logger.debug(
"{%s} [%s] Sending request: %s %s; timeout %fs",
request.txn_id,
request.destination,
request.method,
url_str,
_sec_timeout,
)
outgoing_requests_counter.labels(request.method).inc()
try:
with Measure(self.clock, "outbound_request"):
# we don't want all the fancy cookie and redirect handling
# that treq.request gives: just use the raw Agent.
request_deferred = self.agent.request(
method_bytes,
url_bytes,
headers=Headers(headers_dict),
bodyProducer=producer,
)
request_deferred = timeout_deferred(
request_deferred,
timeout=_sec_timeout,
reactor=self.reactor,
)
response = await request_deferred
except DNSLookupError as e:
raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e
except Exception as e:
raise RequestSendFailed(e, can_retry=True) from e
incoming_responses_counter.labels(
request.method, response.code
).inc()
set_tag(tags.HTTP_STATUS_CODE, response.code)
response_phrase = response.phrase.decode("ascii", errors="replace")
if 200 <= response.code < 300:
logger.debug(
"{%s} [%s] Got response headers: %d %s",
request.txn_id,
request.destination,
response.code,
response_phrase,
)
pass
else:
logger.info(
"{%s} [%s] Got response headers: %d %s",
request.txn_id,
request.destination,
response.code,
response_phrase,
)
# :'(
# Update transactions table?
d = treq.content(response)
d = timeout_deferred(
d, timeout=_sec_timeout, reactor=self.reactor
)
try:
body = await make_deferred_yieldable(d)
except Exception as e:
# Eh, we're already going to raise an exception so lets
# ignore if this fails.
logger.warning(
"{%s} [%s] Failed to get error response: %s %s: %s",
request.txn_id,
request.destination,
request.method,
url_str,
_flatten_response_never_received(e),
)
body = None
exc = HttpResponseException(
response.code, response_phrase, body
)
# Retry if the error is a 429 (Too Many Requests),
# otherwise just raise a standard HttpResponseException
if response.code == 429:
raise RequestSendFailed(exc, can_retry=True) from exc
else:
raise exc
break
except RequestSendFailed as e:
logger.info(
"{%s} [%s] Request failed: %s %s: %s",
request.txn_id,
request.destination,
request.method,
url_str,
_flatten_response_never_received(e.inner_exception),
)
if not e.can_retry:
raise
if retries_left and not timeout:
if long_retries:
delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
delay = min(delay, 60)
delay *= random.uniform(0.8, 1.4)
else:
delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
delay = min(delay, 2)
delay *= random.uniform(0.8, 1.4)
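                        # Resulting schedules (before jitter): long retries wait 4s, 16s, then 60s
                        # per attempt (capped); short retries wait 0.5s, 1s, 2s. The uniform(0.8, 1.4)
                        # factor applies the -20%/+40% jitter described in the docstring.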
logger.debug(
"{%s} [%s] Waiting %ss before re-sending...",
request.txn_id,
request.destination,
delay,
)
await self.clock.sleep(delay)
retries_left -= 1
else:
raise
except Exception as e:
logger.warning(
"{%s} [%s] Request failed: %s %s: %s",
request.txn_id,
request.destination,
request.method,
url_str,
_flatten_response_never_received(e),
)
raise
return response
def build_auth_headers(
self,
destination: Optional[bytes],
method: bytes,
url_bytes: bytes,
content: Optional[JsonDict] = None,
destination_is: Optional[bytes] = None,
) -> List[bytes]:
"""
Builds the Authorization headers for a federation request
Args:
destination: The destination homeserver of the request.
May be None if the destination is an identity server, in which case
destination_is must be non-None.
method: The HTTP method of the request
url_bytes: The URI path of the request
content: The body of the request
destination_is: As 'destination', but if the destination is an
identity server
Returns:
A list of headers to be added as "Authorization:" headers
"""
request = {
"method": method.decode("ascii"),
"uri": url_bytes.decode("ascii"),
"origin": self.server_name,
}
if destination is not None:
request["destination"] = destination.decode("ascii")
if destination_is is not None:
request["destination_is"] = destination_is.decode("ascii")
if content is not None:
request["content"] = content
request = sign_json(request, self.server_name, self.signing_key)
auth_headers = []
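        # Each signature becomes one header value; with illustrative values it looks like:
        #   X-Matrix origin=example.org,key="ed25519:key1",sig="<signature>"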
for key, sig in request["signatures"][self.server_name].items():
auth_headers.append(
(
'X-Matrix origin=%s,key="%s",sig="%s"'
% (self.server_name, key, sig)
).encode("ascii")
)
return auth_headers
async def put_json(
self,
destination: str,
path: str,
args: Optional[QueryArgs] = None,
data: Optional[JsonDict] = None,
json_data_callback: Optional[Callable[[], JsonDict]] = None,
long_retries: bool = False,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
backoff_on_404: bool = False,
try_trailing_slash_on_400: bool = False,
) -> Union[JsonDict, list]:
""" Sends the specified json data using PUT
Args:
destination: The remote server to send the HTTP request to.
path: The HTTP path.
args: query params
data: A dict containing the data that will be used as
the request body. This will be encoded as JSON.
json_data_callback: A callable returning the dict to
use as the request body.
long_retries: whether to use the long retry algorithm. See
docs on _send_request for details.
timeout: number of milliseconds to wait for the response.
self._default_timeout (60s) by default.
Note that we may make several attempts to send the request; this
timeout applies to the time spent waiting for response headers for
*each* attempt (including connection time) as well as the time spent
reading the response body after a 200 response.
ignore_backoff: true to ignore the historical backoff data
and try the request anyway.
backoff_on_404: True if we should count a 404 response as
a failure of the server (and should therefore back off future
requests).
try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
response we should try appending a trailing slash to the end
of the request. Workaround for #3622 in Synapse <= v0.99.3. This
will be attempted before backing off if backing off has been
enabled.
Returns:
Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="PUT",
destination=destination,
path=path,
query=args,
json_callback=json_data_callback,
json=data,
)
start_ms = self.clock.time_msec()
response = await self._send_request_with_optional_trailing_slash(
request,
try_trailing_slash_on_400,
backoff_on_404=backoff_on_404,
ignore_backoff=ignore_backoff,
long_retries=long_retries,
timeout=timeout,
)
if timeout is not None:
_sec_timeout = timeout / 1000
else:
_sec_timeout = self.default_timeout
body = await _handle_json_response(
self.reactor, _sec_timeout, request, response, start_ms
)
return body
async def post_json(
self,
destination: str,
path: str,
data: Optional[JsonDict] = None,
long_retries: bool = False,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
args: Optional[QueryArgs] = None,
) -> Union[JsonDict, list]:
""" Sends the specified json data using POST
Args:
destination: The remote server to send the HTTP request to.
path: The HTTP path.
data: A dict containing the data that will be used as
the request body. This will be encoded as JSON.
long_retries: whether to use the long retry algorithm. See
docs on _send_request for details.
timeout: number of milliseconds to wait for the response.
self._default_timeout (60s) by default.
Note that we may make several attempts to send the request; this
timeout applies to the time spent waiting for response headers for
*each* attempt (including connection time) as well as the time spent
reading the response body after a 200 response.
ignore_backoff: true to ignore the historical backoff data and
try the request anyway.
args: query params
Returns:
dict|list: Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="POST", destination=destination, path=path, query=args, json=data
)
start_ms = self.clock.time_msec()
response = await self._send_request(
request,
long_retries=long_retries,
timeout=timeout,
ignore_backoff=ignore_backoff,
)
if timeout:
_sec_timeout = timeout / 1000
else:
_sec_timeout = self.default_timeout
body = await _handle_json_response(
self.reactor, _sec_timeout, request, response, start_ms,
)
return body
async def get_json(
self,
destination: str,
path: str,
args: Optional[QueryArgs] = None,
retry_on_dns_fail: bool = True,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
try_trailing_slash_on_400: bool = False,
) -> Union[JsonDict, list]:
""" GETs some json from the given host homeserver and path
Args:
destination: The remote server to send the HTTP request to.
path: The HTTP path.
args: A dictionary used to create query strings, defaults to
None.
timeout: number of milliseconds to wait for the response.
self._default_timeout (60s) by default.
Note that we may make several attempts to send the request; this
timeout applies to the time spent waiting for response headers for
*each* attempt (including connection time) as well as the time spent
reading the response body after a 200 response.
ignore_backoff: true to ignore the historical backoff data
and try the request anyway.
try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
response we should try appending a trailing slash to the end of
the request. Workaround for #3622 in Synapse <= v0.99.3.
Returns:
Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="GET", destination=destination, path=path, query=args
)
start_ms = self.clock.time_msec()
response = await self._send_request_with_optional_trailing_slash(
request,
try_trailing_slash_on_400,
backoff_on_404=False,
ignore_backoff=ignore_backoff,
retry_on_dns_fail=retry_on_dns_fail,
timeout=timeout,
)
if timeout is not None:
_sec_timeout = timeout / 1000
else:
_sec_timeout = self.default_timeout
body = await _handle_json_response(
self.reactor, _sec_timeout, request, response, start_ms
)
return body
async def delete_json(
self,
destination: str,
path: str,
long_retries: bool = False,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
args: Optional[QueryArgs] = None,
) -> Union[JsonDict, list]:
"""Send a DELETE request to the remote expecting some json response
Args:
destination: The remote server to send the HTTP request to.
path: The HTTP path.
long_retries: whether to use the long retry algorithm. See
docs on _send_request for details.
timeout: number of milliseconds to wait for the response.
self._default_timeout (60s) by default.
Note that we may make several attempts to send the request; this
timeout applies to the time spent waiting for response headers for
*each* attempt (including connection time) as well as the time spent
reading the response body after a 200 response.
ignore_backoff: true to ignore the historical backoff data and
try the request anyway.
args: query params
Returns:
Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="DELETE", destination=destination, path=path, query=args
)
start_ms = self.clock.time_msec()
response = await self._send_request(
request,
long_retries=long_retries,
timeout=timeout,
ignore_backoff=ignore_backoff,
)
if timeout is not None:
_sec_timeout = timeout / 1000
else:
_sec_timeout = self.default_timeout
body = await _handle_json_response(
self.reactor, _sec_timeout, request, response, start_ms
)
return body
async def get_file(
self,
destination: str,
path: str,
output_stream,
args: Optional[QueryArgs] = None,
retry_on_dns_fail: bool = True,
max_size: Optional[int] = None,
ignore_backoff: bool = False,
) -> Tuple[int, Dict[bytes, List[bytes]]]:
"""GETs a file from a given homeserver
Args:
destination: The remote server to send the HTTP request to.
path: The HTTP path to GET.
output_stream: File to write the response body to.
args: Optional dictionary used to create the query string.
ignore_backoff: true to ignore the historical backoff data
and try the request anyway.
Returns:
Resolves with an (int,dict) tuple of
the file length and a dict of the response headers.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="GET", destination=destination, path=path, query=args
)
response = await self._send_request(
request, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff
)
headers = dict(response.headers.getAllRawHeaders())
try:
d = read_body_with_max_size(response, output_stream, max_size)
d.addTimeout(self.default_timeout, self.reactor)
length = await make_deferred_yieldable(d)
except BodyExceededMaxSize:
msg = "Requested file is too large > %r bytes" % (max_size,)
logger.warning(
"{%s} [%s] %s", request.txn_id, request.destination, msg,
)
            raise SynapseError(502, msg, Codes.TOO_LARGE)
except Exception as e:
logger.warning(
"{%s} [%s] Error reading response: %s",
request.txn_id,
request.destination,
e,
)
raise
logger.info(
"{%s} [%s] Completed: %d %s [%d bytes] %s %s",
request.txn_id,
request.destination,
response.code,
response.phrase.decode("ascii", errors="replace"),
length,
request.method,
request.uri.decode("ascii"),
)
return (length, headers)
def _flatten_response_never_received(e):
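    # Twisted failures such as ResponseNeverReceived carry their underlying failures in a
    # "reasons" attribute; recurse into them so the log line shows every nested cause.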
if hasattr(e, "reasons"):
reasons = ", ".join(
_flatten_response_never_received(f.value) for f in e.reasons
)
return "%s:[%s]" % (type(e).__name__, reasons)
else:
return repr(e)
def check_content_type_is_json(headers: Headers) -> None:
"""
Check that a set of HTTP headers have a Content-Type header, and that it
is application/json.
Args:
headers: headers to check
Raises:
RequestSendFailed: if the Content-Type header is missing or isn't JSON
"""
c_type = headers.getRawHeaders(b"Content-Type")
if c_type is None:
raise RequestSendFailed(
RuntimeError("No Content-Type header received from remote server"),
can_retry=False,
)
c_type = c_type[0].decode("ascii") # only the first header
val, options = cgi.parse_header(c_type)
if val != "application/json":
raise RequestSendFailed(
RuntimeError(
"Remote server sent Content-Type header of '%s', not 'application/json'"
% c_type,
),
can_retry=False,
)
| 35.861085 | 88 | 0.56901 |
afc4357c479d470f2cb639bab1d35798e8044476 | 3,061 | py | Python | eval/substitute_qmltpeq_equality.py | TobiasGleissner/embed_modal | 746e3efb6f4c6cf70cc5b67f9c8f2ea3657328ec | [
"BSD-3-Clause"
] | 5 | 2018-06-20T14:52:55.000Z | 2022-02-21T15:51:56.000Z | eval/substitute_qmltpeq_equality.py | leoprover/embed_modal | 746e3efb6f4c6cf70cc5b67f9c8f2ea3657328ec | [
"BSD-3-Clause"
] | 5 | 2018-04-11T22:05:37.000Z | 2020-04-13T14:30:09.000Z | eval/substitute_qmltpeq_equality.py | leoprover/embed_modal | 746e3efb6f4c6cf70cc5b67f9c8f2ea3657328ec | [
"BSD-3-Clause"
] | 1 | 2018-04-05T20:02:31.000Z | 2018-04-05T20:02:31.000Z | from common import get_problem_file_list, create_tree
import sys
import filters_for_the_qmltp
from pathlib import Path
# first apply formula has children: "qmltpeq@op1","@","op2"
# second apply formula has children: "qmltpeq","@","op1"
def exchangeQmltpEqualities(node):
if node.getRule() == "thf_apply_formula":
qmltpeq_at_op1_node = node.getChild(0)
if node.childCount() == 1: # "qmltpeq" and "@" were already removed
return
at_operator_node = node.getChild(1)
#op2_node = node.getChild(2)
if "qmltpeq" != qmltpeq_at_op1_node.getContent() and "qmltpeq" in qmltpeq_at_op1_node.getContent():
at_operator_node.getFirstTerminal().setContent("=")
qmltpeq_at_op1_node.removeChild(1) # @
qmltpeq_at_op1_node.removeChild(0) # qmltpeq
def removeQmltpeqTypeDeclaration(node):
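    # When a "name" node contains the qmltpeq symbol, blank out the whole enclosing
    # tptp_input sentence so the separate type declaration for qmltpeq disappears.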
if node.rule == "name" and "qmltpeq" in node.getContent():
tptp_input_node = node.parent.parent.parent
tptp_input_node.removeChildren()
tptp_input_node.setContent("")
def main(qmltp_dir,out_dir):
sys.setrecursionlimit(1500)
qmltp_path = Path(qmltp_dir)
out_path = Path(out_dir)
problem_file_list = get_problem_file_list(qmltp_path)
# problems that have the symbol "=" replaced by "customqmltpeq" or "customqmltpeqfromineq" +
# problems that contain the symbol "qmltpeq"
# all problems have an adequate axiomatization
problem_white_filter = filters_for_the_qmltp.qmltp_problems_containing_qmltpeq_symbol_with_axiomatization + \
filters_for_the_qmltp.qmltp_problems_containing_native_equality_with_axiomatization
#problem_white_filter = ["GSV107+1.p"]
problem_black_filter = None
for f in problem_file_list:
if problem_white_filter != None and not f.name in problem_white_filter:
continue
if problem_black_filter != None and f.name in problem_black_filter:
continue
outFileDir = out_path / f.name[:3]
outFilePath = outFileDir / f.name
if outFilePath.exists():
print(f,"already exists.")
continue
print("now processing",f)
with open(f,"r") as fh:
content = fh.read()
# replace symbol "customqmltpeq" with symbol "qmltpeq" for conformity
content = content.replace("customqmltpeq","qmltpeq")
# replace symbol "customqmltpeqfromineq" with "qmltpeq" for conformity
content = content.replace("customqmltpeqfromineq","qmltpeq")
root = create_tree(content)
# replace all (qmltpeq @ a @ b) by (a = b)
root.dfs(exchangeQmltpEqualities)
# replace all type sentences for the symbol "qmltpeq"
root.dfs(removeQmltpeqTypeDeclaration)
newProblem = str(root)
# write to file
outFileDir.mkdir(exist_ok=True)
with open(outFilePath,"w+") as fhw:
fhw.write(newProblem)
if __name__ == '__main__':
main(sys.argv[1],sys.argv[2]) | 43.728571 | 113 | 0.666122 |
0e3b96105c557328be7e4c9c62a56832ae2b0a62 | 184 | py | Python | coding/learn_python/function_object/clockdeco_param_demo1.py | yatao91/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | 3 | 2021-05-25T16:58:52.000Z | 2022-02-05T09:37:17.000Z | coding/learn_python/function_object/clockdeco_param_demo1.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | coding/learn_python/function_object/clockdeco_param_demo1.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import time
from clockdeco_param import clock
@clock('{name}: {elapsed}s')
def snooze(seconds):
time.sleep(seconds)
for i in range(3):
snooze(.123)
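# Assuming clockdeco_param.clock prints the given format string after each call, running this
# module produces three lines roughly like "snooze: 0.123...s" (exact timings vary per run).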
| 14.153846 | 33 | 0.652174 |
0e5a610b289b17e88b1851e02bf587cfde059276 | 1,888 | py | Python | examples/ad_manager/v201902/creative_service/get_image_creatives.py | nlynch504/googleads-python-lib | 8f7bd7f987498c4651c969a7dc73e1d5fc965be2 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201902/creative_service/get_image_creatives.py | nlynch504/googleads-python-lib | 8f7bd7f987498c4651c969a7dc73e1d5fc965be2 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201902/creative_service/get_image_creatives.py | nlynch504/googleads-python-lib | 8f7bd7f987498c4651c969a7dc73e1d5fc965be2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all image creatives.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
creative_service = client.GetService('CreativeService', version='v201902')
# Create a statement to select creatives.
statement = (ad_manager.StatementBuilder(version='v201902')
.Where('creativeType = :creativeType')
.WithBindVariable('creativeType', 'ImageCreative'))
# Retrieve a small amount of creatives at a time, paging
# through until all creatives have been retrieved.
while True:
response = creative_service.getCreativesByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for creative in response['results']:
# Print out some information for each creative.
print('Creative with ID "%d" and name "%s" was found.\n' %
(creative['id'], creative['name']))
statement.offset += statement.limit
else:
break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| 37.019608 | 80 | 0.722458 |
41ed768c90b2bf865396ccea5ffd432e688a9032 | 606 | py | Python | src/main/python/org/broadinstitute/hellbender/vqsr_cnn/__init__.py | yhcheng/gatk | 1ba826c7eded2e76a3c151c6fbc228d44c798b0b | [
"BSD-3-Clause"
] | null | null | null | src/main/python/org/broadinstitute/hellbender/vqsr_cnn/__init__.py | yhcheng/gatk | 1ba826c7eded2e76a3c151c6fbc228d44c798b0b | [
"BSD-3-Clause"
] | null | null | null | src/main/python/org/broadinstitute/hellbender/vqsr_cnn/__init__.py | yhcheng/gatk | 1ba826c7eded2e76a3c151c6fbc228d44c798b0b | [
"BSD-3-Clause"
] | null | null | null | from .vqsr_cnn.models import build_read_tensor_2d_and_annotations_model, build_tiny_2d_annotation_model, build_reference_annotation_model
from .vqsr_cnn.models import args_and_model_from_semantics, train_model_from_generators, build_small_2d_annotation_model
from .vqsr_cnn.tensor_maps import get_tensor_channel_map_from_args, tensor_shape_from_args
from .vqsr_cnn.arguments import parse_args, weight_path_from_args, annotations_from_args
from .vqsr_cnn.inference import score_and_write_batch
from .vqsr_cnn.plots import plot_roc_per_class
from ._version import __version__
from .vqsr_cnn.defines import *
| 67.333333 | 137 | 0.89769 |
8c32f11ed1720d324c280ea1b80e69e2491da83e | 7,471 | py | Python | crawler/prepare_dataset.py | awant/habr_crawler | b38ec828b075baca1a70408647ed58741be82393 | [
"MIT"
] | 1 | 2021-04-08T07:19:17.000Z | 2021-04-08T07:19:17.000Z | crawler/prepare_dataset.py | awant/habr_crawler | b38ec828b075baca1a70408647ed58741be82393 | [
"MIT"
] | 1 | 2021-11-16T11:49:53.000Z | 2021-11-16T11:49:53.000Z | crawler/prepare_dataset.py | awant/habr_crawler | b38ec828b075baca1a70408647ed58741be82393 | [
"MIT"
] | 2 | 2021-11-27T14:43:18.000Z | 2022-01-09T13:18:31.000Z | import pandas as pd
from bs4 import BeautifulSoup
import json
import re
from nltk.tokenize import sent_tokenize, word_tokenize
import statistics
import string
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from collections import Counter, defaultdict
from pprint import pprint
import os
from tqdm import tqdm
from shutil import copyfile
def get_meta_info(soup):
json_scripts = soup.find_all('script', {'type': 'application/ld+json'})
if not json_scripts:
return
json_script = json.loads(json_scripts[0].text)
return {
'link': json_script['mainEntityOfPage'].get('@id'),
'title': json_script.get('headline'),
'published_date': json_script['datePublished'][:10],
'published_time': json_script['datePublished'][11:16],
'modified_date': json_script['dateModified'][:10],
'modified_time': json_script['dateModified'][11:16],
'author_type': json_script['author'].get('@type'),
'author_name': json_script['author'].get('name'),
'description': json_script.get('description'),
'image': json_script.get('image', [''])[0],
'article_categories': ', '.join(json_script.get('about', []))
}
def get_votes(soup):
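    # The voting widget's title attribute embeds the counts in the form "... ↑12 ... ↓-3"
    # (the down-vote figure may carry a minus sign); pull both numbers out with a regex.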
labels = soup.find('span', {'class': 'voting-wjt__counter voting-wjt__counter_positive js-score'})
if not labels:
return 0, 0
text = labels['title']
m = re.search(r'↑(\d+).*↓([-]?\d+)', text)
if len(m.groups()) < 2:
return 0, 0
return int(m.group(1)), int(m.group(2))
def get_bookmarks(soup):
label = soup.find('span', {'class': 'bookmark__counter js-favs_count'})
if not label:
return 0
return int(label.text)
def get_views(soup):
label = soup.find('span', {'class': 'post-stats__views-count'})
if not label:
return 0
text = label.text.replace(',', '.')
try:
if text[-1] == 'k':
text = float(text[:-1]) * 1000
return int(text)
except ValueError:
return 0
def get_comments(soup):
label = soup.find('span', {'class': 'post-stats__comments-count'})
if not label:
return 0
return int(label.text)
def get_target_counters(soup):
positive_votes, negative_votes = get_votes(soup)
return {
'positive_votes': positive_votes,
'negative_votes': negative_votes,
'rating': positive_votes - negative_votes,
'bookmarks': get_bookmarks(soup),
'views': get_views(soup),
'comments': get_comments(soup)
}
def get_body(soup):
soup_body = soup.find('div', {'class': 'post__body post__body_full'})
if not soup_body:
soup_body = soup.find('div', {'class': 'article__body'})
[x.extract() for x in soup_body.findAll('script')]
[x.extract() for x in soup_body.findAll('style')]
return soup_body.text
def get_meta_features(soup):
soup_body = soup.find('div', {'class': 'post__body post__body_full'})
if not soup_body:
soup_body = soup.find('div', {'class': 'article__body'})
[x.extract() for x in soup_body.findAll('script')]
[x.extract() for x in soup_body.findAll('style')]
href_count = len(soup_body.find_all('a', href=True))
img_count = len([x for x in soup_body.find_all('img')])
h3_count = len([x for x in soup_body.find_all('h3')])
i_count = len([x for x in soup_body.find_all('i')])
spoiler_count = len(soup_body.find_all('div', {'class': 'spoiler'}))
tags = soup.find('meta', {'name': 'keywords'})
if tags:
tags = tags.get('content')
else:
tags = soup.find_all('li', {'class': 'inline-list__item inline-list__item_tag'})
tags = ', '.join([x.text for x in tags])
return {
'href_count': href_count,
'img_count': img_count,
'tags': tags,
'h3_count': h3_count,
'i_count': i_count,
'spoiler_count': spoiler_count
}
def get_text_features(soup, language='russian'):
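    # Split the article body into sentences and word tokens, strip punctuation and stopwords
    # for the given language, stem what remains, and derive the length/count statistics returned below.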
text = get_body(soup)
lines = list(filter(None, text.split('\n')))
joined_lines = ' '.join(lines)
sentences = sent_tokenize(joined_lines, language)
sent_lens = [len(x) for x in sentences]
if not sent_lens:
sent_lens = [0]
tokens = word_tokenize(text, language)
tokens_lens = [len(x) for x in tokens]
if not tokens_lens:
tokens_lens = [0]
alphabetic_tokens = [token.lower() for token in tokens if token.isalpha()]
table = str.maketrans('', '', string.punctuation)
stripped_atokens = [w.translate(table) for w in alphabetic_tokens]
stop_words = set(stopwords.words(language))
words = [tkn for tkn in stripped_atokens if tkn not in stop_words]
most_common_words = [x[0] for x in Counter(words).most_common(10)]
stemmer = SnowballStemmer(language)
words = [stemmer.stem(word) for word in words]
words_len = [len(x) for x in words]
if not words_len:
words_len = [0]
return {
'text_len': len(text),
'lines_count': len(lines),
'sentences_count': len(sentences),
'first_5_sentences': ' '.join(sentences[:5]),
'last_5_sentences': ' '.join(sentences[-5:]),
'max_sentence_len': max(sent_lens),
'min_sentence_len': min(sent_lens),
'mean_sentence_len': statistics.mean(sent_lens),
'median_sentence_len': statistics.median(sent_lens),
'tokens_count': len(tokens),
'max_token_len': max(tokens_lens),
'mean_token_len': statistics.mean(tokens_lens),
'median_token_len': statistics.median(tokens_lens),
'alphabetic_tokens_count': len(alphabetic_tokens),
'words_count': len(words),
'words_mean': statistics.mean(words_len),
'ten_most_common_words': ', '.join(most_common_words)
}, text
def parse_html(filepath):
soup = BeautifulSoup(open(filepath, 'r'), 'html.parser')
meta = get_meta_info(soup)
meta_features = get_meta_features(soup)
counters = get_target_counters(soup)
text_features, text = get_text_features(soup)
features = {**meta, **meta_features, **counters, **text_features}
jsonified_text_raw = {'text': text, 'link': features['link']}
return features, jsonified_text_raw
def build_dataset(filepath='data/pages', out_df='data/data.csv', out_text='data/texts.json'):
data = []
with open(out_text, 'w', encoding="utf-8") as f:
for (root, dirs, files) in tqdm(os.walk(filepath)):
for file in files:
fp = os.path.join(root, file)
features, jsonified_text_raw = parse_html(fp)
if features['sentences_count'] > 0 and features['link'] and features['negative_votes'] >= 0:
data.append(features)
f.write(json.dumps(jsonified_text_raw, ensure_ascii=False)+'\n')
df = pd.DataFrame(data)
df.to_csv(out_df, index=False)
def print_stats(filepath='data/pages'):
s = 0
data = defaultdict(int)
for (root, dirs, files) in os.walk(filepath):
date = root[-10:-6]
if date.startswith(filepath[:4]):
continue
data[date] += len(files)
s += len(files)
for date, cnt in sorted(list(data.items()))[::-1]:
print(date, cnt)
print(f'sum = {s}')
def test():
feat, text = parse_html('data/pages/2020-02-18/2384944')
pprint(feat)
def main():
# test()
# print_stats()
build_dataset()
if __name__ == '__main__':
main()
| 32.202586 | 108 | 0.631776 |
f3eb001987c3bfe7f29a6f69cab68359c42ebe1e | 893 | py | Python | ethinjest/model.py | agonopol/ethscan | e1c1433458e92a35b4729809bdf8e77f95ef32cd | [
"MIT"
] | null | null | null | ethinjest/model.py | agonopol/ethscan | e1c1433458e92a35b4729809bdf8e77f95ef32cd | [
"MIT"
] | null | null | null | ethinjest/model.py | agonopol/ethscan | e1c1433458e92a35b4729809bdf8e77f95ef32cd | [
"MIT"
] | 2 | 2019-06-04T11:51:34.000Z | 2019-06-04T15:51:38.000Z | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, BigInteger, String, DateTime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base(
)
class Status(Base):
__tablename__ = 'eth_addresses'
id = Column(Integer, primary_key=True, autoincrement=True)
address = Column(String)
balance = Column(BigInteger)
transactions = Column(BigInteger)
asof = Column(DateTime)
def __repr__(self):
return {'id': self.id, 'name': self.address, 'balance': self.balance, 'transactions': self.transactions,
'asof': self.asof.isoformat(' ')}
def init(path):
engine = create_engine(path, echo=True)
Base.metadata.create_all(engine)
return engine
def session(path='sqlite:///:memory:'):
Session = sessionmaker(bind=init(path))
return Session( )
| 27.060606 | 112 | 0.709966 |
1bf002e0c9d383ee4fcf7b7e7d9e56ad62ddde86 | 1,979 | py | Python | src/Clustering/decision_tree.py | ai-se/heroes_compsci | 613fd623a6da073b2c62c773ed902acb0c756809 | [
"MIT"
] | null | null | null | src/Clustering/decision_tree.py | ai-se/heroes_compsci | 613fd623a6da073b2c62c773ed902acb0c756809 | [
"MIT"
] | 12 | 2019-12-17T04:04:19.000Z | 2019-12-26T20:23:02.000Z | src/Clustering/decision_tree.py | ai-se/heroes_compsci | 613fd623a6da073b2c62c773ed902acb0c756809 | [
"MIT"
] | 1 | 2020-03-12T22:19:48.000Z | 2020-03-12T22:19:48.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import platform
import pandas as pd
from sklearn import tree
from sklearn.tree.export import export_text
import matplotlib.pyplot as plt
import math
def getDecisionTree(index):
if platform.system() == 'Darwin' or platform.system() == 'Linux':
file_name = 'Combined/combined_data_' + str(index) + '.csv'
source_projects = os.getcwd() + '/' + file_name
else:
file_name = 'Combined\\combined_data_' + str(index) + '.csv'
source_projects = os.getcwd() + '\\' + file_name
combined_df = pd.read_csv(source_projects)
n_samples = combined_df.shape[0]
sq = math.sqrt(n_samples)
myTree = tree.DecisionTreeClassifier(min_samples_leaf = 2, min_samples_split=int(sq), ccp_alpha=0.015)
myTree = myTree.fit(combined_df.drop(['Language', 'Project Name', 'git_url','Type', 'Latest commit year'], axis=1), combined_df['Type'])
return combined_df, myTree
sample_count = 100
starting_index = 14
xAttributes = [ 'Developers', 'Commit #', 'Closed Issues', 'Releases', 'Tags', 'Open Issues', 'Duration', 'Stars', 'Forks', 'Watchers']
counts = {}
for attr in xAttributes:
counts[attr] = 0
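# For each sampled combined dataset, fit a pruned decision tree and count how many of the
# sampled trees give each attribute a non-zero feature importance.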
for index in range(starting_index, starting_index+sample_count):
combined_df, myTree = getDecisionTree(index)
#tree.plot_tree(myTree)
#plt.show()
r = export_text(myTree, feature_names=xAttributes, show_weights=True)
print('index = ' + str(index))
print(r)
#print('Importances:')
for i in range(0, len(xAttributes)):
imp = myTree.feature_importances_[i]
feature_name = xAttributes[i]
#print(feature_name + " = " + str(imp))
if (imp != 0):
val = counts.get(feature_name, 0)
counts[feature_name] = val+1
print("Attribute wise counts for the sample decision trees")
arr = [(k, counts[k]) for k in sorted(counts, key=counts.get, reverse=True)]
for pr in arr:
print(pr[0] + ' = ' + str(pr[1]))
| 35.339286 | 140 | 0.663972 |
a6c44db5abe01bf007f0db274dcbc64a537359cd | 344 | py | Python | packages/micropython-official/v1.10/esp8266/stubs/ustruct.py | TheVinhLuong102/micropy-stubs | 55ff1773008f7c4dfc3d70a403986486226eb6b3 | [
"MIT"
] | 18 | 2019-07-11T13:31:09.000Z | 2022-01-27T06:38:40.000Z | packages/micropython-official/v1.10/esp8266/stubs/ustruct.py | TheVinhLuong102/micropy-stubs | 55ff1773008f7c4dfc3d70a403986486226eb6b3 | [
"MIT"
] | 9 | 2019-09-01T21:44:49.000Z | 2022-02-04T20:55:08.000Z | packages/micropython-official/v1.10/esp8266/stubs/ustruct.py | TheVinhLuong102/micropy-stubs | 55ff1773008f7c4dfc3d70a403986486226eb6b3 | [
"MIT"
] | 6 | 2019-10-08T05:31:21.000Z | 2021-04-22T10:21:01.000Z | """
Module: 'ustruct' on esp8266 v1.10
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.2.0-dev(9422289)', version='v1.10-8-g8b7039d7d on 2019-01-26', machine='ESP module with ESP8266')
# Stubber: 1.2.0
def calcsize():
pass
def pack():
pass
def pack_into():
pass
def unpack():
pass
def unpack_from():
pass
| 16.380952 | 155 | 0.639535 |
325da4015b8a98ce6e52dadacc7ab59b6fa9df49 | 36,717 | py | Python | desktop/core/src/desktop/api2.py | carlsonp/hue | efbe3a2f45724935816100a2134b7446e81e1981 | [
"Apache-2.0"
] | null | null | null | desktop/core/src/desktop/api2.py | carlsonp/hue | efbe3a2f45724935816100a2134b7446e81e1981 | [
"Apache-2.0"
] | null | null | null | desktop/core/src/desktop/api2.py | carlsonp/hue | efbe3a2f45724935816100a2134b7446e81e1981 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import StringIO
import tempfile
import zipfile
from datetime import datetime
from django.contrib.auth.models import Group, User
from django.core import management
from django.http import HttpResponse
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_POST
from metadata.conf import has_navigator
from metadata.catalog_api import search_entities as metadata_search_entities, _highlight, search_entities_interactive as metadata_search_entities_interactive
from notebook.connectors.altus import SdxApi, AnalyticDbApi, DataEngApi
from notebook.connectors.base import Notebook
from notebook.views import upgrade_session_properties
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.export_csvxls import make_response
from desktop.lib.i18n import smart_str, force_unicode
from desktop.models import Document2, Document, Directory, FilesystemException, uuid_default, \
UserPreferences, get_user_preferences, set_user_preferences, get_cluster_config
from desktop.conf import get_clusters
LOG = logging.getLogger(__name__)
def api_error_handler(func):
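  # Decorator that turns uncaught exceptions from API views into a JSON payload of the form
  # {'status': -1, 'message': ...} instead of letting them bubble up as server errors.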
def decorator(*args, **kwargs):
response = {}
try:
return func(*args, **kwargs)
except Exception, e:
LOG.exception('Error running %s' % func)
response['status'] = -1
response['message'] = force_unicode(str(e))
finally:
if response:
return JsonResponse(response)
return decorator
@api_error_handler
def get_config(request):
config = get_cluster_config(request.user)
config['status'] = 0
return JsonResponse(config)
@api_error_handler
def get_context_namespaces(request, interface):
response = {}
namespaces = []
clusters = get_clusters(request.user).values()
namespaces.extend([{
'id': cluster['id'],
'name': cluster['name'],
'status': 'CREATED',
'computes': [cluster]
} for cluster in clusters if cluster.get('type') == 'direct'
])
if interface == 'hive' or interface == 'impala' or interface == 'report':
# From Altus SDX
if [cluster for cluster in clusters if cluster['type'] == 'altus']:
# Note: attaching computes to namespaces might be done via the frontend in the future
if interface == 'impala':
adb_clusters = AnalyticDbApi(request.user).list_clusters()['clusters']
for _cluster in adb_clusters: # Add "fake" namespace if needed
if not _cluster.get('namespaceCrn'):
_cluster['namespaceCrn'] = _cluster['crn']
_cluster['id'] = _cluster['crn']
_cluster['namespaceName'] = _cluster['clusterName']
_cluster['name'] = _cluster['clusterName']
else:
adb_clusters = []
sdx_namespaces = SdxApi(request.user).list_namespaces()
# Adding "fake" namespace for cluster without one
sdx_namespaces.extend([_cluster for _cluster in adb_clusters if not _cluster.get('namespaceCrn')])
namespaces.extend([{
'id': namespace.get('crn', 'None'),
'name': namespace.get('namespaceName'),
'status': namespace.get('status'),
'computes': [_cluster for _cluster in adb_clusters if _cluster.get('namespaceCrn') == namespace.get('crn')]
} for namespace in sdx_namespaces if namespace.get('status') == 'CREATED'
])
response[interface] = namespaces
response['status'] = 0
return JsonResponse(response)
@api_error_handler
def get_context_computes(request, interface):
response = {}
computes = []
clusters = get_clusters(request.user).values()
if interface == 'hive' or interface == 'impala' or interface == 'oozie' or interface == 'report':
computes.extend([{
'id': cluster['id'],
'name': cluster['name'],
'namespace': cluster['id'],
'interface': interface,
'type': cluster['type']
} for cluster in clusters if cluster.get('type') == 'direct'
])
if interface == 'impala' or interface == 'report':
if [cluster for cluster in clusters if cluster['type'] == 'altus']:
computes.extend([{
'id': cluster.get('crn'),
'name': cluster.get('clusterName'),
'status': cluster.get('status'),
'namespace': cluster.get('namespaceCrn', cluster.get('crn')),
'type': 'altus-dw'
} for cluster in AnalyticDbApi(request.user).list_clusters()['clusters'] if cluster.get('status') == 'CREATED' and cluster.get('cdhVersion') >= 'CDH515']
)
if interface == 'oozie' or interface == 'spark2':
if [cluster for cluster in clusters if cluster['type'] == 'altus']:
computes.extend([{
'id': cluster.get('crn'),
'name': cluster.get('clusterName'),
'status': cluster.get('status'),
'environmentType': cluster.get('environmentType'),
'serviceType': cluster.get('serviceType'),
'namespace': cluster.get('namespaceCrn'),
'type': 'altus-de'
} for cluster in DataEngApi(request.user).list_clusters()['clusters']]
)
# TODO if interface == 'spark2' keep only SPARK type
if interface == 'jobs':
for cluster in clusters:
cluster = {
'id': cluster.get('id'),
'name': cluster.get('name'),
'status': 'CREATED',
'environmentType': cluster.get('type'),
'serviceType': cluster.get('interface'),
'namespace': '',
'type': cluster.get('type')
}
if cluster.get('type') == 'altus':
cluster['name'] = 'Altus DE'
cluster['type'] = 'altus-de'
computes.append(cluster)
cluster = cluster.copy()
cluster['name'] = 'Altus Data Warehouse'
cluster['type'] = 'altus-dw'
computes.append(cluster)
response[interface] = computes
response['status'] = 0
return JsonResponse(response)
@api_error_handler
def search_documents(request):
"""
Returns the directories and documents based on given params that are accessible by the current user
Optional params:
perms=<mode> - Controls whether to retrieve owned, shared, or both. Defaults to both.
include_history=<bool> - Controls whether to retrieve history docs. Defaults to false.
include_trashed=<bool> - Controls whether to retrieve docs in the trash. Defaults to true.
include_managed=<bool> - Controls whether to retrieve docs generated by Hue. Defaults to false.
flatten=<bool> - Controls whether to return documents in a flat list, or roll up documents to a common directory
if possible. Defaults to true.
page=<n> - Controls pagination. Defaults to 1.
limit=<n> - Controls limit per page. Defaults to all.
type=<type> - Show documents of given type(s) (directory, query-hive, query-impala, query-mysql, etc).
Defaults to all. Can appear multiple times.
sort=<key> - Sort by the attribute <key>, which is one of: "name", "type", "owner", "last_modified"
Accepts the form "-last_modified", which sorts in descending order.
Defaults to "-last_modified".
text=<frag> - Search for fragment "frag" in names and descriptions.
"""
response = {
'documents': []
}
perms = request.GET.get('perms', 'both').lower()
include_history = json.loads(request.GET.get('include_history', 'false'))
include_trashed = json.loads(request.GET.get('include_trashed', 'true'))
include_managed = json.loads(request.GET.get('include_managed', 'false'))
flatten = json.loads(request.GET.get('flatten', 'true'))
if perms not in ['owned', 'shared', 'both']:
raise PopupException(_('Invalid value for perms, acceptable values are: owned, shared, both.'))
documents = Document2.objects.documents(
user=request.user,
perms=perms,
include_history=include_history,
include_trashed=include_trashed,
include_managed=include_managed
)
# Refine results
response.update(_filter_documents(request, queryset=documents, flatten=flatten))
# Paginate
response.update(_paginate(request, queryset=response['documents']))
# Serialize results
response['documents'] = [doc.to_dict() for doc in response.get('documents', [])]
return JsonResponse(response)
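# --- Illustrative sketch (hypothetical, not part of the original module) ---
# How a client might assemble the optional querystring parameters documented above;
# every value here is a placeholder, the keys follow the docstring of search_documents().
def _example_search_documents_params():
  return {
    'perms': 'owned',             # owned, shared or both
    'include_history': 'false',
    'include_trashed': 'true',
    'include_managed': 'false',
    'flatten': 'true',
    'page': '1',
    'limit': '25',
    'type': 'query-hive',         # may be repeated in the real querystring
    'sort': '-last_modified',
    'text': 'sales',              # fragment searched in names and descriptions
  }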
def _search(user, perms='both', include_history=False, include_trashed=False, include_managed=False, search_text=None, limit=25):
response = {
'documents': []
}
documents = Document2.objects.documents(
user=user,
perms=perms,
include_history=include_history,
include_trashed=include_trashed,
include_managed=include_managed
)
type_filters = None
sort = '-last_modified'
search_text = search_text
flatten = True
page = 1
# Refine results
response.update(__filter_documents(type_filters, sort, search_text, queryset=documents, flatten=flatten))
# Paginate
response.update(__paginate(page, limit, queryset=response['documents']))
return response
@api_error_handler
def get_document(request):
"""
Returns the document or directory found for the given uuid or path and current user.
If a directory is found, return any children documents too.
Optional params:
page=<n> - Controls pagination. Defaults to 1.
limit=<n> - Controls limit per page. Defaults to all.
type=<type> - Show documents of given type(s) (directory, query-hive, query-impala, query-mysql, etc). Default to all.
sort=<key> - Sort by the attribute <key>, which is one of:
"name", "type", "owner", "last_modified"
Accepts the form "-last_modified", which sorts in descending order.
Default to "-last_modified".
text=<frag> - Search for fragment "frag" in names and descriptions.
data=<false|true> - Return all the data of the document. Default to false.
dependencies=<false|true> - Return all the dependencies and dependents of the document. Default to false.
"""
path = request.GET.get('path', '/')
uuid = request.GET.get('uuid')
uuids = request.GET.get('uuids')
with_data = request.GET.get('data', 'false').lower() == 'true'
with_dependencies = request.GET.get('dependencies', 'false').lower() == 'true'
if uuids:
response = {
'data_list': [_get_document_helper(request, uuid, with_data, with_dependencies, path) for uuid in uuids.split(',')],
'status': 0
}
else:
response = _get_document_helper(request, uuid, with_data, with_dependencies, path)
return JsonResponse(response)
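# --- Illustrative sketch (hypothetical, not part of the original module) ---
# get_document() resolves a document by uuid(s) or by path; example querystring
# styles it accepts (values are placeholders):
#   ?uuid=<uuid>&data=true&dependencies=false
#   ?path=/examples/sample_query.sql
#   ?uuids=<uuid1>,<uuid2>          (returns one 'data_list' entry per uuid)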
def _get_document_helper(request, uuid, with_data, with_dependencies, path):
if uuid:
if uuid.isdigit():
document = Document2.objects.document(user=request.user, doc_id=uuid)
else:
document = Document2.objects.get_by_uuid(user=request.user, uuid=uuid)
else: # Find by path
document = Document2.objects.get_by_path(user=request.user, path=path)
response = {
'document': document.to_dict(),
'parent': document.parent_directory.to_dict() if document.parent_directory else None,
'children': [],
'dependencies': [],
'dependents': [],
'data': '',
'status': 0
}
response['user_perms'] = {
'can_read': document.can_read(request.user),
'can_write': document.can_write(request.user)
}
if with_data:
data = json.loads(document.data)
# Upgrade session properties for Hive and Impala
if document.type.startswith('query'):
notebook = Notebook(document=document)
notebook = upgrade_session_properties(request, notebook)
data = json.loads(notebook.data)
if document.type == 'query-pig': # Import correctly from before Hue 4.0
properties = data['snippets'][0]['properties']
if 'hadoopProperties' not in properties:
properties['hadoopProperties'] = []
if 'parameters' not in properties:
properties['parameters'] = []
if 'resources' not in properties:
properties['resources'] = []
if data.get('uuid') != document.uuid: # Old format < 3.11
data['uuid'] = document.uuid
response['data'] = data
if with_dependencies:
response['dependencies'] = [dependency.to_dict() for dependency in document.dependencies.all()]
response['dependents'] = [dependent.to_dict() for dependent in document.dependents.exclude(is_history=True).all()]
# Get children documents if this is a directory
if document.is_directory:
directory = Directory.objects.get(id=document.id)
# If this is the user's home directory, fetch shared docs too
if document.is_home_directory:
children = directory.get_children_and_shared_documents(user=request.user)
response.update(_filter_documents(request, queryset=children, flatten=True))
else:
children = directory.get_children_documents()
response.update(_filter_documents(request, queryset=children, flatten=False))
# Paginate and serialize Results
if 'documents' in response:
response.update(_paginate(request, queryset=response['documents']))
# Rename documents to children
response['children'] = response.pop('documents')
response['children'] = [doc.to_dict() for doc in response['children']]
return response
@api_error_handler
def open_document(request):
doc_id = request.GET.get('id')
if doc_id.isdigit():
document = Document2.objects.document(user=request.user, doc_id=doc_id)
else:
document = Document2.objects.get_by_uuid(user=request.user, uuid=doc_id)
return redirect(document.get_absolute_url())
@api_error_handler
@require_POST
def move_document(request):
source_doc_uuid = json.loads(request.POST.get('source_doc_uuid'))
destination_doc_uuid = json.loads(request.POST.get('destination_doc_uuid'))
if not source_doc_uuid or not destination_doc_uuid:
raise PopupException(_('move_document requires source_doc_uuid and destination_doc_uuid'))
source = Document2.objects.get_by_uuid(user=request.user, uuid=source_doc_uuid, perm_type='write')
destination = Directory.objects.get_by_uuid(user=request.user, uuid=destination_doc_uuid, perm_type='write')
doc = source.move(destination, request.user)
return JsonResponse({
'status': 0,
'document': doc.to_dict()
})
@api_error_handler
@require_POST
def create_directory(request):
parent_uuid = json.loads(request.POST.get('parent_uuid'))
name = json.loads(request.POST.get('name'))
if not parent_uuid or not name:
raise PopupException(_('create_directory requires parent_uuid and name'))
parent_dir = Directory.objects.get_by_uuid(user=request.user, uuid=parent_uuid, perm_type='write')
directory = Directory.objects.create(name=name, owner=request.user, parent_directory=parent_dir)
return JsonResponse({
'status': 0,
'directory': directory.to_dict()
})
@api_error_handler
@require_POST
def update_document(request):
uuid = json.loads(request.POST.get('uuid'))
if not uuid:
raise PopupException(_('update_document requires uuid'))
document = Document2.objects.get_by_uuid(user=request.user, uuid=uuid, perm_type='write')
whitelisted_attrs = ['name', 'description']
for attr in whitelisted_attrs:
if request.POST.get(attr):
setattr(document, attr, request.POST.get(attr))
document.save(update_fields=whitelisted_attrs)
return JsonResponse({
'status': 0,
'document': document.to_dict()
})
@api_error_handler
@require_POST
def delete_document(request):
"""
Accepts a uuid and optional skip_trash parameter
(Default) skip_trash=false, flags a document as trashed
skip_trash=true, deletes it permanently along with any history dependencies
If directory and skip_trash=false, all dependencies will also be flagged as trash
If directory and skip_trash=true, directory must be empty (no dependencies)
"""
uuid = json.loads(request.POST.get('uuid'))
skip_trash = json.loads(request.POST.get('skip_trash', 'false'))
if not uuid:
raise PopupException(_('delete_document requires uuid'))
document = Document2.objects.get_by_uuid(user=request.user, uuid=uuid, perm_type='write')
if skip_trash:
document.delete()
else:
document.trash()
return JsonResponse({
'status': 0,
})
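# --- Illustrative sketch (hypothetical, not part of the original module) ---
# delete_document() runs json.loads() on each POST field, so the values must be
# JSON-encoded strings; a made-up payload:
def _example_delete_document_payload():
  return {
    'uuid': '"4cbd6e25-91f3-4ba1-b123-5f0ee1f2a00a"',  # JSON string, note the embedded quotes
    'skip_trash': 'true',                              # JSON boolean as a string
  }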
@api_error_handler
@require_POST
def copy_document(request):
uuid = json.loads(request.POST.get('uuid', '""'))
if not uuid:
raise PopupException(_('copy_document requires uuid'))
# Document2 and Document model objects are linked and both are saved when saving
document = Document2.objects.get_by_uuid(user=request.user, uuid=uuid)
# Document model object
document1 = document.doc.get()
if document.type == 'directory':
raise PopupException(_('Directory copy is not supported'))
name = document.name + '-copy'
# Make the copy of the Document2 model object
copy_document = document.copy(name=name, owner=request.user)
# Make the copy of Document model object too
document1.copy(content_object=copy_document, name=name, owner=request.user)
# Import workspace for all oozie jobs
if document.type == 'oozie-workflow2' or document.type == 'oozie-bundle2' or document.type == 'oozie-coordinator2':
from oozie.models2 import Workflow, Coordinator, Bundle, _import_workspace
# Update the name field in the json 'data' field
if document.type == 'oozie-workflow2':
workflow = Workflow(document=document)
workflow.update_name(name)
workflow.update_uuid(copy_document.uuid)
_import_workspace(request.fs, request.user, workflow)
copy_document.update_data({'workflow': workflow.get_data()['workflow']})
copy_document.save()
if document.type == 'oozie-bundle2' or document.type == 'oozie-coordinator2':
if document.type == 'oozie-bundle2':
bundle_or_coordinator = Bundle(document=document)
else:
bundle_or_coordinator = Coordinator(document=document)
json_data = bundle_or_coordinator.get_data_for_json()
json_data['name'] = name
json_data['uuid'] = copy_document.uuid
copy_document.update_data(json_data)
copy_document.save()
_import_workspace(request.fs, request.user, bundle_or_coordinator)
elif document.type == 'search-dashboard':
from dashboard.models import Collection2
collection = Collection2(request.user, document=document)
collection.data['collection']['label'] = name
collection.data['collection']['uuid'] = copy_document.uuid
copy_document.update_data({'collection': collection.data['collection']})
copy_document.save()
# Keep the document and data in sync
else:
copy_data = copy_document.data_dict
if 'name' in copy_data:
copy_data['name'] = name
if 'uuid' in copy_data:
copy_data['uuid'] = copy_document.uuid
copy_document.update_data(copy_data)
copy_document.save()
return JsonResponse({
'status': 0,
'document': copy_document.to_dict()
})
@api_error_handler
@require_POST
def restore_document(request):
"""
Accepts a uuid
Restores the document to /home
"""
uuids = json.loads(request.POST.get('uuids'))
if not uuids:
raise PopupException(_('restore_document requires comma separated uuids'))
for uuid in uuids.split(','):
document = Document2.objects.get_by_uuid(user=request.user, uuid=uuid, perm_type='write')
document.restore()
return JsonResponse({
'status': 0,
})
@api_error_handler
@require_POST
def share_document(request):
"""
Set who else or which other group can interact with the document.
Example of input: {'read': {'user_ids': [1, 2, 3], 'group_ids': [1, 2, 3]}}
"""
perms_dict = request.POST.get('data')
uuid = request.POST.get('uuid')
if not uuid or not perms_dict:
raise PopupException(_('share_document requires uuid and perms_dict'))
else:
perms_dict = json.loads(perms_dict)
uuid = json.loads(uuid)
doc = Document2.objects.get_by_uuid(user=request.user, uuid=uuid)
for name, perm in perms_dict.iteritems():
users = groups = None
if perm.get('user_ids'):
users = User.objects.in_bulk(perm.get('user_ids'))
else:
users = []
if perm.get('group_ids'):
groups = Group.objects.in_bulk(perm.get('group_ids'))
else:
groups = []
doc = doc.share(request.user, name=name, users=users, groups=groups)
return JsonResponse({
'status': 0,
'document': doc.to_dict()
})
@ensure_csrf_cookie
def export_documents(request):
if request.GET.get('documents'):
selection = json.loads(request.GET.get('documents'))
else:
selection = json.loads(request.POST.get('documents'))
# Only export documents the user has permissions to read
docs = Document2.objects.documents(user=request.user, perms='both', include_history=True, include_trashed=True).\
filter(id__in=selection).order_by('-id')
# Add any dependencies to the set of exported documents
export_doc_set = _get_dependencies(docs)
# For directories, add any children docs to the set of exported documents
export_doc_set.update(_get_dependencies(docs, deps_mode=False))
# Get PKs of documents to export
doc_ids = [doc.pk for doc in export_doc_set]
num_docs = len(doc_ids)
if len(selection) == 1 and num_docs >= len(selection) and docs[0].name:
filename = docs[0].name
else:
filename = 'hue-documents-%s-(%s)' % (datetime.today().strftime('%Y-%m-%d'), num_docs)
f = StringIO.StringIO()
if doc_ids:
doc_ids = ','.join(map(str, doc_ids))
management.call_command('dumpdata', 'desktop.Document2', primary_keys=doc_ids, indent=2, use_natural_foreign_keys=True, verbosity=2, stdout=f)
if request.GET.get('format') == 'json':
return JsonResponse(f.getvalue(), safe=False)
elif request.GET.get('format') == 'zip':
zfile = zipfile.ZipFile(f, 'w')
zfile.writestr("hue.json", f.getvalue())
for doc in docs:
if doc.type == 'notebook':
try:
from spark.models import Notebook
zfile.writestr("notebook-%s-%s.txt" % (doc.name, doc.id), smart_str(Notebook(document=doc).get_str()))
except Exception, e:
LOG.exception(e)
zfile.close()
response = HttpResponse(content_type="application/zip")
response["Content-Length"] = len(f.getvalue())
response['Content-Disposition'] = 'attachment; filename="%s".zip' % filename
response.write(f.getvalue())
return response
else:
return make_response(f.getvalue(), 'json', filename)
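# --- Illustrative sketch (hypothetical, not part of the original module) ---
# export_documents() expects a JSON-encoded list of document ids plus an optional
# format; example request shapes handled above (ids are placeholders):
#   ?documents=[1,2,3]&format=json   -> JSON dump of the selected documents
#   ?documents=[1,2,3]&format=zip    -> zip archive containing hue.json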
@ensure_csrf_cookie
def import_documents(request):
def is_reserved_directory(doc):
return doc['fields']['type'] == 'directory' and doc['fields']['name'] in (Document2.HOME_DIR, Document2.TRASH_DIR)
try:
if request.FILES.get('documents'):
documents = request.FILES['documents'].read()
else:
documents = json.loads(request.POST.get('documents'))
documents = json.loads(documents)
except ValueError, e:
raise PopupException(_('Failed to import documents, the file does not contain valid JSON.'))
# Validate documents
if not _is_import_valid(documents):
raise PopupException(_('Failed to import documents, the file does not contain the expected JSON schema for Hue documents.'))
docs = []
uuids_map = dict((doc['fields']['uuid'], None) for doc in documents if not is_reserved_directory(doc))
for doc in documents:
# Filter docs to import, ignoring reserved directories (home and Trash) and history docs
if not is_reserved_directory(doc):
# Remove any deprecated fields
if 'tags' in doc['fields']:
doc['fields'].pop('tags')
# If doc is not owned by current user, make a copy of the document with current user as owner
if doc['fields']['owner'][0] != request.user.username:
doc = _copy_document_with_owner(doc, request.user, uuids_map)
else: # Update existing doc or create new
doc = _create_or_update_document_with_owner(doc, request.user, uuids_map)
# For oozie docs replace dependent uuids with the newly created ones
if doc['fields']['type'].startswith('oozie-'):
doc = _update_imported_oozie_document(doc, uuids_map)
# If the doc contains any history dependencies, ignore them
# NOTE: this assumes that each dependency is exported as an array using the natural PK [uuid, version, is_history]
deps_minus_history = [dep for dep in doc['fields'].get('dependencies', []) if len(dep) >= 3 and not dep[2]]
doc['fields']['dependencies'] = deps_minus_history
# Replace illegal characters
if '/' in doc['fields']['name']:
new_name = doc['fields']['name'].replace('/', '-')
LOG.warn("Found illegal slash in document named: %s, renaming to: %s." % (doc['fields']['name'], new_name))
doc['fields']['name'] = new_name
# Set last modified date to now
doc['fields']['last_modified'] = datetime.now().replace(microsecond=0).isoformat()
docs.append(doc)
f = tempfile.NamedTemporaryFile(mode='w+', suffix='.json')
f.write(json.dumps(docs))
f.flush()
stdout = StringIO.StringIO()
try:
management.call_command('loaddata', f.name, verbosity=2, traceback=True, stdout=stdout)
Document.objects.sync()
if request.POST.get('redirect'):
return redirect(request.POST.get('redirect'))
else:
return JsonResponse({
'status': 0,
'message': stdout.getvalue(),
'count': len(documents),
'created_count': len([doc for doc in documents if doc['pk'] is None]),
'updated_count': len([doc for doc in documents if doc['pk'] is not None]),
'username': request.user.username,
'documents': [
dict([
('name', doc['fields']['name']),
('uuid', doc['fields']['uuid']),
('type', doc['fields']['type']),
('owner', doc['fields']['owner'][0])
]) for doc in docs]
})
except Exception, e:
LOG.error('Failed to run loaddata command in import_documents:\n %s' % stdout.getvalue())
return JsonResponse({'status': -1, 'message': smart_str(e)})
finally:
stdout.close()
def _update_imported_oozie_document(doc, uuids_map):
for key, value in uuids_map.iteritems():
if value:
doc['fields']['data'] = doc['fields']['data'].replace(key, value)
return doc
def user_preferences(request, key=None):
response = {'status': 0, 'data': {}}
if request.method != "POST":
response['data'] = get_user_preferences(request.user, key)
else:
if "set" in request.POST:
x = set_user_preferences(request.user, key, request.POST["set"])
response['data'] = {key: x.value}
elif "delete" in request.POST:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
x.delete()
except UserPreferences.DoesNotExist:
pass
return JsonResponse(response)
def search_entities(request):
sources = json.loads(request.POST.get('sources')) or []
if 'documents' in sources:
search_text = json.loads(request.POST.get('query_s', ''))
entities = _search(user=request.user, search_text=search_text)
response = {
'entities': [{
'hue_name': _highlight(search_text, escape(e.name)),
'hue_description': _highlight(search_text, escape(e.description)),
'type': 'HUE',
'doc_type': escape(e.type),
'originalName': escape(e.name),
'link': e.get_absolute_url()
} for e in entities['documents']
],
'count': len(entities['documents']),
'status': 0
}
return JsonResponse(response)
else:
if has_navigator(request.user):
return metadata_search_entities(request)
else:
return JsonResponse({'status': 1, 'message': _('Navigator not enabled')})
def search_entities_interactive(request):
sources = json.loads(request.POST.get('sources')) or []
if 'documents' in sources:
search_text = json.loads(request.POST.get('query_s', ''))
limit = int(request.POST.get('limit', 25))
entities = _search(user=request.user, search_text=search_text, limit=limit)
response = {
'results': [{
'hue_name': _highlight(search_text, escape(e.name)),
'hue_description': _highlight(search_text, escape(e.description)),
'link': e.get_absolute_url(),
'doc_type': escape(e.type),
'type': 'HUE',
'uuid': e.uuid,
'parentUuid': e.parent_directory.uuid,
'originalName': escape(e.name)
} for e in entities['documents']
],
'count': len(entities['documents']),
'status': 0
}
return JsonResponse(response)
else:
if has_navigator(request.user):
return metadata_search_entities_interactive(request)
else:
return JsonResponse({'status': 1, 'message': _('Navigator not enabled')})
def _is_import_valid(documents):
"""
Validates the JSON file to be imported for schema correctness
:param documents: object loaded from JSON file
:return: True if schema seems valid, False otherwise
"""
return isinstance(documents, list) and \
all(isinstance(d, dict) for d in documents) and \
all(all(k in d for k in ('pk', 'model', 'fields')) for d in documents) and \
all(all(k in d['fields'] for k in ('uuid', 'owner')) for d in documents)
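# --- Illustrative sketch (hypothetical, not part of the original module) ---
# A minimal entry that satisfies _is_import_valid(): a list of dicts, each carrying
# 'pk', 'model' and 'fields', where 'fields' has at least 'uuid' and 'owner'.
def _example_valid_import_payload():
  return [{
    'pk': None,
    'model': 'desktop.document2',
    'fields': {
      'uuid': '00000000-0000-0000-0000-000000000000',  # placeholder uuid
      'owner': ['some_user'],
      'type': 'query-hive',
      'name': 'imported query',
    },
  }]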
def _get_dependencies(documents, deps_mode=True):
"""
Given a list of Document2 objects, perform a depth-first search and return a set of documents with all
dependencies (excluding history docs) included
:param documents: list of Document2 objects to include
:param deps_mode: traverse dependencies relationship, otherwise traverse children relationship
"""
doc_set = set()
for doc in documents:
stack = [doc]
while stack:
curr_doc = stack.pop()
if curr_doc not in doc_set and not curr_doc.is_history:
doc_set.add(curr_doc)
if deps_mode:
deps_set = set(curr_doc.dependencies.all())
else:
deps_set = set(curr_doc.children.all())
stack.extend(deps_set - doc_set)
return doc_set
def _copy_document_with_owner(doc, owner, uuids_map):
home_dir = Directory.objects.get_home_directory(owner)
doc['fields']['owner'] = [owner.username]
doc['pk'] = None
doc['fields']['version'] = 1
# Retrieve from the import_uuids_map if it's already been reassigned, or assign a new UUID and map it
old_uuid = doc['fields']['uuid']
if uuids_map[old_uuid] is None:
uuids_map[old_uuid] = uuid_default()
doc['fields']['uuid'] = uuids_map[old_uuid]
# Update UUID in data if needed
if 'data' in doc['fields']:
data = json.loads(doc['fields']['data'])
if 'uuid' in data:
data['uuid'] = uuids_map[old_uuid]
doc['fields']['data'] = json.dumps(data)
# Remap parent directory if needed
parent_uuid = None
if doc['fields'].get('parent_directory'):
parent_uuid = doc['fields']['parent_directory'][0]
if parent_uuid is not None and parent_uuid in uuids_map.keys():
if uuids_map[parent_uuid] is None:
uuids_map[parent_uuid] = uuid_default()
doc['fields']['parent_directory'] = [uuids_map[parent_uuid], 1, False]
else:
if parent_uuid is not None:
LOG.warn('Could not find parent directory with UUID: %s in JSON import, will set parent to home directory' %
parent_uuid)
doc['fields']['parent_directory'] = [home_dir.uuid, home_dir.version, home_dir.is_history]
# Remap dependencies if needed
idx = 0
for dep_uuid, dep_version, dep_is_history in doc['fields']['dependencies']:
if dep_uuid not in uuids_map.keys():
LOG.warn('Could not find dependency UUID: %s in JSON import, may cause integrity errors if not found.' % dep_uuid)
else:
if uuids_map[dep_uuid] is None:
uuids_map[dep_uuid] = uuid_default()
doc['fields']['dependencies'][idx][0] = uuids_map[dep_uuid]
idx += 1
return doc
def _create_or_update_document_with_owner(doc, owner, uuids_map):
home_dir = Directory.objects.get_home_directory(owner)
create_new = False
try:
owned_docs = Document2.objects.filter(uuid=doc['fields']['uuid'], owner=owner).order_by('-last_modified')
if owned_docs.exists():
existing_doc = owned_docs[0]
doc['pk'] = existing_doc.pk
else:
create_new = True
except FilesystemException, e:
create_new = True
if create_new:
LOG.warn('Could not find document with UUID: %s, will create a new document on import.', doc['fields']['uuid'])
doc['pk'] = None
doc['fields']['version'] = 1
# Verify that parent exists, log warning and set parent to user's home directory if not found
if doc['fields']['parent_directory']:
uuid, version, is_history = doc['fields']['parent_directory']
if uuid not in uuids_map.keys() and \
not Document2.objects.filter(uuid=uuid, version=version, is_history=is_history).exists():
LOG.warn('Could not find parent document with UUID: %s, will set parent to home directory' % uuid)
doc['fields']['parent_directory'] = [home_dir.uuid, home_dir.version, home_dir.is_history]
# Verify that dependencies exist, raise critical error if any dependency not found
# Ignore history dependencies
if doc['fields']['dependencies']:
history_deps_list = []
for index, (uuid, version, is_history) in enumerate(doc['fields']['dependencies']):
if not uuid in uuids_map.keys() and not is_history and \
not Document2.objects.filter(uuid=uuid, version=version).exists():
raise PopupException(_('Cannot import document, dependency with UUID: %s not found.') % uuid)
elif is_history:
history_deps_list.insert(0, index) # Insert in decreasing order to facilitate delete
LOG.warn('History dependency with UUID: %s ignored while importing document %s' % (uuid, doc['fields']['name']))
# Delete history dependencies not found in the DB
for index in history_deps_list:
del doc['fields']['dependencies'][index]
return doc
def _filter_documents(request, queryset, flatten=True):
"""
Given optional querystring params extracted from the request, filter the given queryset of documents and return a
dictionary with the refined queryset and filter params
:param request: request object with params
:param queryset: Document2 queryset
:param flatten: Return all results in a flat list if true, otherwise roll up to common directory
"""
type_filters = request.GET.getlist('type', None)
sort = request.GET.get('sort', '-last_modified')
search_text = request.GET.get('text', None)
return __filter_documents(type_filters, sort, search_text, queryset, flatten)
def __filter_documents(type_filters, sort, search_text, queryset, flatten=True):
documents = queryset.search_documents(
types=type_filters,
search_text=search_text,
order_by=sort)
# Roll up documents to common directory
if not flatten:
documents = documents.exclude(parent_directory__in=documents)
count = documents.count()
return {
'documents': documents,
'count': count,
'types': type_filters,
'text': search_text,
'sort': sort
}
def _paginate(request, queryset):
"""
Given optional querystring params extracted from the request, slice the given queryset of documents for the given page
and limit, and return the updated queryset along with pagination params used.
:param request: request object with params
:param queryset: queryset
"""
page = int(request.GET.get('page', 1))
limit = int(request.GET.get('limit', 0))
return __paginate(page, limit, queryset)
def __paginate(page, limit, queryset):
if limit > 0:
offset = (page - 1) * limit
last = offset + limit
queryset = queryset.all()[offset:last]
return {
'documents': queryset,
'page': page,
'limit': limit
}
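# --- Editor's note (hypothetical example, not part of the original module) ---
# Worked example of the slice computed by __paginate(): with page=3 and limit=25,
# offset = (3 - 1) * 25 = 50 and the returned slice is queryset[50:75].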
| 35.338787 | 161 | 0.685759 |
6912b7811b66826d24bb9c5503519c4677d31166 | 2,487 | py | Python | venv/Lib/site-packages/gevent/resolver/thread.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 13 | 2018-03-28T23:07:01.000Z | 2022-03-12T06:01:21.000Z | venv/Lib/site-packages/gevent/resolver/thread.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 11 | 2018-06-18T15:49:07.000Z | 2021-11-25T01:45:33.000Z | venv/Lib/site-packages/gevent/resolver/thread.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 5 | 2018-03-28T23:07:05.000Z | 2021-12-09T19:02:00.000Z | # Copyright (c) 2012-2015 Denis Bilenko. See LICENSE for details.
"""
Native thread-based hostname resolver.
"""
import _socket
from gevent.hub import get_hub
__all__ = ['Resolver']
class Resolver(object):
"""
Implementation of the resolver API using native threads and native resolution
functions.
Using the native resolution mechanisms ensures the highest
compatibility with what a non-gevent program would return
including good support for platform specific configuration
mechanisms. The use of native (non-greenlet) threads ensures that
a caller doesn't block other greenlets.
This implementation also has the benefit of being very simple in comparison to
:class:`gevent.resolver_ares.Resolver`.
.. tip::
Most users find this resolver to be quite reliable in a
properly monkey-patched environment. However, there have been
some reports of long delays, slow performance or even hangs,
particularly in long-lived programs that make many, many DNS
requests. If you suspect that may be happening to you, try the
dnspython or ares resolver (and submit a bug report).
"""
def __init__(self, hub=None):
if hub is None:
hub = get_hub()
self.pool = hub.threadpool
if _socket.gaierror not in hub.NOT_ERROR:
# Do not cause lookup failures to get printed by the default
# error handler. This can be very noisy.
hub.NOT_ERROR += (_socket.gaierror, _socket.herror)
def __repr__(self):
return '<%s.%s at 0x%x pool=%r>' % (type(self).__module__,
type(self).__name__,
id(self), self.pool)
def close(self):
pass
# from briefly reading socketmodule.c, it seems that all of the functions
# below are thread-safe in Python, even if they are not thread-safe in C.
def gethostbyname(self, *args):
return self.pool.apply(_socket.gethostbyname, args)
def gethostbyname_ex(self, *args):
return self.pool.apply(_socket.gethostbyname_ex, args)
def getaddrinfo(self, *args, **kwargs):
return self.pool.apply(_socket.getaddrinfo, args, kwargs)
def gethostbyaddr(self, *args, **kwargs):
return self.pool.apply(_socket.gethostbyaddr, args, kwargs)
def getnameinfo(self, *args, **kwargs):
return self.pool.apply(_socket.getnameinfo, args, kwargs)
| 35.528571 | 82 | 0.664656 |
10a04c81ca608743765e7ecb1248a21ea1b1cdb8 | 2,519 | py | Python | qradar4py/endpoints/disaster_recovery.py | ryukisec/qradar4py | 958cdea92709778916f0ff8d84d75b18aaad4a66 | [
"MIT"
] | 10 | 2019-11-19T21:13:32.000Z | 2021-11-17T19:35:53.000Z | qradar4py/endpoints/disaster_recovery.py | ryukisec/qradar4py | 958cdea92709778916f0ff8d84d75b18aaad4a66 | [
"MIT"
] | 2 | 2021-05-21T16:15:16.000Z | 2021-07-20T12:34:49.000Z | qradar4py/endpoints/disaster_recovery.py | ryukisec/qradar4py | 958cdea92709778916f0ff8d84d75b18aaad4a66 | [
"MIT"
] | 6 | 2020-09-14T13:44:55.000Z | 2021-11-17T19:35:55.000Z | from urllib.parse import urljoin
from qradar4py.endpoints.api_endpoint import QRadarAPIEndpoint
from qradar4py.endpoints.api_endpoint import request_vars
from qradar4py.endpoints.api_endpoint import header_vars
class DisasterRecovery(QRadarAPIEndpoint):
"""
The QRadar API endpoint group /disaster_recovery and its endpoints.
"""
__baseurl = 'disaster_recovery/'
def __init__(self, url, header, verify):
super().__init__(urljoin(url, self.__baseurl),
header,
verify)
@request_vars('fields', 'filter')
def get_ariel_copy_profiles(self, *, fields=None, filter=None, **kwargs):
"""
GET /disaster_recovery/ariel_copy_profiles
Retrieves a list of the Ariel Copy Profiles.
"""
function_endpoint = urljoin(self._baseurl, 'ariel_copy_profiles')
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_ariel_copy_profiles(self, *, arielCopyProfileDTO, fields=None, **kwargs):
"""
POST /disaster_recovery/ariel_copy_profiles
Creates a new Ariel Copy Profile.
"""
function_endpoint = urljoin(self._baseurl, 'ariel_copy_profiles')
return self._call('POST', function_endpoint, json=arielCopyProfileDTO, **kwargs)
@request_vars('fields')
def get_ariel_copy_profiles_by_id(self, id, *, fields=None, **kwargs):
"""
GET /disaster_recovery/ariel_copy_profiles/{id}
Retrieves a Ariel Copy Profile by ID.
"""
function_endpoint = urljoin(self._baseurl, 'ariel_copy_profiles/{id}'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_ariel_copy_profiles_by_id(self, id, *, arielCopyProfileDTO, fields=None, **kwargs):
"""
POST /disaster_recovery/ariel_copy_profiles/{id}
Updates a Ariel Copy Profile by ID.
"""
function_endpoint = urljoin(self._baseurl, 'ariel_copy_profiles/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=arielCopyProfileDTO, **kwargs)
def delete_ariel_copy_profiles_by_id(self, id, **kwargs):
"""
DELETE /disaster_recovery/ariel_copy_profiles/{id}
Deletes a Ariel Copy Profile by ID.
"""
function_endpoint = urljoin(self._baseurl, 'ariel_copy_profiles/{id}'.format(id=id))
return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)
| 40.629032 | 96 | 0.676459 |
ed0611d6c2a229ec3e2020edd0a31fdc176201a6 | 8,684 | py | Python | utils/scrape_wandb.py | joeybose/vgraphgen | 37df882d6548ef8dc3f251dcfb6ecc6a6a91604d | [
"MIT"
] | null | null | null | utils/scrape_wandb.py | joeybose/vgraphgen | 37df882d6548ef8dc3f251dcfb6ecc6a6a91604d | [
"MIT"
] | null | null | null | utils/scrape_wandb.py | joeybose/vgraphgen | 37df882d6548ef8dc3f251dcfb6ecc6a6a91604d | [
"MIT"
] | null | null | null | import argparse
import csv
import json
import os
from statistics import mean
import wandb
import matplotlib
import numpy as np
from utils import project_name
def get_data(args, wandb_username, wandb_project):
# Accumulate data for all experiments.
data_experiments_auc = []
data_experiments_ap = []
data_experiments_metric = []
data_runs_auc = []
data_runs_ap = []
data_runs_metric = []
runs = list(args.api.runs(wandb_username+ "/"+ wandb_project))
if args.config_key:
filters = list(zip(args.config_key, args.config_val))
def keep_run(run):
for (k, v) in filters:
typed_val = type(run.config.get(k))
if typed_val != v:
return False
else:
return True
filtered_runs = filter(keep_run, runs)
else:
filtered_runs = [single_run for single_run in runs if args.name_str ==
single_run.name]
# Filter out crashed runs
filtered_runs = [single_run for single_run in filtered_runs if 'finished' in
single_run.state]
# Accumulate data for all runs of a given project
for my_run in filtered_runs:
raw_data = my_run.history(samples=100000)
keys = raw_data.keys().values
if not args.custom_metric:
test_keys = [key for key in keys if 'Test' in key]
else:
test_keys = [key for key in keys if args.metric in key]
for key in test_keys:
if 'AUC' in key:
data_points_auc = raw_data[key].dropna().values
data_experiments_auc.append(data_points_auc)
elif 'AP' in key:
data_points_ap = raw_data[key].dropna().values
data_experiments_ap.append(data_points_ap)
elif args.metric in key:
# data_points_metric = raw_data[key].dropna().values
data_points_metric = raw_data[args.metric].dropna().values
data_experiments_metric.append(data_points_metric)
last_data_points_auc = [data_run[-1] for data_run in data_experiments_auc]
last_data_points_ap = [data_run[-1] for data_run in data_experiments_ap]
last_data_points_metric = [data_run[-1] for data_run in data_experiments_metric]
return last_data_points_auc, last_data_points_ap, last_data_points_metric
def main(args):
# wandb_project = '{}-{}'.format(project_name(args.dataset),args.eval_set)
wandb_project = 'metrics-{}-{}'.format(project_name(args.dataset),args.eval_set)
wandb_username = args.wandb_uname
if args.graph_gen:
args.custom_metric = True
args.metric = 'Deg'
# args.metric = 'Deg_copy'
auc, ap, deg_metric = get_data(args, wandb_project=wandb_project,
wandb_username=wandb_username)
deg_metric = np.sort(deg_metric)[::-1]
mean_deg_metric, std_deg_metric = np.mean(deg_metric[0:args.top_k]), np.std(deg_metric[0:args.top_k])
args.metric = 'Clus'
# args.metric = 'Clus_copy'
auc, ap, clus_metric = get_data(args, wandb_project=wandb_project,
wandb_username=wandb_username)
clus_metric = np.sort(clus_metric)[::-1]
mean_clus_metric, std_clus_metric = np.mean(clus_metric[0:args.top_k]), np.std(clus_metric[0:args.top_k])
args.metric = 'Orb'
# args.metric = 'Orb_copy'
auc, ap, orb_metric = get_data(args, wandb_project=wandb_project,
wandb_username=wandb_username)
orb_metric = np.sort(orb_metric)[::-1]
mean_orb_metric, std_orb_metric = np.mean(orb_metric[0:args.top_k]), np.std(orb_metric[0:args.top_k])
args.metric = 'Spec.'
# args.metric = 'Spec_copy'
auc, ap, spec_metric = get_data(args, wandb_project=wandb_project,
wandb_username=wandb_username)
spec_metric = np.sort(spec_metric)[::-1]
mean_spec_metric, std_spec_metric = np.mean(spec_metric[0:args.top_k]), np.std(spec_metric[0:args.top_k])
args.metric = 'Acc'
# args.metric = 'Acc_copy'
auc, ap, acc_metric = get_data(args, wandb_project=wandb_project,
wandb_username=wandb_username)
acc_metric = np.sort(acc_metric)[::-1]
mean_acc_metric, std_acc_metric = np.mean(acc_metric[0:args.top_k]), np.std(acc_metric[0:args.top_k])
args.metric = 'Avg_CC'
auc, ap, cc_metric = get_data(args, wandb_project=wandb_project,
wandb_username=wandb_username)
cc_metric = np.sort(cc_metric)
mean_cc_metric, std_cc_metric = np.mean(cc_metric[0:args.top_k]), np.std(cc_metric[0:args.top_k])
args.metric = 'Avg_Tri'
auc, ap, tri_metric = get_data(args, wandb_project=wandb_project,
wandb_username=wandb_username)
tri_metric = np.sort(tri_metric)
mean_tri_metric, std_tri_metric = np.mean(tri_metric[0:args.top_k]), np.std(tri_metric[0:args.top_k])
args.metric = 'Avg_transitivity'
auc, ap, trans_metric = get_data(args, wandb_project=wandb_project,
wandb_username=wandb_username)
trans_metric = np.sort(trans_metric)
mean_trans_metric, std_trans_metric = np.mean(trans_metric[0:args.top_k]), np.std(trans_metric[0:args.top_k])
print("Test Mean: %s %f, Std: %f" % ("Deg", mean_deg_metric, std_deg_metric))
print("Test Mean: %s %f, Std: %f" % ("Clus", mean_clus_metric, std_clus_metric))
print("Test Mean: %s %f, Std: %f" % ("Orb", mean_orb_metric, std_orb_metric))
print("Test Mean: %s %f, Std: %f" % ("Spec.", mean_spec_metric, std_spec_metric))
print("Test Mean: %s %f, Std: %f" % ("Acc", mean_acc_metric, std_acc_metric))
print("Test Mean: %s %f, Std: %f" % ("Avg CC", mean_cc_metric, std_cc_metric))
print("Test Mean: %s %f, Std: %f" % ("Avg Tri", mean_tri_metric, std_tri_metric))
print("Test Mean: %s %f, Std: %f" % ("Avg Trans", mean_trans_metric, std_trans_metric))
return None
else:
auc, ap, metric = get_data(args, wandb_project=wandb_project,
wandb_username=wandb_username)
if len(auc) > 0:
auc = np.sort(auc)[::-1]
if len(auc) > args.top_k:
mean_auc, std_auc = np.mean(auc[0:args.top_k]), np.std(auc[0:args.top_k])
else:
mean_auc, std_auc = np.mean(auc), np.std(auc)
print("Test Mean: AUC %f, Std: %f" % (mean_auc, std_auc))
if len(ap) > 0:
ap = np.sort(ap)[::-1]
if len(ap) > args.top_k:
mean_ap, std_ap = np.mean(ap[0:args.top_k]), np.std(ap[0:args.top_k])
else:
mean_ap, std_ap = np.mean(ap), np.std(ap)
print("Test Mean: AP %f, Std: %f" % (mean_ap, std_ap))
if len(metric) > 0:
metric = np.sort(metric)[::-1]
if len(metric) > args.top_k:
mean_metric, std_metric = np.mean(metric[0:args.top_k]), np.std(metric[0:args.top_k])
else:
mean_metric, std_metric = np.mean(metric), np.std(metric)
print("Test Mean: %s %f, Std: %f" % (args.metric, mean_metric, std_metric))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='Train')
parser.add_argument('--name_str', type=str, default='')
parser.add_argument('--wandb_uname', type=str, default='')
parser.add_argument('--metric', type=str, default='Test MC Log Likelihood')
parser.add_argument('--custom_metric', action="store_true", default=False,
help='Custom non Test metric')
parser.add_argument('--graph_gen', action="store_true", default=False,
help='Report all graph gen Test metric')
parser.add_argument('--top_k', type=int, default=5, help='Return only top K runs')
parser.add_argument('--config_key', nargs='*', type=str, default=[])
parser.add_argument('--config_val', nargs='*', default=[])
parser.add_argument('--dataset', type=str, default='bdp')
parser.add_argument('--eval_set', default="test",
help="Whether to evaluate model on test set (default) or validation set.")
parser.add_argument('--get_step', nargs='+', default=5, type=int)
args = parser.parse_args()
with open('../settings.json') as f:
data = json.load(f)
args.wandb_apikey = data.get("wandbapikey")
os.environ['WANDB_API_KEY'] = args.wandb_apikey
args.api = wandb.Api()
main(args)
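# --- Editor's note (hypothetical example, not part of the original script) ---
# A possible invocation; the flag values are placeholders for a user's own wandb
# project and run naming scheme:
#   python utils/scrape_wandb.py --wandb_uname my_user --dataset bdp --eval_set test \
#       --name_str my_run_name --top_k 5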
| 46.688172 | 117 | 0.61976 |
7e095b06fc1f0bec6139b7f69c27d1f909ed063a | 822 | py | Python | setup.py | frostinassiky/align1d | f4c568ed68876c641bc042614cecb354678673b8 | [
"Apache-2.0"
] | null | null | null | setup.py | frostinassiky/align1d | f4c568ed68876c641bc042614cecb354678673b8 | [
"Apache-2.0"
] | null | null | null | setup.py | frostinassiky/align1d | f4c568ed68876c641bc042614cecb354678673b8 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension
setup(
name='Align1D',
version="2.2.0",
author="Frost Mengmeng Xu",
author_email="xu.frost@gmail.com",
description="A small package for 1d alignment in cuda",
long_description="I will write a longer description here :)",
long_description_content_type="text/markdown",
url="https://github.com/Frostinassiky/G-TAD",
ext_modules=[
CppExtension(
name = 'Align1D',
sources = [
'Align1D_cuda.cpp',
'Align1D_cuda_kernal.cu',
],
extra_compile_args={'cxx': ['-std=c++14', '-fopenmp'],
'nvcc': ['--expt-relaxed-constexpr']}
)
],
cmdclass={
'build_ext': BuildExtension
})
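# --- Editor's note (hypothetical example, not part of the original setup script) ---
# A typical build/installation, assuming a CUDA toolchain compatible with the
# installed PyTorch is available:
#   pip install .        (or: python setup.py install)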
| 30.444444 | 66 | 0.596107 |
bba776ff575fc9c549b48fd842242a461bc95101 | 652 | py | Python | shell/options.py | levindoneto/traffic_generator | 884d96893e7d9d09e0981bffac2e564a63344bc3 | [
"MIT"
] | null | null | null | shell/options.py | levindoneto/traffic_generator | 884d96893e7d9d09e0981bffac2e564a63344bc3 | [
"MIT"
] | null | null | null | shell/options.py | levindoneto/traffic_generator | 884d96893e7d9d09e0981bffac2e564a63344bc3 | [
"MIT"
] | null | null | null | def getArgs(argsLst):
"""
Get args from command prompt
@param: list\n
@returns: obj
"""
if (argsLst[1] != "-i" or argsLst[3] != "-p" or argsLst[5] != "-r"):
print("Invalid options given\npython gt.py -i destinationIp -p port -r bandwidth")
return -1
else:
try:
ip = str(argsLst[2])
port = int(argsLst[4])
bandwidth = float(argsLst[6])
except:
print("Invalid values given\npython gt.py -i STRING_IP -p INT_PORT -r FLOAT_BANDWIDTH")
return -1
return {
"ip": ip,
"port": port,
"bandwidth": bandwidth
} | 29.636364 | 99 | 0.52454 |
1ff9d0fd409d48ce01c74c2cee4e7c07572b6b98 | 1,484 | py | Python | src/smtphelp.py | jertel/elastalert-slackbot | c3597ce5f6e12ba360d09f19f48444b971d3191a | [
"MIT"
] | 10 | 2018-08-24T12:34:29.000Z | 2021-06-16T09:14:01.000Z | src/smtphelp.py | jertel/elastalert-slackbot | c3597ce5f6e12ba360d09f19f48444b971d3191a | [
"MIT"
] | 1 | 2018-07-20T22:59:32.000Z | 2020-09-01T20:48:25.000Z | src/smtphelp.py | jertel/elastalert-slackbot | c3597ce5f6e12ba360d09f19f48444b971d3191a | [
"MIT"
] | 4 | 2019-02-06T04:43:55.000Z | 2021-01-26T12:02:37.000Z | import logging
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
log = logging.getLogger('smtphelp')
def send(conf, subject, body):
if conf['smtp'].get('secure'):
server = smtplib.SMTP_SSL(host=conf['smtp']['host'], timeout=conf['smtp']['timeoutSeconds'])
else:
server = smtplib.SMTP(host=conf['smtp']['host'], timeout=conf['smtp']['timeoutSeconds'])
if conf['smtp'].get('debug'):
log.info("Enabled debug logging")
server.set_debuglevel(True)
log.info("Connecting to server; host=%s; port=%d" % (conf['smtp']['host'], conf['smtp']['port']))
server.connect(host=conf['smtp']['host'],
port=conf['smtp']['port'])
server.ehlo()
if conf['smtp'].get('starttls'):
log.info("Enabled TLS secure mode")
server.starttls()
server.ehlo()
username = os.environ.get('SMTP_USERNAME')
password = os.environ.get('SMTP_PASSWORD')
if username and password:
log.info("Sending username and password")
server.login(username, password)
if conf['smtp'].get('subjectPrefix'):
subject = conf['smtp'].get('subjectPrefix') + subject
msg = MIMEMultipart()
msg['From'] = conf['smtp']['from']
msg['To'] = conf['smtp']['to']
msg['Subject'] = subject
msg.attach(MIMEText(str(body)))
message = msg.as_string()
log.info("Sending email; length=%d" % (len(message)))
server.sendmail(conf['smtp']['from'], conf['smtp']['to'], message)
server.quit()
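# --- Illustrative sketch (hypothetical, not part of the original module) ---
# A minimal configuration for send(); the keys below are exactly the ones the
# function reads, with placeholder values.
def _example_conf():
    return {
        'smtp': {
            'host': 'smtp.example.com',
            'port': 587,
            'timeoutSeconds': 30,
            'secure': False,          # use SMTP_SSL when True
            'starttls': True,
            'debug': False,
            'subjectPrefix': '[elastalert] ',
            'from': 'alerts@example.com',
            'to': 'oncall@example.com',
        }
    }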
| 30.916667 | 99 | 0.653639 |
5565943a7ec68077c0400fc63dae3cc68e84b34d | 8,433 | py | Python | web_tool/data_analy/views/views.py | githmy/vnpymy | f6a172629f0961bea13e9f10c8fc47de225094ec | [
"MIT"
] | 1 | 2021-04-09T06:46:35.000Z | 2021-04-09T06:46:35.000Z | web_tool/data_analy/views/views.py | githmy/vnpymy | f6a172629f0961bea13e9f10c8fc47de225094ec | [
"MIT"
] | 1 | 2021-12-31T02:38:36.000Z | 2021-12-31T02:38:36.000Z | web_tool/data_analy/views/views.py | githmy/vnpymy | f6a172629f0961bea13e9f10c8fc47de225094ec | [
"MIT"
] | 1 | 2021-06-27T12:13:47.000Z | 2021-06-27T12:13:47.000Z | from django.shortcuts import render
from django.views.generic import FormView, UpdateView
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from common.mixins import JSONResponseMixin, AdminUserRequiredMixin
from common.utils import get_object_or_none
from common.tools.data_fit import iter_regression4allxy
from common.tools.data_confidenc import show_all_confids
from django import forms as oforms
from .. import forms
from django.views import View
import codecs
import xlrd
import xlwt
import uuid
import datetime
import pandas as pd
import os
from io import StringIO, BytesIO
import re
import copy
# Create your views here.
global gdatas
# filename -> pandas DataFrame of the uploaded data
gdatas = {}
# relations between the data columns
global rdatas
rdatas = {}
# confidence results of the data
global cdatas
cdatas = {}
global ccollist
ccollist = []
# fitting results of the data
global fdatas
fdatas = {}
def btUrldecode(urldata, colnames):
trim = dict()
trim['query'] = dict()
# paging
trim['start'] = int(urldata.get('offset', 0)) # start offset of the page
entries = urldata.get('limit', 25) # number of entries per page
trim['offset'] = int(trim['start']) + int(entries) # end offset of the page
# ordering
order_type = urldata.get('orderType', 'asc')
order_col = urldata.get('orderName', 'pk')
trim['orderName'] = order_col if order_type == u'asc' else u'-' + order_col
# per-column search terms
for colname in colnames:
tqur = urldata.get("search_{}".format(colname))
if tqur:
trim['query'][colname] = tqur
return trim
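# --- Editor's note (hypothetical example, not part of the original module) ---
# A bootstrap-table style querystring handled by btUrldecode():
#   ?offset=25&limit=25&orderType=desc&orderName=price&search_price=100
# would decode to roughly:
#   {'query': {'price': '100'}, 'start': 25, 'offset': 50, 'orderName': '-price'}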
def index(request):
return render(request, 'data_analy/index.html')
# @login_required
def data_index(request):
"Raw data: table header"
context = {}
context["collist"] = []
if len(gdatas.keys()) > 0:
tclist = gdatas[list(gdatas.keys())[0]].columns
context["collist"] = tclist
return render(request, 'data_analy/data_index.html', context)
def data_list(request):
"Raw data: table content"
if len(gdatas.keys()) > 0:
tpd = gdatas[list(gdatas.keys())[0]]
qd = btUrldecode(request.GET, tpd.columns)
# filter + sort
outjson = data_list_core(tpd, qd)
outjson['_'] = request.GET.get('_', 0)
return JsonResponse(outjson)
else:
return JsonResponse({})
def data_list_core(inpandas, qd):
"Shared filtering/sorting core: takes a pandas DataFrame and the decoded request dict"
# filter
newtpd = copy.deepcopy(inpandas)
newtpd = newtpd.applymap(str)
indlist = set(newtpd.index)
for coln in inpandas.columns:
if coln in qd['query']:
tind = newtpd[coln].str.find(qd['query'][coln])
tind = tind[tind > -1].index
indlist = indlist & set(tind)
indlist = sorted(indlist)
totals = len(indlist)
# go back to the pandas frame from before the str() conversion
newtpd = inpandas.iloc[indlist, :]
# sort and paginate
if qd['orderName'] != "pk":
if qd['orderName'].startswith("-"):
qd['orderName'] = qd['orderName'].lstrip("-")
newtpd = newtpd.sort_values(by=[qd['orderName']], ascending=[False])
else:
newtpd = newtpd.sort_values(by=[qd['orderName']])
newtpd = newtpd.iloc[qd['start']:qd['offset'], :]
return {
'total': totals,
'data': query2dict(newtpd),
}
def prob_check_v(request):
"Metric summary: table header"
context = {}
# 1. column names 2. stationarity 3. ...
context["collist"] = ["names", "mean", "std"]
return render(request, 'data_analy/prob_check_index.html', context)
def data_prob_check(request):
"Metric summary: table content"
if len(rdatas.keys()) > 0:
tpd = rdatas[list(rdatas.keys())[0]]
return JsonResponse({
'total': tpd.shape[0],
'data': query2dict(tpd),
'_': request.GET.get('_', 0)
})
else:
return JsonResponse({})
def relation_v(request):
"Correlation summary: table header"
context = {}
# 1. name of the relation pair 2. ...
context["collist"] = ["names", "a c", "b c"]
return render(request, 'data_analy/relation_index.html', context)
def data_relation(request):
"Correlation summary: table content"
if len(rdatas.keys()) > 0:
tpd = rdatas[list(rdatas.keys())[0]]
return JsonResponse({
'total': tpd.shape[0],
'data': query2dict(tpd),
'_': request.GET.get('_', 0)
})
else:
return JsonResponse({})
def confidence_v(request):
"Confidence summary: table header"
context = {}
context["collist"] = []
if len(cdatas.keys()) > 0:
ccollist = cdatas[list(gdatas.keys())[0]].columns
context["collist"] = ccollist
return render(request, 'data_analy/confidence_index.html', context)
def data_confidence(request):
"Confidence summary: table content"
if len(gdatas.keys()) > 0:
# if len(cdatas) > 0:
# ttnewtpd = cdatas[list(gdatas.keys())[0]]
# else:
tpd = gdatas[list(gdatas.keys())[0]]
tprob = request.GET.get("reply_prob")
tposit = request.GET.get("reply_posit")
if tposit == "":
tposit = None
else:
tposit = float(tposit)
if tprob == "":
tprob = 0.95
else:
tprob = float(tprob)
showjson = show_all_confids(tpd, prob=tprob, posit=tposit)
cdatas[list(gdatas.keys())[0]] = pd.DataFrame(showjson)
ttnewtpd = cdatas[list(gdatas.keys())[0]]
ccollist =ttnewtpd.columns
qd = btUrldecode(request.GET, ccollist)
outjson = data_list_core(ttnewtpd, qd)
outjson['_'] = request.GET.get('_', 0)
# print(outjson)
return JsonResponse(outjson)
else:
return JsonResponse({})
def fit_v(request):
"Fitting summary: table header"
context = {}
context["collist"] = []
if len(fdatas.keys()) > 0:
tclist = fdatas[list(gdatas.keys())[0]].columns
context["collist"] = tclist
return render(request, 'data_analy/fit_index.html', context)
def data_fit(request):
"Fitting summary: table content"
if len(gdatas.keys()) > 0:
if len(fdatas) > 0:
ttnewtpd = fdatas[list(gdatas.keys())[0]]
else:
# build the fitting results since none are cached yet
tpd = gdatas[list(gdatas.keys())[0]]
showjson = iter_regression4allxy(tpd, max_combnum=2, test_size=0.2)
fdatas[list(gdatas.keys())[0]] = pd.DataFrame(showjson)
ttnewtpd = fdatas[list(gdatas.keys())[0]]
qd = btUrldecode(request.GET, ttnewtpd.columns)
outjson = data_list_core(ttnewtpd, qd)
outjson['_'] = request.GET.get('_', 0)
return JsonResponse(outjson)
else:
return JsonResponse({})
def data_clean(request):
# filename -> pandas DataFrame of the uploaded data
global gdatas
gdatas = {}
# relations between the data columns
global rdatas
rdatas = {}
# confidence results of the data
global cdatas
cdatas = {}
# fitting results of the data
global fdatas
fdatas = {}
return JsonResponse({})
def query2dict(t_pandas):
lists = []
for id1, values in t_pandas.iterrows():
lists.append({"pk": id1, **values})
return lists
class DataExportView(View):
def get(self, request, *args, **kwargs):
filename = os.path.join(".", "template.xlsx")
response = HttpResponse(content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
with codecs.open(filename, "rb") as f:
c = f.read()
response.write(c)
return response
def post(self, request, *args, **kwargs):
return JsonResponse({'redirect': ""})
class BulkImportDataView(JSONResponseMixin, FormView):
form_class = forms.FileForm
def form_valid(self, form):
file = form.cleaned_data['file']
wb = xlrd.open_workbook(filename=None, file_contents=file.read())
sheet1 = wb.sheet_by_index(0) # first worksheet
header_ = sheet1.row_values(0) # header row of the table
tsig = [1 if isinstance(field, str) else 0 for field in sheet1.row_values(1)]
pdjson = {col: [] for col in header_}
try:
for i1 in range(1, sheet1.nrows): # iterate over every data row of the sheet
row = sheet1.row_values(i1)
for id2, i2 in enumerate(row):
if tsig[id2]: # normalize the datetime string format
i2 = datetime.datetime.strptime("".join(i2.split('/')), "%Y%m%d%H%M%S%f")
i2 = i2.strftime('%Y-%m-%d %H:%M:%S %f')
pdjson[header_[id2]].append(i2)
data = {
'created': "ok",
}
gdatas[file.__str__()] = pd.DataFrame(pdjson)
except Exception as e:
data = {
'created': "error",
}
return self.render_json_response(data)
| 28.979381 | 97 | 0.588877 |
1c2c6eb508fbb7db0f4bc9542432ad2c73f2504b | 181 | py | Python | modules/__init__.py | trislaz/SimCLR | d7c8950a1afc8adad4e617e7ecd90a0d3828740f | [
"MIT"
] | null | null | null | modules/__init__.py | trislaz/SimCLR | d7c8950a1afc8adad4e617e7ecd90a0d3828740f | [
"MIT"
] | null | null | null | modules/__init__.py | trislaz/SimCLR | d7c8950a1afc8adad4e617e7ecd90a0d3828740f | [
"MIT"
] | null | null | null | from .simclr import SimCLR
from .nt_xent import NT_Xent
from .logistic_regression import LogisticRegression
from .lars import LARS
from .dataloader import dataset, datasetWSI_simple | 36.2 | 51 | 0.856354 |
cde79427ed88701fa6d7a2360d71bc37701560a6 | 6,216 | py | Python | neutron/objects/subnetpool.py | cleo4zheng/neutron | 6d65318308edfd984bdd0ff1ac7fef9486a040f7 | [
"Apache-2.0"
] | 1 | 2019-06-02T06:15:39.000Z | 2019-06-02T06:15:39.000Z | neutron/objects/subnetpool.py | cleo4zheng/neutron | 6d65318308edfd984bdd0ff1ac7fef9486a040f7 | [
"Apache-2.0"
] | 1 | 2019-08-16T14:02:19.000Z | 2019-08-16T14:02:19.000Z | neutron/objects/subnetpool.py | cleo4zheng/neutron | 6d65318308edfd984bdd0ff1ac7fef9486a040f7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
from neutron.db import api as db_api
from neutron.db import models_v2 as models
from neutron.objects import base
from neutron.objects import common_types
@obj_base.VersionedObjectRegistry.register
class SubnetPool(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models.SubnetPool
fields = {
'id': common_types.UUIDField(),
'project_id': obj_fields.StringField(nullable=True),
'name': obj_fields.StringField(nullable=True),
'ip_version': common_types.IPVersionEnumField(),
'default_prefixlen': common_types.IPNetworkPrefixLenField(),
'min_prefixlen': common_types.IPNetworkPrefixLenField(),
'max_prefixlen': common_types.IPNetworkPrefixLenField(),
'shared': obj_fields.BooleanField(),
'is_default': obj_fields.BooleanField(),
'default_quota': obj_fields.IntegerField(nullable=True),
'hash': obj_fields.StringField(nullable=True),
'address_scope_id': common_types.UUIDField(nullable=True),
'prefixes': common_types.ListOfIPNetworksField(nullable=True)
}
fields_no_update = ['id', 'project_id']
synthetic_fields = ['prefixes']
@classmethod
def modify_fields_from_db(cls, db_obj):
fields = super(SubnetPool, cls).modify_fields_from_db(db_obj)
if 'prefixes' in fields:
fields['prefixes'] = [
netaddr.IPNetwork(prefix.cidr)
for prefix in fields['prefixes']
]
return fields
@classmethod
def modify_fields_to_db(cls, fields):
result = super(SubnetPool, cls).modify_fields_to_db(fields)
if 'prefixes' in result:
result['prefixes'] = [
models.SubnetPoolPrefix(cidr=str(prefix),
subnetpool_id=result['id'])
for prefix in result['prefixes']
]
return result
def reload_prefixes(self):
prefixes = [
obj.cidr
for obj in SubnetPoolPrefix.get_objects(
self.obj_context,
subnetpool_id=self.id)
]
setattr(self, 'prefixes', prefixes)
self.obj_reset_changes(['prefixes'])
@classmethod
def get_object(cls, context, **kwargs):
with db_api.autonested_transaction(context.session):
pool_obj = super(SubnetPool, cls).get_object(context, **kwargs)
if pool_obj is not None:
pool_obj.reload_prefixes()
return pool_obj
@classmethod
def get_objects(cls, context, _pager=None, validate_filters=True,
**kwargs):
with db_api.autonested_transaction(context.session):
objs = super(SubnetPool, cls).get_objects(context, _pager,
validate_filters,
**kwargs)
for obj in objs:
obj.reload_prefixes()
return objs
# TODO(ihrachys): Consider extending base to trigger registered methods
def create(self):
synthetic_changes = self._get_changed_synthetic_fields()
with db_api.autonested_transaction(self.obj_context.session):
super(SubnetPool, self).create()
if 'prefixes' in synthetic_changes:
for prefix in self.prefixes:
prefix = SubnetPoolPrefix(
self.obj_context, subnetpool_id=self.id, cidr=prefix)
prefix.create()
self.reload_prefixes()
# TODO(ihrachys): Consider extending base to trigger registered methods
def update(self):
with db_api.autonested_transaction(self.obj_context.session):
synthetic_changes = self._get_changed_synthetic_fields()
super(SubnetPool, self).update()
if synthetic_changes:
if 'prefixes' in synthetic_changes:
SubnetPoolPrefix.delete_objects(self.obj_context,
subnetpool_id=self.id)
for prefix in self.prefixes:
prefix_obj = SubnetPoolPrefix(self.obj_context,
subnetpool_id=self.id,
cidr=prefix)
prefix_obj.create()
@obj_base.VersionedObjectRegistry.register
class SubnetPoolPrefix(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models.SubnetPoolPrefix
fields = {
'subnetpool_id': common_types.UUIDField(),
'cidr': common_types.IPNetworkField(),
}
primary_keys = ['subnetpool_id', 'cidr']
# TODO(ihrachys): get rid of it once we switch the db model to using CIDR
# custom type
@classmethod
def modify_fields_to_db(cls, fields):
result = super(SubnetPoolPrefix, cls).modify_fields_to_db(fields)
if 'cidr' in result:
result['cidr'] = cls.filter_to_str(result['cidr'])
return result
# TODO(ihrachys): get rid of it once we switch the db model to using CIDR
# custom type
@classmethod
def modify_fields_from_db(cls, db_obj):
fields = super(SubnetPoolPrefix, cls).modify_fields_from_db(db_obj)
if 'cidr' in fields:
fields['cidr'] = netaddr.IPNetwork(fields['cidr'])
return fields
| 38.37037 | 78 | 0.623391 |
f592bcc0d23073c0d33f28b26aa27a43989664eb | 4,451 | py | Python | PRESUBMIT.py | conchoid/gae | a3746d0de863d481ac85e749ff6fae069b1dd119 | [
"Apache-2.0"
] | 1 | 2019-01-14T09:01:26.000Z | 2019-01-14T09:01:26.000Z | PRESUBMIT.py | Acidburn0zzz/gae | 2e2072ed4889f770e6e135554b716fd54a7d0646 | [
"Apache-2.0"
] | null | null | null | PRESUBMIT.py | Acidburn0zzz/gae | 2e2072ed4889f770e6e135554b716fd54a7d0646 | [
"Apache-2.0"
] | 1 | 2021-06-17T14:32:13.000Z | 2021-06-17T14:32:13.000Z | # Copyright 2015 The LUCI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Top-level presubmit script.
See https://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into depot_tools.
"""
import os
import re
import sys
def PreCommitGo(input_api, output_api, pcg_mode):
"""Run go-specific checks via pre-commit-go (pcg) if it's in PATH."""
if input_api.is_committing:
error_type = output_api.PresubmitError
else:
error_type = output_api.PresubmitPromptWarning
exe = 'pcg.exe' if sys.platform == 'win32' else 'pcg'
pcg = None
for p in os.environ['PATH'].split(os.pathsep):
pcg = os.path.join(p, exe)
if os.access(pcg, os.X_OK):
break
else:
return [
error_type(
'pre-commit-go executable (pcg) could not be found in PATH. All Go '
'checks are skipped. See https://github.com/maruel/pre-commit-go.')
]
cmd = [pcg, 'run', '-m', ','.join(pcg_mode)]
if input_api.verbose:
cmd.append('-v')
# pcg can figure out what files to check on its own based on upstream ref,
  # but on PRESUBMIT try builder upstream isn't set, and it's just 1 commit.
if os.getenv('PRESUBMIT_BUILDER', ''):
cmd.extend(['-r', 'HEAD~1'])
return input_api.RunTests([
input_api.Command(
name='pre-commit-go: %s' % ', '.join(pcg_mode),
cmd=cmd,
kwargs={},
message=error_type),
])
COPYRIGHT_TEMPLATE = """
Copyright YEARPATTERN The LUCI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
""".strip()
def header(input_api):
"""Returns the expected license header regexp for this project."""
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2011, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
lines = [
('.*? ' + re.escape(line)) if line else '.*?'
for line in COPYRIGHT_TEMPLATE.splitlines()
]
lines[0] = lines[0].replace('YEARPATTERN', years_re)
return '\n'.join(lines) + '(?: \*/)?\n'
def source_file_filter(input_api):
"""Returns filter that selects source code files only."""
bl = list(input_api.DEFAULT_BLACK_LIST) + [
r'.+\.pb\.go$',
r'.+_string\.go$',
]
wl = list(input_api.DEFAULT_WHITE_LIST) + [
r'.+\.go$',
]
return lambda x: input_api.FilterSourceFile(x, white_list=wl, black_list=bl)
def CommonChecks(input_api, output_api):
results = []
results.extend(
input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
input_api, output_api,
source_file_filter=source_file_filter(input_api)))
results.extend(
input_api.canned_checks.CheckLicense(
input_api, output_api, header(input_api),
source_file_filter=source_file_filter(input_api)))
return results
def CheckChangeOnUpload(input_api, output_api):
results = CommonChecks(input_api, output_api)
results.extend(PreCommitGo(input_api, output_api, ['lint', 'pre-commit']))
return results
def CheckChangeOnCommit(input_api, output_api):
results = CommonChecks(input_api, output_api)
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
results.extend(input_api.canned_checks.CheckDoNotSubmitInDescription(
input_api, output_api))
results.extend(input_api.canned_checks.CheckDoNotSubmitInFiles(
input_api, output_api))
results.extend(PreCommitGo(
input_api, output_api, ['lint', 'pre-commit', 'pre-push']))
return results
| 33.466165 | 80 | 0.714222 |
455e2585d7d120e69af0904138f8be5f47c4cc23 | 787 | py | Python | Entrenamiento Algoritmico/comparaciondosnumeros.py | andersonvelasco/Programming-2020B | bd843a406353b55ca83b9684c394aec1556aaddc | [
"Apache-2.0"
] | null | null | null | Entrenamiento Algoritmico/comparaciondosnumeros.py | andersonvelasco/Programming-2020B | bd843a406353b55ca83b9684c394aec1556aaddc | [
"Apache-2.0"
] | null | null | null | Entrenamiento Algoritmico/comparaciondosnumeros.py | andersonvelasco/Programming-2020B | bd843a406353b55ca83b9684c394aec1556aaddc | [
"Apache-2.0"
] | null | null | null | '''Script that compares two numbers entered by the user
and determines which one is larger'''
import os
os.system("cls") # thsi kine let you clear screen.
num1=0 #
num2=0 #
print ("Programa para calcular el mayor de dos números ")
num1=int(input("Ingrese primer número:"))
num2=int(input("Ingrese segundo número:"))
'''
Programming languages such as Java, C++, C#, JavaScript, PHP and several
others use {} braces instead.
Example:
if num1 > num2 {
    print("El número mayor entre", num1," y ", num2," es: ", num1)
}else{
    print("El número mayor entre", num1," y ", num2," es: ", num2)
}
'''
if num1 > num2 :  # the ":" replaces the "then" of the pseudocode
print("El número mayor entre", num1," y ", num2," es: ", num1)
else:
print("El número mayor entre", num1," y ", num2," es: ", num2) | 31.48 | 72 | 0.656925 |
56551f3a66bb9d9fe2d00d8c8200da3a44cd73a3 | 3,088 | py | Python | viper/modules/misp_methods/galaxies.py | Mario-Kart-Felix/mal-scrap | bc396a15ea5b144eb1c0f05759d1f9419d6671df | [
"BSD-3-Clause"
] | 2 | 2015-12-17T20:25:09.000Z | 2017-10-08T19:14:57.000Z | viper/modules/misp_methods/galaxies.py | Mario-Kart-Felix/mal-scrap | bc396a15ea5b144eb1c0f05759d1f9419d6671df | [
"BSD-3-Clause"
] | 1 | 2015-01-05T18:07:13.000Z | 2015-01-07T21:43:57.000Z | viper/modules/misp_methods/galaxies.py | Mario-Kart-Felix/mal-scrap | bc396a15ea5b144eb1c0f05759d1f9419d6671df | [
"BSD-3-Clause"
] | 3 | 2017-10-18T00:56:53.000Z | 2020-05-24T09:38:54.000Z | # -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
try:
from pymispgalaxies import Clusters
HAVE_PYGALAXIES = True
except ImportError:
HAVE_PYGALAXIES = False
def _print_cluster_value(self, cluster_value):
self.log('success', 'Name: {}'.format(cluster_value.value))
if cluster_value.description:
self.log('info', 'Description: {}'.format(cluster_value.description))
if not cluster_value.meta:
return
for key, value in cluster_value.meta._json().items():
if isinstance(value, list):
self.log('info', '{}:'.format(key))
for e in value:
self.log('item', '{}'.format(e))
else:
self.log('info', '{}: {}'.format(key, value))
def galaxies(self):
if not HAVE_PYGALAXIES:
self.log('error', "Missing dependency, install PyMISPGalaxies (`pip install git+https://github.com/MISP/PyMISPGalaxies.git`)")
return
clusters = Clusters()
if self.args.list:
self.log('table', dict(header=['Name', 'Description'], rows=[(name, cluster.description)
for name, cluster in clusters.items()]))
elif self.args.search:
to_search = ' '.join(self.args.search)
matches = clusters.search(to_search)
if not matches:
self.log('error', 'No matches for "{}" in the clusters.'.format(to_search))
return
self.log('success', 'Clusters matching "{}":'.format(to_search))
for cluster, values in matches:
self.log('success', cluster.name)
for val in values:
_print_cluster_value(self, val)
elif self.args.details:
cluster = clusters.get(self.args.details)
if not cluster:
self.log('error', 'No cluster called "{}".'.format(self.args.details))
return
if not self.args.cluster_value:
# Show all values
self.log('info', cluster.description)
self.log('info', 'Type: ' + cluster.type)
self.log('info', 'Source: ' + cluster.source)
self.log('info', 'Authors: ' + ', '.join(cluster.authors))
self.log('info', 'UUID: ' + cluster.uuid)
self.log('info', 'Version: {}'.format(cluster.version))
self.log('info', 'Values:')
header = ['ID', 'Name', 'Description']
rows = []
i = 1
for name, value in cluster.items():
row = (i, value.value, value.description)
rows.append(row)
i += 1
self.log('table', dict(header=header, rows=rows))
else:
cluster_value = ' '.join(self.args.cluster_value)
# Show meta of a value
c_val = cluster.get(cluster_value)
if not c_val:
self.log('error', 'No cluster value called "{}".'.format(cluster_value))
return
_print_cluster_value(self, c_val)
| 39.589744 | 134 | 0.559262 |
5abcda0e341eedd2228328a0f10342718069ea21 | 203 | py | Python | tests/test_tweens.py | ptone/BirdFish | 53633ab95f3c81748398143e6411c127c1b6c76e | [
"BSD-3-Clause"
] | 3 | 2015-11-05T10:25:36.000Z | 2018-12-07T14:44:14.000Z | tests/test_tweens.py | ptone/BirdFish | 53633ab95f3c81748398143e6411c127c1b6c76e | [
"BSD-3-Clause"
] | null | null | null | tests/test_tweens.py | ptone/BirdFish | 53633ab95f3c81748398143e6411c127c1b6c76e | [
"BSD-3-Clause"
] | null | null | null | from birdfish import tween
def test_tween_jump():
b = 0
c = 1
d = 6
target = .5
tween_t = tween.LINEAR
val = tween.jump_time(tween_t, target, b, c, d)
assert int(val) == 3
| 15.615385 | 51 | 0.581281 |
d03d69e57256d66dc2bfba71a1e7ba7b875b46f5 | 2,187 | py | Python | floodsystem/plot.py | butanone/flooddefense | 04585b20389a39dc418756871b361d0ea2eaa871 | [
"MIT"
] | null | null | null | floodsystem/plot.py | butanone/flooddefense | 04585b20389a39dc418756871b361d0ea2eaa871 | [
"MIT"
] | null | null | null | floodsystem/plot.py | butanone/flooddefense | 04585b20389a39dc418756871b361d0ea2eaa871 | [
"MIT"
] | null | null | null | """This module contains functions which can provide plots of the water levels. It includes the module analysis.py."""
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
from datetime import datetime
from floodsystem import analysis
def plot_water_levels(station, dates, levels):
"""This function creates a plot of the water levels against the time for a specific station."""
# Plot the levels against the timestamps
plt.plot(dates, levels, label="Past level")
# Plot the typical values as dashed lines
level_low=station.typical_range[0]
level_high=station.typical_range[1]
plot_low, plot_high = [], []
for i in dates:
plot_low.append(level_low)
plot_high.append(level_high)
plt.plot(dates, plot_low, "--", label="Typical low")
plt.plot(dates, plot_high, "--", label="Typical high")
# Add all the labels and titles
plt.xlabel("date")
plt.ylabel("water level (m)")
plt.xticks(rotation=45)
plt.title(station.name)
plt.legend()
# Show plot
plt.tight_layout()
plt.grid()
plt.show()
def plot_water_level_with_fit(station, dates, levels, p):
"""This function creates a plot of the water levels against the time for a specific station.
It also plots a best-fit polynomial curve for the water levels."""
# Plot the levels against the timestamps
plt.plot(dates, levels, label="Past level")
# Plot the typical values as dashed lines
level_low=station.typical_range[0]
level_high=station.typical_range[1]
plot_low, plot_high = [], []
for i in dates:
plot_low.append(level_low)
plot_high.append(level_high)
plt.plot(dates, plot_low, label="Typical low")
plt.plot(dates, plot_high, label="Typical high")
# Plot the best fit polynomial
poly, d0 = analysis.polyfit(dates, levels, p)
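    # Subtracting the offset d0 returned by polyfit evaluates the polynomial in the
    # same shifted coordinates used for the fit; it also keeps the x values small,
    # since raw matplotlib date numbers are large.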
x=date2num(dates)
plt.plot(x, poly(x-d0), "--", label = "Best fit")
# Add all the labels and titles
plt.xlabel("date")
plt.ylabel("water level (m)")
plt.xticks(rotation=45)
plt.title(station.name)
plt.legend()
# Show plot
plt.tight_layout()
plt.grid()
plt.show() | 28.776316 | 117 | 0.678098 |
0df85c7b6cfcfa0d70765af9c43a7379f56ead71 | 42,418 | py | Python | Configuration/EventContent/python/EventContent_cff.py | RosmarieSchoefbeck/cmssw | 66b151a27eef46ff965508064071091ecb21395c | [
"Apache-2.0"
] | 1 | 2020-04-09T19:05:23.000Z | 2020-04-09T19:05:23.000Z | Configuration/EventContent/python/EventContent_cff.py | RosmarieSchoefbeck/cmssw | 66b151a27eef46ff965508064071091ecb21395c | [
"Apache-2.0"
] | null | null | null | Configuration/EventContent/python/EventContent_cff.py | RosmarieSchoefbeck/cmssw | 66b151a27eef46ff965508064071091ecb21395c | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
#
# Event Content definition
#
# Data Tiers defined:
#
# LHE:
# include pure LHE production
#
# RAW , RECO, AOD:
# include reconstruction content
#
# RAWSIM, RECOSIM, AODSIM:
# include reconstruction and simulation
#
# GENRAW
# slimmed-down version of RAWSIM for small transient disk size during MC production, contains Gen+Rawdata
#
# PREMIX
# contains special Digi collection(s) for pre-mixing minbias events for pileup simulation
# Raw2Digi step is done on this file.
#
# PREMIXRAW
# extension of RAWSIM for output of second stage of PreMixing using the DataMixer.
#
# RAWDEBUG(RAWSIM+ALL_SIM_INFO), RAWDEBUGHLT(RAWDEBUG+HLTDEBUG)
#
# RAWSIMHLT (RAWSIM + HLTDEBUG)
#
# RAWRECOSIMHLT, RAWRECODEBUGHLT
#
# FEVT (RAW+RECO), FEVTSIM (RAWSIM+RECOSIM), FEVTDEBUG (FEVTSIM+ALL_SIM_INFO), FEVTDEBUGHLT (FEVTDEBUG+HLTDEBUG)
#
# $Id: EventContent_cff.py,v 1.54 2013/05/01 15:44:29 mikeh Exp $
#
#
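#
# Illustrative sketch (names below are hypothetical, not part of this configuration):
# an event content PSet such as FEVTDEBUGEventContent, defined further down, is
# typically handed to a PoolOutputModule in a job configuration, e.g.
#
#   out = cms.OutputModule("PoolOutputModule",
#       fileName = cms.untracked.string("step_FEVTDEBUG.root"),
#       outputCommands = FEVTDEBUGEventContent.outputCommands,
#       splitLevel = FEVTDEBUGEventContent.splitLevel
#   )
#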
#
#
# Reconstruction Systems
#
#
from RecoLocalTracker.Configuration.RecoLocalTracker_EventContent_cff import *
from RecoLocalMuon.Configuration.RecoLocalMuon_EventContent_cff import *
from RecoLocalCalo.Configuration.RecoLocalCalo_EventContent_cff import *
from RecoEcal.Configuration.RecoEcal_EventContent_cff import *
from TrackingTools.Configuration.TrackingTools_EventContent_cff import *
from RecoTracker.Configuration.RecoTracker_EventContent_cff import *
from RecoJets.Configuration.RecoJets_EventContent_cff import *
from RecoMET.Configuration.RecoMET_EventContent_cff import *
from RecoMuon.Configuration.RecoMuon_EventContent_cff import *
from RecoBTau.Configuration.RecoBTau_EventContent_cff import *
from RecoBTag.Configuration.RecoBTag_EventContent_cff import *
from RecoTauTag.Configuration.RecoTauTag_EventContent_cff import *
from RecoVertex.Configuration.RecoVertex_EventContent_cff import *
from RecoPixelVertexing.Configuration.RecoPixelVertexing_EventContent_cff import *
from RecoEgamma.Configuration.RecoEgamma_EventContent_cff import *
from RecoParticleFlow.Configuration.RecoParticleFlow_EventContent_cff import *
from L1Trigger.Configuration.L1Trigger_EventContent_cff import *
from RecoVertex.BeamSpotProducer.BeamSpot_EventContent_cff import *
from CommonTools.ParticleFlow.EITopPAG_EventContent_cff import EITopPAGEventContent
from RecoPPS.Configuration.RecoCTPPS_EventContent_cff import *
from RecoHGCal.Configuration.RecoHGCal_EventContent_cff import *
# raw2digi that are already the final RECO/AOD products
from EventFilter.ScalersRawToDigi.Scalers_EventContent_cff import *
from EventFilter.OnlineMetaDataRawToDigi.OnlineMetaData_EventContent_cff import *
from EventFilter.Utilities.Tcds_EventContent_cff import *
#DigiToRaw content
from EventFilter.Configuration.DigiToRaw_EventContent_cff import *
#
#
# Simulation Systems
#
#
from GeneratorInterface.Configuration.GeneratorInterface_EventContent_cff import *
from SimG4Core.Configuration.SimG4Core_EventContent_cff import *
from SimTracker.Configuration.SimTracker_EventContent_cff import *
from SimMuon.Configuration.SimMuon_EventContent_cff import *
from SimCalorimetry.Configuration.SimCalorimetry_EventContent_cff import *
from SimFastTiming.Configuration.SimFastTiming_EventContent_cff import *
from SimGeneral.Configuration.SimGeneral_EventContent_cff import *
from IOMC.RandomEngine.IOMC_EventContent_cff import *
#
#
# HLT
#
#
from HLTrigger.Configuration.HLTrigger_EventContent_cff import *
#
#
# DQM
#
#
from DQMOffline.Configuration.DQMOffline_EventContent_cff import *
#
#
# NANOAOD
#
#
from PhysicsTools.NanoAOD.NanoAODEDMEventContent_cff import *
#
#
# FastSim
#
#
from FastSimulation.Configuration.EventContent_cff import FASTPUEventContent
import FastSimulation.Configuration.EventContent_cff as fastSimEC
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(RecoLocalTrackerRECO, outputCommands = fastSimEC.RecoLocalTracker.outputCommands)
fastSim.toModify(RecoLocalTrackerFEVT, outputCommands = fastSimEC.RecoLocalTracker.outputCommands)
fastSim.toReplaceWith(SimG4CoreRAW, fastSimEC.SimRAW)
fastSim.toReplaceWith(SimG4CoreRECO, fastSimEC.SimRECO)
#
#
# Top level additional keep statements
#
#
CommonEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('keep *_logErrorHarvester_*_*')
)
#
#
# LHE Data Tier definition
#
#
LHEEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAW Data Tier definition
#
#
RAWEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*'),
splitLevel = cms.untracked.int32(0),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
#
#
# RECO Data Tier definition
#
#
RECOEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAWRECO Data Tier definition
#
#
RAWRECOEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# AOD Data Tier definition
#
#
AODEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(30*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
#
#
# RAWAOD Data Tier definition
#
#
RAWAODEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(30*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
#
#
# RAWSIM Data Tier definition
# ===========================
#
# Here, we sacrifice memory and CPU time to decrease the on-disk size as
# much as possible. Given the current per-event GEN-SIM and DIGI-RECO times,
# the extra CPU time for LZMA compression works out to be ~1%. The GEN-SIM
# use case of reading a minbias event for `classic pileup` has a similar CPU
# impact.
# The memory increase appears to be closer to 50MB - but that should be
# acceptable as the introduction of multithreaded processing has bought us some
# breathing room.
#
RAWSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize=cms.untracked.int32(20*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(1)
)
#
#
# RAWSIMHLT Data Tier definition
#
#
RAWSIMHLTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAWRECOSIMHLT Data Tier definition
#
#
RAWRECOSIMHLTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAWRECODEBUGHLT Data Tier definition
#
#
RAWRECODEBUGHLTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RECOSIM Data Tier definition
#
#
RECOSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# GENRAW Data Tier definition
#
#
GENRAWEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# AODSIM Data Tier definition
#
#
AODSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(30*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4),
)
#
#
# FEVT Data Tier definition
#
#
FEVTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
FEVTHLTALLEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# FEVTSIM Data Tier definition
#
#
FEVTSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAWDEBUG Data Tier definition
#
#
RAWDEBUGEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RAWDEBUGHLT Data Tier definition
#
#
RAWDEBUGHLTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# FEVTDEBUG Data Tier definition
#
#
FEVTDEBUGEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# FEVTDEBUGHLT Data Tier definition
#
#
FEVTDEBUGHLTEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
# RECOSIMDEBUG Data Tier definition
#
#
RECODEBUGEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
## HLTDEBUG tier definition
#
HLTDEBUGEventContent = cms.PSet(
#outputCommands = cms.untracked.vstring('drop *',
# 'keep *_hlt*_*_*')
outputCommands = cms.untracked.vstring('drop *',
'keep *_logErrorHarvester_*_*'),
splitLevel = cms.untracked.int32(0),
)
#
#
## DQM event content
#
#
DQMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep *_MEtoEDMConverter_*_*'),
splitLevel = cms.untracked.int32(0)
)
#Special Event Content for MixingModule and DataMixer
DATAMIXEREventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep CSCDetIdCSCALCTDigiMuonDigiCollection_muonCSCDigis_MuonCSCALCTDigi_*',
'keep CSCDetIdCSCCLCTDigiMuonDigiCollection_muonCSCDigis_MuonCSCCLCTDigi_*',
'keep CSCDetIdCSCComparatorDigiMuonDigiCollection_muonCSCDigis_MuonCSCComparatorDigi_*',
'keep CSCDetIdCSCCorrelatedLCTDigiMuonDigiCollection_csctfDigis_*_*',
'keep CSCDetIdCSCCorrelatedLCTDigiMuonDigiCollection_muonCSCDigis_MuonCSCCorrelatedLCTDigi_*',
'keep CSCDetIdCSCRPCDigiMuonDigiCollection_muonCSCDigis_MuonCSCRPCDigi_*',
'keep CSCDetIdCSCStripDigiMuonDigiCollection_muonCSCDigis_MuonCSCStripDigi_*',
'keep CSCDetIdCSCWireDigiMuonDigiCollection_muonCSCDigis_MuonCSCWireDigi_*',
'keep DTLayerIdDTDigiMuonDigiCollection_muonDTDigis_*_*',
'keep PixelDigiedmDetSetVector_siPixelDigis_*_*',
'keep SiStripDigiedmDetSetVector_siStripDigis_*_*',
'keep RPCDetIdRPCDigiMuonDigiCollection_muonRPCDigis_*_*',
'keep HBHEDataFramesSorted_hcalDigis_*_*',
'keep HFDataFramesSorted_hcalDigis_*_*',
'keep HODataFramesSorted_hcalDigis_*_*',
'keep QIE10DataFrameHcalDataFrameContainer_hcalDigis_*_*',
'keep QIE11DataFrameHcalDataFrameContainer_hcalDigis_*_*',
'keep ZDCDataFramesSorted_hcalDigis_*_*',
'keep CastorDataFramesSorted_castorDigis_*_*',
'keep EBDigiCollection_ecalDigis_*_*',
'keep EEDigiCollection_ecalDigis_*_*',
'keep ESDigiCollection_ecalPreshowerDigis_*_*'),
splitLevel = cms.untracked.int32(0),
)
PREMIXEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
MIXINGMODULEEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep *_cfWriter_*_*'),
splitLevel = cms.untracked.int32(0),
)
# PREMIXRAW Data Tier definition
#
#
PREMIXRAWEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
splitLevel = cms.untracked.int32(0),
)
#
#
## RAW repacked event content definition
#
#
REPACKRAWEventContent = cms.PSet(
outputCommands = cms.untracked.vstring(
'drop *',
'drop FEDRawDataCollection_*_*_*',
'keep FEDRawDataCollection_rawDataRepacker_*_*',
'keep FEDRawDataCollection_virginRawDataRepacker_*_*',
'keep FEDRawDataCollection_rawDataReducedFormat_*_*'),
splitLevel = cms.untracked.int32(0),
)
REPACKRAWSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring(),
splitLevel = cms.untracked.int32(0),
)
LHEEventContent.outputCommands.extend(GeneratorInterfaceLHE.outputCommands)
HLTDEBUGEventContent.outputCommands.extend(HLTDebugFEVT.outputCommands)
RAWEventContent.outputCommands.extend(L1TriggerRAW.outputCommands)
RAWEventContent.outputCommands.extend(HLTriggerRAW.outputCommands)
REPACKRAWEventContent.outputCommands.extend(L1TriggerRAW.outputCommands)
REPACKRAWEventContent.outputCommands.extend(HLTriggerRAW.outputCommands)
RECOEventContent.outputCommands.extend(RecoLocalTrackerRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoLocalMuonRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoLocalCaloRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoEcalRECO.outputCommands)
RECOEventContent.outputCommands.extend(TrackingToolsRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoTrackerRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoJetsRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoMETRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoMuonRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoBTauRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoBTagRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoTauTagRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoVertexRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoEgammaRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoPixelVertexingRECO.outputCommands)
RECOEventContent.outputCommands.extend(RecoParticleFlowRECO.outputCommands)
RECOEventContent.outputCommands.extend(BeamSpotRECO.outputCommands)
RECOEventContent.outputCommands.extend(L1TriggerRECO.outputCommands)
RECOEventContent.outputCommands.extend(HLTriggerRECO.outputCommands)
RECOEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
RECOEventContent.outputCommands.extend(EvtScalersRECO.outputCommands)
RECOEventContent.outputCommands.extend(OnlineMetaDataContent.outputCommands)
RECOEventContent.outputCommands.extend(TcdsEventContent.outputCommands)
RECOEventContent.outputCommands.extend(CommonEventContent.outputCommands)
RECOEventContent.outputCommands.extend(EITopPAGEventContent.outputCommands)
from Configuration.Eras.Modifier_ctpps_2016_cff import ctpps_2016
ctpps_2016.toModify(RECOEventContent, outputCommands = RECOEventContent.outputCommands + RecoCTPPSRECO.outputCommands)
from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal
phase2_hgcal.toModify(RECOEventContent,
outputCommands = RECOEventContent.outputCommands + TICL_RECO.outputCommands)
RAWRECOEventContent.outputCommands.extend(RECOEventContent.outputCommands)
RAWRECOEventContent.outputCommands.extend(cms.untracked.vstring(
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*'
))
AODEventContent.outputCommands.extend(RecoLocalTrackerAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoLocalMuonAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoLocalCaloAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoEcalAOD.outputCommands)
AODEventContent.outputCommands.extend(TrackingToolsAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoTrackerAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoJetsAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoMETAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoMuonAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoBTauAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoBTagAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoTauTagAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoVertexAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoEgammaAOD.outputCommands)
AODEventContent.outputCommands.extend(RecoParticleFlowAOD.outputCommands)
AODEventContent.outputCommands.extend(BeamSpotAOD.outputCommands)
AODEventContent.outputCommands.extend(L1TriggerAOD.outputCommands)
AODEventContent.outputCommands.extend(HLTriggerAOD.outputCommands)
AODEventContent.outputCommands.extend(MEtoEDMConverterAOD.outputCommands)
AODEventContent.outputCommands.extend(EvtScalersAOD.outputCommands)
AODEventContent.outputCommands.extend(OnlineMetaDataContent.outputCommands)
AODEventContent.outputCommands.extend(TcdsEventContent.outputCommands)
AODEventContent.outputCommands.extend(CommonEventContent.outputCommands)
ctpps_2016.toModify(AODEventContent, outputCommands = AODEventContent.outputCommands + RecoCTPPSAOD.outputCommands)
phase2_hgcal.toModify(AODEventContent,
outputCommands = AODEventContent.outputCommands + TICL_AOD.outputCommands)
RAWAODEventContent.outputCommands.extend(AODEventContent.outputCommands)
RAWAODEventContent.outputCommands.extend(cms.untracked.vstring(
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*'
))
RAWSIMEventContent.outputCommands.extend(RAWEventContent.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimG4CoreRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimTrackerRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimMuonRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimCalorimetryRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimFastTimingRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(SimGeneralRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(GeneratorInterfaceRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(RecoGenJetsFEVT.outputCommands)
RAWSIMEventContent.outputCommands.extend(RecoGenMETFEVT.outputCommands)
RAWSIMEventContent.outputCommands.extend(DigiToRawFEVT.outputCommands)
RAWSIMEventContent.outputCommands.extend(MEtoEDMConverterFEVT.outputCommands)
RAWSIMEventContent.outputCommands.extend(IOMCRAW.outputCommands)
RAWSIMEventContent.outputCommands.extend(CommonEventContent.outputCommands)
RAWSIMHLTEventContent.outputCommands.extend(RAWSIMEventContent.outputCommands)
RAWSIMHLTEventContent.outputCommands.extend(HLTDebugRAW.outputCommands)
GENRAWEventContent.outputCommands.extend(RAWEventContent.outputCommands)
GENRAWEventContent.outputCommands.extend(GeneratorInterfaceRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(SimG4CoreRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(SimTrackerRAW.outputCommands)
GENRAWEventContent.outputCommands.extend(SimMuonRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(SimCalorimetryRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(SimFastTimingRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(SimGeneralRECO.outputCommands)
GENRAWEventContent.outputCommands.extend(RecoGenMETFEVT.outputCommands)
GENRAWEventContent.outputCommands.extend(RecoGenJetsFEVT.outputCommands)
GENRAWEventContent.outputCommands.extend(MEtoEDMConverterFEVT.outputCommands)
GENRAWEventContent.outputCommands.extend(IOMCRAW.outputCommands)
GENRAWEventContent.outputCommands.extend(DigiToRawFEVT.outputCommands)
GENRAWEventContent.outputCommands.extend(CommonEventContent.outputCommands)
PREMIXEventContent.outputCommands.extend(SimGeneralRAW.outputCommands)
PREMIXEventContent.outputCommands.extend(IOMCRAW.outputCommands)
PREMIXEventContent.outputCommands.extend(CommonEventContent.outputCommands)
PREMIXEventContent.outputCommands.extend(SimTrackerPREMIX.outputCommands)
PREMIXEventContent.outputCommands.extend(SimCalorimetryPREMIX.outputCommands)
PREMIXEventContent.outputCommands.extend(SimFastTimingPREMIX.outputCommands)
PREMIXEventContent.outputCommands.extend(SimMuonPREMIX.outputCommands)
PREMIXEventContent.outputCommands.extend(SimGeneralPREMIX.outputCommands)
fastSim.toModify(PREMIXEventContent, outputCommands = PREMIXEventContent.outputCommands+fastSimEC.extraPremixContent)
PREMIXRAWEventContent.outputCommands.extend(RAWSIMEventContent.outputCommands)
PREMIXRAWEventContent.outputCommands.append('keep CrossingFramePlaybackInfoNew_*_*_*')
PREMIXRAWEventContent.outputCommands.append('drop CrossingFramePlaybackInfoNew_mix_*_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_MergedTrackTruth_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_StripDigiSimLink_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_PixelDigiSimLink_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_MuonCSCStripDigiSimLinks_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_MuonCSCWireDigiSimLinks_*')
PREMIXRAWEventContent.outputCommands.append('keep *_*_RPCDigiSimLink_*')
PREMIXRAWEventContent.outputCommands.append('keep DTLayerIdDTDigiSimLinkMuonDigiCollection_*_*_*')
fastSim.toModify(PREMIXEventContent, outputCommands = PREMIXEventContent.outputCommands+fastSimEC.extraPremixContent)
REPACKRAWSIMEventContent.outputCommands.extend(REPACKRAWEventContent.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimG4CoreRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimTrackerRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimMuonRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimCalorimetryRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimFastTimingRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(SimGeneralRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(GeneratorInterfaceRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(RecoGenJetsFEVT.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(RecoGenMETFEVT.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(DigiToRawFEVT.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(MEtoEDMConverterFEVT.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(IOMCRAW.outputCommands)
REPACKRAWSIMEventContent.outputCommands.extend(CommonEventContent.outputCommands)
RECOSIMEventContent.outputCommands.extend(RECOEventContent.outputCommands)
RECOSIMEventContent.outputCommands.extend(GeneratorInterfaceRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(RecoGenMETRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(RecoGenJetsRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimG4CoreRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimTrackerRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimMuonRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimCalorimetryRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimFastTimingRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(SimGeneralRECO.outputCommands)
RECOSIMEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
AODSIMEventContent.outputCommands.extend(AODEventContent.outputCommands)
AODSIMEventContent.outputCommands.extend(GeneratorInterfaceAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimG4CoreAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimTrackerAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimMuonAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimCalorimetryAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimFastTimingAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(RecoGenJetsAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(RecoGenMETAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(SimGeneralAOD.outputCommands)
AODSIMEventContent.outputCommands.extend(MEtoEDMConverterAOD.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(RAWRECOEventContent.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(GeneratorInterfaceRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(RecoGenMETRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(RecoGenJetsRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimG4CoreRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimTrackerRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimMuonRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimCalorimetryRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimFastTimingRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(SimGeneralRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
RAWRECOSIMHLTEventContent.outputCommands.extend(HLTDebugRAW.outputCommands)
RAWRECODEBUGHLTEventContent.outputCommands.extend(RAWRECOSIMHLTEventContent.outputCommands)
RAWRECODEBUGHLTEventContent.outputCommands.extend(SimGeneralFEVTDEBUG.outputCommands)
RAWRECODEBUGHLTEventContent.outputCommands.extend(SimTrackerDEBUG.outputCommands)
FEVTEventContent.outputCommands.extend(RAWEventContent.outputCommands)
FEVTEventContent.outputCommands.extend(RecoLocalTrackerRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoLocalMuonRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoLocalCaloRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoEcalRECO.outputCommands)
FEVTEventContent.outputCommands.extend(TrackingToolsRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoTrackerRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoJetsRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoMETRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoMuonRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoBTauRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoBTagRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoTauTagRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoVertexRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoEgammaRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoPixelVertexingRECO.outputCommands)
FEVTEventContent.outputCommands.extend(RecoParticleFlowRECO.outputCommands)
FEVTEventContent.outputCommands.extend(BeamSpotRECO.outputCommands)
FEVTEventContent.outputCommands.extend(L1TriggerRECO.outputCommands)
FEVTEventContent.outputCommands.extend(HLTriggerRECO.outputCommands)
FEVTEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
FEVTEventContent.outputCommands.extend(EvtScalersRECO.outputCommands)
FEVTEventContent.outputCommands.extend(OnlineMetaDataContent.outputCommands)
FEVTEventContent.outputCommands.extend(TcdsEventContent.outputCommands)
FEVTEventContent.outputCommands.extend(CommonEventContent.outputCommands)
FEVTEventContent.outputCommands.extend(EITopPAGEventContent.outputCommands)
ctpps_2016.toModify(FEVTEventContent, outputCommands = FEVTEventContent.outputCommands + RecoCTPPSFEVT.outputCommands)
phase2_hgcal.toModify(FEVTEventContent,
outputCommands = FEVTEventContent.outputCommands + TICL_FEVT.outputCommands)
FEVTHLTALLEventContent.outputCommands.extend(FEVTEventContent.outputCommands)
FEVTHLTALLEventContent.outputCommands.append('keep *_*_*_HLT')
FEVTSIMEventContent.outputCommands.extend(RAWEventContent.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimG4CoreRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimTrackerRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimMuonRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimCalorimetryRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimFastTimingRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimGeneralRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(GeneratorInterfaceRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoGenJetsFEVT.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoGenMETFEVT.outputCommands)
FEVTSIMEventContent.outputCommands.extend(DigiToRawFEVT.outputCommands)
FEVTSIMEventContent.outputCommands.extend(MEtoEDMConverterFEVT.outputCommands)
FEVTSIMEventContent.outputCommands.extend(IOMCRAW.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoLocalTrackerRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoLocalMuonRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoLocalCaloRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoEcalRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(TrackingToolsRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoTrackerRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoJetsRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoMETRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoMuonRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoBTauRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoBTagRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoTauTagRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoVertexRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoEgammaRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoPixelVertexingRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoParticleFlowRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(BeamSpotRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(L1TriggerRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(HLTriggerRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(GeneratorInterfaceRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoGenMETRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(RecoGenJetsRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimG4CoreRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimTrackerRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimMuonRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimCalorimetryRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimFastTimingRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(SimGeneralRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(MEtoEDMConverterRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(EvtScalersRECO.outputCommands)
FEVTSIMEventContent.outputCommands.extend(CommonEventContent.outputCommands)
FEVTSIMEventContent.outputCommands.extend(EITopPAGEventContent.outputCommands)
FEVTSIMEventContent.outputCommands.extend(OnlineMetaDataContent.outputCommands)
FEVTSIMEventContent.outputCommands.extend(TcdsEventContent.outputCommands)
phase2_hgcal.toModify(FEVTSIMEventContent,
outputCommands = FEVTSIMEventContent.outputCommands + TICL_FEVT.outputCommands)
RAWDEBUGEventContent.outputCommands.extend(RAWSIMEventContent.outputCommands)
RAWDEBUGEventContent.outputCommands.extend(SimTrackerDEBUG.outputCommands)
RAWDEBUGEventContent.outputCommands.extend(SimGeneralFEVTDEBUG.outputCommands)
RAWDEBUGEventContent.outputCommands.extend(L1TriggerRAWDEBUG.outputCommands)
RAWDEBUGHLTEventContent.outputCommands.extend(RAWDEBUGEventContent.outputCommands)
RAWDEBUGHLTEventContent.outputCommands.extend(HLTDebugRAW.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(FEVTSIMEventContent.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(L1TriggerFEVTDEBUG.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(SimGeneralFEVTDEBUG.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(SimTrackerFEVTDEBUG.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(SimMuonFEVTDEBUG.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(SimCalorimetryFEVTDEBUG.outputCommands)
FEVTDEBUGEventContent.outputCommands.extend(SimFastTimingFEVTDEBUG.outputCommands)
FEVTDEBUGHLTEventContent.outputCommands.extend(FEVTDEBUGEventContent.outputCommands)
FEVTDEBUGHLTEventContent.outputCommands.extend(HLTDebugFEVT.outputCommands)
FEVTDEBUGHLTEventContent.outputCommands.append('keep *_*_MergedTrackTruth_*')
FEVTDEBUGHLTEventContent.outputCommands.append('keep *_*_StripDigiSimLink_*')
FEVTDEBUGHLTEventContent.outputCommands.append('keep *_*_PixelDigiSimLink_*')
RECODEBUGEventContent.outputCommands.extend(RECOSIMEventContent.outputCommands)
RECODEBUGEventContent.outputCommands.extend(SimGeneralFEVTDEBUG.outputCommands)
RECODEBUGEventContent.outputCommands.extend(SimTrackerDEBUG.outputCommands)
from Configuration.ProcessModifiers.premix_stage2_cff import premix_stage2
from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker
(premix_stage2 & phase2_tracker).toModify(FEVTDEBUGHLTEventContent, outputCommands = FEVTDEBUGHLTEventContent.outputCommands+[
'keep *_*_Phase2OTDigiSimLink_*'
])
from Configuration.Eras.Modifier_phase2_muon_cff import phase2_muon
(premix_stage2 & phase2_muon).toModify(FEVTDEBUGHLTEventContent, outputCommands = FEVTDEBUGHLTEventContent.outputCommands+[
'keep *_*_GEMDigiSimLink_*',
'keep *_*_GEMStripDigiSimLink_*',
'keep *_*_ME0DigiSimLink_*',
'keep *_*_ME0StripDigiSimLink_*',
])
REPACKRAWSIMEventContent.outputCommands.extend(['drop FEDRawDataCollection_source_*_*',
'drop FEDRawDataCollection_rawDataCollector_*_*'])
REPACKRAWEventContent.outputCommands.extend(['drop FEDRawDataCollection_source_*_*',
'drop FEDRawDataCollection_rawDataCollector_*_*'])
#from modules in Configuration.StandardSequence.Generator_cff fixGenInfo
REGENEventContent = cms.PSet(
inputCommands=cms.untracked.vstring(
'keep *',
'drop *_genParticles_*_*',
'drop *_genParticlesForJets_*_*',
'drop *_kt4GenJets_*_*',
'drop *_kt6GenJets_*_*',
'drop *_iterativeCone5GenJets_*_*',
'drop *_ak4GenJets_*_*',
'drop *_ak7GenJets_*_*',
'drop *_ak8GenJets_*_*',
'drop *_ak4GenJetsNoNu_*_*',
'drop *_ak8GenJetsNoNu_*_*',
'drop *_genCandidatesForMET_*_*',
'drop *_genParticlesForMETAllVisible_*_*',
'drop *_genMetCalo_*_*',
'drop *_genMetCaloAndNonPrompt_*_*',
'drop *_genMetTrue_*_*',
'drop *_genMetIC5GenJs_*_*'
)
)
def SwapKeepAndDrop(l):
r=[]
for item in l:
if 'keep ' in item:
r.append(item.replace('keep ','drop '))
elif 'drop ' in item:
r.append(item.replace('drop ','keep '))
return r
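# Illustrative example: SwapKeepAndDrop(['keep *_genParticles_*_*', 'drop *_ak4GenJets_*_*'])
# returns ['drop *_genParticles_*_*', 'keep *_ak4GenJets_*_*'].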
RESIMEventContent = cms.PSet(
inputCommands=cms.untracked.vstring('drop *')
)
RESIMEventContent.inputCommands.extend(IOMCRAW.outputCommands)
RESIMEventContent.inputCommands.extend(GeneratorInterfaceRAW.outputCommands)
#RESIMEventContent.inputCommands.extend(SwapKeepAndDrop(SimG4CoreRAW.outputCommands))
#RESIMEventContent.inputCommands.extend(SwapKeepAndDrop(GeneratorInterfaceRAW.outputCommands))
REDIGIEventContent = cms.PSet(
inputCommands=cms.untracked.vstring('drop *')
)
REDIGIEventContent.inputCommands.extend(SimG4CoreRAW.outputCommands)
REDIGIEventContent.inputCommands.extend(IOMCRAW.outputCommands)
REDIGIEventContent.inputCommands.extend(GeneratorInterfaceRAW.outputCommands)
REDIGIEventContent.inputCommands.append('drop *_randomEngineStateProducer_*_*')
########### and mini AOD
#
# MiniAOD is a bit special: the files tend to be so small that letting
# ROOT automatically determine when to flush is a surprisingly big overhead.
#
MINIAODEventContent= cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(-900),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
MINIAODSIMEventContent= cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(-900),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
MINIGENEventContent= cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(15*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
from PhysicsTools.PatAlgos.slimming.slimming_cff import MicroEventContent,MicroEventContentMC,MicroEventContentGEN
MINIAODEventContent.outputCommands.extend(MicroEventContent.outputCommands)
MINIAODSIMEventContent.outputCommands.extend(MicroEventContentMC.outputCommands)
MINIGENEventContent.outputCommands.extend(MicroEventContentGEN.outputCommands)
#### RAW+miniAOD
RAWMINIAODEventContent= cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(20*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
RAWMINIAODSIMEventContent= cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(20*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
RAWMINIAODEventContent.outputCommands.extend(MicroEventContent.outputCommands)
RAWMINIAODEventContent.outputCommands.extend(L1TriggerRAW.outputCommands)
RAWMINIAODEventContent.outputCommands.extend(HLTriggerRAW.outputCommands)
RAWMINIAODSIMEventContent.outputCommands.extend(MicroEventContentMC.outputCommands)
RAWMINIAODSIMEventContent.outputCommands.extend(SimG4CoreHLTAODSIM.outputCommands)
RAWMINIAODSIMEventContent.outputCommands.extend(L1TriggerRAW.outputCommands)
RAWMINIAODSIMEventContent.outputCommands.extend(HLTriggerRAW.outputCommands)
RAWMINIAODEventContent.outputCommands.extend(cms.untracked.vstring(
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*'
))
RAWMINIAODSIMEventContent.outputCommands.extend(cms.untracked.vstring(
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*'
))
#
#
# RAWSIM Data Tier definition
# Meant as means to temporarily hold the RAW + AODSIM information as to allow the
# L1+HLT to be rerun at a later time.
#
RAWAODSIMEventContent = cms.PSet(
outputCommands = cms.untracked.vstring('drop *'),
eventAutoFlushCompressedSize=cms.untracked.int32(20*1024*1024),
compressionAlgorithm=cms.untracked.string("LZMA"),
compressionLevel=cms.untracked.int32(4)
)
RAWAODSIMEventContent.outputCommands.extend(AODSIMEventContent.outputCommands)
RAWAODSIMEventContent.outputCommands.extend(L1TriggerRAW.outputCommands)
RAWAODSIMEventContent.outputCommands.extend(HLTriggerRAW.outputCommands)
RAWAODSIMEventContent.outputCommands.extend(SimG4CoreHLTAODSIM.outputCommands)
# in fastsim, normal digis are edaliases of simdigis
# drop the simdigis to avoid complaints from the outputmodule related to duplicated branches
for _entry in [FEVTDEBUGHLTEventContent,FEVTDEBUGEventContent,RECOSIMEventContent,AODSIMEventContent,RAWAODSIMEventContent]:
fastSim.toModify(_entry, outputCommands = _entry.outputCommands + fastSimEC.dropSimDigis)
for _entry in [MINIAODEventContent, MINIAODSIMEventContent]:
fastSim.toModify(_entry, outputCommands = _entry.outputCommands + fastSimEC.dropPatTrigger)
for _entry in [FEVTDEBUGEventContent,FEVTDEBUGHLTEventContent,FEVTEventContent]:
phase2_tracker.toModify(_entry, outputCommands = _entry.outputCommands + [
'keep Phase2TrackerDigiedmDetSetVector_mix_*_*',
'keep *_TTClustersFromPhase2TrackerDigis_*_*',
'keep *_TTStubsFromPhase2TrackerDigis_*_*'
])
from Configuration.Eras.Modifier_run2_GEM_2017_cff import run2_GEM_2017
from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM
from Configuration.Eras.Modifier_phase2_muon_cff import phase2_muon
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
for _entry in [FEVTDEBUGEventContent,FEVTDEBUGHLTEventContent,FEVTEventContent]:
run2_GEM_2017.toModify(_entry, outputCommands = _entry.outputCommands + ['keep *_muonGEMDigis_*_*'])
run3_GEM.toModify(_entry, outputCommands = _entry.outputCommands + ['keep *_muonGEMDigis_*_*'])
phase2_muon.toModify(_entry, outputCommands = _entry.outputCommands + ['keep *_muonGEMDigis_*_*'])
pp_on_AA_2018.toModify(_entry, outputCommands = _entry.outputCommands + ['keep FEDRawDataCollection_rawDataRepacker_*_*'])
from RecoLocalFastTime.Configuration.RecoLocalFastTime_EventContent_cff import RecoLocalFastTimeFEVT, RecoLocalFastTimeRECO, RecoLocalFastTimeAOD
from Configuration.Eras.Modifier_phase2_timing_layer_cff import phase2_timing_layer
def _addOutputCommands(mod, newCommands):
phase2_timing_layer.toModify(mod, outputCommands = mod.outputCommands + newCommands.outputCommands)
_addOutputCommands(FEVTDEBUGEventContent,RecoLocalFastTimeFEVT)
_addOutputCommands(FEVTDEBUGHLTEventContent,RecoLocalFastTimeFEVT)
_addOutputCommands(FEVTEventContent,RecoLocalFastTimeFEVT)
_addOutputCommands(RECOSIMEventContent,RecoLocalFastTimeRECO)
_addOutputCommands(AODSIMEventContent,RecoLocalFastTimeAOD)
from RecoMTD.Configuration.RecoMTD_EventContent_cff import RecoMTDFEVT, RecoMTDRECO, RecoMTDAOD
_addOutputCommands(FEVTDEBUGEventContent,RecoMTDFEVT)
_addOutputCommands(FEVTDEBUGHLTEventContent,RecoMTDFEVT)
_addOutputCommands(FEVTEventContent,RecoMTDFEVT)
_addOutputCommands(RECOSIMEventContent,RecoMTDRECO)
_addOutputCommands(AODSIMEventContent,RecoMTDAOD)
| 45.956663 | 145 | 0.823896 |
7c1200068d8782097646d90441312a73ff9ff3e5 | 7,625 | py | Python | JumpScale9Lib/clients/zero_os/Client.py | Jumpscale/lib9 | 82224784ef2a7071faeb48349007211c367bc673 | [
"Apache-2.0"
] | 2 | 2017-06-07T08:11:47.000Z | 2017-11-10T02:19:48.000Z | JumpScale9Lib/clients/zero_os/Client.py | Jumpscale/lib9 | 82224784ef2a7071faeb48349007211c367bc673 | [
"Apache-2.0"
] | 188 | 2017-06-21T06:16:13.000Z | 2020-06-17T14:20:24.000Z | JumpScale9Lib/clients/zero_os/Client.py | Jumpscale/lib9 | 82224784ef2a7071faeb48349007211c367bc673 | [
"Apache-2.0"
] | 3 | 2018-06-12T05:18:28.000Z | 2019-09-24T06:49:17.000Z | import json
import socket
import time
import uuid
import redis
from js9 import j
from . import typchk
from .AggregatorManager import AggregatorManager
from .BaseClient import BaseClient, DefaultTimeout
from .BridgeManager import BridgeManager
from .BtrfsManager import BtrfsManager
from .CGroupManager import CGroupManager
from .Config import Config
from .ContainerManager import ContainerManager
from .DiskManager import DiskManager
from .KvmManager import KvmManager
from .LogManager import LogManager
from .Nft import Nft
from .Response import Response
from .RTInfoManager import RTInfoManager
from .WebManager import WebManager
from .ZerotierManager import ZerotierManager
TEMPLATE = """
host = "127.0.0.1"
port = 6379
unixsocket = ""
password_ = ""
db = 0
ssl = true
timeout = 120
"""
JSConfigClientBase = j.tools.configmanager.base_class_config
class Client(BaseClient, JSConfigClientBase):
_raw_chk = typchk.Checker({
'id': str,
'command': str,
'arguments': typchk.Any(),
'queue': typchk.Or(str, typchk.IsNone()),
'max_time': typchk.Or(int, typchk.IsNone()),
'stream': bool,
'tags': typchk.Or([str], typchk.IsNone()),
})
def __init__(self, instance="main", data={}, parent=None, template=None, ui=None, interactive=True):
JSConfigClientBase.__init__(self, instance=instance, data=data, parent=parent, template=TEMPLATE, ui=ui, interactive=interactive)
timeout = self.config.data['timeout']
BaseClient.__init__(self, timeout=timeout)
self.__redis = None
self._container_manager = ContainerManager(self)
self._bridge_manager = BridgeManager(self)
self._disk_manager = DiskManager(self)
self._btrfs_manager = BtrfsManager(self)
self._zerotier = ZerotierManager(self)
self._kvm = KvmManager(self)
self._log_manager = LogManager(self)
self._nft = Nft(self)
self._config_manager = Config(self)
self._aggregator = AggregatorManager(self)
self._jwt_expire_timestamp = 0
self._web = WebManager(self)
self._rtinfo = RTInfoManager(self)
self._cgroup = CGroupManager(self)
@property
def _redis(self):
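        # Lazily (re)builds the Redis connection. When the configured password is
        # an ItsYou.online JWT, it is refreshed shortly before expiry and the
        # cached connection is dropped so the next access reconnects with the
        # fresh token.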
password = self.config.data['password_']
if password and not self._jwt_expire_timestamp:
self._jwt_expire_timestamp = j.clients.itsyouonline.jwt_expire_timestamp(password)
if self._jwt_expire_timestamp and self._jwt_expire_timestamp - 300 < time.time():
password = j.clients.itsyouonline.refresh_jwt_token(password, validity=3600)
self.config.data_set('password_', password)
self.config.save()
if self.__redis:
self.__redis = None
self._jwt_expire_timestamp = j.clients.itsyouonline.jwt_expire_timestamp(password)
if self.__redis is None:
if self.config.data['unixsocket']:
self.__redis = redis.Redis(unix_socket_path=self.config.data['unixsocket'], db=self.config.data['db'])
else:
timeout = self.config.data['timeout']
socket_timeout = (timeout + 5) if timeout else 15
socket_keepalive_options = dict()
if hasattr(socket, 'TCP_KEEPIDLE'):
socket_keepalive_options[socket.TCP_KEEPIDLE] = 1
if hasattr(socket, 'TCP_KEEPINTVL'):
socket_keepalive_options[socket.TCP_KEEPINTVL] = 1
                if hasattr(socket, 'TCP_KEEPCNT'):  # TCP_KEEPCNT assumed here (keepalive probe count)
                    socket_keepalive_options[socket.TCP_KEEPCNT] = 1
self.__redis = redis.Redis(host=self.config.data['host'],
port=self.config.data['port'],
password=self.config.data['password_'],
db=self.config.data['db'], ssl=self.config.data['ssl'],
socket_timeout=socket_timeout,
socket_keepalive=True, socket_keepalive_options=socket_keepalive_options)
return self.__redis
@property
def container(self):
"""
Container manager
:return:
"""
return self._container_manager
@property
def bridge(self):
"""
Bridge manager
:return:
"""
return self._bridge_manager
@property
def disk(self):
"""
Disk manager
:return:
"""
return self._disk_manager
@property
def btrfs(self):
"""
Btrfs manager
:return:
"""
return self._btrfs_manager
@property
def zerotier(self):
"""
Zerotier manager
:return:
"""
return self._zerotier
@property
def kvm(self):
"""
KVM manager
:return:
"""
return self._kvm
@property
def log_manager(self):
"""
Logger manager
:return:
"""
return self._log_manager
@property
def nft(self):
"""
NFT manager
:return:
"""
return self._nft
@property
def config_manager(self):
"""
Config manager
:return:
"""
return self._config_manager
@property
def aggregator(self):
"""
Aggregator manager
:return:
"""
return self._aggregator
@property
def web(self):
"""
Web manager
:return:
"""
return self._web
@property
def rtinfo(self):
"""
RTInfo manager
:return:
"""
return self._rtinfo
@property
def cgroup(self):
"""
Cgroup manager
"""
return self._cgroup
def raw(self, command, arguments, queue=None, max_time=None, stream=False, tags=None, id=None):
"""
Implements the low level command call, this needs to build the command structure
and push it on the correct queue.
:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...)
check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:param queue: command queue (commands on the same queue are executed sequentially)
:param max_time: kill job server side if it exceeded this amount of seconds
:param stream: If True, process stdout and stderr are pushed to a special queue (stream:<id>) so
client can stream output
:param tags: job tags
:param id: job id. Generated if not supplied
:return: Response object
"""
if not id:
id = str(uuid.uuid4())
payload = {
'id': id,
'command': command,
'arguments': arguments,
'queue': queue,
'max_time': max_time,
'stream': stream,
'tags': tags,
}
self._raw_chk.check(payload)
flag = 'result:{}:flag'.format(id)
self._redis.rpush('core:default', json.dumps(payload))
if self._redis.brpoplpush(flag, flag, DefaultTimeout) is None:
            raise TimeoutError('failed to queue job {}'.format(id))
self.logger.debug('%s >> g8core.%s(%s)', id, command, ', '.join(("%s=%s" % (k, v) for k, v in arguments.items())))
return Response(self, id)
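    # Hypothetical usage sketch (illustrative only; the command name and arguments
    # below are assumptions, not taken from this file): given a configured Client
    # instance `cl`, a built-in command can be dispatched through the default queue
    # and its result collected from the returned Response object:
    #
    #   response = cl.raw('core.ping', {})
    #   result = response.get(timeout=30)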
def response_for(self, id):
return Response(self, id)
| 30.378486 | 137 | 0.592656 |
4893752bc4d6b3ae10da660cc7cf7b41a3035449 | 32,612 | py | Python | Agents/PlugloadAgent/plugload/agent.py | bopopescu/TP | 72ae5654d04053810b502366f558e79407b6cea8 | [
"Unlicense"
] | null | null | null | Agents/PlugloadAgent/plugload/agent.py | bopopescu/TP | 72ae5654d04053810b502366f558e79407b6cea8 | [
"Unlicense"
] | null | null | null | Agents/PlugloadAgent/plugload/agent.py | bopopescu/TP | 72ae5654d04053810b502366f558e79407b6cea8 | [
"Unlicense"
] | 1 | 2020-07-22T19:58:44.000Z | 2020-07-22T19:58:44.000Z | # -*- coding: utf-8 -*-
'''
Copyright (c) 2016, Virginia Tech
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the authors and should not be
interpreted as representing official policies, either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the United States Government. Neither the
United States Government nor the United States Department of Energy, nor Virginia Tech, nor any of their employees,
nor any jurisdiction or organization that has cooperated in the development of these materials, makes any warranty,
express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or represents that its use would not infringe
privately owned rights.
Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or
otherwise does not necessarily constitute or imply its endorsement, recommendation, favoring by the United States
Government or any agency thereof, or Virginia Tech - Advanced Research Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof.
VIRGINIA TECH – ADVANCED RESEARCH INSTITUTE
under Contract DE-EE0006352
#__author__ = "BEMOSS Team"
#__credits__ = ""
#__version__ = "2.0"
#__maintainer__ = "BEMOSS Team"
#__email__ = "aribemoss@gmail.com"
#__website__ = "www.bemoss.org"
#__created__ = "2014-09-12 12:04:50"
#__lastUpdated__ = "2016-03-14 11:23:33"
'''
import sys
import json
import importlib
import datetime
import psycopg2
from volttron.platform.agent import BaseAgent, PublishMixin, periodic
from volttron.platform.agent import utils, matching
from volttron.platform.messaging import headers as headers_mod
from bemoss_lib.communication.Email import EmailService
from bemoss_lib.communication.sms import SMSService
import psycopg2.extras
import settings
import socket
import threading
from bemoss_lib.databases.cassandraAPI import cassandraDB
def PlugloadAgent(config_path, **kwargs):
threadingLock = threading.Lock()
config = utils.load_config(config_path)
def get_config(name):
try:
return kwargs.pop(name)
except KeyError:
return config.get(name, '')
#1. @params agent
agent_id = get_config('agent_id')
device_monitor_time = get_config('device_monitor_time')
max_monitor_time = int(settings.DEVICES['max_monitor_time'])
cassandra_update_time = int(settings.DEVICES['cassandra_update_time'])
debug_agent = False
#Dictionary of Variables supposed to be saved in postgress database
agentAPImapping = dict(status=[], power=[], energy=[])
#Dictionary of Variables supposed to be saved into timeseries database
log_variables = dict(status='text',power='double',energy='double',offline_count='int')
    tolerance = 0.5  # if the numerical variables change by less than this percent, it's not considered a change and not logged
building_name = get_config('building_name')
zone_id = get_config('zone_id')
model = get_config('model')
device_type = get_config('type')
macaddress = get_config('macaddress')
address_get = get_config('address_get')
address_put = get_config('address_put')
vendor = get_config('vendor')
auth_header = get_config('auth_header')
smt_username = get_config('smt_username')
smt_password = get_config('smt_password')
address = get_config('address')
_address = address
_address = _address.replace('http://', '')
_address = _address.replace('https://', '')
try: # validate whether or not address is an ip address
socket.inet_aton(_address)
ip_address = _address
# print "yes ip_address is {}".format(ip_address)
except socket.error:
# print "yes ip_address is None"
ip_address = None
identifiable = get_config('identifiable')
# mac_address = get_config('mac_address')
#TODO get database parameters from settings.py, add db_table for specific table
db_host = get_config('db_host')
db_port = get_config('db_port')
db_database = get_config('db_database')
db_user = get_config('db_user')
db_password = get_config('db_password')
db_table_plugload = settings.DATABASES['default']['TABLE_plugload']
db_table_notification_event = settings.DATABASES['default']['TABLE_notification_event']
db_table_active_alert = settings.DATABASES['default']['TABLE_active_alert']
db_table_device_type = settings.DATABASES['default']['TABLE_device_type']
db_table_bemoss_notify = settings.DATABASES['default']['TABLE_bemoss_notify']
db_table_alerts_notificationchanneladdress = settings.DATABASES['default'][
'TABLE_alerts_notificationchanneladdress']
db_table_temp_time_counter = settings.DATABASES['default']['TABLE_temp_time_counter']
db_table_priority = settings.DATABASES['default']['TABLE_priority']
#construct _topic_Agent_UI based on data obtained from DB
_topic_Agent_UI_tail = building_name + '/' + str(zone_id) + '/' + agent_id
api = get_config('api')
apiLib = importlib.import_module("DeviceAPI.classAPI."+api)
#4.1 initialize plugload device object
if vendor == 'Digi':
gateway_id = get_config('gateway_id')
Plugload = apiLib.API(gateway_id=gateway_id, model=model, device_type=device_type, api=api, address=address, macaddress = macaddress, agent_id=agent_id, db_host=db_host, db_port=db_port, db_user=db_user, db_password=db_password, db_database=db_database)
else:
Plugload = apiLib.API(model=model, device_type=device_type, api=api, address=address, macaddress = macaddress, agent_id=agent_id, db_host=db_host, db_port=db_port, db_user=db_user, db_password=db_password, db_database=db_database, config_path=config_path)
print("{0}agent is initialized for {1} using API={2} at {3}".format(agent_id,
Plugload.get_variable('model'),
Plugload.get_variable('api'),
Plugload.get_variable('address')))
connection_renew_interval = Plugload.variables['connection_renew_interval']
#params notification_info
send_notification = False
email_fromaddr = settings.NOTIFICATION['email']['fromaddr']
email_recipients = settings.NOTIFICATION['email']['recipients']
email_username = settings.NOTIFICATION['email']['username']
email_password = settings.NOTIFICATION['email']['password']
email_mailServer = settings.NOTIFICATION['email']['mailServer']
notify_heartbeat = settings.NOTIFICATION['heartbeat']
class Agent(PublishMixin, BaseAgent):
"""Agent for querying WeatherUndergrounds API"""
# 1. agent initialization
def __init__(self, **kwargs):
#1. initialize all agent variables
super(Agent, self).__init__(**kwargs)
self.variables = kwargs
self.valid_data = False
self._keep_alive = True
self.flag = 1
self.event_ids = list()
self.time_sent_notifications = {}
self.notify_heartbeat = notify_heartbeat
self.ip_address = ip_address if ip_address != None else None
self.changed_variables = None
self.lastUpdateTime = None
self.subscriptionTime = datetime.datetime.now()
self.already_offline = False
#2. setup connection with db -> Connect to bemossdb database
try:
self.con = psycopg2.connect(host=db_host, port=db_port, database=db_database, user=db_user,
password=db_password)
                self.cur = self.con.cursor()  # open a cursor to perform database operations
print("{} connects to the database name {} successfully".format(agent_id, db_database))
except:
print("ERROR: {} fails to connect to the database name {}".format(agent_id, db_database))
#3. send notification to notify building admin
self.send_notification = send_notification
self.subject = 'Message from ' + agent_id
# These set and get methods allow scalability
def set_variable(self,k,v): #k=key, v=value
self.variables[k] = v
def get_variable(self,k):
return self.variables.get(k, None) #default of get_variable is none
# 2. agent setup method
def setup(self):
super(Agent, self).setup()
self.timer(10, self.deviceMonitorBehavior)
#TODO do this for all devices that supports event subscriptions
if api == 'classAPI_WeMo':
#Do a one time push when we start up so we don't have to wait for the periodic polling
try:
Plugload.startListeningEvents(threadingLock,self.updateStatus)
self.subscriptionTime=datetime.datetime.now()
except Exception as er:
print "Can't subscribe.", er
def updatePostgresDB(self):
try:
self.cur.execute("UPDATE "+db_table_plugload+" SET status=%s "
"WHERE plugload_id=%s",
(self.get_variable('status'), agent_id))
self.con.commit()
if self.get_variable('power')!=None:
#self.set_variable('power', int(self.get_variable('power')))
self.cur.execute("UPDATE "+db_table_plugload+" SET power=%s "
"WHERE plugload_id=%s",
(int(self.get_variable('power')), agent_id))
self.con.commit()
if self.ip_address != None:
psycopg2.extras.register_inet()
_ip_address = psycopg2.extras.Inet(self.ip_address)
self.cur.execute("UPDATE "+db_table_plugload+" SET ip_address=%s WHERE plugload_id=%s",
(_ip_address, agent_id))
self.con.commit()
print("{} updates database name {} during deviceMonitorBehavior successfully".format(agent_id,
db_database))
except:
print("ERROR: {} fails to update the database name {}".format(agent_id, db_database))
#Re-login / re-subcribe to devices periodically. The API might choose to have empty function if not necessary
@periodic(connection_renew_interval)
def renewConnection(self):
Plugload.renewConnection()
@periodic(device_monitor_time)
def deviceMonitorBehavior(self):
try:
Plugload.getDeviceStatus()
self.variables['status'] = Plugload.variables['status']
except Exception as er:
print er
print "device connection for {} is not successful".format(agent_id)
#TODO make tolerance more accessible
tolerance = 1
def isChanged(variable_name,value1,value2):
                #Checks if two values of a variable should be considered different.
                # Returns False if numerical values differ by less than the tolerance %
if variable_name=='status':
return value1 == value2 #strict comparision for status
elif variable_name in ['power','energy','offline_count']:
if value1 == 0:
return True if value2 != 0 else False
return True if 100*abs(value1-value2)/float(value1) > tolerance else False
self.changed_variables = dict()
for v in log_variables:
if v in Plugload.variables:
if v not in self.variables or isChanged(v, self.variables[v],Plugload.variables[v]):
self.variables[v] = Plugload.variables[v]
self.changed_variables[v] = log_variables[v]
else:
if v not in self.variables: #it won't be in self.variables either (in the first time)
self.changed_variables[v] = log_variables[v]
self.variables[v] = None
try:
with threadingLock:
if self.get_variable('offline_count')>=3:
self.cur.execute("UPDATE "+db_table_plugload+" SET network_status=%s WHERE plugload_id=%s",
('OFFLINE', agent_id))
self.con.commit()
if self.already_offline is False:
self.already_offline = True
_time_stamp_last_offline = str(datetime.datetime.now())
self.cur.execute("UPDATE "+db_table_plugload+" SET last_offline_time=%s "
"WHERE plugload_id=%s",
(_time_stamp_last_offline, agent_id))
self.con.commit()
else:
self.already_offline = False
self.cur.execute("UPDATE "+db_table_plugload+" SET network_status=%s WHERE plugload_id=%s",
('ONLINE', agent_id))
self.con.commit()
# Step: Check if any Device is OFFLINE
self.cur.execute("SELECT id FROM " + db_table_active_alert + " WHERE event_trigger_id=%s", ('5',))
if self.cur.rowcount != 0:
self.device_offline_detection()
# Update scan time
_time_stamp_last_scanned = str(datetime.datetime.now())
self.cur.execute("UPDATE "+db_table_plugload+" SET last_scanned_time=%s "
"WHERE plugload_id=%s",
(_time_stamp_last_scanned, agent_id))
self.con.commit()
except Exception as er:
print er
print("ERROR: {} failed to update database name {}".format(agent_id, db_database))
if len(self.changed_variables) == 0:
print 'nothing changed'
return
self.updateStatus()
#step6: debug agent knowledge
if debug_agent == True:
print("printing agent's knowledge")
for k, v in self.variables.items():
print (k, v)
print('')
def device_offline_detection(self):
self.cur.execute("SELECT nickname FROM " + db_table_plugload + " WHERE plugload_id=%s",
(agent_id,))
print agent_id
if self.cur.rowcount != 0:
device_nickname=self.cur.fetchone()[0]
print device_nickname
else:
device_nickname = ''
_db_notification_subject = 'BEMOSS Device {} {} went OFFLINE!!!'.format(device_nickname,agent_id)
_email_subject = '#Attention: BEMOSS Device {} {} went OFFLINE!!!'.format(device_nickname,agent_id)
_email_text = '#Attention: BEMOSS Device {} {} went OFFLINE!!!'.format(device_nickname,agent_id)
self.cur.execute("SELECT network_status FROM " + db_table_plugload + " WHERE plugload_id=%s",
(agent_id,))
self.network_status = self.cur.fetchone()[0]
print self.network_status
if self.network_status=="OFFLINE":
print "Found Device OFFLINE"
self.cur.execute("SELECT id FROM " + db_table_active_alert + " WHERE event_trigger_id=%s", ('5',))
self._active_alert_id = self.cur.fetchone()[0]
self.cur.execute(
"SELECT id FROM " + db_table_temp_time_counter + " WHERE alert_id=%s AND device_id=%s",
(str(self._active_alert_id), agent_id,))
# If this is the first detected violation
if self.cur.rowcount == 0:
print "first device offline detected"
# create counter in DB
self.cur.execute(
"INSERT INTO " + db_table_temp_time_counter + " VALUES(DEFAULT,%s,%s,%s,%s,%s)",
(self._active_alert_id, agent_id, '0', '0', '0'))
self.con.commit()
self.send_device_notification_db(_db_notification_subject, self._active_alert_id)
# Send email if exist
self.cur.execute("SELECT notify_address FROM " + db_table_alerts_notificationchanneladdress + " WHERE active_alert_id=%s AND notification_channel_id=%s",(self._active_alert_id,'1'))
if self.cur.rowcount != 0:
self._alert_email = self.cur.fetchall()
for single_email_1 in self._alert_email:
print single_email_1[0]
self.send_device_notification_email(single_email_1[0], _email_subject, _email_text)
# Send SMS if provided by user
self.cur.execute("SELECT notify_address FROM " + db_table_alerts_notificationchanneladdress + " WHERE active_alert_id=%s AND notification_channel_id=%s",(self._active_alert_id,'2'))
if self.cur.rowcount != 0:
self._alert_sms_phone_no = self.cur.fetchall()
for single_number in self._alert_sms_phone_no:
print single_number[0]
self.send_device_notification_sms(single_number[0], _email_subject)
else:
self.priority_counter(self._active_alert_id, _db_notification_subject)
else:
print "The Device is ONLINE"
def send_device_notification_db(self, _tampering_device_msg, _active_alert_id):
print " INSIDE send_device_notification_db"
# Find the priority id
self.cur.execute(
"SELECT priority_id FROM " + db_table_active_alert + " WHERE id=%s",
(str(_active_alert_id),))
self.priority_id = self.cur.fetchone()[0]
# Find the priority level
self.cur.execute(
"SELECT priority_level FROM " + db_table_priority + " WHERE id=%s",
                (str(self.priority_id),))
self.priority_level = self.cur.fetchone()[0]
# Insert into DB the notification
self.cur.execute("INSERT INTO " + db_table_bemoss_notify + " VALUES(DEFAULT,%s,%s,%s,%s)",
(_tampering_device_msg,
str(datetime.datetime.now()), 'Alert', str(self.priority_level)))
self.con.commit()
# Find the number of notifications sent for the same alert and device
self.cur.execute(
"SELECT no_notifications_sent FROM " + db_table_temp_time_counter + " WHERE alert_id=%s AND device_id=%s",
(str(_active_alert_id), agent_id,))
self._no_notifications_sent = self.cur.fetchone()[0]
self.con.commit()
print self._no_notifications_sent
self._no_notifications_sent = int(self._no_notifications_sent) + 1
print self._no_notifications_sent
self.cur.execute(
"UPDATE " + db_table_temp_time_counter + " SET no_notifications_sent=%s WHERE alert_id=%s AND device_id=%s",
(str(self._no_notifications_sent), str(_active_alert_id), agent_id,))
self.con.commit()
def send_device_notification_email(self, _active_alert_email, _email_subject, _email_text):
#_email_subject = '#Attention: BEMOSS Device {} has detected a high level of CO2!!!'.format(agent_id)
# _email_text = 'Here is the detail of device status\n' + str(_tampering_device_msg) \
#_email_text = 'The CO2 level has exceeded the defined range'
emailService = EmailService()
# Send Email
emailService.sendEmail(email_fromaddr, _active_alert_email, email_username,
email_password, _email_subject, _email_text, email_mailServer)
def send_device_notification_sms(self, _active_alert_phone_number_misoperation, _sms_subject):
print "INSIDE send_device_notification_sms"
print _active_alert_phone_number_misoperation
smsService = SMSService()
smsService.sendSMS(email_fromaddr, _active_alert_phone_number_misoperation, email_username, email_password, _sms_subject, email_mailServer)
def priority_counter(self, _active_alert_id, _tampering_device_msg_1):
# Find the priority counter limit then compare it with priority_counter in priority table
# if greater than the counter limit then send notification and reset the value
# else just increase the counter
print "INSIDE the priority_counter"
_email_subject = '#Attention: BEMOSS Device {} went OFFLINE!!!'.format(agent_id)
_email_text = '#Attention: BEMOSS Device {} went OFFLINE!!!'.format(agent_id)
self.cur.execute(
"SELECT priority_counter FROM " + db_table_temp_time_counter + " WHERE alert_id=%s AND device_id=%s",
(str(_active_alert_id), agent_id,))
self.priority_count = self.cur.fetchone()[0]
self.con.commit()
# Find the priority id from active alert table
self.cur.execute(
"SELECT priority_id FROM " + db_table_active_alert + " WHERE id=%s",
(str(_active_alert_id),))
self.priority_id = self.cur.fetchone()[0]
self.con.commit()
# Find the priority limit from the priority table
self.cur.execute(
"SELECT priority_counter FROM " + db_table_priority + " WHERE id=%s",
(str(self.priority_id),))
self.priority_limit = self.cur.fetchone()[0]
self.con.commit()
# If the counter reaches the limit
if int(self.priority_count) > int(self.priority_limit):
# self._no_notifications_sent = int(self._no_notifications_sent) + 1
self.send_device_notification_db(_tampering_device_msg_1, _active_alert_id)
self.cur.execute(
"UPDATE " + db_table_temp_time_counter + " SET priority_counter=%s WHERE alert_id=%s AND device_id=%s",
('0', str(_active_alert_id), agent_id,))
self.con.commit()
print "INSIDE the priority counter exceeded the defined range"
# Send email if exist
self.cur.execute("SELECT notify_address FROM " + db_table_alerts_notificationchanneladdress + " WHERE active_alert_id=%s AND notification_channel_id=%s",(self._active_alert_id,'1'))
if self.cur.rowcount != 0:
self._alert_email = self.cur.fetchall()
for single_email_1 in self._alert_email:
print single_email_1[0]
self.send_device_notification_email(single_email_1[0], _email_subject, _email_text)
# Send SMS if provided by user
self.cur.execute("SELECT notify_address FROM " + db_table_alerts_notificationchanneladdress + " WHERE active_alert_id=%s AND notification_channel_id=%s",(self._active_alert_id,'2'))
if self.cur.rowcount != 0:
self._alert_sms_phone_no = self.cur.fetchall()
for single_number in self._alert_sms_phone_no:
print single_number[0]
self.send_device_notification_sms(single_number[0], _email_subject)
else:
self.priority_count = int(self.priority_count) + 1
self.cur.execute(
"UPDATE " + db_table_temp_time_counter + " SET priority_counter=%s WHERE alert_id=%s AND device_id=%s",
(str(self.priority_count), str(_active_alert_id), agent_id,))
@periodic(cassandra_update_time) #save all data every cassandra update time
def backupSaveData(self):
try:
with threadingLock:
Plugload.getDeviceStatus()
cassandraDB.insert(agent_id,Plugload.variables,log_variables)
print('Data Pushed to cassandra as a backup')
except Exception as er:
print("ERROR: {} fails to update cassandra database".format(agent_id))
print er
def updateStatus(self,states=None):
if states is not None:
print "got state change:",states
self.changed_variables = dict()
                if self.get_variable('status') != ('ON' if states['status'] == 1 else 'OFF'):
self.set_variable('status','ON' if states['status']==1 else 'OFF')
self.changed_variables['status'] = log_variables['status']
if 'power' in states:
if(self.get_variable('power') != states['power']):
self.changed_variables['power'] = log_variables['power']
self.set_variable('power',states['power'])
with threadingLock:
try:
cassandraDB.insert(agent_id,self.variables,log_variables)
print "cassandra success"
except Exception as er:
print("ERROR: {} fails to update cassandra database".format(agent_id))
print er
self.updatePostgresDB()
topic = '/agent/ui/'+device_type+'/device_status_response/'+_topic_Agent_UI_tail
# now = datetime.utcnow().isoformat(' ') + 'Z'
headers = {
'AgentID': agent_id,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
# headers_mod.DATE: now,
}
if self.get_variable('power') is not None:
_data={'device_id':agent_id, 'status':self.get_variable('status'), 'power':self.get_variable('power')}
else:
_data={'device_id':agent_id, 'status':self.get_variable('status')}
print "published!!!!! :{}".format(self.get_variable('status'))
message = json.dumps(_data)
message = message.encode(encoding='utf_8')
self.publish(topic, headers, message)
# 4. updateUIBehavior (generic behavior)
@matching.match_exact('/ui/agent/'+device_type+'/device_status/'+_topic_Agent_UI_tail)
def updateUIBehavior(self, topic, headers, message, match):
print "{} agent got\nTopic: {topic}".format(self.get_variable("agent_id"), topic=topic)
print "Headers: {headers}".format(headers=headers)
print "Message: {message}\n".format(message=message)
#reply message
self.updateStatus()
# 5. deviceControlBehavior (generic behavior)
@matching.match_exact('/ui/agent/'+device_type+'/update/'+_topic_Agent_UI_tail)
def deviceControlBehavior(self,topic,headers,message,match):
print "{} agent got\nTopic: {topic}".format(self.get_variable("agent_id"),topic=topic)
print "Headers: {headers}".format(headers=headers)
print "Message: {message}\n".format(message=message)
#step1: change device status according to the receive message
if self.isPostmsgValid(message[0]): # check if the data is valid
setDeviceStatusResult = Plugload.setDeviceStatus(json.loads(message[0]))
#send reply message back to the UI
topic = '/agent/ui/'+device_type+'/update_response/'+_topic_Agent_UI_tail
# now = datetime.utcnow().isoformat(' ') + 'Z'
headers = {
'AgentID': agent_id,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
# headers_mod.DATE: now,
}
if setDeviceStatusResult:
message = 'success'
else:
message = 'failure'
else:
print("The POST message is invalid, check status setting and try again\n")
message = 'failure'
self.publish(topic, headers, message)
self.deviceMonitorBehavior() #Get device status, and get updated data
def isPostmsgValid(self, postmsg): # check validity of postmsg
dataValidity = False
try:
_data = json.dumps(postmsg)
_data = json.loads(_data)
for k,v in _data.items():
if k == 'status':
dataValidity = True
break
except:
                dataValidity = False
                print("isPostmsgValid failed to validate data coming from the UI")
return dataValidity
# 6. deviceIdentifyBehavior (generic behavior)
@matching.match_exact('/ui/agent/'+device_type+'/identify/'+_topic_Agent_UI_tail)
def deviceIdentifyBehavior(self,topic,headers,message,match):
print "{} agent got\nTopic: {topic}".format(self.get_variable("agent_id"),topic=topic)
print "Headers: {headers}".format(headers=headers)
print "Message: {message}\n".format(message=message)
#step1: change device status according to the receive message
identifyDeviceResult = Plugload.identifyDevice()
            #TODO need to do additional checking whether the device setting actually succeeded!!!!!!!!
#step2: send reply message back to the UI
topic = '/agent/ui/identify_response/'+device_type+'/'+_topic_Agent_UI_tail
# now = datetime.utcnow().isoformat(' ') + 'Z'
headers = {
'AgentID': agent_id,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
# headers_mod.DATE: now,
}
if identifyDeviceResult:
message = 'success'
else:
message = 'failure'
self.publish(topic, headers, message)
Agent.__name__ = 'PlugloadAgent'
return Agent(**kwargs)
def main(argv=sys.argv):
'''Main method called by the eggsecutable.'''
utils.default_main(PlugloadAgent,
description='Plugload agent',
argv=argv)
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| 51.683043 | 263 | 0.611585 |
03b6d7d3f1443833007b2ef49fd6feb6903fac06 | 218 | py | Python | openktn/native/ktn.py | uibcdf/OpenKinNet | 8ed5d3da561e7a0f62ac80cb02cc09116c08ac25 | [
"MIT"
] | null | null | null | openktn/native/ktn.py | uibcdf/OpenKinNet | 8ed5d3da561e7a0f62ac80cb02cc09116c08ac25 | [
"MIT"
] | null | null | null | openktn/native/ktn.py | uibcdf/OpenKinNet | 8ed5d3da561e7a0f62ac80cb02cc09116c08ac25 | [
"MIT"
] | null | null | null | class KTN():
def __init__(self):
self.temperature = None
self.time = None
self.microstates = {}
self.transitions = {}
self.weight = 0.0
self.symmetrized = False
| 15.571429 | 32 | 0.53211 |
ea604ee8c92f19a7168bf3426cdd8af73b80f0b8 | 1,708 | py | Python | orm/models.py | manukanne/cloudatlas-quiz-api | ccb75305fae23db6f72bd5386d39ae83e798bb62 | [
"Apache-2.0"
] | null | null | null | orm/models.py | manukanne/cloudatlas-quiz-api | ccb75305fae23db6f72bd5386d39ae83e798bb62 | [
"Apache-2.0"
] | null | null | null | orm/models.py | manukanne/cloudatlas-quiz-api | ccb75305fae23db6f72bd5386d39ae83e798bb62 | [
"Apache-2.0"
] | null | null | null | from mongoengine import Document, EmbeddedDocument, EmbeddedDocumentField, StringField, BooleanField, \
SequenceField, ListField, ReferenceField, EmailField, DO_NOTHING as DELETION_RULE_DO_NOTHING
class User(Document):
"""
MongoDB User document
"""
email = EmailField(primary_key=True)
first_name = StringField(required=True, min_length=3, max_length=30)
last_name = StringField(required=True, min_length=3, max_length=30)
password_hash = StringField(required=True)
disabled = BooleanField(default=True)
class Category(Document):
"""
MongoDB Category document
"""
identifier = SequenceField(primary_key=True)
title = StringField(unique=True, min_length=3, max_length=75)
description = StringField(max_length=255)
class Answer(EmbeddedDocument):
"""
MongoDB embedded Answer document
"""
identifier = SequenceField(primary_key=True)
answer_text = StringField(required=True, max_length=255)
is_correct = BooleanField()
class Question(EmbeddedDocument):
"""
MongoDB embedded Question document
"""
identifier = SequenceField(primary_key=True)
title = StringField(required=True, min_length=3, max_length=500)
answers = ListField(EmbeddedDocumentField(Answer))
class Quiz(Document):
"""
MongoDB Quiz document
"""
identifier = SequenceField(primary_key=True)
title = StringField(required=True, min_length=3, max_length=75)
description = StringField(required=False, max_length=255)
owner = ReferenceField(User)
questions = ListField(EmbeddedDocumentField(Question))
categories = ListField(ReferenceField(Category, reverse_delete_rule=DELETION_RULE_DO_NOTHING))
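# Hypothetical usage sketch (not part of this module; database name and field
# values are illustrative): once a mongoengine connection is registered, the
# documents above can be created and persisted directly, e.g.
#
#   from mongoengine import connect
#   connect("quizdb")
#   category = Category(title="Compute", description="Compute services").save()
#   quiz = Quiz(title="Intro quiz", categories=[category]).save()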
| 31.054545 | 103 | 0.740047 |
1ad6007afdee6cb2591d28f405a77730f650fadd | 9,187 | py | Python | lib/pymedphys/_experimental/streamlit/utilities/icom.py | ethanio12345/pymedphys | cb34c992de8d442eced3385018a194364060092d | [
"Apache-2.0"
] | 207 | 2019-01-29T09:53:04.000Z | 2022-03-26T10:34:03.000Z | lib/pymedphys/_experimental/streamlit/utilities/icom.py | ethanio12345/pymedphys | cb34c992de8d442eced3385018a194364060092d | [
"Apache-2.0"
] | 1,209 | 2019-01-29T07:52:27.000Z | 2022-03-31T20:11:24.000Z | lib/pymedphys/_experimental/streamlit/utilities/icom.py | ethanio12345/pymedphys | cb34c992de8d442eced3385018a194364060092d | [
"Apache-2.0"
] | 58 | 2019-03-29T09:06:18.000Z | 2022-03-24T07:44:22.000Z | # Copyright (C) 2020 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import lzma
from pymedphys._imports import altair as alt
from pymedphys._imports import numpy as np
from pymedphys._imports import pandas as pd
from pymedphys._imports import streamlit as st
import pymedphys._icom.delivery as pmp_icom_delivery
import pymedphys._icom.extract as pmp_icom_extract
def read_icom_log(filepath):
with lzma.open(filepath, "r") as f:
icom_stream = f.read()
return icom_stream
def get_paths_by_date(icom_patients_directory, selected_date=None):
service_icom_paths = _get_service_icom_paths(icom_patients_directory)
timestamps = _get_file_datetimes(service_icom_paths)
path_dataframe = pd.concat([service_icom_paths, timestamps], axis=1)
timestamp_dates = timestamps.dt.date
dates = pd.Series(pd.unique(timestamp_dates)).sort_values(ascending=False)
if selected_date is None:
selected_date = st.selectbox("Date", list(dates))
selected_paths_by_date = path_dataframe.loc[selected_date == timestamp_dates]
selected_paths_by_date = selected_paths_by_date.sort_values(
"datetime", ascending=False
)
return selected_paths_by_date
def plot_all_relevant_times(all_relevant_times):
for key, data in all_relevant_times.items():
st.write(f"### Machine ID: `{key}`")
plot_relevant_times(data)
def plot_relevant_times(relevant_times, step=5, title=None):
raw_chart = (
alt.Chart(relevant_times)
.mark_bar()
.encode(x=alt.X("datetime", bin=alt.Bin(step=step * 60 * 1000)), y="count()")
)
if title is not None:
raw_chart = raw_chart.properties(title=title)
st.altair_chart(altair_chart=raw_chart, use_container_width=True)
def get_relevant_times_for_filepaths(filepaths):
all_relevant_times_list = collections.defaultdict(lambda: [])
for f in filepaths:
machine_id, relevant_times = _get_relevant_times(f)
filepath_series = pd.Series([f] * len(relevant_times), name="filepath")
if len(filepath_series) != len(relevant_times):
raise ValueError("Expected length to be consistent")
times_and_paths = pd.concat(
[relevant_times.reset_index()["datetime"], filepath_series], axis=1
)
all_relevant_times_list[machine_id].append(times_and_paths)
all_relevant_times = {}
for key, item in all_relevant_times_list.items():
all_relevant_times[key] = pd.concat(item, axis=0)
return all_relevant_times
def _get_service_icom_paths(root_directory):
service_mode_directories = [
item.name
for item in root_directory.glob("*")
# TODO: Fix the hardcoding of patients to search
if item.name.startswith("Deliver")
or item.name.startswith("WLutz")
or "QA" in item.name
]
service_icom_paths = []
for directory in service_mode_directories:
full_path = root_directory.joinpath(directory)
service_icom_paths += list(full_path.glob("*.xz"))
service_icom_paths = pd.Series(service_icom_paths, name="filepath")
return service_icom_paths
def _get_file_datetimes(icom_paths):
filestems = pd.Series([item.stem for item in icom_paths], name="filestem")
timestamps = pd.Series(
pd.to_datetime(filestems, format="%Y%m%d_%H%M%S"), name="datetime"
)
return timestamps
@st.cache(show_spinner=False, suppress_st_warning=True)
def _get_relevant_times(filepath):
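    # A "relevant" time is any sample at which the cumulative meterset increased,
    # i.e. the beam was actually delivering dose. Returns the machine id for the
    # file together with the datetimes of those samples.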
icom_datetime, meterset, machine_id = get_icom_datetimes_meterset_machine(filepath)
machine_id = machine_id.dropna().unique()
if len(machine_id) > 1:
st.write(filepath)
st.write(machine_id)
raise ValueError("Only one machine id per file expected")
if len(machine_id) == 0:
machine_id = None
st.error(
f"The filepath `{filepath}` has no Machine ID. "
"This is unexpected. However, will attempt to continue "
"despite this."
)
else:
machine_id = machine_id[0]
diff_meterset = np.concatenate([[0], np.diff(meterset)])
relevant_rows = diff_meterset > 0
relevant_times = icom_datetime.loc[relevant_rows]
return machine_id, pd.Series(relevant_times, name="datetime")
@st.cache(show_spinner=False)
def get_icom_datetimes_meterset_machine(filepath):
icom_stream = read_icom_log(filepath)
icom_data_points = pmp_icom_extract.get_data_points(icom_stream)
icom_datetime = pd.to_datetime(
pd.Series([item[8:26].decode() for item in icom_data_points], name="datetime"),
format="%Y-%m-%d%H:%M:%S",
)
_adjust_icom_datetime_to_remove_duplicates(icom_datetime)
meterset = pd.Series(
[pmp_icom_extract.extract(item, "Delivery MU")[1] for item in icom_data_points],
name="meterset",
)
machine_id = pd.Series(
[pmp_icom_extract.extract(item, "Machine ID")[1] for item in icom_data_points],
name="machine_id",
)
return icom_datetime, meterset, machine_id
def _adjust_icom_datetime_to_remove_duplicates(icom_datetime):
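    # iCom timestamps only have one-second resolution, so consecutive samples can
    # share the same datetime. Each run of duplicates is spread evenly across that
    # second (modified in place) so the series stays unique and monotonic.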
_, unique_index, unique_counts = np.unique(
icom_datetime, return_index=True, return_counts=True
)
for index, count in zip(unique_index, unique_counts):
if count > 1:
time_delta = datetime.timedelta(seconds=1 / count)
for current_duplicate, icom_index in enumerate(
range(index + 1, index + count)
):
icom_datetime.iloc[icom_index] += time_delta * (current_duplicate + 1)
# TODO: Remove "allow_output_mutation" once determine what is causing
# the issue here.
@st.cache(show_spinner=False, allow_output_mutation=True)
def get_icom_dataset(filepath):
icom_stream = read_icom_log(filepath)
icom_data_points = pmp_icom_extract.get_data_points(icom_stream)
icom_datetime, meterset, machine_id = get_icom_datetimes_meterset_machine(filepath)
raw_delivery_items = pd.DataFrame(
[pmp_icom_delivery.get_delivery_data_items(item) for item in icom_data_points],
columns=["meterset", "gantry", "collimator", "mlc", "jaw"],
)
    if np.any(meterset != raw_delivery_items["meterset"]):
raise ValueError("Expected meterset extractions to agree.")
turn_table = pd.Series(
[
pmp_icom_extract.extract(item, "Table Isocentric")[1]
for item in icom_data_points
],
name="turn_table",
)
energy = pd.Series(
[pmp_icom_extract.extract(item, "Energy")[1] for item in icom_data_points],
name="energy",
)
interlocks = pd.Series(
[pmp_icom_extract.extract(item, "Interlocks")[1] for item in icom_data_points],
name="interlocks",
)
beam_timer = pd.Series(
[pmp_icom_extract.extract(item, "Beam Timer")[1] for item in icom_data_points],
name="beam_timer",
)
width, length, centre_x, centre_y = _determine_width_length_centre(
raw_delivery_items["mlc"], raw_delivery_items["jaw"]
)
icom_dataset = pd.concat(
[
icom_datetime,
machine_id,
energy,
width,
length,
raw_delivery_items[["meterset", "gantry", "collimator"]],
turn_table,
interlocks,
beam_timer,
centre_x,
centre_y,
],
axis=1,
)
return icom_dataset
def _get_mean_unblocked_mlc_pos(mlc, jaw):
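    # For each MLC bank, average the positions of the leaves whose centres fall
    # inside the jaw-defined field. Time points where any unblocked leaf deviates
    # from that mean by more than 0.5 (field not a simple rectangle) are set to NaN.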
mlc_indices = np.arange(80)
leaf_centre_pos = np.array((mlc_indices - 39) * 5 - 2.5)
is_mlc_centre_blocked = np.invert(
(-jaw[:, 0][:, None] <= leaf_centre_pos[None, :])
& (jaw[:, 1][:, None] >= leaf_centre_pos[None, :])
)
mlc[is_mlc_centre_blocked, :] = np.nan
mean_mlc = np.nanmean(mlc, axis=1)
absolute_diff = np.abs(mlc - mean_mlc[:, None, :])
max_absolute_diff = np.nanmax(absolute_diff, axis=1)
mean_mlc[max_absolute_diff > 0.5] = np.nan
return mean_mlc
def _determine_width_length_centre(mlc, jaw):
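    # Derive field width/length and the field-centre offsets from the mean
    # unblocked MLC positions and the jaw openings; wherever the width is
    # undefined (irregular field), the other quantities are blanked to NaN too.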
jaw = np.array(list(jaw))
mlc = np.array(list(mlc))
mean_mlc = _get_mean_unblocked_mlc_pos(mlc, jaw)
width = pd.Series(np.sum(mean_mlc, axis=1), name="width")
length = pd.Series(jaw[:, 0] + jaw[:, 1], name="length")
centre_x = pd.Series((mean_mlc[:, 0] - mean_mlc[:, 1]) / 2, name="centre_x")
centre_y = pd.Series((jaw[:, 0] - jaw[:, 1]) / 2, name="centre_y")
length.loc[np.isnan(width)] = np.nan
centre_x.loc[np.isnan(width)] = np.nan
centre_y.loc[np.isnan(width)] = np.nan
return width, length, centre_x, centre_y
| 31.67931 | 88 | 0.679112 |
677681d0375668a03814dfad90c791bf6890b1e0 | 1,689 | py | Python | probatus/utils/_utils.py | anilkumarpanda/probatus | a6123b4da664dfc4f182dad1baa00e77decf4789 | [
"MIT"
] | null | null | null | probatus/utils/_utils.py | anilkumarpanda/probatus | a6123b4da664dfc4f182dad1baa00e77decf4789 | [
"MIT"
] | null | null | null | probatus/utils/_utils.py | anilkumarpanda/probatus | a6123b4da664dfc4f182dad1baa00e77decf4789 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 ING Bank N.V.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
def class_name_from_object(obj):
return obj.__class__.__name__
def assure_list_of_strings(variable, variable_name):
if isinstance(variable, list):
return variable
elif isinstance(variable, str):
return [variable]
else:
        raise ValueError('{} needs to be either a string or list of strings.'.format(variable_name))
def assure_list_values_allowed(variable, variable_name, allowed_values):
for value in variable:
if value not in allowed_values:
            raise ValueError('Value {} in variable {} is not allowed'.format(value, variable_name))
| 45.648649 | 101 | 0.754885 |
1f80d3e36892460129223c17e3cf423efb9d47ab | 652 | py | Python | adlibre_tms/views.py | adlibre/Adlibre-TMS | 4c8de1e4448203fb267d38ec0f4ec9e64d58a21d | [
"BSD-3-Clause"
] | 26 | 2015-01-06T11:09:18.000Z | 2022-03-16T06:20:53.000Z | adlibre_tms/views.py | adlibre/Adlibre-TMS | 4c8de1e4448203fb267d38ec0f4ec9e64d58a21d | [
"BSD-3-Clause"
] | 4 | 2015-02-26T11:00:35.000Z | 2020-06-05T18:02:02.000Z | adlibre_tms/views.py | adlibre/Adlibre-TMS | 4c8de1e4448203fb267d38ec0f4ec9e64d58a21d | [
"BSD-3-Clause"
] | 16 | 2015-02-08T05:24:38.000Z | 2021-06-13T14:45:30.000Z | from django.conf import settings
from django import http
from django.template import Context, RequestContext, loader
def server_error(request, template_name='500.html'):
"""
500 error handler.
Don't try to render full context. Just minimal.
"""
t = loader.get_template(template_name)
c = Context({
'MEDIA_URL': settings.MEDIA_URL,
'STATIC_URL': settings.STATIC_URL,
})
return http.HttpResponseServerError(t.render(c))
def url_error(request, template_name='404.html'):
t = loader.get_template(template_name)
c = RequestContext(request)
return http.HttpResponseNotFound(t.render(c))
| 25.076923 | 59 | 0.703988 |
00564b8eaf6e8bcc5d672ddd1b49b869e8f92b0e | 8,817 | py | Python | src/planning/recordreplay_planner_nodes/test/recordreplay_planner_node.test.py | QS-L-1992/AutowareAuto | f35a22677cbd2309ecae36d52f83f035cd96b8cb | [
"Apache-2.0"
] | 19 | 2021-05-28T06:14:21.000Z | 2022-03-10T10:03:08.000Z | src/planning/recordreplay_planner_nodes/test/recordreplay_planner_node.test.py | QS-L-1992/AutowareAuto | f35a22677cbd2309ecae36d52f83f035cd96b8cb | [
"Apache-2.0"
] | 222 | 2021-10-29T22:00:27.000Z | 2022-03-29T20:56:34.000Z | src/planning/recordreplay_planner_nodes/test/recordreplay_planner_node.test.py | QS-L-1992/AutowareAuto | f35a22677cbd2309ecae36d52f83f035cd96b8cb | [
"Apache-2.0"
] | 14 | 2021-05-29T14:59:17.000Z | 2022-03-10T10:03:09.000Z | # Copyright 2020 Embotech AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains tests for the record and replay behavior of the containing node.
# I'll start by conceptually documenting the tests I want to add, then implement them.
import unittest
import ament_index_python
import launch
import launch_ros.actions
import launch_testing
import rclpy
from rclpy.node import Node
from rclpy.action import ActionClient
from autoware_auto_msgs.action import RecordTrajectory, ReplayTrajectory
from autoware_auto_msgs.msg import VehicleKinematicState, Trajectory
import subprocess
import time
# Class to publish some dummy states
class MockStatePublisher(Node):
def __init__(self):
super().__init__("MockStatePublisher")
self.publisher_ = self.create_publisher(
VehicleKinematicState, "vehicle_kinematic_state", 10
)
def publish_a_state(self):
msg = VehicleKinematicState()
self.publisher_.publish(msg)
self.get_logger().info("Publishing ego state...")
# Class to listen for trajectories being published and count them
class MockTrajectoryMonitor(Node):
def __init__(self):
super().__init__("MockTrajectoryMonitor")
self.subscription_ = self.create_subscription(
Trajectory, "trajectory", self.listener_callback, 10
)
self.trajectories_seen = 0
def listener_callback(self, msg):
self.get_logger().info('Received: "{}"'.format(msg))
self.trajectories_seen += 1
class MockActionCaller(Node):
def __init__(self, node_name, action_type, action_name):
super().__init__(node_name)
self._action_client = ActionClient(self, action_type, action_name)
self.action_type = action_type
def cancel_done(self, future):
cancel_response = future.result()
if len(cancel_response.goals_canceling) > 0:
self.get_logger().info("Goal successfully canceled")
else:
self.get_logger().info("Goal failed to cancel")
def goal_response_callback(self, future):
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info("Goal rejected :(")
return
self.get_logger().info("Goal accepted :)")
self._goal_handle = goal_handle
def feedback_callback(self, feedback):
self.get_logger().info(
"Received feedback: {0}".format(feedback.feedback.sequence)
)
def manual_cancel(self):
self.get_logger().info("Canceling goal")
cancel_future = self._goal_handle.cancel_goal_async()
cancel_future.add_done_callback(self.cancel_done)
return cancel_future
def send_goal(self):
self.get_logger().info("Waiting for action server...")
self._action_client.wait_for_server()
self.get_logger().info("Sending goal request...")
goal_msg = self.action_type.Goal()
send_goal_future = self._action_client.send_goal_async(
goal_msg, feedback_callback=self.feedback_callback
)
send_goal_future.add_done_callback(self.goal_response_callback)
return send_goal_future
def generate_test_description():
test_nodes = launch_ros.actions.Node(
package="recordreplay_planner_nodes",
executable="recordreplay_planner_node_exe",
name="recordreplay_planner",
parameters=[
"{}/defaults.param.yaml".format(
ament_index_python.get_package_share_directory(
"recordreplay_planner_nodes"
)
)
],
)
# integration test
ld = launch.LaunchDescription(
[
test_nodes,
launch_testing.actions.ReadyToTest(),
]
)
# An array of all the checkers to be enumerated by the tests
return ld
def helper_pub_topic_one(name: str, msgtype: str):
return subprocess.run(["ros2", "topic", "pub", name, msgtype, "-1"])
# Test: Check if "happy case" of recording, then replaying works
class TestBasicUsage(unittest.TestCase):
def test_happy_case_works(self):
# ---- Recording
# - Start recordreplay_planner_node exe (done in launch description)
# - Start a recording action by sending a goal
mock_record_action_caller = MockActionCaller(
"MockRecordCaller", RecordTrajectory, "recordtrajectory"
)
# - Send goal, then wait for the goal sending to complete
send_goal_future = mock_record_action_caller.send_goal()
rclpy.spin_until_future_complete(mock_record_action_caller, send_goal_future)
rclpy.spin_once(mock_record_action_caller, timeout_sec=2) # apparently needed
# - Publish a few VehicleKinematicState messages
mock_publisher = MockStatePublisher()
for k in range(3):
time.sleep(0.1)
mock_publisher.publish_a_state() # FIXME(s.me) This does not appear to get seen
helper_pub_topic_one( # This does get seen
"/vehicle_kinematic_state",
"autoware_auto_msgs/msg/VehicleKinematicState",
)
# - Cancel recording action, then wait for the cancel action to complete
cancel_future = mock_record_action_caller.manual_cancel()
rclpy.spin_until_future_complete(mock_record_action_caller, cancel_future)
rclpy.spin_once(mock_record_action_caller, timeout_sec=2) # apparently needed
# ---- Replaying
# - Listen on the specified trajectory topic, storing to memory
mock_listener = MockTrajectoryMonitor()
# - Send it a "start replaying" action request
mock_replay_action_caller = MockActionCaller(
"MockReplayCaller", ReplayTrajectory, "replaytrajectory"
)
# - Send goal, wait for the goal sending to complete
send_goal_future = mock_replay_action_caller.send_goal()
rclpy.spin_until_future_complete(mock_replay_action_caller, send_goal_future)
rclpy.spin_once(mock_replay_action_caller, timeout_sec=2) # apparently needed
# - Publish a few VehicleKinematicState messages
for k in range(3):
time.sleep(0.1)
# FIXME(s.me) This does not appear to get seen by recordreplay node
mock_publisher.publish_a_state()
helper_pub_topic_one( # This does get seen
"/vehicle_kinematic_state",
"autoware_auto_msgs/msg/VehicleKinematicState",
)
time.sleep(0.1)
# Spin the recording a couple of times, otherwise it'll not reliably process the
# trajectory. FIXME(s.me) this has to be done more systematically, by for example
# having the listener in the launch description itself so it just spins by itself.
for kk in range(3):
rclpy.spin_once(mock_listener, timeout_sec=0.2)
# - Cancel replaying action, then wait for the cancellation to complete
cancel_future = mock_replay_action_caller.manual_cancel()
rclpy.spin_until_future_complete(mock_replay_action_caller, cancel_future)
rclpy.spin_once(mock_replay_action_caller, timeout_sec=2) # apparently needed
# - Verify that the replayed trajectories behaved as expected
# TODO(s.me): Make the mock_listener record what it sees and verify it matches
# expectations.
self.assertEqual(mock_listener.trajectories_seen, 3)
# TODO(s.me): Test: Check if an additional record action is rejected if one is already running
# - Start recordreplay_planner_node exe
# - Use ros2 commandline to send it a "start recording" action request
# - Attempt to start a second recording, verify this is rejected (sketched below)
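# A minimal sketch of that rejection test (assumed structure only; whether a
# second goal is rejected or queued must be confirmed against the action server):
#
#   def test_second_record_goal_rejected(self):
#       first = MockActionCaller("FirstRecordCaller", RecordTrajectory, "recordtrajectory")
#       rclpy.spin_until_future_complete(first, first.send_goal())
#       second = MockActionCaller("SecondRecordCaller", RecordTrajectory, "recordtrajectory")
#       goal_future = second.send_goal()
#       rclpy.spin_until_future_complete(second, goal_future)
#       assert not goal_future.result().accepted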
# TODO(s.me): Test: Check if an additional replay action is rejected if one is already running
# - Start recordreplay_planner_node exe
# - Record a bit of trajectory like in happy case test
# - Use ros2 commandline to send it a "start replaying" action request
# - Attempt to start a second replay, verify this is rejected
# TODO(s.me): Test: Check if replay stops when the trajectory being put out becomes empty.
# This is not implemented in the actual code yet - maybe not stopping but just giving out
# the last one-state trajectory is a better idea.
| 39.186667 | 94 | 0.692639 |
066d6487508f081b22df184285c42ef0ad37c7ab | 1,110 | py | Python | chatBOT-Final/chat_app/venv/lib/python3.6/site-packages/py2neo/packages/neo4j/__init__.py | ashokupd81/Django-Chatbot | 0d199390b22b294830c1a68ad270c688517e7b11 | [
"MIT"
] | 2 | 2019-01-31T21:01:36.000Z | 2022-02-23T01:22:01.000Z | chatBOT-Final/chat_app/venv/lib/python3.6/site-packages/py2neo/packages/neo4j/__init__.py | ashokupd81/Django-Chatbot | 0d199390b22b294830c1a68ad270c688517e7b11 | [
"MIT"
] | 3 | 2019-01-31T10:30:39.000Z | 2019-01-31T10:31:46.000Z | chatBOT-Final/chat_app/venv/lib/python3.6/site-packages/py2neo/packages/neo4j/__init__.py | ashokupd81/Django-Chatbot | 0d199390b22b294830c1a68ad270c688517e7b11 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .meta import version as __version__
# Export current (v1) API. This should be updated to export the latest
# version of the API when a new one is added. This gives the option to
# `import neo4j.vX` for a specific version or `import neo4j` for the
# latest.
from .v1.constants import *
from .v1.exceptions import *
from .v1.session import *
from .v1.types import *
| 34.6875 | 74 | 0.741441 |
ed9e3bb2dbe1be6fad5cf54dd8fbcaa73f064c47 | 22,615 | py | Python | distributed/deploy/tests/test_local.py | jjerphan/distributed | 0ce8f2bcb84d306e9a095d75497857dce30145b5 | [
"BSD-3-Clause"
] | null | null | null | distributed/deploy/tests/test_local.py | jjerphan/distributed | 0ce8f2bcb84d306e9a095d75497857dce30145b5 | [
"BSD-3-Clause"
] | null | null | null | distributed/deploy/tests/test_local.py | jjerphan/distributed | 0ce8f2bcb84d306e9a095d75497857dce30145b5 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, division, absolute_import
from functools import partial
import gc
import subprocess
import sys
from time import sleep
from threading import Lock
import unittest
import weakref
from tornado.ioloop import IOLoop
from tornado import gen
import pytest
from distributed import Client, Worker, Nanny
from distributed.deploy.local import LocalCluster, nprocesses_nthreads
from distributed.metrics import time
from distributed.utils_test import (
inc,
gen_test,
slowinc,
assert_cannot_connect,
assert_can_connect_locally_4,
assert_can_connect_from_everywhere_4,
assert_can_connect_from_everywhere_4_6,
captured_logger,
)
from distributed.utils_test import loop # noqa: F401
from distributed.utils import sync
from distributed.worker import TOTAL_MEMORY
from distributed.deploy.utils_test import ClusterTest
def test_simple(loop):
with LocalCluster(
4,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
loop=loop,
) as c:
with Client(c) as e:
x = e.submit(inc, 1)
x.result()
assert x.key in c.scheduler.tasks
assert any(w.data == {x.key: 2} for w in c.workers)
assert e.loop is c.loop
def test_local_cluster_supports_blocked_handlers(loop):
with LocalCluster(blocked_handlers=["run_function"], n_workers=0, loop=loop) as c:
with Client(c) as client:
with pytest.raises(ValueError) as exc:
client.run_on_scheduler(lambda x: x, 42)
assert "'run_function' handler has been explicitly disallowed in Scheduler" in str(
exc.value
)
@pytest.mark.skipif("sys.version_info[0] == 2", reason="fork issues")
def test_close_twice():
with LocalCluster() as cluster:
with Client(cluster.scheduler_address) as client:
f = client.map(inc, range(100))
client.gather(f)
with captured_logger("tornado.application") as log:
cluster.close()
cluster.close()
sleep(0.5)
log = log.getvalue()
assert not log
@pytest.mark.skipif("sys.version_info[0] == 2", reason="multi-loop")
def test_procs():
with LocalCluster(
2,
scheduler_port=0,
processes=False,
threads_per_worker=3,
dashboard_address=None,
silence_logs=False,
) as c:
assert len(c.workers) == 2
assert all(isinstance(w, Worker) for w in c.workers)
with Client(c.scheduler.address) as e:
assert all(w.ncores == 3 for w in c.workers)
assert all(isinstance(w, Worker) for w in c.workers)
repr(c)
with LocalCluster(
2,
scheduler_port=0,
processes=True,
threads_per_worker=3,
dashboard_address=None,
silence_logs=False,
) as c:
assert len(c.workers) == 2
assert all(isinstance(w, Nanny) for w in c.workers)
with Client(c.scheduler.address) as e:
assert all(v == 3 for v in e.ncores().values())
c.start_worker()
assert all(isinstance(w, Nanny) for w in c.workers)
repr(c)
def test_move_unserializable_data():
"""
Test that unserializable data is still fine to transfer over inproc
transports.
"""
with LocalCluster(
processes=False, silence_logs=False, dashboard_address=None
) as cluster:
assert cluster.scheduler_address.startswith("inproc://")
assert cluster.workers[0].address.startswith("inproc://")
with Client(cluster) as client:
lock = Lock()
x = client.scatter(lock)
y = client.submit(lambda x: x, x)
assert y.result() is lock
def test_transports_inproc():
"""
Test the transport chosen by LocalCluster depending on arguments.
"""
with LocalCluster(
1, processes=False, silence_logs=False, dashboard_address=None
) as c:
assert c.scheduler_address.startswith("inproc://")
assert c.workers[0].address.startswith("inproc://")
with Client(c.scheduler.address) as e:
assert e.submit(inc, 4).result() == 5
def test_transports_tcp():
# Have nannies => need TCP
with LocalCluster(
1, processes=True, silence_logs=False, dashboard_address=None
) as c:
assert c.scheduler_address.startswith("tcp://")
assert c.workers[0].address.startswith("tcp://")
with Client(c.scheduler.address) as e:
assert e.submit(inc, 4).result() == 5
def test_transports_tcp_port():
# Scheduler port specified => need TCP
with LocalCluster(
1,
processes=False,
scheduler_port=8786,
silence_logs=False,
dashboard_address=None,
) as c:
assert c.scheduler_address == "tcp://127.0.0.1:8786"
assert c.workers[0].address.startswith("tcp://")
with Client(c.scheduler.address) as e:
assert e.submit(inc, 4).result() == 5
@pytest.mark.skipif("sys.version_info[0] == 2", reason="")
class LocalTest(ClusterTest, unittest.TestCase):
Cluster = partial(LocalCluster, silence_logs=False, dashboard_address=None)
kwargs = {"dashboard_address": None}
@pytest.mark.skipif("sys.version_info[0] == 2", reason="")
def test_Client_with_local(loop):
with LocalCluster(
1, scheduler_port=0, silence_logs=False, dashboard_address=None, loop=loop
) as c:
with Client(c) as e:
assert len(e.ncores()) == len(c.workers)
assert c.scheduler_address in repr(c)
def test_Client_solo(loop):
with Client(loop=loop, silence_logs=False) as c:
pass
assert c.cluster.status == "closed"
@gen_test()
def test_duplicate_clients():
pytest.importorskip("bokeh")
c1 = yield Client(processes=False, silence_logs=False, dashboard_address=9876)
with pytest.warns(Exception) as info:
c2 = yield Client(processes=False, silence_logs=False, dashboard_address=9876)
assert "bokeh" in c1.cluster.scheduler.services
assert "bokeh" in c2.cluster.scheduler.services
assert any(
all(
word in str(msg.message).lower()
for word in ["9876", "running", "already in use"]
)
for msg in info.list
)
yield c1.close()
def test_Client_kwargs(loop):
with Client(loop=loop, processes=False, n_workers=2, silence_logs=False) as c:
assert len(c.cluster.workers) == 2
assert all(isinstance(w, Worker) for w in c.cluster.workers)
assert c.cluster.status == "closed"
def test_Client_twice(loop):
with Client(loop=loop, silence_logs=False, dashboard_address=None) as c:
with Client(loop=loop, silence_logs=False, dashboard_address=None) as f:
assert c.cluster.scheduler.port != f.cluster.scheduler.port
@pytest.mark.skipif("sys.version_info[0] == 2", reason="fork issues")
def test_defaults():
from distributed.worker import _ncores
with LocalCluster(
scheduler_port=0, silence_logs=False, dashboard_address=None
) as c:
assert sum(w.ncores for w in c.workers) == _ncores
assert all(isinstance(w, Nanny) for w in c.workers)
with LocalCluster(
processes=False, scheduler_port=0, silence_logs=False, dashboard_address=None
) as c:
assert sum(w.ncores for w in c.workers) == _ncores
assert all(isinstance(w, Worker) for w in c.workers)
assert len(c.workers) == 1
with LocalCluster(
n_workers=2, scheduler_port=0, silence_logs=False, dashboard_address=None
) as c:
if _ncores % 2 == 0:
expected_total_threads = max(2, _ncores)
else:
# n_workers not a divisor of _ncores => threads are overcommitted
expected_total_threads = max(2, _ncores + 1)
assert sum(w.ncores for w in c.workers) == expected_total_threads
with LocalCluster(
threads_per_worker=_ncores * 2,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
) as c:
assert len(c.workers) == 1
with LocalCluster(
n_workers=_ncores * 2,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
) as c:
assert all(w.ncores == 1 for w in c.workers)
with LocalCluster(
threads_per_worker=2,
n_workers=3,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
) as c:
assert len(c.workers) == 3
assert all(w.ncores == 2 for w in c.workers)
def test_worker_params():
with LocalCluster(
n_workers=2,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
memory_limit=500,
) as c:
assert [w.memory_limit for w in c.workers] == [500] * 2
def test_memory_limit_none():
with LocalCluster(
n_workers=2,
scheduler_port=0,
silence_logs=False,
processes=False,
dashboard_address=None,
memory_limit=None,
) as c:
w = c.workers[0]
assert type(w.data) is dict
assert w.memory_limit is None
def test_cleanup():
c = LocalCluster(2, scheduler_port=0, silence_logs=False, dashboard_address=None)
port = c.scheduler.port
c.close()
c2 = LocalCluster(
2, scheduler_port=port, silence_logs=False, dashboard_address=None
)
    c2.close()
def test_repeated():
with LocalCluster(
0, scheduler_port=8448, silence_logs=False, dashboard_address=None
) as c:
pass
with LocalCluster(
0, scheduler_port=8448, silence_logs=False, dashboard_address=None
) as c:
pass
@pytest.mark.parametrize("processes", [True, False])
def test_bokeh(loop, processes):
pytest.importorskip("bokeh")
requests = pytest.importorskip("requests")
with LocalCluster(
n_workers=0,
scheduler_port=0,
silence_logs=False,
loop=loop,
processes=processes,
dashboard_address=0,
) as c:
bokeh_port = c.scheduler.services["bokeh"].port
url = "http://127.0.0.1:%d/status/" % bokeh_port
start = time()
while True:
response = requests.get(url)
if response.ok:
break
assert time() < start + 20
sleep(0.01)
# 'localhost' also works
response = requests.get("http://localhost:%d/status/" % bokeh_port)
assert response.ok
with pytest.raises(requests.RequestException):
requests.get(url, timeout=0.2)
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Unknown")
def test_blocks_until_full(loop):
with Client(loop=loop) as c:
assert len(c.ncores()) > 0
@gen_test()
def test_scale_up_and_down():
loop = IOLoop.current()
cluster = yield LocalCluster(
0,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
loop=loop,
asynchronous=True,
)
c = yield Client(cluster, asynchronous=True)
assert not cluster.workers
yield cluster.scale_up(2)
assert len(cluster.workers) == 2
assert len(cluster.scheduler.ncores) == 2
addr = cluster.workers[0].address
yield cluster.scale_down([addr])
assert len(cluster.workers) == 1
assert addr not in cluster.scheduler.ncores
yield c.close()
yield cluster.close()
def test_silent_startup():
code = """if 1:
from time import sleep
from distributed import LocalCluster
with LocalCluster(1, dashboard_address=None, scheduler_port=0):
sleep(1.5)
"""
out = subprocess.check_output(
[sys.executable, "-Wi", "-c", code], stderr=subprocess.STDOUT
)
out = out.decode()
try:
assert not out
except AssertionError:
print("=== Cluster stdout / stderr ===")
print(out)
raise
def test_only_local_access(loop):
with LocalCluster(
0, scheduler_port=0, silence_logs=False, dashboard_address=None, loop=loop
) as c:
sync(loop, assert_can_connect_locally_4, c.scheduler.port)
def test_remote_access(loop):
with LocalCluster(
0,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
host="",
loop=loop,
) as c:
sync(loop, assert_can_connect_from_everywhere_4_6, c.scheduler.port)
@pytest.mark.parametrize("n_workers", [None, 3])
def test_memory(loop, n_workers):
with LocalCluster(
n_workers=n_workers,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
loop=loop,
) as cluster:
assert sum(w.memory_limit for w in cluster.workers) <= TOTAL_MEMORY
@pytest.mark.parametrize("n_workers", [None, 3])
def test_memory_nanny(loop, n_workers):
with LocalCluster(
n_workers=n_workers,
scheduler_port=0,
processes=True,
silence_logs=False,
dashboard_address=None,
loop=loop,
) as cluster:
with Client(cluster.scheduler_address, loop=loop) as c:
info = c.scheduler_info()
assert (
sum(w["memory_limit"] for w in info["workers"].values()) <= TOTAL_MEMORY
)
def test_death_timeout_raises(loop):
with pytest.raises(gen.TimeoutError):
with LocalCluster(
scheduler_port=0,
silence_logs=False,
death_timeout=1e-10,
dashboard_address=None,
loop=loop,
) as cluster:
pass
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Unknown")
def test_bokeh_kwargs(loop):
pytest.importorskip("bokeh")
with LocalCluster(
n_workers=0,
scheduler_port=0,
silence_logs=False,
loop=loop,
dashboard_address=0,
service_kwargs={"bokeh": {"prefix": "/foo"}},
) as c:
bs = c.scheduler.services["bokeh"]
assert bs.prefix == "/foo"
def test_io_loop_periodic_callbacks(loop):
with LocalCluster(loop=loop, silence_logs=False) as cluster:
assert cluster.scheduler.loop is loop
for pc in cluster.scheduler.periodic_callbacks.values():
assert pc.io_loop is loop
for worker in cluster.workers:
for pc in worker.periodic_callbacks.values():
assert pc.io_loop is loop
def test_logging():
"""
Workers and scheduler have logs even when silenced
"""
with LocalCluster(1, processes=False, dashboard_address=None) as c:
assert c.scheduler._deque_handler.deque
assert c.workers[0]._deque_handler.deque
def test_ipywidgets(loop):
ipywidgets = pytest.importorskip("ipywidgets")
with LocalCluster(
n_workers=0,
scheduler_port=0,
silence_logs=False,
loop=loop,
dashboard_address=False,
processes=False,
) as cluster:
cluster._ipython_display_()
box = cluster._cached_widget
assert isinstance(box, ipywidgets.Widget)
def test_scale(loop):
""" Directly calling scale both up and down works as expected """
with LocalCluster(
scheduler_port=0,
silence_logs=False,
loop=loop,
dashboard_address=False,
processes=False,
n_workers=0,
) as cluster:
assert not cluster.scheduler.workers
cluster.scale(3)
start = time()
while len(cluster.scheduler.workers) != 3:
sleep(0.01)
assert time() < start + 5, len(cluster.scheduler.workers)
sleep(0.2) # let workers settle # TODO: remove need for this
cluster.scale(2)
start = time()
while len(cluster.scheduler.workers) != 2:
sleep(0.01)
assert time() < start + 5, len(cluster.scheduler.workers)
def test_adapt(loop):
with LocalCluster(
scheduler_port=0,
silence_logs=False,
loop=loop,
dashboard_address=False,
processes=False,
n_workers=0,
) as cluster:
cluster.adapt(minimum=0, maximum=2, interval="10ms")
assert cluster._adaptive.minimum == 0
assert cluster._adaptive.maximum == 2
ref = weakref.ref(cluster._adaptive)
cluster.adapt(minimum=1, maximum=2, interval="10ms")
assert cluster._adaptive.minimum == 1
gc.collect()
# the old Adaptive class sticks around, not sure why
# start = time()
# while ref():
# sleep(0.01)
# gc.collect()
# assert time() < start + 5
start = time()
while len(cluster.scheduler.workers) != 1:
sleep(0.01)
assert time() < start + 5
def test_adapt_then_manual(loop):
""" We can revert from adaptive, back to manual """
with LocalCluster(
scheduler_port=0,
silence_logs=False,
loop=loop,
dashboard_address=False,
processes=False,
n_workers=8,
) as cluster:
sleep(0.1)
cluster.adapt(minimum=0, maximum=4, interval="10ms")
start = time()
while cluster.scheduler.workers or cluster.workers:
sleep(0.1)
assert time() < start + 5
assert not cluster.workers
with Client(cluster) as client:
futures = client.map(slowinc, range(1000), delay=0.1)
sleep(0.2)
cluster._adaptive.stop()
sleep(0.2)
cluster.scale(2)
start = time()
while len(cluster.scheduler.workers) != 2:
sleep(0.1)
assert time() < start + 5
def test_local_tls(loop):
from distributed.utils_test import tls_only_security
security = tls_only_security()
with LocalCluster(
n_workers=0,
scheduler_port=8786,
silence_logs=False,
security=security,
dashboard_address=False,
host="tls://0.0.0.0",
loop=loop,
) as c:
sync(
loop,
assert_can_connect_from_everywhere_4,
c.scheduler.port,
connection_args=security.get_connection_args("client"),
protocol="tls",
timeout=3,
)
        # If we connect to a TLS LocalCluster without SSL information we should fail
sync(
loop,
assert_cannot_connect,
addr="tcp://127.0.0.1:%d" % c.scheduler.port,
connection_args=security.get_connection_args("client"),
exception_class=RuntimeError,
)
@gen_test()
def test_scale_retires_workers():
class MyCluster(LocalCluster):
def scale_down(self, *args, **kwargs):
pass
loop = IOLoop.current()
cluster = yield MyCluster(
0,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
loop=loop,
asynchronous=True,
)
c = yield Client(cluster, asynchronous=True)
assert not cluster.workers
yield cluster.scale(2)
start = time()
while len(cluster.scheduler.workers) != 2:
yield gen.sleep(0.01)
assert time() < start + 3
yield cluster.scale(1)
start = time()
while len(cluster.scheduler.workers) != 1:
yield gen.sleep(0.01)
assert time() < start + 3
yield c.close()
yield cluster.close()
def test_local_tls_restart(loop):
from distributed.utils_test import tls_only_security
security = tls_only_security()
with LocalCluster(
n_workers=1,
scheduler_port=8786,
silence_logs=False,
security=security,
dashboard_address=False,
host="tls://0.0.0.0",
loop=loop,
) as c:
with Client(c.scheduler.address, loop=loop, security=security) as client:
print(c.workers, c.workers[0].address)
workers_before = set(client.scheduler_info()["workers"])
assert client.submit(inc, 1).result() == 2
client.restart()
workers_after = set(client.scheduler_info()["workers"])
assert client.submit(inc, 2).result() == 3
assert workers_before != workers_after
def test_default_process_thread_breakdown():
assert nprocesses_nthreads(1) == (1, 1)
assert nprocesses_nthreads(4) == (4, 1)
assert nprocesses_nthreads(5) == (5, 1)
assert nprocesses_nthreads(8) == (4, 2)
assert nprocesses_nthreads(12) in ((6, 2), (4, 3))
assert nprocesses_nthreads(20) == (5, 4)
assert nprocesses_nthreads(24) in ((6, 4), (8, 3))
assert nprocesses_nthreads(32) == (8, 4)
assert nprocesses_nthreads(40) in ((8, 5), (10, 4))
assert nprocesses_nthreads(80) in ((10, 8), (16, 5))
def test_asynchronous_property(loop):
with LocalCluster(
4,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
loop=loop,
) as cluster:
@gen.coroutine
def _():
assert cluster.asynchronous
cluster.sync(_)
def test_protocol_inproc(loop):
with LocalCluster(protocol="inproc://", loop=loop, processes=False) as cluster:
assert cluster.scheduler.address.startswith("inproc://")
def test_protocol_tcp(loop):
with LocalCluster(
protocol="tcp", loop=loop, n_workers=0, processes=False
) as cluster:
assert cluster.scheduler.address.startswith("tcp://")
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
def test_protocol_ip(loop):
with LocalCluster(
host="tcp://127.0.0.2", loop=loop, n_workers=0, processes=False
) as cluster:
assert cluster.scheduler.address.startswith("tcp://127.0.0.2")
class MyWorker(Worker):
pass
def test_worker_class_worker(loop):
with LocalCluster(
n_workers=2,
loop=loop,
worker_class=MyWorker,
processes=False,
scheduler_port=0,
dashboard_address=None,
) as cluster:
assert all(isinstance(w, MyWorker) for w in cluster.workers)
def test_worker_class_nanny(loop):
class MyNanny(Nanny):
pass
with LocalCluster(
n_workers=2,
loop=loop,
worker_class=MyNanny,
scheduler_port=0,
dashboard_address=None,
) as cluster:
assert all(isinstance(w, MyNanny) for w in cluster.workers)
if sys.version_info >= (3, 5):
from distributed.deploy.tests.py3_test_deploy import * # noqa F401
| 28.482368 | 88 | 0.624983 |
b500d10a6555275dc0645e410660fb9d4898d109 | 3,513 | py | Python | ucsmsdk/mometa/policy/PolicyControlledInstance.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/policy/PolicyControlledInstance.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/policy/PolicyControlledInstance.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for PolicyControlledInstance ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class PolicyControlledInstanceConsts:
RESOLVE_TYPE_NAME = "name"
RESOLVE_TYPE_RN = "rn"
class PolicyControlledInstance(ManagedObject):
"""This is PolicyControlledInstance class."""
consts = PolicyControlledInstanceConsts()
naming_props = set([u'type', u'name'])
mo_meta = MoMeta("PolicyControlledInstance", "policyControlledInstance", "ctrlled-[type]-inst-[name]", VersionMeta.Version211a, "InputOutput", 0x1ff, [], ["admin"], [u'policyCommunication', u'policyConfigBackup', u'policyDateTime', u'policyDiscovery', u'policyDns', u'policyEquipment', u'policyFault', u'policyInfraFirmware', u'policyMEp', u'policyMonitoring', u'policyPortConfig', u'policyPowerMgmt', u'policyPsu', u'policySecurity', u'policyStorageAutoConfig'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"def_dn": MoPropertyMeta("def_dn", "defDn", "string", VersionMeta.Version211a, MoPropertyMeta.CREATE_ONLY, 0x4, 0, 256, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"external_resolve_name": MoPropertyMeta("external_resolve_name", "externalResolveName", "string", VersionMeta.Version211a, MoPropertyMeta.CREATE_ONLY, 0x10, 0, 510, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version211a, MoPropertyMeta.NAMING, 0x20, 1, 510, None, [], []),
"resolve_type": MoPropertyMeta("resolve_type", "resolveType", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["name", "rn"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x40, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x80, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version211a, MoPropertyMeta.NAMING, 0x100, 1, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"defDn": "def_dn",
"dn": "dn",
"externalResolveName": "external_resolve_name",
"name": "name",
"resolveType": "resolve_type",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"type": "type",
}
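    # Illustrative construction sketch (added comment; the parent dn and values are
    # hypothetical, not taken from the SDK documentation):
    #     mo = PolicyControlledInstance(parent_mo_or_dn="org-root", type="fault", name="default")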
def __init__(self, parent_mo_or_dn, type, name, **kwargs):
self._dirty_mask = 0
self.type = type
self.name = name
self.child_action = None
self.def_dn = None
self.external_resolve_name = None
self.resolve_type = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "PolicyControlledInstance", parent_mo_or_dn, **kwargs)
| 59.542373 | 479 | 0.66809 |
5ae499b859391390ed5e07585317e4d6fa07939f | 300 | py | Python | paraVerComoFuncionaAlgumasCoisas/alura/1-python3IntroOrientacaoObjetos/cliente.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | paraVerComoFuncionaAlgumasCoisas/alura/1-python3IntroOrientacaoObjetos/cliente.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | paraVerComoFuncionaAlgumasCoisas/alura/1-python3IntroOrientacaoObjetos/cliente.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | class Cliente:
def __init__(self, nome) -> None:
self.__nome = nome
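    # Added note: the double underscore name-mangles the attribute to _Cliente__nome,
    # so outside code reads and writes it through the property and setter below.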
@property
def nome(self):
print('chamando property')
return self.__nome.title()
@nome.setter
def nome(self, novoNome):
print('chamando setter')
self.__nome = novoNome | 23.076923 | 37 | 0.586667 |
eed52d0d905b8fc11f24e0c03a36812f9219377c | 6,603 | py | Python | ode4jax/_src/ODEBase/controllers/controllers.py | PhilipVinc/netket_dynamics | 6e8009098c279271cb0f289ba9e85c039bb284e4 | [
"Apache-2.0"
] | 2 | 2021-10-02T20:29:44.000Z | 2021-10-02T20:38:28.000Z | ode4jax/_src/ODEBase/controllers/controllers.py | PhilipVinc/netket_dynamics | 6e8009098c279271cb0f289ba9e85c039bb284e4 | [
"Apache-2.0"
] | 11 | 2021-10-01T09:15:06.000Z | 2022-03-21T09:19:23.000Z | ode4jax/_src/ODEBase/controllers/controllers.py | PhilipVinc/netket_dynamics | 6e8009098c279271cb0f289ba9e85c039bb284e4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional, Tuple, Type
from numbers import Number
from plum import dispatch
from builtins import RuntimeError, next
import dataclasses
from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
from netket.utils import struct
from netket.utils.types import Array, PyTree
from ode4jax.base import AbstractIntegrator, AbstractSolution, alg_cache
from ..problem import ODEProblem
from ..solution import ODESolution
from ..options import DEOptions
from ..utils import strong_dtype
from .base import AbstractController
dtype = jnp.float64
# Standard integral (I) step size controller
class IController(AbstractController):
"""
IController()
The standard (integral) controller is the most basic step size controller.
This controller is usually the first one introduced in numerical analysis classes
but should only be used rarely in practice because of efficiency problems for
many problems/algorithms.
Construct an integral (I) step size controller adapting the time step
based on the formula
```
Δtₙ₊₁ = εₙ₊₁^(1/k) * Δtₙ
```
where `k = get_current_adaptive_order(alg, integrator.cache) + 1` and `εᵢ` is the
inverse of the error estimate `integrator.EEst` scaled by the tolerance
(Hairer, Nørsett, Wanner, 2008, Section II.4).
The step size factor is multiplied by the safety factor `gamma` and clipped to
the interval `[qmin, qmax]`.
A step will be accepted whenever the estimated error `integrator.EEst` is
less than or equal to unity. Otherwise, the step is rejected and re-tried with
the predicted step size.
## References
- Hairer, Nørsett, Wanner (2008)
Solving Ordinary Differential Equations I Nonstiff Problems
[DOI: 10.1007/978-3-540-78862-1](https://doi.org/10.1007/978-3-540-78862-1)
"""
@dispatch
def stepsize_controller(self, integrator, alg):
""" """
qmin = integrator.opts.qmin
qmax = integrator.opts.qmax
gamma = integrator.opts.gamma
EEst = integrator.EEst
expo = 1 / (get_current_adaptive_order(alg, integrator.cache) + 1)
qtmp = EEst ** expo / gamma
q = jnp.maximum(1 / qmax, jnp.minimum(1 / qmin, qtmp))
integrator.qold = integrator.dt / q
return q
@dispatch
def accept_step_controller(self, integrator):
"""
        Checks whether the controller should accept a step based on the current
error estimate
"""
return integrator.EEst <= 1
@dispatch
def step_accept_controller(self, integrator, alg, q):
qsteady_min = integrator.opts.qsteady_min
qsteady_max = integrator.opts.qsteady_max
isok = (qsteady_min <= q) & (q <= qsteady_max)
q = jnp.where(isok, jnp.ones_like(q), q)
return integrator.dt / q
@dispatch
def step_reject_controller(self, integrator, alg):
integrator.dt = integrator.qold
# Standard integral (I) step size controller
@struct.dataclass
class PIController(AbstractController):
"""
PIController(beta1, beta2)
The proportional-integral (PI) controller is a widespread step size controller
with improved stability properties compared to the [`IController`](@ref).
This controller is the default for most algorithms in OrdinaryDiffEq.jl.
Construct a PI step size controller adapting the time step based on the formula
```
Δtₙ₊₁ = εₙ₊₁^β₁ * εₙ^β₂ * Δtₙ
```
where `εᵢ` are inverses of the error estimates scaled by the tolerance
(Hairer, Nørsett, Wanner, 2010, Section IV.2).
The step size factor is multiplied by the safety factor `gamma` and clipped to
the interval `[qmin, qmax]`.
A step will be accepted whenever the estimated error `integrator.EEst` is
less than or equal to unity. Otherwise, the step is rejected and re-tried with
the predicted step size.
!!! note
The coefficients `beta1, beta2` are not scaled by the order of the method,
in contrast to the [`PIDController`](@ref). For the `PIController`, this
scaling by the order must be done when the controller is constructed.
## References
- Hairer, Nørsett, Wanner (2010)
Solving Ordinary Differential Equations II Stiff and Differential-Algebraic Problems
[DOI: 10.1007/978-3-642-05221-7](https://doi.org/10.1007/978-3-642-05221-7)
- Hairer, Nørsett, Wanner (2008)
Solving Ordinary Differential Equations I Nonstiff Problems
[DOI: 10.1007/978-3-540-78862-1](https://doi.org/10.1007/978-3-540-78862-1)
"""
beta1: float
beta2: float
@dispatch
def stepsize_controller(self, integrator, alg):
""" """
qmin = integrator.opts.qmin
qmax = integrator.opts.qmax
gamma = integrator.opts.gamma
qold = integrator.qold
EEst = integrator.EEst
q11 = EEst ** self.beta1
q = q11 / qold ** self.beta2
integrator.q11 = q11
q = jnp.maximum(1 / qmax, jnp.minimum(1 / qmin, q / gamma))
return q
@dispatch
def accept_step_controller(self, integrator):
"""
        Checks whether the controller should accept a step based on the current
error estimate
"""
return integrator.EEst <= 1
@dispatch
def step_accept_controller(self, integrator, alg, q):
qsteady_min = integrator.opts.qsteady_min
qsteady_max = integrator.opts.qsteady_max
qoldinit = integrator.opts.qoldinit
EEst = integrator.EEst
isok = (qsteady_min <= q) & (q <= qsteady_max)
q = jnp.where(isok, jnp.ones_like(q), q)
integrator.qold = jnp.maximum(EEst, qoldinit)
return integrator.dt / q
@dispatch
def step_reject_controller(self, integrator, alg):
qmin = integrator.opts.qmin
gamma = integrator.opts.gamma
q11 = integrator.q11
integrator.dt = integrator.dt / jnp.minimum(1 / qmin, q11 / gamma)
| 35.31016 | 90 | 0.685749 |
2ceed0745c1f3eccf9190a4a095693cec3cdcb2e | 2,611 | py | Python | lib/rucio/clis/daemons/reaper/reaper.py | efajardo/rucio | 460f394715568b937584ef671382b2b93add1758 | [
"Apache-2.0"
] | null | null | null | lib/rucio/clis/daemons/reaper/reaper.py | efajardo/rucio | 460f394715568b937584ef671382b2b93add1758 | [
"Apache-2.0"
] | null | null | null | lib/rucio/clis/daemons/reaper/reaper.py | efajardo/rucio | 460f394715568b937584ef671382b2b93add1758 | [
"Apache-2.0"
] | 1 | 2021-06-17T14:15:15.000Z | 2021-06-17T14:15:15.000Z | # Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne, <vgaronne@gmail.com>, 2012-2018
# - Wen Guan, <wguan.icedew@gmail.com>, 2014
"""
Reaper is a daemon to manage file deletion
"""
import argparse
import signal
from rucio.daemons.reaper.reaper import run, stop
def get_parser():
"""
Returns the argparse parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--run-once", action="store_true", default=False, help='One iteration only')
parser.add_argument("--total-workers", action="store", default=1, type=int, help='Total number of workers per process')
parser.add_argument("--threads-per-worker", action="store", default=None, type=int, help='Total number of threads created by each worker')
parser.add_argument("--chunk-size", action="store", default=10, type=int, help='Chunk size')
parser.add_argument("--scheme", action="store", default=None, type=str, help='Force the reaper to use a particular protocol, e.g., mock.')
parser.add_argument('--greedy', action='store_true', default=False, help='Greedy mode')
parser.add_argument('--exclude-rses', action="store", default=None, type=str, help='RSEs expression to exclude RSEs')
parser.add_argument('--include-rses', action="store", default=None, type=str, help='RSEs expression to include RSEs')
parser.add_argument('--rses', nargs='+', type=str, help='List of RSEs')
parser.add_argument('--delay-seconds', action="store", default=3600, type=int, help='Delay to retry failed deletion')
return parser
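# Example invocation (added illustration; the module path and flag values are
# assumptions that depend on the deployment):
#     python -m rucio.clis.daemons.reaper.reaper --run-once --total-workers 2 --greedy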
def main():
signal.signal(signal.SIGTERM, stop)
parser = get_parser()
args = parser.parse_args()
try:
run(total_workers=args.total_workers, chunk_size=args.chunk_size, greedy=args.greedy,
once=args.run_once, scheme=args.scheme, rses=args.rses, threads_per_worker=args.threads_per_worker,
exclude_rses=args.exclude_rses, include_rses=args.include_rses, delay_seconds=args.delay_seconds)
except KeyboardInterrupt:
stop()
| 45.017241 | 142 | 0.723861 |
abe4a02627c231163599a0c50ab334a6cbbfc7c8 | 18,840 | py | Python | AutomatedTesting/Gem/PythonTests/Physics/TestSuite_Main.py | ganeshbelgur/o3de | cdae80d7f1ac13f21c7b1c03803dca205929ef09 | [
"Apache-2.0",
"MIT"
] | null | null | null | AutomatedTesting/Gem/PythonTests/Physics/TestSuite_Main.py | ganeshbelgur/o3de | cdae80d7f1ac13f21c7b1c03803dca205929ef09 | [
"Apache-2.0",
"MIT"
] | null | null | null | AutomatedTesting/Gem/PythonTests/Physics/TestSuite_Main.py | ganeshbelgur/o3de | cdae80d7f1ac13f21c7b1c03803dca205929ef09 | [
"Apache-2.0",
"MIT"
] | null | null | null | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import pytest
import os
import sys
import inspect
from ly_test_tools import LAUNCHERS
from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorParallelTest, EditorTestSuite
from .utils.FileManagement import FileManagement as fm
# Custom test spec that provides functionality to override files
class EditorSingleTest_WithFileOverrides(EditorSingleTest):
# Specify here what files to override, [(original, override), ...]
files_to_override = [()]
# Base directory of the files (Default path is {ProjectName})
base_dir = None
    # True will search sub-directories for the files in base_dir
search_subdirs = True
@classmethod
def wrap_run(cls, instance, request, workspace, editor, editor_test_results, launcher_platform):
root_path = cls.base_dir
if root_path is not None:
root_path = os.path.join(workspace.paths.engine_root(), root_path)
else:
# Default to project folder
root_path = workspace.paths.project()
# Try to locate both target and source files
original_file_list, override_file_list = zip(*cls.files_to_override)
try:
file_list = fm._find_files(original_file_list + override_file_list, root_path, cls.search_subdirs)
except RuntimeWarning as w:
assert False, (
w.message
+ " Please check use of search_subdirs; make sure you are using the correct parent directory."
)
for f in original_file_list:
fm._restore_file(f, file_list[f])
fm._backup_file(f, file_list[f])
for original, override in cls.files_to_override:
fm._copy_file(override, file_list[override], original, file_list[original])
yield # Run Test
for f in original_file_list:
fm._restore_file(f, file_list[f])
@pytest.mark.SUITE_main
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestAutomation(EditorTestSuite):
global_extra_cmdline_args = ['-BatchMode', '-autotest_mode',
'--regset=/Amazon/Preferences/EnablePrefabSystem=true']
@staticmethod
def get_number_parallel_editors():
return 16
#########################################
# Non-atomic tests: These need to be run in a single editor because they have custom setup and teardown
class Material_DynamicFriction(EditorSingleTest_WithFileOverrides):
from .tests.material import Material_DynamicFriction as test_module
files_to_override = [
('physxsystemconfiguration.setreg', 'Material_DynamicFriction.setreg_override')
]
base_dir = "AutomatedTesting/Registry"
class Collider_DiffCollisionGroupDiffCollidingLayersNotCollide(EditorSingleTest_WithFileOverrides):
from .tests.collider import Collider_DiffCollisionGroupDiffCollidingLayersNotCollide as test_module
files_to_override = [
('physxsystemconfiguration.setreg', 'Collider_DiffCollisionGroupDiffCollidingLayersNotCollide.setreg_override')
]
base_dir = "AutomatedTesting/Registry"
#########################################
class RigidBody_EnablingGravityWorksUsingNotificationsPoC(EditorSharedTest):
from .tests.rigid_body import RigidBody_EnablingGravityWorksUsingNotificationsPoC as test_module
class ForceRegion_LocalSpaceForceOnRigidBodies(EditorSharedTest):
from .tests.force_region import ForceRegion_LocalSpaceForceOnRigidBodies as test_module
class Collider_SameCollisionGroupDiffLayersCollide(EditorSharedTest):
from .tests.collider import Collider_SameCollisionGroupDiffLayersCollide as test_module
class CharacterController_SwitchLevels(EditorSharedTest):
from .tests.character_controller import CharacterController_SwitchLevels as test_module
class Ragdoll_AddPhysxRagdollComponentWorks(EditorSharedTest):
from .tests.ragdoll import Ragdoll_AddPhysxRagdollComponentWorks as test_module
class ScriptCanvas_MultipleRaycastNode(EditorSharedTest):
        # Fixme: This test previously relied on unexpected-lines log reading, which is now not supported.
# Now the log reading must be done inside the test, preferably with the Tracer() utility
# unexpected_lines = ["Assert"] + test_module.Lines.unexpected
from .tests.script_canvas import ScriptCanvas_MultipleRaycastNode as test_module
class Joints_HingeLeadFollowerCollide(EditorSharedTest):
from .tests.joints import Joints_HingeLeadFollowerCollide as test_module
class ShapeCollider_CylinderShapeCollides(EditorSharedTest):
from .tests.shape_collider import ShapeCollider_CylinderShapeCollides as test_module
@pytest.mark.GROUP_tick
@pytest.mark.xfail(reason="Test still under development.")
class Tick_InterpolatedRigidBodyMotionIsSmooth(EditorSharedTest):
from .tests.tick import Tick_InterpolatedRigidBodyMotionIsSmooth as test_module
@pytest.mark.GROUP_tick
@pytest.mark.xfail(reason="Test still under development.")
class Tick_CharacterGameplayComponentMotionIsSmooth(EditorSharedTest):
from .tests.tick import Tick_CharacterGameplayComponentMotionIsSmooth as test_module
class Collider_BoxShapeEditing(EditorSharedTest):
from .tests.collider import Collider_BoxShapeEditing as test_module
class Collider_SphereShapeEditing(EditorSharedTest):
from .tests.collider import Collider_SphereShapeEditing as test_module
class Collider_CapsuleShapeEditing(EditorSharedTest):
from .tests.collider import Collider_CapsuleShapeEditing as test_module
class Collider_CheckDefaultShapeSettingIsPxMesh(EditorSharedTest):
from .tests.collider import Collider_CheckDefaultShapeSettingIsPxMesh as test_module
class Collider_MultipleSurfaceSlots(EditorSharedTest):
from .tests.collider import Collider_MultipleSurfaceSlots as test_module
class Collider_PxMeshAutoAssignedWhenAddingRenderMeshComponent(EditorSharedTest):
from .tests.collider import Collider_PxMeshAutoAssignedWhenAddingRenderMeshComponent as test_module
@pytest.mark.xfail(reason="AssertionError: Couldn't find Asset with path: Objects/SphereBot/r0-b_body.azmodel")
class Collider_PxMeshAutoAssignedWhenModifyingRenderMeshComponent(EditorSharedTest):
from .tests.collider import Collider_PxMeshAutoAssignedWhenModifyingRenderMeshComponent as test_module
class Collider_PxMeshConvexMeshCollides(EditorSharedTest):
from .tests.collider import Collider_PxMeshConvexMeshCollides as test_module
class Material_LibraryClearingAssignsDefault(EditorSharedTest):
from .tests.material import Material_LibraryClearingAssignsDefault as test_module
class ShapeCollider_CanBeAddedWitNoWarnings(EditorSharedTest):
from .tests.shape_collider import ShapeCollider_CanBeAddedWitNoWarnings as test_module
class ShapeCollider_InactiveWhenNoShapeComponent(EditorSharedTest):
from .tests.shape_collider import ShapeCollider_InactiveWhenNoShapeComponent as test_module
class ShapeCollider_LargeNumberOfShapeCollidersWontCrashEditor(EditorSharedTest):
from .tests.shape_collider import ShapeCollider_LargeNumberOfShapeCollidersWontCrashEditor as test_module
class ForceRegion_WithNonTriggerColliderWarning(EditorSharedTest):
from .tests.force_region import ForceRegion_WithNonTriggerColliderWarning as test_module
# Fixme: expected_lines = ["[Warning] (PhysX Force Region) - Please ensure collider component marked as trigger exists in entity"]
class Collider_PxMeshNotAutoAssignedWhenNoPhysicsFbx(EditorSharedTest):
from .tests.collider import Collider_PxMeshNotAutoAssignedWhenNoPhysicsFbx as test_module
class Collider_AddColliderComponent(EditorSharedTest):
from .tests.collider import Collider_AddColliderComponent as test_module
class Terrain_NoPhysTerrainComponentNoCollision(EditorSharedTest):
from .tests.terrain import Terrain_NoPhysTerrainComponentNoCollision as test_module
class RigidBody_InitialLinearVelocity(EditorSharedTest):
from .tests.rigid_body import RigidBody_InitialLinearVelocity as test_module
class RigidBody_StartGravityEnabledWorks(EditorSharedTest):
from .tests.rigid_body import RigidBody_StartGravityEnabledWorks as test_module
class RigidBody_KinematicModeWorks(EditorSharedTest):
from .tests.rigid_body import RigidBody_KinematicModeWorks as test_module
class ForceRegion_LinearDampingForceOnRigidBodies(EditorSharedTest):
from .tests.force_region import ForceRegion_LinearDampingForceOnRigidBodies as test_module
class ForceRegion_SimpleDragForceOnRigidBodies(EditorSharedTest):
from .tests.force_region import ForceRegion_SimpleDragForceOnRigidBodies as test_module
class ForceRegion_CapsuleShapedForce(EditorSharedTest):
from .tests.force_region import ForceRegion_CapsuleShapedForce as test_module
class ForceRegion_ImpulsesCapsuleShapedRigidBody(EditorSharedTest):
from .tests.force_region import ForceRegion_ImpulsesCapsuleShapedRigidBody as test_module
class RigidBody_MomentOfInertiaManualSetting(EditorSharedTest):
from .tests.rigid_body import RigidBody_MomentOfInertiaManualSetting as test_module
class RigidBody_COM_ManualSettingWorks(EditorSharedTest):
from .tests.rigid_body import RigidBody_COM_ManualSettingWorks as test_module
class RigidBody_AddRigidBodyComponent(EditorSharedTest):
from .tests.rigid_body import RigidBody_AddRigidBodyComponent as test_module
class ForceRegion_SplineForceOnRigidBodies(EditorSharedTest):
from .tests.force_region import ForceRegion_SplineForceOnRigidBodies as test_module
class Collider_ColliderPositionOffset(EditorSharedTest):
from .tests.collider import Collider_ColliderPositionOffset as test_module
class RigidBody_AngularDampingAffectsRotation(EditorSharedTest):
from .tests.rigid_body import RigidBody_AngularDampingAffectsRotation as test_module
class Physics_VerifyColliderRigidBodyMeshAndTerrainWorkTogether(EditorSharedTest):
from .tests import Physics_VerifyColliderRigidBodyMeshAndTerrainWorkTogether as test_module
class ForceRegion_MultipleForcesInSameComponentCombineForces(EditorSharedTest):
from .tests.force_region import ForceRegion_MultipleForcesInSameComponentCombineForces as test_module
class ForceRegion_ImpulsesPxMeshShapedRigidBody(EditorSharedTest):
from .tests.force_region import ForceRegion_ImpulsesPxMeshShapedRigidBody as test_module
class ScriptCanvas_TriggerEvents(EditorSharedTest):
from .tests.script_canvas import ScriptCanvas_TriggerEvents as test_module
# needs to be updated to log for unexpected lines
# expected_lines = test_module.LogLines.expected_lines
class ForceRegion_ZeroPointForceDoesNothing(EditorSharedTest):
from .tests.force_region import ForceRegion_ZeroPointForceDoesNothing as test_module
class ForceRegion_ZeroWorldSpaceForceDoesNothing(EditorSharedTest):
from .tests.force_region import ForceRegion_ZeroWorldSpaceForceDoesNothing as test_module
class ForceRegion_ZeroLinearDampingDoesNothing(EditorSharedTest):
from .tests.force_region import ForceRegion_ZeroLinearDampingDoesNothing as test_module
class ForceRegion_MovingForceRegionChangesNetForce(EditorSharedTest):
from .tests.force_region import ForceRegion_MovingForceRegionChangesNetForce as test_module
class ScriptCanvas_CollisionEvents(EditorSharedTest):
from .tests.script_canvas import ScriptCanvas_CollisionEvents as test_module
class ForceRegion_DirectionHasNoAffectOnTotalForce(EditorSharedTest):
from .tests.force_region import ForceRegion_DirectionHasNoAffectOnTotalForce as test_module
class RigidBody_StartAsleepWorks(EditorSharedTest):
from .tests.rigid_body import RigidBody_StartAsleepWorks as test_module
class ForceRegion_ZeroLocalSpaceForceDoesNothing(EditorSharedTest):
from .tests.force_region import ForceRegion_ZeroLocalSpaceForceDoesNothing as test_module
class ForceRegion_ZeroSimpleDragForceDoesNothing(EditorSharedTest):
from .tests.force_region import ForceRegion_ZeroSimpleDragForceDoesNothing as test_module
class RigidBody_COM_ComputingWorks(EditorSharedTest):
from .tests.rigid_body import RigidBody_COM_ComputingWorks as test_module
class RigidBody_MassDifferentValuesWorks(EditorSharedTest):
from .tests.rigid_body import RigidBody_MassDifferentValuesWorks as test_module
class ForceRegion_SplineRegionWithModifiedTransform(EditorSharedTest):
from .tests.force_region import ForceRegion_SplineRegionWithModifiedTransform as test_module
class RigidBody_InitialAngularVelocity(EditorSharedTest):
from .tests.rigid_body import RigidBody_InitialAngularVelocity as test_module
class ForceRegion_ZeroSplineForceDoesNothing(EditorSharedTest):
from .tests.force_region import ForceRegion_ZeroSplineForceDoesNothing as test_module
class ForceRegion_PositionOffset(EditorSharedTest):
from .tests.force_region import ForceRegion_PositionOffset as test_module
class Ragdoll_LevelSwitchDoesNotCrash(EditorSharedTest):
from .tests.ragdoll import Ragdoll_LevelSwitchDoesNotCrash as test_module
class ForceRegion_MultipleComponentsCombineForces(EditorSharedTest):
from .tests.force_region import ForceRegion_MultipleComponentsCombineForces as test_module
@pytest.mark.xfail(
reason="This test will sometimes fail as the ball will continue to roll before the timeout is reached.")
class RigidBody_SleepWhenBelowKineticThreshold(EditorSharedTest):
from .tests.rigid_body import RigidBody_SleepWhenBelowKineticThreshold as test_module
class RigidBody_COM_NotIncludesTriggerShapes(EditorSharedTest):
from .tests.rigid_body import RigidBody_COM_NotIncludesTriggerShapes as test_module
class Material_NoEffectIfNoColliderShape(EditorSharedTest):
from .tests.material import Material_NoEffectIfNoColliderShape as test_module
class Collider_TriggerPassThrough(EditorSharedTest):
from .tests.collider import Collider_TriggerPassThrough as test_module
class RigidBody_SetGravityWorks(EditorSharedTest):
from .tests.rigid_body import RigidBody_SetGravityWorks as test_module
class Material_EmptyLibraryUsesDefault(EditorSharedTest):
from .tests.material import Material_EmptyLibraryUsesDefault as test_module
class ForceRegion_NoQuiverOnHighLinearDampingForce(EditorSharedTest):
from .tests.force_region import ForceRegion_NoQuiverOnHighLinearDampingForce as test_module
class RigidBody_ComputeInertiaWorks(EditorSharedTest):
from .tests.rigid_body import RigidBody_ComputeInertiaWorks as test_module
class ScriptCanvas_PostPhysicsUpdate(EditorSharedTest):
from .tests.script_canvas import ScriptCanvas_PostPhysicsUpdate as test_module
# Note: Test needs to be updated to log for unexpected lines
# unexpected_lines = ["Assert"] + test_module.Lines.unexpected
class ForceRegion_PxMeshShapedForce(EditorSharedTest):
from .tests.force_region import ForceRegion_PxMeshShapedForce as test_module
# Marking the Test as expected to fail using the xfail decorator due to sporadic failure on Automated Review: SPEC-3146
# The test still runs, but a failure of the test doesn't result in the test run failing
@pytest.mark.xfail(reason="Test Sporadically fails with message [ NOT FOUND ] Success: Bar1 : Expected angular velocity")
class RigidBody_MaxAngularVelocityWorks(EditorSharedTest):
from .tests.rigid_body import RigidBody_MaxAngularVelocityWorks as test_module
class Joints_HingeSoftLimitsConstrained(EditorSharedTest):
from .tests.joints import Joints_HingeSoftLimitsConstrained as test_module
class Joints_BallSoftLimitsConstrained(EditorSharedTest):
from .tests.joints import Joints_BallSoftLimitsConstrained as test_module
class Joints_BallLeadFollowerCollide(EditorSharedTest):
from .tests.joints import Joints_BallLeadFollowerCollide as test_module
class ForceRegion_WorldSpaceForceOnRigidBodies(EditorSharedTest):
from .tests.force_region import ForceRegion_WorldSpaceForceOnRigidBodies as test_module
class ForceRegion_PointForceOnRigidBodies(EditorSharedTest):
from .tests.force_region import ForceRegion_PointForceOnRigidBodies as test_module
class ForceRegion_SphereShapedForce(EditorSharedTest):
from .tests.force_region import ForceRegion_SphereShapedForce as test_module
class ForceRegion_RotationalOffset(EditorSharedTest):
from .tests.force_region import ForceRegion_RotationalOffset as test_module
class RigidBody_EnablingGravityWorksPoC(EditorSharedTest):
from .tests.rigid_body import RigidBody_EnablingGravityWorksPoC as test_module
class Collider_ColliderRotationOffset(EditorSharedTest):
from .tests.collider import Collider_ColliderRotationOffset as test_module
class ForceRegion_ParentChildForcesCombineForces(EditorSharedTest):
from .tests.force_region import ForceRegion_ParentChildForcesCombineForces as test_module
class Physics_UndoRedoWorksOnEntityWithPhysComponents(EditorSharedTest):
from .tests import Physics_UndoRedoWorksOnEntityWithPhysComponents as test_module
class ScriptCanvas_ShapeCast(EditorSharedTest):
from .tests.script_canvas import ScriptCanvas_ShapeCast as test_module
@pytest.mark.SUITE_main
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestAutomationNoPrefab(EditorTestSuite):
global_extra_cmdline_args = ["-BatchMode", "-autotest_mode",
'--regset=/Amazon/Preferences/EnablePrefabSystem=false']
@staticmethod
def get_number_parallel_editors():
return 16
@pytest.mark.xfail(reason="AssertionError: Failed to open level: ForceRegion_SliceFileInstantiates does not exist or is invalid")
class ForceRegion_SliceFileInstantiates(EditorSharedTest):
from .tests.force_region import ForceRegion_SliceFileInstantiates as test_module
| 51.195652 | 138 | 0.794321 |
acbff2d72414ffd24ba1254c495ff58bc8e1c2e5 | 884 | py | Python | setup.py | arunanshub/sphinx-nested-apidoc | aa600d311d895ba1b739f946012426aff54bcc4a | [
"MIT"
] | null | null | null | setup.py | arunanshub/sphinx-nested-apidoc | aa600d311d895ba1b739f946012426aff54bcc4a | [
"MIT"
] | 2 | 2021-08-21T22:20:26.000Z | 2022-01-20T18:51:26.000Z | setup.py | arunanshub/sphinx-nested-apidoc | aa600d311d895ba1b739f946012426aff54bcc4a | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name="sphinx-nested-apidoc",
version="0.3.2",
author="Arunanshu Biswas",
author_email="mydellpc07@gmail.com",
description="sphinx-nested-apidoc: When flattened is not enough",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
license="MIT",
packages=find_packages(),
entry_points=dict(
console_scripts=[
"sphinx-nested-apidoc=sphinx_nested_apidoc.__main__:main"
],
),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Topic :: Documentation :: Sphinx",
],
install_requires=["sphinx"],
url="https://github.com/arunanshub/sphinx-nested-apidoc",
)
| 31.571429 | 69 | 0.649321 |