blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1bd795ddca133e60d61b47e641b568b3945ea1b9 | 8da76aabcf9cfea3478f56037edbb5fa1513140b | /talls/lib/python2.7/site-packages/html5lib/inputstream.py | 6c211b4e7eee73384858021f951111c1295f5cfd | [] | no_license | mikanyman/.virtualenvs-legacy | 039479f31f2ca9f9a3d3544d8837429ddd0a7492 | 5486128b5b3b7ddb9ec81d43e3bb601a23b4025a | refs/heads/master | 2020-12-31T07:10:07.018881 | 2017-02-01T02:16:55 | 2017-02-01T02:16:55 | 80,566,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,606 | py | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
# Characters that are illegal in an HTML document: C0/C1 control characters
# (minus whitespace), surrogate code points and the Unicode "noncharacter"
# code points.  Matches are reported as "invalid-codepoint" parse errors.
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
# The noncharacter code points outside the Basic Multilingual Plane.  On
# narrow (UCS-2) builds these cannot be matched by the regexp above, so
# characterErrorsUCS2 decodes surrogate pairs and checks them against this set.
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                  0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                                  0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                                  0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                                  0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                  0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                                  0x10FFFE, 0x10FFFF])
# ASCII whitespace and punctuation; codecName() strips these from encoding
# labels to canonicalise them before lookup in the `encodings` table.
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
    """Buffering for streams that do not have buffering of their own

    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2)
    """
    def __init__(self, stream):
        self.stream = stream
        self.buffer = []          # chunks of bytes already read from the stream
        self.position = [-1, 0]   # chunk number, offset within that chunk
    def tell(self):
        """Return the absolute byte position within the buffered stream."""
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos
    def seek(self, pos):
        """Reposition to absolute offset ``pos``.

        Only positions that have already been buffered may be sought
        (including the current buffered end).
        """
        assert pos <= self._bufferedBytes()
        offset = pos
        i = 0
        while len(self.buffer[i]) < offset:
            # Bug fix: consume the length of each whole chunk we pass over.
            # The previous code subtracted ``pos`` itself, which mis-placed
            # any seek beyond the first chunk boundary.
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]
    def read(self, bytes):
        """Read up to ``bytes`` bytes, serving buffered data first and
        falling back to the wrapped stream when past the buffered end."""
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) and
              self.position[1] == len(self.buffer[-1])):
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)
    def _bufferedBytes(self):
        """Total number of bytes currently held in the buffer."""
        return sum([len(item) for item in self.buffer])
    def _readStream(self, bytes):
        """Read a fresh chunk from the wrapped stream, append it to the
        buffer and leave the position at its end."""
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data
    def _readFromBuffer(self, bytes):
        """Serve a read from the buffered chunks, topping up from the
        stream if the buffer runs out before ``bytes`` bytes are produced."""
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]
            if remainingBytes <= len(bufferedData) - bufferOffset:
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead
            bufferOffset = 0
        if remainingBytes:
            rv.append(self._readStream(remainingBytes))
        # Bug fix: the chunks are bytes, so join with a bytes separator.
        # With ``unicode_literals`` in effect, "" is a unicode string and
        # joining bytes chunks with it fails (py3) or coerces (py2).
        return b"".join(rv)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
    """Construct the appropriate input stream for ``source``.

    A unicode source (text string, or a file-like object yielding text) is
    wrapped in an HTMLUnicodeInputStream; anything else is assumed to be
    bytes and is wrapped in an HTMLBinaryInputStream, which performs
    encoding detection.  ``encoding`` may only be given for byte sources.
    """
    # Probe file-like objects with a zero-length read so no data is consumed.
    probe = source.read(0) if hasattr(source, "read") else source
    if not isinstance(probe, text_type):
        return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
    if encoding is not None:
        raise TypeError("Cannot explicitly set an encoding with a unicode string")
    return HTMLUnicodeInputStream(source)
class HTMLUnicodeInputStream(object):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    # Number of characters requested from the underlying stream per readChunk().
    _defaultChunkSize = 10240
    def __init__(self, source):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        parseMeta - Look for a <meta> element containing encoding information
        """
        # Craziness: len("\U0010FFFF") is 1 on wide (UCS-4) Python builds and
        # 2 on narrow (UCS-2) builds, where astral characters are stored as
        # surrogate pairs.  Pick the matching error reporter and the regexp
        # used to replace unpaired surrogates with U+FFFD.
        if len("\U0010FFFF") == 1:
            self.reportCharacterErrors = self.characterErrorsUCS4
            self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
        else:
            self.reportCharacterErrors = self.characterErrorsUCS2
            # Match only *unpaired* surrogate halves; a high+low pair is a
            # legitimate astral character on narrow builds.
            self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
        # List of where new lines occur
        self.newLines = [0]
        self.charEncoding = ("utf-8", "certain")
        self.dataStream = self.openStream(source)
        self.reset()
    def reset(self):
        """Clear all chunk, position and error state."""
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []
        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0
        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)
        return stream
    def _position(self, offset):
        """Return the 0-based (line, column) of ``offset`` within the
        current chunk, measured from the start of the stream."""
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            # Still on the line that the previous chunk ended on.
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)
    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        return (line + 1, col)
    def char(self):
        """ Read one character from the stream or queue if available. Return
        EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF
        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1
        return char
    def readChunk(self, chunkSize=None):
        """Fetch the next chunk from the data stream, normalise line
        endings, and report/replace invalid characters.  Returns False when
        the stream is exhausted."""
        if chunkSize is None:
            chunkSize = self._defaultChunkSize
        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        data = self.dataStream.read(chunkSize)
        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False
        if len(data) > 1:
            # If the chunk ends in CR (might be followed by LF) or a high
            # surrogate (might be followed by its low half), hold the final
            # character back until the next chunk arrives.
            lastv = ord(data[-1])
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                self._bufferedCharacter = data[-1]
                data = data[:-1]
        self.reportCharacterErrors(data)
        # Replace invalid characters
        # Note U+0000 is dealt with in the tokenizer
        data = self.replaceCharactersRegexp.sub("\ufffd", data)
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        self.chunk = data
        self.chunkSize = len(data)
        return True
    def characterErrorsUCS4(self, data):
        """Record one "invalid-codepoint" error per invalid character.
        On wide builds the regexp can address every code point directly."""
        for i in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")
    def characterErrorsUCS2(self, data):
        # Someone picked the wrong compile option
        # You lose
        # NOTE(review): once ``skip`` is set True it is never cleared before
        # the ``continue``, so all matches after the first flagged surrogate
        # pair in a chunk are skipped; compare against upstream html5lib
        # before relying on error counts produced on narrow builds.
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                    skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Unpaired surrogate at the very end of the chunk.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")
    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """
        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            # Build a character class of \xNN escapes; negate it unless we
            # want the opposite (chars *in* the set).
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
        rv = []
        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break
        r = "".join(rv)
        return r
    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        parseMeta - Look for a <meta> element containing encoding information
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)
        HTMLUnicodeInputStream.__init__(self, self.rawStream)
        self.charEncoding = (codecName(encoding), "certain")
        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 512
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"
        # Detect encoding iff no explicit "transport level" encoding is supplied
        if (self.charEncoding[0] is None):
            self.charEncoding = self.detectEncoding(parseMeta, chardet)
        # Call superclass
        self.reset()
    def reset(self):
        # Wrap the raw byte stream in an incremental decoder for the current
        # encoding; undecodable byte sequences become U+FFFD ('replace').
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')
        HTMLUnicodeInputStream.reset(self)
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)
        # Encoding detection requires a seekable stream; fall back to our
        # own buffering wrapper when the source does not support seek/tell.
        try:
            stream.seek(stream.tell())
        except:
            stream = BufferedStream(stream)
        return stream
    def detectEncoding(self, parseMeta=True, chardet=True):
        """Determine the character encoding, trying in order: BOM, <meta>
        prescan, the charade/chardet library, then the default encoding.
        Returns an (encoding name, confidence) tuple."""
        # First look for a BOM
        # This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"
        # If there is no BOM need to look for meta elements with encoding
        # information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"
        # Guess with chardet, if avaliable
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                try:
                    from charade.universaldetector import UniversalDetector
                except ImportError:
                    from chardet.universaldetector import UniversalDetector
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                self.rawStream.seek(0)
            except ImportError:
                pass
        # If all else fails use the default encoding
        if encoding is None:
            confidence = "tentative"
            encoding = self.defaultEncoding
        # Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1": "windows-1252"}
        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]
        return encoding, confidence
    def changeEncoding(self, newEncoding):
        """Switch to ``newEncoding`` mid-parse (triggered by a late <meta>
        declaration).  Raises ReparseException when the document must be
        reparsed from the beginning under the new encoding."""
        assert self.charEncoding[1] != "certain"
        newEncoding = codecName(newEncoding)
        if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
            # A BOM would already have identified utf-16 content; a meta
            # declaration of utf-16 is treated as utf-8 per the HTML spec.
            newEncoding = "utf-8"
        if newEncoding is None:
            return
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            # Bug fix: remember the previous encoding *before* overwriting
            # self.charEncoding -- the old code formatted the exception
            # message afterwards, so it always read "changed from X to X".
            oldEncoding = self.charEncoding[0]
            self.rawStream.seek(0)
            self.reset()
            self.charEncoding = (newEncoding, "certain")
            raise ReparseException("Encoding changed from %s to %s" % (oldEncoding, newEncoding))
    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
            codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
        }
        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)
        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2
        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        self.rawStream.seek(encoding and seek or 0)
        return encoding
    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()
        # Same spec rule as in changeEncoding: a meta-declared utf-16
        # family encoding is interpreted as utf-8.
        if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
            encoding = "utf-8"
        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        # Lower-cased at construction time: the encoding pre-parser does all
        # of its matching case-insensitively.
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())
    def __init__(self, value):
        # -1 means "before the first byte"; the first next() advances to 0.
        self._position = -1
    def __iter__(self):
        return self
    def __next__(self):
        # Advance one byte.  StopIteration doubles as end-of-data flow
        # control for the callers in EncodingParser.
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]
    def next(self):
        # Py2 compat
        return self.__next__()
    def previous(self):
        # Step back one byte and return the byte at the new position.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]
    def setPosition(self, position):
        if self._position >= len(self):
            raise StopIteration
        self._position = position
    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            return None
    position = property(getPosition, setPosition)
    def getCurrentByte(self):
        # One-byte slice (keeps bytes semantics on py3, where indexing
        # a bytes object yields an int).
        return self[self.position:self.position + 1]
    currentByte = property(getCurrentByte)
    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def skipUntil(self, chars):
        # Advance until a byte in ``chars`` is found; returns that byte or
        # None when the end of the data is reached.
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p + len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv
    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes) - 1)
            return True
        else:
            # Not found: signal the callers' StopIteration-based flow control.
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""
    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None
    def getEncoding(self):
        # Scan byte-by-byte; at each position try the handlers in order of
        # longest/most specific prefix first.  A handler returning False or
        # raising StopIteration (end of data) terminates the scan.
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for byte in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        keepParsing = False
                        break
            if not keepParsing:
                break
        return self.encoding
    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")
    def handleMeta(self):
        # Examine a <meta> element's attributes for charset information,
        # either via a charset attribute or an http-equiv/content pair.
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        # The content attribute was seen before http-equiv;
                        # commit the encoding it declared.
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = codecName(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                pendingEncoding = codec
    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)
    def handlePossibleEndTag(self):
        next(self.data)
        return self.handlePossibleTag(True)
    def handlePossibleTag(self, endTag):
        # Skip over a non-meta tag, consuming its attributes so their
        # contents cannot be misinterpreted as markup.
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True
        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True
    def handleOther(self):
        # Comments-like constructs (<!, <?): skip to the closing '>'.
        return self.data.jumpTo(b">")
    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        # The numbered steps below follow the HTML "get an attribute"
        # algorithm used during encoding prescan.
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extracts the charset value from the value of a meta "content"
    attribute, e.g. b"text/html; charset=utf-8" -> b"utf-8"."""
    def __init__(self, data):
        assert isinstance(data, bytes)
        self.data = data
    def parse(self):
        """Return the charset value as bytes, or None when no usable
        charset declaration is present.  StopIteration raised by running
        off the end of the data is treated as "not found"."""
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            return None
def codecName(encoding):
    """Return the Python codec name corresponding to *encoding*, or None
    when the label does not name a known encoding."""
    if isinstance(encoding, bytes):
        # Encoding labels are ASCII by definition; anything else is invalid.
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            return None
    if not encoding:
        return None
    # Canonicalise the label: drop ASCII whitespace/punctuation, lowercase,
    # then look it up in the known-encodings table.
    canonicalName = ascii_punctuation_re.sub("", encoding).lower()
    return encodings.get(canonicalName, None)
| [
"mika.nyman@synapse-computing.com"
] | mika.nyman@synapse-computing.com |
d80297e71294a37b02f17f59041029e8e7f646a4 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/PhysicsAnalysis/PhysicsValidation/PhysValMonitoring/share/PhysValSUSY_jobOptions.py | 35e75db8b1b928b6f74ab87eb24a10ea2f37c756 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,813 | py | ### configuration
# Athena job-options fragment steering SUSY physics-validation monitoring.
# NOTE(review): names such as CfgMgr, ToolSvc, topSequence, svcMgr, include(),
# GeV, INFO and DEBUG are injected by the Athena job-option environment --
# this fragment is not a standalone Python module; confirm against the
# enclosing Athena release before refactoring.
run_default_rel19 = True
run_migrated_rel17_tools = True
rel17_use_separate_output = False
# Extra per-domain validation tools, included via their own jobOptions files.
run_other_tools = {
    'MET' : True,
    'Muon' : True,
    'Tau' : True,
    'InDet': False,
}
### print configuration
print "PhysValSUSY job options:"
print " Running default release 19 tool : %s" % str(run_default_rel19)
print " Running migrated release 17 tool : %s" % str(run_migrated_rel17_tools)
print " output in separate file : %s" % str(rel17_use_separate_output)
print " Running following other tools : %s" % str(run_other_tools)
### consistency checks
# avoid tools being added twice
# (a "do<Tool>" flag already set True in the enclosing job options means the
# tool was configured elsewhere, so drop it from our list)
for other_tool_name in [other_tool for other_tool, run_this in run_other_tools.items() if run_this is True]:
    if "do" + other_tool_name in vars():
        if vars()["do" + other_tool_name] is True:
            print "Configuration warning: do%s was already set active, removing from configuration." % other_tool_name
            run_other_tools[other_tool_name] = False
### perform initialization / add tools
if run_default_rel19:
    # add default tool
    from SUSYPhysValMonitoring.SUSYPhysValMonitoringConf import SUSYPhysVal__SUSYPhysValMonitoring
    tool1 = SUSYPhysVal__SUSYPhysValMonitoring()
    tool1.EnableLumi = False
    tool1.OutputLevel = INFO
    tool1.DetailLevel = 10
    tool1.UseTruthInformation = True
    from AthenaCommon.AppMgr import ToolSvc
    ToolSvc += tool1
    monMan = CfgMgr.AthenaMonManager("PhysValMonManager")
    monMan.AthenaMonTools += [ tool1 ]
for run_other_tool in [other_tool for other_tool, run_this in run_other_tools.items() if run_this is True]:
    # add "external tools"
    include("PhysValMonitoring/PhysVal" + run_other_tool + "_jobOptions.py")
if run_migrated_rel17_tools:
    # add migrated tool (main part of old release 17 validation package)
    ### imports
    from SUSYPhysValMonitoring.SUSYPhysValMonitoringConf import *
    doTrigger = False
    doTruth = False
    # specify input container names
    TrigDecisionTool = "TrigDecisionTool"
    McEventCollection = "GEN_AOD"
    TrackParticleContainer = "TrackParticleCandidate"
    JetContainer = "AntiKt4EMTopoJets" ##leave off the suffix "Jets"
    ElectronContainer = "ElectronCollection"
    MuonContainer = "Muons"
    TauJetContainer = "TauRecContainer"
    MissingET = "MET_RefFinal"
    # for 17.2.1 (SUSYD3PDMaker-00-12-00)
    # SUSY_MET_name = "MET_RefFinal_Simplified20" # MET name to be used by SUSYSusyRec.cxx
    # SUSY_MET_muons_name = "MET_Muon_Total_Staco_Simplified20" # MET muons contribution name to be used by SUSYSusyRec.cxx
    # for 17.2.7.5.9 (SUSYD3PDMaker-00-12-36)
    #SUSY_MET_name = "MET_RefFinal_Egamma10NoTau" # MET name to be used by SUSYSusyRec.cxx
    #SUSY_MET_muons_name = "MET_Muon_Staco_Egamma10NoTau" # MET muons contribution name to be used by SUSYSusyRec.cxx
    SUSY_MET_name = "Final"
    SUSY_MET_muons_name = "Muons"
    # init and add tool
    SusyPlot = SUSYPlot("SusyPlot")
    SusyPlot.DoTrigger = doTrigger
    SusyPlot.DoTruth = doTruth
    SusyPlot.HistToolKeys = [ "SUSYSusyRec/susyTool" ]
    SusyPlot += SUSYSusyRec("susyTool")
    SusyPlot.susyTool.ElectronName = ElectronContainer
    SusyPlot.susyTool.MuonName = MuonContainer
    SusyPlot.susyTool.TauName = TauJetContainer
    SusyPlot.susyTool.JetName = JetContainer
    SusyPlot.susyTool.MetName = MissingET
    SusyPlot.susyTool.SUSYMissingET = SUSY_MET_name
    SusyPlot.susyTool.SUSYMissingETMuons = SUSY_MET_muons_name
    SusyPlot.susyTool.McEventName = McEventCollection
    SusyPlot.susyTool.OutputLevel = INFO
    if doTrigger:
        SusyPlot.TrigDecisionTool = ToolSvc.TrigDecisionTool
    # analysis selection cuts (GeV is the Athena units constant)
    SusyPlot.susyTool.PtLeptonCut = 20*GeV
    SusyPlot.susyTool.PtLeptonPreCut = 20*GeV
    SusyPlot.susyTool.EtIsolCut = 10*GeV
    SusyPlot.susyTool.EtMissCut = 80*GeV
    SusyPlot.susyTool.etaJetCut = 3
    SusyPlot.susyTool.PtJetCut = 20*GeV
    SusyPlot.susyTool.PtJet0Cut = 60*GeV
    SusyPlot.susyTool.PtJet1Cut = 30*GeV
    SusyPlot.susyTool.MeffCut = 500*GeV
    SusyPlot.susyTool.MTCut = 100*GeV
    SusyPlot.susyTool.STCut = 0.2
    topSequence += SusyPlot
    if rel17_use_separate_output:
        ### if you want to write histograms to separate file use this:
        from AthenaCommon.AppMgr import theApp
        theApp.HistogramPersistency = "ROOT"
        from AthenaCommon.AppMgr import ServiceMgr
        ## The string "TestMon" in the argument below is the 'FileKey'
        ## used by Athena to access the output file internally
        svcMgr.THistSvc.Output += ["PhysVal2 DATAFILE='hist.root' OPT='RECREATE'"]
        svcMgr.THistSvc.PrintAll = True
        svcMgr.THistSvc.OutputLevel = DEBUG
    else:
        SusyPlot.susyTool.HistBaseDirectory = "/PhysVal/SUSY/ETmiss/"
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
d82eb3cb3eb21df97bba550f9eaf5bfe97b112ad | f28e54ae7a04ab40ff1eb71184f1a39fe848c07e | /8Django/formularios2/manage.py | 96468de469d9a69e924933fdcf9f00ff3ae68057 | [] | no_license | Phonethedor/fullPy | 116c2c2ce1d0bc4d42b95e2acaad2a6acf9db4d3 | fbd216470f60c772bb72381990b23e235776479d | refs/heads/master | 2023-08-12T14:02:24.048500 | 2021-09-28T13:09:28 | 2021-09-28T13:09:28 | 371,515,406 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the ``formularios2`` project.

    Points Django at this project's settings (unless the environment
    already specifies a settings module) and hands ``sys.argv`` to the
    management command dispatcher.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'formularios2.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (missing install or
        # an inactive virtualenv), keeping the original as the cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"alvaroc.giovannim@gmail.com"
] | alvaroc.giovannim@gmail.com |
21c402504fe3f0d408b7357e842acf6ae49d7b72 | 0bb007de7a7b749e71bd4c5a63c9cb754660977d | /results/QPROP/RoboschoolHumanoidFlagrun-v1_Resumed/2018-04-16_01_18_19/QPropPolicy.py | 43dc04d9392b7ae57d300ea6d733441212f56279 | [] | no_license | HarveyYan/RL-Robotic-Control | 000e41032e94d465fddeba9b4094db607f8faa91 | 8c0aa98352fc3ce653de0c6ed7254928c08e67f8 | refs/heads/master | 2020-03-07T12:14:49.860443 | 2018-04-27T04:44:36 | 2018-04-27T04:44:36 | 127,473,793 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,835 | py | """
without eligibility trace, this one is almost identical to the reference policy implementation.
"""
import tensorflow as tf
import tensorflow.contrib.layers
import numpy as np
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
gpu_options.allow_growth = True
class QPropPolicy:
    def __init__(self, obs_dim, act_dim, action_space, kl_target, epochs=1):
        """Build the Q-Prop policy graph(s) and start the TF session.

        obs_dim      -- dimensionality of the observation vector
        act_dim      -- dimensionality of the action vector
        action_space -- environment action space (stored for reference)
        kl_target    -- target KL divergence between old and new policy
        epochs       -- optimisation passes per policy update
        """
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.action_space = action_space
        self.kl_target = kl_target
        self.epochs = epochs
        self.eta = 50 # hinge loss multiplier, between actual kl and kl target
        self.beta = 1.0 # kl penalty term multiplier
        self.lr_multiplier = 1.0 # dynamically adjust lr when D_KL out of control
        # NOTE(review): presumably the soft (Polyak) target-update rate for
        # the deterministic target network -- confirm where tao is consumed.
        self.tao = 1e-3
        self._build_graph()
        self._init_session()
def _build_graph(self):
self.g = tf.Graph()
with self.g.as_default():
self._placeholders()
self._policy_nn()
self._log_prob()
self._kl_and_entropy()
self._sample()
self._loss()
self._train()
self.saver = tf.train.Saver()
self.init = tf.global_variables_initializer()
# build target policy, which is deterministic for DDPG updates
self.target_g = tf.Graph()
with self.target_g.as_default():
self.target_obs_ph = tf.placeholder(tf.float32, (None, self.obs_dim), 'obs')
hid1_size = self.obs_dim * 10 # 10 empirically determined
hid3_size = self.act_dim * 10 # 10 empirically determined
hid2_size = int(np.sqrt(hid1_size * hid3_size))
# 3 hidden layers with relu activations
out = tf.layers.dense(self.target_obs_ph, hid1_size, tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(), name="h1")
out = tf.layers.dense(out, hid2_size, tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(), name="h2")
out = tf.layers.dense(out, hid3_size, tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(), name="h3")
self.target_means = tf.layers.dense(out, self.act_dim,
kernel_initializer=tf.contrib.layers.xavier_initializer(), name="means")
self.target_saver = tf.train.Saver()
def _placeholders(self):
'''
Add placeholders to the graph
:return:
'''
self.obs_ph = tf.placeholder(tf.float32, (None, self.obs_dim), 'obs')
self.act_ph = tf.placeholder(tf.float32, (None, self.act_dim), 'act')
self.learning_signal_ph = tf.placeholder(tf.float32, (None,), 'learning_signal') # number of time steps, usually
self.ctrl_taylor_ph = tf.placeholder(tf.float32, (None, self.act_dim), 'ctrl_taylor') # number of time steps, usually
self.means_old_ph = tf.placeholder(tf.float32, (None, self.act_dim), 'old_means')
self.logvars_old_ph = tf.placeholder(tf.float32, (self.act_dim,), 'old_logvars')
self.beta_ph = tf.placeholder(tf.float32, (), 'kl_penalty_multiplier')
self.eta_ph = tf.placeholder(tf.float32, (), 'hinge_penalty_multiplier')
self.lr_ph = tf.placeholder(tf.float32, (), 'learning_rate')
def _policy_nn(self):
"""
Local mean and global diagonal covariance.
:return:
"""
hid1_size = self.obs_dim * 10 # 10 empirically determined
hid3_size = self.act_dim * 10 # 10 empirically determined
hid2_size = int(np.sqrt(hid1_size * hid3_size))
# heuristic to set learning rate based on NN size (tuned on 'Hopper-v1')
self.lr = 9e-4 / np.sqrt(hid2_size) # 9e-4 empirically determined
# 3 hidden layers with relu activations
out = tf.layers.dense(self.obs_ph, hid1_size, tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(), name="h1")
out = tf.layers.dense(out, hid2_size, tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(), name="h2")
out = tf.layers.dense(out, hid3_size, tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(), name="h3")
self.means = tf.layers.dense(out, self.act_dim,
kernel_initializer=tf.contrib.layers.xavier_initializer(), name="means")
logvar_speed = (10 * hid3_size) // 48
log_vars = tf.get_variable('logvars', (logvar_speed, self.act_dim), tf.float32,
tf.constant_initializer(0.0))
self.log_vars = tf.reduce_sum(log_vars, axis=0) - 1.0
def _log_prob(self):
logp = -0.5 * tf.reduce_sum(self.log_vars) # probability of a trajectory
logp += -0.5 * tf.reduce_sum(tf.square(self.act_ph - self.means) /
tf.exp(self.log_vars), axis=1)
self.logp = logp
logp_old = -0.5 * tf.reduce_sum(self.logvars_old_ph)
logp_old += -0.5 * tf.reduce_sum(tf.square(self.act_ph - self.means_old_ph) /
tf.exp(self.logvars_old_ph), axis=1)
self.logp_old = logp_old
def _kl_and_entropy(self):
"""
Taken directly from Patrick Coady's code. Validity verified.
Add to Graph:
1. KL divergence between old and new distributions
2. Entropy of present policy given states and actions
https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Kullback.E2.80.93Leibler_divergence
https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Entropy
"""
log_det_cov_old = tf.reduce_sum(self.logvars_old_ph)
log_det_cov_new = tf.reduce_sum(self.log_vars)
tr_old_new = tf.reduce_sum(tf.exp(self.logvars_old_ph - self.log_vars))
self.kl = 0.5 * tf.reduce_mean(log_det_cov_new - log_det_cov_old + tr_old_new +
tf.reduce_sum(tf.square(self.means - self.means_old_ph) /
tf.exp(self.log_vars), axis=1) - self.act_dim)
self.entropy = 0.5 * (self.act_dim * (np.log(2 * np.pi) + 1) +
tf.reduce_sum(self.log_vars))
def _sample(self):
"""
Reparametrization trick.
:return:
"""
self.sample = (self.means + tf.exp(self.log_vars / 2.0) *
tf.random_normal(shape=(self.act_dim,)))
# self.sample = tf.clip_by_value(self.sample, self.action_space.low[0], self.action_space.high[0])
def _loss(self):
"""
Four loss terms:
1) standard policy gradient
2) D_KL(pi_old || pi_new)
3) Hinge loss on [D_KL - kl_targ]^2
4) Entropy for encouraging exploration
See: https://arxiv.org/pdf/1707.02286.pdf
"""
"""PPO loss definition"""
self.ppo_loss = -tf.reduce_mean(self.learning_signal_ph * tf.exp(self.logp - self.logp_old))
self.ppo_loss += tf.reduce_mean(self.beta_ph * self.kl)
self.ppo_loss += self.eta_ph * tf.square(tf.maximum(0.0, self.kl - 2.0 * self.kl_target))
"""DDPG loss definition"""
# ctrl_taylor_ph is of shape (#samples, act_dim), means is of shape (#samples, act_dim)
# loss -= tf.reduce_mean(tf.reduce_sum(tf.multiply(self.ctrl_taylor_ph, self.means), axis=1))
self.ddpg_loss = -tf.reduce_mean(tf.diag_part(tf.matmul(self.ctrl_taylor_ph, self.means, transpose_b=True)))
self.loss = self.ppo_loss + self.ddpg_loss
def _train(self):
self.optimizer = tf.train.AdamOptimizer(self.lr_ph)
self.train = self.optimizer.minimize(self.loss)
def _init_session(self):
self.sess = tf.Session(graph=self.g, config=tf.ConfigProto(gpu_options=gpu_options))
self.sess.run(self.init)
self.target_sess = tf.Session(graph=self.target_g, config=tf.ConfigProto(gpu_options=gpu_options))
# initialize target policy to the real policy right here
with self.g.as_default():
policy_tvs = tf.trainable_variables()
with self.target_g.as_default():
target_tvs = tf.trainable_variables()
self.target_sess.run([tar_t.assign(policy_tvs[i].eval(session=self.sess)) for i, tar_t in enumerate(target_tvs)])
def get_sample(self, obs):
"""
Sample an action from the stochastic policy.
:param obs:
:return:
"""
feed_dict = {self.obs_ph: obs}
return self.sess.run(self.sample, feed_dict=feed_dict)
def mean(self, obs):
"""
Expected action from the determinstic target policy.
:param obs:
:return:
"""
feed_dict = {self.target_obs_ph: obs}
return self.target_sess.run(self.target_means, feed_dict=feed_dict)
def update(self, observes, actions, learning_signal, ctrl_taylor):
feed_dict = {self.obs_ph: observes,
self.act_ph: actions,
self.learning_signal_ph: learning_signal,
self.ctrl_taylor_ph: ctrl_taylor,
self.beta_ph: self.beta,
self.eta_ph: self.eta,
self.lr_ph: self.lr * self.lr_multiplier}
# Necessity of conservative policy update
means_old, logvars_old = self.sess.run([self.means, self.log_vars], feed_dict)
feed_dict[self.logvars_old_ph] = logvars_old
feed_dict[self.means_old_ph] = means_old
ppo_loss, ddpg_loss, kl, entropy = 0, 0, 0, 0
for e in range(self.epochs):
# TODO: need to improve data pipeline - re-feeding data every epoch
self.sess.run(self.train, feed_dict)
ppo_loss, ddpg_loss, kl, entropy = self.sess.run([self.ppo_loss, self.ddpg_loss, self.kl, self.entropy], feed_dict)
if kl > self.kl_target * 4: # early stopping if D_KL diverges badly
break
# TODO: too many "magic numbers" in next 8 lines of code, need to clean up
if kl > self.kl_target * 2: # servo beta to reach D_KL target
self.beta = np.minimum(35, 1.5 * self.beta) # max clip beta
if self.beta > 30 and self.lr_multiplier > 0.1:
self.lr_multiplier /= 1.5
elif kl < self.kl_target / 2:
self.beta = np.maximum(1 / 35, self.beta / 1.5) # min clip beta
if self.beta < (1 / 30) and self.lr_multiplier < 10:
self.lr_multiplier *= 1.5
# print(self.beta)
# update target policy
with self.g.as_default():
policy_tvs = tf.trainable_variables()
with self.target_g.as_default():
target_tvs = tf.trainable_variables()
self.target_sess.run([tar_t.assign(self.tao * policy_tvs[i].eval(session=self.sess) +
(1-self.tao) * tar_t.eval(session=self.target_sess)) for i, tar_t in enumerate(target_tvs)])
return ppo_loss, ddpg_loss, kl, entropy, self.beta
def save(self, saveto):
if not os.path.exists(saveto + 'policy'):
os.makedirs(saveto + 'policy')
self.saver.save(self.sess, saveto + 'policy/policy.pl')
self.target_saver.save(self.target_sess, saveto + 'policy/target_policy.pl')
def load(self, load_from):
self.saver.restore(self.sess, load_from+'policy.pl')
self.target_saver.restore(self.target_sess, load_from+'target_policy.pl')
def close_sess(self):
self.sess.close()
self.target_sess.close() | [
"zichao.yan@mail.mcgill.ca"
] | zichao.yan@mail.mcgill.ca |
6fa5e352e75bf210bee00874a2a19a98fab5b1d5 | f02485de5a101f3b69a45b2c4e71bd950ee55eba | /Z_other/ImgMatTiffUtil/ImgMatTiffUtil.py | 647136fee778f51e87704f7bf8050ecf06269481 | [] | no_license | newjokker/PyUtil | ef4266b0ca32157f9de6e2cac1b1a10647190d99 | 32e64be10a6cd2856850f6720d70b4c6e7033f4e | refs/heads/master | 2020-11-28T00:19:02.073391 | 2019-12-23T02:07:40 | 2019-12-23T02:07:40 | 229,654,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,072 | py | # -*- coding: utf-8 -*-
# -*- author: jokker -*-
import gdal
import os
import numpy as np
from PIL import Image, ImageEnhance
import matplotlib.pylab as plt
class ImgMatTiff(object):
    """Conversions between TIFF datasets (GDAL), numpy matrices and PIL images."""
    # ------------------------- four basic conversions --------------------------
    @staticmethod
    def tiff_to_mat(path):
        """Read a TIFF into a numpy array.

        :param path: file path string or an already-open gdal.Dataset
        :return: (data, band_count), or None if the dataset cannot be opened
        :raises TypeError: for any other argument type
        """
        if isinstance(path, str):
            dataset = gdal.Open(path)
        elif isinstance(path, gdal.Dataset):
            dataset = path
        else:
            raise TypeError('path can only be string,unicode or dataset')
        if dataset:
            im_width = dataset.RasterXSize  # number of raster columns
            im_height = dataset.RasterYSize  # number of raster rows
            im_bands = dataset.RasterCount  # number of bands
            im_data = dataset.ReadAsArray(0, 0, im_width, im_height)  # pixel data
            return im_data, im_bands
    @staticmethod
    def img_to_mat(img_path, assign_3_band=True):
        """Read an image (path or PIL.Image) into a uint8 numpy array.

        :param assign_3_band: if True, 3-D inputs are truncated to the first
                              3 channels (drops e.g. an alpha channel)
        :return: (matrix, band_count), or None for unsupported input
        """
        if isinstance(img_path, str):
            img = Image.open(img_path)
        elif isinstance(img_path, Image.Image):
            img = img_path
        else:
            return
        # read the pixel matrix
        image_mat = np.asarray(img, dtype='uint8')
        # make the array writable
        image_mat.flags.writeable = True
        if len(image_mat.shape) == 2:
            # 2-D (grayscale): replicate into three identical bands
            bands = 1
            return_mat = np.array([image_mat, image_mat, image_mat], dtype=np.uint8)
        elif len(image_mat.shape) == 3:
            # 3-D matrix
            bands = image_mat.shape[2]
            # optionally force exactly three bands
            if assign_3_band:
                return_mat = image_mat[:, :, :3]
            else:
                return_mat = image_mat
        else:
            return
        return return_mat, bands
    @staticmethod
    def mat_to_img(mat, assign_size=None, save_path=None):
        """Convert a matrix to a PIL image; save it when save_path is given,
        otherwise return the image object."""
        # normalize dtype for PIL
        mat_format = mat.astype(np.uint8)
        # build the image
        img = Image.fromarray(mat_format)
        # resize if requested
        if assign_size and mat.shape != assign_size:
            img = img.resize(assign_size)
        if save_path:
            # PNG output with an alpha layer goes through the same save call
            if save_path[-4:].lower() == '.png' and mat.shape[2] == 4:
                print('输出图片存在 α 图层,用的是同一个 save 接口')
                img.save(save_path)
            else:
                img.save(save_path)
        else:
            return img
    @staticmethod
    def mat_to_tiff(im_data, im_width, im_height, im_bands, im_geotrans, im_proj, out_path=None,
                    no_data_value=None, return_mode='TIFF'):
        """
        Write a matrix out as a GDAL dataset.
        :param im_data: input matrix (2-D single band or 3-D band-first)
        :param im_width: width in pixels
        :param im_height: height in pixels
        :param im_bands: band count (recomputed from 3-D input shapes)
        :param im_geotrans: affine geotransform
        :param im_proj: projection / spatial reference WKT
        :param out_path: output file path (str) or None for in-memory output
        :param no_data_value: no-data value; a single number, or one per band
        :param return_mode: 'TIFF' writes to disk, 'MEMORY' keeps the dataset
        :return: the dataset when return_mode is 'MEMORY'
        """
        # pick a GDAL storage type from the numpy dtype
        # (note: 'int8' also matches 'uint8', which is intended here)
        if 'int8' in im_data.dtype.name:
            datatype = gdal.GDT_Byte
        elif 'int16' in im_data.dtype.name:
            datatype = gdal.GDT_UInt16
        elif 'bool' in im_data.dtype.name:
            datatype = gdal.GDT_Byte
        else:
            datatype = gdal.GDT_Float32
        # infer the band layout from the matrix shape
        if len(im_data.shape) == 3:
            im_bands, im_height, im_width = im_data.shape
        elif len(im_data.shape) == 2:
            # normalize to a 3-D matrix with a single band
            im_data = np.array([im_data])
        else:
            im_bands, (im_height, im_width) = 1, im_data.shape
        # choose the driver according to the storage mode
        if out_path:
            dataset = gdal.GetDriverByName('GTiff').Create(out_path, im_width, im_height, im_bands, datatype)
        else:
            dataset = gdal.GetDriverByName('MEM').Create('', im_width, im_height, im_bands, datatype)
        # write georeferencing and pixel data
        if dataset is not None:
            dataset.SetGeoTransform(im_geotrans)
            dataset.SetProjection(im_proj)
            # write each band
            for i in range(im_bands):
                dataset.GetRasterBand(i + 1).WriteArray(im_data[i])
                # write the no-data value
                if no_data_value is not None:
                    # either one no-data value per band, or a single shared value
                    if isinstance(no_data_value, list) or isinstance(no_data_value, tuple):
                        if no_data_value[i] is not None:
                            dataset.GetRasterBand(i + 1).SetNoDataValue(no_data_value[i])
                    else:
                        dataset.GetRasterBand(i + 1).SetNoDataValue(no_data_value)
        # return according to the requested mode
        if return_mode.upper() == 'MEMORY':
            return dataset
        elif return_mode.upper() == 'TIFF':
            # BUG FIX: the original compared the bound method `return_mode.upper`
            # (never equal to a string), so this flush-on-close branch never ran.
            del dataset
    # ------------------- conversions built on the basic four -------------------
    @staticmethod
    def tiff_to_img(tif_path, img_path=None, assign_size=None):
        """Render a TIFF to an image (effectively a grayscale/first-bands dump)."""
        # 1. tiff to mat
        mat, band = ImgMatTiff.tiff_to_mat(tif_path)
        # 2. coerce the matrix layout for PIL
        if len(mat.shape) == 2:
            mat = np.rollaxis(np.tile(mat, (3, 1, 1)), 0, 3)  # single band -> 3 identical bands
        elif len(mat.shape) == 3:
            mat = np.rollaxis(mat[[0, 1, 2], :, :], 0, 3) * 10
        # 3. mat to image
        ImgMatTiff.mat_to_img(mat, save_path=img_path, assign_size=assign_size)
    @staticmethod
    def img_to_tiff(img_path, im_width, im_height, im_bands, im_geotrans, im_proj, out_path=None, no_data_value=None,
                    return_mode='TIFF'):
        """Convert an image file to a georeferenced TIFF."""
        # img to mat
        mat, band = ImgMatTiff.img_to_mat(img_path)
        # mat to tiff
        ImgMatTiff.mat_to_tiff(mat, im_width, im_height, im_bands, im_geotrans, im_proj, out_path=out_path,
                               no_data_value=no_data_value, return_mode=return_mode)
    # ---------------------- operations on the conversions ----------------------
    @staticmethod
    def tiff_to_img_rgb(tif_path, assign_band, img_path=None, assign_size=None, do_stretch=False):
        """Render three chosen TIFF bands as the R/G/B layers of an image."""
        # 1. tiff to mat
        mat, band = ImgMatTiff.tiff_to_mat(tif_path)
        # 2. arrange the data as an (h, w, 3) RGB matrix
        if band == 1:
            mat_rgb = np.rollaxis(np.tile(mat, (3, 1, 1)), 0, 3)  # single band -> 3 identical bands
        else:
            mat_rgb = np.rollaxis(mat[[assign_band[0], assign_band[1], assign_band[2]], :, :], 0, 3)
        # 3. optionally stretch each channel towards the full 0-255 range
        if do_stretch:
            mat_rgb = ImgMatTiff.do_stretch_rgb(mat_rgb)
        # 4. mat to image
        ImgMatTiff.mat_to_img(mat_rgb, save_path=img_path, assign_size=assign_size)
    @staticmethod
    def mat_to_img_rgb(mat_r, mat_g, mat_b, assign_size=None, save_path=None):
        """Combine three single-band matrices into one RGB image."""
        mat = np.zeros((mat_r.shape[0], mat_r.shape[1], 3), dtype=np.uint8)
        # insert the r / g / b layers
        mat[:, :, 0] = mat_r
        mat[:, :, 1] = mat_g
        mat[:, :, 2] = mat_b
        # normalize dtype for PIL
        mat_format = mat.astype(np.uint8)
        # build the image
        img = Image.fromarray(mat_format)
        # resize if requested
        if assign_size and mat.shape != assign_size:
            img = img.resize(assign_size)
        if save_path:
            # alpha-layer PNG goes through the same save call
            if save_path[-4:].lower() == '.png' and mat.shape[2] == 4:
                img.save(save_path)
            else:
                img.save(save_path)
        else:
            return img
    # -------------------------------- helpers ----------------------------------
    @staticmethod
    def do_stretch_rgb(mat):
        """Stretch every channel in place by 255 / (max - min) of that channel.

        NOTE(review): the channel minimum is not subtracted first, so a channel
        whose minimum is nonzero can exceed 255 — behavior preserved as-is.
        """
        for i in range(3):
            mat[:, :, i] = mat[:, :, i] * 255.0 / (np.max(mat[:, :, i]) - np.min(mat[:, :, i]))
        return mat
if __name__ == "__main__":
    # Manual smoke test: render a land-use TIFF to JPEG, once stretched, once raw.
    # tif_path = r'D:\Data\002. 栅格数据\fy4A\20170725\FY4A-_AGRI--_N_DISK_1047E_L1-_FDI-_MULT_NOM_20170725030000_20170725031459_4000M_V0001.tif'
    tif_path = r'D:\Code\FireDetectionH8\algorithm\AuxData\Landuse\land_use.tif'
    img_path = r'C:\Users\Administrator\Desktop\13254.jpg'
    ImgMatTiff.tiff_to_img_rgb(tif_path, [1, 2, 3], img_path=img_path, do_stretch=True)
    ImgMatTiff.tiff_to_img_rgb(tif_path, [1, 2, 3], img_path=img_path)
    exit()
    # NOTE: everything below is unreachable because of the exit() above;
    # kept as a reference snippet for composing RGB from three Himawari bands.
    # FIXME: when converting tif/mat to jpg, allow passing a value->colour
    # mapping table (one colour per value), e.g. an XML colormap like ArcGIS.
    savePath = r'C:\Users\Administrator\Desktop\wb\123.png'
    r = r'C:\Users\Administrator\Desktop\wb\HS_H08_20181029_0400_B01_FLDK_R20.tif'
    g = r'C:\Users\Administrator\Desktop\wb\HS_H08_20181029_0400_B02_FLDK_R20.tif'
    b = r'C:\Users\Administrator\Desktop\wb\HS_H08_20181029_0400_B03_FLDK_R20.tif'
    im_data_r, im_bands1 = ImgMatTiff.tiff_to_mat(r)
    im_data_g, im_bands2 = ImgMatTiff.tiff_to_mat(g)
    im_data_b, im_bands3 = ImgMatTiff.tiff_to_mat(b)
    ImgMatTiff.mat_to_img_rgb(im_data_r, im_data_g, im_data_b, save_path=savePath)
    # fy4 = r'D:\Data\002. 栅格数据\fy4A\20170721\FY4A-_AGRI--_N_DISK_1047E_L1-_FDI-_MULT_NOM_20170721004500_20170721005959_4000M_V0001.tif'
    #
    # ImgMatTiff.tiff_to_img_rgb(fy4, [6,7,12], img_path=save_path, assign_size=None)
    # ImgMatTiff.tiff_to_img(fy4, img_path=save_path, assign_size=None)
    """
    1. img.save(img), img 有四个波段,第四个就认为是透明图层
    """
| [
"18761609908@163.com"
] | 18761609908@163.com |
8fd2b5abe11973bb39a5ea5414185d6020233a5c | 8f73125d816f3b44b03159dba272e095f37c1f0c | /scripts/GC.py | e44c355231dd8f230cc820456915ffc1d42efce0 | [] | no_license | tarah28/nanopore | 356b218e5ca3dfb98e4dd7232d8f1c6303f899d1 | ec716ee15ab26d7bf33b7f7352ab8cad1c369ae8 | refs/heads/master | 2021-05-27T06:21:51.958938 | 2014-09-10T11:36:07 | 2014-09-10T11:36:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/usr/bin/python
import sys
from Bio import SeqIO
from Bio.SeqUtils import GC
# Stream FASTA records from the file named on the command line and print the
# GC percentage of each sequence (Biopython; Python 2 print statement).
for rec in SeqIO.parse(open(sys.argv[1]), "fasta"):
    print GC(rec.seq)
| [
"n.j.loman@bham.ac.uk"
] | n.j.loman@bham.ac.uk |
6fe58ce1fb865313489a03dc9cbef4f19f953283 | 2b502aae9bc33bac6c4b28d1e702591f2cbed690 | /terrascript/resource/dns.py | 8fd238ad486f0695dd33eb92a6bb8da3554771cd | [
"Python-2.0",
"BSD-2-Clause"
] | permissive | LeeroyC710/python-terrascript | 4c8fbe032e9b7dd8844d962f888c28f87a26ff77 | b8f3c3549b149c124e3e48e0cea0396332ad1a1d | refs/heads/master | 2020-12-28T03:58:04.502969 | 2020-01-19T21:46:52 | 2020-01-19T21:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # terrascript/resource/dns.py
import terrascript
class dns_a_record_set(terrascript.Resource):
    """Terraform ``dns_a_record_set`` resource (A records, DNS provider)."""
    pass
class dns_aaaa_record_set(terrascript.Resource):
    """Terraform ``dns_aaaa_record_set`` resource (AAAA records, DNS provider)."""
    pass
class dns_cname_record(terrascript.Resource):
    """Terraform ``dns_cname_record`` resource (CNAME record, DNS provider)."""
    pass
class dns_mx_record_set(terrascript.Resource):
    """Terraform ``dns_mx_record_set`` resource (MX records, DNS provider)."""
    pass
class dns_ns_record_set(terrascript.Resource):
    """Terraform ``dns_ns_record_set`` resource (NS records, DNS provider)."""
    pass
class dns_ptr_record(terrascript.Resource):
    """Terraform ``dns_ptr_record`` resource (PTR record, DNS provider)."""
    pass
class dns_srv_record_set(terrascript.Resource):
    """Terraform ``dns_srv_record_set`` resource (SRV records, DNS provider)."""
    pass
class dns_txt_record_set(terrascript.Resource):
    """Terraform ``dns_txt_record_set`` resource (TXT records, DNS provider)."""
    pass
# Explicit public API: one class per resource type of the Terraform DNS provider.
__all__ = [
    'dns_a_record_set',
    'dns_aaaa_record_set',
    'dns_cname_record',
    'dns_mx_record_set',
    'dns_ns_record_set',
    'dns_ptr_record',
    'dns_srv_record_set',
    'dns_txt_record_set',
]
"markus@juenemann.net"
] | markus@juenemann.net |
ac0f4899d4ebbe4d45351d1bb0bc011e329c2d38 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/redis/models/RedisKey.py | 4b67d2add41fd0fc6356753b584e5c9f4bdea130 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,319 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class RedisKey(object):
    """Description of one Redis key as reported by the JD Cloud Redis API."""
    def __init__(self, name, db, size, keyType, frequency=None):
        """
        :param name: key name
        :param db: index of the database the key lives in
        :param size: byte count for string keys; length/cardinality for
                     list, set, zset and hash keys
        :param keyType: one of the five types: string, list, set, zset, hash
        :param frequency: (Optional) access frequency of the key
        """
        # Store every constructor argument verbatim as an instance attribute.
        for attr, value in (('name', name), ('db', db), ('size', size),
                            ('keyType', keyType), ('frequency', frequency)):
            setattr(self, attr, value)
| [
"tancong@jd.com"
] | tancong@jd.com |
21877d77b4b1bf26d4c63363609811615fcedfb2 | 4c852fab792606580acb3f3a61b7f86ae25930b0 | /Python/UoM/3-UsingPythonToAccessWebData/Assignments/wk06/json2.py | d85bd8bffe3c3a47e3bf9ddff4a4707c8d4955fa | [] | no_license | hmchen47/Programming | a9767a78a35c0844a1366391f48b205ff1588591 | 9637e586eee5c3c751c96bfc5bc1d098ea5b331c | refs/heads/master | 2022-05-01T01:57:46.573136 | 2021-08-09T04:29:40 | 2021-08-09T04:29:40 | 118,053,509 | 2 | 1 | null | 2021-09-20T19:54:02 | 2018-01-19T00:06:04 | Python | UTF-8 | Python | false | false | 358 | py | import json
# Sample payload: a JSON array of two user records (Python 2 demo script).
# NOTE: the name `input` shadows the builtin; kept as-is.
input = '''
[
  { "id" : "001",
    "x" : "2",
    "name" : "Chuck"
  } ,
  { "id" : "009",
    "x" : "7",
    "name" : "Chuck"
  }
]'''
# Parse the JSON text into a list of dicts.
info = json.loads(input)
# print json.dumps(info, indent = 4)
print 'User count:', len(info)
# Print selected fields of every record (Python 2 print statements).
for item in info:
    print 'Name', item['name']
    print 'Id', item['id']
    print 'Attribute', item['x']
| [
"h.m.chen@ieee.org"
] | h.m.chen@ieee.org |
76d227e325f5ae99474dd87d0bb5ad3011dba504 | c0e9fe97583b8d431064e9bc382f8d4e4b7c2ad4 | /utils/import_gender.py | 5134a5acc0014f9de1fe89a49066b06d33c3aa64 | [] | no_license | satoriforos/data-api-website | 4f21c58702b9586ebc9aea1e6d1db9beb35da190 | 3fad87754568201c1a2dc345227837a1096d2513 | refs/heads/master | 2020-08-06T04:08:53.695134 | 2019-10-04T13:59:34 | 2019-10-04T13:59:34 | 212,810,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,043 | py | #!/usr/bin/env python3
import pandas as pd
from pathlib import Path
from settings.settings import settings
from modules.databasemanager.DatabaseManager import DatabaseManager
from modules.geolocation.UsCounty import UsCounty
from modules.geolocation.City import City
from modules.geolocation.UsState import UsState
from modules.geolocation.Country import Country
from modules.usdemographics.GenderCounty import GenderCounty
def get_database_connection(mysql_settings):
    """Build a DatabaseManager from the ``mysql`` section of the settings dict."""
    return DatabaseManager(
        host=mysql_settings["server"],
        port=mysql_settings["port"],
        user=mysql_settings["username"],
        password=mysql_settings["password"],
        db=mysql_settings["schema"],
        charset=mysql_settings["charset"],
    )
# Open the DB connection and preload the geography lookup tables used below.
database_manager = get_database_connection(settings["mysql"])
us_counties = database_manager.fetch_all(UsCounty(database_manager))
cities = database_manager.fetch_all(City(database_manager))
us_states = database_manager.fetch_all(UsState(database_manager))
countries = database_manager.fetch_all(Country(database_manager))
# Resolve the id of the United States; every imported row belongs to it.
country_id = None
for country in countries:
    if country.code == "US":
        country_id = country.id
        break
# Census workbook(s) to import.
file_paths = [
    Path("~/Downloads/County Demographic Datasets/SEX01.xls"),
]
# Maps census column codes to our output attribute names.
# Pattern: SEX1xxxYYD = male count for year 20YY, SEX2xxxYYD = female count.
header_translations = {
    "Area_name": "city_state",
    "STCOU": "county_code",
    "SEX150200D": "males_2000",
    "SEX150201D": "males_2001",
    "SEX150202D": "males_2002",
    "SEX150203D": "males_2003",
    "SEX150204D": "males_2004",
    "SEX150205D": "males_2005",
    "SEX150206D": "males_2006",
    "SEX150207D": "males_2007",
    "SEX150208D": "males_2008",
    "SEX150209D": "males_2009",
    "SEX100210D": "males_2010",
    "SEX250200D": "females_2000",
    "SEX250201D": "females_2001",
    "SEX250202D": "females_2002",
    "SEX250203D": "females_2003",
    "SEX250204D": "females_2004",
    "SEX250205D": "females_2005",
    "SEX250206D": "females_2006",
    "SEX250207D": "females_2007",
    "SEX250208D": "females_2008",
    "SEX250209D": "females_2009",
    "SEX200210D": "females_2010",
}
headers = list(header_translations.keys())
# Load every sheet of every workbook into one flat list of DataFrames.
excel_files = [
    pd.ExcelFile(file_path.expanduser().as_posix())
    for file_path in file_paths
]
sheets = []
for excel_file in excel_files:
    sheet_names = excel_file.sheet_names
    for sheet_name in sheet_names:
        sheets.append(pd.read_excel(excel_file, sheet_name))
# Build one GenderCounty row per spreadsheet row, merging data from all sheets.
gender_data = []
for i in range(0, sheets[0].shape[0]):
    gender_row = GenderCounty(database_manager)
    gender_row.country_id = country_id
    for sheet in sheets:
        # Copy every mapped numeric column present in this sheet.
        for input_header, output_header in header_translations.items():
            if input_header != "Area_name" and input_header != "STCOU":
                if input_header in sheet.keys():
                    setattr(gender_row, output_header, int(sheet[input_header][i]))
        # Resolve the geography only once per row.
        # NOTE(review): assumes a fresh GenderCounty starts with
        # county_code == None — confirm against the model definition.
        if gender_row.county_code is None:
            gender_row.country_id = country_id
            # "Area_name" is either "County, ST" or a bare state/aggregate name.
            city_state = sheet["Area_name"][i].split(", ")
            gender_row.county_name = city_state[0]
            gender_row.state_id = None
            gender_row.state_code = None
            if len(city_state) > 1:
                # County row: match the two-letter state code after the comma.
                for state in us_states:
                    if state.code == city_state[1].upper():
                        gender_row.state_id = state.id
                        gender_row.state_code = state.code
                        break
            else:
                # State-level aggregate row: no county name applies.
                for state in us_states:
                    if state.code.upper() == sheet["Area_name"][i]:
                        gender_row.state_id = state.id
                        gender_row.county_name = None
                        break
            gender_row.county_code = int(sheet["STCOU"][i])
    gender_data.append(gender_row)
#for gender_row in gender_data:
#    gender_row.database_manager = database_manager
#for gender_row in gender_data:
#    database_manager.insert(gender_row)
# Bulk insert (row-by-row variants kept above for reference).
database_manager.insert_many(gender_data)
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
8ecfcc9c5a994ea6745c9f9f0ca6bc160156ace3 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/C/cptasker/ta.py | 3c5a52688d846067ba383e2459f47873a5980b98 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,508 | py | #This one scrapes the top 200 list
import scraperwiki
import lxml.html
import decimal
import urlparse
import string
import urllib2
import time
# Global running counter: the overall ranking position saved with each hotel.
ID = 1
##############################################################################
# scrapeScore takes 2 urls url1/url2 and stores the name and average score
def scrapeScore(url, url2):
    """Scrape both review pages of one hotel and store its averaged score.

    Each page embeds its ratings as <img content="..."> values; of the first
    11 such values the leading one is layout noise, leaving 10 review scores
    per page. The two page averages are averaged again and saved together
    with the hotel name and its running overall position (global ID).
    """
    Rurl, Rurl2 = loadUrl(url, url2)
    root = lxml.html.fromstring(Rurl)
    root2 = lxml.html.fromstring(Rurl2)
    # Hotel name comes from the page <title>.
    # NOTE(review): hotelName stays unbound if the page has no <title>.
    titleTag = root.cssselect('title')
    for name in titleTag:
        hotelName = name.text
    global ID
    # First page: gather the numeric 'content' attribute of every <img>.
    L = []
    for img in root.cssselect('img'):
        if img.get('content') is not None:
            L.append(float(img.get('content')))
    del L[11:]  # keep only the first 11 values...
    del L[:1]   # ...and drop the leading non-score value
    average = sum(L) / 10
    # Second page: same extraction.
    # BUG FIX: the original appended the stale first-page `score` variable
    # here instead of the freshly parsed second-page value, so the second
    # average was just the last first-page score repeated.
    L2 = []
    for img in root2.cssselect('img'):
        if img.get('content') is not None:
            L2.append(float(img.get('content')))
    del L2[11:]
    del L2[:1]
    average2 = sum(L2) / 10
    finalAverage = (average + average2) / 2
    scraperwiki.sqlite.save(unique_keys=["HotelName"],
                            data={'HotelName': hotelName, 'Score': finalAverage, 'OverallPosition': ID},
                            table_name='scores')
    ID += 1
###############################################################################
# trying to handle random timeout error (may need to look at closing urllib)
def loadUrl(url, url2):
    """Fetch both pages, retrying on urllib2.URLError.

    Makes up to 3 attempts with a 30 second pause between them; the final
    failure is re-raised to the caller.
    """
    attempts = 3
    for attempt in range(1, attempts + 1):
        try:
            return scraperwiki.scrape(url), scraperwiki.scrape(url2)
        except urllib2.URLError:
            if attempt == attempts:
                raise
            time.sleep(30)
# never get here
##################################################################
#findUrl2 takes a url and finds the url of the next page (next 10 scores)
def findUrl2(htmlIN):
    """Return the absolute URL of the paging arrow on the given page.

    When several 'a.guiArw' links exist the last one wins (matching the
    original overwrite-in-a-loop behavior); with none at all a dummy URL
    is returned because the caller always expects a string.
    """
    page = scraperwiki.scrape(htmlIN)
    arrows = lxml.html.fromstring(page).cssselect('a.guiArw')
    if not arrows:
        return 'http://www.google.com'  # we have to return something!
    return 'http://www.tripadvisor.co.uk/' + arrows[-1].attrib.get('href')
###################################################################
def urlIn(root):
    """Scrape the two score pages of every hotel linked from this listing page."""
    for anchor in root.cssselect('div.quality a'):
        hotel_url = 'http://www.tripadvisor.co.uk' + anchor.attrib['href']
        second_page = findUrl2(hotel_url)
        scrapeScore(hotel_url, second_page)
##################################################################
#calls scrape (urlIn) function then finds the next link
def scrapeFindNext(nextUrl):
    """Scrape a listing page, then keep following 'next page' links until none remain."""
    while True:
        root = lxml.html.fromstring(scraperwiki.scrape(nextUrl))
        urlIn(root)  # scrape every hotel on this page
        arrows = root.cssselect('a.guiArw.sprite-pageNext')
        if not arrows:
            break
        nextUrl = urlparse.urljoin(baseUrl, arrows[0].attrib.get('href'))
########################################################################################
#need a function which takes the list of hotels and calls scrapefindnext for each link
def scrapeFindHotels(url):
    """From a city index page, walk every hotel listing linked via 'span.dt2'."""
    root = lxml.html.fromstring(scraperwiki.scrape(url))
    for anchor in root.cssselect('span.dt2 a'):
        scrapeFindNext('http://www.tripadvisor.co.uk' + anchor.attrib['href'])
#########################################################################################
#
#a function to take the a-z list and grabs list of hotels for each city
#
#def scrapeCityList(pageUrl):
# html = scraperwiki.scrape(pageUrl)
# root = lxml.html.fromstring(html)
# links = root.cssselect('td.dt1 a')
#
# for link in links[1:]:
#
# url = 'http://www.tripadvisor.co.uk/pages/' + link.attrib['href']
# print url
# scrapeFindHotels(url)
#
########################################################################################
# Base URL used by scrapeFindNext when resolving relative pagination links.
baseUrl = 'http://www.tripadvisor.co.uk/'
# Kick off the scrape for both city index pages (A-M and N-Z).
scrapeFindHotels('http://www.tripadvisor.co.uk/pages/citiesAM.html')
scrapeFindHotels('http://www.tripadvisor.co.uk/pages/citiesNZ.html')
#This one scrapes the top 200 list
import scraperwiki
import lxml.html
import decimal
import urlparse
import string
import urllib2
import time
# Global running counter: the overall ranking position saved with each hotel.
ID = 1
##############################################################################
# scrapeScore takes 2 urls url1/url2 and stores the name and average score
def scrapeScore(url, url2):
    """Scrape both review pages of one hotel and store its averaged score.

    Each page embeds its ratings as <img content="..."> values; of the first
    11 such values the leading one is layout noise, leaving 10 review scores
    per page. The two page averages are averaged again and saved together
    with the hotel name and its running overall position (global ID).
    """
    Rurl, Rurl2 = loadUrl(url, url2)
    root = lxml.html.fromstring(Rurl)
    root2 = lxml.html.fromstring(Rurl2)
    # Hotel name comes from the page <title>.
    # NOTE(review): hotelName stays unbound if the page has no <title>.
    titleTag = root.cssselect('title')
    for name in titleTag:
        hotelName = name.text
    global ID
    # First page: gather the numeric 'content' attribute of every <img>.
    L = []
    for img in root.cssselect('img'):
        if img.get('content') is not None:
            L.append(float(img.get('content')))
    del L[11:]  # keep only the first 11 values...
    del L[:1]   # ...and drop the leading non-score value
    average = sum(L) / 10
    # Second page: same extraction.
    # BUG FIX: the original appended the stale first-page `score` variable
    # here instead of the freshly parsed second-page value, so the second
    # average was just the last first-page score repeated.
    L2 = []
    for img in root2.cssselect('img'):
        if img.get('content') is not None:
            L2.append(float(img.get('content')))
    del L2[11:]
    del L2[:1]
    average2 = sum(L2) / 10
    finalAverage = (average + average2) / 2
    scraperwiki.sqlite.save(unique_keys=["HotelName"],
                            data={'HotelName': hotelName, 'Score': finalAverage, 'OverallPosition': ID},
                            table_name='scores')
    ID += 1
###############################################################################
# trying to handle random timeout error (may need to look at closing urllib)
def loadUrl(url, url2):
    """Fetch both pages, retrying on urllib2.URLError.

    Makes up to 3 attempts with a 30 second pause between them; the final
    failure is re-raised to the caller.
    """
    attempts = 3
    for attempt in range(1, attempts + 1):
        try:
            return scraperwiki.scrape(url), scraperwiki.scrape(url2)
        except urllib2.URLError:
            if attempt == attempts:
                raise
            time.sleep(30)
# never get here
##################################################################
#findUrl2 takes a url and finds the url of the next page (next 10 scores)
def findUrl2(htmlIN):
    """Return the url of the page holding the next 10 scores.

    Looks for the first 'a.guiArw' pagination anchor on the given page;
    when no such anchor exists a placeholder url is returned so callers
    always receive something fetchable.
    """
    page = scraperwiki.scrape(htmlIN)
    tree = lxml.html.fromstring(page)
    for anchor in tree.cssselect('a.guiArw'):
        # First matching anchor wins.
        return 'http://www.tripadvisor.co.uk/' + anchor.attrib.get('href')
    return 'http://www.google.com'  # we have to return something!
###################################################################
def urlIn(root):
    """Scrape every hotel linked from a parsed listing page.

    For each 'div.quality a' anchor, builds the hotel's first review url,
    resolves the url of its second review page and scrapes both.
    """
    for anchor in root.cssselect('div.quality a'):
        first_page = 'http://www.tripadvisor.co.uk' + anchor.attrib['href']
        second_page = findUrl2(first_page)
        scrapeScore(first_page, second_page)
##################################################################
#calls scrape (urlIn) function then finds the next link
def scrapeFindNext(nextUrl):
    """Scrape a page of hotel links, then walk the 'next page' chain.

    Iterates instead of recursing: each page is scraped via urlIn(), and
    the loop follows the 'a.guiArw.sprite-pageNext' anchor (resolved
    against the module-level baseUrl) until the pagination runs out.
    """
    while True:
        page = scraperwiki.scrape(nextUrl)
        tree = lxml.html.fromstring(page)
        urlIn(tree)  # scrape this page of links
        pagers = tree.cssselect('a.guiArw.sprite-pageNext')
        if not pagers:
            break
        nextUrl = urlparse.urljoin(baseUrl, pagers[0].attrib.get('href'))
########################################################################################
#need a function which takes the list of hotels and calls scrapefindnext for each link
def scrapeFindHotels(url):
    """Scrape every hotel listed on a city page.

    Collects all 'span.dt2 a' hotel anchors and hands each hotel's url
    to scrapeFindNext(), which walks that hotel's score pages.
    """
    tree = lxml.html.fromstring(scraperwiki.scrape(url))
    hotel_urls = ['http://www.tripadvisor.co.uk' + anchor.attrib['href']
                  for anchor in tree.cssselect('span.dt2 a')]
    for hotel_url in hotel_urls:
        scrapeFindNext(hotel_url)
#########################################################################################
#
#a function to take the a-z list and grabs list of hotels for each city
#
#def scrapeCityList(pageUrl):
# html = scraperwiki.scrape(pageUrl)
# root = lxml.html.fromstring(html)
# links = root.cssselect('td.dt1 a')
#
# for link in links[1:]:
#
# url = 'http://www.tripadvisor.co.uk/pages/' + link.attrib['href']
# print url
# scrapeFindHotels(url)
#
########################################################################################
# Root used when resolving relative 'next page' links in scrapeFindNext().
baseUrl = 'http://www.tripadvisor.co.uk/'
# Kick off the scrape for both city-list pages (A-M and N-Z).
scrapeFindHotels('http://www.tripadvisor.co.uk/pages/citiesAM.html')
scrapeFindHotels('http://www.tripadvisor.co.uk/pages/citiesNZ.html')
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
a619a2370f98ade6097f5eee217494a59fb8c848 | 026991d5749c55910f4c33cc6f35a778494ef89e | /Laboratory Works/Lab_7/Problems/Informatics/Loops/For/335.py | 86aa7d0128aee684c9b5ea93d22e666baa62e8a3 | [
"MIT"
] | permissive | diable201/WEB-development | e832df0e35a837fc5464d6b0dada1c8fd8c9783b | 370bd731b9a65a1658033a60c63abece11d4e259 | refs/heads/master | 2023-06-02T03:46:20.786310 | 2021-06-28T15:57:11 | 2021-06-28T15:57:11 | 336,358,294 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from math import sqrt
a = int(input())
b = int(input())
result = []
for i in range(a, b + 1):
j = int(sqrt(i))
if j * j == i:
result.append(i)
print(' '.join(str(i) for i in result))
| [
"diable201@protonmail.com"
] | diable201@protonmail.com |
f3368b9c95ad10ebfa7c6e9990af3b273864b5ed | 0e25538b2f24f1bc002b19a61391017c17667d3d | /cmdt/win_cmdtdirectory.py | e380d3bf0c25daf67520c700f8e97391f780840d | [] | no_license | trondhindenes/Ansible-Auto-Generated-Modules | 725fae6ba9b0eef00c9fdc21179e2500dfd6725f | efa6ac8cd2b545116f24c1929936eb8cc5c8d337 | refs/heads/master | 2020-04-06T09:21:00.756651 | 2016-10-07T07:08:29 | 2016-10-07T07:08:29 | 36,883,816 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,378 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# <COPYRIGHT>
# <CODEGENMETA>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_cmdtdirectory
version_added:
short_description: Generated from DSC module cmdt version 1.0.0.4 at 07.10.2016 00.47.03
description:
- A DSC Module for installing Microsoft Deployment Toolkit
options:
Ensure:
description:
-
required: True
default:
aliases: []
choices:
- Absent
- Present
Name:
description:
-
required: True
default:
aliases: []
Path:
description:
-
required: True
default:
aliases: []
PSDriveName:
description:
-
required: False
default:
aliases: []
PSDrivePath:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_username:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_password:
description:
-
required: False
default:
aliases: []
AutoInstallModule:
description:
- If true, the required dsc resource/module will be auto-installed using the Powershell package manager
required: False
default: false
aliases: []
choices:
- true
- false
AutoConfigureLcm:
description:
- If true, LCM will be auto-configured for directly invoking DSC resources (which is a one-time requirement for Ansible DSC modules)
required: False
default: false
aliases: []
choices:
- true
- false
| [
"trond@hindenes.com"
] | trond@hindenes.com |
bc2906b349072602366dcb6b45306532d65f9503 | ddd993057174b52a9c4ecffddda655504ccc2366 | /src/main/python/systemds/operator/algorithm/builtin/img_posterize.py | c6f2b41dcd3bc80e6a8d7069d6d0509a782f2bbc | [
"Apache-2.0"
] | permissive | atefeh-asayesh/systemds | 68840e3e8005d5bff3e76aeed811c7ab1cb89e8f | 96733360c8f600355d5600f2edb8960ba1d47861 | refs/heads/master | 2023-08-04T18:23:56.076995 | 2021-09-27T08:41:40 | 2021-09-27T08:41:40 | 368,129,199 | 0 | 0 | Apache-2.0 | 2021-06-08T20:22:08 | 2021-05-17T09:29:42 | Java | UTF-8 | Python | false | false | 1,694 | py | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/img_posterize.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def img_posterize(img_in: Matrix,
bits: int):
"""
:param img_in: Input image
:param bits: The number of bits keep for the values.
:param 1: and white, 8 means every integer between 0 and 255.
:return: 'OperationNode' containing
"""
params_dict = {'img_in': img_in, 'bits': bits}
return Matrix(img_in.sds_context,
'img_posterize',
named_input_nodes=params_dict)
| [
"baunsgaard@tugraz.at"
] | baunsgaard@tugraz.at |
2e92535c0ca6f3823476fc3db4570fe323ad764c | c989cc503ffd0ad57653f89ad49f882ae3a12839 | /env/bin/pildriver.py | 2e6a9017a9f66aa8ddbbf2bf2df1be9333364931 | [] | no_license | armandohuarcaya/SoftwareReservas | e963ae2e7baec1cfc6b6fa4f128b9884ae48f59b | 6e913d1ec2ac1cb692d51829eef9988356dbc016 | refs/heads/master | 2020-12-03T01:48:56.024828 | 2017-06-30T09:01:45 | 2017-06-30T09:01:45 | 95,869,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,525 | py | #!/home/abaro/pro/env/bin/python3
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
    """Stack machine (Polish-notation interpreter) for PIL operations.

    State is the per-instance evaluation stack; execute() pushes tokens
    and dispatches any string token that names a ``do_*`` method.
    """

    def __init__(self):
        # Per-instance state.  `stack` used to be a *class* attribute, so
        # every PILDriver instance shared one mutable list; give each
        # instance its own stack (and verbosity flag).
        self.verbose = 0
        self.stack = []  # Stack of pending operations

    def do_verbose(self):
        """usage: verbose <int:num>

        Set verbosity flag from top of stack.
        """
        self.verbose = int(self.do_pop())

    # The evaluation stack (internal only)

    def push(self, item):
        "Push an argument onto the evaluation stack."
        self.stack.insert(0, item)

    def top(self):
        "Return the top-of-stack element."
        return self.stack[0]

    # Stack manipulation (callable)

    def do_clear(self):
        """usage: clear

        Clear the stack.
        """
        self.stack = []

    def do_pop(self):
        """usage: pop

        Discard and return the top element on the stack.
        """
        return self.stack.pop(0)

    def do_dup(self):
        """usage: dup

        Duplicate the top-of-stack item.
        """
        # Fix: previously tested hasattr(self, 'format'), which is never
        # true for a PILDriver instance, so images were duplicated by
        # reference.  Test the top-of-stack item instead: images expose
        # .copy(), so they get a real copy; plain tokens are shared.
        if hasattr(self.stack[0], 'copy'):
            dup = self.stack[0].copy()
        else:
            dup = self.stack[0]
        self.push(dup)

    def do_swap(self):
        """usage: swap

        Swap the top-of-stack item with the next one down.
        """
        self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]

    # Image module functions (callable)

    def do_new(self):
        """usage: new <int:xsize> <int:ysize> <int:color>:

        Create and push a greyscale image of given size and color.
        """
        xsize = int(self.do_pop())
        ysize = int(self.do_pop())
        color = int(self.do_pop())
        self.push(Image.new("L", (xsize, ysize), color))

    def do_open(self):
        """usage: open <string:filename>

        Open the indicated image, read it, push the image on the stack.
        """
        self.push(Image.open(self.do_pop()))

    def do_blend(self):
        """usage: blend <image:pic1> <image:pic2> <float:alpha>

        Replace two images and an alpha with the blended image.
        """
        image1 = self.do_pop()
        image2 = self.do_pop()
        alpha = float(self.do_pop())
        self.push(Image.blend(image1, image2, alpha))

    def do_composite(self):
        """usage: composite <image:pic1> <image:pic2> <image:mask>

        Replace two images and a mask with their composite.
        """
        image1 = self.do_pop()
        image2 = self.do_pop()
        mask = self.do_pop()
        self.push(Image.composite(image1, image2, mask))

    def do_merge(self):
        """usage: merge <string:mode> <image:pic1>
                  [<image:pic2> [<image:pic3> [<image:pic4>]]]

        Merge top-of stack images in a way described by the mode.
        """
        mode = self.do_pop()
        bandlist = []
        for band in mode:
            # One image is popped per character of the mode string.
            bandlist.append(self.do_pop())
        self.push(Image.merge(mode, bandlist))

    # Image class methods

    def do_convert(self):
        """usage: convert <string:mode> <image:pic1>

        Convert the top image to the given mode.
        """
        mode = self.do_pop()
        image = self.do_pop()
        self.push(image.convert(mode))

    def do_copy(self):
        """usage: copy <image:pic1>

        Make and push a true copy of the top image.
        """
        # Fix: used to call the non-existent self.dup(); the duplicating
        # method is do_dup().
        self.do_dup()

    def do_crop(self):
        """usage: crop <int:left> <int:upper> <int:right> <int:lower>
                  <image:pic1>

        Crop and push a rectangular region from the current image.
        """
        left = int(self.do_pop())
        upper = int(self.do_pop())
        right = int(self.do_pop())
        lower = int(self.do_pop())
        image = self.do_pop()
        self.push(image.crop((left, upper, right, lower)))

    def do_draft(self):
        """usage: draft <string:mode> <int:xsize> <int:ysize> <image:pic1>

        Configure the loader of the top image for a given mode and size.
        """
        mode = self.do_pop()
        xsize = int(self.do_pop())
        ysize = int(self.do_pop())
        # Fix: used to call self.draft(), which does not exist on the
        # driver -- draft() belongs to the image on the stack.
        image = self.do_pop()
        image.draft(mode, (xsize, ysize))
        self.push(image)

    def do_filter(self):
        """usage: filter <string:filtername> <image:pic1>

        Process the top image with the given filter.
        """
        from PIL import ImageFilter
        imageFilter = getattr(ImageFilter, self.do_pop().upper())
        image = self.do_pop()
        self.push(image.filter(imageFilter))

    def do_getbbox(self):
        """usage: getbbox

        Push left, upper, right, and lower pixel coordinates of the top
        image (left ends up on top of the stack).
        """
        bounding_box = self.do_pop().getbbox()
        self.push(bounding_box[3])
        self.push(bounding_box[2])
        self.push(bounding_box[1])
        self.push(bounding_box[0])

    def do_getextrema(self):
        """usage: getextrema

        Push minimum and maximum pixel values of the top image.
        """
        # Fix: PIL images expose getextrema(), not extrema().
        extrema = self.do_pop().getextrema()
        self.push(extrema[1])
        self.push(extrema[0])

    def do_offset(self):
        """usage: offset <int:xoffset> <int:yoffset> <image:pic1>

        Offset the pixels in the top image.
        """
        xoff = int(self.do_pop())
        yoff = int(self.do_pop())
        image = self.do_pop()
        # NOTE(review): Image.offset() was removed from modern Pillow; if
        # this raises, switch to ImageChops.offset(image, xoff, yoff) --
        # confirm against the installed PIL/Pillow version.
        self.push(image.offset(xoff, yoff))

    def do_paste(self):
        """usage: paste <image:figure> <int:xoffset> <int:yoffset>
                  <image:ground>

        Paste figure image into ground with upper left at given offsets.
        """
        figure = self.do_pop()
        xoff = int(self.do_pop())
        yoff = int(self.do_pop())
        ground = self.do_pop()
        if figure.mode == "RGBA":
            # Use the figure's own alpha channel as the paste mask.
            ground.paste(figure, (xoff, yoff), figure)
        else:
            ground.paste(figure, (xoff, yoff))
        self.push(ground)

    def do_resize(self):
        """usage: resize <int:xsize> <int:ysize> <image:pic1>

        Resize the top image.
        """
        ysize = int(self.do_pop())
        xsize = int(self.do_pop())
        image = self.do_pop()
        self.push(image.resize((xsize, ysize)))

    def do_rotate(self):
        """usage: rotate <int:angle> <image:pic1>

        Rotate image through a given angle
        """
        angle = int(self.do_pop())
        image = self.do_pop()
        self.push(image.rotate(angle))

    def do_save(self):
        """usage: save <string:filename> <image:pic1>

        Save image with default options.
        """
        filename = self.do_pop()
        image = self.do_pop()
        image.save(filename)

    def do_save2(self):
        """usage: save2 <string:filename> <string:options> <image:pic1>

        Save image with specified options.
        """
        filename = self.do_pop()
        options = self.do_pop()
        image = self.do_pop()
        image.save(filename, None, options)

    def do_show(self):
        """usage: show <image:pic1>

        Display and pop the top image.
        """
        self.do_pop().show()

    def do_thumbnail(self):
        """usage: thumbnail <int:xsize> <int:ysize> <image:pic1>

        Modify the top image in the stack to contain a thumbnail of itself.
        """
        ysize = int(self.do_pop())
        xsize = int(self.do_pop())
        self.top().thumbnail((xsize, ysize))

    def do_transpose(self):
        """usage: transpose <string:operator> <image:pic1>

        Transpose the top image.
        """
        transpose = self.do_pop().upper()
        image = self.do_pop()
        self.push(image.transpose(transpose))

    # Image attributes

    def do_format(self):
        """usage: format <image:pic1>

        Push the format of the top image onto the stack.
        """
        self.push(self.do_pop().format)

    def do_mode(self):
        """usage: mode <image:pic1>

        Push the mode of the top image onto the stack.
        """
        self.push(self.do_pop().mode)

    def do_size(self):
        """usage: size <image:pic1>

        Push the image size on the stack as (y, x).
        """
        size = self.do_pop().size
        self.push(size[0])
        self.push(size[1])

    # ImageChops operations

    def do_invert(self):
        """usage: invert <image:pic1>

        Invert the top image.
        """
        from PIL import ImageChops
        self.push(ImageChops.invert(self.do_pop()))

    def do_lighter(self):
        """usage: lighter <image:pic1> <image:pic2>

        Pop the two top images, push an image of the lighter pixels of both.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.lighter(image1, image2))

    def do_darker(self):
        """usage: darker <image:pic1> <image:pic2>

        Pop the two top images, push an image of the darker pixels of both.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.darker(image1, image2))

    def do_difference(self):
        """usage: difference <image:pic1> <image:pic2>

        Pop the two top images, push the difference image
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.difference(image1, image2))

    def do_multiply(self):
        """usage: multiply <image:pic1> <image:pic2>

        Pop the two top images, push the multiplication image.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.multiply(image1, image2))

    def do_screen(self):
        """usage: screen <image:pic1> <image:pic2>

        Pop the two top images, superimpose their inverted versions.
        """
        from PIL import ImageChops
        image2 = self.do_pop()
        image1 = self.do_pop()
        self.push(ImageChops.screen(image1, image2))

    def do_add(self):
        """usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>

        Pop the two top images, produce the scaled sum with offset.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        scale = float(self.do_pop())
        offset = int(self.do_pop())
        self.push(ImageChops.add(image1, image2, scale, offset))

    def do_subtract(self):
        """usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>

        Pop the two top images, produce the scaled difference with offset.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        scale = float(self.do_pop())
        offset = int(self.do_pop())
        self.push(ImageChops.subtract(image1, image2, scale, offset))

    # ImageEnhance classes

    def do_color(self):
        """usage: color <float:factor> <image:pic1>

        Enhance color in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Color(image)
        self.push(enhancer.enhance(factor))

    def do_contrast(self):
        """usage: contrast <float:factor> <image:pic1>

        Enhance contrast in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Contrast(image)
        self.push(enhancer.enhance(factor))

    def do_brightness(self):
        """usage: brightness <float:factor> <image:pic1>

        Enhance brightness in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Brightness(image)
        self.push(enhancer.enhance(factor))

    def do_sharpness(self):
        """usage: sharpness <float:factor> <image:pic1>

        Enhance sharpness in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Sharpness(image)
        self.push(enhancer.enhance(factor))

    # The interpreter loop

    def execute(self, list):
        "Interpret a list of PILDriver commands."
        list.reverse()
        while len(list) > 0:
            self.push(list[0])
            list = list[1:]
            if self.verbose:
                print("Stack: " + repr(self.stack))
            top = self.top()
            if not isinstance(top, str):
                continue
            funcname = "do_" + top
            if not hasattr(self, funcname):
                # Unknown token: leave it on the stack as an operand.
                continue
            else:
                self.do_pop()
                func = getattr(self, funcname)
                func()
if __name__ == '__main__':
    import sys
    # If we see command-line arguments, interpret them as a stack state
    # and execute. Otherwise go interactive.
    driver = PILDriver()
    if len(sys.argv[1:]) > 0:
        driver.execute(sys.argv[1:])
    else:
        print("PILDriver says hello.")
        while True:
            try:
                # raw_input only exists on Python 2; input() is the
                # Python 3 equivalent.
                if sys.version_info[0] >= 3:
                    line = input('pildriver> ')
                else:
                    line = raw_input('pildriver> ')
            except EOFError:
                # Ctrl-D / end of piped input ends the session cleanly.
                print("\nPILDriver says goodbye.")
                break
            # Each line is split into whitespace-separated tokens, fed to
            # the stack machine, and the remaining stack is echoed.
            driver.execute(line.split())
            print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
| [
"armando_huarcaya@upeu.edu.pe"
] | armando_huarcaya@upeu.edu.pe |
7fb067b74a39322835924b99508c932a474ff19d | 8c209079e798c53a5a149613de06f96d10ad756a | /backend/tst_al_11171_dev_15203/wsgi.py | 3faa1e00012a46d702dc1e691a81218d24ab281d | [] | no_license | crowdbotics-apps/tst-al-11171-dev-15203 | d415d3a80eca99c6f76740db887de1345bdf1306 | c7e456053f15411f8e6635d4888520e335c5980b | refs/heads/master | 2023-01-13T20:57:17.850058 | 2020-11-17T22:26:37 | 2020-11-17T22:26:37 | 313,758,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | """
WSGI config for tst_al_11171_dev_15203 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to this project's settings module when the environment does
# not already define DJANGO_SETTINGS_MODULE.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tst_al_11171_dev_15203.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
8af796943db03458872ba948f02d5d4666aa68ea | 284c49f1a514088cd25b29375c1e2c6c2b5d9628 | /src/conf/gunicorn_conf.py | f4b6a9b80ee828bf0a8acb8d2f75d143b7cd272d | [] | no_license | oscar6echo/redirect-server | 2397578680c194c086f0d456da862f71f004f95c | d72ae59ec32595519f976d46b0556b5370c2936e | refs/heads/master | 2020-04-26T12:35:11.232084 | 2020-03-16T10:32:15 | 2020-03-16T10:32:15 | 173,554,405 | 0 | 0 | null | 2019-05-28T15:38:37 | 2019-03-03T09:23:41 | Vue | UTF-8 | Python | false | false | 290 | py |
bind = '0.0.0.0:5000'
backlog = 2048
workers = 1
worker_class = 'sync'
worker_connections = 1000
timeout = 30
keepalive = 2
daemon = False
errorlog = '-'
loglevel = 'debug'
accesslog = '-'
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
reload = True
| [
"olivier.borderies@gmail.com"
] | olivier.borderies@gmail.com |
c8eb4caf29ca60835a43044d8d8a3a9e735b9d52 | ccc545fb3f4107741c715b41976b72177bea0957 | /encoding/functions/basic.py | e7cc2e09ae4b16466ec8c5af8f6ee0388792d906 | [
"MIT"
] | permissive | lfz/PyTorch-Encoding | 98eada3a73da6f6f7bed08e1a079c9071f1638a4 | dbcae04e3fb29417fbafcb2cc96d87def3739400 | refs/heads/master | 2021-05-02T11:27:54.213818 | 2018-02-08T15:27:36 | 2018-02-08T15:27:36 | 120,778,758 | 1 | 0 | null | 2018-02-08T15:24:08 | 2018-02-08T15:24:07 | null | UTF-8 | Python | false | false | 7,871 | py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import threading
import torch
import torch.nn.functional as F
from torch.autograd import Function, Variable
__all__ = ['squeeze_each', 'view_each', 'multi_each', 'sum_each',
'cat_each', 'upsample', 'dropout', 'relu']
def squeeze_each(x, dim=None):
    """Multi-GPU version of torch.squeeze().

    Applies .squeeze() -- optionally restricted to dimension `dim` -- to
    every tensor in the input list and returns the results as a new list.
    """
    if dim is None:
        return [tensor.squeeze() for tensor in x]
    return [tensor.squeeze(dim) for tensor in x]
def view_each(x, size):
    """Multi-GPU version of torch.Tensor.view().

    Reshapes every tensor in the list to `size`; each output shares the
    data of its input, which therefore must be contiguous and contain the
    same number of elements.

    Args:
        x: list of multi-gpu tensors
        size (torch.Size or int...): Desired size
    """
    return [tensor.view(size) for tensor in x]
def multi_each(a, b):
    """Multi-GPU elementwise multiplication.

    .. math::
        y[i] = a[i] * b[i]
    """
    # Index over len(a), exactly as the original loop did.
    return [a[i] * b[i] for i in range(len(a))]
def sum_each(x, y):
    """Multi-GPU elementwise addition.

    .. math::
        z[i] = x[i] + y[i]
    """
    assert(len(x) == len(y))
    return [xi + yi for xi, yi in zip(x, y)]
def cat_each(x1, x2, dim):
    """Multi-GPU version of torch.cat.

    .. math::
        y[i] = torch.cat(x1[i], x2[i], dim)

    Each concatenation runs on the device holding the first operand.
    """
    assert(len(x1) == len(x2))
    merged = []
    for first, second in zip(x1, x2):
        with torch.cuda.device_of(first):
            merged.append(torch.cat((first, second), dim))
    return merged
def dict_to_list(x):
    """Convert a dict keyed 0..len(x)-1 into an ordered list.

    Worker threads store either results or caught exceptions under their
    index; any stored exception is re-raised here on the calling thread.
    """
    ordered = [x[idx] for idx in range(len(x))]
    for item in ordered:
        if isinstance(item, Exception):
            raise item
    return ordered
def upsample(input, size=None, scale_factor=None, mode='nearest'):
    """Multi-GPU version of torch.nn.functional.upsample.

    Upsamples the input to either the given :attr:`size` or the given
    :attr:`scale_factor`.  Expected inputs are 3-D, 4-D or 5-D of shape
    `mini-batch x channels x [depth] x [height] x width`.

    Args:
        input (Variable or list/tuple of Variable): single tensor, or one
            tensor per GPU (each is upsampled on its own device).
        size (int or Tuple[int] ...): output spatial size.
        scale_factor (int): multiplier for spatial size. Has to be an
            integer.
        mode (string): 'nearest' | 'linear' | 'bilinear' | 'trilinear'.

    Raises:
        RuntimeError: if input is neither a Variable nor a list/tuple.
    """
    if isinstance(input, Variable):
        return F.upsample(input, size=size, scale_factor=scale_factor,
                          mode=mode)
    elif isinstance(input, tuple) or isinstance(input, list):
        lock = threading.Lock()
        results = {}

        def _worker(i, x):
            try:
                with torch.cuda.device_of(x):
                    result = F.upsample(x, size=size,
                                        scale_factor=scale_factor, mode=mode)
                with lock:
                    results[i] = result
            except Exception as e:
                # Fix: this wrote to a misspelled name ('resutls'), raising
                # a NameError inside the worker and losing the real error;
                # storing the exception lets dict_to_list re-raise it.
                with lock:
                    results[i] = e

        # One thread per GPU tensor.
        threads = [threading.Thread(target=_worker, args=(i, x))
                   for i, x in enumerate(input)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        return dict_to_list(results)
    else:
        raise RuntimeError('unknown input type')
def dropout(input, p=0.5, training=False, inplace=True):
    """Multi-GPU version of torch.nn.functional.dropout.

    The channels to zero-out are randomized on every forward call.

    Args:
        input (Variable or list/tuple of Variable): single tensor, or one
            tensor per GPU (dropout runs on each tensor's own device).
        p (float, optional): probability of an element to be zeroed.
        training (bool, optional): apply dropout only when True.
        inplace (bool, optional): If set to True, will do this operation
            in-place.

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Raises:
        RuntimeError: if input is neither a Variable nor a list/tuple.
    """
    if isinstance(input, Variable):
        return F.dropout(input, p, training, inplace)
    elif isinstance(input, tuple) or isinstance(input, list):
        lock = threading.Lock()
        results = {}

        def _worker(i, x):
            try:
                with torch.cuda.device_of(x):
                    result = F.dropout(x, p, training, inplace)
                with lock:
                    results[i] = result
            except Exception as e:
                # Fix: was 'resutls[i] = e' (NameError in the worker's
                # handler, losing the original exception).
                with lock:
                    results[i] = e

        # One thread per GPU tensor.
        threads = [threading.Thread(target=_worker, args=(i, x))
                   for i, x in enumerate(input)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        return dict_to_list(results)
    else:
        raise RuntimeError('unknown input type')
def relu(input, inplace=False):
    """Multi-GPU version of torch.nn.functional.relu.

    Applies the rectified linear unit function element-wise,
    :math:`{ReLU}(x) = max(0, x)`.

    Args:
        input (Variable or list/tuple of Variable): single tensor, or one
            tensor per GPU (ReLU runs on each tensor's own device).
        inplace: can optionally do the operation in-place. Default: False

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Output: :math:`(N, *)`, same shape as the input

    Raises:
        RuntimeError: if input is neither a Variable nor a list/tuple.
    """
    if isinstance(input, Variable):
        return F.relu(input, inplace)
    elif isinstance(input, tuple) or isinstance(input, list):
        lock = threading.Lock()
        results = {}

        def _worker(i, x):
            try:
                with torch.cuda.device_of(x):
                    result = F.relu(x, inplace)
                with lock:
                    results[i] = result
            except Exception as e:
                # Fix: was 'resutls[i] = e' (NameError in the worker's
                # handler, losing the original exception).
                with lock:
                    results[i] = e

        # One thread per GPU tensor.
        threads = [threading.Thread(target=_worker, args=(i, x))
                   for i, x in enumerate(input)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        return dict_to_list(results)
    else:
        raise RuntimeError('unknown input type')
| [
"zhang.hang@rutgers.edu"
] | zhang.hang@rutgers.edu |
45ce06463620ba75517c876a1aa7fa66b990db3c | e9c9e38ed91969df78bbd7f9ca2a0fdb264d8ddb | /lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_web_proxy_url_match.py | ea53f2672a69ee663d05463797204ebfd95b0008 | [] | no_license | Arceusir/PRELIM_SKILLS_EXAM | 882fcf2868926f0bbfe1fb18d50e5fe165936c02 | b685c5b28d058f59de2875c7579739c545df2e0c | refs/heads/master | 2023-08-15T07:30:42.303283 | 2021-10-09T01:27:19 | 2021-10-09T01:27:19 | 415,167,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,156 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_web_proxy_url_match
short_description: Exempt URLs from web proxy forwarding and caching in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify web_proxy feature and url_match category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
web_proxy_url_match:
description:
- Exempt URLs from web proxy forwarding and caching.
default: null
type: dict
suboptions:
cache_exemption:
description:
- Enable/disable exempting this URL pattern from caching.
type: str
choices:
- enable
- disable
comment:
description:
- Comment.
type: str
forward_server:
description:
- Forward server name. Source web-proxy.forward-server.name web-proxy.forward-server-group.name.
type: str
name:
description:
- Configure a name for the URL to be exempted.
required: true
type: str
status:
description:
- Enable/disable exempting the URLs matching the URL pattern from web proxy forwarding and caching.
type: str
choices:
- enable
- disable
url_pattern:
description:
- URL pattern to be exempted from web proxy forwarding and caching.
type: str
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Exempt URLs from web proxy forwarding and caching.
fortios_web_proxy_url_match:
vdom: "{{ vdom }}"
state: "present"
access_token: "<your_own_value>"
web_proxy_url_match:
cache_exemption: "enable"
comment: "Comment."
forward_server: "<your_own_value> (source web-proxy.forward-server.name web-proxy.forward-server-group.name)"
name: "default_name_6"
status: "enable"
url_pattern: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_web_proxy_url_match_data(json):
    """Keep only the options this module supports, dropping unset (None) values."""
    option_list = ('cache_exemption', 'comment', 'forward_server',
                   'name', 'status', 'url_pattern')
    return {attr: json[attr] for attr in option_list
            if attr in json and json[attr] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys, replacing '_' with '-'.

    Lists are rewritten in place (object identity preserved); dicts are
    rebuilt; any other value is returned untouched.
    """
    if isinstance(data, list):
        data[:] = [underscore_to_hyphen(item) for item in data]
    elif isinstance(data, dict):
        data = {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def web_proxy_url_match(data, fos, check_mode=False):
    """Create, update or delete a web-proxy url-match entry.

    Args:
        data: module parameters; expects 'vdom', 'state' and
            'web_proxy_url_match' keys.
        fos: FortiOSHandler used to talk to the device.
        check_mode: when True, only report whether a change would occur.

    Returns:
        In check mode: (is_error, would_change, filtered_data).
        Otherwise: the raw response dict from the FortiOS API.
    """
    vdom = data['vdom']
    state = data['state']
    web_proxy_url_match_data = data['web_proxy_url_match']
    filtered_data = underscore_to_hyphen(filter_web_proxy_url_match_data(web_proxy_url_match_data))
    # check_mode starts from here
    if check_mode:
        # Bug fix: these lookups previously queried the unrelated
        # 'system'/'interface' endpoint (copy/paste error); check mode must
        # inspect the same 'web-proxy'/'url-match' table that set()/delete()
        # operate on below.
        mkey = fos.get_mkey('web-proxy', 'url-match', filtered_data, vdom=vdom)
        current_data = fos.get('web-proxy', 'url-match', vdom=vdom, mkey=mkey)
        is_existed = current_data and current_data.get('http_status') == 200 \
            and isinstance(current_data.get('results'), list) \
            and len(current_data['results']) > 0
        # 2. if it exists and the state is 'present' then compare current settings with desired
        if state == 'present' or state is True:
            if mkey is None:
                # No primary key in the desired config: creating is a change.
                return False, True, filtered_data
            # if mkey exists then compare each other
            # record exists and they're matched or not
            if is_existed:
                is_same = is_same_comparison(
                    serialize(current_data['results'][0]), serialize(filtered_data))
                return False, not is_same, filtered_data
            # record does not exist
            return False, True, filtered_data
        if state == 'absent':
            if mkey is None:
                return False, False, filtered_data
            if is_existed:
                return False, True, filtered_data
            return False, False, filtered_data
        return True, False, {'reason: ': 'Must provide state parameter'}
    if state == "present" or state is True:
        return fos.set('web-proxy',
                       'url-match',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('web-proxy',
                          'url-match',
                          mkey=filtered_data['name'],
                          vdom=vdom)
    else:
        fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(status):
    """True when the call succeeded outright, or was a DELETE that found nothing (404)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_web_proxy(data, fos, check_mode):
    """Route the task body to the url-match handler and normalise its outcome."""
    if data['web_proxy_url_match']:
        resp = web_proxy_url_match(data, fos, check_mode)
    else:
        fos._module.fail_json(msg='missing task body: %s' % ('web_proxy_url_match'))
    if check_mode:
        return resp
    # Treat a response without 'revision_changed' as changed.
    changed = resp['revision_changed'] if 'revision_changed' in resp else True
    is_error = not is_successful_status(resp)
    return is_error, resp['status'] == "success" and changed, resp
versioned_schema = {
"type": "list",
"children": {
"status": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"comment": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"cache_exemption": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"forward_server": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"url_pattern": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
def main():
    """Ansible entry point: build the argument spec from the versioned
    schema, open the httpapi connection, run the task and exit."""
    module_spec = schema_to_module_spec(versioned_schema)
    mkeyname = 'name'
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        "enable_log": {"required": False, "type": bool},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "web_proxy_url_match": {
            "required": False, "type": "dict", "default": None,
            "options": {
            }
        }
    }
    # Copy every option from the generated schema into the task-body spec;
    # the table's primary key (mkey, here 'name') is always required.
    for attribute_name in module_spec['options']:
        fields["web_proxy_url_match"]['options'][attribute_name] = module_spec['options'][attribute_name]
        if mkeyname and mkeyname == attribute_name:
            fields["web_proxy_url_match"]['options'][attribute_name]['required'] = True
    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=True)
    versions_check_result = None
    # A socket path means we are running over the httpapi connection plugin.
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])
        if 'enable_log' in module.params:
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        # Warn (below) if the target FortiOS version doesn't match the schema.
        versions_check_result = check_schema_versioning(fos, versioned_schema, "web_proxy_url_match")
        is_error, has_changed, result = fortios_web_proxy(module.params, fos, module.check_mode)
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    if versions_check_result and versions_check_result['matched'] is False:
        module.warn("Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv")
    # Exit with the version-mismatch warning attached when applicable.
    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"aaronchristopher.dalmacio@gmail.com"
] | aaronchristopher.dalmacio@gmail.com |
ecde5f0d489cadb5227aa49e7e8fe29890b68494 | 9680800074ee2f50c7f9573076f0414b0b37cc70 | /backend/home/migrations/0003_test.py | f8d4b6538dd97edf54edbec05074b17072795872 | [] | no_license | crowdbotics-apps/testnewmobile-dev-1517 | 6bc6b0df8bef375dd13a750ad4f4894e55465482 | 6eeb801f949dac5d47391fdef4c99c3c2750b19a | refs/heads/master | 2022-11-18T04:01:52.930493 | 2020-07-15T23:38:56 | 2020-07-15T23:38:56 | 234,537,030 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | # Generated by Django 2.2.9 on 2020-01-23 10:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``Test`` model (auto-generated by ``makemigrations``)."""
    # Must be applied after the initial-data fixture migration.
    dependencies = [
        ("home", "0002_load_initial_data"),
    ]
    operations = [
        migrations.CreateModel(
            name="Test",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # High-precision decimal: 30 digits total, 10 after the point.
                ("test", models.DecimalField(decimal_places=10, max_digits=30)),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
235a3f420a9e5d6da034ccc5fa3786a231a5c5c5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_mixing.py | 3fd7241702edc77070a2b1628ae489ab3e937317 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
#class header
class _MIXING():
def __init__(self,):
self.name = "MIXING"
self.definitions = mix
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['mix']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
518d57e4b6824f9d2d23efade951dfe404bad0ca | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_waivers.py | 4561725c43217e00c698770290379ce1da4c364a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#class header
class _WAIVERS():
def __init__(self,):
self.name = "WAIVERS"
self.definitions = waiver
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['waiver']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ef9227af50c4ba29def9aa46fa510d5b13377a44 | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/mobile_api/urls.py | a6fec8dd978ca0c55326ed76d877e0eff6cfc7b0 | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 341 | py | """
URLs for mobile API
"""
from django.conf.urls import include, url
from .users.views import my_user_info
# Routes: per-user endpoints, a "who am I" convenience view, and course info.
urlpatterns = [
    url(r'^users/', include('lms.djangoapps.mobile_api.users.urls')),
    url(r'^my_user_info', my_user_info, name='user-info'),
    url(r'^course_info/', include('lms.djangoapps.mobile_api.course_info.urls')),
]
| [
"rafael.luque@osoco.es"
] | rafael.luque@osoco.es |
d5da1b70e6407c1638c9816437723719580a57d4 | 70730512e2643833e546e68761ee6cd3d7b95e1d | /01-python基础/code/day14/module01.py | be1e6837225cdfe1bbc42fd6506d29dd55e3f212 | [] | no_license | Yuchen1995-0315/review | 7f0b0403aea2da62566642c6797a98a0485811d1 | 502859fe11686cc59d2a6d5cc77193469997fe6a | refs/heads/master | 2020-08-26T23:16:33.193952 | 2019-10-24T00:30:32 | 2019-10-24T00:30:32 | 217,177,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | """
Module01 模块
"""
def fun01():
    """Demo function: announce itself on stdout."""
    label = "Module01 -- fun01"
    print(label)
def fun02():
    """Demo function: announce itself on stdout."""
    label = "Module01 -- fun02"
    print(label)
def fun03():
    """Demo function: announce itself on stdout."""
    label = "Module01 -- fun03"
    print(label)
"2456830920@qq.com"
] | 2456830920@qq.com |
d2081c5594a00d4d326c9ed1272419e2f5280042 | e86364b36b82c24596dd71f9fa2221d036e8defc | /collections/ansible_collections/cisco/nxos/plugins/modules/nxos_hsrp_interfaces.py | 4f1e68c3cdc543565ca3705927e1027b20a80ae6 | [] | no_license | ganeshrn/network_collections_migration | b3f11be5ecb9557787bcd12ca01b227379c7c102 | 8f56b60bfde606b291627665a1218bf7ce15f3a1 | refs/heads/master | 2020-09-12T12:10:58.189645 | 2019-11-18T11:44:48 | 2019-11-18T11:44:48 | 222,419,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,905 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Cisco and/or its affiliates.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for nxos_hsrp_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = '''module: nxos_hsrp_interfaces
short_description: Manages HSRP attributes of NXOS interfaces.
description: Manages Hot Standby Router Protocol (HSRP) interface attributes.
author: Chris Van Heuveln (@chrisvanheuveln)
notes: null
options:
config:
description: The provided configuration
type: list
elements: dict
suboptions:
name:
type: str
description: The name of the interface.
bfd:
type: str
description:
- Enable/Disable HSRP Bidirectional Forwarding Detection (BFD) on the interface.
choices:
- enable
- disable
state:
description:
- The state the configuration should be left in
type: str
choices:
- merged
- replaced
- overridden
- deleted
default: merged
'''
EXAMPLES = """
# Using deleted
- name: Configure hsrp attributes on interfaces
nxos_hsrp_interfaces:
config:
- name: Ethernet1/1
- name: Ethernet1/2
operation: deleted
# Using merged
- name: Configure hsrp attributes on interfaces
nxos_hsrp_interfaces:
config:
- name: Ethernet1/1
bfd: enable
- name: Ethernet1/2
bfd: disable
operation: merged
# Using overridden
- name: Configure hsrp attributes on interfaces
nxos_hsrp_interfaces:
config:
- name: Ethernet1/1
bfd: enable
- name: Ethernet1/2
bfd: disable
operation: overridden
# Using replaced
- name: Configure hsrp attributes on interfaces
nxos_hsrp_interfaces:
config:
- name: Ethernet1/1
bfd: enable
- name: Ethernet1/2
bfd: disable
operation: replaced
"""
RETURN = """
before:
description: The configuration prior to the model invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The resulting configuration model invocation.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['interface Ethernet1/1', 'hsrp bfd']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.hsrp_interfaces.hsrp_interfaces import Hsrp_interfacesArgs
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.config.hsrp_interfaces.hsrp_interfaces import Hsrp_interfaces
def main():
    """Entry point: build the AnsibleModule from the generated argument
    spec, run the hsrp_interfaces resource module and report the result.
    """
    ansible_module = AnsibleModule(
        argument_spec=Hsrp_interfacesArgs.argument_spec,
        supports_check_mode=True,
    )
    result = Hsrp_interfaces(ansible_module).execute_module()
    ansible_module.exit_json(**result)
if __name__ == '__main__':
main()
| [
"ganesh634@gmail.com"
] | ganesh634@gmail.com |
86b2703f93f197bc7cf3ed852e7f82e536e6d092 | 17a0371a52c00e949460a891702109d1471d19af | /backend/no_crop_ig_stories_20282/urls.py | 5dcff4097643848f9f51e9868241285e295389ba | [] | no_license | crowdbotics-apps/no-crop-ig-stories-20282 | 2df2d1957f660e7fcd89f9a9b6619cb1fe54a6dc | a79f7e9b3ed3533b2e6fcbc2562db32f2d50d46a | refs/heads/master | 2022-12-12T18:17:55.054916 | 2020-09-15T08:27:57 | 2020-09-15T08:27:57 | 295,663,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,979 | py | """no_crop_ig_stories_20282 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Core routes: app pages, allauth account views, versioned API, admin,
# user pages and rest_auth endpoints.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "No Crop IG Stories"
admin.site.site_title = "No Crop IG Stories Admin Portal"
admin.site.index_title = "No Crop IG Stories Admin"
# swagger
api_info = openapi.Info(
    title="No Crop IG Stories API",
    default_version="v1",
    description="API documentation for No Crop IG Stories App",
)
# Swagger UI is restricted to authenticated users.
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
905c0a9dbb7ada9c3de9b778a76664ef072ef7c6 | 284f4775828d155fd289697f5da52cf138e8c937 | /abc104/a.py | c8723430c9a572ab922018eb1b2075e7fb5bfe42 | [] | no_license | aikiyy/AtCoder | f006d9eec4bea0c265b700259fa4a43790f92df0 | e4208bcc708c301b088d01528294fe013a475f21 | refs/heads/master | 2020-06-29T16:57:29.692986 | 2020-01-20T04:53:08 | 2020-01-20T04:53:08 | 200,572,345 | 0 | 0 | null | 2020-01-19T15:36:57 | 2019-08-05T03:00:55 | Python | UTF-8 | Python | false | false | 157 | py | r = int(input())
# Rating bands: <1200 -> ABC, <2800 -> ARC, otherwise AGC.
if r < 1200:
    contest = 'ABC'
elif r < 2800:
    contest = 'ARC'
else:
    contest = 'AGC'
print(contest)
# print(['ABC', 'ARC', 'AGC'][int(input())//50+8>>5])
| [
"aiki.yougai@gmail.com"
] | aiki.yougai@gmail.com |
c87942ff1d3c1afae847306e4346fda63c7e5397 | 919817200361a55f194e32ad1636a5a7054e4c58 | /传统方法爬虫/requests/模拟登陆.py | 012c9349753b677de5452930ae6997a38fd7152d | [] | no_license | luoshanya/crawl_project | 2d68272d6ef71c9fb033dc267ce6b56dfebfddc4 | 2881093ad37dd005be72f8b2214cadc97255470f | refs/heads/master | 2020-07-05T01:36:28.985965 | 2019-08-23T02:06:03 | 2019-08-23T02:06:03 | 202,484,100 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,581 | py | import requests
import ssl
# SSL error seen without verify=False: requests.exceptions.SSLError: HTTPSConnectionPool(host='www.yaozh.com', port=443): Max retries exceeded with url: /login/ (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1051)')))
def crawl_simulation_login():
    """Simulate a logged-in session on yaozh.com using a captured cookie
    string, then save the member page to simulation_login.html.

    NOTE(review): login_from_data is built but never sent (the POST below
    has no data= argument) — confirm whether the form post is needed.
    """
    login_url = "https://www.yaozh.com/login"
    member_url = "https://www.yaozh.com/member/"
    login_from_data = {
        "username": "zhukelin",
        "pwd": "as751286012",
        "formhash": "6E1A90D3A4",
        "backurl": "https%3A%2F%2Fwww.yaozh.com%2F"
    }
    # The captured Cookie header is a single string ...
    cookies = 'acw_tc=2f624a0515512677621551497e3b45fc21cc4993bec3b66097298b4d2f5af6; _ga=GA1.2.1912755189.1551267759; _gid=GA1.2.1265000741.1551267759; MEIQIA_EXTRA_TRACK_ID=1HlDPR7oAd6PDoNK1fY0ZfeO5PX; think_language=zh-CN; _gat=1; yaozh_userId=700746; yaozh_uidhas=1; acw_tc=2f624a0515512677621551497e3b45fc21cc4993bec3b66097298b4d2f5af6; MEIQIA_VISIT_ID=1HlDPSplYHyyd7siY0XAKgfkoDK; MEIQIA_EXTRA_TRACK_ID=1HlDPR7oAd6PDoNK1fY0ZfeO5PX; UtzD_f52b_saltkey=NzGN3K1P; UtzD_f52b_lastvisit=1551264450; UtzD_f52b_ulastactivity=1551268037%7C0; UtzD_f52b_creditnotice=0D0D2D0D0D0D0D0D0D642384; UtzD_f52b_creditbase=0D0D0D0D0D0D0D0D0; UtzD_f52b_creditrule=%E6%AF%8F%E5%A4%A9%E7%99%BB%E5%BD%95; PHPSESSID=fo15u1d4ongpgtkgebsrcd4356; MEIQIA_VISIT_ID=1HmsqntTEG0EiuP1HZM1bzIiO52; yaozh_logintime=1551322261; yaozh_user=700746%09zhukelin; db_w_auth=642384%09zhukelin; UtzD_f52b_lastact=1551322262%09uc.php%09; UtzD_f52b_auth=0399WzIhvn1Fu%2F47yHRCEdQnDn59FdE8ZuTYR1ONc%2BKlHDxRExiBnwuBVPQP3lM%2FskfcEMyd5%2FG6jJkbockEeJTdLuI; yaozh_mylogin=1551322265; Hm_lvt_65968db3ac154c3089d7f9a4cbb98c94=1551267759%2C1551267973%2C1551318793; Hm_lpvt_65968db3ac154c3089d7f9a4cbb98c94=1551322269'
    # ... but requests' cookies= argument needs a dict; passing the raw string
    # raised: TypeError: string indices must be integers
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
    }
    # Turn "k1=v1; k2=v2; ..." into a dict.
    # NOTE(review): split('=') truncates any cookie value that itself
    # contains '=' — split on the first '=' only if that matters.
    cookies_split = cookies.split('; ')
    cookies_data = {cookies.split('=')[0]:cookies.split('=')[1] for cookies in cookies_split}
    print(cookies_data)
    login_request = requests.post(login_url,headers=header,cookies=cookies_data,verify=False)
    # print(login_request)
    # NOTE(review): this GET sends no cookies, so it presumably fetches the
    # anonymous member page — verify whether cookies_data should be passed.
    data = requests.get(url=member_url,headers=header,verify=False).content.decode('utf-8')
    with open("simulation_login.html","w",encoding='utf-8') as f:
        f.write(data)
crawl_simulation_login()
| [
"310927880@qq.com"
] | 310927880@qq.com |
fbf195990428f078f116132132635148407abaa2 | c9ad6ad969de505b3c8471c6f46dfd782a0fb498 | /0x0F-python-object_relational_mapping/10-model_state_my_get.py | e1b8c12f3ce8649549fd011e01d9d604ab15cec7 | [] | no_license | enterpreneur369/holbertonschool-higher_level_programming | 002fd5a19b40c8b1db06b34c4344e307f24c17ac | dd7d3f14bf3bacb41e2116d732ced78998a4afcc | refs/heads/master | 2022-06-20T00:57:27.736122 | 2020-05-06T14:26:10 | 2020-05-06T14:26:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/python3
"""Module 9-model_state_filter_a.py
"""
import sys
from model_state import Base, State
from sqlalchemy import (create_engine)
if __name__ == "__main__":
""" Main
Get the States by a n letter using SQLAlchemy
"""
engine = create_engine(
'mysql+mysqldb://{}:{}@localhost/{}'
.format(
sys.argv[1], sys.argv[2],
sys.argv[3]
),
pool_pre_ping=True
)
sname = sys.argv[4]
Base.metadata.create_all(engine)
sql = "SELECT * FROM states ORDER BY states.id;"
result = engine.execute(sql)
states = result.fetchall()
exist = False
for s in states:
if sname == s.name:
print("{:d}".format(s.id))
exist = True
if exist is False:
print("Not found")
| [
"jose.calderon@holbertonschool.com"
] | jose.calderon@holbertonschool.com |
a37d7da8f835b83be2821b76a95ed83ee07ad3b5 | 5181d3b3ef8fe301ea2d6b095260e9d327c2fd79 | /scripts/iemre/areal_coverage.py | dcbd0b2c6f99ad435cfa8f0078503e6ef15a8952 | [] | no_license | danhreitz/iem | 88113ef9c9c4a2918c9c2abdfd0510d5ca4ec819 | ed490dcd6c2a8359f88cb805ccee8f6707566f57 | refs/heads/master | 2021-01-18T15:27:28.607250 | 2015-08-10T21:33:54 | 2015-08-10T21:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | py | import netCDF4
from pyiem import iemre, plot
import numpy
import datetime
import pytz
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# Analysis window: 1 Apr 2013 through 15 Jun 2013 (UTC, end exclusive).
sts = datetime.datetime(2013,4,1, 0)
sts = sts.replace(tzinfo=pytz.timezone("UTC"))
ets = datetime.datetime(2013,6,16, 0)
ets = ets.replace(tzinfo=pytz.timezone("UTC"))
# Daily IEM reanalysis grid for 2013; p01d is presumably daily precipitation.
nc = netCDF4.Dataset('/mesonet/data/iemre/2013_mw_daily.nc')
lons = nc.variables['lon'][:]
lats = nc.variables['lat'][:]
precip = nc.variables['p01d']
# State-weight grid: cells with IA > 0 belong to Iowa.
nc2 = netCDF4.Dataset("/mesonet/data/iemre/state_weights.nc")
iowa = nc2.variables['IA'][:]
iowapts = numpy.sum(numpy.where(iowa > 0, 1, 0))  # number of Iowa grid cells
nc2.close()
days = []
coverage = []
now = sts
while now < ets:
    idx = iemre.daily_offset(now)
    # Mask non-Iowa cells with -1 so they can never pass the threshold below.
    pday = numpy.where(iowa > 0, precip[idx,:,:], -1)
    # 0.05 in * 25.4 -> mm; assumes the grid is in millimetres — TODO confirm.
    tots = numpy.sum(numpy.where(pday >= (0.05 * 25.4), 1, 0 ))
    days.append( now )
    coverage.append( tots / float(iowapts) * 100.0)
    now += datetime.timedelta(days=1)
# Two trailing zero bars so the plot returns to the baseline.
days.append( now )
coverage.append( 0 )
days.append( now + datetime.timedelta(days=1))
coverage.append( 0 )
(fig, ax) = plt.subplots(1,1)
ax.bar(days, coverage, fc='b', ec='b')
ax.set_yticks([0,25,50,75,100])
ax.grid(True)
ax.set_title("2013 Daily Iowa Precipitation Coverage of 0.05+ inch")
ax.set_ylabel("Areal Coverage [%]")
# Weekly tick marks, labelled as day-over-month.
ax.xaxis.set_major_locator(
    mdates.DayLocator(interval=7)
)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%-d\n%b'))
ax.set_xlim(min(days), max(days))
fig.savefig('test.svg')
import iemplot
iemplot.makefeature('test')
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
1e333d4c033792130b775c73a3c6c372ece02d41 | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/python-xlib/Xlib/ext/ge.pyi | d0add2edbd711d2a66f70a9614bafbfc6f77c1ed | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 556 | pyi | from typing_extensions import Final
from Xlib._typing import Unused
from Xlib.display import Display
from Xlib.protocol import rq
from Xlib.xobject import resource
# Name the extension is registered under.
extname: Final = "Generic Event Extension"
# Event code carried by GenericEvent packets (see GenericEvent below).
GenericEventCode: Final = 35
class GEQueryVersion(rq.ReplyRequest): ...
def query_version(self: Display | resource.Resource) -> GEQueryVersion: ...
class GenericEvent(rq.Event): ...
def add_event_data(self: Display | resource.Resource, extension: int, evtype: int, estruct: int) -> None: ...
def init(disp: Display, info: Unused) -> None: ...
| [
"noreply@github.com"
] | sobolevn.noreply@github.com |
83da271d2007ec5ce1f1ba16f484a54719876d17 | da5bf3d91fd7b73752d955b6ae783019c11267ec | /cuda-device-2.py | 6f653305999de71218042a7ef855d8f0be1290e5 | [] | no_license | izham-sugita/numba-fd | badd89608fcbbc3cd69be1a92ff69b81248b2498 | 8c4b2003c800454e5202908fc9abeb0df531e9df | refs/heads/master | 2023-03-28T01:04:56.136570 | 2021-03-30T10:09:36 | 2021-03-30T10:09:36 | 284,838,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from numba import cuda, float32
import math
import time
@cuda.jit
def matmul(A, B, C):
"""Perform square matrix multiplication of C = A * B
"""
i, j = cuda.grid(2)
if i < C.shape[0] and j < C.shape[1]:
tmp = 0.
for k in range(A.shape[1]):
tmp += A[i, k] * B[k, j]
C[i, j] = tmp
deviceID = int( input("Select device ID: "))
print(cuda.select_device(deviceID))
import numpy as np
N = 4096
A = np.random.rand(N,N)
B = np.identity(N)
C = np.zeros_like(A)
#print(A)
threadsperblock = (16, 16)
blockspergrid_x = math.ceil(A.shape[0] / threadsperblock[0])
blockspergrid_y = math.ceil(A.shape[1] / threadsperblock[1])
blockspergrid = (blockspergrid_x, blockspergrid_y)
ts = time.time()
matmul[blockspergrid,threadsperblock](A,B,C)
te = time.time()
elp = te -ts
gflops = ( ( float(N)**3 ) / elp ) * 10.0e-9
print("Elapsed time: ",elp, "secs")
print("Throughput ", gflops, "GFLOPS ")
print()
#print(C)
#rvalue = str( cuda.detect() ) # return True, False only
#print(rvalue) #cannot get the device
| [
"sugita5019@gmail.com"
] | sugita5019@gmail.com |
aaad4f9650237b09e2f1935c4e00ff4f34b6c145 | 9cc6721acb439db2e7cff8eb4dbff4b6e14040d5 | /백준/2231.py | 12b6383564cc1f9fc8b9974f79a0ce94df16f6f4 | [] | no_license | young31/Algorithm | 35c6ec6b6d9b192f9d0e6e6f6484f33c92100232 | bfcccfa798d031a930490efa24d9b2263bd4b984 | refs/heads/master | 2021-11-04T14:01:53.827508 | 2021-10-25T06:35:24 | 2021-10-25T06:35:24 | 196,034,851 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | n = list(input())
l = len(n)
n = int(''.join(n))
for i in range(max(0, n-9*l), n):
k = list(map(int, list(str(i))))
if sum(k)+i == n:
print(i)
break
else:
print(0)
| [
"migael38317@gmail.com"
] | migael38317@gmail.com |
4de5316a1b00eba55f5fbc916cdf2bbf7b91b27d | 763be4d77e20504848c9ddd29fe99b8012b00ea7 | /uchicagohvz/game/dorm_migrations/0008_highvaluedorm_dorm_fk.py | 7484b9415b6045ca84f26983520b969e079375f4 | [
"MIT"
] | permissive | kz26/uchicago-hvz | 2207c944f19c6fcc3310d4a43b4e733ac8225b18 | 85e89a1b70fa2a23445890686312407fe8b2084a | refs/heads/master | 2021-12-07T03:21:03.118945 | 2020-10-08T14:31:02 | 2020-10-08T14:31:02 | 13,137,628 | 11 | 6 | MIT | 2021-11-29T17:59:45 | 2013-09-27T00:28:39 | HTML | UTF-8 | Python | false | false | 447 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0007_set_dorm_fk'),
]
operations = [
migrations.AddField(
model_name='highvaluedorm',
name='dorm_fk',
field=models.ForeignKey(default=1, to='game.Dorm'),
preserve_default=False,
),
]
| [
"whitehat2k9@gmail.com"
] | whitehat2k9@gmail.com |
09cbf9993c29b825bd52fd02515b9643ed8493ff | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4458198.3.spec | 9afe952d68ad4351bc2a552770949ffc40099690 | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,313 | spec | {
"id": "mgm4458198.3",
"metadata": {
"mgm4458198.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 1019112,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 63290,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 307,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 450,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1781,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 622754,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 508,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 25263,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 1441193,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 49,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 124893,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 47486,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 27095,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 260283,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 6935,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 5604,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 11567,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 16430,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 13645393,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 98,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 425,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 30,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 1129,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 1569,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 597,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 221,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 23105,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 5231,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458198.3/file/999.done.species.stats"
}
},
"id": "mgm4458198.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4458198.3"
}
},
"raw": {
"mgm4458198.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4458198.3"
}
}
} | [
"jared.wilkening@gmail.com"
] | jared.wilkening@gmail.com |
61ba5f04c6fdb92dd53cf834f471d9a8bfa509ea | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/netapp/v20201201/account.py | f3edb9464b8f1982985ca14d2a40dd982d6b1e04 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,934 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['AccountArgs', 'Account']
@pulumi.input_type
class AccountArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
account_name: Optional[pulumi.Input[str]] = None,
active_directories: Optional[pulumi.Input[Sequence[pulumi.Input['ActiveDirectoryArgs']]]] = None,
encryption: Optional[pulumi.Input['AccountEncryptionArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Account resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[Sequence[pulumi.Input['ActiveDirectoryArgs']]] active_directories: Active Directories
:param pulumi.Input['AccountEncryptionArgs'] encryption: Encryption settings
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if account_name is not None:
pulumi.set(__self__, "account_name", account_name)
if active_directories is not None:
pulumi.set(__self__, "active_directories", active_directories)
if encryption is not None:
pulumi.set(__self__, "encryption", encryption)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the NetApp account
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="activeDirectories")
def active_directories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ActiveDirectoryArgs']]]]:
"""
Active Directories
"""
return pulumi.get(self, "active_directories")
@active_directories.setter
def active_directories(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ActiveDirectoryArgs']]]]):
pulumi.set(self, "active_directories", value)
@property
@pulumi.getter
def encryption(self) -> Optional[pulumi.Input['AccountEncryptionArgs']]:
"""
Encryption settings
"""
return pulumi.get(self, "encryption")
@encryption.setter
def encryption(self, value: Optional[pulumi.Input['AccountEncryptionArgs']]):
pulumi.set(self, "encryption", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Account(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]] = None,
encryption: Optional[pulumi.Input[pulumi.InputType['AccountEncryptionArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
NetApp account resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories
:param pulumi.Input[pulumi.InputType['AccountEncryptionArgs']] encryption: Encryption settings
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AccountArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
NetApp account resource
:param str resource_name: The name of the resource.
:param AccountArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AccountArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]] = None,
encryption: Optional[pulumi.Input[pulumi.InputType['AccountEncryptionArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AccountArgs.__new__(AccountArgs)
__props__.__dict__["account_name"] = account_name
__props__.__dict__["active_directories"] = active_directories
__props__.__dict__["encryption"] = encryption
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:netapp:Account"), pulumi.Alias(type_="azure-native:netapp/v20170815:Account"), pulumi.Alias(type_="azure-native:netapp/v20190501:Account"), pulumi.Alias(type_="azure-native:netapp/v20190601:Account"), pulumi.Alias(type_="azure-native:netapp/v20190701:Account"), pulumi.Alias(type_="azure-native:netapp/v20190801:Account"), pulumi.Alias(type_="azure-native:netapp/v20191001:Account"), pulumi.Alias(type_="azure-native:netapp/v20191101:Account"), pulumi.Alias(type_="azure-native:netapp/v20200201:Account"), pulumi.Alias(type_="azure-native:netapp/v20200301:Account"), pulumi.Alias(type_="azure-native:netapp/v20200501:Account"), pulumi.Alias(type_="azure-native:netapp/v20200601:Account"), pulumi.Alias(type_="azure-native:netapp/v20200701:Account"), pulumi.Alias(type_="azure-native:netapp/v20200801:Account"), pulumi.Alias(type_="azure-native:netapp/v20200901:Account"), pulumi.Alias(type_="azure-native:netapp/v20201101:Account"), pulumi.Alias(type_="azure-native:netapp/v20210201:Account"), pulumi.Alias(type_="azure-native:netapp/v20210401:Account"), pulumi.Alias(type_="azure-native:netapp/v20210401preview:Account"), pulumi.Alias(type_="azure-native:netapp/v20210601:Account")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Account, __self__).__init__(
'azure-native:netapp/v20201201:Account',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Account':
"""
Get an existing Account resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AccountArgs.__new__(AccountArgs)
__props__.__dict__["active_directories"] = None
__props__.__dict__["encryption"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return Account(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="activeDirectories")
def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]:
"""
Active Directories
"""
return pulumi.get(self, "active_directories")
@property
@pulumi.getter
def encryption(self) -> pulumi.Output[Optional['outputs.AccountEncryptionResponse']]:
"""
Encryption settings
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system meta data relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
e12c1c659d3522d8afd7c9c6a5000dcd949f7080 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /070_oop/007_exceptions/_exercises/templates/GoCongr/022_Coding Exceptions Classes_!cool!.py | 741999e8439973d27e69f5a1bd8bf673b05a8ae2 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 351 | py | # # Coding Exceptions Classes
# c_ General ? p..
# c_ Specific1 G.. p..
# c_ Specific2 G.. p..
#
# ___ raiser0 r____ G...
# ___ raiser1 r____ S.1
# ___ raiser2 r____ S.2
#
# ___ func __ _0 _1 _2
# ___
# ?
# ____ G.. __ X # X is the raised instance
# print('caught:' X. -c # Same as sys.exc_info()[0]
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
3e68c157c162e237bcf7cbdfab3344aa166b63ed | f76862c7dfcc08a495927eae3f4a995e416b948c | /amulet_map_editor/api/opengl/__init__.py | 485d3392e3d5341aa400e59e6ba9d552087799c4 | [] | permissive | Ex-Ark/Amulet-Map-Editor | 0bc1e6ac07349736114ea80e6c3ee2233863366e | 24704b91749727e8bce25aecf41d39f4b0eba433 | refs/heads/master | 2023-03-02T23:06:46.201874 | 2021-02-14T12:31:59 | 2021-02-14T12:31:59 | 221,939,739 | 0 | 0 | MIT | 2019-11-15T14:26:14 | 2019-11-15T14:26:14 | null | UTF-8 | Python | false | false | 145 | py | from .context_manager import ContextManager
from .drawable import Drawable
from .thread_generator import ThreadedObject, ThreadedObjectContainer
| [
"james_clare1@yahoo.co.uk"
] | james_clare1@yahoo.co.uk |
5f94299b8ab5fe88eeb251205f9efa410e4838a0 | acfac20206023b951a1016664771e1dda6c22a45 | /QUANTAXIS/QAData/QADataStruct.py | 9ab3f5fb84537052f42572ca99a9dcdf4c052dd2 | [
"MIT"
] | permissive | qzm/QUANTAXIS | fc8b99dc2fdb2d6f2c38556710337f2cf97d5da3 | 055fdc16d67670fb4770e7097865336199e55f3e | refs/heads/master | 2020-03-17T19:53:31.346748 | 2018-05-17T14:40:33 | 2018-05-17T14:40:33 | 133,882,796 | 1 | 0 | null | 2018-05-18T00:35:15 | 2018-05-18T00:35:14 | null | UTF-8 | Python | false | false | 24,156 | py | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
定义一些可以扩展的数据结构
方便序列化/相互转换
"""
import datetime
import itertools
import os
import platform
import statistics
import sys
import time
import webbrowser
from copy import copy
from functools import lru_cache, partial, reduce
import numpy as np
import pandas as pd
from pyecharts import Kline
from QUANTAXIS.QAData.base_datastruct import _quotation_base
from QUANTAXIS.QAData.data_fq import QA_data_stock_to_fq
from QUANTAXIS.QAData.data_resample import QA_data_tick_resample
from QUANTAXIS.QAData.proto import stock_day_pb2 # protobuf import
from QUANTAXIS.QAData.proto import stock_min_pb2
from QUANTAXIS.QAFetch.QATdx import QA_fetch_get_stock_realtime
from QUANTAXIS.QAIndicator import EMA, HHV, LLV, SMA
from QUANTAXIS.QAUtil import (DATABASE, QA_util_log_info,
QA_util_random_with_topic,
QA_util_to_json_from_pandas,
QA_util_to_pandas_from_json, trade_date_sse)
from QUANTAXIS.QAUtil.QADate import QA_util_to_datetime
from QUANTAXIS.QAUtil.QAParameter import FREQUENCE, MARKET_TYPE
class QA_DataStruct_Stock_day(_quotation_base):
"""
this is a datastruct for stock_day
"""
def __init__(self, DataFrame, dtype='stock_day', if_fq='bfq'):
super().__init__(DataFrame, dtype, if_fq)
if 'high_limit' not in self.data.columns:
self.data['high_limit'] = round(
(self.data.close.shift(1) + 0.0002) * 1.1, 2)
if 'low_limit' not in self.data.columns:
self.data['low_limit'] = round(
(self.data.close.shift(1) + 0.0002) * 0.9, 2)
if 'next_day_high_limit' not in self.data.columns:
self.data['next_day_high_limit'] = round(
(self.data.close + 0.0002) * 1.1, 2)
if 'next_day_low_limit' not in self.data.columns:
self.data['next_day_low_limit'] = round(
(self.data.close + 0.0002) * 0.9, 2)
def __repr__(self):
return '< QA_DataStruct_Stock_day with {} securities >'.format(len(self.code))
__str__ = __repr__
def to_qfq(self):
if self.if_fq is 'bfq':
if len(self.code) < 1:
self.if_fq = 'qfq'
return self
elif len(self.code) < 20:
return self.new(pd.concat(list(map(
lambda x: QA_data_stock_to_fq(self.data[self.data['code'] == x]), self.code))), self.type, 'qfq')
else:
return self.new(
self.data.groupby('code').apply(QA_data_stock_to_fq), self.type, 'qfq')
else:
QA_util_log_info(
'none support type for qfq Current type is: %s' % self.if_fq)
return self
def to_hfq(self):
if self.if_fq is 'bfq':
if len(self.code) < 1:
self.if_fq = 'hfq'
return self
else:
return self.new(pd.concat(list(map(lambda x: QA_data_stock_to_fq(
self.data[self.data['code'] == x], 'hfq'), self.code))), self.type, 'hfq')
else:
QA_util_log_info(
'none support type for qfq Current type is: %s' % self.if_fq)
return self
@property
def high_limit(self):
'涨停价'
return self.data.high_limit
@property
def low_limit(self):
'跌停价'
return self.data.low_limit
@property
def next_day_low_limit(self):
"明日跌停价"
return self.data.next_day_low_limit
@property
def next_day_high_limit(self):
"明日涨停价"
return self.data.next_day_high_limit
@property
def preclose(self):
try:
return self.data.preclose
except:
return None
@property
def price_chg(self):
try:
return (self.close-self.preclose)/self.preclose
except:
return None
class QA_DataStruct_Stock_min(_quotation_base):
def __init__(self, DataFrame, dtype='stock_min', if_fq='bfq'):
super().__init__(DataFrame, dtype, if_fq)
try:
self.data = DataFrame.ix[:, [
'code', 'open', 'high', 'low', 'close', 'volume', 'preclose', 'datetime', 'date']]
except:
self.data = DataFrame.ix[:, [
'code', 'open', 'high', 'low', 'close', 'volume', 'datetime', 'date']]
if 'high_limit' not in self.data.columns:
self.data['high_limit'] = round(
(self.data.close.shift(1) + 0.0002) * 1.1, 2)
if 'low_limit' not in self.data.columns:
self.data['low_limit'] = round(
(self.data.close.shift(1) + 0.0002) * 0.9, 2)
self.type = dtype
self.if_fq = if_fq
self.mongo_coll = DATABASE.stock_min
def __repr__(self):
return '< QA_DataStruct_Stock_Min with {} securities >'.format(len(self.code))
__str__ = __repr__
def to_qfq(self):
if self.if_fq is 'bfq':
if len(self.code) < 1:
self.if_fq = 'qfq'
return self
elif len(self.code) < 20:
data = QA_DataStruct_Stock_min(pd.concat(list(map(lambda x: QA_data_stock_to_fq(
self.data[self.data['code'] == x]), self.code))).set_index(['datetime', 'code'], drop=False))
data.if_fq = 'qfq'
return data
else:
data = QA_DataStruct_Stock_min(
self.data.groupby('code').apply(QA_data_stock_to_fq))
return data
else:
QA_util_log_info(
'none support type for qfq Current type is:%s' % self.if_fq)
return self
def to_hfq(self):
if self.if_fq is 'bfq':
if len(self.code) < 1:
self.if_fq = 'hfq'
return self
else:
data = QA_DataStruct_Stock_min(pd.concat(list(map(lambda x: QA_data_stock_to_fq(
self.data[self.data['code'] == x], 'hfq'), self.code))).set_index(['datetime', 'code'], drop=False))
data.if_fq = 'hfq'
return data
else:
QA_util_log_info(
'none support type for qfq Current type is:%s' % self.if_fq)
return self
@property
def high_limit(self):
'涨停价'
return self.data.high_limit
@property
def low_limit(self):
'跌停价'
return self.data.low_limit
class QA_DataStruct_Future_day(_quotation_base):
def __init__(self, DataFrame, dtype='future_day', if_fq=''):
self.type = 'future_day'
self.data = DataFrame.ix[:, [
'code', 'open', 'high', 'low', 'close', 'trade', 'position', 'datetime', 'date']]
self.mongo_coll = DATABASE.future_day
def __repr__(self):
return '< QA_DataStruct_Future_day with {} securities >'.format(len(self.code))
__str__ = __repr__
class QA_DataStruct_Future_min(_quotation_base):
    """
    struct for future (minute bars)
    """
    def __init__(self, DataFrame, dtype='future_min', if_fq=''):
        # BUG FIX: self.type was hard-coded to 'future_day' (copy-paste from
        # the daily struct); use the dtype argument ('future_min' by default).
        self.type = dtype
        self.if_fq = if_fq
        # .loc replaces the removed pandas .ix indexer (label-based here).
        self.data = DataFrame.loc[:, [
            'code', 'open', 'high', 'low', 'close', 'trade', 'position', 'datetime', 'date']]
        self.mongo_coll = DATABASE.future_min
    def __repr__(self):
        return '< QA_DataStruct_Future_min with {} securities >'.format(len(self.code))
    __str__ = __repr__
class QA_DataStruct_Index_day(_quotation_base):
    """Custom daily index DataStruct (自定义的日线数据结构)."""
    def __init__(self, DataFrame, dtype='index_day', if_fq=''):
        self.data = DataFrame
        self.type = dtype
        self.if_fq = if_fq
        # getattr replaces eval(): same dynamic attribute lookup on DATABASE
        # without evaluating an arbitrary string.
        self.mongo_coll = getattr(DATABASE, self.type)
    """
    def __add__(self,DataStruct):
        'add func with merge list and reindex'
        assert isinstance(DataStruct,QA_DataStruct_Index_day)
        if self.if_fq==DataStruct.if_fq:
            self.sync_status(pd.concat())
    """
    def __repr__(self):
        return '< QA_DataStruct_Index_day with {} securities >'.format(len(self.code))
    __str__ = __repr__
class QA_DataStruct_Index_min(_quotation_base):
    """Custom minute-bar index DataStruct (自定义的分钟线数据结构)."""
    def __init__(self, DataFrame, dtype='index_min', if_fq=''):
        self.type = dtype
        self.if_fq = if_fq
        # .loc replaces the removed pandas .ix indexer (label-based here).
        self.data = DataFrame.loc[:, [
            'code', 'open', 'high', 'low', 'close', 'volume', 'datetime', 'date']]
        self.mongo_coll = DATABASE.index_min
    def __repr__(self):
        return '< QA_DataStruct_Index_Min with %s securities >' % len(self.code)
    __str__ = __repr__
class QA_DataStruct_Stock_block():
    """DataStruct over the stock/block (sector) membership table.

    ``data`` columns used here: code, blockname, type
    (type is one of gn-concept / dy-region / fg-style / zs-index).
    """
    def __init__(self, DataFrame):
        self.data = DataFrame
    def __repr__(self):
        return '< QA_DataStruct_Stock_Block >'
    def __call__(self):
        return self.data
    @property
    def len(self):
        """Number of (code, block) membership rows."""
        return len(self.data)
    @property
    def block_name(self):
        """All distinct block names."""
        return self.data.groupby('blockname').sum().index.unique().tolist()
    @property
    def code(self):
        """All distinct security codes."""
        return self.data.code.unique().tolist()
    def show(self):
        """Return the underlying dataframe."""
        return self.data
    def get_code(self, code):
        """Blocks a single security belongs to.
        Arguments:
            code {str} -- security code
        Returns:
            QA_DataStruct_Stock_block restricted to that code
        """
        return QA_DataStruct_Stock_block(self.data[self.data['code'] == code])
    def get_block(self, _block_name):
        """Members of one block.
        Arguments:
            _block_name {str} -- block name to restrict to
        """
        return QA_DataStruct_Stock_block(self.data[self.data['blockname'] == _block_name])
    def getdtype(self, dtype):
        """Filter memberships by block type.
        Arguments:
            dtype {str} -- gn-concept / dy-region / fg-style / zs-index
        """
        return QA_DataStruct_Stock_block(self.data[self.data['type'] == dtype])
    def get_price(self, _block_name=None):
        """Latest realtime prices for one block (or every known code).
        Keyword Arguments:
            _block_name {str} -- block to restrict to (default: {None})
        """
        if _block_name is not None:
            try:
                code = self.data[self.data['blockname']
                                 == _block_name].code.unique().tolist()
                # try to get a datastruct package of lastest price
                return QA_fetch_get_stock_realtime(code)
            except Exception:
                # Narrowed from a bare 'except:'; the best-effort string
                # result is kept for backward compatibility.
                return "Wrong Block Name! Please Check"
        else:
            code = self.data.code.unique().tolist()
            return QA_fetch_get_stock_realtime(code)
class QA_DataStruct_Stock_transaction():
    """Tick-by-tick (transaction) stock data.

    Wraps one or more days of per-trade records in ``self.data`` and
    exposes the usual columns as per-instance cached accessors, plus a
    few turnover-based filters.

    NOTE: the ``@property`` + ``@lru_cache()`` pairing below caches per
    instance but also keeps each instance alive for the lifetime of the
    cache (flake8-bugbear B019); kept as-is since these structs are
    short-lived by design.
    """
    def __init__(self, DataFrame):
        """Build the struct from a one/multi-day transaction DataFrame.

        If no 'amount' (turnover) column is present it is derived as
        volume * price * 100 (one board lot = 100 shares).
        """
        self.type = 'stock_transaction'
        self.data = DataFrame
        if 'amount' not in DataFrame.columns:
            if 'vol' in DataFrame.columns:
                self.data['amount'] = self.data.vol * self.data.price * 100
            elif 'volume' in DataFrame.columns:
                self.data['amount'] = self.data.volume * self.data.price * 100
        self.mongo_coll = DATABASE.stock_transaction
    @property
    @lru_cache()
    def buyorsell(self):
        """Trade direction: 0 -- buy, 1 -- sell, 2 -- none."""
        return self.data.buyorsell
    @property
    @lru_cache()
    def price(self):
        """Deal price of each tick transaction."""
        return self.data.price
    @property
    @lru_cache()
    def vol(self):
        """Deal volume of each tick ('volume' column, legacy 'vol' fallback)."""
        try:
            return self.data.volume
        except AttributeError:
            # Narrowed from a bare 'except:': only the missing-column case
            # should fall through to the legacy 'vol' column.
            return self.data.vol
    volume = vol
    @property
    @lru_cache()
    def date(self):
        """Trade date of each tick."""
        return self.data.date
    @property
    @lru_cache()
    def time(self):
        """Trade time of each tick (minute resolution)."""
        return self.data.time
    @property
    @lru_cache()
    def datetime(self):
        """Full datetime of each tick."""
        return self.data.datetime
    @property
    @lru_cache()
    def order(self):
        """Intraday order number of each tick (resets every day)."""
        return self.data.order
    @property
    @lru_cache()
    def index(self):
        """Underlying DataFrame index."""
        return self.data.index
    @property
    @lru_cache()
    def amount(self):
        """Turnover of each tick."""
        return self.data.amount
    """
    Reference TDX formula for the intraday average price (kept verbatim):
    最新:IF(ISNULL(NEW),PRE,NEW);
    IF (ISNULL(RANGE_AVG_PRICE) OR RANGE_AVG_PRICE <= 0)
    {
        IF (MARKETTYPE == 232 OR MARKETTYPE == 56 OR MARKETTYPE==64 OR MARKETTYPE==128 OR MARKETTYPE==168 OR MARKETTYPE==184 OR MARKETTYPE == 200 OR MARKETTYPE == 80 OR (VOL > 1 AND VOL<100))
        {
            b=SUBSAMEDAY(&VOL) ;
            m=SUM(b*最新,0);
            均价:IF(m>0,m/VOL,PRE);
        }
        ELSE IF(CODETYPE!=0 AND MONEY>0)
        {
            IF(ISNULL(MONEY) OR ISNULL(VOL) OR VOL==0 OR MONEY==0)
                均价:PRE;
            ELSE IF(VOL==VOL[1] OR MONEY==MONEY[1])
                均价:均价[1];
            ELSE
                均价:MONEY/VOL;
        }
        ELSE IF (MARKETTYPE == 176)
        {
            b=SUBSAMEDAY(&MONEY);
            m=SUM(b*最新,0);
            IF(m>0)
                均价:m/MONEY;
        }
    }
    ELSE
    {
        均价:RANGE_AVG_PRICE;
    }
    DRAWGBK(MARKETTYPE==32 AND FORMATTIME(1)<10 AND TRADETIME>242),RGB(0,0,128);
    RETURN;
    hx_star;
    hx_star_p;
    """
    def __repr__(self):
        return '< QA_DataStruct_Stock_Transaction >'
    def __call__(self):
        return self.data
    def resample(self, type_='1min'):
        """Resample ticks into bars of the given frequency.
        Returns:
            QA_DataStruct_Stock_min
        """
        return QA_DataStruct_Stock_min(QA_data_tick_resample(self.data, type_))
    def get_big_orders(self, bigamount=1000000):
        """Ticks whose turnover is at least ``bigamount``.
        Keyword Arguments:
            bigamount {int} -- threshold (default: {1000000})
        """
        return self.data.query('amount>={}'.format(bigamount))
    def get_medium_order(self, lower=200000, higher=1000000):
        """Ticks with turnover in the closed range [lower, higher].
        Keyword Arguments:
            lower {int} -- lower bound (default: {200000})
            higher {int} -- upper bound (default: {1000000})
        """
        return self.data.query('amount>={}'.format(lower)).query('amount<={}'.format(higher))
    def get_small_order(self, smallamount=200000):
        """Ticks whose turnover is at most ``smallamount``.
        Keyword Arguments:
            smallamount {int} -- threshold (default: {200000})
        """
        return self.data.query('amount<={}'.format(smallamount))
    def get_time(self, start, end=None):
        """Label-based slice on the index: a single label, or [start:end]."""
        if end is None:
            return self.data.loc[start]
        else:
            return self.data.loc[start:end]
class _realtime_base():
"""
realtime 基类
主要字段有:
code/name
time
open/high/low
买卖报价队列:(不同的可能不一样 只提供list)
ask_list[ask1_price/ask1_volume|ask2_price/ask2_volume|ask3_price/ask3_volume....]
bid_list[bid1_price/bid1_volume|bid2_price/bid2_volume|bid3_price/bid3_volume....]
"""
def __init__(self, market_data):
"""转化成dict模式
Arguments:
market_data {[type]} -- [description]
"""
if isinstance(market_data, dict):
self.market_data = market_data
elif isinstance(market_data, pd.DataFrame):
self.market_data = QA_util_to_json_from_pandas(market_data)
@property
def open(self):
return self.market_data.get('open', None)
@property
def price(self):
return self.market_data.get('price', None)
@property
def datetime(self):
return self.market_data.get('datetime', None)
@property
def high(self):
return self.market_data.get('high', None)
@property
def low(self):
return self.market_data.get('low', None)
@property
def code(self):
return self.market_data.get('code', None)
@property
def last_close(self):
return self.market_data.get('last_close', None)
@property
def cur_vol(self):
return self.market_data.get('cur_vol', None)
@property
def bid1(self):
return self.market_data.get('bid1', None)
@property
def bid_vol1(self):
return self.market_data.get('bid_vol1', None)
@property
def bid2(self):
return self.market_data.get('bid2', None)
@property
def bid_vol2(self):
return self.market_data.get('bid_vol2', None)
@property
def bid3(self):
return self.market_data.get('bid3', None)
@property
def bid_vol3(self):
return self.market_data.get('bid_vol3', None)
@property
def bid4(self):
return self.market_data.get('bid4', None)
@property
def bid_vol4(self):
return self.market_data.get('bid_vol4', None)
@property
def bid5(self):
return self.market_data.get('bid5', None)
@property
def bid_vol5(self):
return self.market_data.get('bid_vol5', None)
@property
def ask1(self):
return self.market_data.get('ask1', None)
@property
def ask_vol1(self):
return self.market_data.get('ask_vol1', None)
@property
def ask2(self):
return self.market_data.get('ask2', None)
@property
def ask_vol2(self):
return self.market_data.get('ask_vol2', None)
@property
def ask3(self):
return self.market_data.get('ask3', None)
@property
def ask_vol3(self):
return self.market_data.get('ask_vol3', None)
@property
def ask4(self):
return self.market_data.get('ask4', None)
@property
def ask_vol4(self):
return self.market_data.get('ask_vol4', None)
@property
def ask5(self):
return self.market_data.get('ask5', None)
@property
def ask_vol5(self):
return self.market_data.get('ask_vol5', None)
class QA_DataStruct_Stock_realtime(_realtime_base):
    """Single realtime stock snapshot; field accessors come from _realtime_base."""
    def __init__(self, market_data):
        # Accept either an already-dict record or a (one-row) DataFrame.
        if isinstance(market_data, dict):
            self.market_data = market_data
        elif isinstance(market_data, pd.DataFrame):
            self.market_data = QA_util_to_json_from_pandas(market_data)
    def __repr__(self):
        return '< QA_REALTIME_STRUCT {}{} >'.format(self.code, self.datetime)
    # @property
    # def ask_list(self):
    #     return self.market_data.ix[:, ['ask1', 'ask_vol1', 'bid1', 'bid_vol1', 'ask2', 'ask_vol2',
    #                                    'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3', 'ask4',
    #                                    'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5', 'bid_vol5']]
    # @property
    # def bid_list(self):
    #     return self.market_data.ix[:, ['bid1', 'bid_vol1', 'bid2', 'bid_vol2', 'bid3', 'bid_vol3', 'bid4', 'bid_vol4', 'bid5', 'bid_vol5']]
    @property
    def _data(self):
        """
        return a dataframe-type result
        """
        return pd.DataFrame(self.market_data)
    @property
    def ab_board(self):
        """ask_bid board
        bid3 bid_vol3
        bid2 bid_vol2
        bid1 bid_vol1
        ===============
        price /cur_vol
        ===============
        ask1 ask_vol1
        ask2 ask_vol2
        ask3 ask_vol3
        """
        # NOTE: the backslash continuation below is *inside* the string
        # literal, so the next line's leading whitespace is part of the
        # rendered board -- preserved deliberately.
        return 'BID5 {} {} \nBID4 {} {} \nBID3 {} {} \nBID2 {} {} \nBID1 {} {} \n============\nCURRENT {} {} \n============\
        \nASK1 {} {} \nASK2 {} {} \nASK3 {} {} \nASK4 {} {} \nASK5 {} {} \nTIME {} CODE {} '.format(
            self.bid5, self.bid_vol5, self.bid4, self.bid_vol4, self.bid3, self.bid_vol3, self.bid2, self.bid_vol2, self.bid1, self.bid_vol1,
            self.price, self.cur_vol,
            self.ask1, self.ask_vol1, self.ask2, self.ask_vol2, self.ask3, self.ask_vol3, self.ask4, self.ask_vol4, self.ask5, self.ask_vol5,
            self.datetime, self.code
        )
    def serialize(self):
        """to_protobuf
        """
        # Placeholder: protobuf serialization is not implemented yet.
        pass
class QA_DataStruct_Stock_realtime_series():
    """A sequence of realtime snapshots plus a concatenated table view."""
    def __init__(self, sr_series):
        head = sr_series[0]
        if isinstance(head, QA_DataStruct_Stock_realtime):
            self.sr_series = sr_series
        elif isinstance(head, dict):
            # Promote raw dict records to realtime structs.
            self.sr_series = [
                QA_DataStruct_Stock_realtime(record) for record in sr_series]
        self.table = pd.concat([snap._data for snap in self.sr_series])
class QA_DataStruct_Security_list():
    """Security master table (sse / code / name), indexed by code.

    The 'code' column is kept as data as well (drop=False) so both
    index- and column-based access work.
    """
    def __init__(self, DataFrame):
        wanted = ['sse', 'code', 'name']
        table = DataFrame.loc[:, wanted]
        self.data = table.set_index('code', drop=False)
    @property
    def code(self):
        """Security codes as a Series (mirrors the index)."""
        return self.data.code
    @property
    def name(self):
        """Security display names."""
        return self.data.name
    def get_stock(self, ST_option):
        # NOTE(review): ST_option is currently ignored -- placeholder API.
        return self.data
    def get_index(self):
        return self.data
    def get_etf(self):
        return self.data
| [
"yutiansut@qq.com"
] | yutiansut@qq.com |
0eed7b1f3619229cbb86a603a93f32fc7c03967f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_065/ch140_2020_04_01_19_28_19_184199.py | e9a126ebc7ed42b415088a88add0bda4fb22a9be | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | def faixa_notas(notas):
result = [0]*len(notas)
while notas:
print(notas) | [
"you@example.com"
] | you@example.com |
ad8e82445acb57add0fd8ed5e34f5c5e3db445b7 | 20b5ef21ed539ac5b906d252e323d3622432fe52 | /phantompy/webelements.py | bfcca92a3940679bdfdd6110ed810aba1e8a47b4 | [
"BSD-2-Clause"
] | permissive | ballacky13/phantompy | 51277264e4d3f4368abb329f798413e04a276ab4 | 9460c93565151b40f0137b29f8c6dde8eded7651 | refs/heads/master | 2021-01-21T01:59:25.939983 | 2013-05-24T14:38:19 | 2013-05-24T14:38:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | # -*- coding: utf-8 -*-
from .api import library as lib
from .api import ctypes
class WebElement(object):
    """Thin Python wrapper over a native phantompy web-element handle."""
    def __init__(self, el_ptr, frame):
        self._el_ptr = el_ptr
        self._frame = frame
        self._closed = False
        # Cache the tag name once; the native call returns utf-8 bytes.
        raw_tag = lib.ph_webelement_tag_name(el_ptr)
        self._tag_name = raw_tag.decode("utf-8")
    def __repr__(self):
        return "<WebElement <{0}> >".format(self.name)
    @property
    def ptr(self):
        """Opaque native element pointer."""
        return self._el_ptr
    def __del__(self):
        # Release the native handle when the wrapper is collected.
        lib.ph_webelement_free(self.ptr)
    @property
    def name(self):
        """Cached tag name (e.g. 'div')."""
        return self._tag_name
    def inner_html(self):
        return lib.ph_webelement_inner_html(self.ptr).decode("utf-8")
    def inner_text(self):
        return lib.ph_webelement_inner_text(self.ptr).decode("utf-8")
    def is_none(self):
        # NOTE(review): treats a 0 result from ph_webelement_is_null as
        # "is null" -- preserved as-is; confirm against the C API.
        return lib.ph_webelement_is_null(self.ptr) == 0
| [
"niwi@niwi.be"
] | niwi@niwi.be |
ed07f6d6de8dd7e3d776e9358767ea4cc11affc9 | ba602dc67ad7bb50133aeb312f3c6c54627b3dec | /data/3955/WA_py/508331.py | e587cdede063c05ef1b12076f39eb043f6dff69e | [] | no_license | Dearyyyyy/TCG | 0d21d89275906157372d775f33309ce337e6bc95 | 7b80de16de2d3f5d95a7c4ed95d45a9e38882e67 | refs/heads/master | 2020-12-27T23:19:44.845918 | 2020-02-04T01:59:23 | 2020-02-04T01:59:23 | 238,101,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | # coding=utf-8
# For each input pair, report whether n2 is a left rotation of n1.
# The loop terminates when input() raises at EOF, as in the original.
while True:
    n1, n2 = input().split()
    for shift in range(len(n1)):
        if n1[shift:] + n1[:shift] == n2:
            print("Yes")
            break
    else:
        # for/else: executed only when no rotation matched.
        print("No")
"543271544@qq.com"
] | 543271544@qq.com |
5be7a9deeb09ea616df6b8cc3c5b75a2fd056175 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03645/s325823599.py | 4ee88108ab98ed34053bd76fe0f66cfe1a062006 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | n, m = map(int, input().split())
# Routes are (source, destination) pairs; n and m were read above.
routes = [tuple(map(int, input().split())) for _ in range(m)]
# Islands one ferry hop from island 1, and islands with a direct ferry
# to island n: a 1 -> x -> n trip exists iff the two sets intersect.
reachable_from_start = {dst for src, dst in routes if src == 1}
reaches_goal = {src for src, dst in routes if dst == n}
print("POSSIBLE" if reachable_from_start & reaches_goal else "IMPOSSIBLE")
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a24259cf4a4c9640df796299cdbea9e8c318b970 | 2312ee83cd5cdfcd83af3a805dc14444b38f89c6 | /barpie.py | 6cda7263efdad4ff5e567070c4c80f8530c455cb | [
"MIT"
] | permissive | llord1/DataVisualization | 49ea215d012566d149a740c87185092d0a4e8ede | d7f7f43479549732ef6c94e7cd1c1ccc401593a8 | refs/heads/master | 2021-09-21T15:29:40.016594 | 2018-08-28T14:34:46 | 2018-08-28T14:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | #!/usr/bin/env pythonw
import matplotlib.pyplot as plt
import seaborn as sns
#sns.set_style('ticks')
# Get default color cycle
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
# 2x3 grid: top row pie charts, bottom row the same data as bar charts,
# demonstrating how much easier bars are to compare than pie slices.
fig, axs = plt.subplots(nrows=2, ncols=3)
# Three small series: increasing, roughly flat, decreasing.
dataA = [17, 18, 20, 22, 24]
dataB = [20, 20, 19, 21, 20]
dataC = [24, 22, 20, 18, 17]
axs[0][0].pie(dataA, labels=range(5))
axs[0][1].pie(dataB, labels=range(5))
axs[0][2].pie(dataC, labels=range(5))
# Reuse the pie colors for the bars; share a common y-limit so the
# three bar panels are directly comparable.
axs[1][0].bar(x=range(5), height=dataA, color=colors)
axs[1][0].set_ylim(0, 25)
axs[1][1].bar(x=range(5), height=dataB, color=colors)
axs[1][1].set_ylim(0, 25)
axs[1][2].bar(x=range(5), height=dataC, color=colors)
axs[1][2].set_ylim(0, 25)
fig.show()
fig.savefig('barpie.png', dpi=300) | [
"bgoncalves@gmail.com"
] | bgoncalves@gmail.com |
697efe77da57a33f49ad2ede10702df06f27631b | b3c3a810d48b02e40685f57d346fd9c0f2237a9e | /Python Essentials/5.0 Lists Basics/05. Numbers Filter.py | ce2d045c045aed6537ce710c040b83b686b4c757 | [] | no_license | byAbaddon/Essentials-Course-with----JavaScript___and___Python | 7ef8112edafd6a2e2cef82c7709f974a67c64cc0 | 5194d9e74c2aa186e5571745f8931f31595d4b99 | refs/heads/main | 2023-03-27T23:23:18.426633 | 2021-04-05T21:09:06 | 2021-04-05T21:09:06 | 349,848,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | num_list = [int(input()) for _ in range(int(input()))]
command = input()
def calc(type_command):
    """Filter the module-level ``num_list`` by category.

    Arguments:
        type_command -- one of 'even', 'odd', 'negative', 'positive'.
    Returns the matching sublist, or None for an unknown command
    (mirroring the dict.get fallback of the original).
    """
    predicates = {
        'even': lambda x: not x & 1,
        'odd': lambda x: x & 1,
        'negative': lambda x: x < 0,
        'positive': lambda x: x >= 0,
    }
    predicate = predicates.get(type_command)
    if predicate is None:
        return None
    # Evaluate only the requested filter; the original eagerly built all
    # four filtered lists on every call.
    return [x for x in num_list if predicate(x)]
print(calc(command))
'''
5
33
19
-2
18
998
even
#[-2, 18, 998]
''' | [
"noreply@github.com"
] | byAbaddon.noreply@github.com |
97a8d4ed865ee8e97ff76957231e9f0eafaa5a40 | 1925c535d439d2d47e27ace779f08be0b2a75750 | /CtCl/Sorting and Searching/quick_sort.py | c1b55d5d7746ba459bff4223121d04d400ffdc4c | [] | no_license | arthurDz/algorithm-studies | ee77d716041671c4b8bb757d8d96f3d10b6589f7 | 1e4d23dd0c40df34f58d71c7ca3e6491be732075 | refs/heads/master | 2023-04-27T12:17:06.209278 | 2021-04-30T20:16:18 | 2021-04-30T20:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | # Runtime O(NlogN) - average, O(N^2) - worst, Memory O(logN)
def quickSort(num):
    """Return a sorted copy of *num* via three-way quicksort.

    O(N log N) average, O(N^2) worst case, O(log N) expected stack depth.
    """
    if len(num) < 2:
        return num
    pivot = num[0]
    below = [v for v in num[1:] if v < pivot]
    same = [v for v in num if v == pivot]
    above = [v for v in num[1:] if v > pivot]
    return quickSort(below) + same + quickSort(above)
"yunfan.yang@minerva.kgi.edu"
] | yunfan.yang@minerva.kgi.edu |
04eb1ce6cbc6df42d8291e624e380c36614137c9 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/fiftyPercent/rank_2aq1_M.py | 908c5c4edee253682ca2a80a88fbae072bc24c6e | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,390 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
# NOTE: all paths below are cluster-specific absolute paths.
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2aq1.csv'
identifier = 'M'
# Fraction of the ranked coefficients to keep (here: top 50% by magnitude).
coefFrac = 0.5
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/fiftyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/fiftyPercent/ranks/'
pdbID = testFile[:4]
# Load the pre-trained classifier for this identifier.
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
    clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
# Drop bookkeeping columns, then transpose so each row is one decoy's
# descriptor vector (columns become pair names).
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
# 14028 is the total descriptor count; keep the top coefFrac of it.
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Standardize with the scaler fitted on the training data.
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
    scaler = pickle.load(g)
for i in range(len(df1)):
    # subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
    df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
    df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
    df2 = scaler.transform(df2)
    # Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
    predictions = clf.predict(df2)
    score = sum(predictions)
    scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
    result.to_csv(h)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
ad7770f5327908343402cf810055eb1ed533e4e5 | ce16345f020d14b138c0cb152abbdd1acbe450f5 | /grobber/grobber/sources/masteranime.py | 6e021e191b6101afd8a9d6aa3d9def0cee96ffef | [
"MIT"
] | permissive | siku2/MyAnimeStream | 9f1b57f33f344e35c6f0bae0c0008c6c76518eea | addfa3831cbe52667fea7f58d49325d6f200b2a1 | refs/heads/master | 2020-03-21T19:04:45.667147 | 2018-12-27T09:34:03 | 2018-12-27T09:34:03 | 138,929,617 | 1 | 1 | MIT | 2018-10-05T18:33:22 | 2018-06-27T20:27:27 | Python | UTF-8 | Python | false | false | 3,914 | py | import json
import logging
from typing import Any, AsyncIterator, Dict, List, Optional
from . import register_source
from .. import utils
from ..decorators import cached_property
from ..languages import Language
from ..models import Anime, Episode, SearchResult, get_certainty
from ..request import DefaultUrlFormatter, Request
from ..url_pool import UrlPool
log = logging.getLogger(__name__)
BASE_URL = "{MASTERANIME_URL}"
SEARCH_URL = BASE_URL + "/api/anime/filter"
ANIME_URL = BASE_URL + "/api/anime/{anime_id}/detailed"
EPISODE_URL = BASE_URL + "/anime/watch/{anime_slug}/{episode}"
class MasterEpisode(Episode):
    """Episode hosted on masterani.me.

    Stream URLs are assembled from the page's <video-mirrors> element,
    whose ``:mirrors`` attribute carries a JSON list of mirror hosts.
    """
    # Attributes included in the framework's state serialization.
    ATTRS = ("mirror_data",)
    @cached_property
    async def mirror_data(self) -> List[Dict[str, Any]]:
        # Parse the JSON mirror list embedded in the <video-mirrors> tag;
        # an absent tag means the page exposes no mirrors.
        bs = await self._req.bs
        element = bs.select_one("video-mirrors")
        if not element:
            return []
        return json.loads(element[":mirrors"])
    @cached_property
    async def raw_streams(self) -> List[str]:
        # Each mirror contributes "<embed_prefix><embed_id><embed_suffix>".
        links = []
        for mirror in await self.mirror_data:
            host_data = mirror["host"]
            prefix = host_data["embed_prefix"]
            # embed_suffix may be null in the page JSON -- default to "".
            suffix = host_data["embed_suffix"] or ""
            embed_id = mirror["embed_id"]
            links.append(f"{prefix}{embed_id}{suffix}")
        return links
class MasterAnime(Anime):
    """Anime hosted on masterani.me (detail + episode-listing API)."""
    # Attributes included in the framework's state serialization.
    ATTRS = ("anime_id", "anime_slug")
    EPISODE_CLS = MasterEpisode
    @cached_property
    async def info_data(self) -> Dict[str, Any]:
        """'info' section of the detailed anime API response."""
        return (await self._req.json)["info"]
    @cached_property
    async def episode_data(self) -> List[Dict[str, Any]]:
        """'episodes' section of the detailed anime API response."""
        return (await self._req.json)["episodes"]
    @cached_property
    async def anime_id(self) -> int:
        return (await self.info_data)["id"]
    @cached_property
    async def anime_slug(self) -> str:
        return (await self.info_data)["slug"]
    @cached_property
    async def title(self) -> str:
        return (await self.info_data)["title"]
    @cached_property
    async def is_dub(self) -> bool:
        # This source only offers subbed streams.
        return False
    @cached_property
    async def language(self) -> Language:
        return Language.ENGLISH
    @cached_property
    async def episode_count(self) -> int:
        return len(await self.episode_data)
    @classmethod
    async def search(cls, query: str, *, language=Language.ENGLISH, dubbed=False) -> AsyncIterator[SearchResult]:
        """Yield SearchResults for *query* (English, subbed only)."""
        if dubbed or language != Language.ENGLISH:
            return
        # Query limit is 45 characters!!
        req = Request(SEARCH_URL, {"search": query[:45], "order": "relevance_desc"})
        json_data = await req.json
        if not json_data:
            # Fixed: use the module-level logger defined in this file so the
            # record carries this module's name (was logging.warning on the
            # root logger).
            log.warning("couldn't get json from masteranime")
            return
        for raw_anime in json_data["data"]:
            anime_id = raw_anime["id"]
            title = raw_anime["title"]
            req = Request(utils.format_available(ANIME_URL, anime_id=anime_id))
            anime = cls(req)
            # Pre-seed the values we already know -- presumably these are the
            # cached_property backing slots; avoids refetching the detail
            # page just for id/slug/title (confirm against decorators module).
            anime._anime_id = anime_id
            anime._anime_slug = raw_anime["slug"]
            anime._title = title
            yield SearchResult(anime, get_certainty(title, query))
    @cached_property
    async def raw_eps(self) -> List[Episode]:
        """One EPISODE_CLS per entry of episode_data, in listing order."""
        episodes = []
        slug = await self.anime_slug
        for ep_data in await self.episode_data:
            ep_id = ep_data["info"]["episode"]
            req = Request(utils.format_available(EPISODE_URL, anime_slug=slug, episode=ep_id))
            episodes.append(self.EPISODE_CLS(req))
        return episodes
    async def get_episode(self, index: int) -> Optional[Episode]:
        return (await self.raw_eps)[index]
    async def get_episodes(self) -> List[Episode]:
        return await self.raw_eps
# Failover URL pool for the site; DefaultUrlFormatter resolves the
# "{MASTERANIME_URL}" placeholder used by the URL templates above.
masteranime_pool = UrlPool("MasterAnime", ["https://www.masterani.me"])
DefaultUrlFormatter.add_field("MASTERANIME_URL", lambda: masteranime_pool.url)
register_source(MasterAnime)
| [
"siku2@outlook.com"
] | siku2@outlook.com |
87b28b565810895139b674762e321a21a41a3b6a | 2da56d31de9ed0727be67e1080c9ba7c9491c824 | /ilit/data/transforms/transform.py | 9dbd8d7cfe29bdd2437c0021231f186b2d6b394b | [
"Apache-2.0",
"Intel",
"MIT"
] | permissive | TrendingTechnology/lp-opt-tool | 49b564b09721b2a70ac603ee9e9323a4daf58c62 | e2e3ff30292bced1122fef519d1768a2eb72faf7 | refs/heads/master | 2023-01-07T16:55:51.230933 | 2020-11-09T07:25:02 | 2020-11-09T07:25:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,407 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from ilit.utils.utility import LazyImport, singleton
torchvision = LazyImport('torchvision')
torch = LazyImport('torch')
tf = LazyImport('tensorflow')
mx = LazyImport('mxnet')
class BaseTransforms(object):
    """Dispatch base for framework-specific transform registries.

    ``process`` selects which registry to build ("preprocess",
    "postprocess" or "general"); subclasses supply the three
    ``_get_*`` factory methods.
    """
    def __init__(self, process, concat_general=True):
        builders = {
            "preprocess": self._get_preprocess,
            "postprocess": self._get_postprocess,
            "general": self._get_general,
        }
        self.transforms = builders[process]()
        # With concat_general enabled, the "general" transforms are usable
        # from both the preprocess and postprocess stages.
        if concat_general:
            self.transforms.update(builders["general"]())
    @abstractmethod
    def _get_preprocess(self):
        raise NotImplementedError
    @abstractmethod
    def _get_postprocess(self):
        raise NotImplementedError
    @abstractmethod
    def _get_general(self):
        raise NotImplementedError
class TensorflowTransforms(BaseTransforms):
    """Transform registry backed by tf.image / tf.io operations."""
    def _get_preprocess(self):
        preprocess = {
            "resize": WrapFunction(tf.image.resize),
            # "resize_with_pad" : WrapFunction(tf.image.resize_with_pad),
            "resize_with_crop_or_pad": WrapFunction(tf.image.resize_with_crop_or_pad),
            "grayscale_to_rgb": WrapFunction(tf.image.grayscale_to_rgb),
            "rgb_to_grayscale": WrapFunction(tf.image.rgb_to_grayscale),
            "hsv_to_rgb": WrapFunction(tf.image.hsv_to_rgb),
            "rgb_to_hsv": WrapFunction(tf.image.rgb_to_hsv),
            "yiq_to_rgb": WrapFunction(tf.image.yiq_to_rgb),
            "rgb_to_yiq": WrapFunction(tf.image.rgb_to_yiq),
            "yuv_to_rgb": WrapFunction(tf.image.yuv_to_rgb),
            "rgb_to_yuv": WrapFunction(tf.image.rgb_to_yuv),
            "image_gradients": WrapFunction(tf.image.image_gradients),
            "convert_image_dtype": WrapFunction(tf.image.convert_image_dtype),
            "adjust_brightness": WrapFunction(tf.image.adjust_brightness),
            "adjust_contrast": WrapFunction(tf.image.adjust_contrast),
            "adjust_gamma": WrapFunction(tf.image.adjust_gamma),
            "adjust_hue": WrapFunction(tf.image.adjust_hue),
            "adjust_jpeg_quality": WrapFunction(tf.image.adjust_jpeg_quality),
            "adjust_saturation": WrapFunction(tf.image.adjust_saturation),
            "random_brightness": WrapFunction(tf.image.random_brightness),
            "random_contrast": WrapFunction(tf.image.random_contrast),
            # BUG FIX: "random_saturation" previously mapped to
            # tf.image.random_hue; both ops are now registered correctly.
            "random_saturation": WrapFunction(tf.image.random_saturation),
            "random_hue": WrapFunction(tf.image.random_hue),
            "per_image_standardization": WrapFunction(tf.image.per_image_standardization),
            "central_crop": WrapFunction(tf.image.central_crop),
            "crop_and_resize": WrapFunction(tf.image.crop_and_resize),
            "crop_to_bounding_box": WrapFunction(tf.image.crop_to_bounding_box),
            "extract_glimpse": WrapFunction(tf.image.extract_glimpse),
            "random_crop": WrapFunction(tf.image.random_crop),
            # NOTE: a duplicate "resize_with_crop_or_pad" entry (identical
            # mapping) was removed here; the key is registered once above.
            "flip_left_right": WrapFunction(tf.image.flip_left_right),
            "flip_up_down": WrapFunction(tf.image.flip_up_down),
            "random_flip_left_right": WrapFunction(tf.image.random_flip_left_right),
            "random_flip_up_down": WrapFunction(tf.image.random_flip_up_down),
            "rot90": WrapFunction(tf.image.rot90),
            "decode_and_crop_jpeg": WrapFunction(tf.io.decode_and_crop_jpeg),
            "decode_bmp": WrapFunction(tf.io.decode_bmp),
            "decode_gif": WrapFunction(tf.io.decode_gif),
            "decode_image": WrapFunction(tf.io.decode_image),
            "decode_jpeg": WrapFunction(tf.io.decode_jpeg),
            "decode_png": WrapFunction(tf.io.decode_png),
            "encode_jpeg": WrapFunction(tf.io.encode_jpeg),
        }
        # update the registry transforms
        preprocess.update(TENSORFLOWTRANSFORMS["preprocess"])
        return preprocess
    def _get_postprocess(self):
        postprocess = {
            "non_max_suppression": WrapFunction(tf.image.non_max_suppression),
            "non_max_suppression_overlaps": WrapFunction(tf.image.non_max_suppression_overlaps),
            "non_max_suppression_padded": WrapFunction(tf.image.non_max_suppression_padded),
            "non_max_suppression_with_scores": WrapFunction(
                tf.image.non_max_suppression_with_scores),
            "pad_to_bounding_box": WrapFunction(tf.image.pad_to_bounding_box),
            "sample_distorted_bounding_box": WrapFunction(tf.image.sample_distorted_bounding_box),
            "draw_bounding_boxes": WrapFunction(tf.image.draw_bounding_boxes),
            "combined_non_max_suppression": WrapFunction(tf.image.combined_non_max_suppression),
        }
        postprocess.update(TENSORFLOWTRANSFORMS["postprocess"])
        return postprocess
    def _get_general(self):
        general = {
            "transpose": WrapFunction(tf.image.transpose),
        }
        general.update(TENSORFLOWTRANSFORMS["general"])
        return general
class MXNetTransforms(BaseTransforms):
    """Transform registry backed by MXNet Gluon vision transforms."""
    def _get_preprocess(self):
        # Built-in Gluon preprocess ops, extended by the user registry.
        preprocess = {
            'ToTensor': mx.gluon.data.vision.transforms.ToTensor,
            'Normalize': mx.gluon.data.vision.transforms.Normalize,
            'Rotate': mx.gluon.data.vision.transforms.Rotate,
            'RandomRotation': mx.gluon.data.vision.transforms.RandomRotation,
            'RandomResizedCrop': mx.gluon.data.vision.transforms.RandomResizedCrop,
            'CropResize': mx.gluon.data.vision.transforms.CropResize,
            'RandomCrop': mx.gluon.data.vision.transforms.RandomCrop,
            'CenterCrop': mx.gluon.data.vision.transforms.CenterCrop,
            'Resize': mx.gluon.data.vision.transforms.Resize,
            'RandomFlipLeftRight': mx.gluon.data.vision.transforms.RandomFlipLeftRight,
            'RandomFlipTopBottom': mx.gluon.data.vision.transforms.RandomFlipTopBottom,
            'RandomBrightness': mx.gluon.data.vision.transforms.RandomBrightness,
            'RandomContrast': mx.gluon.data.vision.transforms.RandomContrast,
            'RandomSaturation': mx.gluon.data.vision.transforms.RandomSaturation,
            'RandomHue': mx.gluon.data.vision.transforms.RandomHue,
            'RandomColorJitter': mx.gluon.data.vision.transforms.RandomColorJitter,
            'RandomLighting': mx.gluon.data.vision.transforms.RandomLighting,
            'RandomGray': mx.gluon.data.vision.transforms.RandomGray
        }
        preprocess.update(MXNETTRANSFORMS["preprocess"])
        return preprocess
    def _get_postprocess(self):
        # No built-in postprocess ops; only user-registered ones.
        postprocess = {}
        postprocess.update(MXNETTRANSFORMS["postprocess"])
        return postprocess
    def _get_general(self):
        # Composition / casting helpers usable in either stage.
        general = {
            'Compose': mx.gluon.data.vision.transforms.Compose,
            'HybridCompose': mx.gluon.data.vision.transforms.HybridCompose,
            'Cast': mx.gluon.data.vision.transforms.Cast,
            'RandomApply': mx.gluon.data.vision.transforms.RandomApply,
            'HybridRandomApply': mx.gluon.data.vision.transforms.HybridRandomApply,
        }
        general.update(MXNETTRANSFORMS["general"])
        return general
class PyTorchTransforms(BaseTransforms):
def _get_preprocess(self):
preprocess = {
"ToTensor": torchvision.transforms.ToTensor,
"ToPILImage": torchvision.transforms.ToPILImage,
"Normalize": torchvision.transforms.Normalize,
"Resize": torchvision.transforms.Resize,
"Scale": torchvision.transforms.Scale,
"CenterCrop": torchvision.transforms.CenterCrop,
"Pad": torchvision.transforms.Pad,
"RandomChoice": torchvision.transforms.RandomChoice,
"RandomOrder": torchvision.transforms.RandomOrder,
"RandomCrop": torchvision.transforms.RandomCrop,
"RandomHorizontalFlip": torchvision.transforms.RandomHorizontalFlip,
"RandomVerticalFlip": torchvision.transforms.RandomVerticalFlip,
"RandomResizedCrop": torchvision.transforms.RandomResizedCrop,
"RandomSizedCrop": torchvision.transforms.RandomSizedCrop,
"FiveCrop": torchvision.transforms.FiveCrop,
"TenCrop": torchvision.transforms.TenCrop,
"ColorJitter": torchvision.transforms.ColorJitter,
"RandomRotation": torchvision.transforms.RandomRotation,
"RandomAffine": torchvision.transforms.RandomAffine,
"Grayscale": torchvision.transforms.Grayscale,
"RandomGrayscale": torchvision.transforms.RandomGrayscale,
"RandomPerspective": torchvision.transforms.RandomPerspective,
"RandomErasing": torchvision.transforms.RandomErasing
}
preprocess.update(PYTORCHTRANSFORMS["preprocess"])
return preprocess
def _get_postprocess(self):
postprocess = {}
postprocess.update(PYTORCHTRANSFORMS["postprocess"])
return postprocess
def _get_general(self):
general = {
"Compose": torchvision.transforms.Compose,
"Lambda": torchvision.transforms.Lambda,
"RandomApply": torchvision.transforms.RandomApply,
"LinearTransformation": torchvision.transforms.LinearTransformation,
}
general.update(PYTORCHTRANSFORMS["general"])
return general
framework_transforms = {"tensorflow": TensorflowTransforms,
"mxnet": MXNetTransforms,
"pytorch": PyTorchTransforms, }
class TRANSFORMS(object):
def __init__(self, framework, process):
assert framework in ("tensorflow", "pytorch",
"mxnet"), "framework support tensorflow pytorch mxnet"
assert process in ("preprocess", "postprocess",
"general"), "process support preprocess postprocess, general"
self.transforms = framework_transforms[framework](process).transforms
def __getitem__(self, transform_type):
assert transform_type in self.transforms.keys(), "transform support {}".\
format(self.transforms.keys())
return self.transforms[transform_type]
# transform registry will register transforms into these dicts
TENSORFLOWTRANSFORMS = {"preprocess": {}, "postprocess": {}, "general": {}}
MXNETTRANSFORMS = {"preprocess": {}, "postprocess": {}, "general": {}}
PYTORCHTRANSFORMS = {"preprocess": {}, "postprocess": {}, "general": {}}
registry_transforms = {"tensorflow": TENSORFLOWTRANSFORMS,
"mxnet": MXNETTRANSFORMS,
"pytorch": PYTORCHTRANSFORMS, }
def transform_registry(transform_type, process, framework):
"""The class decorator used to register all transform subclasses.
Args:
transform_type (str): Transform registration name
process (str): support 3 process including 'preprocess', 'postprocess', 'general'
framework (str): support 3 framework including 'tensorflow', 'pytorch', 'mxnet'
cls (class): The class of register.
Returns:
cls: The class of register.
"""
def decorator_transform(cls):
assert framework in (
"tensorflow",
"mxnet",
"pytorch"), "The framework support tensorflow, mxnet and pytorch"
if transform_type in registry_transforms[framework][process].keys():
raise ValueError('Cannot have two transforms with the same name')
registry_transforms[framework][process][transform_type] = cls
return cls
return decorator_transform
class Transform(object):
"""The base class for transform. __call__ method is needed when write user specific transform
"""
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError
class WrapTransform(Transform):
def __init__(self, transform_func, **kwargs):
self.kwargs = kwargs
self.transform_func = transform_func
def __call__(self, sample):
return self.transform_func(sample, **self.kwargs)
# wrap tensorflow functions to a transform
class WrapFunction(object):
def __init__(self, transform_func):
self.transform_func = transform_func
def __call__(self, **kwargs):
return WrapTransform(self.transform_func, **kwargs)
@transform_registry(transform_type="Compose", process="general", framework="tensorflow")
class ComposeTFTransform(Transform):
def __init__(self, transform_list):
self.transform_list = transform_list
def __call__(self, sample):
for transform in self.transform_list:
sample = transform(sample)
return sample
| [
"feng.tian@intel.com"
] | feng.tian@intel.com |
9a501b13539c3f91f4c336f040f5baa05f56b93c | 53cb2e9e1f5dfb57090295fe45d4810aac07caad | /example/example/tests/test_tenants.py | 4d69f27fa8417ea99daead345777b5a42686dc35 | [] | no_license | kissgyorgy/django-tenants | 47c01f79b459842d6aaf72828a16327b9690ff47 | 327fc85ba18a10b6622a06ad1fe6c5f9ec8d83c6 | refs/heads/master | 2020-04-29T07:58:43.877588 | 2013-11-23T11:27:50 | 2013-11-23T11:27:50 | 14,571,644 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | from django.test import TestCase
from django.db import connection as conn
from django.core.management import call_command
from example.models import Team
class TenantCreationTest(TestCase):
def setUp(self):
Team.objects.create(name='Something', domain='127.0.0.1', schema='something')
def test_create_tenant(self):
team = Team.objects.get(schema='something')
self.assertEqual(team.name, 'Something')
self.assertEqual(team.domain, '127.0.0.1')
| [
"kissgyorgy@me.com"
] | kissgyorgy@me.com |
2993e9834a1447e598649f1c186faef412f6e96f | 3e06c2e64c14c3e3486cd3604268f12510fdeb56 | /nostradamus/nostradamus/urls.py | da6c0686f55de26517f6a72f5c2a01de940b74af | [
"Apache-2.0"
] | permissive | exactpro/nostradamus | 42296e9d4762ac6d7364a665dd5cd74117caacc8 | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | refs/heads/master | 2022-09-29T08:49:14.505795 | 2021-12-21T12:43:01 | 2021-12-21T12:43:01 | 162,601,150 | 32 | 8 | Apache-2.0 | 2022-09-13T23:04:20 | 2018-12-20T15:58:05 | TypeScript | UTF-8 | Python | false | false | 1,588 | py | """nostradamus URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(title="Nostradamus API", default_version="v1"),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
path(
"swagger/",
schema_view.with_ui("swagger", cache_timeout=0),
),
path("admin/", admin.site.urls),
path("analysis_and_training/", include("apps.analysis_and_training.urls")),
path("settings/", include("apps.settings.urls")),
path(
"description_assessment/", include("apps.description_assessment.urls")
),
path("qa_metrics/", include("apps.qa_metrics.urls")),
path("virtual_assistant/", include("apps.virtual_assistant.urls")),
] + staticfiles_urlpatterns()
| [
"litvinov.ivan44@gmail.com"
] | litvinov.ivan44@gmail.com |
4de9633aa2daa7c8e1e69befaaa0c97c1d963018 | 8e833f71bc2f913f459b112e08725ad6d37b0897 | /myapp/migrations/0002_auto_20200309_2105.py | 8bcc39339b7f2fdb42f5e6f23a51a5694601bec4 | [] | no_license | 21toffy/slider | 0d5727befac41f7e37160e6684dd92d9c2583671 | fc064ce61b110351be911abd614fab2810c9c046 | refs/heads/master | 2021-01-09T19:12:39.625561 | 2020-04-22T01:25:23 | 2020-04-22T01:25:23 | 242,425,521 | 0 | 1 | null | 2020-02-23T01:06:51 | 2020-02-22T23:16:47 | Python | UTF-8 | Python | false | false | 406 | py | # Generated by Django 2.0.13 on 2020-03-09 20:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='pictures',
name='image',
field=models.FileField(blank=True, null=True, upload_to='imagefile'),
),
]
| [
"oketofoke@gmail.com"
] | oketofoke@gmail.com |
184ae98d1ffdd0597c98eebc3660eb8b5a22ef05 | e76ea38dbe5774fccaf14e1a0090d9275cdaee08 | /src/chrome/app/DEPS | 4d39b7881b84cb0b57b7e6b77d01a35d4ff0eefb | [
"BSD-3-Clause"
] | permissive | eurogiciel-oss/Tizen_Crosswalk | efc424807a5434df1d5c9e8ed51364974643707d | a68aed6e29bd157c95564e7af2e3a26191813e51 | refs/heads/master | 2021-01-18T19:19:04.527505 | 2014-02-06T13:43:21 | 2014-02-06T13:43:21 | 16,070,101 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 743 | include_rules = [
"+apps",
"+breakpad",
"+chrome/browser",
"+chrome/installer",
"+chrome/plugin/chrome_content_plugin_client.h",
"+chrome/renderer/chrome_content_renderer_client.h",
"+chrome/utility/chrome_content_utility_client.h",
"+chromeos/chromeos_paths.h",
"+chromeos/chromeos_switches.h",
"+components/breakpad",
"+components/nacl/common",
"+components/nacl/zygote",
"+components/startup_metric_utils",
"+content/public/app",
"+content/public/browser/browser_main_runner.h",
"+content/public/browser/render_process_host.h",
"+grit", # For generated headers
"+native_client/src/trusted/service_runtime/osx",
"+policy", # For generated headers and source
"+sandbox",
"+tools/memory_watcher",
]
| [
"ronan@fridu.net"
] | ronan@fridu.net | |
c0ff601cfe6ec3253c26ecaea026da62e49352a3 | 4985143dce9379c939d562d277350f0d8224f06a | /todo_project/todo_project/urls.py | f70f3583dc5a3c121ffd9fcc5b4e30bf38a0350f | [] | no_license | jkinathan/Task_todo | a74ae010dc703ba0ed4654a569b57a5ce7634857 | e19da9ab9dede272b6c148b686e6e77e3da1687a | refs/heads/master | 2023-03-23T13:51:41.816050 | 2021-03-20T09:32:32 | 2021-03-20T09:32:32 | 274,080,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | """todo_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include("todoapp.urls")),
path('accounts/', include("allauth.urls")),
]
| [
"jkinobe@gmail.com"
] | jkinobe@gmail.com |
d3389799ab4c3d8f942947a427250ff6be14a12c | 626b14ce13986b6d5e03143e151004247659625a | /Day01-15/code/Day13/generator1.py | 7b575dd1c65f8f34f1e31fbf29de050b1a02a0d5 | [] | no_license | Focavn/Python-100-Days | c7586ecf7ae3f1fd42f024558bb998be23ee9df8 | d8de6307aeff9fe31fd752bd7725b9cc3fbc084b | refs/heads/master | 2021-08-08T17:57:02.025178 | 2020-09-17T11:58:04 | 2020-09-17T11:58:04 | 220,427,144 | 0 | 0 | null | 2019-11-08T08:59:43 | 2019-11-08T08:59:41 | null | UTF-8 | Python | false | false | 365 | py | """
生成器 - 生成器语法
Version: 0.1
Author: 骆昊
Date: 2018-03-21
"""
seq = [x * x for x in range(10)]
print(seq)
gen = (x * x for x in range(10))
print(gen)
for x in gen:
print(x)
num = 10
gen = (x ** y for x, y in zip(range(1, num), range(num - 1, 0, -1)))
print(gen)
n = 1
while n < num:
print(next(gen))
n += 1
| [
"Focavn@users.github.com"
] | Focavn@users.github.com |
79eda3f61f4e8bd36a9b6f559862df999c11672e | 1e9ad304868c2bda918c19eba3d7b122bac3923b | /kubernetes/client/models/v1_persistent_volume_claim_list.py | 0424df102137fcef71706c3593b0b6412a5b3642 | [
"Apache-2.0"
] | permissive | pineking/client-python | c77e5bd3d476ac852e6dffa96056008baa0f597f | 74a64d7325518f4298600d4bb300f92843c29347 | refs/heads/master | 2021-01-22T22:16:27.368406 | 2017-03-15T08:21:21 | 2017-03-15T08:21:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,612 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.1-660c2a2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1PersistentVolumeClaimList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1PersistentVolumeClaimList - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'items': 'list[V1PersistentVolumeClaim]',
'kind': 'str',
'metadata': 'UnversionedListMeta'
}
self.attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
self._api_version = api_version
self._items = items
self._kind = kind
self._metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1PersistentVolumeClaimList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1PersistentVolumeClaimList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1PersistentVolumeClaimList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1PersistentVolumeClaimList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1PersistentVolumeClaimList.
A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims
:return: The items of this V1PersistentVolumeClaimList.
:rtype: list[V1PersistentVolumeClaim]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1PersistentVolumeClaimList.
A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims
:param items: The items of this V1PersistentVolumeClaimList.
:type: list[V1PersistentVolumeClaim]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1PersistentVolumeClaimList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1PersistentVolumeClaimList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1PersistentVolumeClaimList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1PersistentVolumeClaimList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1PersistentVolumeClaimList.
Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The metadata of this V1PersistentVolumeClaimList.
:rtype: UnversionedListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1PersistentVolumeClaimList.
Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param metadata: The metadata of this V1PersistentVolumeClaimList.
:type: UnversionedListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
9d23561509673b26ce9e745b5ff62cef4fcfd824 | c44bc905d49853f089b653af8a2ea2e1a5fc3a15 | /attributemaps.old/saml_uri.py | ea5b7dbff0452739d54f57a34c19fb7fcb8b65c9 | [
"Apache-2.0"
] | permissive | sheagcraig/crypt-server-saml | 1730113bbb153db608ae33da799858699a8521d2 | 70a95aeb13325bac094c2a6340b19de0958279fe | refs/heads/master | 2022-11-13T17:38:50.227183 | 2020-06-12T14:30:23 | 2020-06-12T14:30:23 | 254,650,451 | 0 | 0 | Apache-2.0 | 2020-04-10T14:09:42 | 2020-04-10T14:09:41 | null | UTF-8 | Python | false | false | 13,252 | py | EDUCOURSE_OID = "urn:oid:1.3.6.1.4.1.5923.1.6.1."
EDUPERSON_OID = "urn:oid:1.3.6.1.4.1.5923.1.1.1."
LDAPGVAT_OID = (
"urn:oid:1.2.40.0.10.2.1.1."
) # ldap.gv.at definitions as specified in http://www.ref.gv.at/AG-IZ-PVP2-Version-2-1-0-2.2754.0.html
UCL_DIR_PILOT = "urn:oid:0.9.2342.19200300.100.1."
X500ATTR_OID = "urn:oid:2.5.4."
LDAPGVAT_UCL_DIR_PILOT = UCL_DIR_PILOT
LDAPGVAT_X500ATTR_OID = X500ATTR_OID
NETSCAPE_LDAP = "urn:oid:2.16.840.1.113730.3.1."
NOREDUPERSON_OID = "urn:oid:1.3.6.1.4.1.2428.90.1."
PKCS_9 = "urn:oid:1.2.840.113549.1.9.1."
SCHAC = "urn:oid:1.3.6.1.4.1.25178.1.2."
SIS = "urn:oid:1.2.752.194.10.2."
UMICH = "urn:oid:1.3.6.1.4.1.250.1.57."
MAP = {
"identifier": "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
"fro": {
EDUCOURSE_OID + "1": "eduCourseOffering",
EDUCOURSE_OID + "2": "eduCourseMember",
EDUPERSON_OID + "1": "eduPersonAffiliation",
EDUPERSON_OID + "2": "eduPersonNickname",
EDUPERSON_OID + "3": "eduPersonOrgDN",
EDUPERSON_OID + "4": "eduPersonOrgUnitDN",
EDUPERSON_OID + "5": "eduPersonPrimaryAffiliation",
EDUPERSON_OID + "6": "eduPersonPrincipalName",
EDUPERSON_OID + "7": "eduPersonEntitlement",
EDUPERSON_OID + "8": "eduPersonPrimaryOrgUnitDN",
EDUPERSON_OID + "9": "eduPersonScopedAffiliation",
EDUPERSON_OID + "10": "eduPersonTargetedID",
EDUPERSON_OID + "11": "eduPersonAssurance",
LDAPGVAT_OID + "1": "PVP-GID",
LDAPGVAT_OID + "149": "PVP-BPK",
LDAPGVAT_OID + "153": "PVP-OU-OKZ",
LDAPGVAT_OID + "261.10": "PVP-VERSION",
LDAPGVAT_OID + "261.20": "PVP-PRINCIPAL-NAME",
LDAPGVAT_OID + "261.24": "PVP-PARTICIPANT-OKZ",
LDAPGVAT_OID + "261.30": "PVP-ROLES",
LDAPGVAT_OID + "261.40": "PVP-INVOICE-RECPT-ID",
LDAPGVAT_OID + "261.50": "PVP-COST-CENTER-ID",
LDAPGVAT_OID + "261.60": "PVP-CHARGE-CODE",
LDAPGVAT_OID + "3": "PVP-OU-GV-OU-ID",
LDAPGVAT_OID + "33": "PVP-FUNCTION",
LDAPGVAT_OID + "55": "PVP-BIRTHDATE",
LDAPGVAT_OID + "71": "PVP-PARTICIPANT-ID",
LDAPGVAT_UCL_DIR_PILOT + "1": "PVP-USERID",
LDAPGVAT_UCL_DIR_PILOT + "3": "PVP-MAIL",
LDAPGVAT_X500ATTR_OID + "11": "PVP-OU",
LDAPGVAT_X500ATTR_OID + "20": "PVP-TEL",
LDAPGVAT_X500ATTR_OID + "42": "PVP-GIVENNAME",
NETSCAPE_LDAP + "1": "carLicense",
NETSCAPE_LDAP + "2": "departmentNumber",
NETSCAPE_LDAP + "3": "employeeNumber",
NETSCAPE_LDAP + "4": "employeeType",
NETSCAPE_LDAP + "39": "preferredLanguage",
NETSCAPE_LDAP + "40": "userSMIMECertificate",
NETSCAPE_LDAP + "216": "userPKCS12",
NETSCAPE_LDAP + "241": "displayName",
NOREDUPERSON_OID + "1": "norEduOrgUniqueNumber",
NOREDUPERSON_OID + "2": "norEduOrgUnitUniqueNumber",
NOREDUPERSON_OID + "3": "norEduPersonBirthDate",
NOREDUPERSON_OID + "4": "norEduPersonLIN",
NOREDUPERSON_OID + "5": "norEduPersonNIN",
NOREDUPERSON_OID + "6": "norEduOrgAcronym",
NOREDUPERSON_OID + "7": "norEduOrgUniqueIdentifier",
NOREDUPERSON_OID + "8": "norEduOrgUnitUniqueIdentifier",
NOREDUPERSON_OID + "9": "federationFeideSchemaVersion",
NOREDUPERSON_OID + "10": "norEduPersonLegalName",
NOREDUPERSON_OID + "11": "norEduOrgSchemaVersion",
NOREDUPERSON_OID + "12": "norEduOrgNIN",
PKCS_9 + "1": "email",
SCHAC + "1": "schacMotherTongue",
SCHAC + "2": "schacGender",
SCHAC + "3": "schacDateOfBirth",
SCHAC + "4": "schacPlaceOfBirth",
SCHAC + "5": "schacCountryOfCitizenship",
SCHAC + "6": "schacSn1",
SCHAC + "7": "schacSn2",
SCHAC + "8": "schacPersonalTitle",
SCHAC + "9": "schacHomeOrganization",
SCHAC + "10": "schacHomeOrganizationType",
SCHAC + "11": "schacCountryOfResidence",
SCHAC + "12": "schacUserPresenceID",
SCHAC + "13": "schacPersonalPosition",
SCHAC + "14": "schacPersonalUniqueCode",
SCHAC + "15": "schacPersonalUniqueID",
SCHAC + "17": "schacExpiryDate",
SCHAC + "18": "schacUserPrivateAttribute",
SCHAC + "19": "schacUserStatus",
SCHAC + "20": "schacProjectMembership",
SCHAC + "21": "schacProjectSpecificRole",
SIS + "1": "sisLegalGuardianFor",
SIS + "2": "sisSchoolGrade",
UCL_DIR_PILOT + "1": "uid",
UCL_DIR_PILOT + "3": "mail",
UCL_DIR_PILOT + "25": "dc",
UCL_DIR_PILOT + "37": "associatedDomain",
UCL_DIR_PILOT + "43": "co",
UCL_DIR_PILOT + "60": "jpegPhoto",
UMICH + "57": "labeledURI",
X500ATTR_OID + "2": "knowledgeInformation",
X500ATTR_OID + "3": "cn",
X500ATTR_OID + "4": "sn",
X500ATTR_OID + "5": "serialNumber",
X500ATTR_OID + "6": "c",
X500ATTR_OID + "7": "l",
X500ATTR_OID + "8": "st",
X500ATTR_OID + "9": "street",
X500ATTR_OID + "10": "o",
X500ATTR_OID + "11": "ou",
X500ATTR_OID + "12": "title",
X500ATTR_OID + "14": "searchGuide",
X500ATTR_OID + "15": "businessCategory",
X500ATTR_OID + "16": "postalAddress",
X500ATTR_OID + "17": "postalCode",
X500ATTR_OID + "18": "postOfficeBox",
X500ATTR_OID + "19": "physicalDeliveryOfficeName",
X500ATTR_OID + "20": "telephoneNumber",
X500ATTR_OID + "21": "telexNumber",
X500ATTR_OID + "22": "teletexTerminalIdentifier",
X500ATTR_OID + "23": "facsimileTelephoneNumber",
X500ATTR_OID + "24": "x121Address",
X500ATTR_OID + "25": "internationaliSDNNumber",
X500ATTR_OID + "26": "registeredAddress",
X500ATTR_OID + "27": "destinationIndicator",
X500ATTR_OID + "28": "preferredDeliveryMethod",
X500ATTR_OID + "29": "presentationAddress",
X500ATTR_OID + "30": "supportedApplicationContext",
X500ATTR_OID + "31": "member",
X500ATTR_OID + "32": "owner",
X500ATTR_OID + "33": "roleOccupant",
X500ATTR_OID + "36": "userCertificate",
X500ATTR_OID + "37": "cACertificate",
X500ATTR_OID + "38": "authorityRevocationList",
X500ATTR_OID + "39": "certificateRevocationList",
X500ATTR_OID + "40": "crossCertificatePair",
X500ATTR_OID + "42": "givenName",
X500ATTR_OID + "43": "initials",
X500ATTR_OID + "44": "generationQualifier",
X500ATTR_OID + "45": "x500UniqueIdentifier",
X500ATTR_OID + "46": "dnQualifier",
X500ATTR_OID + "47": "enhancedSearchGuide",
X500ATTR_OID + "48": "protocolInformation",
X500ATTR_OID + "50": "uniqueMember",
X500ATTR_OID + "51": "houseIdentifier",
X500ATTR_OID + "52": "supportedAlgorithms",
X500ATTR_OID + "53": "deltaRevocationList",
X500ATTR_OID + "54": "dmdName",
X500ATTR_OID + "65": "pseudonym",
},
"to": {
"associatedDomain": UCL_DIR_PILOT + "37",
"authorityRevocationList": X500ATTR_OID + "38",
"businessCategory": X500ATTR_OID + "15",
"c": X500ATTR_OID + "6",
"cACertificate": X500ATTR_OID + "37",
"carLicense": NETSCAPE_LDAP + "1",
"certificateRevocationList": X500ATTR_OID + "39",
"cn": X500ATTR_OID + "3",
"co": UCL_DIR_PILOT + "43",
"crossCertificatePair": X500ATTR_OID + "40",
"dc": UCL_DIR_PILOT + "25",
"deltaRevocationList": X500ATTR_OID + "53",
"departmentNumber": NETSCAPE_LDAP + "2",
"destinationIndicator": X500ATTR_OID + "27",
"displayName": NETSCAPE_LDAP + "241",
"dmdName": X500ATTR_OID + "54",
"dnQualifier": X500ATTR_OID + "46",
"eduCourseMember": EDUCOURSE_OID + "2",
"eduCourseOffering": EDUCOURSE_OID + "1",
"eduPersonAffiliation": EDUPERSON_OID + "1",
"eduPersonAssurance": EDUPERSON_OID + "11",
"eduPersonEntitlement": EDUPERSON_OID + "7",
"eduPersonNickname": EDUPERSON_OID + "2",
"eduPersonOrgDN": EDUPERSON_OID + "3",
"eduPersonOrgUnitDN": EDUPERSON_OID + "4",
"eduPersonPrimaryAffiliation": EDUPERSON_OID + "5",
"eduPersonPrimaryOrgUnitDN": EDUPERSON_OID + "8",
"eduPersonPrincipalName": EDUPERSON_OID + "6",
"eduPersonScopedAffiliation": EDUPERSON_OID + "9",
"eduPersonTargetedID": EDUPERSON_OID + "10",
"email": PKCS_9 + "1",
"employeeNumber": NETSCAPE_LDAP + "3",
"employeeType": NETSCAPE_LDAP + "4",
"enhancedSearchGuide": X500ATTR_OID + "47",
"facsimileTelephoneNumber": X500ATTR_OID + "23",
"federationFeideSchemaVersion": NOREDUPERSON_OID + "9",
"generationQualifier": X500ATTR_OID + "44",
"givenName": X500ATTR_OID + "42",
"houseIdentifier": X500ATTR_OID + "51",
"initials": X500ATTR_OID + "43",
"internationaliSDNNumber": X500ATTR_OID + "25",
"jpegPhoto": UCL_DIR_PILOT + "60",
"knowledgeInformation": X500ATTR_OID + "2",
"l": X500ATTR_OID + "7",
"labeledURI": UMICH + "57",
"mail": UCL_DIR_PILOT + "3",
"member": X500ATTR_OID + "31",
"norEduOrgAcronym": NOREDUPERSON_OID + "6",
"norEduOrgNIN": NOREDUPERSON_OID + "12",
"norEduOrgSchemaVersion": NOREDUPERSON_OID + "11",
"norEduOrgUniqueIdentifier": NOREDUPERSON_OID + "7",
"norEduOrgUniqueNumber": NOREDUPERSON_OID + "1",
"norEduOrgUnitUniqueIdentifier": NOREDUPERSON_OID + "8",
"norEduOrgUnitUniqueNumber": NOREDUPERSON_OID + "2",
"norEduPersonBirthDate": NOREDUPERSON_OID + "3",
"norEduPersonLIN": NOREDUPERSON_OID + "4",
"norEduPersonLegalName": NOREDUPERSON_OID + "10",
"norEduPersonNIN": NOREDUPERSON_OID + "5",
"o": X500ATTR_OID + "10",
"ou": X500ATTR_OID + "11",
"owner": X500ATTR_OID + "32",
"physicalDeliveryOfficeName": X500ATTR_OID + "19",
"postOfficeBox": X500ATTR_OID + "18",
"postalAddress": X500ATTR_OID + "16",
"postalCode": X500ATTR_OID + "17",
"preferredDeliveryMethod": X500ATTR_OID + "28",
"preferredLanguage": NETSCAPE_LDAP + "39",
"presentationAddress": X500ATTR_OID + "29",
"protocolInformation": X500ATTR_OID + "48",
"pseudonym": X500ATTR_OID + "65",
"PVP-USERID": LDAPGVAT_UCL_DIR_PILOT + "1",
"PVP-MAIL": LDAPGVAT_UCL_DIR_PILOT + "3",
"PVP-GID": LDAPGVAT_OID + "1",
"PVP-BPK": LDAPGVAT_OID + "149",
"PVP-OU-OKZ": LDAPGVAT_OID + "153",
"PVP-VERSION": LDAPGVAT_OID + "261.10",
"PVP-PRINCIPAL-NAME": LDAPGVAT_OID + "261.20",
"PVP-PARTICIPANT-OKZ": LDAPGVAT_OID + "261.24",
"PVP-ROLES": LDAPGVAT_OID + "261.30",
"PVP-INVOICE-RECPT-ID": LDAPGVAT_OID + "261.40",
"PVP-COST-CENTER-ID": LDAPGVAT_OID + "261.50",
"PVP-CHARGE-CODE": LDAPGVAT_OID + "261.60",
"PVP-OU-GV-OU-ID": LDAPGVAT_OID + "3",
"PVP-FUNCTION": LDAPGVAT_OID + "33",
"PVP-BIRTHDATE": LDAPGVAT_OID + "55",
"PVP-PARTICIPANT-ID": LDAPGVAT_OID + "71",
"PVP-OU": LDAPGVAT_X500ATTR_OID + "11",
"PVP-TEL": LDAPGVAT_X500ATTR_OID + "20",
"PVP-GIVENNAME": LDAPGVAT_X500ATTR_OID + "42",
"registeredAddress": X500ATTR_OID + "26",
"roleOccupant": X500ATTR_OID + "33",
"schacCountryOfCitizenship": SCHAC + "5",
"schacCountryOfResidence": SCHAC + "11",
"schacDateOfBirth": SCHAC + "3",
"schacExpiryDate": SCHAC + "17",
"schacGender": SCHAC + "2",
"schacHomeOrganization": SCHAC + "9",
"schacHomeOrganizationType": SCHAC + "10",
"schacMotherTongue": SCHAC + "1",
"schacPersonalPosition": SCHAC + "13",
"schacPersonalTitle": SCHAC + "8",
"schacPersonalUniqueCode": SCHAC + "14",
"schacPersonalUniqueID": SCHAC + "15",
"schacPlaceOfBirth": SCHAC + "4",
"schacProjectMembership": SCHAC + "20",
"schacProjectSpecificRole": SCHAC + "21",
"schacSn1": SCHAC + "6",
"schacSn2": SCHAC + "7",
"schacUserPresenceID": SCHAC + "12",
"schacUserPrivateAttribute": SCHAC + "18",
"schacUserStatus": SCHAC + "19",
"searchGuide": X500ATTR_OID + "14",
"serialNumber": X500ATTR_OID + "5",
"sisLegalGuardianFor": SIS + "1",
"sisSchoolGrade": SIS + "2",
"sn": X500ATTR_OID + "4",
"st": X500ATTR_OID + "8",
"street": X500ATTR_OID + "9",
"supportedAlgorithms": X500ATTR_OID + "52",
"supportedApplicationContext": X500ATTR_OID + "30",
"telephoneNumber": X500ATTR_OID + "20",
"teletexTerminalIdentifier": X500ATTR_OID + "22",
"telexNumber": X500ATTR_OID + "21",
"title": X500ATTR_OID + "12",
"uid": UCL_DIR_PILOT + "1",
"uniqueMember": X500ATTR_OID + "50",
"userCertificate": X500ATTR_OID + "36",
"userPKCS12": NETSCAPE_LDAP + "216",
"userSMIMECertificate": NETSCAPE_LDAP + "40",
"x121Address": X500ATTR_OID + "24",
"x500UniqueIdentifier": X500ATTR_OID + "45",
},
}
| [
"graham@grahamgilbert.com"
] | graham@grahamgilbert.com |
802b0edc1a9e3592359f16311158f23402fe536a | 4eb967cfb3d78ee72e9647f12157eeb58c1f07a4 | /time.py | 5f23e2f7542adfcecc7b2f1b8e999938f472b422 | [] | no_license | shyam96s/python1 | 757ed6fc0e16dfc58e3b9fda53f6337de74cd305 | 2907e768092d2c05b7c2cb07d69ba7572422430b | refs/heads/master | 2020-04-05T20:53:54.284850 | 2018-11-12T17:01:34 | 2018-11-12T17:01:34 | 157,199,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | import time
import calendar
cal=calendar.month(2018,11)
myTime=time.localtime(time.time())
print(myTime)
print(cal) | [
"you@example.com"
] | you@example.com |
a921dee3bc36edab5c71cb2fc72aada3b8e898c0 | 63c5306b91db445016059a7f0c7ac167bf231d3c | /caffe2/python/core.py | a929a4ae53151406074aadf72d1263aa2ea13e91 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Fletcher3003/caffe2 | b57ad712993b7c50d16b8f0eedc2e5587bc89e0e | 731096902a090b49612b02cc5a1301c81bf93943 | refs/heads/master | 2020-04-15T18:10:11.514190 | 2019-01-09T17:10:14 | 2019-01-09T17:10:14 | 164,903,847 | 0 | 0 | Apache-2.0 | 2019-01-09T17:02:59 | 2019-01-09T17:02:53 | Shell | UTF-8 | Python | false | false | 101,685 | py | ## @package core
# Module caffe2.python.core
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple, OrderedDict
from past.builtins import basestring
from future.utils import viewitems, viewkeys, viewvalues
from itertools import chain
from six import binary_type, string_types, text_type
from caffe2.proto import caffe2_pb2
from collections import defaultdict
from caffe2.python import scope, utils, workspace
import caffe2.python._import_c_extension as C
import google.protobuf.text_format as protobuftx
import pickle
import numpy as np
import sys
# Mac os specific message
# NOTE(review): benign-warning notice for homebrew leveldb users; printed
# once at import time when the leveldb DB backend is registered on macOS.
if (sys.platform == 'darwin' and 'leveldb' in C.registered_dbs()):
    print('If you are using homebrew leveldb on a Mac OS, you might see an '
          'error warning you that malloc_zone_unregister() failed. This is '
          'not a caffe2 issue but is due to the homebrew leveldb having an '
          'incompatible memory allocator. It does not affect usage.')

# Convenience redirections to functions inside scope.
DeviceScope = scope.DeviceScope
NameScope = scope.NameScope
# Bring datatype enums to the main namespace
class DataType:
    """Namespace class whose attributes mirror caffe2_pb2.TensorProto.DataType.

    The attributes are populated at import time by _InitDataType() below.
    """
    pass
def _InitDataType():
    """Copy every TensorProto.DataType enum entry onto the DataType class."""
    for type_name, type_value in caffe2_pb2.TensorProto.DataType.items():
        setattr(DataType, type_name, type_value)


_InitDataType()
def _GetRegisteredOperators():
    """Snapshot the operator names currently known to the C++ registry."""
    return {op_name for op_name in workspace.RegisteredOperators()}


_REGISTERED_OPERATORS = _GetRegisteredOperators()
def RefreshRegisteredOperators():
    """Re-query the operator registry and refresh the cached name set.

    Needed after dynamically loading additional operator libraries.
    """
    global _REGISTERED_OPERATORS
    _REGISTERED_OPERATORS = _GetRegisteredOperators()
# Arguments passed to GlobalInit, kept so they can be replayed/inspected.
_GLOBAL_INIT_ARGS = []


def GlobalInit(args):
    """Run Caffe2's C++ global initialization with `args` (argv-style).

    Every argument except argv[0] is recorded in _GLOBAL_INIT_ARGS first.
    """
    for extra_arg in args[1:]:
        _GLOBAL_INIT_ARGS.append(extra_arg)
    C.global_init(args)
def GetGlobalInitArgs():
    """Return a copy of the arguments previously recorded by GlobalInit."""
    return list(_GLOBAL_INIT_ARGS)
def IsOperator(op_type):
    """Return True iff `op_type` names a registered Caffe2 operator."""
    return op_type in _REGISTERED_OPERATORS
def IsOperatorWithEngine(op_type, engine):
    """Return True iff `op_type` has an implementation registered for `engine`."""
    engine_key = '{}_ENGINE_{}'.format(op_type, engine)
    return engine_key in _REGISTERED_OPERATORS
def DeviceOption(device_type, cuda_gpu_id=0, random_seed=None):
    """Build a caffe2_pb2.DeviceOption proto.

    Args:
        device_type: one of the caffe2_pb2 device-type enum values.
        cuda_gpu_id: GPU ordinal (only meaningful for CUDA devices).
        random_seed: optional seed; left unset in the proto when None.
    """
    opt = caffe2_pb2.DeviceOption()
    opt.device_type = device_type
    opt.cuda_gpu_id = cuda_gpu_id
    if random_seed is not None:
        opt.random_seed = random_seed
    return opt
def InferBlobDevices(net):
    '''
    Compute mapping from parameters to devices by looking at the
    device option of the op that creates the blob has
    '''
    mapping = {}
    for op in net.Proto().op:
        op_device = op.device_option
        if op_device is None:
            # BUGFIX: protobuf generated-message constructors accept keyword
            # arguments only; the previous positional call
            # caffe2_pb2.DeviceOption(caffe2_pb2.CPU) raised TypeError.
            # NOTE(review): for a singular proto message field op.device_option
            # is never None, so this branch looks unreachable — confirm.
            op_device = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU)
        # TODO: T18892922, use device annotations
        for b in op.output:
            # Later producers of the same blob overwrite earlier entries.
            mapping[b] = op_device
    return mapping
def InferOpBlobDevices(op):
    """Return (input_device_options, output_device_options) inferred for `op`.

    Each element is a caffe2_pb2.DeviceOption parsed from the serialized
    answer of the C++ device-inference routine.
    """
    def _parse_all(serialized_options):
        # Deserialize a list of DeviceOption protos.
        parsed = []
        for dev_str in serialized_options:
            option = caffe2_pb2.DeviceOption()
            option.ParseFromString(dev_str)
            parsed.append(option)
        return parsed

    device_info = C.infer_op_input_output_device(op.SerializeToString())
    return _parse_all(device_info[0]), _parse_all(device_info[1])
# An (indices, values) blob-name pair describing a sparse gradient.
GradientSlice = namedtuple('GradientSlice', ['indices', 'values'])
class BlobReference(object):
    """A wrapper around a blob in a net.
    BlobReference gives us a way to refer to the network that the blob is
    generated from. Note that blobs are, essentially, just strings in the
    current workspace.
    """

    def __init__(self, name, net=None):
        """Initializes a blob reference.
        Note that this does not prepends the namescope. If needed, use
        ScopedBlobReference() to prepend the existing namespace.
        """
        if isinstance(name, string_types):
            self._name = name
        elif isinstance(name, binary_type):
            self._name = name.decode('utf-8')
        else:
            self._name = str(name)
        self._from_net = net
        # meta allows helper functions to put whatever metainformation needed
        # there.
        self.meta = {}

    def __hash__(self):
        # Hash by name only, consistent with __eq__ below.
        return hash(self._name)

    def __eq__(self, other):
        # Equality is purely by name; str/bytes compare against the name too.
        if isinstance(other, string_types):
            return self._name == other
        elif isinstance(other, binary_type):
            return self._name == other.decode('utf-8')
        elif isinstance(other, BlobReference):
            return self._name == other._name
        else:
            return False

    def __ne__(self, other):
        return not(self == other)

    def __str__(self):
        return self._name

    def __repr__(self):
        return 'BlobReference("{}")'.format(self._name)

    def __add__(self, other):
        # blob + "suffix" -> new BlobReference bound to the same net.
        if not isinstance(other, string_types):
            raise RuntimeError('Cannot add BlobReference to a non-string.')
        return BlobReference(self._name + other, self._from_net)

    def __radd__(self, other):
        # "prefix" + blob -> new BlobReference bound to the same net.
        if not isinstance(other, string_types):
            raise RuntimeError('Cannot add a non-string to BlobReference.')
        return BlobReference(other + self._name, self._from_net)

    def Net(self):
        # The net this blob was created from, or None.
        return self._from_net

    def GetNameScope(self):
        # Everything up to and including the last namescope separator; when
        # no separator is present rfind returns -1 and the slice is ''.
        return self._name[:self._name.rfind(scope._NAMESCOPE_SEPARATOR) + 1]

    def _CreateAndAddToNet(self, op_type, inputs=None, *args, **kwargs):
        """Internal function that routes the operator generation to the
        network's __getattr__ function.
        """
        inputs = [] if inputs is None else inputs
        if isinstance(inputs, BlobReference) or isinstance(inputs, string_types):
            inputs = [inputs]
        # add self to the input list.
        inputs.insert(0, self)
        return self._from_net.__getattr__(op_type)(inputs, *args, **kwargs)

    def __getattr__(self, op_type):
        """A wrapper allowing one to initiate operators from a blob reference.
        Example: for a blob reference b that comes from network n, doing
            b.Relu(...)
        is equivalent to doing
            net.Relu([b], ...)
        """
        # Never treat dunder lookups as operator names.
        if op_type.startswith('__'):
            raise AttributeError('Attribute {} not found.'.format(op_type))
        if self._from_net is None:
            raise RuntimeError(
                'You cannot use a blob reference that does not have a net '
                'source to create operators. Create the operator from an '
                'explicit net object.')
        if not IsOperator(op_type):
            raise RuntimeError(
                'Method ' + op_type + ' is not a registered operator.' +
                ' Did you mean: [' +
                ",".join(workspace.C.nearby_opnames(op_type)) + ']'
            )
        return lambda *args, **kwargs: self._CreateAndAddToNet(
            op_type, *args, **kwargs)

    def __dir__(self):
        # Expose operator names (minus engine-specialized variants other
        # than CUDNN) alongside regular attributes, for tab completion.
        additional_methods = [
            op
            for op in _REGISTERED_OPERATORS
            if '_ENGINE_' not in op or '_ENGINE_CUDNN' in op]
        return sorted(set(chain(
            dir(type(self)),
            viewkeys(self.__dict__),
            additional_methods
        )))
def ScopedName(name):
    """Prefix `name` with the current name scope (bytes are decoded first)."""
    decoded = name.decode('ascii') if isinstance(name, binary_type) else name
    return scope.CurrentNameScope() + decoded
def ScopedBlobReference(name, *args, **kwargs):
    """Build a BlobReference whose name carries the current scope prefix."""
    scoped_name = ScopedName(name)
    return BlobReference(scoped_name, *args, **kwargs)
def _RectifyInputOutput(blobs, net=None):
    """Normalize CreateOperator inputs/outputs into a list of BlobReferences.

    Accepts a single str/bytes name (which gets the current namescope
    prefixed), a single BlobReference, or a list/tuple mixing both forms.
    """
    if isinstance(blobs, (string_types, binary_type)):
        # A single string: prepend scope.CurrentNameScope() and wrap in a list.
        # TODO(jiayq): enforce using BlobReference instead of raw strings.
        return [ScopedBlobReference(blobs, net=net)]
    if type(blobs) is BlobReference:
        # Already a BlobReference: just wrap it.
        return [blobs]
    if type(blobs) in (list, tuple):
        # A sequence: rectify element by element, type-checking as we go.
        rectified = []
        for blob in blobs:
            if isinstance(blob, (string_types, binary_type)):
                rectified.append(ScopedBlobReference(blob, net=net))
            elif type(blob) is BlobReference:
                rectified.append(blob)
            else:
                raise TypeError(
                    "I/O blob #{} of unsupported type: {} of type {}"
                    .format(len(rectified), str(blob), type(blob)))
        return rectified
    raise TypeError(
        "Unknown input/output type: %s of type %s." %
        (str(blobs), type(blobs))
    )
def CreateOperator(
    operator_type,
    inputs,
    outputs,
    name='',
    control_input=None,
    device_option=None,
    arg=None,
    engine=None,
    **kwargs
):
    """A function wrapper that allows one to create operators based on the
    operator type. The type should be a string corresponding to an operator
    registered with Caffe2.

    Extra **kwargs are converted into operator Arguments, except
    'random_seed' which is routed into the device option. If immediate
    mode is on, the operator is also executed right away.
    """
    operator = caffe2_pb2.OperatorDef()
    operator.type = operator_type
    operator.name = name
    # Add rectified inputs and outputs
    inputs = _RectifyInputOutput(inputs)
    outputs = _RectifyInputOutput(outputs)
    operator.input.extend([text_type(i) for i in inputs])
    operator.output.extend([text_type(o) for o in outputs])
    if control_input:
        control_input = _RectifyInputOutput(control_input)
        operator.control_input.extend([text_type(i) for i in control_input])
    # Set device option:
    # (1) If device_option is explicitly set, use device_option.
    # (2) If not, but scope.CurrentDeviceScope() is set,
    #     then we use scope.CurrentDeviceScope().
    # (3) Otherwise, do not set device option.
    if device_option is not None:
        operator.device_option.CopyFrom(device_option)
    elif scope.CurrentDeviceScope() is not None:
        operator.device_option.CopyFrom(scope.CurrentDeviceScope())
    if engine is not None:
        operator.engine = engine
    # random seed is defined in the device option, so we need to do special
    # care. It must be removed from kwargs before the generic argument loop.
    if 'random_seed' in kwargs:
        operator.device_option.random_seed = kwargs['random_seed']
        del kwargs['random_seed']
    # Add given arguments that do not need parsing
    if arg is not None:
        operator.arg.extend(arg)
    # Add all other arguments
    for key, value in viewitems(kwargs):
        operator.arg.add().CopyFrom(utils.MakeArgument(key, value))

    if workspace.IsImmediate():
        workspace.RunOperatorImmediate(operator)
    return operator
def _RegisterPythonImpl(
    f, grad_f=None, python_func_type=None, pass_workspace=False
):
    """Register a python forward/backward pair with the C++ PythonOp registry.

    Returns the token under which the implementation was registered.
    When `python_func_type` is given, it is instantiated with `f` and its
    forward/backward methods are used. Otherwise a (factory, args, kwargs)
    tuple for `f` or `grad_f` is expanded into the actual callable.
    """
    if python_func_type:
        func_instance = python_func_type(f)
        f = func_instance.forward
        grad_f = func_instance.backward
    else:
        if isinstance(f, tuple):
            f = f[0](*f[1], **f[2])
        if isinstance(grad_f, tuple):
            grad_f = grad_f[0](*grad_f[1], **grad_f[2])

    token = C.register_python_op(f, pass_workspace, '')
    if grad_f:
        C.register_python_gradient_op(token, grad_f)
    return token
def CreatePythonOperator(
    f, inputs,
    outputs,
    grad_f=None,
    pass_workspace=False,
    python_func_type=None,
    *args,
    **kwargs
):
    """
    `f` should have a signature (inputs, outputs)

    If `pass_workspace` is True, the signature is changed to
    (inputs, outputs, workspace) where `workspace` is the workspace the op
    is going to run on. This is potentially dangerous (as the op can manipulate
    the workspace directly), use on your own risk.
    """
    registration_token = _RegisterPythonImpl(
        f, grad_f, python_func_type, pass_workspace=pass_workspace
    )
    kwargs["token"] = registration_token
    return CreateOperator("Python", inputs, outputs, *args, **kwargs)
def GetIndexFromGradientList(g_list, name):
    """Return the index of `name` within a gradient list, or None if absent.

    Entries may be plain blob names or GradientSlice pairs; for the latter,
    a match on either the indices or the values blob counts.
    """
    for idx, entry in enumerate(g_list):
        if entry == name:
            return idx
        if type(entry) is GradientSlice and (
                entry.indices == name or entry.values == name):
            return idx
    return None
# One SSA record: the forward op plus the blob versions it read and wrote.
OpSSA = namedtuple('OpSSA', ['op', 'in_versions', 'out_versions'])
# Bookkeeping for a dense gradient: the grad op producing it (may be None),
# the output index on that op, and the gradient blob name.
GradGenMeta = namedtuple('GradGenMeta', ['grad_op', 'idx', 'gradient'])
# Same bookkeeping for a sparse gradient, tracking the indices-producing
# and values-producing grad ops separately.
SparseGradGenMeta = namedtuple('SparseGradGenMeta', [
    'grad_op_indices', 'idx_indices',
    'grad_op_values', 'idx_values',
    'gradient',
])
class IR(object):
    """A simple IR class to keep track of all intermediate representations used
    in the gradient computation.
    """

    def __init__(self, operators):
        # The IR class holds multiple metadata from the forward pass:
        # a) ssa: a list of [op, in_versions, out_versions] recording the
        #    input and the output version of each operator, similar
        #    to a normal SSA form.
        # b) input_count: a dictionary specifying for each blob and
        #    each of its version, how many times it is used as input for another
        #    op.
        # c) frontier: maintaining the current versions of the blobs
        #    we are having in the workspace, after the execution of all the ops
        #    added to the IR so far. This is useful because if a gradient is
        #    trying to access an earlier version of a blob, we can sanity check
        #    that it is no longer there, and thus throw an error.
        # d) gradient_frontier: maps the names of blobs to its version that the
        #    gradient corresponds to.
        # e) gradient_generators: for each blob and each of its version, maps to
        #    a list of operators that generates its gradient together with the
        #    gradient name.
        self.ssa = []
        self.input_usages = defaultdict(lambda: defaultdict(list))
        self.frontier = defaultdict(int)
        self.gradient_frontier = {}
        self.gradient_generators = defaultdict(lambda: defaultdict(list))
        self.out_version_history = defaultdict(list)
        self.in_version_history = defaultdict(list)

        for op in operators:
            self.Play(op)

        self.SanityCheck(operators)

    def SanityCheck(self, operators):
        # Validate StopGradient usage by checking that StopGradient's output
        # is actually passed forward
        for op in operators:
            if op.type == 'StopGradient':
                if op.output[0] not in self.input_usages:
                    raise Exception("""StopGradient's output '{}' is orphan.
You typically want to specify same input and output for
StopGradient. Op:\n\n{}""".format(op.output[0], str(op)))

    def Play(self, op):
        """Adds an op to the current IR, and update the internal states to
        reflect the blobs and versions after the execution of the op.
        """
        # For input, they are the current version in the dict.
        in_versions = {}
        for s in op.input:
            in_versions[s] = self.frontier[s]
            self.input_usages[s][self.frontier[s]].append(len(self.ssa))
            self.in_version_history[s].append((op, self.frontier[s]))
        # For output, they are the current version plus one. If this is a
        # newly created blob, its version starts with zero.
        out_versions = {}
        for s in op.output:
            if s in self.frontier:
                self.frontier[s] += 1
            out_versions[s] = self.frontier[s]
            self.out_version_history[s].append((op, self.frontier[s]))
        # Add to SSA for bookkeeping.
        self.ssa.append(OpSSA(op, in_versions, out_versions))

    def CheckGradientOperatorInput(
            self, grad_op_input, g_output, fwd_op_idx, locally_generated_blobs):
        """Checks if the gradient operators can be correctly carried out."""
        forward_op, in_versions, out_versions = self.ssa[fwd_op_idx]
        original_index = GetIndexFromGradientList(g_output, grad_op_input)

        # Functions to generate debug help for version-mismatches
        def versionMismatchInfoOut(name):
            s = "DEBUG HELP:\n"
            s += "Maybe you use same output blob twice for different ops?\n"
            s += "== Version history of blob [{}]\n".format(name)
            for (op, vers) in self.out_version_history[name]:
                s += "Version (out) {} <-- {}".format(vers, op)
                s += "\n"
            return s

        def versionMismatchInfoIn(name):
            s = "DEBUG HELP:\n"
            s += "Maybe the blob was overwritten by another op?\n"
            s += "== Version history of blob [{}]\n".format(name)
            for (op, vers) in self.in_version_history[name]:
                s += "version (in) {} <-- {}".format(vers, op)
                s += "\n"
            return s

        # If it is a dense or sparse gradient name, it should match the
        # version of the corresponding output.
        if original_index is not None:
            original_name = forward_op.output[original_index]
            if (out_versions[original_name] !=
                    self.gradient_frontier[original_name]):
                raise RuntimeError(
                    'Gradient name "%s" is expected to correspond '
                    'to version %d of "%s", but currently we have '
                    'version %d.\n\n' % (
                        grad_op_input, out_versions[original_name],
                        original_name,
                        self.gradient_frontier[original_name]) +
                    versionMismatchInfoOut(original_name))
        # If it is an output name, the current version should match the
        # version when the operator was run.
        elif grad_op_input in out_versions:
            if self.frontier[grad_op_input] != out_versions[grad_op_input]:
                raise RuntimeError(
                    'Gradient operator needs output "%s" at version'
                    ' %d, but currently we have version %d.\n\n' % (
                        grad_op_input, out_versions[grad_op_input],
                        self.frontier[grad_op_input]
                    ) + versionMismatchInfoOut(grad_op_input)
                )
        # If it is an input name, the current version should match the
        # version when the operator was run.
        elif grad_op_input in in_versions:
            if (self.frontier[grad_op_input] != in_versions[grad_op_input]):
                raise RuntimeError(
                    'Gradient operator needs input "%s" at version '
                    '%d, but currently we have version %d.\n\n' % (
                        grad_op_input, in_versions[grad_op_input],
                        self.frontier[grad_op_input]
                    ) + versionMismatchInfoIn(grad_op_input)
                )
        # If it is none of the above, it should be a blob that is
        # generated locally by one of the previous gradient operators.
        else:
            if grad_op_input not in locally_generated_blobs:
                raise RuntimeError(
                    'Blob name "%s" not in the scope of operator: '
                    '%s\nand is not generated by any of the local '
                    'gradient operators.' % (grad_op_input, str(forward_op))
                )

    def AppendSparseGenerators(self, sparse_generators):
        # merge indices and values generators for sparse gradients
        for name, input_generators in viewitems(sparse_generators):
            for version, generators in viewitems(input_generators):
                if len(generators) == 1:
                    # either indices or values are generated (but not both)
                    generator = generators[0]
                else:
                    # both indices and values are generated
                    assert(len(generators) == 2)
                    op1_i, idx1_i, op1_v, idx1_v, g1 = generators[0]
                    op2_i, idx2_i, op2_v, idx2_v, g2 = generators[1]
                    assert(g1 == g2)
                    assert(op1_i is None or op2_i is None)
                    assert(op1_v is None or op2_v is None)
                    assert(idx1_i == 0 or idx2_i == 0)
                    assert(idx1_v == 0 or idx2_v == 0)
                    generator = SparseGradGenMeta(
                        op1_i or op2_i, idx1_i + idx2_i,
                        op1_v or op2_v, idx1_v + idx2_v,
                        g1)
                self.gradient_generators[name][version].append(generator)

    def BuildGradientGenerators(  # NOQA
            self, fwd_op_idx, gradient_ops, g_output, g_input):
        """Updates gradient_generators and gradient_frontier"""
        forward_op, in_versions, out_versions = self.ssa[fwd_op_idx]
        locally_generated_blobs = []
        sparse_generators = defaultdict(lambda: defaultdict(list))

        for grad_op in gradient_ops:
            # (1) check that inputs are valid
            for s in grad_op.input:
                self.CheckGradientOperatorInput(
                    s, g_output, fwd_op_idx, locally_generated_blobs)

            # (2) add outputs to the locally generated blobs
            # If an output corresponds to the gradient of an input, we also
            # record it to gradient_generators
            locally_generated_blobs.extend([str(s) for s in grad_op.output])
            for i, output in enumerate(grad_op.output):
                input_index = GetIndexFromGradientList(g_input, output)
                if input_index is not None:
                    input_name = forward_op.input[input_index]
                    input_version = in_versions[input_name]
                    g = g_input[input_index]
                    if type(g) is GradientSlice:
                        # the output corresponds either to the indices or the
                        # values of the sparse gradient. In either case we
                        # create a (partial) SparseGradGenMeta. If necessary,
                        # we'll merge indices and values generators
                        # corresponding to the same gradient in step (3)
                        if g.indices == output:
                            m = SparseGradGenMeta(grad_op, i, None, 0, g)
                        else:
                            assert(g.values == output)
                            m = SparseGradGenMeta(None, 0, grad_op, i, g)
                        sparse_generators[input_name][input_version].append(m)
                    else:
                        self.gradient_generators[input_name][input_version] \
                            .append(GradGenMeta(
                                grad_op, i, g))

        # (3) merge indices and values generators for sparse gradients, and
        # add them to gradient_generators
        self.AppendSparseGenerators(sparse_generators)

        # (4) for ops (e.g., Add, Sum, Sub) which have gradient outputs directly
        # passed from inputs (not computed from gradient ops), we create an
        # GradGenMeta with None grad_op and idx so that the gradient_generators
        # knows where the gradients are coming from. This is needed for creating
        # Sum op to accumulate the gradients from multiple parents.
        for input_index, g in enumerate(g_input):
            input_name = forward_op.input[input_index]
            input_version = in_versions[input_name]
            if not g:
                continue
            if type(g) is GradientSlice:
                if str(g.indices) not in locally_generated_blobs and \
                        str(g.values) not in locally_generated_blobs:
                    self.gradient_generators[input_name][input_version].append(
                        SparseGradGenMeta(None, 0, None, 0, g))
            else:
                if str(g) not in locally_generated_blobs:
                    self.gradient_generators[input_name][input_version].append(
                        GradGenMeta(None, 0, g))

        # Finally, for the gradients specified in g_input, we update the
        # gradient frontier to reflect the input versions that the gradients
        # correspond to.
        for i, g in enumerate(g_input):
            if g is not None:
                input_name = forward_op.input[i]
                input_version = in_versions[input_name]
                self.gradient_frontier[input_name] = input_version

    def _GetSumOpOutputName(self, generator, input_name):
        # Pick the blob name the accumulated gradient will be written to:
        # the first real grad op's output (suffix-stripped for sparse), or
        # "<input>_grad" when no generator has a grad op.
        def remove_suffix(s, suffix):
            if s.endswith(suffix):
                return s[:-len(suffix)]
            return s

        for g in generator:
            if type(g) is GradGenMeta:
                grad_op, idx, _ = g
                if grad_op:
                    return grad_op.output[idx]
            else:
                assert(type(g) is SparseGradGenMeta)
                op_i, idx_i, op_v, idx_v, _ = g
                if op_i:
                    return remove_suffix(op_i.output[idx_i], '_indices')
                if op_v:
                    return remove_suffix(op_v.output[idx_v], '_values')

        return input_name + '_grad'

    def _SetSumOpsDeviceOption(self, sum_ops, generators):
        # we already checked that device options are consistent so we can just
        # use the first one we find
        for generator in generators:
            grad_op = generator.grad_op if type(generator) is GradGenMeta \
                else generator.grad_op_values or generator.grad_op_indices
            if grad_op:
                if grad_op.HasField('device_option'):
                    for op in sum_ops:
                        op.device_option.CopyFrom(grad_op.device_option)
                break

    def _DisambiguateGradOpOutput(self, grad_op, idx, cnt):
        # Rename grad_op's idx-th output to a unique "_..._autosplit_N" name
        # so multiple gradient contributions do not clobber each other.
        grad_op.output[idx] = (
            '_' + grad_op.output[idx] + '_autosplit_{}'.format(cnt))
        return grad_op.output[idx], cnt + 1

    def _CheckSumOpsConflict(self, out_base_name, g):
        if str(out_base_name) == str(g):
            # TODO not sure what this message really means
            raise RuntimeError(
                'The gradient output of empty gradient op can not '
                'be the same as the normal name of the current '
                'input gradient.')

    def _MakeDenseSumOps(self, generators, out_base_name):
        # Build a single Sum op that accumulates all dense contributions
        # into out_base_name.
        sum_op_input = []
        cnt = 0

        assert len(generators) > 1

        first_grad_op = True
        for generator in generators:
            grad_op, idx, g = generator
            assert(type(g) is not GradientSlice)
            if grad_op:
                if first_grad_op:
                    first_grad_op = False
                    out = grad_op.output[idx]
                else:
                    out, cnt = self._DisambiguateGradOpOutput(grad_op, idx, cnt)
                sum_op_input.append(out)
            else:
                self._CheckSumOpsConflict(out_base_name, g)
                sum_op_input.append(str(g))

        if out_base_name in sum_op_input:
            # Sum inplace mode works only for the first input
            # So we do a swap
            idx = sum_op_input.index(out_base_name)
            sum_op_input[0], sum_op_input[idx] = (
                sum_op_input[idx], sum_op_input[0]
            )
        sum_ops = [CreateOperator(
            "Sum",
            [BlobReference(x) for x in sum_op_input],
            BlobReference(out_base_name))]
        return sum_ops, out_base_name

    def _MakeSparseSumOps(self, generators, out_base_name):
        # Accumulate sparse contributions by concatenating indices and values.
        indices_concat_input = []
        values_concat_input = []
        cnt_i = 0
        cnt_v = 0

        for generator in generators:
            assert(type(generator) is SparseGradGenMeta)
            op_i, idx_i, op_v, idx_v, g = generator
            if op_i:
                out, cnt_i = self._DisambiguateGradOpOutput(op_i, idx_i, cnt_i)
                indices_concat_input.append(out)
            else:
                self._CheckSumOpsConflict(out_base_name, g.indices)
                indices_concat_input.append(g.indices)
            if op_v:
                out, cnt_v = self._DisambiguateGradOpOutput(op_v, idx_v, cnt_v)
                values_concat_input.append(out)
            else:
                self._CheckSumOpsConflict(out_base_name, g.values)
                values_concat_input.append(g.values)

        indices_concat_output = out_base_name + '_indices_concat'
        indices_concat_split = out_base_name + '_indices_concat_split'
        values_concat_output = out_base_name + '_values_concat'
        values_concat_split = out_base_name + '_values_concat_split'
        # Sum the given sparse representations by simply concatenating the
        # indices (resp. values) tensors together. We don't do any deduplication
        # of indices at this point. This will be done as needed before the
        # optimizer is called
        sum_ops = [
            CreateOperator(
                "Concat",
                [BlobReference(x) for x in indices_concat_input],
                [BlobReference(x) for x in
                    [indices_concat_output, indices_concat_split]],
                axis=0
            ),
            CreateOperator(
                "Concat",
                [BlobReference(x) for x in values_concat_input],
                [BlobReference(x) for x in
                    [values_concat_output, values_concat_split]],
                axis=0
            ),
        ]
        sum_op_output = GradientSlice(
            indices=indices_concat_output,
            values=values_concat_output,
        )
        return sum_ops, sum_op_output

    def _MakeSumOps(self, input_name, input_version):
        # Dispatch to the dense or sparse accumulation builder; all
        # generators for one blob version must be of a single kind.
        generators = self.gradient_generators[input_name][input_version]
        out_base_name = self._GetSumOpOutputName(generators, input_name)
        types = list(set(type(x) for x in generators))
        assert(len(types) == 1)
        if types[0] is GradGenMeta:
            sum_ops, g = self._MakeDenseSumOps(generators, out_base_name)
        else:
            assert(types[0] is SparseGradGenMeta)
            sum_ops, g = self._MakeSparseSumOps(generators, out_base_name)
        self._SetSumOpsDeviceOption(sum_ops, generators)
        return sum_ops, g

    def _VerifyGradientGenerators(self, generator):
        # (1) check if all gradients are of the same type. Aggregating a mix of
        # sparse and dense gradients is not supported yet
        if len({type(g) for g in generator}) > 1:
            raise RuntimeError(
                'Automatic aggregation of a mix of sparse and dense gradients '
                'is not supported yet')

        # If for all the operators that used the operator, none or only one
        # produced the gradient, then no additional sum needs to be carried
        # out.
        if len(generator) < 2:
            return False

        all_gradient_names = []
        all_device_options = []
        for g in generator:
            if type(g) is GradGenMeta:
                if g.grad_op:
                    all_gradient_names.append(g.gradient)
                    all_device_options.append(g.grad_op.device_option)
            else:
                assert(type(g) is SparseGradGenMeta)
                if g.grad_op_indices:
                    all_device_options.append(g.grad_op_indices.device_option)
                if g.grad_op_values:
                    all_device_options.append(g.grad_op_values.device_option)
                all_gradient_names.append(g.gradient.values)

        # Check if all grad op device options are the same.
        if len(all_device_options) >= 2 and not all(
                d == all_device_options[0] for d in all_device_options[1:]):
            raise RuntimeError('Unexpected behavior: not all grad ops'
                               'have the same device option.')
        return True

    def DoGradientAccumulation(self, fwd_op_idx):
        """For each input name in the forward op, check if we will need to
        add gradient accumulation. If so, do gradient accumulation and return
        the list of gradient operators.

        The criteria for doing gradient accumulation is:
        (1) the specific input version has been used by multiple operators.
        (2) the current fwd_op_idx is the first to use that input, i.e. in the
            backward pass, is the last to optionally generate the gradient for
            the op.
        (3) For the operators that used the input, their gradient operators
            have generated more than 1 gradient.

        When accumulating operators, our current solution is to rename all the
        created gradients with an internal intermediate name, and then add a
        Sum() operator that adds up all the gradients. This may use more memory
        due to intermediate storage, but is usually the fastest approach as one
        can do one single sum for multiple intermediate gradients.
        """
        forward_op, in_versions, out_versions = self.ssa[fwd_op_idx]
        additional_sum_ops = []
        grad_map = {}
        for _i, input_name in enumerate(set(forward_op.input)):
            input_version = in_versions[input_name]
            input_usage = self.input_usages[input_name][input_version]
            if (len(input_usage) <= 1 or fwd_op_idx != input_usage[0]):
                # We do not need to do gradient accumulation yet.
                continue
            generator = self.gradient_generators[input_name][input_version]
            try:
                if not self._VerifyGradientGenerators(generator):
                    continue
            except RuntimeError as err:
                raise RuntimeError(
                    "Gradients for param ''{}'' failed to verify: {}".format(
                        input_name,
                        err
                    )
                )

            # Finally, let's create the sum operator.
            sum_ops, g = self._MakeSumOps(input_name, input_version)
            additional_sum_ops.extend(sum_ops)
            grad_map[input_name] = g
        return additional_sum_ops, grad_map

    def _AppendAutoGradGenerator(self, y, grad, autograd_op):
        # Gradient here is not sparse as it was generated by
        # a ConstantFill operator. Autogeneration for sparse gradients is
        # not supported
        generator = GradGenMeta(
            autograd_op, 0 if autograd_op else None, str(grad))

        self.gradient_generators[str(y)][self.frontier[str(y)]].append(
            generator)

    def _GetInitGradients(self, ys):
        # Build the initial gradient map for the requested outputs; any y
        # without a supplied gradient gets an all-ones ConstantFill.
        input_to_grad = {}
        gradient_ops = []

        for y, g in viewitems(ys):
            autograd_op = None
            if g is None:
                autograd_op = CreateOperator(
                    "ConstantFill", [y], [str(y) + "_autogen_grad"],
                    value=1.0)
                gradient_ops.append(autograd_op)
                g = autograd_op.output[0]
            # Since the C++ gradient registry does not have notion of
            # NameScopes, we will convert all references to strings.
            input_to_grad[str(y)] = (
                GradientSlice(str(g[0]), str(g[1]))
                if isinstance(g, GradientSlice) else str(g))
            # Autogenerated gradients are assumed to be provided for the last
            # input version
            if autograd_op is not None:
                self._AppendAutoGradGenerator(y, g, autograd_op)

        return input_to_grad, gradient_ops

    def _GenerateGradientsForForwardOp(
            self, forward_op_idx, input_to_grad):
        # Produce the gradient ops for one forward op, given the gradients
        # known so far for its outputs.
        new_input_to_grad = {}
        gradient_ops = []
        forward_op, in_versions, out_versions = self.ssa[forward_op_idx]
        g_output = list(
            input_to_grad.get(name, None) for name in forward_op.output)

        if not all(g is None for g in g_output) or (
                forward_op.type == "ZeroGradient"):
            gradient_ops, g_input = GradientRegistry.GetGradientForOp(
                forward_op, g_output)
            # Check if the gradient operators are legal, and update
            # gradient_generators and gradient_frontier
            self.BuildGradientGenerators(
                forward_op_idx, gradient_ops, g_output, g_input)
            # Record the gradient map to all_input_to_grad.
            for name, grad in zip(forward_op.input, g_input):
                # Do not overwrite an existing gradient with a None
                # unless the input is also an output of the op, since
                # we update the blob version when blob is output of an
                # operator.
                if grad is not None or \
                    name not in input_to_grad or \
                        name in list(forward_op.output):
                    new_input_to_grad[name] = grad

        return new_input_to_grad, gradient_ops

    def GetBackwardPass(self, ys):
        """Gets the backward pass that computes the derivatives of given blobs.

        Inputs:
          ys: a list or a dictionary specifying what blobs we want to compute
              derivatives of. If the input is a list, we will automatically
              generate their gradients with all-one values; if the input is a
              dictionary, for any dictionary entries that are not None, we will
              take the corresponding blobs as their gradients; for all those
              that are None, we will auto-fill them with 1.
        """
        if isinstance(ys, list):
            ys = dict((y, None) for y in ys)
        elif not isinstance(ys, dict):
            raise TypeError("ys should either be a list or a dict.")

        # Set the gradient frontier with the initialized external
        # gradients.
        for y in viewkeys(ys):
            self.gradient_frontier[y] = self.frontier[y]
            self.input_usages[str(y)][self.frontier[str(y)]].append(
                len(self.ssa))

        all_input_to_grad, all_gradient_ops = self._GetInitGradients(ys)

        # (2) Now, after having the virtual play above, we now play the ops
        # backwards, creating the gradients along the path. Note that although
        # we are playing it backwards, we cannot refer to variables that are
        # at a version older than current_versions because it is already been
        # overwritten.
        for forward_op_idx in reversed(range(len(self.ssa))):
            input_to_grad, gradient_ops = self._GenerateGradientsForForwardOp(
                forward_op_idx, all_input_to_grad)
            all_input_to_grad.update(input_to_grad)
            all_gradient_ops += gradient_ops

            # If there are multiple use blobs, do gradient accumulation.
            additional_sum_ops, grad_map = self.DoGradientAccumulation(
                forward_op_idx)
            # This line is so that if in an accumulation some of the operators
            # have not produced gradients, they still do not overwrite the
            # general all_input_to_grad map.
            all_input_to_grad.update(grad_map)
            all_gradient_ops += additional_sum_ops

        # (3) Post-processing.
        # After we have done computation for each op, we now have the gradient
        # operators ready. For the output map, we will convert everything to
        # BlobReferences for easier handling in python.
        all_input_to_grad_out = {}
        for key, val in viewitems(all_input_to_grad):
            if val is not None:
                if (isinstance(val, string_types) or
                        isinstance(val, binary_type)):
                    grad_out = BlobReference(val)
                else:
                    grad_out = GradientSlice(BlobReference(val[0]),
                                             BlobReference(val[1]))
                all_input_to_grad_out[BlobReference(key)] = grad_out
        return all_gradient_ops, all_input_to_grad_out
class GradientRegistry(object):
    """GradientRegistry holds the mapping from operators to their gradients."""
    # op type name -> python gradient-maker function (fallback when the
    # C++ registry has no gradient for the op).
    gradient_registry_ = {}

    @classmethod
    def RegisterGradient(cls, op_type):
        """A decorator for registering gradient mappings."""

        def Wrapper(func):
            cls.gradient_registry_[op_type] = func
            return func

        return Wrapper

    @classmethod
    def _GetGradientForOpCC(cls, op_def, g_output):
        # Ask the C++ gradient registry for op_def's gradient, converting
        # between Python gradient values and C.GradientWrapper on the way.
        # TODO(tulloch) - Propagate GradientWrapper up through the stack.
        def from_untyped(grad):
            # None -> empty; (indices, values) pair -> sparse; else dense.
            if grad is None:
                w = C.GradientWrapper()
                assert w.is_empty()
                return w
            try:
                (indices, values) = grad
                w = C.GradientWrapper()
                w.indices = indices
                w.values = values
                assert w.is_sparse()
                return w
            except ValueError:
                # Not a 2-tuple: treat as a dense gradient blob name.
                w = C.GradientWrapper()
                w.dense = grad
                assert w.is_dense()
                return w

        g_output = [from_untyped(grad) for grad in g_output]
        grad_defs_str, g_input = C.get_gradient_defs(
            op_def.SerializeToString(), g_output)

        def to_untyped(grad_wrapper):
            # Inverse of from_untyped.
            if grad_wrapper.is_empty():
                return None
            if grad_wrapper.is_sparse():
                return GradientSlice(grad_wrapper.indices, grad_wrapper.values)
            assert grad_wrapper.is_dense()
            return grad_wrapper.dense

        g_input = [to_untyped(grad_wrapper) for grad_wrapper in g_input]
        grad_defs = []
        for grad_def_str in grad_defs_str:
            grad_def = caffe2_pb2.OperatorDef()
            grad_def.ParseFromString(grad_def_str)
            grad_defs.append(grad_def)
        return grad_defs, g_input

    @classmethod
    def GetGradientForOp(cls, op, g_output):
        # Try the C++ registry first; a broad catch here is deliberate so
        # that ops without a C++ gradient fall through to the Python registry.
        try:
            gradient_ops, g_input = cls._GetGradientForOpCC(op, g_output)
        except Exception as e:
            # Not supported in C++; will try python registration next.
            if op.type in cls.gradient_registry_:
                gradient_ops, g_input = cls.gradient_registry_[op.type](
                    op, g_output
                )
            else:
                raise Exception(
                    "Exception when creating the gradient for [{}]: {}.".
                    format(op.type, e)
                )

        if gradient_ops is None:
            return [], g_input
        if type(gradient_ops) is not list:
            gradient_ops = [gradient_ops]
        return gradient_ops, g_input

    @classmethod
    def GetBackwardPass(cls, operators, ys, ys_generate_gradient=False):
        """Gets the backward pass for the list of operators.

        Args:
            operators: a list of operators constituting the forward pass.
            ys: a list or a dictionary specifying what blobs we want to compute
                derivatives of. If the input is a list, we will automatically
                generate their gradients with all-one values; if the input is a
                dictionary, for any dictionary entries that are not None, we'll
                take the corresponding blobs as their gradients; for all those
                that are None, we will auto-fill them with 1.

        Returns:
            gradient_ops: a list of gradient operators to run.
            all_input_to_grads: a map from input to their corresponding
                gradients.

        NOTE(review): ys_generate_gradient is accepted but not used in this
        body — confirm whether callers rely on it.
        """
        ir = IR(operators)
        return ir.GetBackwardPass(ys)
def get_ssa(net, blob_versions=None):
    """
    Given a net, return a structure containing the version of each input and
    output blob used by each operator.
    Args:
        net: either a Net, a NetDef, or a list of them.
        blob_versions: (optional) map with current version number for given
                       blob names. If not provided or blob not found, start
                       from version 0.
    Returns:
        Tuple (ssa, blob_versions)
        ssa: list of tuples (versioned_inputs, versioned_outputs)
             for each op in the net. A versioned input is a tuple
             (blob_name, version).
        blob_versions: updated map with latest version of each blob found in
                       the net.
    """
    if blob_versions is None:
        blob_versions = {}
    # Handle lists before deriving `proto`: the original ordering called
    # net.Proto() / asserted NetDef first, which made this branch unreachable
    # for list inputs (they would fail the isinstance assert below).
    if isinstance(net, list):
        return [get_ssa(n, blob_versions) for n in net], blob_versions
    proto = net.Proto() if isinstance(net, Net) else net
    assert isinstance(proto, caffe2_pb2.NetDef)
    # All declared external inputs start at version 0.
    for i in proto.external_input:
        if i not in blob_versions:
            blob_versions[str(i)] = 0
    ssa = []
    for op in proto.op:
        # If the net declares no external inputs, assume any previously unseen
        # op input is an implicit external input at version 0.
        if not proto.external_input:
            for i in op.input:
                if i not in blob_versions:
                    blob_versions[i] = 0
        inputs = [(str(i), blob_versions.get(str(i), 0)) for i in op.input]
        # Every write to a blob bumps its version.
        for o in op.output:
            blob_versions[str(o)] = blob_versions.get(str(o), 0) + 1
        outputs = [(str(o), blob_versions[str(o)]) for o in op.output]
        ssa.append((inputs, outputs))
    return ssa, blob_versions
def get_undefined_blobs(ssa):
    """
    Given a ssa in the format produced by get_ssa(), return a set of blobs that
    are used before they are defined, which corresponds to inputs at version 0.
    """
    result = set()
    for op_inputs, _ in ssa:
        for blob_name, version in op_inputs:
            if version == 0:
                result.add(blob_name)
    return result
def get_output_producers(ssa):
    """
    Given a ssa in the format produced by get_ssa(), returns a map from
    versioned blob into the operator index that produces that version of
    the blob. A versioned blob is a tuple (blob_name, version).
    """
    return {
        versioned_blob: op_idx
        for op_idx, (_, op_outputs) in enumerate(ssa)
        for versioned_blob in op_outputs
    }
def get_op_ids_in_path(ssa, blob_versions, inputs, outputs):
    """
    Given a ssa and blob_versions as produced by get_ssa(), returns the list
    of op indices that are necessary in order to generate the blobs in
    `outputs`, given blobs in `inputs`.
    Consider that the `inputs` are given in their latest version.
    """
    # Versioned blobs that are already provided and need no producer.
    provided = set((str(i), blob_versions[str(i)]) for i in inputs)
    producers = get_output_producers(ssa)
    # Walk the producer graph backwards from the requested outputs.
    pending = [(str(o), blob_versions[str(o)]) for o in outputs]
    visited_op_ids = set()
    while pending:
        versioned_blob = pending.pop()
        if versioned_blob in provided:
            continue
        op_id = producers.get(versioned_blob)
        if op_id is None or op_id in visited_op_ids:
            continue
        visited_op_ids.add(op_id)
        op_inputs, _ = ssa[op_id]
        pending.extend(op_inputs)
    return sorted(visited_op_ids)
def recurrent_network_op_remap(op, prefix, blob_remap):
    """
    Parameters
    ----------
    op : Caffe2 operator (RecurrentNetworkOp or RecurrentNetworkGradientOp).
    prefix: this argument is not used in this function, just for legacy support.
    blob_remap : Dictionary that represents the map from old blob name to new.
    Updates blob names in arguments of RecurrentNetworkOp and
    RecurrentNetworkGradientOp to conform to cloned input and output of both
    operators and also makes sure names of locally generated blobs in arguments
    have the same prefix as the input and output of the operators.
    """
    def get_remapped_str(blob_str):
        # Argument strings may arrive as bytes; normalize to text before the
        # remap lookup, and return bytes as the proto expects.
        if isinstance(blob_str, binary_type):
            blob_str = blob_str.decode('utf-8')
        return blob_remap.get(blob_str, blob_str).encode('utf-8')
    for argument in op.arg:
        if len(argument.strings) > 0:
            # Repeated-string arguments hold blob names: remap each in place.
            for i in range(len(argument.strings)):
                argument.strings[i] = get_remapped_str(argument.strings[i])
        elif argument.name == 'timestep':
            # The timestep argument is a single blob name.
            argument.s = get_remapped_str(argument.s)
        elif argument.name.endswith('step_net'):
            # argument is a proto
            remap_proto(argument, blob_remap)
# Operators whose arguments embed blob names and sub-net protos, and therefore
# need custom remapping logic whenever a net is cloned (see Net.Clone).
DEFAULT_REMAP_FUNCS = {
    'RecurrentNetwork': recurrent_network_op_remap,
    'RecurrentNetworkGradient': recurrent_network_op_remap,
}
def remap_proto(argument, blob_remap):
    """Rewrite an argument whose string payload is a text-format NetDef,
    applying `blob_remap` to every blob name inside it."""
    proto = caffe2_pb2.NetDef()
    # The sub-net is stored as text-format protobuf in argument.s.
    protobuftx.Merge(argument.s.decode('utf-8'), proto)
    subnet = Net(proto)
    # Clone applies the blob remapping (including recursively, via the
    # default remap funcs) and yields a net with rewritten names.
    cloned_sub_net = subnet.Clone(
        'cloned_sub_net',
        blob_remap,
    )
    # Serialize the remapped proto back into the argument (text format).
    argument.s = str(cloned_sub_net.Proto()).encode('utf-8')
def clone_and_bind_net(net, name, prefix, blob_remap=None, inputs=None,
                       keep_schema=True):
    """
    Clone the given Net, binding its input schema to the given `inputs` record.
    Blob names defined by the net are prepended with the given `prefix`.
    Args:
        net:        the net to clone
        name:       the name of the new net
        prefix:     the prefix to append to local blobs
        blob_remap: (optional) dict with additional blob name remapping.
        inputs:     (optional) input record that will provide actual input
                    values for the cloned net. Must be compatible with the
                    net's input schema or be a strict superset of it
        keep_schema: by default (True), the original schema will be kept and
                     remapped accordingly. otherwise, the schema will be set as
                     inputs or left empty if inputs is not given.
    Returns:
        Tuple (cloned_net, blob_remap)
        clone_net: the cloned Net
        blob_remap: a map from original blob names into remapped blob names
    """
    from caffe2.python import schema
    assert isinstance(net, Net)
    if blob_remap is None:
        blob_remap = {}
    if inputs is not None:
        assert isinstance(inputs, schema.Field)
        original = net.input_record()
        assert original is not None
        # TODO(azzolini): improve schema type checking
        # `inputs` must cover (be a superset of) the net's own input schema.
        diff = set(original.field_names()) - set(inputs.field_names())
        assert len(diff) == 0, (
            "Schemas don't match, extra fields {diff} found in the net {name}. "
            "original: {original}; inputs: {inputs}"
            .format(
                diff=diff, name=net.Name(), original=original.field_names(),
                inputs=inputs.field_names()
            )
        )
        # Remap each of the net's schema blobs to the caller-provided blob.
        original_mapping = dict(zip(original.field_names(),
                                    original.field_blobs()))
        for fn, fb in zip(inputs.field_names(), inputs.field_blobs()):
            if fn in original_mapping:
                blob_remap[str(original_mapping[fn])] = str(fb)
    proto = net.Proto()
    ssa, blob_versions = get_ssa(proto)
    undef_blobs = get_undefined_blobs(ssa)
    # Every blob touched by the net gets an entry in blob_remap:
    # explicit remaps win; undefined (external) blobs keep their names;
    # locally produced blobs get the prefix.
    for blob in viewkeys(blob_versions):
        if blob in blob_remap:
            continue
        elif blob in undef_blobs:
            blob_remap[blob] = blob
        else:
            blob_remap[blob] = prefix + blob
    cloned_net = net.Clone(name, blob_remap, keep_schema=keep_schema)
    if not keep_schema and inputs:
        cloned_net.set_input_record(inputs)
    return cloned_net, blob_remap
def _get_blob_ref(blob_name_or_ref):
    """Normalize a blob name or BlobReference into a BlobReference.

    Bug fix: the original tested `isinstance(input, BlobReference)` — i.e.
    the *builtin* `input` function, which is never a BlobReference — so a
    passed-in BlobReference was always re-wrapped in a fresh BlobReference,
    dropping its associated net. Test the actual argument instead.
    """
    if isinstance(blob_name_or_ref, BlobReference):
        return blob_name_or_ref
    return BlobReference(blob_name_or_ref)
def _recover_record_by_prefix(names, prefix=''):
    """
    Tries to recover record by taking a subset of blob names with
    a given prefix name and interpreting them as schema column names
    """
    from caffe2.python import schema
    # Blob names that carry the prefix; the suffixes become column names.
    matching = [name for name in names if name.startswith(prefix)]
    if not matching:
        return None
    column_names = [name[len(prefix):] for name in matching]
    return schema.from_column_list(
        column_names,
        col_blobs=[_get_blob_ref(name) for name in matching])
class Net(object):
    """A python wrapper around a caffe2_pb2.NetDef protocol buffer.

    Keeps bookkeeping structures (operator outputs, external inputs,
    auto-generated blob-name counters, input/output schema records) in sync
    with the underlying proto, so operators can be appended with auto-named
    outputs and nets can be cloned, extended, and differentiated safely.
    """
    # Names of all nets created so far, used to guarantee unique net names.
    _net_names_used = set()
    operator_registry_ = {}
    @staticmethod
    def current_prefix():
        # Name of the currently active NetBuilder scope, or '' outside one.
        from caffe2.python.net_builder import NetBuilder
        builder = NetBuilder.current(required=False)
        return builder.name if builder else ''
    @staticmethod
    def _get_next_net_name(basename):
        """Return a globally unique net name derived from `basename`,
        prefixed with the current NetBuilder scope if any."""
        name = basename = '/'.join(
            x for x in [Net.current_prefix(), basename] if x
        )
        next_idx = 1
        # Append _1, _2, ... until the name is unused.
        while name in Net._net_names_used:
            name = basename + '_' + str(next_idx)
            next_idx += 1
        Net._net_names_used |= set([name])
        return name
    def __init__(self, name_or_proto):
        """
        Create a Net.
        Args:
            name_or_proto:  If a NetDef is provided, clone it. Otherwise,
                            create an empty net with the given name.
        """
        self._input_record = None
        self._output_record = None
        # Register blobs so that it's guaranteed that different calls to
        # NextBlob/NextScopedBlob always return blobs with different names
        self._registered_blob_names = set()
        self._recreate_lookup_tables = False
        self._op_outputs = set()
        self._external_input_map = set()
        self._attr_dict = defaultdict(list)
        if type(name_or_proto) is caffe2_pb2.NetDef:
            proto = name_or_proto
            # We are initializing a network by a NetDef. In this case, we will
            # initialize our network with the given netdef.
            self._net = caffe2_pb2.NetDef()
            self._net.CopyFrom(proto)
            existing_outputs = [list(op.output) for op in self._net.op]
            self._external_input_map.update(list(self._net.external_input))
            # Set the next name index properly.
            existing_names = set(
                sum(
                    [list(op.input) for op in self._net.op], []
                ) + sum(
                    existing_outputs, []
                )
            )
            for outs in existing_outputs:
                self._op_outputs.update(outs)
            # Recover the auto-name counter from any '<net>_blob_<i>' names
            # already present, so new auto-names don't collide.
            prefix_len = len(self._net.name + '_blob_')
            autogen_indices = []
            for s in existing_names:
                if s.startswith(self._net.name + '_blob_'):
                    try:
                        # NOTE(review): s[prefix_len] takes a single character,
                        # so multi-digit indices (e.g. '_blob_12') parse as
                        # their first digit only; likely intended
                        # s[prefix_len:]. TODO confirm before changing.
                        autogen_indices.append(int(s[prefix_len]))
                    except ValueError:
                        pass
            if len(autogen_indices):
                self._next_name_index = max(autogen_indices) + 1
            else:
                self._next_name_index = 0
            name = self._net.name
        else:
            name = name_or_proto
            self._net = caffe2_pb2.NetDef()
            self._next_name_index = 0
        # make sure that this net name hasn't been used before
        self._net.name = Net._get_next_net_name(name)
    def AppendNet(self, net):
        """Append all ops of `net` to this net, merging external inputs
        (skipping ones already produced here) and external outputs."""
        assert isinstance(net, Net)
        for i in net.Proto().external_input:
            if (
                i not in self.Proto().external_input and
                i not in self._op_outputs
            ):
                self.Proto().external_input.append(i)
        self.Proto().external_output.extend(
            [
                o for o in net.Proto().external_output
                if o not in self.Proto().external_output
            ]
        )
        self._ExtendOps(net.Proto().op)
        return self
    def LogInfo(self, *msg_or_blobs):
        """Add Print ops for each argument; plain strings are first wrapped
        into constant string tensors."""
        for msg_or_blob in msg_or_blobs:
            if not isinstance(msg_or_blob, BlobReference):
                blob = self.GivenTensorStringFill(
                    [], self.NextName('log'),
                    shape=[], values=[msg_or_blob])
            else:
                blob = msg_or_blob
            self.Print(blob, [])
    def add_attribute(self, name, obj):
        """
        Add `obj` to the list of attributes in this net under the given `name`.
        Attributes are user-defined objects and have no pre-defined semantics.
        """
        self._attr_dict[name].append(obj)
    def get_attributes(self, name):
        """
        Returns the list of attributes in this net for a given `name`.
        Attributes are user-defined objects added with `add_attribute'.
        """
        return self._attr_dict.get(name, [])
    def set_rand_seed(self, seed=100, sequence_seed=True, seed_on_op_def=False):
        """
        Adds a random seed to each op in the net.
        If sequence_seed is set, the i-th op has rand_seed=`seed + i`
        If seed_on_op_def is set, the op rand_seed=hash(str(op))
        sequence_seed and seed_on_op_def cannot be both set to True.
        """
        assert not (sequence_seed and seed_on_op_def), (
            'sequence_seed and seed_on_op_def cannot be both set to True.')
        for i, op in enumerate(self.Proto().op):
            if sequence_seed:
                curr_seed = seed + i
            elif seed_on_op_def:
                # Derive a per-op seed from the op's content; modded into
                # uint32 range as required by device_option.random_seed.
                curr_seed = hash(str(op) + str(seed)) % np.iinfo(np.uint32).max
            else:
                curr_seed = seed
            op.device_option.random_seed = curr_seed
    def Name(self):
        """Return the name of this net."""
        return self._net.name
    def __str__(self):
        return self.Name()
    def Const(self, array, blob_out=None, dtype=None):
        """Add a constant-fill op producing `array` as a tensor; dispatches
        on dtype to the appropriate GivenTensor*Fill operator."""
        if isinstance(array, bool):
            return self.ConstantFill(
                [],
                blob_out or 1,
                dtype=DataType.BOOL,
                value=array)
        if dtype is None:
            array = np.array(array)
        else:
            array = np.array(array, dtype=dtype)
        def do_set(operator):
            # Shared call shape for all the GivenTensor*Fill variants.
            return operator(
                [],
                blob_out or 1,
                shape=array.shape,
                values=array.flatten().tolist())
        if array.dtype == np.int32:
            return do_set(self.GivenTensorIntFill)
        elif array.dtype == np.int64:
            return do_set(self.GivenTensorInt64Fill)
        elif array.dtype == np.str:
            # NOTE(review): np.str is a deprecated alias removed in newer
            # numpy releases; verify against the pinned numpy version.
            return do_set(self.GivenTensorStringFill)
        else:
            return do_set(self.GivenTensorFill)
    def BlobIsDefined(self, blob):
        """
        Returns true if the given BlobReference is produced as output of
        an operator in this net, or if it is provided as an external input.
        """
        if self._recreate_lookup_tables:
            self._RecreateLookupTables()
        name = str(blob)
        return (name in self._op_outputs) or (name in self._external_input_map)
    def UsesBlob(self, blob):
        """
        Returns true iff the given BlobReference is used by any operator
        or this net, or if it is one of the external inputs of the net.
        """
        blob_name = str(blob)
        for op in self._net.op:
            for input in op.input:
                if input == blob_name:
                    return True
        return blob_name in self._external_input_map
    def GetBlobRef(self, blob_name):
        """
        Given the name of a blob produced by this net, return a BlobReference
        to it. If the blob is not produced by any op in this net,
        raises KeyError.
        """
        blob_name = str(blob_name)
        if not self.BlobIsDefined(blob_name):
            raise KeyError('Net does not define blob %s' % blob_name)
        return BlobReference(blob_name, self)
    def Clone(
        self,
        name,
        blob_remap=None,
        op_id_mask=None,
        remap_funcs=None,
        keep_schema=True
    ):
        """
        Clone this net.
        Args:
            name:        name of the cloned net
            blob_remap:  optional map with list of blob names to replace
            op_id_mask:  optional list of operator indices to include in
                         the cloned net. If not provided, all ops are included.
        """
        orig_remap_funcs = {} if remap_funcs is None else remap_funcs
        # by default we want to put RecurrentNetworkOp and
        # RecurrentNetworkGradientOp into remap_funcs, as these two operators
        # also take blobs and proto into the arguments.
        remap_funcs = DEFAULT_REMAP_FUNCS.copy()
        remap_funcs.update(orig_remap_funcs)
        proto = self._net
        new_proto = caffe2_pb2.NetDef()
        new_proto.CopyFrom(proto)
        new_proto.name = name
        if blob_remap is None:
            blob_remap = {}
        if op_id_mask is None:
            op_id_mask = list(range(0, len(proto.op)))
        def get_remapped_str(blob):
            # Unmapped blobs keep their original names.
            blob_str = str(blob)
            return str(blob_remap.get(blob_str, blob_str))
        def remap_list(proto_list):
            # Rewrite a repeated-string proto field in place.
            new_list = [get_remapped_str(b) for b in proto_list]
            del proto_list[:]
            proto_list.extend(new_list)
        def remap_op(op):
            # Copy an op, remapping its inputs/outputs and, for special op
            # types, the blob names embedded in its arguments.
            new_op = caffe2_pb2.OperatorDef()
            new_op.CopyFrom(op)
            remap_list(new_op.input)
            remap_list(new_op.output)
            if new_op.type in remap_funcs:
                remap_funcs[new_op.type](
                    new_op,
                    (name + '/') if name else '',
                    blob_remap,
                )
            return new_op
        del new_proto.op[:]
        new_proto.op.extend([remap_op(proto.op[op_id]) for op_id in op_id_mask])
        remap_list(new_proto.external_input)
        remap_list(new_proto.external_output)
        new_net = Net(new_proto)
        if keep_schema:
            # Rebuild the input/output schema records against the remapped
            # blob names, bound to the new net.
            from caffe2.python import schema
            if self._input_record:
                new_net._input_record = schema.from_blob_list(
                    self._input_record,
                    [
                        BlobReference(get_remapped_str(blob), net=new_net)
                        for blob in self._input_record.field_blobs()
                    ],
                )
            if self._output_record:
                new_net._output_record = schema.from_blob_list(
                    self._output_record,
                    [
                        BlobReference(get_remapped_str(blob), net=new_net)
                        for blob in self._output_record.field_blobs()
                    ],
                )
        new_net._attr_dict.update(self._attr_dict)
        return new_net
    def ClonePartial(self, name, inputs, outputs, remap_funcs=None):
        """
        Clone this net, including only ops that are necessary in order to
        compute `outputs` given `inputs`. Return references to the cloned
        outputs. Internal blobs (blobs that are produced and consumed inside
        the net but not used as outputs) will be remapped to avoid name
        conflict.
        Args:
            name:    the name of the cloned net
            inputs:  map where the keys correspond to BlobReferences in the
                     original net, and the values correspond to external inputs
                     in the partially cloned net. If `inputs` is a list, don't
                     remap input names.
            outputs: outputs to be produced by the cloned net.
        Returns:
            Tuple (new_net, new_outputs)
                new_net:        a new Net object.
                new_outputs:    list of BlobReferences corresponding to the
                                outputs produced by new_net.
        """
        # Normalize `inputs` into an OrderedDict of old-name -> new-name.
        input_is_pair_list = isinstance(inputs, list) and all(
            isinstance(i, tuple) and len(i) == 2 for i in inputs)
        inputs = (
            inputs if isinstance(inputs, (dict, OrderedDict)) else
            OrderedDict(inputs) if input_is_pair_list else
            OrderedDict(zip(inputs, inputs)))
        for output in outputs:
            assert self.BlobIsDefined(output)
        input_names = {str(k): str(v) for k, v in viewitems(inputs)}
        output_names = [str(o) for o in outputs]
        proto = self._net
        blob_versions = {str(i): 0 for i in inputs}
        ssa, blob_versions = get_ssa(proto, blob_versions)
        used_op_ids = get_op_ids_in_path(ssa, blob_versions, inputs, outputs)
        # Sanity check: none of the required ops may itself produce an input.
        disallowed_op_ids = get_op_ids_in_path(ssa, blob_versions, [], inputs)
        assert len(set(used_op_ids) & set(disallowed_op_ids)) == 0, (
            'Cannot partially clone net: some of the ops required would ' +
            'generate the given input.')
        sub_ssa = [op for i, op in enumerate(ssa) if i in used_op_ids]
        undef_blobs = get_undefined_blobs(sub_ssa) - set(viewkeys(input_names))
        prefix = (name + '/') if name else ''
        def remap(blob_name):
            # Inputs use caller-provided names; external (undefined) blobs
            # keep their names; internal blobs get the net-name prefix.
            if blob_name in input_names:
                return input_names[blob_name]
            elif blob_name in undef_blobs:
                return blob_name
            else:
                return prefix + blob_name
        blob_mapping = {b: remap(b) for b in viewkeys(blob_versions)}
        new_net = self.Clone(name, blob_mapping, used_op_ids, remap_funcs)
        new_in = [
            blob_mapping[i] for i in viewkeys(input_names)] + list(undef_blobs)
        new_out = [blob_mapping[o] for o in output_names]
        del new_net.Proto().external_input[:]
        new_net.Proto().external_input.extend(new_in)
        new_net._external_input_map = set(list(new_in))
        del new_net.Proto().external_output[:]
        new_net.Proto().external_output.extend(new_out)
        return new_net, [new_net.GetBlobRef(o) for o in new_out]
    def Proto(self):
        """Return the underlying NetDef. Invalidates the cached lookup tables
        because the caller may mutate the proto directly."""
        self._InvalidateLookupTables()
        return self._net
    def PopulateProtoWithFileName(self):
        """Stamp each op's name with the file:line of its creation site,
        using the tracebacks recorded by the workspace."""
        net_tb = workspace.operator_tracebacks.get(self.Name(), None)
        if net_tb is not None:
            for idx, op in enumerate(self.Proto().op):
                if idx in net_tb:
                    op.name = ':'.join(map(str, net_tb[idx][0]))
    def NextScopedBlob(self, prefix='unnamed'):
        """Return the blob that has not been defined or registered in the
        current net. It returns `ScopedBlobReference(prefix)`, if it's valid,
        otherwise `ScopedBlobReference(prefix) + '_auto_' + ?`. Different calls
        is guaranteed to return blob with different names.
        """
        output_blob_base = ScopedName(prefix)
        return self.NextBlob(output_blob_base)
    def NextBlob(self, prefix='unnamed'):
        """Return the blob that has not been defined or registered in the
        current net. It returns `BlobReference(prefix)`, if it's valid,
        otherwise `BlobReference(prefix) + '_auto_' + ?`. Different calls
        is guaranteed to return blob with different names."""
        output_blob_base = BlobReference(prefix)
        output_blob = output_blob_base
        index = 0
        while str(output_blob) in self._registered_blob_names or (
                self.BlobIsDefined(output_blob)):
            output_blob = output_blob_base + '_auto_' + str(index)
            index += 1
        self._registered_blob_names.add(str(output_blob))
        return output_blob
    def NextName(self, prefix=None, output_id=None):
        """Returns the next name to be used, if you do not want to explicitly
        name your blob. [Deprecated, use NextBlob, NextScopedBlob instead]"""
        if prefix:
            output_name_base = self._net.name + '/' + prefix
            output_name = output_name_base
            if output_id is not None:
                output_name += ':' + str(output_id)
            index = 2
            while self.BlobIsDefined(str(ScopedBlobReference(output_name))):
                output_name = output_name_base + '_' + str(index)
                if output_id is not None:
                    output_name += ':' + str(output_id)
                index += 1
        else:
            output_name = self._net.name + '_blob_' + str(self._next_name_index)
            self._next_name_index += 1
        return str(output_name)
    def _ExtendOps(self, new_ops):
        """Append ops to the proto and register their outputs in the lookup
        table."""
        self._net.op.extend(new_ops)
        for op in new_ops:
            self._op_outputs.update([text_type(o) for o in op.output])
    def _CheckLookupTables(self):
        '''
        Called from unit tests to validate the internal lookup tables
        match the protobuf contents.
        '''
        test_op_outputs = set()
        for op in self._net.op:
            for o in op.output:
                test_op_outputs.add(o)
        test_external_inp = set()
        for inp in self._net.external_input:
            test_external_inp.add(inp)
        assert test_op_outputs.difference(self._op_outputs) == set()
        assert test_external_inp.difference(self._external_input_map) == set()
    def _InvalidateLookupTables(self):
        # Defer the (possibly expensive) rebuild until the next lookup.
        self._recreate_lookup_tables = True
    def _RecreateLookupTables(self):
        """Rebuild _op_outputs and _external_input_map from the proto."""
        self._op_outputs = set()
        for op in self._net.op:
            for o in op.output:
                self._op_outputs.add(o)
        self._external_input_map = set()
        for inp in self._net.external_input:
            self._external_input_map.add(inp)
        self._recreate_lookup_tables = False
    def AddGradientOperators(self, ys, skip=0):
        """Add the gradient for operators in the net.
        Inputs:
            ys: a list or a dictionary specifying what blobs we want to compute
                derivatives of. If the input is a list, we will automatically
                generate their gradients with all-one values; if the input is a
                dictionary, for any dictionary entries that are not None, we
                will take the corresponding blobs as their gradients; for all
                those that are None, we will auto-fill them with 1.
            skip: skips the first n operators. This is provided mainly because a
                lot of nets may use the first few operators for data generation
                like stuff which really do not need to have gradients.
        Outputs:
            returns a map from the blob name in the input network to a blob
            containing gradient or a GradientSlice in case of sparse gradient
        Currently, this is hard-coded for float operators if there are branches
        (i.e. a blob is used as input to multiple operators). This is because
        the gradient accumulation (Sum) is float only right now.
        """
        grad_ops, input_to_grad = GradientRegistry.GetBackwardPass(
            self._net.op[skip:], ys)
        # Check if in immediate mode: the grad_ops are actually being produced
        # by C++ and bypasses the CreateOperator() call, so in immediate mode
        # we will have to explicitly run them.
        if workspace.IsImmediate():
            for op in grad_ops:
                workspace.RunOperatorImmediate(op)
        self._ExtendOps(grad_ops)
        return input_to_grad
    def AddExternalInput(self, *inputs):
        """Register blobs as external inputs of this net and return their
        BlobReferences (a single ref if one input was given). Raises if any
        name is already an external input."""
        assert len(inputs) > 0
        refs = []
        # First pass: validate all names before mutating any state.
        for input in inputs:
            input_name = str(input)
            assert str(input) not in self._external_input_map, (
                'Net already contains an input named %s' % input_name)
        for input in inputs:
            input_name = str(input)
            self._net.external_input.extend([input_name])
            self._external_input_map.update([input_name])
            refs.append(_get_blob_ref(input_name))
        return refs[0] if len(refs) == 1 else refs
    def AddExternalOutput(self, *outputs):
        """Register already-defined blobs as external outputs of this net."""
        for output in outputs:
            assert isinstance(output, BlobReference)
            assert self.BlobIsDefined(output)
        for output in outputs:
            self.Proto().external_output.extend([str(output)])
    def AddScopedExternalInputs(self, *inputs):
        """Like AddExternalInput, but applies the current name scope to each
        input; always returns a list."""
        res = self.AddExternalInput(
            * [ScopedBlobReference(b) for b in inputs]
        )
        if not isinstance(res, list):
            res = [res]
        return res
    def AddScopedExternalOutputs(self, *outputs):
        """Like AddExternalOutput, but applies the current name scope."""
        return self.AddExternalOutput(
            * [ScopedBlobReference(b) for b in outputs]
        )
    @property
    def external_inputs(self):
        # BlobReferences for all declared external inputs.
        return [_get_blob_ref(x) for x in self._net.external_input]
    @property
    def external_outputs(self):
        # BlobReferences for all declared external outputs.
        return [_get_blob_ref(x) for x in self._net.external_output]
    def set_input_record(self, input_record):
        """Set (once) the schema record describing this net's inputs; blobs
        without names are materialized under this net's name scope."""
        from caffe2.python import schema
        assert self._input_record is None or (input_record.has_blobs() and
            set(input_record.field_blobs()) ==
            set(self._input_record.field_blobs())), (
            'Input schema cannot be reset')
        if not input_record.has_blobs():
            with NameScope(self.Name()):
                self._input_record = schema.NewRecord(self, input_record)
        else:
            self._input_record = input_record
            for blob in input_record.field_blobs():
                if blob not in self.external_inputs:
                    self.AddExternalInput(blob)
        return self._input_record
    def recover_input_record_by_prefix(self, prefix):
        """
        Tries to recover input record by taking a subset of external_inputs with
        a given prefix name and interpreting them as schema column names
        """
        record = _recover_record_by_prefix(self._net.external_input, prefix)
        if record:
            self.set_input_record(record)
    def set_output_record(self, record):
        """Set (once) the schema record describing this net's outputs; every
        field blob must already be defined and becomes an external output."""
        assert self._output_record is None or (record.has_blobs() and
            set(record.field_blobs()) ==
            set(self._output_record.field_blobs())), (
            'Output schema cannot be reset')
        for blob in record.field_blobs():
            assert self.BlobIsDefined(blob), "{} is not defined".format(blob)
        for blob in record.field_blobs():
            self.AddExternalOutput(blob)
        self._output_record = record
    def recover_output_record_by_prefix(self, prefix):
        """
        Tries to recover out record by taking a subset of external_outputs with
        a given prefix name and interpreting them as schema column names
        """
        record = _recover_record_by_prefix(self._net.external_output, prefix)
        if record:
            self.set_output_record(record)
    def AppendOutputRecordField(self, field_name, record):
        """Append `record` as a new field of the existing output record."""
        from caffe2.python import schema
        assert self._output_record is not None, (
            'Tried to append to missing output record'
        )
        for blob in record.field_blobs():
            assert self.BlobIsDefined(blob)
        for blob in record.field_blobs():
            self.AddExternalOutput(blob)
        self._output_record = self._output_record + schema.Struct(
            (field_name, record)
        )
    def input_record(self):
        """Return the input schema record, or None if not set."""
        return self._input_record
    def output_record(self):
        """Return the output schema record, or None if not set."""
        return self._output_record
    def AddExternalInputs(self, *inputs):
        """Alias of AddExternalInput."""
        return self.AddExternalInput(*inputs)
    def AddExternalOutputs(self, *outputs):
        """Alias of AddExternalOutput."""
        self.AddExternalOutput(*outputs)
    def DeduplicateGradientSlices(self, g, aggregator='sum'):
        """Merge duplicate indices in a sparse gradient by summing or
        averaging the corresponding value rows."""
        assert isinstance(g, GradientSlice)
        unique, remapping = self.Unique([g.indices], 2, engine='SparseHash')
        if aggregator.lower() == 'sum':
            new_g = self.UnsortedSegmentSum([g.values, remapping], 1)
        elif aggregator.lower() == 'mean':
            new_g = self.UnsortedSegmentMean([g.values, remapping], 1)
        else:
            raise ValueError('{} is not supported'.format(aggregator))
        return GradientSlice(indices=unique, values=new_g)
    def RunAllOnGPU(self, gpu_id=0, use_cudnn=False):
        """A convenient function to run everything on the GPU."""
        device_option = caffe2_pb2.DeviceOption()
        device_option.device_type = caffe2_pb2.CUDA
        device_option.cuda_gpu_id = gpu_id
        self._net.device_option.CopyFrom(device_option)
        if use_cudnn:
            for op in self._net.op:
                op.engine = "CUDNN"
    def RunAllOnMKL(self):
        """A convenient function to run everything with the MKLDNN device."""
        device_option = caffe2_pb2.DeviceOption()
        device_option.device_type = caffe2_pb2.MKLDNN
        self._net.device_option.CopyFrom(device_option)
    def _CreateAndAddToSelf(self, op_type, inputs, outputs=None, **kwargs):
        """A helper function to create an operator and add it to self.
        """
        inputs = _RectifyInputOutput(inputs)
        for input in inputs:
            if not self.BlobIsDefined(input):
                # Inputs produced by another net become external inputs here.
                assert input.Net() != self
                self.AddExternalInput(input)
        if outputs is None:
            # If we do not specify an output, we will assume that this op
            # produces one output in this case.
            outputs = self.NextName(prefix=op_type)
        elif type(outputs) is int:
            # In this case, we will auto-fill the given number of outputs
            # with auto-generated names.
            outputs = [
                self.NextName(prefix=op_type, output_id=i)
                for i in range(outputs)]
        outputs = _RectifyInputOutput(outputs, net=self)
        op = CreateOperator(op_type, inputs, outputs, **kwargs)
        self._ExtendOps([op])
        # Record where this op was created, for error reporting.
        workspace.operator_tracebacks[self.Name()][
            len(self._net.op) - 1] = _extract_stacktrace()
        if len(op.output) == 0:
            return
        elif len(op.output) == 1:
            return BlobReference(op.output[0], self)
        else:
            return tuple(BlobReference(o, self) for o in op.output)
    def __getattr__(self, op_type):
        """Treat unknown attributes as registered operator names, returning a
        callable that creates and appends the operator to this net."""
        if op_type.startswith('__'):
            raise AttributeError('Attribute {} not found.'.format(op_type))
        if not IsOperator(op_type) and not IsOperatorWithEngine(op_type, "CUDNN"):
            raise AttributeError(
                'Method ' + op_type + ' is not a registered operator.' +
                ' Did you mean: [' +
                ",".join(workspace.C.nearby_opnames(op_type)) + ']'
            )
        return lambda *args, **kwargs: self._CreateAndAddToSelf(
            op_type, *args, **kwargs)
    def __dir__(self):
        # Expose registered operator names (minus engine-specific variants)
        # so autocomplete matches what __getattr__ accepts.
        additional_methods = [
            op
            for op in _REGISTERED_OPERATORS
            if '_ENGINE_' not in op]
        return sorted(set(chain(
            dir(type(self)),
            viewkeys(self.__dict__),
            additional_methods
        )))
    def Python(
        self,
        f,
        grad_f=None,
        python_func_type=None,
        pass_workspace=False,
        grad_output_indices=None,
        grad_input_indices=None
    ):
        """
        Registers and returns a python operator.
        `f` and `grad_f` can be one of the following:
            - a function with signature (inputs, outputs), where inputs and
              outputs are a list of CPUTensor objects. This function will be
              called from C++ everytime the operator is executed.
            - a tuple (func, args, kwargs), here `func` is a callable, args is
              an argument list, and kwargs is a dict list. The call:
                  f = func(*args, kwargs)
              will be performed locally at node initialization time, on all of
              the nodes of the job, returning `f`, a callable that will be used
              as the python operator function to be called during Net execution.
              This is to be used when using python operator in a distributed
              context, and allows to create and keep local python state across
              calls to the operator.
        `python_func_type` is a type of an object that constructed as
        python_func_type(f) and provides an implementation to forward and
        backward functions. Its useful in such a case where users needs
        a statefull PythonOp (ex: use autograd for computing grad_f).
        If `pass_workspace` is True, the signature is changed to
        (inputs, outputs, workspace) where `workspace` is the workspace the op
        is going to run on. This is potentially dangerous (as the op can
        manipulate the workspace directly), use on your own risk.
        If a gradient function is specified (`grad_f`), by default its inputs
        will be: (1) all inputs to `f`, (2) followed by all outputs of `f`, (3)
        and then all gradient outputs of `f`. The outputs of `grad_f` will be
        (by default) all gradient inputs to `f`. If a subset of the gradient
        outputs or gradient inputs is desired instead, then the subsets can be
        specified by providing `grad_output_indices` and/or `grad_input_indices`
        which identify the indices of `f`'s inputs and outputs which have
        gradients.
        """
        assert(IsOperator('Python'))
        def make_builder(t):
            # Pickle a (func, args, kwargs) builder tuple for distributed use;
            # non-tuples mean "no builder" and yield an empty token.
            if not isinstance(t, tuple):
                return ''
            assert len(t) == 3, 'Expected builder tuple (func, args, kwargs)'
            func, args, kwargs = t
            normalized = (func, tuple(args), dict(kwargs))
            return pickle.dumps(normalized)
        f_builder = make_builder(f)
        grad_f_builder = make_builder(grad_f)
        assert (not grad_f) or ((not f_builder) == (not grad_f_builder)), (
            'A tuple has to be passed to both f and grad_f or neither.')
        core_kwargs = {}
        if f_builder:
            core_kwargs['pickled_builder'] = f_builder
            core_kwargs['pickled_grad_builder'] = grad_f_builder
            core_kwargs['pass_workspace'] = pass_workspace
        else:
            # Plain callables are registered in-process and referenced by token.
            core_kwargs['token'] = _RegisterPythonImpl(
                f, grad_f, python_func_type, pass_workspace=pass_workspace)
        grad_output_indices = grad_output_indices or []
        grad_input_indices = grad_input_indices or []
        return lambda *args, **kwargs: self._CreateAndAddToSelf(
            'Python',
            grad_output_indices=grad_output_indices,
            grad_input_indices=grad_input_indices,
            *args,
            **dict(chain(viewitems(kwargs), viewitems(core_kwargs)))
        )
    def is_external_input(self, blob):
        """Return True iff `blob` is a declared external input of this net."""
        name = str(blob)
        return name in self._external_input_map
    def extend_ops(self, new_ops):
        """Public wrapper around _ExtendOps."""
        return self._ExtendOps(new_ops)
def copy_func_between_devices(src, dst):
    """
    Return a callable `fun(net, *args, **kw)` that adds the appropriate
    copy operator moving a blob from device `src` to device `dst`, or
    None when no copy is required (same device). Raises ValueError for
    unsupported device pairs.
    """
    CPU = caffe2_pb2.CPU
    CUDA = caffe2_pb2.CUDA

    def _make_copier(op_name, scope_device):
        # Build a closure that emits the named copy op under the right DeviceScope.
        def fun(net, *args, **kw):
            with DeviceScope(scope_device):
                return getattr(net, op_name)(*args, **kw)
        return fun

    if src.device_type == CPU and dst.device_type == CPU:
        return None
    if src.device_type == CUDA and dst.device_type == CUDA:
        if src.cuda_gpu_id == dst.cuda_gpu_id:
            return None
        # GPU-to-GPU: plain Copy, scoped to the destination device.
        return _make_copier('Copy', dst)
    if src.device_type == CUDA and dst.device_type == CPU:
        # Download: scoped to the source GPU.
        return _make_copier('CopyGPUToCPU', src)
    if src.device_type == CPU and dst.device_type == CUDA:
        # Upload: scoped to the destination GPU.
        return _make_copier('CopyCPUToGPU', dst)
    raise ValueError('Non-supported devices: %s and %s' % (src, dst))
def device_equal(src, dst):
    '''
    Compare two device options for equality.

    We use this helper function instead of the == operator because
    optional-value comparison between empty device_options and
    {device_type:0, cuda_gpu_id:0} returns not equal in some cases.
    '''
    same_type = src.device_type == dst.device_type
    same_gpu = src.cuda_gpu_id == dst.cuda_gpu_id
    return same_type and same_gpu
class RemapEntry:
    """Hashable (blob, device) pair used as the key when tracking copied blobs."""

    def __init__(self, blob, device):
        self.blob = blob
        self.device = device

    def __eq__(self, other):
        # Entries match when both the blob name and the device agree.
        return (self.blob, self.device) == (other.blob, other.device)

    def __hash__(self):
        # Hash mirrors equality: derived from the blob plus the device's string form.
        return hash(self.blob + str(self.device))
def InjectCrossDeviceCopies(net, blob_to_device=None):
    '''
    Inject Copy operators between devices within a net. Users can provide
    a net where some operators use different device_options; this method
    builds and returns a new net with the needed Copy/CopyCPUToGPU/
    CopyGPUToCPU ops inserted.

    Inputs:
      net: the Net to convert.
      blob_to_device: if not None, a map from blob name to the device
          option where that blob currently lives.

    Returns:
      (new_net, blob_to_device): the converted net, and the (updated)
      blob-to-device mapping after processing every operator.

    Assumptions:
      1. every external input of this net is already in blob_to_device;
      2. if not, this function falls back to the net-level device option.
    '''
    # Clone without ops; ops are re-added one by one with remapped inputs.
    new_net = net.Clone(net._net.name + '_cross_device', keep_schema=True)
    del new_net._net.op[:]
    blob_to_device = blob_to_device or {}
    # remapping of input blobs for each op.
    blob_remap = {}
    temp_remap = {}
    net_option = net._net.device_option or caffe2_pb2.DeviceOption()
    for op in net._net.op:
        temp_remap.clear()
        # Get where inputs and outputs should be
        input_dev, output_dev = InferOpBlobDevices(op)
        for dev, input in zip(input_dev, op.input):
            assert net.BlobIsDefined(input), \
                "input {} should be defined in the net.".format(input)
            if input not in blob_to_device:
                # External inputs default to the net-level device option.
                if net.is_external_input(input):
                    blob_to_device[input] = net_option
                else:
                    raise AttributeError(
                        "No device information found for blob {}.".
                        format(input)
                    )
            if not device_equal(blob_to_device[input], dev):
                # reuse already moved input
                if (RemapEntry(input, dev) in blob_remap and
                        blob_to_device[blob_remap[RemapEntry(input, dev)]] == dev):
                    temp_remap[input] = blob_remap[RemapEntry(input, dev)]
                else:
                    # need to make input on correct device.
                    copy_func = copy_func_between_devices(
                        blob_to_device[input], dev
                    )

                    def _gen_new_name(blob, device_option):
                        # Derive a device-qualified name for the copied blob,
                        # e.g. 'x_cpu' or 'x_cuda_0'.
                        CPU = caffe2_pb2.CPU
                        CUDA = caffe2_pb2.CUDA
                        if device_option.device_type == CPU:
                            suffix = '_cpu'
                        elif device_option.device_type == CUDA:
                            suffix = '_cuda_' + str(device_option.cuda_gpu_id)
                        else:
                            raise RuntimeError(
                                "Unknown device type: {}".
                                format(device_option.device_type)
                            )
                        return blob + suffix

                    new_name = _gen_new_name(input, dev)
                    copy_func(new_net, input, new_name)
                    blob_remap[RemapEntry(input, dev)] = new_name
                    temp_remap[input] = new_name
                    blob_to_device[new_name] = dev
        # Enforcing no reuse blob between operators. In-place blob usage in an
        # op is allowed. This is based on the assumption that in-place op has
        # same device info
        for out_blob, device in zip(op.output, output_dev):
            if out_blob in blob_to_device and (
                out_blob not in op.input and
                not device_equal(blob_to_device[out_blob], device)
            ):
                raise RuntimeError(
                    "In-place blob: {} is not supported between operators "
                    "with different device option previous:{} now: {}. "
                    "Failed op:\n {}".format(
                        out_blob, blob_to_device[out_blob], device, op
                    )
                )
        # Record where this op leaves its outputs.
        blob_to_device.update({o: d for d, o in zip(output_dev, op.output)})
        new_op = caffe2_pb2.OperatorDef()
        new_op.CopyFrom(op)
        # Rewrite the op's inputs to the device-local (possibly copied) names.
        new_list = [temp_remap.get(b, b) for b in new_op.input]
        del new_op.input[:]
        new_op.input.extend(new_list)
        new_net.extend_ops([new_op])
    return new_net, blob_to_device
def InjectDeviceCopiesAmongNets(nets, blob_to_device_init=None):
    """
    Take a list of nets (usually the whole execution graph), insert
    cross-device Copy operators in each, and resolve inter-net external
    input dependencies: a Copy is inserted whenever an external input of
    a net was produced on a different device than it is required on.

    Inputs:
        nets: a list of nets, in EXECUTION ORDER (e.g. [train_init, train]).
        blob_to_device_init: optional initial blob-name -> device mapping,
            threaded through all nets.

    Returns:
        (new_nets, blob_to_device): the converted nets plus the final
        holistic blob-to-device mapping.
    """
    assert isinstance(nets, list), \
        "nets {} should be a list of nets.".format(str(nets))
    assert all(isinstance(net, Net) for net in nets), \
        "nets {} should be a list of nets.".format(str(nets))
    # A holistic blob to device mapping.
    blob_to_device = blob_to_device_init or {}
    new_nets = []
    for net in nets:
        # The mapping is carried from net to net so later nets see where
        # earlier nets left each blob.
        new_net, blob_to_device = InjectCrossDeviceCopies(
            net, blob_to_device=blob_to_device
        )
        new_nets.append(new_net)
    return new_nets, blob_to_device
def InjectDeviceCopiesAmongNetsWithoutB2D(nets, blob_to_device_init=None):
    """Same as InjectDeviceCopiesAmongNets, but drop the blob-to-device map."""
    converted_nets, _unused_map = InjectDeviceCopiesAmongNets(
        nets, blob_to_device_init)
    return converted_nets
def get_net_name(netlike):
    """Accept a Net, a NetDef proto, or a plain name and return the net's name."""
    if isinstance(netlike, Net):
        return netlike.Proto().name
    if isinstance(netlike, caffe2_pb2.NetDef):
        return netlike.name
    # Assume it is already a name.
    return netlike
def output_to_list(op_output):
    """
    Ensure that the output of an operator is a list.

    Use when an operator has a variable number of outputs, but a list of
    outputs is desired even when the number of outputs is 1.

    Args:
        op_output: either a BlobReference or an iterable of BlobReferences.

    Returns:
        A list of BlobReferences.
    """
    assert type(op_output) in (list, tuple, BlobReference)
    if isinstance(op_output, BlobReference):
        return [op_output]
    return list(op_output)
def _add_net_to_dict(net_dict, net):
    """
    Register `net` in `net_dict` keyed by its name.

    Returns True when the name was newly added; False when it was already
    present (in which case the existing entry must be None or the same net).
    """
    name = get_net_name(net)
    if name not in net_dict:
        # Only real Net objects are stored; name-only entries map to None.
        net_dict[name] = net if isinstance(net, Net) else None
        return True
    assert net_dict[name] is None or net == net_dict[name], (
        'Different nets with same name: ' + name)
    return False
class ExecutionStep(object):
    """
    Python wrapper around a caffe2_pb2.ExecutionStep proto.

    A step either holds nets directly or holds substeps (never both).
    Once a step has been added to a Plan or to another step it is marked
    as used and must not be mutated further.
    """
    # Global registry of step names already handed out, used to keep
    # auto-generated names unique across all ExecutionStep instances.
    _step_names_used = set()

    @staticmethod
    def _get_next_step_name(basename):
        """Return `basename`, suffixed with '_<idx>' if needed for uniqueness."""
        name = basename
        next_idx = 1
        while name in ExecutionStep._step_names_used:
            name = basename + '_' + str(next_idx)
            next_idx += 1
        ExecutionStep._step_names_used |= set([name])
        return name

    def __init__(self, name, nets=None, num_iter=None):
        self._step = caffe2_pb2.ExecutionStep()
        self._step.name = name or ExecutionStep._get_next_step_name('step')
        # Maps net name -> Net object (or None for name-only entries).
        self._net_dict = OrderedDict()
        self._is_used = False
        self._substeps = []
        if nets is not None:
            # Accept a single Net as a convenience.
            if type(nets) is Net:
                nets = [nets]
            for net in nets:
                if _add_net_to_dict(self._net_dict, net):
                    self._step.network.extend([get_net_name(net)])
        if num_iter is not None:
            self._step.num_iter = num_iter

    def get_net(self, name):
        """Return the Net registered under `name` (KeyError if unknown)."""
        return self._net_dict[name]

    def Name(self):
        """Return the step's name."""
        return self._step.name

    def __str__(self):
        return self._step.name

    def _assert_can_mutate(self):
        assert not self._is_used, (
            'Cannot mutate a step that has already been added to a plan/step.')

    def _notify_is_used(self):
        self._is_used = True

    def Proto(self):
        """Return the underlying caffe2_pb2.ExecutionStep proto."""
        return self._step

    def HasNets(self):
        """True when this step directly references at least one net."""
        return self._step.network is not None and (
            len(self._step.network) > 0)

    def HasSubsteps(self):
        """True when this step has at least one substep."""
        return self._step.substep is not None and (
            len(self._step.substep) > 0)

    def Nets(self):
        """Return the list of Net objects registered with this step."""
        return list(viewvalues(self._net_dict))

    def Substeps(self):
        """Return the list of child ExecutionStep objects."""
        return self._substeps

    def SetIter(self, num_iter):
        """Set how many times this step should be executed."""
        self._assert_can_mutate()
        self._step.num_iter = num_iter

    def SetCreateWorkspace(self, create_workspace):
        """Request a child workspace for this step's execution."""
        self._assert_can_mutate()
        self._step.create_workspace = create_workspace

    def SetNumConcurrentInstances(self, num_concurrent_instances):
        """Set the number of concurrent instances of this step to run."""
        self._assert_can_mutate()
        self._step.num_concurrent_instances = num_concurrent_instances

    def SetOnlyOnce(self, only_once):
        """Mark the step to run at most once."""
        self._assert_can_mutate()
        self._step.only_once = only_once

    def SetShouldStopBlob(self, should_stop_blob):
        """Set the scalar boolean blob checked after every substep/subnet."""
        assert isinstance(should_stop_blob, BlobReference), (
            "expects BlobReference here, got {}".format(type(should_stop_blob)))
        self._assert_can_mutate()
        self._step.should_stop_blob = str(should_stop_blob)

    def RunEveryMillis(self, interval):
        """
        Run this step every `interval` milliseconds, as long as its
        siblings are still running. It is guaranteed that, after all
        siblings finish, this step will run at least once.
        This property is ignored for top-level ExecutionSteps.
        """
        self._step.run_every_ms = interval

    def SetReportNet(self, report_net, report_interval):
        """ DEPRECATED. Use RunEveryMillis instead. """
        self._assert_can_mutate()
        _add_net_to_dict(self._net_dict, report_net)
        self._step.report_net = get_net_name(report_net)
        self._step.report_interval = report_interval

    def AddSubstep(self, substep):
        """
        Add a child step (ExecutionStep or raw proto). Returns self for chaining.
        Empty ExecutionSteps are silently skipped.
        """
        self._assert_can_mutate()
        assert not self.HasNets(), 'Cannot have both network and substeps.'
        if isinstance(substep, ExecutionStep):
            substep._notify_is_used()
            if not substep.HasNets() and not substep.HasSubsteps():
                return self
            # Merge the child's nets into this step's registry.
            for net in substep.Nets():
                _add_net_to_dict(self._net_dict, net)
            self._substeps.append(substep)
            proto = substep.Proto()
        else:
            proto = substep
        self._step.substep.add().CopyFrom(proto)
        return self

    def SetConcurrentSubsteps(self, concurrent_substeps):
        """Mark substeps to run concurrently rather than sequentially."""
        self._assert_can_mutate()
        assert not self.HasNets(), 'Cannot have both network and substeps.'
        self._step.concurrent_substeps = concurrent_substeps

    def AddNet(self, net):
        """Attach a Net to this step. Returns self for chaining."""
        self._assert_can_mutate()
        assert not self.HasSubsteps(), 'Cannot have both network and substeps.'
        assert isinstance(net, Net)
        _add_net_to_dict(self._net_dict, net)
        self._step.network.extend([get_net_name(net)])
        return self

    def get_all_attributes(self, name):
        """
        Return the list of all attributes under the given `name`, present in
        all of the nets used in this execution step and its children.
        """
        return [
            attr
            for net in viewvalues(self._net_dict)
            for attr in net.get_attributes(name)
        ]
def add_nets_in_order(step, net_list):
    """
    Append the names of all nets used by `step` to `net_list`, depth-first
    (substeps before the step's own nets), skipping duplicates.
    """
    proto = step.Proto()
    for child in step.Substeps():
        add_nets_in_order(child, net_list)
    for net_name in proto.network:
        if net_name not in net_list:
            net_list.append(net_name)
    # FIXME(azzolini): This is actually wrong. Report nets should be
    # instantiated first since they may run before any substep is run.
    # However, currently, Reporter depends on this behavior.
    if proto.report_net and proto.report_net not in net_list:
        net_list.append(proto.report_net)
class Plan(object):
    """
    Python wrapper around a caffe2_pb2.PlanDef proto: a named collection of
    nets plus the execution steps that run them.
    """

    def __init__(self, name_or_step):
        """Build a Plan from either a plan name or a root ExecutionStep."""
        self._plan = caffe2_pb2.PlanDef()
        self._net_dict = OrderedDict()
        if isinstance(name_or_step, ExecutionStep):
            self._plan.name = name_or_step.Name()
            self.AddStep(name_or_step)
        # NOTE(review): `basestring` must be provided by a py2/3 compat import
        # (e.g. past.builtins) at the top of this module — confirm.
        elif isinstance(name_or_step, basestring):
            self._plan.name = name_or_step
        else:
            raise ValueError('name_or_step must be a string or ExecutionStep')

    def __str__(self):
        return self._plan.name

    def Proto(self):
        """Return the underlying caffe2_pb2.PlanDef proto."""
        return self._plan

    def AddNets(self, nets):
        """Register each Net and copy its proto into the plan (skipping duplicates)."""
        for net in nets:
            if _add_net_to_dict(self._net_dict, net):
                assert isinstance(net, Net)
                self._plan.network.add().CopyFrom(net.Proto())

    def Nets(self):
        """Return the list of Net objects registered with this plan."""
        return list(viewvalues(self._net_dict))

    def AddStep(self, step):
        """Append an ExecutionStep (and all nets it uses) to the plan."""
        assert isinstance(step, ExecutionStep)
        step._notify_is_used()
        # Empty steps are silently dropped.
        if not step.HasNets() and not step.HasSubsteps():
            return
        self._plan.execution_step.add().CopyFrom(step.Proto())
        # nets need to be added to the plan in order of usage
        net_list = []
        add_nets_in_order(step, net_list)
        self.AddNets([step.get_net(n) for n in net_list])

    def get_all_attributes(self, name):
        """
        Return the list of all attributes under the given `name`, present in
        all of the nets used in this plan.
        """
        return [
            attr
            for net in viewvalues(self._net_dict)
            for attr in net.get_attributes(name)
        ]
def to_execution_step(step_or_nets, default_name=None):
    """
    Coerce `step_or_nets` into an ExecutionStep.

    Accepts an ExecutionStep (returned unchanged), a NetBuilder (its stop
    blob and contents are unwrapped), or anything execution_step() accepts
    (Net, list of Nets/steps). `default_name` falls back to the object's
    own `name` attribute when present.
    """
    from caffe2.python.net_builder import NetBuilder
    if isinstance(step_or_nets, ExecutionStep):
        return step_or_nets
    stop_blob = None
    if not default_name and hasattr(step_or_nets, 'name'):
        default_name = step_or_nets.name
    if isinstance(step_or_nets, NetBuilder):
        # Unwrap the builder: keep its stop blob and convert its contents.
        stop_blob = step_or_nets._stop_blob
        step_or_nets = step_or_nets.get()
    return execution_step(
        default_name, step_or_nets, should_stop_blob=stop_blob)
def execution_step(default_name,
                   steps_or_nets,
                   num_iter=None,
                   report_net=None,
                   report_interval=None,
                   concurrent_substeps=None,
                   should_stop_blob=None,
                   only_once=None,
                   num_concurrent_instances=None,
                   create_workspace=False):
    """
    Helper for creating an ExecutionStep.
    - steps_or_nets can be:
      - None
      - Net
      - ExecutionStep
      - list<Net>
      - list<ExecutionStep>
    - should_stop_blob is either None or a scalar boolean blob.
      - This blob is checked AFTER every substeps/subnets.
      - If specified and true, then this step will return immediately.
      - Be sure to handle race conditions if setting from concurrent threads.
    - if no should_stop_blob or num_iter is provided, defaults to num_iter=1
    """
    assert should_stop_blob is None or num_iter is None, (
        'Cannot set both should_stop_blob and num_iter.')
    if should_stop_blob is None and num_iter is None:
        num_iter = 1
    # Configure the step's scalar options first; nets/substeps are attached last.
    step = ExecutionStep(default_name)
    if should_stop_blob is not None:
        step.SetShouldStopBlob(should_stop_blob)
    if num_iter is not None:
        step.SetIter(num_iter)
    if only_once is not None:
        step.SetOnlyOnce(only_once)
    if concurrent_substeps is not None:
        step.SetConcurrentSubsteps(concurrent_substeps)
    if report_net is not None:
        # Report nets require an interval (DEPRECATED path; see RunEveryMillis).
        assert report_interval is not None
        step.SetReportNet(report_net, report_interval)
    if num_concurrent_instances is not None:
        step.SetNumConcurrentInstances(num_concurrent_instances)
    if create_workspace:
        step.SetCreateWorkspace(True)
    if isinstance(steps_or_nets, ExecutionStep):
        step.AddSubstep(steps_or_nets)
    elif isinstance(steps_or_nets, Net):
        step.AddNet(steps_or_nets)
    elif isinstance(steps_or_nets, list):
        # A homogeneous list of Nets is attached directly; anything else is
        # converted element-by-element into substeps.
        if all(isinstance(x, Net) for x in steps_or_nets):
            for x in steps_or_nets:
                step.AddNet(x)
        else:
            for x in steps_or_nets:
                step.AddSubstep(to_execution_step(x))
    elif steps_or_nets:
        raise ValueError(
            'steps_or_nets must be a step, a net, or a list of nets or steps.')
    return step
def scoped_execution_step(name, *args, **kwargs):
    """Wrapper around execution_step() that applies the current name scope to `name`."""
    scoped_name = ScopedName(name) if name else name
    return execution_step(scoped_name, *args, **kwargs)
def _extract_stacktrace():
    '''
    Extract a stack trace without any file system access, purely via
    sys._getframe(), dropping the frames that belong to this file (core.py).
    We do not use the inspect module because it is just a wrapper on top of
    sys._getframe() whose logic is based on reading source files from disk —
    exactly what we are trying to avoid here; the same goes for the
    traceback module. File system access is avoided because, if the code
    lives on an NFS mount, file access might be slow.
    Returns a list of tuples (file_name, line_number), innermost caller first.
    '''
    current_file_name = __name__.replace('.', '/') + ".py"
    result = []
    # Start at the caller's frame (depth 1), skipping this function itself.
    frame = sys._getframe(1)
    # We just go down the frame stack in a loop
    while frame:
        if current_file_name not in frame.f_code.co_filename:
            # It's important to extract information from the frame here,
            # as the frame's current line most probably will change later.
            result.append((frame.f_code.co_filename, frame.f_lineno))
        frame = frame.f_back
    return result
| [
"charliehouseago@gmail.com"
] | charliehouseago@gmail.com |
498f10683ef402dac4279ed635e049a50304e21b | 51f43f1901d5aad2bce2b5ccd82bd9d5a0b397d1 | /TranskribusDU/graph/FeatureDefinition.py | ebadd300a6236d4fd7664bfa8d804b789e9ac6d6 | [
"BSD-3-Clause"
] | permissive | kapitsa2811/TranskribusDU | e1fd32b656ed8e3dcddc62e149647398cc48030e | 9e680b0bf14ea52678f7c4dccad465d5a9d9ee9d | refs/heads/master | 2020-08-09T12:52:52.832320 | 2019-10-01T15:04:38 | 2019-10-01T15:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,235 | py | # -*- coding: utf-8 -*-
"""
Feature Definition
Sub-class it and specialize getTransformer and clean_tranformers
Copyright Xerox(C) 2016 JL. Meunier
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import math
from common.trace import traceln
class FeatureDefinition:
    """
    Binds the node and edge feature transformers used by a graph model.

    Sub-class this to define which features from a Transformer class you
    want for nodes and edges.
    """

    def __init__(self, nbClass=None, node_transformer=None, edge_transformer=None):
        # Number of node classes (also called 'labels', and 'states' in pystruct).
        self.nbClass = nbClass
        self._node_transformer = node_transformer
        self._edge_transformer = edge_transformer

    def setTransformers(self, node_transformer, edge_transformer):
        """Install the node and edge transformers."""
        self._node_transformer, self._edge_transformer = (
            node_transformer, edge_transformer)

    def getTransformers(self):
        """Return the (node transformer, edge transformer) pair."""
        return self._node_transformer, self._edge_transformer

    def fitTranformers(self, lGraph, lY=None):
        """
        Fit both transformers on all nodes and all edges of the given graphs.
        Returns True.
        """
        all_nodes = [nd for g in lGraph for nd in g.lNode]
        self._node_transformer.fit(all_nodes, lY)
        del all_nodes  # free the node list before collecting edges
        all_edges = [edge for g in lGraph for edge in g.lEdge]
        self._edge_transformer.fit(all_edges, lY)
        del all_edges
        return True

    def cleanTransformers(self):
        """
        Drop large transformer state that is useless in production (for
        instance, TFIDF transformers keep stop words => huge pickled file).
        Returns None.
        """
        for transformer in self.getTransformers():
            try:
                transformer.cleanTransformers()
            except Exception as e:
                traceln("Cleaning warning: ", e)
        return None

    def _getTypeNumber(self, kwargs):
        """
        Recover N from a config dict of length N + N^2
        (N unary-extractor configs plus N^2 pairwise configs).
        """
        return int(round(math.sqrt(len(kwargs) + 1 / 4.0) - 0.5, 0))
| [
"jean-luc.meunier@naverlabs.com"
] | jean-luc.meunier@naverlabs.com |
d7fee68df34da617cb66adea6b51ff9b78e676cb | f66ef1c5aff4a601deb7cb6957e380f53fdfd72a | /lectures/session2/session2_warmup_1.py | a00d13f18d6ee1653d2cdaaebd7e639def828a1f | [] | no_license | techkids-vn/C4E | dca0673a4b86aa3e4351f418283d9d62b41c813a | b8c32c17b05fa8415845b263f75ef801609ce352 | refs/heads/master | 2016-09-12T14:48:13.238969 | 2016-04-26T10:52:18 | 2016-04-26T10:52:18 | 57,104,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from turtle import *
# Number of sides of the regular polygon to draw.
side = 5
# Draw one edge per iteration, turning by the exterior angle (360/side degrees).
for i in range(side):
    forward(20)
    left(360/side)
# Enter the Tk event loop so the drawing window stays open.
mainloop()
| [
"qhuydtvt@gmail.com"
] | qhuydtvt@gmail.com |
14a8ed01957f432d88c64cd1119811b80606126c | 6238dc5b5818f54295547cf4cb1afa5553ddfb94 | /taobao/top/api/rest/SimbaRptAdgroupeffectGetRequest.py | 3a84baac14a3d21395468c846584c6fc0d259785 | [] | no_license | liaosiwei/guagua | 8208bb82b1df5506dcb86c1a7094c849ea5576a6 | ee6025813e83568dc25beb52279c86f8bd33f1a4 | refs/heads/master | 2016-09-06T16:45:00.798633 | 2013-05-03T04:02:35 | 2013-05-03T04:02:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | '''
Created by auto_sdk on 2013-04-14 16:35:32
'''
from top.api.base import RestApi
class SimbaRptAdgroupeffectGetRequest(RestApi):
    """Request wrapper for the 'taobao.simba.rpt.adgroupeffect.get' TOP API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; all start unset and are filled in by the caller.
        for param in ('adgroup_id', 'campaign_id', 'end_time', 'nick',
                      'page_no', 'page_size', 'search_type', 'source',
                      'start_time', 'subway_token'):
            setattr(self, param, None)

    def getapiname(self):
        """Return the TOP API method name for this request."""
        return 'taobao.simba.rpt.adgroupeffect.get'
| [
"liaosiweiorxiaowei@gmail.com"
] | liaosiweiorxiaowei@gmail.com |
9fc4ac132c48756bea8f1d552e5271d4383972b4 | 9d1e84e70048a6a6cba71e93c3f0a66afbd276dd | /code/storyboard_root/models/storytext_model.py | e438cdf05920d4e9a3aa77c4f18e98ee6b741cc1 | [] | no_license | 1SouravGhosh/StoryBoard-API-services | ab7cd6523bf06d1f042aa0a9dbba52a5bc712336 | 4cb79e7df1d95ae1dc337267b008c0a6cf42e80a | refs/heads/master | 2023-04-09T01:59:01.882784 | 2023-03-26T14:18:18 | 2023-03-26T14:18:18 | 166,553,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py | import datetime
from optparse import Option
from os.path import getsize, join
from symbol import with_item
from typing import Text
from xml.etree.ElementTree import tostring
import psycopg2
from click import DateTime
from flask.globals import session
from flask_restful.fields import Boolean, DateTime, Integer
from psycopg2.extensions import Column
from pylint.pyreverse.diagrams import Relationship
from sqlalchemy.orm import backref, defer, load_only, relationship, undefer
from sqlalchemy.sql.expression import outerjoin
from sqlalchemy.sql.operators import like_op
from sqlalchemy.sql.schema import FetchedValue, ForeignKey
from storyboard_root.resources.database_resources.db_resource import db
class StoryTextModel(db.Model):
    """SQLAlchemy model for sch_storyboard.tbl_storytext — a story's body text."""

    __table_args__ = {"schema": "sch_storyboard"}
    __tablename__ = "tbl_storytext"

    story_text_id = db.Column(db.Integer, primary_key=True)
    story_text = db.Column(db.Text)
    # Story rows that reference this text (backref 'storytext' on StoryModel).
    story_table = db.relationship("StoryModel", backref='storytext')

    def __init__(self, i_story_text_id, i_story_text):
        self.story_text_id = i_story_text_id
        self.story_text = i_story_text

    def json(self):
        """Serialize this row to a plain dict for API responses."""
        return {
            "storytextid": self.story_text_id,
            "storytext": self.story_text
        }

    @classmethod
    def get_storytext_by_id(cls, in_storytext_id):
        """Return the row with the given primary key, or None if absent or on error."""
        try:
            return cls.query.filter_by(story_text_id=in_storytext_id).first()
        except Exception as exc:
            # Was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt.
            print("exception occurred in get_storytext_by_id:", exc)
            return None

    @classmethod
    def create_storytext(cls, in_storytext):
        """Insert a new story text row and return it, or None on failure."""
        try:
            new_storytext = cls(i_story_text_id=None, i_story_text=in_storytext)
            db.session.add(new_storytext)
            db.session.commit()
            # The primary key is populated on commit, so return the new row
            # directly. The old code re-queried for the highest id, which
            # could return another session's row under concurrent inserts.
            return new_storytext
        except Exception as exc:
            print("exception occurred in create_storytext:", exc)
            # Leave the session usable after a failed flush/commit.
            db.session.rollback()
            return None

    @classmethod
    def update_storytext(cls, in_storytext_id, in_storytext):
        """Update the text of an existing row; no-op when the row or text is missing."""
        try:
            existing_storytext = cls.get_storytext_by_id(in_storytext_id)
            # Guard against a missing row as well as a None payload; the old
            # code raised (and then swallowed) AttributeError for unknown ids.
            if existing_storytext is not None and in_storytext is not None:
                existing_storytext.story_text = in_storytext
        except Exception as exc:
            print("exception occurred in update_storytext:", exc)
        finally:
            db.session.commit()

    @classmethod
    def delete_story_by_id(cls, in_storytext_id):
        """Delete the row with the given primary key."""
        try:
            cls.query.filter_by(story_text_id=in_storytext_id).delete()
        except Exception as exc:
            print("exception occurred in delete_story_by_id:", exc)
        finally:
            db.session.commit()
| [
"1SouravGhosh@noreply.github.com"
] | 1SouravGhosh@noreply.github.com |
d8ce2e16873a5987085be1bb79a1fefb9ae058d4 | 5e8d200078e64b97e3bbd1e61f83cb5bae99ab6e | /main/source/src/python/PyRosetta/src/pyrosetta/__init__.py | 3f89716dc31faf5c322be7b1d5ec64653c77360a | [] | no_license | MedicaicloudLink/Rosetta | 3ee2d79d48b31bd8ca898036ad32fe910c9a7a28 | 01affdf77abb773ed375b83cdbbf58439edd8719 | refs/heads/master | 2020-12-07T17:52:01.350906 | 2020-01-10T08:24:09 | 2020-01-10T08:24:09 | 232,757,729 | 2 | 6 | null | null | null | null | UTF-8 | Python | false | false | 15,367 | py | from __future__ import absolute_import
# :noTabs=true:
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org.
# (c) Questions about this can be addressed to University of Washington CoMotion, email: license@uw.edu.
###############################################################################
# Imports.
# Standard library.
import os, sys
import pyrosetta.rosetta as rosetta
import pyrosetta.bindings
import pyrosetta.protocols
import warnings
import logging
logger = logging.getLogger("pyrosetta.rosetta")
import pyrosetta.logging_support as logging_support
# This try/except block should be removed after the decorator module
# is installed on the test server.
try:
from pyrosetta.distributed.utility.log import LoggingContext
except:
pass
from pyrosetta.toolbox import etable_atom_pair_energies, PyJobDistributor
# PyRosetta-3 compatibility
# WARNING WARNING WARNING: do not add anything extra imports/names here! If you feel strongly that something needs to be added please contact author first!
from pyrosetta.rosetta.core.kinematics import FoldTree, MoveMap
from pyrosetta.rosetta.core.io.pdb import dump_pdb
from pyrosetta.rosetta.core.id import AtomID
from pyrosetta.rosetta.core.scoring import ScoreFunction
from pyrosetta.rosetta.protocols.moves import PyMOLMover, SequenceMover, RepeatMover, TrialMover, MonteCarlo
from pyrosetta.rosetta.protocols.simple_moves import SwitchResidueTypeSetMover
from pyrosetta.rosetta.protocols.loops import get_fa_scorefxn
from pyrosetta.io import pose_from_pdb, pose_from_file, pose_from_sequence, poses_from_silent, Pose
from pyrosetta.rosetta.core.scoring import get_score_function
create_score_function = pyrosetta.rosetta.core.scoring.ScoreFunctionFactory.create_score_function
rosetta.utility.vector1_string = rosetta.utility.vector1_std_string
###############################################################################
# Exception handling.
class PyRosettaException(Exception):
    """Raised by the exit callback so Rosetta C++ 'exit' surfaces as a Python error."""

    def __str__(self):
        # Constructor arguments are deliberately ignored in the string form.
        label = 'PyRosettaException'
        return label
class PythonPyExitCallback(rosetta.utility.py.PyExitCallback):
    """Exit callback registered with Rosetta's C++ layer."""
    def __init__(self):
        rosetta.utility.py.PyExitCallback.__init__(self)
    def exit_callback(self):
        # Invoked from C++ when Rosetta would otherwise terminate the process;
        # raise a Python exception instead so the interpreter survives.
        raise PyRosettaException()
###############################################################################
#
def _rosetta_database_from_env():
    """Read rosetta database directory from environment or standard install locations.
    Database resolution proceeds by first searching the current installation for a 'database' or 'rosetta_database'
    path. If not found the search then continues to the user's home dir, cygwin, and osx standard installation
    locations.
    Returns database path if found, else None."""
    # Figure out database dir....
    # 1) Explicit override via the PYROSETTA_DATABASE environment variable.
    if 'PYROSETTA_DATABASE' in os.environ:
        database = os.path.abspath(os.environ['PYROSETTA_DATABASE'])
        if os.path.isdir(database):
            logger.info('PYROSETTA_DATABASE environment variable was set to: %s; using it....', database)
            return database
        else:
            # Invalid override: warn and fall through to the standard search.
            logger.warning('Invalid PYROSETTA_DATABASE environment variable was specified: %s', database)
    # 2) Build the ordered list of candidate locations; earlier entries win.
    candidate_paths = []
    database_names = ["rosetta_database", "database"]
    for database_name in database_names:
        #Package directory database
        candidate_paths.append(os.path.join(os.path.dirname(__file__), database_name))
        candidate_paths.append(os.path.join(os.path.dirname(__file__), "..", database_name))
    for database_name in database_names:
        #Current directory database
        candidate_paths.append(database_name)
        #Home directory database
        if 'HOME' in os.environ:
            candidate_paths.append(os.path.join(os.environ['HOME'], database_name))
        #Cygwin root install
        if sys.platform == "cygwin":
            candidate_paths.append(os.path.join('/', database_name))
        # Mac /usr/lib database install
        candidate_paths.append(os.path.join('rosetta', database_name))
    # 3) First existing directory wins.
    for candidate in candidate_paths:
        if os.path.isdir(candidate):
            database = os.path.abspath(candidate)
            logger.info('Found rosetta database at: %s; using it....', database)
            return database
    # No database found.
    return None
def _is_interactive():
"""Determine if in an interactive context.
See: https://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode
"""
import __main__ as main
return not hasattr(main, '__file__')
def init(options='-ex1 -ex2aro', extra_options='', set_logging_handler=None, notebook=None, silent=False):
    """Initialize Rosetta. Includes core data and global options.
    options string with default Rosetta command-line options args.
            (default: '-ex1 -ex2aro')
    kargs -
        extra_options - Extra command line options to pass rosetta init.
                        (default None)
        set_logging_handler - Route rosetta tracing through logging logger 'rosetta':
            None - Set handler if interactive, otherwise not.
            False - Write logs via c++-level filehandles.
            "interactive" - Register python log handling and make visible if not.
            "logging" - Register python log handling, do not update logging config.
            True - Register python log handling, make visible if logging isn't configured.
        notebook - DEPRECATED; use set_logging_handler='interactive' instead.
        silent - If True, suppress printing the version banner to stdout.
    Examples:
        init()                     # uses default flags
        init(extra_options='-pH')  # adds flags to supplement the default
        init('-pH -database /home/me/pyrosetta/rosetta_database')  # overrides default flags - be sure to include the dB last
    """
    # Resolve the logging mode: default to interactive handling when running
    # in an interactive session; the legacy `notebook` flag maps to the same.
    if set_logging_handler is None and _is_interactive():
        set_logging_handler = "interactive"
    elif notebook is not None:
        warnings.warn(
            "pyrosetta.init 'notebook' argument is deprecated and may be removed in 2018. "
            "See set_logging_handler='interactive'.",
            stacklevel=2
        )
        set_logging_handler = "interactive"
    assert set_logging_handler in (None, True, False, "interactive", "logging")
    logging_support.maybe_initialize_handler(set_logging_handler)
    if (set_logging_handler):
        logging_support.set_logging_sink()
    # Assemble the argv-style option vector passed to Rosetta's C++ init.
    args = ['PyRosetta'] + options.split() + extra_options.split()
    # Attempt to resolve database location from environment if not present, else fallback
    # to rosetta's standard resolution
    if not "-database" in args:
        database = _rosetta_database_from_env()
        if database is not None: args.extend(["-database", database])
    v = rosetta.utility.vector1_string()
    v.extend(args)
    if not silent: print( version() )
    logger.info( version() )
    rosetta.protocols.init.init(v)
    pyrosetta.protocols.h5_fragment_store_provider.init_H5FragmentStoreProvider()
# FIXME: create 'version' struct in utility instead
def _version_string():
    """Build 'package x.y.commit', dropping the last version component when
    the commit id already starts with it (avoids repeating the hash)."""
    parts = rosetta.utility.Version.version().split(".")
    commit = rosetta.utility.Version.commit()
    if commit.startswith(parts[-1]):
        parts.pop()
    parts.append(commit)
    return rosetta.utility.Version.package() + " " + ".".join(parts)
def version():
    """Return the full multi-line PyRosetta version banner."""
    banner_parts = [
        "PyRosetta-4 ",
        rosetta.utility.Version.date().split("-").pop(0),
        " [Rosetta ",
        _version_string(),
        ' ',
        rosetta.utility.Version.date(),
        "] retrieved from: ",
        rosetta.utility.Version.url(),
        "\n(C) Copyright Rosetta Commons Member Institutions. Created in JHU by Sergey Lyskov and PyRosetta Team.",
    ]
    return "".join(banner_parts)
###############################################################################
# Vector compatibility: Adding 'extend' to all utility.vector* functions
def _vector_extend_func(vec, othervec):
for i in othervec: vec.append(i)
# Monkey-patch every utility.vector1_*/vector0_*/vectorL_* class so instances
# gain a list-like .extend() method.
for k, vectype in rosetta.utility.__dict__.items():
    if k.startswith("vector1_") or k.startswith("vector0_") or k.startswith("vectorL_"): vectype.extend = _vector_extend_func
def Vector1(list_in):
    """Create a utility.vector1_* object, deducing the element type from `list_in`.

    Deduction order matters: bool is tested before int because bool is a
    subclass of int in Python. An empty list yields a vector1_bool.
    Raises Exception for mixed-type or unsupported-type lists.
    """
    # Use generator expressions (no throwaway lists) for the type probes.
    if all(isinstance(x, bool) for x in list_in):
        t = rosetta.utility.vector1_bool
    elif all(isinstance(x, int) for x in list_in):
        t = rosetta.utility.vector1_int
    elif all(isinstance(x, float) or isinstance(x, int) for x in list_in):
        t = rosetta.utility.vector1_double
    elif all(isinstance(x, str) for x in list_in):
        t = rosetta.utility.vector1_string
    elif all(isinstance(x, rosetta.core.id.AtomID) for x in list_in):
        t = rosetta.utility.vector1_AtomID
    else:
        # Typos fixed in the error message ("attemting"/"unknow").
        raise Exception('Vector1: attempting to create vector of unknown type ' +
                        'or mixed type vector init_list = ' + str(list_in))
    v = t()
    for i in list_in:
        v.append(i)
    return v
def Set(list_in):
    """Create a ``utility.set_*`` object, deducing the element type from `list_in`.

    Supported element types: int, float (may be mixed with int), and str.

    :raises Exception: for empty-typed, mixed, or unsupported element types.
    """
    if all([isinstance(x, int) for x in list_in]):
        t = rosetta.utility.set_int
    elif all([isinstance(x, float) or isinstance(x, int) for x in list_in]):
        t = rosetta.utility.set_double
    elif all([isinstance(x, str) for x in list_in]):
        t = rosetta.utility.set_string
    else:
        # Fixed spelling of the error message ("attemting"/"unknow").
        raise Exception('Set: attempting to create set of unknown type ' +
                        'or mixed type init_list = ' + str(list_in))
    s = t()
    for i in list_in:
        s.add(i)
    return s
###############################################################################
# New methods.
def generate_nonstandard_residue_set(pose, params_list):
    """
    Places the ResidueTypes corresponding to a list of .params filenames into a given pose
    .params files must be generated beforehand. Typically, one would obtain a
    molfile (.mdl) generated from the xyz coordinates of a residue, small
    molecule, or ion. The script molfile_to_params.py can be used to convert
    to a Rosetta-readable .params file. It can be found in the /test/tools
    folder of your PyRosetta installation or downloaded from the Rosetta
    Commons.
    Example:
        params = ["penicillin.params", "amoxicillin.params"]
        pose = Pose()
        generate_nonstandard_residue_set(pose, params)
        pose_from_file(pose, "TEM-1_with_substrates.pdb")
    See also:
        ResidueTypeSet
        Vector1()
        pose_from_file()
    """
    # Get a modifiable copy of the pose's residue type set, load the extra
    # base residue types from the .params files, then install it back.
    res_set = pose.conformation().modifiable_residue_type_set_for_conf()
    res_set.read_files_for_base_residue_types(Vector1(params_list))
    pose.conformation().reset_residue_type_set_for_conf( res_set )
    # Return the residue type set now in effect for the pose.
    return pose.residue_type_set_for_pose()
def standard_task_factory():
    """Build a TaskFactory preloaded with the standard packing task operations."""
    factory = rosetta.core.pack.task.TaskFactory()
    factory.push_back(rosetta.core.pack.task.operation.InitializeFromCommandline())
    # NOTE: IncludeCurrent is deliberately left out here.
    # factory.push_back(rosetta.core.pack.task.operation.IncludeCurrent())
    factory.push_back(rosetta.core.pack.task.operation.NoRepackDisulfides())
    return factory
def standard_packer_task(pose):
    """Create a PackerTask for `pose` from the standard task factory."""
    return standard_task_factory().create_task_and_apply_taskoperations(pose)
###############################################################################
# Decorator generation for custom PyRosetta energy methods.
# Module-level lists holding every EnergyMethod / Creator instance created by
# the decorator machinery below (apparently to keep Python references alive
# while they are registered on the Rosetta side -- TODO confirm).
_mem_EnergyMethods_ = []
_mem_EnergyCreators_ = []
from collections import namedtuple
# CD describes one assignable ScoreType range: the required base class (or
# None for "any"), the first/last ScoreType slots, and a dict of slots
# already handed out (scoreType -> score name).
CD = namedtuple("CD", "base first last methods")
_ScoreTypesRegistryByType_ = [
    CD(base=rosetta.core.scoring.methods.ContextIndependentTwoBodyEnergy,
       first=rosetta.core.scoring.PyRosettaTwoBodyContextIndepenedentEnergy_first,
       last=rosetta.core.scoring.PyRosettaTwoBodyContextIndepenedentEnergy_last,
       methods={}),
    CD(base=rosetta.core.scoring.methods.ContextDependentTwoBodyEnergy,
       first=rosetta.core.scoring.PyRosettaTwoBodyContextDependentEnergy_first,
       last=rosetta.core.scoring.PyRosettaTwoBodyContextDependentEnergy_last,
       methods={}),
    CD(base=None,
       first=rosetta.core.scoring.PyRosettaEnergy_first,
       last=rosetta.core.scoring.PyRosettaEnergy_last,
       methods={}),
]
# Flat mapping of every assigned ScoreType value -> score name.
ScoreTypesRegistry = {}
def defineEnergyMethodCreator(class_, scoreType):
    """Dynamically build an EnergyMethodCreator subclass for `class_`.

    The returned class carries `class_` as ``EnergyMethodClass`` and the
    given `scoreType` as class attributes.
    """
    class Abstract_EnergyMethodCreator(
            rosetta.core.scoring.methods.EnergyMethodCreator):
        def __init__(self):
            rosetta.core.scoring.methods.EnergyMethodCreator.__init__(self)
        def create_energy_method(self, energy_method_options):
            # Instantiate the Python EnergyMethod and stash a reference in the
            # module-level list (apparently to prevent GC -- TODO confirm).
            e = self.EnergyMethodClass()
            _mem_EnergyMethods_.append(e)
            return e
        def score_types_for_method(self):
            # Each creator handles exactly one ScoreType.
            sts = rosetta.utility.vector1_core_scoring_ScoreType()
            sts.append(self.scoreType)
            return sts
    # Name the concrete creator after the wrapped class, e.g. Foo_Creator.
    class_name = class_.__name__ + '_Creator'
    new_class = type(class_name, (Abstract_EnergyMethodCreator,),
                     {'EnergyMethodClass': class_,
                      'scoreType': rosetta.core.scoring.ScoreType(scoreType)})
    return new_class
class EnergyMethod:
    """
    Decorator function for custom EnergyMethods in PyRosetta.

    Usage: ``@EnergyMethod()`` (or with explicit scoreName/scoreType) on a
    class deriving from a Rosetta EnergyMethod base.  The decorator assigns a
    free ScoreType slot, creates and registers a matching Creator, and fills
    in default ``clone``/``version``/``indicate_required_context_graphs``
    methods when the class does not define them.
    """
    def __init__(self, scoreName=None, scoreType=None, version=1):
        # scoreName defaults to the decorated class name; scoreType is
        # auto-assigned from the first free PyRosetta slot when omitted.
        self.scoreName = scoreName
        self.scoreType = scoreType
        self.version = version
    def __call__(self, original_class):
        self.scoreName = self.scoreName or original_class.__name__
        # Try to automatically determine first available scoreType.
        if not self.scoreType:
            for s in _ScoreTypesRegistryByType_:
                # Pick the first range whose base class matches (base=None
                # acts as a catch-all).
                if not s.base or issubclass(original_class, s.base):
                    # Next slot after the highest one already assigned in
                    # this range (or the range start if none assigned yet).
                    self.scoreType = max(s.methods.keys() or [int(s.first) - 1]) + 1
                    if self.scoreType > int(s.last):
                        err_msg = 'Cannot find free ScoreType to create %s! (looking in range [%s, %s])' % (self.scoreName, s.first, s.last)
                        raise Exception(err_msg)
                    s.methods[self.scoreType] = self.scoreName
                    ScoreTypesRegistry[self.scoreType] = self.scoreName
                    break
        def _clone(self):
            # Default clone(): new instance, kept alive in _mem_EnergyMethods_.
            _mem_EnergyMethods_.append( self.__class__() )
            return _mem_EnergyMethods_[-1]
        def _f_version(self):
            # NOTE(review): once installed as original_class.version, the
            # attribute lookup `self.version` resolves to this method itself,
            # so the call returns a bound method rather than an int -- confirm
            # this is intended.
            return self.version
        def _indicate_required_context_graphs(self, v):
            pass
        creator = defineEnergyMethodCreator(original_class, self.scoreType)
        # Install default implementations only when the class lacks its own.
        if 'clone' not in original_class.__dict__:
            original_class.clone = _clone
        if 'version' not in original_class.__dict__:
            original_class.version = _f_version
        if 'indicate_required_context_graphs' not in original_class.__dict__:
            original_class.indicate_required_context_graphs = _indicate_required_context_graphs
        original_class.creator = creator
        original_class.scoreType = rosetta.core.scoring.ScoreType(self.scoreType)
        # Instantiate and register the creator with the Rosetta scoring system.
        _mem_EnergyCreators_.append( creator() )
        rosetta.core.scoring.methods.PyEnergyMethodRegistrator(_mem_EnergyCreators_[-1])
        return original_class
| [
"36790013+MedicaicloudLink@users.noreply.github.com"
] | 36790013+MedicaicloudLink@users.noreply.github.com |
ecb2e410d0ae0ebcdf8a1a3a003cd918ee96c2e2 | 9127a5582694a055e7c64ae65ae97a11728ff82f | /cunt/pools/pool_wallet_info.py | 7400e5ee4d419ef638f85277bf296554277e9d21 | [
"Apache-2.0"
] | permissive | nahvan/cunt-blockchain | 0f881df58f3ca5fe554b52a025437053df6f1037 | 447084a809ec0339bcd24f7141f39ee0e8dddffa | refs/heads/main | 2023-06-24T16:47:31.099801 | 2021-07-29T21:17:02 | 2021-07-29T21:17:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,502 | py | from dataclasses import dataclass
from enum import IntEnum
from typing import Optional, Dict
from blspy import G1Element
from cunt.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from cunt.types.blockchain_format.coin import Coin
from cunt.types.blockchain_format.program import Program
from cunt.types.blockchain_format.sized_bytes import bytes32
from cunt.util.byte_types import hexstr_to_bytes
from cunt.util.ints import uint32, uint8
from cunt.util.streamable import streamable, Streamable
class PoolSingletonState(IntEnum):
    """
    From the user's point of view, a pool group can be in these states:
    `SELF_POOLING`: The singleton exists on the blockchain, and we are farming
    block rewards to a wallet address controlled by the user
    `LEAVING_POOL`: The singleton exists, and we have entered the "escaping" state, which
    means we are waiting for a number of blocks = `relative_lock_height` to pass, so we can leave.
    `FARMING_TO_POOL`: The singleton exists, and it is assigned to a pool.
    (`CLAIMING_SELF_POOLED_REWARDS` is described in older docs but is not a
    member of this enum.)
    """
    SELF_POOLING = 1
    LEAVING_POOL = 2
    FARMING_TO_POOL = 3
# Module-level aliases so callers can use the bare names.
SELF_POOLING = PoolSingletonState.SELF_POOLING
LEAVING_POOL = PoolSingletonState.LEAVING_POOL
FARMING_TO_POOL = PoolSingletonState.FARMING_TO_POOL
@dataclass(frozen=True)
@streamable
class PoolState(Streamable):
    """
    `PoolState` is a type that is serialized to the blockchain to track the state of the user's pool singleton
    `target_puzzle_hash` is either the pool address, or the self-pooling address that pool rewards will be paid to.
    `target_puzzle_hash` is NOT the p2_singleton puzzle that block rewards are sent to.
    The `p2_singleton` address is the initial address, and the `target_puzzle_hash` is the final destination.
    `relative_lock_height` is zero when in SELF_POOLING state
    NOTE(review): field order presumably defines the streamable wire format --
    do not reorder without confirming.
    """
    version: uint8
    state: uint8 # PoolSingletonState
    # `target_puzzle_hash`: A puzzle_hash we pay to
    # When self-farming, this is a main wallet address
    # When farming-to-pool, the pool sends this to the farmer during pool protocol setup
    target_puzzle_hash: bytes32 # TODO: rename target_puzzle_hash -> pay_to_address
    # owner_pubkey is set by the wallet, once
    owner_pubkey: G1Element
    pool_url: Optional[str]
    relative_lock_height: uint32
def initial_pool_state_from_dict(state_dict: Dict, owner_pubkey: G1Element, owner_puzzle_hash: bytes32) -> PoolState:
    """Build the user's initial PoolState from a configuration dictionary.

    Only SELF_POOLING and FARMING_TO_POOL are legal initial states; any other
    state raises ValueError.
    """
    singleton_state: PoolSingletonState = PoolSingletonState[state_dict["state"]]
    if singleton_state == FARMING_TO_POOL:
        # Destination address, pool URL and lock height all come from config.
        target_puzzle_hash = bytes32(hexstr_to_bytes(state_dict["target_puzzle_hash"]))
        pool_url = state_dict["pool_url"]
        relative_lock_height = uint32(state_dict["relative_lock_height"])
    elif singleton_state == SELF_POOLING:
        # Rewards are paid straight to the owner's own puzzle hash.
        target_puzzle_hash = owner_puzzle_hash
        pool_url = ""
        relative_lock_height = uint32(0)
    else:
        raise ValueError("Initial state must be SELF_POOLING or FARMING_TO_POOL")
    # TODO: change create_pool_state to return error messages, as well
    assert relative_lock_height is not None
    return create_pool_state(singleton_state, target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height)
def create_pool_state(
    state: PoolSingletonState,
    target_puzzle_hash: bytes32,
    owner_pubkey: G1Element,
    pool_url: Optional[str],
    relative_lock_height: uint32,
) -> PoolState:
    """Construct a PoolState, validating that `state` is a known singleton state.

    :raises AssertionError: if `state` is not a valid PoolSingletonState value.
    """
    if state not in set(s.value for s in PoolSingletonState):
        # Bug fix: the message lacked the f-prefix, so "{state}" was emitted
        # literally instead of the offending value.
        raise AssertionError(f"state {state} is not a valid PoolSingletonState,")
    ps = PoolState(
        POOL_PROTOCOL_VERSION, uint8(state), target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height
    )
    # TODO Move verify here
    return ps
@dataclass(frozen=True)
@streamable
class PoolWalletInfo(Streamable):
    """
    Internal Pool Wallet state, not destined for the blockchain. This can be completely derived with
    the Singleton's CoinSolutions list, or with the information from the WalletPoolStore.
    NOTE(review): field order presumably defines the streamable layout --
    do not reorder without confirming.
    """
    current: PoolState
    target: Optional[PoolState]          # pending state transition, if any
    launcher_coin: Coin
    launcher_id: bytes32
    p2_singleton_puzzle_hash: bytes32
    current_inner: Program # Inner puzzle in current singleton, not revealed yet
    tip_singleton_coin_id: bytes32
    singleton_block_height: uint32 # Block height that current PoolState is from
| [
"svginsomnia@gmail.com"
] | svginsomnia@gmail.com |
72c779db595f8a965e534bfecb6c2abac1be83d7 | b49e1aa873f8bc6669eea59acad1f5a01b3a94b5 | /amset/scattering/elastic.py | cbd59d4cc6c13fb872886a2827df4934a14ca962 | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-hdf5",
"BSD-2-Clause"
] | permissive | wangvei/amset | 981222f9e00cdf2a4a2010a9a1d727b3ce2f0bf4 | 1e341d68bd03eef47916e680e687fc085966a0c0 | refs/heads/master | 2023-01-21T14:43:08.303303 | 2020-11-30T13:25:50 | 2020-11-30T13:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,792 | py | import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, Tuple
import numpy as np
from pymatgen import Spin
from tabulate import tabulate
from amset.constants import (
boltzmann_au,
coulomb_to_au,
ev_to_hartree,
gpa_to_au,
m_to_bohr,
s_to_au,
)
from amset.core.data import AmsetData, check_nbands_equal
from amset.interpolation.deformation import DeformationPotentialInterpolator
from amset.scattering.common import calculate_inverse_screening_length_sq
__author__ = "Alex Ganose"
__maintainer__ = "Alex Ganose"
__email__ = "aganose@lbl.gov"
logger = logging.getLogger(__name__)
class AbstractElasticScattering(ABC):
    """Base class for elastic scattering mechanisms.

    Subclasses declare a short ``name`` and the material ``required_properties``
    they consume, and implement ``prefactor`` (q-independent part) and
    ``factor`` (q-dependent part) of the scattering rate.
    """
    name: str
    required_properties: Tuple[str]
    def __init__(self, properties, doping, temperatures, nbands):
        self.properties = properties
        self.doping = doping
        self.temperatures = temperatures
        self.nbands = nbands
        self.spins = list(nbands.keys())
    @classmethod
    def from_amset_data(
        cls, materials_properties: Dict[str, Any], amset_data: AmsetData
    ):
        """Construct the scatterer from materials properties and AmsetData."""
        return cls(
            cls.get_properties(materials_properties),
            amset_data.doping,
            amset_data.temperatures,
            cls.get_nbands(amset_data),
        )
    @abstractmethod
    def prefactor(self, spin: Spin, b_idx: int):
        """Return the q-independent part of the scattering rate."""
        pass
    @abstractmethod
    def factor(
        self,
        unit_q: np.array,
        norm_q_sq: np.ndarray,
        spin: Spin,
        band_idx: int,
        kpoint: np.ndarray,
        velocity: np.ndarray,
    ):
        """Return the q-dependent part of the scattering rate."""
        pass
    def to_reference(self):
        # Plain-data tuple from which from_reference can rebuild the object.
        return self.properties, self.doping, self.temperatures, self.nbands
    @classmethod
    def from_reference(cls, properties, doping, temperatures, nbands):
        # Inverse of to_reference.
        return cls(properties, doping, temperatures, nbands)
    @classmethod
    def get_properties(cls, materials_properties):
        # Keep only the properties this mechanism declares as required.
        return {p: materials_properties[p] for p in cls.required_properties}
    @staticmethod
    def get_nbands(amset_data):
        # Number of bands per spin channel.
        return {s: len(amset_data.energies[s]) for s in amset_data.spins}
class AcousticDeformationPotentialScattering(AbstractElasticScattering):
    """Acoustic deformation potential (ADP) scattering mechanism."""
    name = "ADP"
    required_properties = ("deformation_potential", "elastic_constant")
    def __init__(
        self,
        properties,
        doping,
        temperatures,
        nbands,
        deformation_potential,
        vb_idx,
        is_metal,
        fermi_levels,
    ):
        super().__init__(properties, doping, temperatures, nbands)
        self._prefactor = boltzmann_au * s_to_au
        # elastic constant converted from GPa to atomic units
        self.elastic_constant = self.properties["elastic_constant"] * gpa_to_au
        self.deformation_potential = deformation_potential
        self.vb_idx = vb_idx
        self.is_metal = is_metal
        self.fermi_levels = fermi_levels
    @classmethod
    def from_amset_data(
        cls, materials_properties: Dict[str, Any], amset_data: AmsetData
    ):
        """Create the scatterer, normalising the deformation potential input.

        Accepted forms: a file path (interpolated band/k-dependent values),
        a single number, or a (valence, conduction) pair; numeric values are
        converted from eV to Hartree.
        """
        vb_idx = amset_data.vb_idx
        is_metal = amset_data.is_metal
        deformation_potential = materials_properties["deformation_potential"]
        if isinstance(deformation_potential, (str, Path)):
            # File-based, k- and band-resolved deformation potentials.
            deformation_potential = DeformationPotentialInterpolator.from_file(
                deformation_potential, scale=ev_to_hartree
            )
            equal = check_nbands_equal(deformation_potential, amset_data)
            if not equal:
                raise RuntimeError(
                    "Deformation potential file does not contain the correct number of"
                    " bands\nEnsure it was generated using the same energy_cutoff as "
                    "this AMSET run."
                )
        elif is_metal and isinstance(deformation_potential, tuple):
            logger.warning(
                "System is metallic but deformation potentials for both "
                "the valence and conduction bands have been set... using the "
                "valence band potential for all bands"
            )
            deformation_potential = deformation_potential[0] * ev_to_hartree
        elif is_metal:
            deformation_potential = deformation_potential * ev_to_hartree
        elif not is_metal and not isinstance(deformation_potential, tuple):
            logger.warning(
                "System is semiconducting but only one deformation "
                "potential has been set... using this potential for all bands."
            )
            # Duplicate the single value for (valence, conduction).
            deformation_potential = (
                deformation_potential * ev_to_hartree,
                deformation_potential * ev_to_hartree,
            )
        else:
            deformation_potential = (
                deformation_potential[0] * ev_to_hartree,
                deformation_potential[1] * ev_to_hartree,
            )
        return cls(
            cls.get_properties(materials_properties),
            amset_data.doping,
            amset_data.temperatures,
            cls.get_nbands(amset_data),
            deformation_potential,
            vb_idx,
            is_metal,
            amset_data.fermi_levels,
        )
    def prefactor(self, spin: Spin, b_idx: int):
        """Temperature-proportional prefactor with shape (ndops, ntemps)."""
        prefactor = (
            self._prefactor
            * self.temperatures[None, :]
            * np.ones((len(self.doping), len(self.temperatures)))
        )
        return prefactor
    def factor(
        self,
        unit_q: np.array,
        norm_q_sq: np.ndarray,
        spin: Spin,
        band_idx: int,
        kpoint: np.ndarray,
        velocity: np.ndarray,
    ):
        """q-dependent factor built from the Christoffel-equation modes."""
        christoffel_tensors = get_christoffel_tensors(self.elastic_constant, unit_q)
        # Eigen-solve gives (stiffness, polarization) for the two transverse
        # and one longitudinal mode per q direction.
        (
            (c_trans_a, c_trans_b, c_long),
            (v_trans_a, v_trans_b, v_long),
        ) = solve_christoffel_equation(christoffel_tensors)
        if isinstance(self.deformation_potential, DeformationPotentialInterpolator):
            deform = self.deformation_potential.interpolate(spin, [band_idx], [kpoint])
            deform = np.abs(deform[0])
            deform += np.outer(velocity, velocity)  # velocity correction
            strain_long, strain_trans_a, strain_trans_b = prepare_acoustic_strains(
                unit_q, v_long, v_trans_a, v_trans_b
            )
            # Double-dot each unit strain tensor with the deformation tensor.
            factor = (
                np.tensordot(strain_long, deform) ** 2 / c_long
                + np.tensordot(strain_trans_a, deform) ** 2 / c_trans_a
                + np.tensordot(strain_trans_b, deform) ** 2 / c_trans_b
            )
        elif self.is_metal:
            factor = self.deformation_potential ** 2 / c_long
        else:
            # Pick the conduction- (1) or valence-band (0) potential.
            def_idx = 1 if band_idx > self.vb_idx[spin] else 0
            factor = self.deformation_potential[def_idx] ** 2 / c_long
        # Broadcast over (ndops, ntemps, nq).
        return factor[None, None] * np.ones(self.fermi_levels.shape + norm_q_sq.shape)
    def to_reference(self):
        """Serializable state; an interpolator is flattened to its reference."""
        base_reference = super().to_reference()
        if isinstance(self.deformation_potential, DeformationPotentialInterpolator):
            deformation_reference = self.deformation_potential.to_reference()
            is_interpolator = True
        else:
            deformation_reference = self.deformation_potential
            is_interpolator = False
        return base_reference + (
            deformation_reference,
            self.vb_idx,
            self.is_metal,
            self.fermi_levels,
            is_interpolator,
        )
    @classmethod
    def from_reference(
        cls,
        properties,
        doping,
        temperatures,
        nbands,
        deformation_reference,
        vb_idx,
        is_metal,
        fermi_levels,
        is_interpolator,
    ):
        """Inverse of to_reference."""
        if is_interpolator:
            deformation_potential = DeformationPotentialInterpolator.from_reference(
                *deformation_reference
            )
        else:
            deformation_potential = deformation_reference
        return cls(
            properties,
            doping,
            temperatures,
            nbands,
            deformation_potential,
            vb_idx,
            is_metal,
            fermi_levels,
        )
def prepare_acoustic_strains(unit_q, v_long, v_trans_a, v_trans_b):
    """Align polarizations with q and return the three unit strain tensors.

    The sign of q·v_long decides a per-row flip that is applied in place to
    all three polarization arrays, so the longitudinal polarization points
    along the propagation direction.  Each returned tensor is the batch of
    outer products q_n ⊗ v_n.
    """
    # Row-wise dot products q·v_long select the orientation sign.
    alignment = np.sign(np.einsum("ij,ij->i", unit_q, v_long))[:, None]
    v_long *= alignment
    v_trans_a *= alignment
    v_trans_b *= alignment
    def _outer(polarization):
        # [n, i, j] = q[n, i] * v[n, j]
        return unit_q[:, :, None] * polarization[:, None, :]
    return _outer(v_long), _outer(v_trans_a), _outer(v_trans_b)
def get_christoffel_tensors(elastic_constant, unit_q):
    """Contract the stiffness tensor with each direction: Γ_njk = C_ijkl q_ni q_nl."""
    return np.einsum("ni,ijkl,nl->njk", unit_q, elastic_constant, unit_q)
def solve_christoffel_equation(christoffel_tensors):
    """Diagonalize each symmetric Christoffel tensor in the batch.

    Returns ``(eigenvalues, eigenvectors)`` where eigenvalues has shape
    (3, n) (ascending per tensor, as np.linalg.eigh orders them) and
    eigenvectors has shape (3, n, 3): eigenvectors[k][m] is the k-th
    eigenvector of tensor m.
    """
    values, vectors = np.linalg.eigh(christoffel_tensors)
    return np.transpose(values), np.transpose(vectors, (2, 0, 1))
def get_unit_strain_tensors(propagation_vectors, polarization_vectors):
    """Return the batch of outer products q_n ⊗ v_n (unit strain tensors)."""
    return np.einsum("ni,nj->nij", propagation_vectors, polarization_vectors)
class IonizedImpurityScattering(AbstractElasticScattering):
    """Ionized impurity (IMP) scattering with a screened Coulomb interaction."""
    name = "IMP"
    required_properties = ("acceptor_charge", "donor_charge", "static_dielectric")
    def __init__(
        self,
        properties,
        doping,
        temperatures,
        nbands,
        impurity_concentration,
        inverse_screening_length_sq,
    ):
        super().__init__(properties, doping, temperatures, nbands)
        self._prefactor = impurity_concentration * s_to_au * np.pi
        self.inverse_screening_length_sq = inverse_screening_length_sq
    @classmethod
    def from_amset_data(
        cls, materials_properties: Dict[str, Any], amset_data: AmsetData
    ):
        """Compute screening lengths and effective impurity concentrations."""
        from amset.constants import bohr_to_cm
        # Isotropic average of the static dielectric tensor eigenvalues.
        avg_diel = np.linalg.eigvalsh(materials_properties["static_dielectric"]).mean()
        inverse_screening_length_sq = calculate_inverse_screening_length_sq(
            amset_data, avg_diel
        )
        imp_info = []
        impurity_concentration = np.zeros(amset_data.fermi_levels.shape)
        for n, t in np.ndindex(inverse_screening_length_sq.shape):
            n_conc = np.abs(amset_data.electron_conc[n, t])
            p_conc = np.abs(amset_data.hole_conc[n, t])
            # Carrier concentrations weighted by the squared impurity charges.
            impurity_concentration[n, t] = (
                n_conc * materials_properties["donor_charge"] ** 2
                + p_conc * materials_properties["acceptor_charge"] ** 2
            )
            imp_info.append(
                (
                    amset_data.doping[n] * (1 / bohr_to_cm) ** 3,
                    amset_data.temperatures[t],
                    inverse_screening_length_sq[n, t],
                    impurity_concentration[n, t] * (1 / bohr_to_cm) ** 3,
                )
            )
        logger.info("Inverse screening length (β) and impurity concentration (Nᵢᵢ):")
        table = tabulate(
            imp_info,
            headers=("conc [cm⁻³]", "temp [K]", "β² [a₀⁻²]", "Nᵢᵢ [cm⁻³]"),
            numalign="right",
            stralign="center",
            floatfmt=(".2e", ".1f", ".2e", ".2e"),
        )
        logger.info(table)
        return cls(
            cls.get_properties(materials_properties),
            amset_data.doping,
            amset_data.temperatures,
            cls.get_nbands(amset_data),
            impurity_concentration,
            inverse_screening_length_sq,
        )
    def prefactor(self, spin: Spin, b_idx: int):
        # need to return prefactor with shape (nspins, ndops, ntemps, nbands)
        # NOTE(review): the returned array has the impurity-concentration
        # shape (ndops, ntemps); confirm the comment above against callers.
        return self._prefactor
    def factor(
        self,
        unit_q: np.array,
        norm_q_sq: np.ndarray,
        spin: Spin,
        band_idx: int,
        kpoint: np.ndarray,
        velocity: np.ndarray,
    ):
        """Screened Coulomb q-dependence: 1 / (ε_q² (q² + β²)²)."""
        static_tensor = self.properties["static_dielectric"] / (4 * np.pi)
        # Directional dielectric constant q·ε·q for each unit q.
        static_diel = np.einsum("ij,ij->i", unit_q, np.dot(static_tensor, unit_q.T).T)
        diel_factor = (1 / static_diel) ** 2
        return (
            diel_factor[None, None]
            / (norm_q_sq[None, None] + self.inverse_screening_length_sq[..., None]) ** 2
        )
class PiezoelectricScattering(AbstractElasticScattering):
    """Piezoelectric (PIE) scattering mechanism."""
    name = "PIE"
    required_properties = (
        "piezoelectric_constant",
        "elastic_constant",
        "high_frequency_dielectric",
        "free_carrier_screening",
    )
    def __init__(
        self,
        properties,
        doping,
        temperatures,
        nbands,
        piezoelectric_constant,
        inverse_screening_length_sq,
    ):
        super().__init__(properties, doping, temperatures, nbands)
        self.piezoelectric_constant = piezoelectric_constant
        self.inverse_screening_length_sq = inverse_screening_length_sq
        self._prefactor = self.temperatures[None, :] * boltzmann_au * s_to_au
        # Bug fix: _shape was assigned np.ones(...) (an ndarray), but it is
        # used as a *shape tuple* in prefactor (np.ones(self._shape)) and
        # concatenated with norm_q_sq.shape in factor, matching the pattern
        # in AcousticDeformationPotentialScattering.
        self._shape = (len(self.doping), len(self.temperatures))
        self.elastic_constant = self.properties["elastic_constant"] * gpa_to_au
    @classmethod
    def from_amset_data(
        cls, materials_properties: Dict[str, Any], amset_data: AmsetData
    ):
        """Build the h piezoelectric coefficient and screening lengths."""
        # convert dielectric to atomic units
        shape = (len(amset_data.doping), len(amset_data.temperatures))
        e = materials_properties["piezoelectric_constant"]
        e *= coulomb_to_au / m_to_bohr ** 2  # convert to atomic units
        dielectric = materials_properties["high_frequency_dielectric"] / (4 * np.pi)
        inv_dielectric = np.linalg.inv(dielectric)
        # use h piezoelectric coefficient (Stress-Voltage)
        piezoelectric_constant = np.einsum("mn,mkl->nkl", inv_dielectric, e)
        if materials_properties["free_carrier_screening"]:
            avg_diel = np.linalg.eigvalsh(
                materials_properties["high_frequency_dielectric"]
            ).mean()
            inverse_screening_length_sq = calculate_inverse_screening_length_sq(
                amset_data, avg_diel
            )
        else:
            # fill with small value for numerical convergence
            inverse_screening_length_sq = np.full(shape, 1e-12)
        return cls(
            cls.get_properties(materials_properties),
            amset_data.doping,
            amset_data.temperatures,
            cls.get_nbands(amset_data),
            piezoelectric_constant,
            inverse_screening_length_sq,
        )
    def prefactor(self, spin: Spin, b_idx: int):
        # need to return prefactor with shape (ndops, ntemps)
        return self._prefactor * np.ones(self._shape)
    def factor(
        self,
        unit_q: np.array,
        norm_q_sq: np.ndarray,
        spin: Spin,
        band_idx: int,
        kpoint: np.ndarray,
        velocity: np.ndarray,
    ):
        """q-dependent factor from acoustic modes and the h coefficient."""
        christoffel_tensors = get_christoffel_tensors(self.elastic_constant, unit_q)
        (
            (c_trans_a, c_trans_b, c_long),
            (v_trans_a, v_trans_b, v_long),
        ) = solve_christoffel_equation(christoffel_tensors)
        strain_long, strain_trans_a, strain_trans_b = prepare_acoustic_strains(
            unit_q, v_long, v_trans_a, v_trans_b
        )
        # Piezoelectric coupling h·q for each direction.
        qh = np.einsum("ijk,nj->nik", self.piezoelectric_constant, unit_q)
        # einsum is double dot product along first axis
        factor = (
            np.einsum("nij,nij->n", strain_long, qh) ** 2 / c_long
            + np.einsum("nij,nij->n", strain_trans_a, qh) ** 2 / c_trans_a
            + np.einsum("nij,nij->n", strain_trans_b, qh) ** 2 / c_trans_b
        )
        return (
            factor[None, None]
            * np.ones(self._shape + norm_q_sq.shape)
            / (norm_q_sq[None, None, :] + self.inverse_screening_length_sq[..., None])
        )
| [
"alexganose@googlemail.com"
] | alexganose@googlemail.com |
dd0718fed67c8e6fbc08d84adfd6864150fac493 | 5f2103b1083b088aed3f3be145d01a770465c762 | /210. Course Schedule II.py | 68cd9b9b2fc5afeac0093f89da02788485c01257 | [] | no_license | supersj/LeetCode | 5605c9bcb5ddcaa83625de2ad9e06c3485220019 | 690adf05774a1c500d6c9160223dab7bcc38ccc1 | refs/heads/master | 2021-01-17T17:23:39.585738 | 2017-02-27T15:08:42 | 2017-02-27T15:08:42 | 65,526,089 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | class Solution(object):
def findOrder(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
innode = {}
graph = {}
haszero = 0
for i in range(numCourses):
graph[i] = []
innode[i] = 0
for ele in prerequisites:
graph[ele[0]].append(ele[1])
innode[ele[1]] += 1
while innode:
haszero = 0
for k,v in innode.items():
if v == 0:
haszero = 1
for ele in graph[k]:
innode[ele] -= 1
del innode[k]
del graph[k]
break
if haszero == 0:
return False
if graph:
return False
return True
| [
"ml@ml.ml"
] | ml@ml.ml |
1e90f2c55b41185c48d23dff1e6fb2d9fad2fd87 | 09cead98874a64d55b9e5c84b369d3523c890442 | /sj200917_python2m6/day02_200924/dict_11_methodlist.py | 5a508f734ba7992c2c1f3f0404da772b9b41c2af | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | """
dictionary methods
"""
"""
dict.clear() - remove all items
dict.copy() - generate a dictionary
dict.fromkeys() - generate a dictionary
dict.get(key) - get value by a key , v.s. dictname[key]
dict.items() - get items (key-value pairs)
dict.keys() - get keys
dict.values() - get values
dict.pop() - remove item by key
dict.popitem() - remove an item and return it
dict.update() - update dictionary
"""
mydict = {}
mydict = mydict.fromkeys([1,2,3,4])
print(mydict)
mydict = mydict.fromkeys([1,2,3,4],0)
print(mydict)
print(mydict.get(1))
print(mydict.get(5,'unknown'))
| [
"lada314@gmail.com"
] | lada314@gmail.com |
af08d772855a08671c2e692e3c7b06ad5fbcf4d6 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/Lazymux/routersploit/tests/exploits/routers/multi/test_misfortune_cookie.py | 8e606a9221a4791e9846362b46e42b3d6b09a037 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:6bcf1caec62792e74d178e8d2aa3078c7144e75b45a0fa53cc1c5451184f3f3e
size 695
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
c14014bc0cff2309d97cd09333fc79ea81f4c9c5 | a05c81cb118116d655e1d5d8aaefb2647e134def | /src/azure-cli-core/azure/cli/core/local_context.py | 654097bea7154bd2b5780882af546281e9a1d2fd | [
"MIT"
] | permissive | yungezz/azure-cli | 8acef71d30d937f2fc48981804fa66fd9e1c980d | c7b9f98ffe0f824fc16252a9e990d3b4da46b80c | refs/heads/master | 2023-03-15T14:22:26.085920 | 2020-07-14T11:41:19 | 2020-07-14T11:41:19 | 279,810,977 | 0 | 1 | MIT | 2021-02-08T08:09:06 | 2020-07-15T08:26:59 | Python | UTF-8 | Python | false | false | 9,030 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import shutil
import configparser
import enum
from knack.log import get_logger
from knack.config import _ConfigFile
ALL = 'all' # effective level of local context, ALL means all commands can share this parameter value
LOCAL_CONTEXT_FILE = '.local_context_{}' # each user has a separate file, an example is .local_context_username
LOCAL_CONTEXT_ON_OFF_CONFIG_SECTION = 'local_context'
LOCAL_CONTEXT_NOTICE = '; This file is used to store local context data.\n'\
'; DO NOT modify it manually unless you know it well.\n'
logger = get_logger(__name__)
class LocalContextAction(enum.Enum):
    """How a command argument participates in local context persistence."""
    SET = 1 # action for a parameter in local context, SET means its value will be saved to local context
    GET = 2 # action for a parameter in local context, GET means will read value from local context for this parameter
def _get_current_system_username():
try:
import getpass
return getpass.getuser()
except Exception: # pylint: disable=broad-except
pass
return None
class AzCLILocalContext(object):  # pylint: disable=too-many-instance-attributes
    """Per-user, per-directory persistence of CLI parameter values ("local context").

    Values are stored in a hidden config file (one per OS user) inside a
    config sub-directory of the working directory; lookups walk from the
    current directory upward.
    """
    def __init__(self, cli_ctx):
        self.cli_ctx = cli_ctx
        self.config = cli_ctx.config
        # Name of the per-directory config folder (same as the CLI config dir name).
        self.dir_name = os.path.basename(self.config.config_dir)
        self.username = None
        self.is_on = False
        self.current_dir = None
        # only used in get/set/effective_working_directory function, to avoid calling load files to many times.
        self._local_context_file = None
        self.initialize()
    def initialize(self):
        """Resolve the current user, the on/off switch, and the working directory."""
        self.username = _get_current_system_username()
        self.is_on = self.config.getboolean(LOCAL_CONTEXT_ON_OFF_CONFIG_SECTION, self.username, False) \
            if self.username else False
        try:
            self.current_dir = os.getcwd()
        except FileNotFoundError:
            # cwd was deleted underneath us; local context cannot be resolved.
            if self.is_on:
                logger.warning('The working directory has been deleted or recreated. Local context is ignored.')
        if self.is_on:
            self._local_context_file = self._get_local_context_file()
    def _get_local_context_file_name(self):
        # One persistence file per OS user.
        return LOCAL_CONTEXT_FILE.format(self.username)
    def _load_local_context_files(self, recursive=False):
        """Collect readable+writable context files, walking up from cwd when recursive."""
        local_context_files = []
        if self.username and self.current_dir:
            current_dir = self.current_dir
            while current_dir:
                dir_path = os.path.join(current_dir, self.dir_name)
                file_path = os.path.join(dir_path, self._get_local_context_file_name())
                if os.path.isfile(file_path) and os.access(file_path, os.R_OK) and os.access(file_path, os.W_OK):
                    local_context_files.append(_ConfigFile(dir_path, file_path, LOCAL_CONTEXT_NOTICE))
                    if not recursive:
                        break  # load only one local context
                # Stop if already in root drive
                if current_dir == os.path.dirname(current_dir):
                    break
                current_dir = os.path.dirname(current_dir)
        return local_context_files
    def _get_local_context_file(self):
        # The single nearest context file, or None when there is none.
        local_context_files = self._load_local_context_files(recursive=False)
        if len(local_context_files) == 1:
            return local_context_files[0]
        return None
    def effective_working_directory(self):
        """Directory whose local context file is in effect, or '' when none."""
        return os.path.dirname(self._local_context_file.config_dir) if self._local_context_file else ''
    def get(self, command, argument):
        """Look up `argument`, trying the most specific command scope first.

        Scopes are tried from the full command name down to each parent
        command group and finally the ALL section.
        """
        if self.is_on and self._local_context_file:
            command_parts = command.split()
            while True:
                section = ' '.join(command_parts) if command_parts else ALL
                try:
                    return self._local_context_file.get(section.lower(), argument)
                except (configparser.NoSectionError, configparser.NoOptionError):
                    pass
                if not command_parts:
                    break
                # Fall back to the parent command group.
                command_parts = command_parts[:-1]
        return None
    def set(self, scopes, argument, value):
        """Persist `argument` = `value` under each scope, creating the file lazily."""
        if self.is_on and self.username and self.current_dir:
            if self._local_context_file is None:
                file_path = os.path.join(self.current_dir, self.dir_name, self._get_local_context_file_name())
                dir_path = os.path.join(self.current_dir, self.dir_name)
                self._local_context_file = _ConfigFile(dir_path, file_path, LOCAL_CONTEXT_NOTICE)
            for scope in scopes:
                self._local_context_file.set_value(scope.lower(), argument, value)
    def turn_on(self):
        """Enable local context for the current user and load the nearest file."""
        self.config.set_value(LOCAL_CONTEXT_ON_OFF_CONFIG_SECTION, self.username, 'on')
        self.is_on = self.config.getboolean(LOCAL_CONTEXT_ON_OFF_CONFIG_SECTION, self.username, False)
        self._local_context_file = self._get_local_context_file()
    def turn_off(self):
        """Disable local context for the current user."""
        self.config.remove_option(LOCAL_CONTEXT_ON_OFF_CONFIG_SECTION, self.username)
        self.is_on = self.config.getboolean(LOCAL_CONTEXT_ON_OFF_CONFIG_SECTION, self.username, False)
        self._local_context_file = None
    def delete_file(self, recursive=False):
        """Delete context file(s) (and their directory when emptied); best-effort."""
        local_context_files = self._load_local_context_files(recursive=recursive)
        for local_context_file in local_context_files:
            try:
                os.remove(local_context_file.config_path)
                parent_dir = os.path.dirname(local_context_file.config_path)
                # Remove the config directory too once it is empty.
                if not os.listdir(parent_dir):
                    shutil.rmtree(parent_dir)
                logger.warning('Local context persistence file in working directory %s is deleted.',
                               os.path.dirname(local_context_file.config_dir))
            except Exception:  # pylint: disable=broad-except
                logger.warning('Fail to delete local context persistence file in working directory %s',
                               os.path.dirname(local_context_file.config_dir))
    def clear(self, recursive=False):
        """Clear all stored values from the context file(s), keeping the files."""
        local_context_files = self._load_local_context_files(recursive=recursive)
        for local_context_file in local_context_files:
            local_context_file.clear()
            logger.warning('Local context information in working directory %s is cleared.',
                           os.path.dirname(local_context_file.config_dir))
    def delete(self, names=None):
        """Remove the named values from every scope in the effective file."""
        local_context_file = self._get_local_context_file()
        if local_context_file:
            for scope in local_context_file.sections():
                for name in names:
                    local_context_file.remove_option(scope, name)
            logger.warning('Local context value is deleted. You can run `az local-context show` to show all available '
                           'values.')
    def get_value(self, names=None):
        """Return {scope: {name: value}} for the given names (or all names)."""
        result = {}
        local_context_file = self._get_local_context_file()
        if not local_context_file:
            return result
        for scope in local_context_file.sections():
            try:
                if names is None:
                    for name, value in local_context_file.items(scope):  # may raise NoSectionError
                        if scope not in result:
                            result[scope] = {}
                        result[scope][name] = value
                else:
                    for name in names:
                        value = local_context_file.get(scope, name)  # may raise NoOptionError
                        if scope not in result:
                            result[scope] = {}
                        result[scope][name] = value
            except (configparser.NoSectionError, configparser.NoOptionError):
                pass
        return result
class LocalContextAttribute(object):
# pylint: disable=too-few-public-methods
def __init__(self, name, actions, scopes=None):
""" Local Context Attribute arguments
:param name: Argument name in local context. Make sure it is consistent for SET and GET.
:type name: str
:param actions: Which action should be taken for local context. Allowed values: SET, GET
:type actions: list
:param scopes: The effective commands or command groups of this argument when saved to local context.
:type scopes: list
"""
self.name = name
if isinstance(actions, str):
actions = [actions]
self.actions = actions
if isinstance(scopes, str):
scopes = [scopes]
if scopes is None and LocalContextAction.SET in actions:
scopes = [ALL]
self.scopes = scopes
| [
"noreply@github.com"
] | yungezz.noreply@github.com |
d04fd4b38a21bf1a91a7f87ac2ddf9a8d4186477 | 9e3620265aee10c0772484403509fbace7259f40 | /mhw_armor_edit/ftypes/wp_dat_g.py | 44512ba612426c4b7a65e1e6cc23e28121a973e4 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | nikibobi/MHWorldData | 5d104fa886087fe121b262497686ad81e6720751 | 78b5a4dc10ef532d5bad7359ef0b098f99104782 | refs/heads/master | 2020-12-14T23:41:45.224370 | 2020-01-20T08:40:16 | 2020-01-20T08:40:16 | 234,912,823 | 0 | 0 | MIT | 2020-01-19T14:25:05 | 2020-01-19T14:25:04 | null | UTF-8 | Python | false | false | 1,023 | py | # coding: utf-8
from mhw_armor_edit.ftypes import StructFile, Struct
class WpDatGEntry(Struct):
STRUCT_SIZE = 68
id: "<I"
unk1: "<H"
base_model_id: "<h"
part1_id: "<h"
part2_id: "<h"
color: "<B"
tree_id: "<B"
is_fixed_upgrade: "<B"
muzzle_type: "<B"
barrel_type: "<B"
magazine_type: "<B"
scope_type: "<B"
crafting_cost: "<I"
rarity: "<B"
raw_damage: "<H"
defense: "<H"
affinity: "<b"
element_id: "<B"
element_damage: "<H"
hidden_element_id: "<B"
hidden_element_damage: "<H"
elderseal: "<B"
shell_table_id: "<H"
deviation: "<B"
num_gem_slots: "<B"
gem_slot1_lvl: "<B"
gem_slot2_lvl: "<B"
gem_slot3_lvl: "<B"
unk2: "<I"
unk3: "<I"
unk4: "<I"
unk5: "<B"
special_ammo_type: "<B"
tree_position: "<B"
order: "<H"
gmd_name_index: "<H"
gmd_description_index: "<H"
skill_id: "<H"
unk6: "<H"
class WpDatG(StructFile):
EntryFactory = WpDatGEntry
MAGIC = 0x01B1
| [
"cfern1990@gmail.com"
] | cfern1990@gmail.com |
469c0b09e5b32a0e9eaee51ec4926608ce93ff46 | 7b85779e7cec84604315ffe3929e325b32ccd9b0 | /Python设计模式/singleton/with_decorator_and_param.py | 0c69281b9cafa823ebbb9e1e9c6800242f191deb | [] | no_license | clara123clara/test_auto | d27fa78b8d7f1f917402d4c3336ef003ceeaf4d5 | 26cf9ab7428a8895450e94bbae894aeb4462358f | refs/heads/master | 2023-07-09T19:56:05.248972 | 2021-08-18T02:44:14 | 2021-08-18T02:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | def singleton(cls):
"""
定义单例的装饰器(闭包)
:param cls:
:return:
"""
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
@singleton
class Singleton(object):
"""单例实例"""
def __init__(self, arg1):
self.arg1 = arg1
if __name__ == '__main__':
instance1 = Singleton("xag")
instance2 = Singleton("xingag")
print(id(instance1))
print(id(instance2))
| [
"xinganguo@gmail.com"
] | xinganguo@gmail.com |
25fa2da3e63064f3e8395f02f7af98751bcd472a | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-4678.py | 98da00a1f98c3949df99c4823a1f0695572b0799 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,350 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:$Type = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
f69f660717b475fe52eb6697bf4346389ecffd0b | 759de7cc7c264b165cc481d277eaf46235470bc6 | /database/async_saver.py | a14ca2cc66d956e8aec3bb7fcfa0ad010ac9375b | [] | no_license | RuzzyRullezz/insta_project | cdc743bcad8c95bbbec589d202aaa4e4bcc44857 | 3c29e5a505abaec9b468817117da1f0222fb2b49 | refs/heads/main | 2021-06-27T01:01:02.532268 | 2020-10-28T09:45:25 | 2020-10-28T09:45:25 | 174,750,569 | 1 | 0 | null | 2020-06-05T20:05:13 | 2019-03-09T21:56:57 | Python | UTF-8 | Python | false | false | 721 | py | import json
from django.db import IntegrityError
from mq_consumer.consumers import Consumer
from utils.mq import get_connector
class DBSaver(Consumer):
def __init__(self):
from database import models
queue = 'db_save'
self.model_attr = 'model'
self.models = models
super().__init__(get_connector(queue), self.handle)
def handle(self, channel, method, properties, body):
data = json.loads(body)
model = data.pop(self.model_attr)
model_cls = getattr(self.models, model)
obj = model_cls(**data)
try:
obj.save()
except IntegrityError:
pass
channel.basic_ack(delivery_tag=method.delivery_tag)
| [
"rgilfanov@fix.ru"
] | rgilfanov@fix.ru |
04fd9dc3ef6cc0f4d6723d76bc02b3ed81a09056 | ba4bcfdfc70062bc904bd6490faaf66c5c4c7345 | /TestResult_Project_ver0.3/GetResultsXls/write_Excel_UnixBench_1thread-good.py | 77cdbc940fa778c5812cd8b138a0e739f138b8ed | [] | no_license | jianxiamage/Proj_TestResults | 8ac5cf9c2eb20685e6e03aea8999a07768154bf4 | 4a5c2c7c44babad754ac0755787022308888713d | refs/heads/master | 2022-11-28T09:23:34.617677 | 2020-04-23T07:44:03 | 2020-04-23T07:44:03 | 203,519,769 | 0 | 0 | null | 2022-11-22T00:34:30 | 2019-08-21T06:20:47 | Python | UTF-8 | Python | false | false | 9,528 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys #引入模块
import os
import traceback
import ConfigParser
import glob
import xlwt
#防止自动将ini文件中的键名转换成小写
class myconf(ConfigParser.ConfigParser):
def __init__(self,defaults=None):
ConfigParser.ConfigParser.__init__(self,defaults=None)
def optionxform(self, optionstr):
return optionstr
workbook = xlwt.Workbook(encoding='utf-8')
booksheet = workbook.add_sheet('UnixBench_1thread', cell_overwrite_ok=True)
ResultPath='/data/'
PointsPath='Points_Files'
curPointsPath='ini_Points'
def init_xls(iniFile,xlsFile):
#-------------------------------------------------------------------------------
#首先插入表头,包括每一行的测试字段以及三个测试节点
booksheet.col(0).width = 9000
alignment = xlwt.Alignment() # Create Alignment
alignment.horz = xlwt.Alignment.HORZ_CENTER # May be: HORZ_GENERAL, HORZ_LEFT, HORZ_CENTER, HORZ_RIGHT, HORZ_FILLED, HORZ_JUSTIFIED, HORZ_CENTER_ACROSS_SEL, HORZ_DISTRIBUTED
alignment.vert = xlwt.Alignment.VERT_CENTER # May be: VERT_TOP, VERT_CENTER, VERT_BOTTOM, VERT_JUSTIFIED, VERT_DISTRIBUTED
style = xlwt.XFStyle() # Create Style
style.alignment = alignment # Add Alignment to Style
#初始化Excel表头
booksheet.write(0,0,'TestItem')
booksheet.write_merge(0, 0, 1, 3, 'Node-1')
booksheet.write(0,1,'Node-1',style)
booksheet.write_merge(0, 0, 4, 6, 'Node-2')
booksheet.write(0,4,'Node-2',style)
booksheet.write_merge(0, 0, 7, 9, 'Node-3')
booksheet.write(0,7,'Node-3',style)
#config = ConfigParser.ConfigParser()
config = myconf()
#print os.getcwd() #获取当前工作目录路径
config.readfp(open(iniFile))
sec_len = len(config.sections())
print('---------------------------------')
print sec_len
print('---------------------------------')
i = 0
j = 0
dictionary = {}
for section in config.sections():
dictionary[section] = {}
print('---------------------------------')
print section
print('---------------------------------')
xls_row = i + 2
booksheet.write(xls_row,0,section)
for option in config.options(section):
dictionary[section][option] = config.get(section, option)
#print dictionary[section][option]
print 'option:%s,value:%s' %(option,dictionary[section][option])
value = dictionary[section][option]
#booksheet.write(j,0,option)
if (i == 0):
m = j + 1
booksheet.write(1,m,option)
booksheet.write(1,m + 3,option)
booksheet.write(1,m + 6,option)
j = j + 1
i = i + 1
workbook.save(xlsFile)
def end_xls(iniFile,xlsFile,row_start,col_start):
#-------------------------------------------------------------------------------
#首先插入表头,包括每一行的测试字段以及三个测试节点
booksheet.col(0).width = 9000
alignment = xlwt.Alignment() # Create Alignment
alignment.horz = xlwt.Alignment.HORZ_CENTER # May be: HORZ_GENERAL, HORZ_LEFT, HORZ_CENTER, HORZ_RIGHT, HORZ_FILLED, HORZ_JUSTIFIED, HORZ_CENTER_ACROSS_SEL, HORZ_DISTRIBUTED
alignment.vert = xlwt.Alignment.VERT_CENTER # May be: VERT_TOP, VERT_CENTER, VERT_BOTTOM, VERT_JUSTIFIED, VERT_DISTRIBUTED
style = xlwt.XFStyle() # Create Style
style.alignment = alignment # Add Alignment to Style
#config = ConfigParser.ConfigParser()
config = myconf()
#print os.getcwd() #获取当前工作目录路径
config.readfp(open(iniFile))
sec_len = len(config.sections())
print('---------------------------------')
print sec_len
print('---------------------------------')
real_start_row = int(row_start) + sec_len - 1
i = 0
j = 0
curOpt_len = 0
dictionary = {}
for section in config.sections():
dictionary[section] = {}
print('---------------------------------')
print section
print('---------------------------------')
xls_row = i + 2
#booksheet.write(xls_row,0,section)
#每次循环新的section都要将列号恢复,因为如果不恢复列号,写入内容会向右偏移
j = 0
curOpt_len = 0
for option in config.options(section):
curOpt_len = len(config.options(section))
dictionary[section][option] = config.get(section, option)
#print dictionary[section][option]
print 'option:%s,value:%s' %(option,dictionary[section][option])
value = dictionary[section][option]
value_float = float(value)
row_num = real_start_row
col_num = j + col_start
merge_str = str(col_num)
if i == (sec_len - 1):
booksheet.write_merge(real_start_row, real_start_row, col_num, col_num + curOpt_len + 1, merge_str) #col_num + curOpt_len + 1,加1的原因是需要排除当前列col_num
booksheet.write(real_start_row,col_num,value_float,style)
#booksheet.write(row_num,col_num,value_float)
#break
j = j + 1
i = i + 1
workbook.save(xlsFile)
def write_xls(iniFile,xlsFile,row_start,col_start):
config = myconf()
#print os.getcwd() #获取当前工作目录路径
config.readfp(open(iniFile))
i = 0
j = 0
dictionary = {}
for section in config.sections():
dictionary[section] = {}
print('---------------------------------')
print section
print('---------------------------------')
xls_row = i + 2
#booksheet.write(xls_row,0,section)
#每次循环新的section都要将列号恢复,因为如果不恢复列号,写入内容会向右偏移
j = 0
for option in config.options(section):
dictionary[section][option] = config.get(section, option)
#print dictionary[section][option]
print 'option:%s,value:%s' %(option,dictionary[section][option])
value = dictionary[section][option]
value_float = float(value)
row_num = i + row_start
col_num = j + col_start
booksheet.write(row_num,col_num,value_float)
j = j + 1
i = i + 1
workbook.save(xlsFile)
def writeResult(TestType,Platform,TestCase,mode,count):
print count
IniPath = str(curPointsPath) + '/' + str(TestCase) + '_' + str(mode) + '.ini'
print('****************************************************')
print IniPath
print('****************************************************')
ExcelPath = ResultPath + str(TestType) + '/' + str(Platform) + '/' + str(TestCase) + '/' + str(PointsPath) + '/' + str(TestCase) + '_' + str(mode) + '_' + str(Platform) + '_' + str(TestType) + '.xls'
print IniPath
print ExcelPath
init_xls(IniPath,ExcelPath)
countNum = int(count) + 1
for i in range(1,countNum):
print '第%d个节点' %(i)
ResultIniPath = str(ResultPath) + str(TestType) + '/' + str(Platform) + '/' + str(TestCase) + '/' + str(PointsPath) + '/' + str(TestCase) + '_' + str(mode) + '_' + str(i) + '.ini'
print ResultIniPath
col_start_tag = 1 + (int(count))*(int(i)-1)
print('****************************************************')
print col_start_tag
print('****************************************************')
write_xls(ResultIniPath,ExcelPath,2,col_start_tag)
for i in range(1,countNum):
print '第%d个节点' %(i)
ResultIniPath = str(ResultPath) + str(TestType) + '/' + str(Platform) + '/' + str(TestCase) + '/' + str(PointsPath) + '/' + str(TestCase) + '_' + str(mode) + '_' + str(i) + '.ini'
print ResultIniPath
col_start_tag = 1 + (int(count))*(int(i)-1)
print('****************************************************')
print col_start_tag
print('****************************************************')
end_xls(ResultIniPath,ExcelPath,2,col_start_tag)
#ResultIniPath = ResultPath + str(TestType) + '/' + str(Platform) + '/' + str(TestCase) + '/' + str(PointsPath) + '/' + str(TestCase) + '_' + str(1) + '.ini'
#write_xls(ResultIniPath,ExcelPath,2)
# countNum = int(count) + 1
# for i in range(1,countNum):
# print '第%d个节点' %(i)
# ResultIniPath = str(ResultPath) + str(TestType) + '/' + str(Platform) + '/' + str(TestCase) + '/' + str(PointsPath) + '/' + str(TestCase) + '_' + str(mode) + '_' + str(i) + '.ini'
# print ResultIniPath
# write_xls(ResultIniPath,ExcelPath,i)
print('---------------------------------------------------')
print('Please check by the Excel file:')
print ExcelPath
print('---------------------------------------------------')
retCode = 0
return retCode
if __name__=='__main__':
try:
test_case_type = sys.argv[1]
test_case_platform = sys.argv[2]
test_case = sys.argv[3]
test_mode = sys.argv[4]
node_count = sys.argv[5]
result_code = writeResult(test_case_type,test_case_platform,test_case,test_mode,node_count)
retCode = result_code
print retCode
except Exception as E:
#print('str(Exception):', str(Exception))
print('str(e):', str(E))
#print('repr(e):', repr(E))
#print('traceback.print_exc(): ', traceback.print_exc())
| [
"jianxiamage@163.com"
] | jianxiamage@163.com |
4b1685e4d87b983cc9dd2aa6bb969e8ca01d7711 | a873f3cd46a10ad879fc56d78e1f533d8bf486c0 | /spider/阶段11-爬虫开发/代码以及其他/06.mongodb数据库/code/1.pymongo_test.py | ee716a5ce8e5881760249665d2d051612ead4a44 | [] | no_license | shenhaiyu0923/resful | d0301b39363e6b3d3659f62fa4a9b2532ebcd225 | 1e66cae7d68fa231794776953cc1a5e999bf36c6 | refs/heads/master | 2021-07-08T20:46:57.300298 | 2021-06-01T08:17:27 | 2021-06-01T08:17:27 | 244,308,016 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | #coding:utf-8
from pymongo import MongoClient
# 创建数据库链接对象
client = MongoClient('172.16.123.223', 27017)
# 选择一个数据库
db = client['admin']
db.authenticate('python','python')
# 选择一个集合
col = client['pydata']['test']
# col.insert({"class":"python37"})
# col.insert([{"class":"python38"},{"class":"python39"},{"class":"python40"}])
for data in col.find():
print(data)
# print(col.find_one())
print("*"*50)
# 全文档覆盖更新
# col.update({"class":"python40"},{"message":"helloworld"})
# col.update({},{"$set":{"id":"xxxx-xxxx"}})
# col.update({}, {"$set": {"id": "xxxx-xxxx"}}, multi=True)
# col.update({"message":"hello world"}, {"$set": {"id": "xxxx-xxx2"}}, upsert=True)
# col.delete_one({"message":"helloworld"})
col.delete_many({"id": "xxxx-xxxx"})
for data in col.find():
print(data)
| [
"1802161998@qq.com"
] | 1802161998@qq.com |
e7817dd212cdd36df40dd836c8d9c61472074615 | 7def8c4abacc5c596358467c90afdc8dbd677c02 | /SWEA/swea_4012_chef.py | 2de21e8079a7670e820becb96d956a34785627b2 | [] | no_license | liza0525/algorithm-study | 0b2e41a29e6f263c1906a90771f9c932008b84d2 | 906e817ba1d033b2e6cfad4b64bb9906d0fe03b7 | refs/heads/master | 2023-08-18T11:08:31.238163 | 2021-09-06T14:31:50 | 2021-09-06T14:31:50 | 208,087,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | def score(arr):
s = 0
for i in range(len(arr)-1):
for j in range(i+1, len(arr)):
s += table[arr[i]][arr[j]] + table[arr[j]][arr[i]]
return s
def combi(arr, d, next):
global group1,group2, res
if d == int(N/2):
group1 = arr[:]
group2 = list(set(food) - set(arr))
temp_res = abs(score(group1) - score(group2))
if temp_res < res:
res = temp_res
else:
for i in range(next, N):
temp = arr[:]
temp.append(i)
combi(temp, d+1, i+1)
temp.pop()
for test in range(int(input())):
N = int(input())
table = [list(map(int, input().split())) for _ in range(N)]
food = [i for i in range(N)]
group1, group2 = [], []
res = 987654321
combi([], 0, 0)
print('#{} {}'.format(test+1, res)) | [
"double.y.0525@gmail.com"
] | double.y.0525@gmail.com |
d824969c11f9acd62838ef00fe7b652c4b39d466 | f2658c4bd7f833ace25ac2b63e88317b05f4602d | /2017 July/2017-July-11/st_rdf_test/model2/RelationsAdmin.py | 789db15c499db32fbdf401d7303f8bbb4316d67f | [] | no_license | xiaochao00/telanav_diary | e4c34ac0a14b65e4930e32012cc2202ff4ed91e2 | 3c583695e2880322483f526c98217c04286af9b2 | refs/heads/master | 2022-01-06T19:42:55.504845 | 2019-05-17T03:11:46 | 2019-05-17T03:11:46 | 108,958,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,148 | py | #-------------------------------------------------------------------------------
# Name: RelationsAdmin model
# Purpose: this model is used to mapping the
# columns: [ ]
#
# Author: rex
#
# Created: 10/12/2015
# Copyright: (c) rex 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
from record import Record
from record import CSV_SEP
from constants import *
import os
import sys
import datetime
import json
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..")
GLOBAL_KEY_PREFIX = "relations_admin_"
#CSV_SEP = '`'
LF = '\n'
#(key, category, function)
STATISTIC_KEYS = (("type",False,"type"),
("admin_order",False,"admin_order"),
("iso", True, "iso"),
("admin_level", True, "admin_level"),
("admin_type", True, "admin_type"),
("timezone", False, "timezone"),
("dst_observed", True, "dst_observed"),
("dst_start_day", False, "dst_start_day"),
("dst_start_weekday", False, "dst_start_weekday"),
("dst_start_month", False, "dst_start_month"),
("dst_start_time", False, "dst_start_time"),
("dst_end_day", False, "dst_end_day"),
("dst_end_weekday", False, "dst_end_weekday"),
("dst_end_month", False, "dst_end_month"),
("dst_end_time", False, "dst_end_time"))
class RelationsAdmin(Record):
def __init__(self, region):
Record.__init__(self)
self.dump_file = os.path.join(ROOT_DIR, "temporary", self.__class__.__name__)
self.stat = {}
self.region = region
def dump2file(self):
cmd = "SELECT \
DISTINCT(rah.admin_place_id), \
rah.admin_order, \
rah.iso_country_code, \
rap.admin_type, \
rap.time_zone, \
rad.dst_observed, \
rad.dst_start_day, \
rad.dst_start_weekday, \
rad.dst_start_month, \
rad.dst_start_time, \
rad.dst_end_day, \
rad.dst_end_weekday, \
rad.dst_end_month, \
rad.dst_end_time \
FROM \
public.rdf_admin_place AS rap LEFT JOIN public.rdf_admin_hierarchy AS rah ON rap.admin_place_id=rah.admin_place_id \
LEFT JOIN public.rdf_admin_dst AS rad ON rad.dst_id = rap.dst_id \
WHERE rah.iso_country_code IN (%s)"%(REGION_COUNTRY_CODES(self.region, GLOBAL_KEY_PREFIX))
print cmd
self.cursor.copy_expert("COPY (%s) TO STDOUT DELIMITER '%s'"%(cmd, CSV_SEP),open(self.dump_file,"w"))
def get_statistic(self):
try:
self.dump2file()
except:
print "Some table or schema don't exist! Please check the upper sql"
return {}
processcount = 0
with open(self.dump_file, "r",1024*1024*1024) as csv_f:
for line in csv_f:
line = line.rstrip()
#line_p = line.split(CSV_SEP)
line_p = Record.split(line)
if len(line_p) < 1:
continue
self.__statistic(line_p)
processcount += 1
if processcount%5000 == 0:
print "\rProcess index [ "+str(processcount)+" ]",
print "\rProcess index [ "+str(processcount)+" ]",
# write to file
with open(os.path.join(ROOT_DIR, "output", "stat", self.__class__.__name__), 'w') as stf:
stf.write(json.dumps(self.stat))
return self.stat
def __statistic(self,line):
for keys in STATISTIC_KEYS:
try:
getattr(self,'_RelationsAdmin__get_'+keys[2])(keys,line)
except:
print "The statistic [ %s ] didn't exist"%(keys[2])
print ("Unexpected error:[ RelationsAdmin.py->__statistic] "+str(sys.exc_info()))
def __count(self,key):
if self.stat.has_key(key):
self.stat[key] += 1
else:
self.stat[key] = 1
# all statistic method
def __get_type(self,keys,line):
if '\N' != line[0]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_admin_order(self,keys,line):
if '\N' != line[1]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_iso(self,keys,line):
if '\N' != line[2]:
self.__count("%s%s%s"%(GLOBAL_KEY_PREFIX,keys[0],keys[1] and "#%s"%(line[2]) or ""))
def __get_admin_level(self,keys,line):
pass
def __get_admin_type(self,keys,line):
if '\N' != line[3]:
self.__count("%s%s%s"%(GLOBAL_KEY_PREFIX,keys[0],keys[1] and "#%s"%(line[3]) or ""))
def __get_timezone(self,keys,line):
if '\N' != line[4]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_observed(self,keys,line):
if 'Y' == line[5]:
self.__count("%s%s%s"%(GLOBAL_KEY_PREFIX,keys[0],keys[1] and "#%s"%('yes') or ""))
def __get_dst_start_day(self,keys,line):
if '\N' != line[6]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_start_weekday(self,keys,line):
if '\N' != line[7]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_start_month(self,keys,line):
if '\N' != line[8]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_start_time(self,keys,line):
if '\N' != line[9]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_end_day(self,keys,line):
if '\N' != line[10]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_end_weekday(self,keys,line):
if '\N' != line[11]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_end_month(self,keys,line):
if '\N' != line[12]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_end_time(self,keys,line):
if '\N' != line[13]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
if __name__ == "__main__":
# use to test this model
bg = datetime.datetime.now()
stat = RelationsAdmin('na').get_statistic()
keys = stat.keys()
print "==>"
print "{%s}"%(",".join(map(lambda px: "\"%s\":%s"%(px,stat[px]) ,keys)))
print "<=="
ed = datetime.datetime.now()
print "Cost time:"+str(ed - bg)
| [
"1363180272@qq.com"
] | 1363180272@qq.com |
97d687b966e97df88df10a56f64cc44e28716f66 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17r_2_00/openflow_state/interface/__init__.py | 66c3df256e4d4472e9e3d147fac2436ed05e9fbe | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,118 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class interface(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-openflow-operational - based on the path /openflow-state/interface. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Openflow enabled interface details
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__port','__interface_type','__interface_name','__link','__port_state','__speed','__mac','__port_id','__mode',)
_yang_name = 'interface'
_rest_name = 'interface'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__port_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="port-id", rest_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
self.__interface_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-openflow-interface-type-invalid': {'value': 0}, u'dcm-openflow-interface-type-ethernet': {'value': 1}, u'dcm-openflow-interface-type-portchannel': {'value': 2}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='openflow-interface-type', is_config=False)
self.__port_state = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-state-live': {'value': 3}, u'dcm-port-state-forward': {'value': 4}, u'dcm-port-state-invalid': {'value': 0}, u'dcm-port-state-blocked': {'value': 2}, u'dcm-port-state-link-down': {'value': 1}},), is_leaf=True, yang_name="port-state", rest_name="port-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-state', is_config=False)
self.__mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__link = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link", rest_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='boolean', is_config=False)
self.__mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-mode-unknown': {'value': 0}, u'dcm-port-mode-hybrid-l3': {'value': 5}, u'dcm-port-mode-hybrid-l2': {'value': 4}, u'dcm-port-mode-l23': {'value': 3}, u'dcm-port-mode-hybrid-l23': {'value': 6}, u'dcm-port-mode-l3': {'value': 2}, u'dcm-port-mode-l2': {'value': 1}, u'egress-mode': {'value': 7}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-mode', is_config=False)
self.__interface_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__speed = YANGDynClass(base=unicode, is_leaf=True, yang_name="speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__port = YANGDynClass(base=unicode, is_leaf=True, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'openflow-state', u'interface']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'openflow-state', u'interface']
def _get_port(self):
"""
Getter method for port, mapped from YANG variable /openflow_state/interface/port (string)
YANG Description: Port
"""
return self.__port
def _set_port(self, v, load=False):
"""
Setter method for port, mapped from YANG variable /openflow_state/interface/port (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port() directly.
YANG Description: Port
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__port = t
if hasattr(self, '_set'):
self._set()
def _unset_port(self):
self.__port = YANGDynClass(base=unicode, is_leaf=True, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_interface_type(self):
"""
Getter method for interface_type, mapped from YANG variable /openflow_state/interface/interface_type (openflow-interface-type)
YANG Description: openflow interface type
"""
return self.__interface_type
def _set_interface_type(self, v, load=False):
"""
Setter method for interface_type, mapped from YANG variable /openflow_state/interface/interface_type (openflow-interface-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_type() directly.
YANG Description: openflow interface type
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-openflow-interface-type-invalid': {'value': 0}, u'dcm-openflow-interface-type-ethernet': {'value': 1}, u'dcm-openflow-interface-type-portchannel': {'value': 2}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='openflow-interface-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_type must be of a type compatible with openflow-interface-type""",
'defined-type': "brocade-openflow-operational:openflow-interface-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-openflow-interface-type-invalid': {'value': 0}, u'dcm-openflow-interface-type-ethernet': {'value': 1}, u'dcm-openflow-interface-type-portchannel': {'value': 2}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='openflow-interface-type', is_config=False)""",
})
self.__interface_type = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_type(self):
self.__interface_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-openflow-interface-type-invalid': {'value': 0}, u'dcm-openflow-interface-type-ethernet': {'value': 1}, u'dcm-openflow-interface-type-portchannel': {'value': 2}},), is_leaf=True, yang_name="interface-type", rest_name="interface-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='openflow-interface-type', is_config=False)
def _get_interface_name(self):
"""
Getter method for interface_name, mapped from YANG variable /openflow_state/interface/interface_name (string)
YANG Description: interface name
"""
return self.__interface_name
def _set_interface_name(self, v, load=False):
"""
Setter method for interface_name, mapped from YANG variable /openflow_state/interface/interface_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_name() directly.
YANG Description: interface name
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__interface_name = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_name(self):
self.__interface_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface-name", rest_name="interface-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_link(self):
"""
Getter method for link, mapped from YANG variable /openflow_state/interface/link (boolean)
YANG Description: Link
"""
return self.__link
def _set_link(self, v, load=False):
"""
Setter method for link, mapped from YANG variable /openflow_state/interface/link (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_link is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link() directly.
YANG Description: Link
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="link", rest_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link", rest_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='boolean', is_config=False)""",
})
self.__link = t
if hasattr(self, '_set'):
self._set()
def _unset_link(self):
self.__link = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link", rest_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='boolean', is_config=False)
def _get_port_state(self):
"""
Getter method for port_state, mapped from YANG variable /openflow_state/interface/port_state (port-state)
YANG Description: Port State
"""
return self.__port_state
def _set_port_state(self, v, load=False):
"""
Setter method for port_state, mapped from YANG variable /openflow_state/interface/port_state (port-state)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_state() directly.
YANG Description: Port State
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-state-live': {'value': 3}, u'dcm-port-state-forward': {'value': 4}, u'dcm-port-state-invalid': {'value': 0}, u'dcm-port-state-blocked': {'value': 2}, u'dcm-port-state-link-down': {'value': 1}},), is_leaf=True, yang_name="port-state", rest_name="port-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-state', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_state must be of a type compatible with port-state""",
'defined-type': "brocade-openflow-operational:port-state",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-state-live': {'value': 3}, u'dcm-port-state-forward': {'value': 4}, u'dcm-port-state-invalid': {'value': 0}, u'dcm-port-state-blocked': {'value': 2}, u'dcm-port-state-link-down': {'value': 1}},), is_leaf=True, yang_name="port-state", rest_name="port-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-state', is_config=False)""",
})
self.__port_state = t
if hasattr(self, '_set'):
self._set()
def _unset_port_state(self):
self.__port_state = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-state-live': {'value': 3}, u'dcm-port-state-forward': {'value': 4}, u'dcm-port-state-invalid': {'value': 0}, u'dcm-port-state-blocked': {'value': 2}, u'dcm-port-state-link-down': {'value': 1}},), is_leaf=True, yang_name="port-state", rest_name="port-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-state', is_config=False)
def _get_speed(self):
"""
Getter method for speed, mapped from YANG variable /openflow_state/interface/speed (string)
YANG Description: Speed
"""
return self.__speed
def _set_speed(self, v, load=False):
"""
Setter method for speed, mapped from YANG variable /openflow_state/interface/speed (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_speed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_speed() directly.
YANG Description: Speed
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """speed must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__speed = t
if hasattr(self, '_set'):
self._set()
def _unset_speed(self):
self.__speed = YANGDynClass(base=unicode, is_leaf=True, yang_name="speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_mac(self):
"""
Getter method for mac, mapped from YANG variable /openflow_state/interface/mac (string)
YANG Description: MAC
"""
return self.__mac
def _set_mac(self, v, load=False):
"""
Setter method for mac, mapped from YANG variable /openflow_state/interface/mac (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac() directly.
YANG Description: MAC
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mac must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__mac = t
if hasattr(self, '_set'):
self._set()
def _unset_mac(self):
self.__mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_port_id(self):
"""
Getter method for port_id, mapped from YANG variable /openflow_state/interface/port_id (uint32)
YANG Description: OF-Port-ID
"""
return self.__port_id
def _set_port_id(self, v, load=False):
"""
Setter method for port_id, mapped from YANG variable /openflow_state/interface/port_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_id() directly.
YANG Description: OF-Port-ID
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="port-id", rest_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="port-id", rest_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""",
})
self.__port_id = t
if hasattr(self, '_set'):
self._set()
def _unset_port_id(self):
self.__port_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="port-id", rest_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
def _get_mode(self):
"""
Getter method for mode, mapped from YANG variable /openflow_state/interface/mode (port-mode)
YANG Description: Mode
"""
return self.__mode
def _set_mode(self, v, load=False):
"""
Setter method for mode, mapped from YANG variable /openflow_state/interface/mode (port-mode)
If this variable is read-only (config: false) in the
source YANG file, then _set_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mode() directly.
YANG Description: Mode
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-mode-unknown': {'value': 0}, u'dcm-port-mode-hybrid-l3': {'value': 5}, u'dcm-port-mode-hybrid-l2': {'value': 4}, u'dcm-port-mode-l23': {'value': 3}, u'dcm-port-mode-hybrid-l23': {'value': 6}, u'dcm-port-mode-l3': {'value': 2}, u'dcm-port-mode-l2': {'value': 1}, u'egress-mode': {'value': 7}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-mode', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mode must be of a type compatible with port-mode""",
'defined-type': "brocade-openflow-operational:port-mode",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-mode-unknown': {'value': 0}, u'dcm-port-mode-hybrid-l3': {'value': 5}, u'dcm-port-mode-hybrid-l2': {'value': 4}, u'dcm-port-mode-l23': {'value': 3}, u'dcm-port-mode-hybrid-l23': {'value': 6}, u'dcm-port-mode-l3': {'value': 2}, u'dcm-port-mode-l2': {'value': 1}, u'egress-mode': {'value': 7}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-mode', is_config=False)""",
})
self.__mode = t
if hasattr(self, '_set'):
self._set()
def _unset_mode(self):
self.__mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-port-mode-unknown': {'value': 0}, u'dcm-port-mode-hybrid-l3': {'value': 5}, u'dcm-port-mode-hybrid-l2': {'value': 4}, u'dcm-port-mode-l23': {'value': 3}, u'dcm-port-mode-hybrid-l23': {'value': 6}, u'dcm-port-mode-l3': {'value': 2}, u'dcm-port-mode-l2': {'value': 1}, u'egress-mode': {'value': 7}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='port-mode', is_config=False)
port = __builtin__.property(_get_port)
interface_type = __builtin__.property(_get_interface_type)
interface_name = __builtin__.property(_get_interface_name)
link = __builtin__.property(_get_link)
port_state = __builtin__.property(_get_port_state)
speed = __builtin__.property(_get_speed)
mac = __builtin__.property(_get_mac)
port_id = __builtin__.property(_get_port_id)
mode = __builtin__.property(_get_mode)
_pyangbind_elements = {'port': port, 'interface_type': interface_type, 'interface_name': interface_name, 'link': link, 'port_state': port_state, 'speed': speed, 'mac': mac, 'port_id': port_id, 'mode': mode, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
f9b5d946001dbc2a7b0e167bab23eec454bf4d50 | 7ddb110792c8242acd2c1a8042caf62a586dd3f5 | /OnlineClustering/main_fast.py | 2b9d60f523f3721b1ffdafe8c65c7ce0bfe78205 | [] | no_license | rashadulrakib/short-text-stream-clustering | 43744dd5761ca102d576d90f487c1c5b63e75c6a | f7600a3501064000ddfd849653c7b36f5cc742f7 | refs/heads/master | 2021-07-03T09:05:29.161305 | 2020-12-17T01:23:31 | 2020-12-17T01:23:31 | 320,949,303 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,444 | py | import os
from datetime import datetime
from general_util import readlistWholeJsonDataSet
from evaluation import Evaluate_old
from read_pred_true_text import ReadPredTrueText
from clustering_term_online_fast import cluster_biterm
from word_vec_extractor import extractAllWordVecsPartialStemming
ignoreMinusOne=True
isSemantic=False
dataDir = "data/"
outputPath = "result/"
dataset='News-T' # 'stackoverflow_javascript' 'stackoverflow_java' 'stackoverflow_python' 'stackoverflow_csharp' 'stackoverflow_php' 'stackoverflow_android' 'stackoverflow_jquery' 'stackoverflow_r' 'stackoverflow_java' # 'stackoverflow_java' 'stackoverflow_cplus' 'stackoverflow_mysql' 'stackoverflow_large_tweets-T_news-T_suff' 'stackoverflow_large_tweets-T' #'News-T' 'NT-mstream-long1' #'Tweets-T' # 'stackoverflow_large' 'stackoverflow_large_tweets-T'
inputfile = dataDir+dataset
resultFile=outputPath+'personal_cluster_biterm.txt'
#list_pred_true_words_index_postid=readStackOverflowDataSet(inputfile)
list_pred_true_words_index=readlistWholeJsonDataSet(inputfile)
print(len(list_pred_true_words_index))
all_words=[]
for item in list_pred_true_words_index:
all_words.extend(item[2])
all_words=list(set(all_words))
gloveFile = "glove.6B.50d.txt"
embedDim=50
wordVectorsDic={}
if isSemantic==True:
wordVectorsDic=extractAllWordVecsPartialStemming(gloveFile, embedDim, all_words)
if os.path.exists(resultFile):
os.remove(resultFile)
c_bitermsFreqs={}
c_totalBiterms={}
c_wordsFreqs={}
c_totalWords={}
c_txtIds={}
c_clusterVecs={}
txtId_txt={}
last_txtId=0
max_c_id=0
dic_clus__id={}
dic_biterm__clusterId_Freq={}
dic_biterm__allClusterFreq={}
dic_biterm__clusterIds={}
f = open(resultFile, 'w')
t11=datetime.now()
c_bitermsFreqs, c_totalBiterms, c_wordsFreqs, c_totalWords, c_txtIds, c_clusterVecs, txtId_txt, last_txtId, dic_clus__id, dic_biterm__clusterId_Freq, dic_biterm__allClusterFreq, dic_biterm__clusterIds=cluster_biterm(f, list_pred_true_words_index, c_bitermsFreqs, c_totalBiterms, c_wordsFreqs, c_totalWords, c_txtIds, c_clusterVecs, txtId_txt, last_txtId, max_c_id, wordVectorsDic, dic_clus__id, dic_biterm__clusterId_Freq, dic_biterm__allClusterFreq, dic_biterm__clusterIds)
t12=datetime.now()
t_diff = t12-t11
print("total time diff secs=",t_diff.seconds)
f.close()
listtuple_pred_true_text=ReadPredTrueText(resultFile, ignoreMinusOne)
print('result for', inputfile)
Evaluate_old(listtuple_pred_true_text) | [
"rashadul.rakib@gmail.com"
] | rashadul.rakib@gmail.com |
7b462f75e7c76807b69e821d3c3f4330b2d1ff28 | be30e4f6bdd9e9e9ec1fc56d6c083fc4ebcf0b23 | /urls/client.py | 6991212984d87225f3864f4a2a7ef17a9ef94cd2 | [] | no_license | Cronopioss/braavos | 2ecc404ce4efdc29434ca9c1ebbe385e86da4f68 | 32ebd07177c06a5e6bec4a69cd1abde2a5faf64b | refs/heads/master | 2021-06-07T12:17:19.647469 | 2016-07-28T07:07:12 | 2016-07-28T07:07:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from controllers.client import client_bp
def client_register_blueprint(app):
app.register_blueprint(client_bp, url_prefix='/clients') | [
"guoyu@inad.com"
] | guoyu@inad.com |
a8b3047f947b583472067a2401b98ff1708113fb | 2b5dfacdb7389aefff64c67fac863e3f82d3723e | /source/tygame-sdk/src/tysdk/entity/pay4/payment/payv4_wandoujiadanji.py | dc33aaaa616e37b8b338639cfd8b5dbdbf2c1dad | [] | no_license | hi-noikiy/hall0 | 54ef76c715f7ac7fec4c9ca175817e12f60fbd6a | 21ea94c5b048bc611fb1557ac0b6e3ef4fdbbc09 | refs/heads/master | 2020-04-08T21:58:55.239106 | 2018-01-15T14:58:32 | 2018-01-15T14:58:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | # -*- coding=utf-8 -*-
import json
from payv4_helper import PayHelperV4
from tyframework.context import TyContext
from tysdk.entity.pay.rsacrypto import rsaVerify
from tysdk.entity.pay4.charge_model import ChargeModel
from tysdk.entity.pay4.decorator.payv4_callback import payv4_callback
from tysdk.entity.pay4.decorator.payv4_order import payv4_order
from tysdk.entity.pay4.payment.payv4_base import PayBaseV4
class TuYouPayWandoujiadanji(PayBaseV4):
    """Payment channel handler for the Wandoujia standalone-game ("danji") store.

    Exposes the order-creation endpoint (charge_data) and the asynchronous
    server-to-server payment-notify callback (doCallback).
    """
    @payv4_order("wandoujiadanji")
    def charge_data(cls, mi):
        # Build the channel-specific charge payload and wrap it in the
        # standard success (code 0) response envelope.
        charge_info = cls.get_charge_info(mi)
        return cls.return_mo(0, chargeInfo=charge_info)
    @payv4_callback("/open/ve/pay/wandoujiadanji/callback")
    def doCallback(cls, rpath):
        """Handle the channel's payment notification.

        Returns the plain-text acknowledgement body ('success'/'fail')
        expected by the channel.
        """
        rparams = TyContext.RunHttp.convertArgsToDict()
        content = rparams['content']
        content_json = json.loads(content)
        # 'out_trade_no' carries our own order id.
        orderId = content_json['out_trade_no']
        # Reject the notification outright if the RSA signature is invalid.
        if not cls.verify_sign(rparams):
            TyContext.ftlog.info('TuYouPayWandoujiadanji->sign verify ERROR')
            return "fail"
        # NOTE(review): 'money' is presumably denominated in fen (1/100 yuan),
        # hence the /100 conversion to whole yuan -- confirm against the
        # channel's callback documentation.
        total_fee = int(float(content_json['money']))
        total_fee = int(total_fee / 100)
        # Persist the channel-side order id alongside our order id
        # (missing 'orderId' falls back to an empty string).
        ChargeModel.save_third_pay_order_id(orderId, content_json.get('orderId', ''))
        is_ok = PayHelperV4.callback_ok(orderId, total_fee, rparams)
        if is_ok:
            return 'success'
        else:
            return 'fail'
    @classmethod
    def verify_sign(cls, rparams):
        """RSA-verify the callback signature over the raw 'content' payload."""
        sign = rparams['sign']
        data = rparams['content']
        # wandoujiadanji shares the same RSA public key as wandoujia.
        if rsaVerify(data, sign, 'wandoujia'):
            return True
        return False
| [
"cg@ibenxi.com"
] | cg@ibenxi.com |
63154825c27e52db1c81a916e178d71201a7bb5a | b59f66a9c4b5492b95c767b7ca76cd026f6f572a | /aac/transforms/pad.py | df4df193638df824329e201a743c55d1d9400c40 | [] | no_license | Labbeti/dcase2021task6 | b50f51370af15c241bd9f257920e2df4bc925669 | 2e792749bd9b2a495fa4b870f6190f6fb389fc56 | refs/heads/main | 2023-06-11T07:10:50.179348 | 2021-07-05T09:28:11 | 2021-07-05T09:28:11 | 377,414,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,562 | py |
import random
import torch
from torch import Tensor
from torch.nn import Module
from torch.nn.functional import pad
class Pad(Module):
    """Pad a tensor along one dimension up to ``target_length``.

    Inputs already at least ``target_length`` long on ``dim`` are returned
    unchanged (no truncation is performed).
    """

    def __init__(
        self,
        target_length: int,
        align: str = 'left',
        fill_value: float = 0.0,
        dim: int = -1,
        mode: str = 'constant',
        p: float = 1.0,
    ):
        """
        Example :

        >>> import torch; from torch import tensor
        >>> x = torch.ones(6)
        >>> zero_pad = Pad(10, align='left')
        >>> x_pad = zero_pad(x)
        ... tensor([1, 1, 1, 1, 1, 1, 0, 0, 0, 0])

        :param target_length: The target length of the dimension.
        :param align: The alignment type. Can be 'left', 'right', 'center' or 'random'. (default: 'left')
        :param fill_value: The fill value used for constant padding. (default: 0.0)
        :param dim: The dimension to pad. (default: -1)
        :param mode: The padding mode. Can be 'constant', 'reflect', 'replicate' or 'circular'. (default: 'constant')
        :param p: The probability to apply the transform. (default: 1.0)
        """
        super().__init__()
        self.target_length = target_length
        self.align = align
        self.fill_value = fill_value
        self.dim = dim
        self.mode = mode
        self.p = p

    def forward(self, data: Tensor) -> Tensor:
        # BUGFIX: the transform must be applied with probability ``p``.
        # The previous condition ``self.p <= random.random()`` applied it
        # with probability ``1 - p`` instead (and *always* for p == 0).
        if self.p >= 1.0 or random.random() < self.p:
            return self.process(data)
        else:
            return data

    def process(self, data: Tensor) -> Tensor:
        """Dispatch to the padding method selected by ``self.align``."""
        if self.align == 'left':
            return self.pad_align_left(data)
        elif self.align == 'right':
            return self.pad_align_right(data)
        elif self.align == 'center':
            return self.pad_align_center(data)
        elif self.align == 'random':
            return self.pad_align_random(data)
        else:
            raise ValueError(f'Unknown alignment "{self.align}". Must be one of {str(["left", "right", "center", "random"])}.')

    def _missing(self, x: Tensor) -> int:
        # Number of elements to add along ``self.dim`` (never negative).
        return max(self.target_length - x.shape[self.dim], 0)

    def _apply_pad(self, x: Tensor, missing_left: int, missing_right: int) -> Tensor:
        # Note: F.pad's pad sequence is ordered
        # [pad_left_dim_-1, pad_right_dim_-1, pad_left_dim_-2, pad_right_dim_-2, ...],
        # so the entry index for ``self.dim`` counts from the *last* dimension.
        idx = len(x.shape) - (self.dim % len(x.shape)) - 1
        pad_seq = [0 for _ in range(len(x.shape) * 2)]
        pad_seq[idx * 2] = missing_left
        pad_seq[idx * 2 + 1] = missing_right
        return pad(x, pad_seq, mode=self.mode, value=self.fill_value)

    def pad_align_left(self, x: Tensor) -> Tensor:
        """Pad only on the right, keeping the data left-aligned."""
        return self._apply_pad(x, 0, self._missing(x))

    def pad_align_right(self, x: Tensor) -> Tensor:
        """Pad only on the left, keeping the data right-aligned."""
        return self._apply_pad(x, self._missing(x), 0)

    def pad_align_center(self, x: Tensor) -> Tensor:
        """Split the padding between both sides (the odd element goes left)."""
        missing = self._missing(x)
        missing_left = missing // 2 + missing % 2
        missing_right = missing // 2
        return self._apply_pad(x, missing_left, missing_right)

    def pad_align_random(self, x: Tensor) -> Tensor:
        """Split the padding randomly between both sides (uses the torch RNG)."""
        missing = self._missing(x)
        missing_left = int(torch.randint(low=0, high=missing + 1, size=()).item())
        missing_right = missing - missing_left
        return self._apply_pad(x, missing_left, missing_right)

    def extra_repr(self) -> str:
        return (
            f'target_length={self.target_length}, '
            f'align={self.align}, '
            f'fill_value={self.fill_value}, '
            f'dim={self.dim}, '
            f'mode={self.mode}'
        )
| [
"etienne.labbe31@gmail.com"
] | etienne.labbe31@gmail.com |
bb568e0c498b71992d25b88b505eca73155e3abd | c0d28a5a52748d78563372a8cffa53cacab1847a | /django项目/MovieProject/MovieProject/wsgi.py | 111e9f0f5ef27d51c33b447d4ded1ca266301eaf | [] | no_license | fanfanstl/projects | 5f1cf83bdb1c21855c3c1b3b4904f99a08dd2808 | 0c12892e691971a55239a1c5317df77220402d5e | refs/heads/master | 2021-08-11T04:21:50.379625 | 2018-12-27T15:39:19 | 2018-12-27T15:39:19 | 148,894,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | """
WSGI config for MovieProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MovieProject.settings")
# Module-level WSGI callable that application servers (gunicorn, mod_wsgi) look up.
application = get_wsgi_application()
| [
"2094531487@qq.com"
] | 2094531487@qq.com |
817c70df624353a0834faa58ab16bbffc92b6df0 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /little_thing/thing/problem/next_place/big_woman_and_large_number/big_person.py | cfd590d9cafb67969b1bdf84b3de88bdc33d89f5 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py |
#! /usr/bin/env python
def have_great_world_in_way(str_arg):
    """Write *str_arg* to stdout."""
    print(str_arg)


def know_big_work_after_way(str_arg):
    """Print *str_arg*, then the fixed marker line 'important_group'."""
    have_great_world_in_way(str_arg)
    print('important_group')


if __name__ == '__main__':
    know_big_work_after_way('last_work')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
2b1bfe55b661da28a117416f4296194407e005db | ad23b164febd12d5c6d97cfbcd91cf70e2914ab3 | /TestCaseFunction/autotest/test/marketing/yichang/AutoTestRwIDToYy.py | c243b03e7d9fad39965f8b95438d6b3c9674a98e | [] | no_license | wawj901124/webtestdata | 9eedf9a01dec2c157725299bda9a42e8d357ef0b | 54f6412566fce07ece912760c5caea73ede819cb | refs/heads/master | 2022-12-09T14:18:38.125191 | 2021-04-25T07:54:07 | 2021-04-25T07:54:07 | 175,773,318 | 1 | 1 | null | 2022-12-08T02:39:15 | 2019-03-15T07:49:16 | Python | UTF-8 | Python | false | false | 17,940 | py | import unittest
from webtestdata.settings import WEB_URL_TITLE,MANAGER_LOGIN_ACCOUNT,MANAGER_LOGIN_PASSWORD,MARKETING_CREATE_ACTIVITYID
from webtestdata.settings import RW_DSX_EDIT_ACTIVITYID,RW_JXZ_EDIT_ACTIVITYID,RW_YJS_EDIT_ACTIVITYID #导入任务活动ID
from webtestdata.settings import YY_DSX_EDIT_ACTIVITYID,YY_JXZ_EDIT_ACTIVITYID,YY_YJS_EDIT_ACTIVITYID #导入运营活动ID
# ----------------------------------------------------------------------
import os, django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webtestdata.settings")
django.setup()
# ----------------------------------------------------------------------
# Running a single .py file standalone raises "django.core.exceptions.AppRegistryNotReady:
# Apps aren't loaded yet."; the django.setup() block above fixes this by loading Django's apps.
from TestCaseFunction.base.activebase import ActiveWeb
from TestCaseFunction.util.operation_json import OperationJson
from TestCaseFunction.util.gettimestr import GetTimeStr
from TestCaseFunction.autotest.config.page.manager.loginPage import LoginPage #导入登录页
class TestStatusCheckClass(unittest.TestCase):
    """Selenium checks for activity-ID/state mismatches in the admin backend.

    Loading an edit/detail URL with an operations-activity ID in the wrong
    state must bounce to the activity list page; loading it with a
    mission-activity ID must pop the "Params invalid" error dialog.
    (Cleanup: removed ~150 lines of dead commented-out fixture code and
    deduplicated the per-test URL construction into _operation_url.)
    """

    @classmethod
    def setUpClass(cls):
        # No class-level fixture: every test logs in by itself in setUp().
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        # Open the manager backend and log in before every single test.
        self.activeweb = ActiveWeb()
        self.loginurl = LoginPage().pageurl
        self.activeweb.getUrl(self.loginurl)
        self.activeweb.findElementByXpathAndInput(LoginPage().account, MANAGER_LOGIN_ACCOUNT)
        self.activeweb.findElementByXpathAndInput(LoginPage().password, MANAGER_LOGIN_PASSWORD)
        self.activeweb.findElementByXpathAndClick(LoginPage().loginbutton)
        self.activeweb.delayTime(3)

    def tearDown(self):
        # Always release the browser, even when an assertion failed.
        self.activeweb.closeBrowse()

    def writexunicookie(self):
        """Inject the virtual-bank cookie and refresh so it takes effect."""
        addcookie = {'name': '.nereus.manager.settle.banks', 'value': 'QCK9GvKG8OEOh6lRUyyLlmKnHl8i3w'}
        self.activeweb.driver.add_cookie(addcookie)
        self.activeweb.driver.refresh()
        self.activeweb.delayTime(5)
        self.activeweb.outPutMyLog("写入虚拟银行cookie完成")

    @staticmethod
    def _operation_url(page, activity_id):
        """Build an operations-activity admin URL.

        *page* is one of 'modifyOnLine' (pending-launch edit page),
        'modifyOffLine' (in-progress edit page) or 'operationDetail'.
        """
        return "%s/nereus/marketing/admin/v/#/activityManage/operation/%s/%s" % (WEB_URL_TITLE, page, activity_id)

    def definestatuscheck(self, num, firsturl, secondurl, isstatus, istype, type):
        """Open *firsturl*, then *secondurl*, and assert the backend reaction.

        :param num: test identifier passed through to the element helpers (logging).
        :param isstatus: when True, assert we were redirected to the activity list page.
        :param istype: when True, assert the "Params invalid" dialog is shown.
        :param type: "1" -> mission-activity list page, "2" -> operations list page.
        """
        self.activeweb.getUrl(firsturl)
        self.activeweb.delayTime(6)
        self.activeweb.getUrl(secondurl)
        self.activeweb.delayTime(8)
        if isstatus:
            # Mismatched-state IDs must redirect to the corresponding list page.
            realurl = self.activeweb.getNowPageUrl()
            # NOTE(review): perurl is unbound for any type other than "1"/"2";
            # all current callers pass "2".
            if type == "1":
                perurl = "%s/nereus/marketing/admin/v/#/activityManage/missionAct/list" % WEB_URL_TITLE
            elif type == "2":
                perurl = "%s/nereus/marketing/admin/v/#/activityManage/operation/list" % WEB_URL_TITLE
            self.assertEqual(perurl, realurl)
        if istype:
            # Wrong-kind IDs must pop the "Params invalid" error dialog.
            realtankuangtext = self.activeweb.findElementByXpathAndReturnText(num, "/html/body/div[5]/div[2]/div/div/div/div/div[2]/div")
            pretankuangtext = "Params invalid"
            self.assertEqual(pretankuangtext, realtankuangtext)

    # ---- edit pages: wrong-STATE operations IDs -> bounced to the list page ----

    def test0001(self):
        """Pending-launch edit page + in-progress operations ID -> operations list page."""
        firsturl = self._operation_url("modifyOnLine", YY_DSX_EDIT_ACTIVITYID)
        secondurl = self._operation_url("modifyOnLine", YY_JXZ_EDIT_ACTIVITYID)
        self.definestatuscheck("test0001", firsturl, secondurl, True, False, "2")

    def test0002(self):
        """Pending-launch edit page + finished operations ID -> operations list page."""
        firsturl = self._operation_url("modifyOnLine", YY_DSX_EDIT_ACTIVITYID)
        secondurl = self._operation_url("modifyOnLine", YY_YJS_EDIT_ACTIVITYID)
        self.definestatuscheck("test0002", firsturl, secondurl, True, False, "2")

    def test0003(self):
        """In-progress edit page + pending-launch operations ID -> operations list page."""
        firsturl = self._operation_url("modifyOffLine", YY_JXZ_EDIT_ACTIVITYID)
        secondurl = self._operation_url("modifyOffLine", YY_DSX_EDIT_ACTIVITYID)
        self.definestatuscheck("test0003", firsturl, secondurl, True, False, "2")

    # NOTE: there is intentionally no test0004 (absent in the original suite).

    def test0005(self):
        """In-progress edit page + finished operations ID -> operations list page."""
        firsturl = self._operation_url("modifyOffLine", YY_JXZ_EDIT_ACTIVITYID)
        secondurl = self._operation_url("modifyOffLine", YY_YJS_EDIT_ACTIVITYID)
        self.definestatuscheck("test0005", firsturl, secondurl, True, False, "2")

    # ---- edit pages: wrong-KIND (mission) IDs -> "Params invalid" dialog ----

    def test0006(self):
        """Pending-launch edit page + pending-launch mission ID -> params-invalid dialog."""
        firsturl = self._operation_url("modifyOnLine", YY_DSX_EDIT_ACTIVITYID)
        secondurl = self._operation_url("modifyOnLine", RW_DSX_EDIT_ACTIVITYID)
        self.definestatuscheck("test0006", firsturl, secondurl, False, True, "2")

    def test0007(self):
        """Pending-launch edit page + in-progress mission ID -> params-invalid dialog."""
        firsturl = self._operation_url("modifyOnLine", YY_DSX_EDIT_ACTIVITYID)
        secondurl = self._operation_url("modifyOnLine", RW_JXZ_EDIT_ACTIVITYID)
        self.definestatuscheck("test0007", firsturl, secondurl, False, True, "2")

    def test0008(self):
        """Pending-launch edit page + finished mission ID -> params-invalid dialog."""
        firsturl = self._operation_url("modifyOnLine", YY_DSX_EDIT_ACTIVITYID)
        secondurl = self._operation_url("modifyOnLine", RW_YJS_EDIT_ACTIVITYID)
        self.definestatuscheck("test0008", firsturl, secondurl, False, True, "2")

    def test0009(self):
        """In-progress edit page + pending-launch mission ID -> params-invalid dialog."""
        firsturl = self._operation_url("modifyOffLine", YY_JXZ_EDIT_ACTIVITYID)
        secondurl = self._operation_url("modifyOffLine", RW_DSX_EDIT_ACTIVITYID)
        self.definestatuscheck("test0009", firsturl, secondurl, False, True, "2")

    def test0010(self):
        """In-progress edit page + in-progress mission ID -> params-invalid dialog."""
        firsturl = self._operation_url("modifyOffLine", YY_JXZ_EDIT_ACTIVITYID)
        secondurl = self._operation_url("modifyOffLine", RW_JXZ_EDIT_ACTIVITYID)
        self.definestatuscheck("test0010", firsturl, secondurl, False, True, "2")

    def test0011(self):
        """In-progress edit page + finished mission ID -> params-invalid dialog."""
        firsturl = self._operation_url("modifyOffLine", YY_JXZ_EDIT_ACTIVITYID)
        secondurl = self._operation_url("modifyOffLine", RW_YJS_EDIT_ACTIVITYID)
        self.definestatuscheck("test0011", firsturl, secondurl, False, True, "2")

    # ---- detail page: wrong-KIND (mission) IDs -> "Params invalid" dialog ----

    def test0012(self):
        """Pending-launch detail page + pending-launch mission ID -> params-invalid dialog."""
        firsturl = self._operation_url("operationDetail", YY_DSX_EDIT_ACTIVITYID)
        secondurl = self._operation_url("operationDetail", RW_DSX_EDIT_ACTIVITYID)
        self.definestatuscheck("test0012", firsturl, secondurl, False, True, "2")

    def test0013(self):
        """Pending-launch detail page + in-progress mission ID -> params-invalid dialog."""
        firsturl = self._operation_url("operationDetail", YY_DSX_EDIT_ACTIVITYID)
        secondurl = self._operation_url("operationDetail", RW_JXZ_EDIT_ACTIVITYID)
        self.definestatuscheck("test0013", firsturl, secondurl, False, True, "2")

    def test0014(self):
        """Pending-launch detail page + finished mission ID -> params-invalid dialog."""
        firsturl = self._operation_url("operationDetail", YY_DSX_EDIT_ACTIVITYID)
        secondurl = self._operation_url("operationDetail", RW_YJS_EDIT_ACTIVITYID)
        self.definestatuscheck("test0014", firsturl, secondurl, False, True, "2")
if __name__ == '__main__':
    # Smoke-print, then hand control to the unittest runner.
    print("hello world")
    unittest.main()
| [
"410287958@qq.com"
] | 410287958@qq.com |
73bec882ceac5d897c1bcee5afab1ea363ad254c | c31ee23b3d0c219deba0b0f462f172858df8b5ac | /chineblog/chineblog/wsgi.py | ebf3780fe13bfd1997d36b0c71cbfb80ba8c827f | [] | no_license | qinxuye/chineblog | b0afca7302c121021d0af821fa1143c42686bfa2 | 8c7df04d8cd4e3f120ef78546c3a18000909d2aa | refs/heads/master | 2021-06-04T04:48:51.404366 | 2016-10-07T14:49:02 | 2016-10-07T14:49:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | """
WSGI config for chineblog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "chineblog.settings")
# Module-level WSGI callable that application servers (gunicorn, mod_wsgi) look up.
application = get_wsgi_application()
| [
"xuye.qin@alibaba-inc.com"
] | xuye.qin@alibaba-inc.com |
af4ee720495d2af2515908848f9e97725a22992d | 93fc75b62e3fb6524f3891daf58772175fee781c | /夏丽平/第一次作业/第一次作业-金工17-1 -2017310413--夏丽平/夏丽平 金工17-1 2017310413/zy4.py | fe4cb5c18967dbeb5f85aedf3d715091ae3ff401 | [] | no_license | jingong171/jingong-homework | 13174a4a7b39b8ae6d5da103cbf0fb40766d59c1 | 542e8781f26676a62538714b92fb0bccdf41b47b | refs/heads/master | 2020-03-29T13:38:34.152280 | 2018-12-17T14:38:08 | 2018-12-17T14:38:08 | 149,974,131 | 8 | 11 | null | 2018-10-08T14:40:58 | 2018-09-23T10:32:35 | Python | UTF-8 | Python | false | false | 153 | py | list=[]
# Collect every prime below 100 by trial division.
# (Fixes: no longer shadows the builtin `list`, drops the dead `j=2`
# pre-assignment, and removes the stray semicolon after `break`.)
primes = []
for candidate in range(2, 100):
    for divisor in range(2, candidate):
        if candidate % divisor == 0:
            # Found a factor: candidate is composite.
            break
    else:
        # Inner loop finished without break -> no divisors -> prime.
        primes.append(candidate)
print(primes)
| [
"35986375+FrancisLau098@users.noreply.github.com"
] | 35986375+FrancisLau098@users.noreply.github.com |
826d074f96c4b666cf4a019492d4a6be84d6a780 | 5dd7eccc1314861babdb19b840c117da46b70c3f | /dispersing/kaitai_parsers/summoning_colors.py | fbd3a95c7c262a5e8c9c7fde02a36416f3c3c29e | [
"BSD-3-Clause"
] | permissive | matthewturk/dispersing | fba98e06e3b3a97ce819f09d485310268bbfc38b | e368e21bb7b42035b1b28f38727f4e0f880fec0b | refs/heads/main | 2023-08-05T00:22:06.065355 | 2023-07-02T20:14:13 | 2023-07-02T20:14:13 | 226,524,950 | 1 | 1 | NOASSERTION | 2023-08-02T02:05:13 | 2019-12-07T14:14:56 | Python | UTF-8 | Python | false | false | 3,272 | py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class SummoningColors(KaitaiStruct):
    """Palette file: a u1 color count followed by consecutive palettes of
    `ncolors` RGB triplets until the stream is exhausted.
    NOTE: kaitai-struct generated code (debug build: `_debug` records the
    byte offsets of every parsed field) -- edit the .ksy source, not this
    class, and recompile.
    """
    SEQ_FIELDS = ["ncolors", "palettes"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()
    def _read(self):
        self._debug['ncolors']['start'] = self._io.pos()
        self.ncolors = self._io.read_u1()
        self._debug['ncolors']['end'] = self._io.pos()
        self._debug['palettes']['start'] = self._io.pos()
        # Palette count = remaining bytes (stream size minus the 1-byte
        # header) divided by the palette size (ncolors * 3 bytes per RGB).
        self.palettes = [None] * ((self._root._io.size() - 1) // (self.ncolors * 3))
        for i in range((self._root._io.size() - 1) // (self.ncolors * 3)):
            if not 'arr' in self._debug['palettes']:
                self._debug['palettes']['arr'] = []
            self._debug['palettes']['arr'].append({'start': self._io.pos()})
            self.palettes[i] = SummoningColors.Palette(self._io, self, self._root)
            self._debug['palettes']['arr'][i]['end'] = self._io.pos()
        self._debug['palettes']['end'] = self._io.pos()
    class Palette(KaitaiStruct):
        """A run of `_root.ncolors` RGB colors."""
        SEQ_FIELDS = ["colors"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
            self._read()
        def _read(self):
            self._debug['colors']['start'] = self._io.pos()
            self.colors = [None] * (self._root.ncolors)
            for i in range(self._root.ncolors):
                if not 'arr' in self._debug['colors']:
                    self._debug['colors']['arr'] = []
                self._debug['colors']['arr'].append({'start': self._io.pos()})
                self.colors[i] = SummoningColors.Rgb(self._io, self, self._root)
                self._debug['colors']['arr'][i]['end'] = self._io.pos()
            self._debug['colors']['end'] = self._io.pos()
    class Rgb(KaitaiStruct):
        """One color: three consecutive u1 channels (red, green, blue)."""
        SEQ_FIELDS = ["red", "green", "blue"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
            self._read()
        def _read(self):
            self._debug['red']['start'] = self._io.pos()
            self.red = self._io.read_u1()
            self._debug['red']['end'] = self._io.pos()
            self._debug['green']['start'] = self._io.pos()
            self.green = self._io.read_u1()
            self._debug['green']['end'] = self._io.pos()
            self._debug['blue']['start'] = self._io.pos()
            self.blue = self._io.read_u1()
            self._debug['blue']['end'] = self._io.pos()
| [
"matthewturk@gmail.com"
] | matthewturk@gmail.com |
68060140507db7822a5adeded2f77f1d002209a9 | a8750439f200e4efc11715df797489f30e9828c6 | /CodeForces/stack_sorting.py | 3a9343d2622a99a3a07cf0a0a9e909d06efc4016 | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,447 | py | '''
Let's suppose you have an array a, a stack s (initially empty) and an array b (also initially empty).
You may perform the following operations until both a and s are empty:
Take the first element of a, push it into s and remove it from a (if a is not empty);
Take the top element from s, append it to the end of array b and remove it from s (if s is not empty).
You can perform these operations in arbitrary order.
If there exists a way to perform the operations such that array b is sorted in non-descending order in the end,
then array a is called stack-sortable.
For example, [3, 1, 2] is stack-sortable, because b will be sorted if we perform the following operations:
Remove 3 from a and push it into s;
Remove 1 from a and push it into s;
Remove 1 from s and append it to the end of b;
Remove 2 from a and push it into s;
Remove 2 from s and append it to the end of b;
Remove 3 from s and append it to the end of b.
After all these operations b = [1, 2, 3], so [3, 1, 2] is stack-sortable. [2, 3, 1] is not stack-sortable.
You are given k first elements of some permutation p of size n (recall that a permutation of size n is an array of
size n where each integer from 1 to n occurs exactly once). You have to restore the remaining n - k elements of this permutation so it is stack-sortable. If there are multiple answers, choose the answer such that p is lexicographically maximal (an array q is lexicographically greater than an array p iff there exists some integer k such that for every i < k qi = pi, and qk > pk). You may not swap or change any of first k elements of the permutation.
Print the lexicographically maximal permutation p you can obtain.
If there exists no answer then output -1.
Input
The first line contains two integers n and k (2 ≤ n ≤ 200000, 1 ≤ k < n) — the size of a desired permutation,
and the number of elements you are given, respectively.
The second line contains k integers p1, p2, ..., pk (1 ≤ pi ≤ n) — the first k elements of p.
These integers are pairwise distinct.
Output
If it is possible to restore a stack-sortable permutation p of size n such that the first k elements of p are
equal to elements given in the input, print lexicographically maximal such permutation.
Otherwise print -1.
Examples
input
5 3
3 2 1
output
3 2 1 5 4
input
5 3
2 3 1
output
-1
input
5 1
3
output
3 2 1 5 4
input
5 2
3 4
output
-1
'''
| [
"raj.lath@gmail.com"
] | raj.lath@gmail.com |
aa74dd25d623bfc3edf85fed5ea24609da6a8f6a | 35e41b591609e17e6de4a27dfe27ac0233bd58c3 | /src/forms/unused_or_obsolete/open_meta_mac.py.bak.py.bak | c35a727cea1e4067f9f3a8b38f8ddd8cd6195fc8 | [] | no_license | doclumbri666/OpenMeta-analyst- | 9fe1449f08c99b9f703fc34c02f29522cdb8a6ad | 7ed715b5fe30ffe28d553685808c6ac988975a2b | refs/heads/master | 2020-12-25T21:33:51.153178 | 2014-01-08T20:33:37 | 2014-01-08T20:33:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | bak | #------------------------------------------------------------------------------
# open_meta_mac.py
#
# this is an initscript used by cx_freeze for our mac
# distribution. we set the R_HOME variable via the sys
# library before starting up meta_form. to use this script
# just point cx_freeze to it via the initscript argument,
# eg.,
# $ build/scripts-2.7/cxfreeze /Users/byronwallace/dev/OpenMeta-analyst-/meta_form.py \
# --init-script=/Users/byronwallace/dev/OpenMeta-analyst-/open_meta_mac.py
#
# note that we also set the path to
#------------------------------------------------------------------------------
import os
import sys
import zipimport
import pdb
print "\n\nR.I.P. Steve Jobs. \n\nI'm setting your R path temporarily (this console only).\n\ns"
# issue #160 - setting path to dynamic libraries
paths = os.environ.get("DYLD_LIBRARY_PATH", "").split(os.pathsep)
if DIR_NAME not in paths:
paths.insert(0, DIR_NAME)
os.environ["DYLD_LIBRARY_PATH"] = os.pathsep.join(paths)
os.execv(sys.executable, sys.argv)
else:
paths = os.environ.get("LD_LIBRARY_PATH", "").split(os.pathsep)
if DIR_NAME not in paths:
paths.insert(0, DIR_NAME)
os.environ["LD_LIBRARY_PATH"] = os.pathsep.join(paths)
os.execv(sys.executable, sys.argv)
#os.environ["DYLD_LIBRARY_PATH"] = DIR_NAME
#os.execv(sys.executable, sys.argv)
print "dynamic library path set...I think?"
#pdb.set_trace()
os.environ["R_HOME"] = os.path.join(DIR_NAME, "R_dist", "2.10", "Resources")
sys.frozen = True
sys.path = sys.path[:4]
# *now* we can import meta_form... cross your fingers.
#import meta_form
#meta_form.start()
print "\n\nok...?\n\n"
m = __import__("__main__")
importer = zipimport.zipimporter(INITSCRIPT_ZIP_FILE_NAME)
code = importer.get_code(m.__name__)
exec code in m.__dict__
versionInfo = sys.version_info[:3]
if versionInfo >= (2, 5, 0) and versionInfo <= (2, 6, 4):
module = sys.modules.get("threading")
if module is not None:
module._shutdown()
| [
"byron.wallace@gmail.com"
] | byron.wallace@gmail.com |
0019c9d8911a07b945a54c9471c43fe8d4cc2941 | 16136f6f9578358ad6ff00101831978d20a43926 | /bhch13/bhch13exrc15.py | 7dc7b1bef78419ceceac1a59a36181c5d77bee30 | [] | no_license | Yaachaka/pyPractice1 | 567c0f8e62cb4f6bff66f1f50672a2ffbc57eeee | fcd4deda3d1094c91ef228b36dfb6124cfa86a8b | refs/heads/main | 2023-06-15T17:14:59.697340 | 2021-07-07T05:01:20 | 2021-07-07T05:01:20 | 331,349,117 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | """
bhch13exrc15.py: Write a function called root that is given a number x and an integer n and returns x^(1/n). In the function definition, set the default value of n to 2.
"""
print('*'*80)
def root(x, n=2):
return x**(1/n)
x = eval(input('Enter value of x: '))
n = eval(input('Enter value of n: '))
print('The solution upon passing only x: {:}.'.format(root(x)))
print('The solution upon passing both x and n: {:}.'.format(root(x, n)))
print('*'*80)
"""PROGRAM OUTPUT
********************************************************************************
Enter value of x: 5
Enter value of n: 3
The solution upon passing only x: 2.23606797749979.
The solution upon passing both x and n: 1.7099759466766968.
********************************************************************************
""" | [
"rosaarjuna@gmail.com"
] | rosaarjuna@gmail.com |
b22e13972500b9baa18d1bdd873447fb0805c26c | 463c053bcf3f4a7337b634890720ea9467f14c87 | /rllib/models/tf/complex_input_net.py | 02efc73860bbf01ab0f73a8e22611968dc985e00 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | pdames/ray | e8faddc4440976211a6bcead8f8b6e62c1dcda01 | 918d3601c6519d333f10910dc75eb549cbb82afa | refs/heads/master | 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 | Apache-2.0 | 2023-01-14T08:02:21 | 2020-03-06T20:59:04 | Python | UTF-8 | Python | false | false | 8,273 | py | from gym.spaces import Box, Discrete, MultiDiscrete
import numpy as np
import tree # pip install dm_tree
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2, restore_original_dimensions
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.utils import get_filter_config
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.spaces.space_utils import flatten_space
from ray.rllib.utils.tf_utils import one_hot
tf1, tf, tfv = try_import_tf()
# __sphinx_doc_begin__
class ComplexInputNetwork(TFModelV2):
    """TFModelV2 concat'ing CNN outputs to flat input(s), followed by FC(s).
    Note: This model should be used for complex (Dict or Tuple) observation
    spaces that have one or more image components.
    The data flow is as follows:
    `obs` (e.g. Tuple[img0, img1, discrete0]) -> `CNN0 + CNN1 + ONE-HOT`
    `CNN0 + CNN1 + ONE-HOT` -> concat all flat outputs -> `out`
    `out` -> (optional) FC-stack -> `out2`
    `out2` -> action (logits) and value heads.
    """
    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        # Prefer the unprocessed (original) space if a preprocessor wrapped it.
        self.original_space = (
            obs_space.original_space
            if hasattr(obs_space, "original_space")
            else obs_space
        )
        # Space used by `forward()` to restore original obs dimensions: the
        # original space when the preprocessor API is disabled, else the
        # (processed) obs_space handed in by the framework.
        self.processed_obs_space = (
            self.original_space
            if model_config.get("_disable_preprocessor_api")
            else obs_space
        )
        super().__init__(
            self.original_space, action_space, num_outputs, model_config, name
        )
        self.flattened_input_space = flatten_space(self.original_space)
        # Build the CNN(s) given obs_space's image components.
        # One sub-model per flattened component, keyed by the component index.
        self.cnns = {}
        self.one_hot = {}
        self.flatten_dims = {}
        self.flatten = {}
        concat_size = 0
        for i, component in enumerate(self.flattened_input_space):
            # Image space.
            if len(component.shape) == 3:
                config = {
                    "conv_filters": model_config["conv_filters"]
                    if "conv_filters" in model_config
                    else get_filter_config(obs_space.shape),
                    "conv_activation": model_config.get("conv_activation"),
                    "post_fcnet_hiddens": [],
                }
                self.cnns[i] = ModelCatalog.get_model_v2(
                    component,
                    action_space,
                    num_outputs=None,
                    model_config=config,
                    framework="tf",
                    name="cnn_{}".format(i),
                )
                concat_size += self.cnns[i].num_outputs
            # Discrete|MultiDiscrete inputs -> One-hot encode.
            elif isinstance(component, (Discrete, MultiDiscrete)):
                if isinstance(component, Discrete):
                    size = component.n
                else:
                    size = sum(component.nvec)
                config = {
                    "fcnet_hiddens": model_config["fcnet_hiddens"],
                    "fcnet_activation": model_config.get("fcnet_activation"),
                    "post_fcnet_hiddens": [],
                }
                self.one_hot[i] = ModelCatalog.get_model_v2(
                    Box(-1.0, 1.0, (size,), np.float32),
                    action_space,
                    num_outputs=None,
                    model_config=config,
                    framework="tf",
                    name="one_hot_{}".format(i),
                )
                concat_size += self.one_hot[i].num_outputs
            # Everything else (1D Box).
            else:
                # NOTE: np.product is a deprecated alias of np.prod in newer
                # NumPy versions -- kept as-is here to preserve the code.
                size = int(np.product(component.shape))
                config = {
                    "fcnet_hiddens": model_config["fcnet_hiddens"],
                    "fcnet_activation": model_config.get("fcnet_activation"),
                    "post_fcnet_hiddens": [],
                }
                self.flatten[i] = ModelCatalog.get_model_v2(
                    Box(-1.0, 1.0, (size,), np.float32),
                    action_space,
                    num_outputs=None,
                    model_config=config,
                    framework="tf",
                    name="flatten_{}".format(i),
                )
                self.flatten_dims[i] = size
                concat_size += self.flatten[i].num_outputs
        # Optional post-concat FC-stack.
        post_fc_stack_config = {
            "fcnet_hiddens": model_config.get("post_fcnet_hiddens", []),
            "fcnet_activation": model_config.get("post_fcnet_activation", "relu"),
        }
        self.post_fc_stack = ModelCatalog.get_model_v2(
            Box(float("-inf"), float("inf"), shape=(concat_size,), dtype=np.float32),
            self.action_space,
            None,
            post_fc_stack_config,
            framework="tf",
            name="post_fc_stack",
        )
        # Actions and value heads.
        self.logits_and_value_model = None
        self._value_out = None
        if num_outputs:
            # Action-distribution head.
            concat_layer = tf.keras.layers.Input((self.post_fc_stack.num_outputs,))
            logits_layer = tf.keras.layers.Dense(
                num_outputs,
                activation=None,
                kernel_initializer=normc_initializer(0.01),
                name="logits",
            )(concat_layer)
            # Create the value branch model.
            value_layer = tf.keras.layers.Dense(
                1,
                activation=None,
                kernel_initializer=normc_initializer(0.01),
                name="value_out",
            )(concat_layer)
            self.logits_and_value_model = tf.keras.models.Model(
                concat_layer, [logits_layer, value_layer]
            )
        else:
            # No heads requested -> this model outputs the post-FC-stack
            # feature vector itself.
            self.num_outputs = self.post_fc_stack.num_outputs
    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        """Push each obs component through its sub-model and concat results."""
        if SampleBatch.OBS in input_dict and "obs_flat" in input_dict:
            orig_obs = input_dict[SampleBatch.OBS]
        else:
            orig_obs = restore_original_dimensions(
                input_dict[SampleBatch.OBS], self.processed_obs_space, tensorlib="tf"
            )
        # Push image observations through our CNNs.
        outs = []
        for i, component in enumerate(tree.flatten(orig_obs)):
            if i in self.cnns:
                cnn_out, _ = self.cnns[i](SampleBatch({SampleBatch.OBS: component}))
                outs.append(cnn_out)
            elif i in self.one_hot:
                # Integer tensors still need one-hot encoding; float tensors
                # are assumed to already be one-hot -- TODO confirm upstream.
                if "int" in component.dtype.name:
                    one_hot_in = {
                        SampleBatch.OBS: one_hot(
                            component, self.flattened_input_space[i]
                        )
                    }
                else:
                    one_hot_in = {SampleBatch.OBS: component}
                one_hot_out, _ = self.one_hot[i](SampleBatch(one_hot_in))
                outs.append(one_hot_out)
            else:
                nn_out, _ = self.flatten[i](
                    SampleBatch(
                        {
                            SampleBatch.OBS: tf.cast(
                                tf.reshape(component, [-1, self.flatten_dims[i]]),
                                tf.float32,
                            )
                        }
                    )
                )
                outs.append(nn_out)
        # Concat all outputs and the non-image inputs.
        out = tf.concat(outs, axis=1)
        # Push through (optional) FC-stack (this may be an empty stack).
        out, _ = self.post_fc_stack(SampleBatch({SampleBatch.OBS: out}))
        # No logits/value branches.
        if not self.logits_and_value_model:
            return out, []
        # Logits- and value branches.
        logits, values = self.logits_and_value_model(out)
        self._value_out = tf.reshape(values, [-1])
        return logits, []
    @override(ModelV2)
    def value_function(self):
        """Return the value-branch output from the most recent `forward()`."""
        return self._value_out
# __sphinx_doc_end__
| [
"noreply@github.com"
] | pdames.noreply@github.com |
22de353a73e7d67235c8c5e2de8e3c62546c7dcb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02801/s775521872.py | cf142388b832a1e7f19d0475b989460a9fb8bec4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | n = input()
# Build the lowercase alphabet and print the letter that follows `n`.
alphabet = [chr(code) for code in range(ord('a'), ord('a') + 26)]
print(alphabet[alphabet.index(n) + 1])
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3ee509e0d421e69dd1400611c08ff74fb32c9166 | 0005e05b9d8b8ad0d3c3c0539b2ded9db6e9f1dd | /test/test_inline_response_200_22_result.py | fb5ddc58fea76b357717bc0b200a0de81c7004cb | [] | no_license | termicoder/codechef-client-lib | a3e3de2b300355c5daa5ed3fad03a9859af13d86 | 74d6b21787c75a987e3451751f5554e4cc6cf469 | refs/heads/master | 2020-03-27T17:58:45.298121 | 2018-09-30T18:03:14 | 2018-09-30T18:03:14 | 146,889,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | # coding: utf-8
"""
CodeChef API
CodeChef API to support different applications. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import codechef_client
from codechef_client.models.inline_response_200_22_result import InlineResponse20022Result # noqa: E501
from codechef_client.rest import ApiException
class TestInlineResponse20022Result(unittest.TestCase):
    """InlineResponse20022Result unit test stubs"""
    # Auto-generated swagger-codegen stub: the real assertions should be
    # added once the model can be constructed (see FIXME below).
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testInlineResponse20022Result(self):
        """Test InlineResponse20022Result"""
        # FIXME: construct object with mandatory attributes with example values
        # model = codechef_client.models.inline_response_200_22_result.InlineResponse20022Result()  # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| [
"diveshuttamchandani@gmail.com"
] | diveshuttamchandani@gmail.com |
0bd0c5a423a35ddb6ce09b44bc1e4e5507e2d085 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/providerhub/v20210901preview/skus_nested_resource_type_second.py | cbd7c2e1f4054011798cc402a3eaf677db6d89f1 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,513 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SkusNestedResourceTypeSecondArgs', 'SkusNestedResourceTypeSecond']
@pulumi.input_type
class SkusNestedResourceTypeSecondArgs:
    """The set of arguments for constructing a SkusNestedResourceTypeSecond resource."""
    def __init__(__self__, *,
                 nested_resource_type_first: pulumi.Input[str],
                 nested_resource_type_second: pulumi.Input[str],
                 provider_namespace: pulumi.Input[str],
                 resource_type: pulumi.Input[str],
                 properties: Optional[pulumi.Input['SkuResourcePropertiesArgs']] = None,
                 sku: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a SkusNestedResourceTypeSecond resource.
        :param pulumi.Input[str] nested_resource_type_first: The first child resource type.
        :param pulumi.Input[str] nested_resource_type_second: The second child resource type.
        :param pulumi.Input[str] provider_namespace: The name of the resource provider hosted within ProviderHub.
        :param pulumi.Input[str] resource_type: The resource type.
        :param pulumi.Input[str] sku: The SKU.
        """
        pulumi.set(__self__, "nested_resource_type_first", nested_resource_type_first)
        pulumi.set(__self__, "nested_resource_type_second", nested_resource_type_second)
        pulumi.set(__self__, "provider_namespace", provider_namespace)
        pulumi.set(__self__, "resource_type", resource_type)
        # Optional arguments are only stored when explicitly provided.
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
    @property
    @pulumi.getter(name="nestedResourceTypeFirst")
    def nested_resource_type_first(self) -> pulumi.Input[str]:
        """
        The first child resource type.
        """
        return pulumi.get(self, "nested_resource_type_first")
    @nested_resource_type_first.setter
    def nested_resource_type_first(self, value: pulumi.Input[str]):
        pulumi.set(self, "nested_resource_type_first", value)
    @property
    @pulumi.getter(name="nestedResourceTypeSecond")
    def nested_resource_type_second(self) -> pulumi.Input[str]:
        """
        The second child resource type.
        """
        return pulumi.get(self, "nested_resource_type_second")
    @nested_resource_type_second.setter
    def nested_resource_type_second(self, value: pulumi.Input[str]):
        pulumi.set(self, "nested_resource_type_second", value)
    @property
    @pulumi.getter(name="providerNamespace")
    def provider_namespace(self) -> pulumi.Input[str]:
        """
        The name of the resource provider hosted within ProviderHub.
        """
        return pulumi.get(self, "provider_namespace")
    @provider_namespace.setter
    def provider_namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "provider_namespace", value)
    @property
    @pulumi.getter(name="resourceType")
    def resource_type(self) -> pulumi.Input[str]:
        """
        The resource type.
        """
        return pulumi.get(self, "resource_type")
    @resource_type.setter
    def resource_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_type", value)
    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input['SkuResourcePropertiesArgs']]:
        """The SKU resource properties."""
        return pulumi.get(self, "properties")
    @properties.setter
    def properties(self, value: Optional[pulumi.Input['SkuResourcePropertiesArgs']]):
        pulumi.set(self, "properties", value)
    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input[str]]:
        """
        The SKU.
        """
        return pulumi.get(self, "sku")
    @sku.setter
    def sku(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sku", value)
class SkusNestedResourceTypeSecond(pulumi.CustomResource):
    """An Azure ProviderHub SKU resource for a second-level nested resource type."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 nested_resource_type_first: Optional[pulumi.Input[str]] = None,
                 nested_resource_type_second: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['SkuResourcePropertiesArgs']]] = None,
                 provider_namespace: Optional[pulumi.Input[str]] = None,
                 resource_type: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Create a SkusNestedResourceTypeSecond resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] nested_resource_type_first: The first child resource type.
        :param pulumi.Input[str] nested_resource_type_second: The second child resource type.
        :param pulumi.Input[str] provider_namespace: The name of the resource provider hosted within ProviderHub.
        :param pulumi.Input[str] resource_type: The resource type.
        :param pulumi.Input[str] sku: The SKU.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SkusNestedResourceTypeSecondArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a SkusNestedResourceTypeSecond resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param SkusNestedResourceTypeSecondArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch to _internal_init for both overload forms (args object
        # vs. keyword arguments).
        resource_args, opts = _utilities.get_resource_args_opts(SkusNestedResourceTypeSecondArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       nested_resource_type_first: Optional[pulumi.Input[str]] = None,
                       nested_resource_type_second: Optional[pulumi.Input[str]] = None,
                       properties: Optional[pulumi.Input[pulumi.InputType['SkuResourcePropertiesArgs']]] = None,
                       provider_namespace: Optional[pulumi.Input[str]] = None,
                       resource_type: Optional[pulumi.Input[str]] = None,
                       sku: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Effective constructor shared by both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource": no input props allowed.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SkusNestedResourceTypeSecondArgs.__new__(SkusNestedResourceTypeSecondArgs)
            if nested_resource_type_first is None and not opts.urn:
                raise TypeError("Missing required property 'nested_resource_type_first'")
            __props__.__dict__["nested_resource_type_first"] = nested_resource_type_first
            if nested_resource_type_second is None and not opts.urn:
                raise TypeError("Missing required property 'nested_resource_type_second'")
            __props__.__dict__["nested_resource_type_second"] = nested_resource_type_second
            __props__.__dict__["properties"] = properties
            if provider_namespace is None and not opts.urn:
                raise TypeError("Missing required property 'provider_namespace'")
            __props__.__dict__["provider_namespace"] = provider_namespace
            if resource_type is None and not opts.urn:
                raise TypeError("Missing required property 'resource_type'")
            __props__.__dict__["resource_type"] = resource_type
            __props__.__dict__["sku"] = sku
            # Output-only properties start as None and are filled by the engine.
            __props__.__dict__["name"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:providerhub:SkusNestedResourceTypeSecond"), pulumi.Alias(type_="azure-native:providerhub/v20201120:SkusNestedResourceTypeSecond"), pulumi.Alias(type_="azure-native:providerhub/v20210501preview:SkusNestedResourceTypeSecond"), pulumi.Alias(type_="azure-native:providerhub/v20210601preview:SkusNestedResourceTypeSecond")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SkusNestedResourceTypeSecond, __self__).__init__(
            'azure-native:providerhub/v20210901preview:SkusNestedResourceTypeSecond',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SkusNestedResourceTypeSecond':
        """
        Get an existing SkusNestedResourceTypeSecond resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = SkusNestedResourceTypeSecondArgs.__new__(SkusNestedResourceTypeSecondArgs)
        __props__.__dict__["name"] = None
        __props__.__dict__["properties"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["type"] = None
        return SkusNestedResourceTypeSecond(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output['outputs.SkuResourceResponseProperties']:
        """The SKU resource properties."""
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
64589a084642ed05b9d147b11ed69dfa36eb6f9b | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /apigatewayv2_write_3/api-mapping_update.py | 96fa1132d9b4a58195e8e476f296d2f2e2c06bb6 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewayv2/update-api-mapping.html
if __name__ == '__main__':
    """
    create-api-mapping : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewayv2/create-api-mapping.html
    delete-api-mapping : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewayv2/delete-api-mapping.html
    get-api-mapping : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewayv2/get-api-mapping.html
    get-api-mappings : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewayv2/get-api-mappings.html
    """
    # Help text shown to the user describing the three required parameters.
    parameter_display_string = """
    # api-id : The API identifier.
    # api-mapping-id : The API mapping identifier.
    # domain-name : The domain name.
    """
    add_option_dict = {}
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Delegate to the shared runner, which presumably invokes
    # `aws apigatewayv2 update-api-mapping` with the three prompted values.
    write_three_parameter("apigatewayv2", "update-api-mapping", "api-id", "api-mapping-id", "domain-name", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
be7b2fc4819c966c4e8aeae9a2b7673d0262e9a6 | 06cf972369c30da9d98b296bcbc26a826aa98126 | /aloisioimoveis/locations/tests/views/test_view_cities.py | 38198318512150fe06089a2c2fac463e84bfffb1 | [] | no_license | thiagorossener/aloisioimoveis | 2597422af6ac058ed3b8aa6e58f0f8913488a7fe | f9d974440f9a8cc875da8a1d4a5c885429563c1b | refs/heads/master | 2021-06-16T23:02:11.193518 | 2021-02-01T14:17:10 | 2021-02-01T14:17:10 | 94,144,023 | 18 | 17 | null | 2021-06-10T20:35:48 | 2017-06-12T21:55:18 | JavaScript | UTF-8 | Python | false | false | 787 | py | from django.test import TestCase
from model_mommy import mommy
from aloisioimoveis.locations.models import City
class CitiesViewTest(TestCase):
    """Exercises the cities listing endpoint."""
    ENDPOINT = '/api/locations/cities'
    def test_get(self):
        """GET /api/locations/cities should return status 200"""
        resp = self.client.get(self.ENDPOINT)
        self.assertEqual(200, resp.status_code)
    def test_json(self):
        """GET /api/locations/cities should return json with all cities"""
        for city_name in ('Taubaté', 'Tremembé'):
            mommy.make(City, name=city_name)
        resp = self.client.get(self.ENDPOINT)
        self.assertJSONEqual(str(resp.content, encoding='utf8'),
                             [{'id': 1, 'name': 'Taubaté'},
                              {'id': 2, 'name': 'Tremembé'}])
| [
"thiago.rossener@gmail.com"
] | thiago.rossener@gmail.com |
1058a4ceb4d0151caf6da4ca219bf6f9660dec47 | c2d3c6d5fe759f8b582ad9f3adba0c9889be7299 | /modules/demo/test_module/module.py | d48f6a87b880f959c0163d38d4e4ae233ade2ae9 | [
"Apache-2.0"
] | permissive | nepeplwu/HubModule | 592e272df32797730cec0afdbe8537359bae44cc | 590b6e617038cbdf3851de8c12cc43e44cfffe59 | refs/heads/master | 2021-01-07T00:46:50.620074 | 2020-10-19T06:41:30 | 2020-10-19T06:41:30 | 241,528,583 | 2 | 4 | Apache-2.0 | 2020-04-22T12:58:33 | 2020-02-19T04:00:21 | Python | UTF-8 | Python | false | false | 236 | py | from paddlehub.module.module import moduleinfo
@moduleinfo(
    name='test_module',
    version='1.0.0'
)
class TestModule:
    """Minimal demo PaddleHub module registered via @moduleinfo."""
    def __init__(self):
        print('This is a test module.')
    def echo(self, text):
        """Print the given text back verbatim."""
        print(text)
| [
"wuzewu@baidu.com"
] | wuzewu@baidu.com |
ce299ac7109455025df8ba76b1decea26d789703 | 0f3146f6e44e43048dc030a6ad44def9201dbd29 | /src/basket/highscore/models.py | 4256326506b4ef5ccd57752527aa60814f4ec122 | [] | no_license | socek/basket | 30c7c4be753006a33b997c17cf6348a32b420cd6 | 30ba79a35f63fd1cf4a4cdaf4b3d21b063cfc1b6 | refs/heads/master | 2016-09-10T18:40:40.334233 | 2015-03-25T21:29:00 | 2015-03-25T21:29:24 | 30,159,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from haplugin.sql import Base
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
class HighScore(Base):
    """A team's entry in the high-score table."""
    __tablename__ = 'highscores'
    id = Column(Integer, primary_key=True)
    # Position of the entry on the board (presumably its rank) -- TODO confirm.
    index = Column(Integer, nullable=False)
    # Owning team: FK column plus the ORM relationship that loads the Team row.
    team_id = Column(Integer, ForeignKey('teams.id'), nullable=False)
    team = relationship("Team")
| [
"msocek@gmail.com"
] | msocek@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.